From 46ea434e4dd3aa1f2d6d35217315c0464781d914 Mon Sep 17 00:00:00 2001 From: kaizen3031593 Date: Wed, 1 Sep 2021 13:53:21 -0400 Subject: [PATCH 01/41] initial implementation of adding outputname to hasoutput --- .../assertions/lib/private/outputs.ts | 15 ++++-- packages/@aws-cdk/assertions/lib/template.ts | 5 +- .../@aws-cdk/assertions/test/template.test.ts | 50 +++++++++++++++++-- 3 files changed, 60 insertions(+), 10 deletions(-) diff --git a/packages/@aws-cdk/assertions/lib/private/outputs.ts b/packages/@aws-cdk/assertions/lib/private/outputs.ts index 46e5a6cb1d52b..0b328ffda7fcb 100644 --- a/packages/@aws-cdk/assertions/lib/private/outputs.ts +++ b/packages/@aws-cdk/assertions/lib/private/outputs.ts @@ -12,20 +12,25 @@ export function findOutputs(inspector: StackInspector, props: any = {}): { [key: return result.matches; } -export function hasOutput(inspector: StackInspector, props: any): string | void { +export function hasOutput(inspector: StackInspector, outputName: string, props: any): string | void { const section: { [key: string]: {} } = inspector.value.Outputs; - const result = matchSection(section, props); - + const result = matchSection(filterName(section, outputName), props); if (result.match) { return; } if (result.closestResult === undefined) { - return 'No outputs found in the template'; + return `No outputs named ${outputName} found in the template.`; } return [ - `Template has ${result.analyzedCount} outputs, but none match as expected.`, + `Template has ${result.analyzedCount} outputs named ${outputName}, but none match as expected.`, formatFailure(result.closestResult), ].join('\n'); +} + +function filterName(section: { [key: string]: {} }, outputName: string): { [key: string]: {} } { + return Object.entries(section ?? {}) + .filter(([k, _]) => k === outputName) + .reduce((agg, [k, v]) => { return { ...agg, [k]: v }; }, {}); } \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/lib/template.ts b/packages/@aws-cdk/assertions/lib/template.ts index 848c46bcc295a..2f9f84c54cabb 100644 --- a/packages/@aws-cdk/assertions/lib/template.ts +++ b/packages/@aws-cdk/assertions/lib/template.ts @@ -109,10 +109,11 @@ export class Template { * Assert that an Output with the given properties exists in the CloudFormation template. * By default, performs partial matching on the resource, via the `Match.objectLike()`. * To configure different behavour, use other matchers in the `Match` class. + * @param outputName the name of the output. * @param props the output as should be expected in the template. 
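+   * @example
+   * // editor's illustration (not part of the original patch), mirroring the updated tests:
+   * const template = Template.fromStack(stack);
+   * template.hasOutput('Foo', { Value: 'Bar' });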
*/ - public hasOutput(props: any): void { - const matchError = hasOutput(this.inspector, props); + public hasOutput(outputName: string, props: any): void { + const matchError = hasOutput(this.inspector, outputName, props); if (matchError) { throw new Error(matchError); } diff --git a/packages/@aws-cdk/assertions/test/template.test.ts b/packages/@aws-cdk/assertions/test/template.test.ts index 50fb60a1a27f7..fc3c34deecd57 100644 --- a/packages/@aws-cdk/assertions/test/template.test.ts +++ b/packages/@aws-cdk/assertions/test/template.test.ts @@ -342,7 +342,7 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - expect(() => inspect.hasOutput({ Value: 'Bar' })).not.toThrow(); + expect(() => inspect.hasOutput('Foo', { Value: 'Bar' })).not.toThrow(); }); test('not matching', (done) => { @@ -357,18 +357,62 @@ describe('Template', () => { const inspect = Template.fromStack(stack); expectToThrow( - () => inspect.hasOutput({ + () => inspect.hasOutput('Foo', { Value: 'Bar', Export: { Name: 'ExportBaz' }, }), [ - /2 outputs/, + /1 outputs named Foo/, /Expected ExportBaz but received ExportBar/, ], done, ); done(); }); + + test('outputName not matching', (done) => { + const stack = new Stack(); + new CfnOutput(stack, 'Foo', { + value: 'Bar', + }); + new CfnOutput(stack, 'Fred', { + value: 'Baz', + }); + + const inspect = Template.fromStack(stack); + expectToThrow( + () => inspect.hasOutput('Fred', { + Value: 'Bar', + }), + [ + /1 outputs named Fred/, + /Expected Bar but received Baz/, + ], + done, + ); + done(); + }); + }); + + test('name not matching', (done) => { + const stack = new Stack(); + new CfnOutput(stack, 'Foo', { + value: 'Bar', + exportName: 'ExportBar', + }); + + const inspect = Template.fromStack(stack); + expectToThrow( + () => inspect.hasOutput('Fred', { + Value: 'Bar', + Export: { Name: 'ExportBar' }, + }), + [ + /No outputs named Fred found in the template./, + ], + done, + ); + done(); }); describe('findOutputs', () => { From f19896be08209f140fdf07189b9d1e4c9d601bc6 Mon Sep 17 00:00:00 2001 From: Niranjan Jayakar Date: Tue, 7 Sep 2021 12:21:58 +0100 Subject: [PATCH 02/41] chore: migrate all remaining modules from nodunit-shim to jest (#16398) Migrate remaining module to pure jest. Remove the module `nodeunit-shim`. Any modules still using nodeunit must migrate directly to jest. 
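
For reference, the mechanical shape of the migration is sketched below with a
hypothetical test body (names like `actual`/`expected` are placeholders, not
code from this PR):

    // before: nodeunit-shim
    import { nodeunitShim, Test } from 'nodeunit-shim';

    nodeunitShim({
      'does the thing'(test: Test) {
        test.deepEqual(actual, expected);
        test.done();
      },
    });

    // after: pure jest
    describe('the module', () => {
      test('does the thing', () => {
        expect(actual).toEqual(expected);
      });
    });

Assertion helpers map the same way, e.g. `test.throws(fn, /msg/)` becomes
`expect(fn).toThrow(/msg/)`, and `expect(stack).to(haveResourceLike(...))`
becomes `expect(stack).toHaveResourceLike(...)` after importing
'@aws-cdk/assert-internal/jest'.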
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-athena/package.json | 2 +- packages/@aws-cdk/aws-cloudfront/package.json | 2 +- .../aws-cloudfront/test/origin-groups.test.ts | 31 +- .../test/web-distribution.test.ts | 472 ++++----- .../@aws-cdk/aws-codepipeline/package.json | 3 +- .../aws-codepipeline/test/action.test.ts | 207 ++-- .../aws-codepipeline/test/artifacts.test.ts | 85 +- .../test/general-validation.test.ts | 46 +- .../aws-codepipeline/test/stages.test.ts | 109 +- .../aws-codepipeline/test/variables.test.ts | 88 +- packages/@aws-cdk/aws-ec2/package.json | 2 +- .../aws-ec2/test/bastion-host.test.ts | 55 +- .../@aws-cdk/aws-ec2/test/connections.test.ts | 135 ++- .../@aws-cdk/aws-ec2/test/instance.test.ts | 135 ++- .../aws-ec2/test/launch-template.test.ts | 105 +- .../aws-ec2/test/network-utils.test.ts | 177 ++-- .../aws-ec2/test/security-group.test.ts | 982 +++++++++--------- .../@aws-cdk/aws-ec2/test/userdata.test.ts | 245 +++-- packages/@aws-cdk/aws-ec2/test/volume.test.ts | 152 ++- .../aws-ec2/test/vpc-endpoint-service.test.ts | 51 +- .../aws-ec2/test/vpc-endpoint.test.ts | 305 +++--- .../aws-ec2/test/vpc-flow-logs.test.ts | 127 ++- .../aws-ec2/test/vpc.from-lookup.test.ts | 99 +- packages/@aws-cdk/aws-ec2/test/vpc.test.ts | 902 ++++++++-------- packages/@aws-cdk/aws-ec2/test/vpn.test.ts | 153 ++- packages/@aws-cdk/aws-ecr-assets/package.json | 2 +- packages/@aws-cdk/aws-rds/package.json | 2 +- .../aws-rds/test/cluster-engine.test.ts | 82 +- .../aws-rds/test/database-secret.test.ts | 39 +- .../test/database-secretmanager.test.ts | 16 +- .../aws-rds/test/instance-engine.test.ts | 207 ++-- .../aws-rds/test/option-group.test.ts | 49 +- .../aws-rds/test/parameter-group.test.ts | 55 +- packages/@aws-cdk/aws-rds/test/proxy.test.ts | 152 +-- .../aws-rds/test/serverless-cluster.test.ts | 318 +++--- .../sql-server.instance-engine.test.ts | 29 +- .../aws-rds/test/subnet-group.test.ts | 73 +- packages/@aws-cdk/core/package.json | 2 +- .../@aws-cdk/core/test/annotations.test.ts | 33 +- packages/@aws-cdk/core/test/app.test.ts | 135 ++- packages/@aws-cdk/core/test/arn.test.ts | 195 ++-- packages/@aws-cdk/core/test/aspect.test.ts | 43 +- packages/@aws-cdk/core/test/assets.test.ts | 91 +- packages/@aws-cdk/core/test/bundling.test.ts | 185 ++-- packages/@aws-cdk/core/test/cfn-json.test.ts | 39 +- .../@aws-cdk/core/test/cfn-parameter.test.ts | 21 +- .../@aws-cdk/core/test/cfn-resource.test.ts | 83 +- packages/@aws-cdk/core/test/condition.test.ts | 21 +- packages/@aws-cdk/core/test/construct.test.ts | 425 ++++---- packages/@aws-cdk/core/test/context.test.ts | 69 +- .../core/test/cross-environment-token.test.ts | 57 +- .../custom-resource-provider.test.ts | 55 +- .../nodejs-entrypoint.test.ts | 75 +- .../core/test/custom-resource.test.ts | 53 +- packages/@aws-cdk/core/test/duration.test.ts | 277 +++-- .../core/test/dynamic-reference.test.ts | 11 +- .../@aws-cdk/core/test/environment.test.ts | 97 +- .../@aws-cdk/core/test/expiration.test.ts | 73 +- .../@aws-cdk/core/test/feature-flags.test.ts | 31 +- packages/@aws-cdk/core/test/fn.test.ts | 129 ++- .../@aws-cdk/core/test/fs/fs-copy.test.ts | 43 +- .../core/test/fs/fs-fingerprint.test.ts | 93 +- packages/@aws-cdk/core/test/fs/fs.test.ts | 33 +- packages/@aws-cdk/core/test/fs/utils.test.ts | 143 ++- packages/@aws-cdk/core/test/include.test.ts | 35 +- .../@aws-cdk/core/test/logical-id.test.ts | 137 ++- packages/@aws-cdk/core/test/mappings.test.ts | 49 +- 
packages/@aws-cdk/core/test/output.test.ts | 49 +- packages/@aws-cdk/core/test/parameter.test.ts | 19 +- .../private/physical-name-generator.test.ts | 99 +- .../core/test/private/tree-metadata.test.ts | 65 +- packages/@aws-cdk/core/test/resource.test.ts | 271 +++-- packages/@aws-cdk/core/test/rule.test.ts | 19 +- .../@aws-cdk/core/test/secret-value.test.ts | 85 +- packages/@aws-cdk/core/test/size.test.ts | 150 ++- .../new-style-synthesis.test.ts | 129 ++- packages/@aws-cdk/core/test/stage.test.ts | 165 ++- packages/@aws-cdk/core/test/staging.test.ts | 449 ++++---- packages/@aws-cdk/core/test/synthesis.test.ts | 77 +- .../@aws-cdk/core/test/tag-aspect.test.ts | 129 ++- .../@aws-cdk/core/test/tag-manager.test.ts | 143 ++- packages/@aws-cdk/core/test/tokens.test.ts | 513 +++++---- packages/@aws-cdk/core/test/util.test.ts | 178 ++-- tools/nodeunit-shim/.gitignore | 13 - tools/nodeunit-shim/README.md | 80 -- tools/nodeunit-shim/index.ts | 91 -- tools/nodeunit-shim/package.json | 27 - tools/nodeunit-shim/tsconfig.json | 20 - 88 files changed, 5452 insertions(+), 5746 deletions(-) delete mode 100644 tools/nodeunit-shim/.gitignore delete mode 100644 tools/nodeunit-shim/README.md delete mode 100644 tools/nodeunit-shim/index.ts delete mode 100644 tools/nodeunit-shim/package.json delete mode 100644 tools/nodeunit-shim/tsconfig.json diff --git a/packages/@aws-cdk/aws-athena/package.json b/packages/@aws-cdk/aws-athena/package.json index 4248daf56fb68..57bc9bfd3a677 100644 --- a/packages/@aws-cdk/aws-athena/package.json +++ b/packages/@aws-cdk/aws-athena/package.json @@ -76,7 +76,7 @@ "@types/jest": "^26.0.24", "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", - "nodeunit-shim": "0.0.0", + "jest": "^26.6.3", "cfn2ts": "0.0.0", "pkglint": "0.0.0", "@aws-cdk/assertions": "0.0.0" diff --git a/packages/@aws-cdk/aws-cloudfront/package.json b/packages/@aws-cdk/aws-cloudfront/package.json index 58a6b35ef281f..36d4ce40952f5 100644 --- a/packages/@aws-cdk/aws-cloudfront/package.json +++ b/packages/@aws-cdk/aws-cloudfront/package.json @@ -78,7 +78,7 @@ "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", "cfn2ts": "0.0.0", - "nodeunit-shim": "0.0.0", + "jest": "^26.6.3", "pkglint": "0.0.0", "@aws-cdk/assert-internal": "0.0.0" }, diff --git a/packages/@aws-cdk/aws-cloudfront/test/origin-groups.test.ts b/packages/@aws-cdk/aws-cloudfront/test/origin-groups.test.ts index 6ffd4b5bd9e8e..891fd10eaff37 100644 --- a/packages/@aws-cdk/aws-cloudfront/test/origin-groups.test.ts +++ b/packages/@aws-cdk/aws-cloudfront/test/origin-groups.test.ts @@ -1,11 +1,10 @@ -import { expect, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as s3 from '@aws-cdk/aws-s3'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CloudFrontWebDistribution, FailoverStatusCode } from '../lib'; -nodeunitShim({ - 'Distribution with custom origin failover'(test: Test) { +describe('origin group', () => { + test('Distribution with custom origin failover', () => { const stack = new cdk.Stack(); new CloudFrontWebDistribution(stack, 'ADistribution', { @@ -29,8 +28,8 @@ nodeunitShim({ ], }); - expect(stack).to( - haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack) + .toHaveResourceLike('AWS::CloudFront::Distribution', { DistributionConfig: { OriginGroups: { Items: [ @@ -105,13 +104,12 @@ nodeunitShim({ }, ], }, - }), - ); + }); - test.done(); - }, - 'Distribution with s3 origin failover'(test: Test) { + }); + + test('Distribution with 
s3 origin failover', () => { const stack = new cdk.Stack(); new CloudFrontWebDistribution(stack, 'ADistribution', { @@ -141,8 +139,8 @@ nodeunitShim({ ], }); - expect(stack).to( - haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack) + .toHaveResourceLike('AWS::CloudFront::Distribution', { DistributionConfig: { OriginGroups: { Items: [ @@ -226,9 +224,8 @@ nodeunitShim({ }, ], }, - }), - ); + }); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/aws-cloudfront/test/web-distribution.test.ts b/packages/@aws-cdk/aws-cloudfront/test/web-distribution.test.ts index 6d799b2f89cf9..9e40e24e3fe42 100644 --- a/packages/@aws-cdk/aws-cloudfront/test/web-distribution.test.ts +++ b/packages/@aws-cdk/aws-cloudfront/test/web-distribution.test.ts @@ -1,9 +1,9 @@ -import { ABSENT, expect, haveResource, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; +import { ABSENT } from '@aws-cdk/assert-internal'; import * as certificatemanager from '@aws-cdk/aws-certificatemanager'; import * as lambda from '@aws-cdk/aws-lambda'; import * as s3 from '@aws-cdk/aws-s3'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnDistribution, CloudFrontWebDistribution, @@ -33,9 +33,9 @@ dlhHmnVegyPNjP9dNqZ7zwNqMEPOPnS/NOHbJj1KYKpn1f8pPNycQ5MQCntKGnSj NQIDAQAB -----END PUBLIC KEY-----`; -nodeunitShim({ +describe('web distribution', () => { - 'distribution with custom origin adds custom origin'(test: Test) { + test('distribution with custom origin adds custom origin', () => { const stack = new cdk.Stack(); new CloudFrontWebDistribution(stack, 'AnAmazingWebsiteProbably', { @@ -57,7 +57,7 @@ nodeunitShim({ ], }); - expect(stack).toMatch( + expect(stack).toMatchTemplate( { 'Resources': { 'AnAmazingWebsiteProbablyCFDistribution47E3983B': { @@ -126,10 +126,10 @@ nodeunitShim({ }, ); - test.done(); - }, - 'most basic distribution'(test: Test) { + }); + + test('most basic distribution', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -148,7 +148,7 @@ nodeunitShim({ ], }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ 'Resources': { 'Bucket83908E77': { 'Type': 'AWS::S3::Bucket', @@ -203,10 +203,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'can disable distribution'(test: Test) { + }); + + test('can disable distribution', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -226,7 +226,7 @@ nodeunitShim({ ], }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ 'Resources': { 'Bucket83908E77': { 'Type': 'AWS::S3::Bucket', @@ -281,10 +281,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'ensure long comments will not break the distribution'(test: Test) { + }); + + test('ensure long comments will not break the distribution', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -305,7 +305,7 @@ added the ellipsis so a user would know there was more to read and everything be ], }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ Resources: { Bucket83908E77: { Type: 'AWS::S3::Bucket', @@ -353,10 +353,10 @@ added the ellipsis so a user would know there was more to ...`, }, }, }); - test.done(); - }, - 'distribution with bucket and OAI'(test: Test) { + }); + + test('distribution with bucket and OAI', () => { const stack = new cdk.Stack(); const s3BucketSource = new s3.Bucket(stack, 'Bucket'); const originAccessIdentity = new OriginAccessIdentity(stack, 
'OAI'); @@ -368,7 +368,7 @@ added the ellipsis so a user would know there was more to ...`, }], }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { DistributionConfig: { Origins: [ { @@ -389,9 +389,9 @@ added the ellipsis so a user would know there was more to ...`, }, ], }, - })); + }); - expect(stack).to(haveResourceLike('AWS::S3::BucketPolicy', { + expect(stack).toHaveResourceLike('AWS::S3::BucketPolicy', { PolicyDocument: { Statement: [{ Action: 's3:GetObject', @@ -403,13 +403,13 @@ added the ellipsis so a user would know there was more to ...`, }, }], }, - })); + }); - test.done(); - }, + }); - 'distribution with trusted signers on default distribution'(test: Test) { + + test('distribution with trusted signers on default distribution', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); const pubKey = new PublicKey(stack, 'MyPubKey', { @@ -440,7 +440,7 @@ added the ellipsis so a user would know there was more to ...`, ], }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ 'Resources': { 'Bucket83908E77': { 'Type': 'AWS::S3::Bucket', @@ -524,10 +524,10 @@ added the ellipsis so a user would know there was more to ...`, }, }, }); - test.done(); - }, - 'distribution with ViewerProtocolPolicy set to a non-default value'(test: Test) { + }); + + test('distribution with ViewerProtocolPolicy set to a non-default value', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -547,7 +547,7 @@ added the ellipsis so a user would know there was more to ...`, ], }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ 'Resources': { 'Bucket83908E77': { 'Type': 'AWS::S3::Bucket', @@ -602,10 +602,10 @@ added the ellipsis so a user would know there was more to ...`, }, }, }); - test.done(); - }, - 'distribution with disabled compression'(test: Test) { + }); + + test('distribution with disabled compression', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -625,7 +625,7 @@ added the ellipsis so a user would know there was more to ...`, ], }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ 'Resources': { 'Bucket83908E77': { 'Type': 'AWS::S3::Bucket', @@ -680,10 +680,10 @@ added the ellipsis so a user would know there was more to ...`, }, }, }); - test.done(); - }, - 'distribution with CloudFront function-association'(test: Test) { + }); + + test('distribution with CloudFront function-association', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -708,7 +708,7 @@ added the ellipsis so a user would know there was more to ...`, ], }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'DefaultCacheBehavior': { 'FunctionAssociations': [ @@ -724,12 +724,12 @@ added the ellipsis so a user would know there was more to ...`, ], }, }, - })); + }); + - test.done(); - }, + }); - 'distribution with resolvable lambda-association'(test: Test) { + test('distribution with resolvable lambda-association', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -759,7 +759,7 @@ added the ellipsis so a user would know there was more to ...`, ], }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + 
expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'DefaultCacheBehavior': { 'LambdaFunctionAssociations': [ @@ -773,12 +773,12 @@ added the ellipsis so a user would know there was more to ...`, ], }, }, - })); + }); + - test.done(); - }, + }); - 'associate a lambda with removable env vars'(test: Test) { + test('associate a lambda with removable env vars', () => { const app = new cdk.App(); const stack = new cdk.Stack(app, 'Stack'); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -809,14 +809,14 @@ added the ellipsis so a user would know there was more to ...`, ], }); - expect(stack).to(haveResource('AWS::Lambda::Function', { + expect(stack).toHaveResource('AWS::Lambda::Function', { Environment: ABSENT, - })); + }); - test.done(); - }, - 'throws when associating a lambda with incompatible env vars'(test: Test) { + }); + + test('throws when associating a lambda with incompatible env vars', () => { const app = new cdk.App(); const stack = new cdk.Stack(app, 'Stack'); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -849,19 +849,19 @@ added the ellipsis so a user would know there was more to ...`, ], }); - test.throws(() => app.synth(), /KEY/); + expect(() => app.synth()).toThrow(/KEY/); + - test.done(); - }, + }); - 'throws when associating a lambda with includeBody and a response event type'(test: Test) { + test('throws when associating a lambda with includeBody and a response event type', () => { const app = new cdk.App(); const stack = new cdk.Stack(app, 'Stack'); const sourceBucket = new s3.Bucket(stack, 'Bucket'); const fnVersion = lambda.Version.fromVersionArn(stack, 'Version', 'arn:aws:lambda:testregion:111111111111:function:myTestFun:v1'); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'AnAmazingWebsiteProbably', { originConfigs: [ { @@ -881,12 +881,12 @@ added the ellipsis so a user would know there was more to ...`, }, ], }); - }, /'includeBody' can only be true for ORIGIN_REQUEST or VIEWER_REQUEST event types./); + }).toThrow(/'includeBody' can only be true for ORIGIN_REQUEST or VIEWER_REQUEST event types./); - test.done(); - }, - 'distribution has a defaultChild'(test: Test) { + }); + + test('distribution has a defaultChild', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -901,11 +901,11 @@ added the ellipsis so a user would know there was more to ...`, ], }); - test.ok(distribution.node.defaultChild instanceof CfnDistribution); - test.done(); - }, + expect(distribution.node.defaultChild instanceof CfnDistribution).toEqual(true); + + }); - 'allows multiple aliasConfiguration CloudFrontWebDistribution per stack'(test: Test) { + test('allows multiple aliasConfiguration CloudFrontWebDistribution per stack', () => { const stack = new cdk.Stack(); const s3BucketSource = new s3.Bucket(stack, 'Bucket'); @@ -923,7 +923,7 @@ added the ellipsis so a user would know there was more to ...`, aliasConfiguration: { acmCertRef: 'another_acm_ref', names: ['ftp.example.com'] }, }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': ['www.example.com'], 'ViewerCertificate': { @@ -931,9 +931,9 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'sni-only', }, }, - })); + }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', 
{ 'DistributionConfig': { 'Aliases': ['ftp.example.com'], 'ViewerCertificate': { @@ -941,13 +941,13 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'sni-only', }, }, - })); - test.done(); - }, + }); - 'viewerCertificate': { - 'acmCertificate': { - 'base usage'(test: Test) { + }); + + describe('viewerCertificate', () => { + describe('acmCertificate', () => { + test('base usage', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -963,7 +963,7 @@ added the ellipsis so a user would know there was more to ...`, viewerCertificate: ViewerCertificate.fromAcmCertificate(certificate), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': [], 'ViewerCertificate': { @@ -973,11 +973,11 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'sni-only', }, }, - })); + }); - test.done(); - }, - 'imported certificate fromCertificateArn'(test: Test) { + + }); + test('imported certificate fromCertificateArn', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -993,7 +993,7 @@ added the ellipsis so a user would know there was more to ...`, viewerCertificate: ViewerCertificate.fromAcmCertificate(certificate), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': [], 'ViewerCertificate': { @@ -1001,11 +1001,11 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'sni-only', }, }, - })); + }); - test.done(); - }, - 'advanced usage'(test: Test) { + + }); + test('advanced usage', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1025,7 +1025,7 @@ added the ellipsis so a user would know there was more to ...`, }), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': ['example.com', 'www.example.com'], 'ViewerCertificate': { @@ -1036,13 +1036,13 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'vip', }, }, - })); + }); - test.done(); - }, - }, - 'iamCertificate': { - 'base usage'(test: Test) { + + }); + }); + describe('iamCertificate', () => { + test('base usage', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1054,7 +1054,7 @@ added the ellipsis so a user would know there was more to ...`, viewerCertificate: ViewerCertificate.fromIamCertificate('test'), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': [], 'ViewerCertificate': { @@ -1062,11 +1062,11 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'sni-only', }, }, - })); + }); - test.done(); - }, - 'advanced usage'(test: Test) { + + }); + test('advanced usage', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1082,7 +1082,7 @@ added the ellipsis so a user would know there was more to ...`, }), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 
'Aliases': ['example.com'], 'ViewerCertificate': { @@ -1091,13 +1091,13 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'vip', }, }, - })); + }); - test.done(); - }, - }, - 'cloudFrontDefaultCertificate': { - 'base usage'(test: Test) { + + }); + }); + describe('cloudFrontDefaultCertificate', () => { + test('base usage', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1109,18 +1109,18 @@ added the ellipsis so a user would know there was more to ...`, viewerCertificate: ViewerCertificate.fromCloudFrontDefaultCertificate(), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': [], 'ViewerCertificate': { 'CloudFrontDefaultCertificate': true, }, }, - })); + }); - test.done(); - }, - 'aliases are set'(test: Test) { + + }); + test('aliases are set', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1132,24 +1132,24 @@ added the ellipsis so a user would know there was more to ...`, viewerCertificate: ViewerCertificate.fromCloudFrontDefaultCertificate('example.com', 'www.example.com'), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': ['example.com', 'www.example.com'], 'ViewerCertificate': { 'CloudFrontDefaultCertificate': true, }, }, - })); + }); - test.done(); - }, - }, - 'errors': { - 'throws if both deprecated aliasConfiguration and viewerCertificate'(test: Test) { + + }); + }); + describe('errors', () => { + test('throws if both deprecated aliasConfiguration and viewerCertificate', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'AnAmazingWebsiteProbably', { originConfigs: [{ s3OriginSource: { s3BucketSource: sourceBucket }, @@ -1158,15 +1158,15 @@ added the ellipsis so a user would know there was more to ...`, aliasConfiguration: { acmCertRef: 'test', names: ['ftp.example.com'] }, viewerCertificate: ViewerCertificate.fromCloudFrontDefaultCertificate('example.com', 'www.example.com'), }); - }, /You cannot set both aliasConfiguration and viewerCertificate properties/); + }).toThrow(/You cannot set both aliasConfiguration and viewerCertificate properties/); - test.done(); - }, - 'throws if invalid security policy for SSL method'(test: Test) { + + }); + test('throws if invalid security policy for SSL method', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'AnAmazingWebsiteProbably', { originConfigs: [{ s3OriginSource: { s3BucketSource: sourceBucket }, @@ -1177,12 +1177,12 @@ added the ellipsis so a user would know there was more to ...`, sslMethod: SSLMethod.VIP, }), }); - }, /TLSv1.1_2016 is not compabtible with sslMethod vip./); + }).toThrow(/TLSv1.1_2016 is not compabtible with sslMethod vip./); - test.done(); - }, + + }); // FIXME https://github.com/aws/aws-cdk/issues/4724 - 'does not throw if acmCertificate explicitly not in us-east-1'(test: Test) { + test('does not throw if acmCertificate explicitly not in us-east-1', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1198,7 +1198,7 @@ added the ellipsis so a user would know 
there was more to ...`, viewerCertificate: ViewerCertificate.fromAcmCertificate(certificate), }); - expect(stack).to(haveResourceLike('AWS::CloudFront::Distribution', { + expect(stack).toHaveResourceLike('AWS::CloudFront::Distribution', { 'DistributionConfig': { 'Aliases': [], 'ViewerCertificate': { @@ -1206,14 +1206,14 @@ added the ellipsis so a user would know there was more to ...`, 'SslSupportMethod': 'sni-only', }, }, - })); + }); - test.done(); - }, - }, - }, - 'edgelambda.amazonaws.com is added to the trust policy of lambda'(test: Test) { + }); + }); + }); + + test('edgelambda.amazonaws.com is added to the trust policy of lambda', () => { // GIVEN const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1245,7 +1245,7 @@ added the ellipsis so a user would know there was more to ...`, }); // THEN - expect(stack).to(haveResource('AWS::IAM::Role', { + expect(stack).toHaveResource('AWS::IAM::Role', { AssumeRolePolicyDocument: { 'Statement': [ { @@ -1265,11 +1265,11 @@ added the ellipsis so a user would know there was more to ...`, ], 'Version': '2012-10-17', }, - })); - test.done(); - }, + }); - 'edgelambda.amazonaws.com is not added to lambda role for imported functions'(test: Test) { + }); + + test('edgelambda.amazonaws.com is not added to lambda role for imported functions', () => { // GIVEN const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1295,13 +1295,13 @@ added the ellipsis so a user would know there was more to ...`, ], }); - expect(stack).notTo(haveResourceLike('AWS::IAM::Role')); - test.done(); - }, + expect(stack).not.toHaveResourceLike('AWS::IAM::Role'); + + }); - 'geo restriction': { - 'success': { - 'allowlist'(test: Test) { + describe('geo restriction', () => { + describe('success', () => { + test('allowlist', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1313,7 +1313,7 @@ added the ellipsis so a user would know there was more to ...`, geoRestriction: GeoRestriction.allowlist('US', 'UK'), }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ 'Resources': { 'Bucket83908E77': { 'Type': 'AWS::S3::Bucket', @@ -1375,9 +1375,9 @@ added the ellipsis so a user would know there was more to ...`, }, }); - test.done(); - }, - 'denylist'(test: Test) { + + }); + test('denylist', () => { const stack = new cdk.Stack(); const sourceBucket = new s3.Bucket(stack, 'Bucket'); @@ -1389,7 +1389,7 @@ added the ellipsis so a user would know there was more to ...`, geoRestriction: GeoRestriction.denylist('US'), }); - expect(stack).toMatch({ + expect(stack).toMatchTemplate({ 'Resources': { 'Bucket83908E77': { 'Type': 'AWS::S3::Bucket', @@ -1451,40 +1451,40 @@ added the ellipsis so a user would know there was more to ...`, }, }); - test.done(); - }, - }, - 'error': { - 'throws if locations is empty array'(test: Test) { - test.throws(() => { + + }); + }); + describe('error', () => { + test('throws if locations is empty array', () => { + expect(() => { GeoRestriction.allowlist(); - }, /Should provide at least 1 location/); + }).toThrow(/Should provide at least 1 location/); - test.throws(() => { + expect(() => { GeoRestriction.denylist(); - }, /Should provide at least 1 location/); + }).toThrow(/Should provide at least 1 location/); - test.done(); - }, - 'throws if locations format is wrong'(test: Test) { - test.throws(() => { + + }); + test('throws if locations format is wrong', () => { + expect(() => { GeoRestriction.allowlist('us'); - }, /Invalid location format for 
location: us, location should be two-letter and uppercase country ISO 3166-1-alpha-2 code/); + }).toThrow(/Invalid location format for location: us, location should be two-letter and uppercase country ISO 3166-1-alpha-2 code/); - test.throws(() => { + expect(() => { GeoRestriction.denylist('us'); - }, /Invalid location format for location: us, location should be two-letter and uppercase country ISO 3166-1-alpha-2 code/); + }).toThrow(/Invalid location format for location: us, location should be two-letter and uppercase country ISO 3166-1-alpha-2 code/); - test.done(); - }, - }, - }, - 'Connection behaviors between CloudFront and your origin': { - 'success': { - 'connectionAttempts = 1'(test: Test) { + }); + }); + }); + + describe('Connection behaviors between CloudFront and your origin', () => { + describe('success', () => { + test('connectionAttempts = 1', () => { const stack = new cdk.Stack(); - test.doesNotThrow(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1492,12 +1492,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); - test.done(); - }, - '3 = connectionAttempts'(test: Test) { + }).not.toThrow(/connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); + + }); + test('3 = connectionAttempts', () => { const stack = new cdk.Stack(); - test.doesNotThrow(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1505,12 +1505,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); - test.done(); - }, - 'connectionTimeout = 1'(test: Test) { + }).not.toThrow(/connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); + + }); + test('connectionTimeout = 1', () => { const stack = new cdk.Stack(); - test.doesNotThrow(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1518,12 +1518,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionTimeout: You can specify a number of seconds between 1 and 10 (inclusive)./); - test.done(); - }, - '10 = connectionTimeout'(test: Test) { + }).not.toThrow(/connectionTimeout: You can specify a number of seconds between 1 and 10 (inclusive)./); + + }); + test('10 = connectionTimeout', () => { const stack = new cdk.Stack(); - test.doesNotThrow(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1531,14 +1531,14 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionTimeout: You can specify a number of seconds between 1 and 10 (inclusive)./); - test.done(); - }, - }, - 'errors': { - 'connectionAttempts = 1.1'(test: Test) { + }).not.toThrow(/connectionTimeout: You can specify a number of seconds between 1 and 10 (inclusive)./); + + }); + }); + describe('errors', () => { + test('connectionAttempts = 1.1', () => { const stack = new cdk.Stack(); - test.throws(() => { + expect(() => 
{ new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1546,12 +1546,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); - test.done(); - }, - 'connectionAttempts = -1'(test: Test) { + }).toThrow(/connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); + + }); + test('connectionAttempts = -1', () => { const stack = new cdk.Stack(); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1559,12 +1559,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); - test.done(); - }, - 'connectionAttempts < 1'(test: Test) { + }).toThrow(/connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); + + }); + test('connectionAttempts < 1', () => { const stack = new cdk.Stack(); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1572,12 +1572,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); - test.done(); - }, - '3 < connectionAttempts'(test: Test) { + }).toThrow(/connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); + + }); + test('3 < connectionAttempts', () => { const stack = new cdk.Stack(); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1585,12 +1585,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); - test.done(); - }, - 'connectionTimeout = 1.1'(test: Test) { + }).toThrow(/connectionAttempts: You can specify 1, 2, or 3 as the number of attempts./); + + }); + test('connectionTimeout = 1.1', () => { const stack = new cdk.Stack(); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1598,12 +1598,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionTimeout: You can specify a number of seconds between 1 and 10 \(inclusive\)./); - test.done(); - }, - 'connectionTimeout < 1'(test: Test) { + }).toThrow(/connectionTimeout: You can specify a number of seconds between 1 and 10 \(inclusive\)./); + + }); + test('connectionTimeout < 1', () => { const stack = new cdk.Stack(); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1611,12 +1611,12 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionTimeout: You can specify a number of seconds between 1 and 10 \(inclusive\)./); - test.done(); - }, - '10 < connectionTimeout'(test: Test) { + 
}).toThrow(/connectionTimeout: You can specify a number of seconds between 1 and 10 \(inclusive\)./); + + }); + test('10 < connectionTimeout', () => { const stack = new cdk.Stack(); - test.throws(() => { + expect(() => { new CloudFrontWebDistribution(stack, 'Distribution', { originConfigs: [{ behaviors: [{ isDefaultBehavior: true }], @@ -1624,22 +1624,22 @@ added the ellipsis so a user would know there was more to ...`, customOriginSource: { domainName: 'myorigin.com' }, }], }); - }, /connectionTimeout: You can specify a number of seconds between 1 and 10 \(inclusive\)./); - test.done(); - }, - }, - }, + }).toThrow(/connectionTimeout: You can specify a number of seconds between 1 and 10 \(inclusive\)./); - 'existing distributions can be imported'(test: Test) { + }); + }); + }); + + test('existing distributions can be imported', () => { const stack = new cdk.Stack(); const dist = CloudFrontWebDistribution.fromDistributionAttributes(stack, 'ImportedDist', { domainName: 'd111111abcdef8.cloudfront.net', distributionId: '012345ABCDEF', }); - test.equals(dist.distributionDomainName, 'd111111abcdef8.cloudfront.net'); - test.equals(dist.distributionId, '012345ABCDEF'); + expect(dist.distributionDomainName).toEqual('d111111abcdef8.cloudfront.net'); + expect(dist.distributionId).toEqual('012345ABCDEF'); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/aws-codepipeline/package.json b/packages/@aws-cdk/aws-codepipeline/package.json index 95b30ad58ae03..0f6ff83649093 100644 --- a/packages/@aws-cdk/aws-codepipeline/package.json +++ b/packages/@aws-cdk/aws-codepipeline/package.json @@ -83,9 +83,8 @@ "@types/nodeunit": "^0.0.32", "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", - "nodeunit-shim": "0.0.0", "cfn2ts": "0.0.0", - "nodeunit": "^0.11.3", + "jest": "^26.6.3", "pkglint": "0.0.0", "@aws-cdk/assert-internal": "0.0.0" }, diff --git a/packages/@aws-cdk/aws-codepipeline/test/action.test.ts b/packages/@aws-cdk/aws-codepipeline/test/action.test.ts index 8f3e81736d0b2..69a5839385fcd 100644 --- a/packages/@aws-cdk/aws-codepipeline/test/action.test.ts +++ b/packages/@aws-cdk/aws-codepipeline/test/action.test.ts @@ -1,7 +1,6 @@ -import { expect, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as iam from '@aws-cdk/aws-iam'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as codepipeline from '../lib'; import * as validations from '../lib/private/validation'; import { FakeBuildAction } from './fake-build-action'; @@ -9,61 +8,61 @@ import { FakeSourceAction } from './fake-source-action'; /* eslint-disable quote-props */ -nodeunitShim({ - 'artifact bounds validation': { +describe('action', () => { + describe('artifact bounds validation', () => { - 'artifacts count exceed maximum'(test: Test) { + test('artifacts count exceed maximum', () => { const result = boundsValidationResult(1, 0, 0); - test.deepEqual(result.length, 1); - test.ok(result[0].match(/cannot have more than 0/), 'the validation should have failed'); - test.done(); - }, + expect(result.length).toEqual(1); + expect(result[0]).toMatch(/cannot have more than 0/); - 'artifacts count below minimum'(test: Test) { + }); + + test('artifacts count below minimum', () => { const result = boundsValidationResult(1, 2, 2); - test.deepEqual(result.length, 1); - test.ok(result[0].match(/must have at least 2/), 'the validation should have failed'); - test.done(); - }, + expect(result.length).toEqual(1); + expect(result[0]).toMatch(/must 
have at least 2/); - 'artifacts count within bounds'(test: Test) { + }); + + test('artifacts count within bounds', () => { const result = boundsValidationResult(1, 0, 2); - test.deepEqual(result.length, 0); - test.done(); - }, - }, + expect(result.length).toEqual(0); - 'action type validation': { + }); + }); - 'must be source and is source'(test: Test) { + describe('action type validation', () => { + + test('must be source and is source', () => { const result = validations.validateSourceAction(true, codepipeline.ActionCategory.SOURCE, 'test action', 'test stage'); - test.deepEqual(result.length, 0); - test.done(); - }, + expect(result.length).toEqual(0); + + }); - 'must be source and is not source'(test: Test) { + test('must be source and is not source', () => { const result = validations.validateSourceAction(true, codepipeline.ActionCategory.DEPLOY, 'test action', 'test stage'); - test.deepEqual(result.length, 1); - test.ok(result[0].match(/may only contain Source actions/), 'the validation should have failed'); - test.done(); - }, + expect(result.length).toEqual(1); + expect(result[0]).toMatch(/may only contain Source actions/); + + }); - 'cannot be source and is source'(test: Test) { + test('cannot be source and is source', () => { const result = validations.validateSourceAction(false, codepipeline.ActionCategory.SOURCE, 'test action', 'test stage'); - test.deepEqual(result.length, 1); - test.ok(result[0].match(/may only occur in first stage/), 'the validation should have failed'); - test.done(); - }, + expect(result.length).toEqual(1); + expect(result[0]).toMatch(/may only occur in first stage/); + + }); - 'cannot be source and is not source'(test: Test) { + test('cannot be source and is not source', () => { const result = validations.validateSourceAction(false, codepipeline.ActionCategory.DEPLOY, 'test action', 'test stage'); - test.deepEqual(result.length, 0); - test.done(); - }, - }, + expect(result.length).toEqual(0); + + }); + }); - 'action name validation': { - 'throws an exception when adding an Action with an empty name to the Pipeline'(test: Test) { + describe('action name validation', () => { + test('throws an exception when adding an Action with an empty name to the Pipeline', () => { const stack = new cdk.Stack(); const action = new FakeSourceAction({ actionName: '', @@ -72,16 +71,16 @@ nodeunitShim({ const pipeline = new codepipeline.Pipeline(stack, 'Pipeline'); const stage = pipeline.addStage({ stageName: 'Source' }); - test.throws(() => { + expect(() => { stage.addAction(action); - }, /Action name must match regular expression:/); + }).toThrow(/Action name must match regular expression:/); - test.done(); - }, - }, - 'action Artifacts validation': { - 'validates that input Artifacts are within bounds'(test: Test) { + }); + }); + + describe('action Artifacts validation', () => { + test('validates that input Artifacts are within bounds', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); const extraOutput1 = new codepipeline.Artifact('A1'); @@ -121,15 +120,15 @@ nodeunitShim({ ], }); - test.throws(() => { - expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', { - })); - }, /Build\/Fake cannot have more than 3 input artifacts/); + expect(() => { + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + }); + }).toThrow(/Build\/Fake cannot have more than 3 input artifacts/); + - test.done(); - }, + }); - 'validates that output Artifacts are within bounds'(test: Test) { + test('validates that output Artifacts are 
within bounds', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); const extraOutput1 = new codepipeline.Artifact('A1'); @@ -166,16 +165,16 @@ nodeunitShim({ ], }); - test.throws(() => { - expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', { - })); - }, /Source\/Fake cannot have more than 4 output artifacts/); + expect(() => { + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + }); + }).toThrow(/Source\/Fake cannot have more than 4 output artifacts/); - test.done(); - }, - }, - 'automatically assigns artifact names to the Actions'(test: Test) { + }); + }); + + test('automatically assigns artifact names to the Actions', () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'pipeline'); @@ -200,7 +199,7 @@ nodeunitShim({ ], }); - expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', { + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { 'Stages': [ { 'Name': 'Source', @@ -234,12 +233,12 @@ nodeunitShim({ ], }, ], - })); + }); - test.done(); - }, - 'the same Action can be safely added to 2 different Stages'(test: Test) { + }); + + test('the same Action can be safely added to 2 different Stages', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); @@ -268,47 +267,47 @@ nodeunitShim({ }; pipeline.addStage(stage2); - test.doesNotThrow(() => { + expect(() => { pipeline.addStage(stage3); - }, /FakeAction/); + }).not.toThrow(/FakeAction/); + - test.done(); - }, + }); - 'input Artifacts': { - 'can be added multiple times to an Action safely'(test: Test) { + describe('input Artifacts', () => { + test('can be added multiple times to an Action safely', () => { const artifact = new codepipeline.Artifact('SomeArtifact'); - test.doesNotThrow(() => { + expect(() => { new FakeBuildAction({ actionName: 'CodeBuild', input: artifact, extraInputs: [artifact], }); - }); + }).not.toThrow(); + - test.done(); - }, + }); - 'can have duplicate names'(test: Test) { + test('can have duplicate names', () => { const artifact1 = new codepipeline.Artifact('SomeArtifact'); const artifact2 = new codepipeline.Artifact('SomeArtifact'); - test.doesNotThrow(() => { + expect(() => { new FakeBuildAction({ actionName: 'CodeBuild', input: artifact1, extraInputs: [artifact2], }); - }); + }).not.toThrow(); - test.done(); - }, - }, - 'output Artifacts': { - 'accept multiple Artifacts with the same name safely'(test: Test) { - test.doesNotThrow(() => { + }); + }); + + describe('output Artifacts', () => { + test('accept multiple Artifacts with the same name safely', () => { + expect(() => { new FakeSourceAction({ actionName: 'CodeBuild', output: new codepipeline.Artifact('Artifact1'), @@ -317,13 +316,13 @@ nodeunitShim({ new codepipeline.Artifact('Artifact1'), ], }); - }); + }).not.toThrow(); - test.done(); - }, - }, - 'an Action with a non-AWS owner cannot have a Role passed for it'(test: Test) { + }); + }); + + test('an Action with a non-AWS owner cannot have a Role passed for it', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); @@ -353,14 +352,14 @@ nodeunitShim({ }); // an attempt to add it to the Pipeline is where things blow up - test.throws(() => { + expect(() => { buildStage.addAction(buildAction); - }, /Role is not supported for actions with an owner different than 'AWS' - got 'ThirdParty' \(Action: 'build' in Stage: 'Build'\)/); + }).toThrow(/Role is not supported for actions with an owner different than 'AWS' - got 
'ThirdParty' \(Action: 'build' in Stage: 'Build'\)/); - test.done(); - }, - 'actions can be retrieved from stages they have been added to'(test: Test) { + }); + + test('actions can be retrieved from stages they have been added to', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); @@ -394,15 +393,15 @@ nodeunitShim({ ], }); - test.equal(sourceStage.actions.length, 1); - test.equal(sourceStage.actions[0].actionProperties.actionName, 'source'); + expect(sourceStage.actions.length).toEqual(1); + expect(sourceStage.actions[0].actionProperties.actionName).toEqual('source'); + + expect(buildStage.actions.length).toEqual(2); + expect(buildStage.actions[0].actionProperties.actionName).toEqual('build1'); + expect(buildStage.actions[1].actionProperties.actionName).toEqual('build2'); - test.equal(buildStage.actions.length, 2); - test.equal(buildStage.actions[0].actionProperties.actionName, 'build1'); - test.equal(buildStage.actions[1].actionProperties.actionName, 'build2'); - test.done(); - }, + }); }); function boundsValidationResult(numberOfArtifacts: number, min: number, max: number): string[] { diff --git a/packages/@aws-cdk/aws-codepipeline/test/artifacts.test.ts b/packages/@aws-cdk/aws-codepipeline/test/artifacts.test.ts index 29cda05adedf8..5009f5fbdefd0 100644 --- a/packages/@aws-cdk/aws-codepipeline/test/artifacts.test.ts +++ b/packages/@aws-cdk/aws-codepipeline/test/artifacts.test.ts @@ -1,21 +1,20 @@ -import { expect, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as codepipeline from '../lib'; import { FakeBuildAction } from './fake-build-action'; import { FakeSourceAction } from './fake-source-action'; /* eslint-disable quote-props */ -nodeunitShim({ - 'Artifacts in CodePipeline': { - 'cannot be created with an empty name'(test: Test) { - test.throws(() => new codepipeline.Artifact(''), /Artifact name must match regular expression/); +describe('artifacts', () => { + describe('Artifacts in CodePipeline', () => { + test('cannot be created with an empty name', () => { + expect(() => new codepipeline.Artifact('')).toThrow(/Artifact name must match regular expression/); - test.done(); - }, - 'without a name, when used as an input without being used as an output first - should fail validation'(test: Test) { + }); + + test('without a name, when used as an input without being used as an output first - should fail validation', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline', { @@ -43,15 +42,15 @@ nodeunitShim({ const errors = validate(stack); - test.equal(errors.length, 1); + expect(errors.length).toEqual(1); const error = errors[0]; - test.same(error.source, pipeline); - test.equal(error.message, "Action 'Build' is using an unnamed input Artifact, which is not being produced in this pipeline"); + expect(error.source).toEqual(pipeline); + expect(error.message).toEqual("Action 'Build' is using an unnamed input Artifact, which is not being produced in this pipeline"); + - test.done(); - }, + }); - 'with a name, when used as an input without being used as an output first - should fail validation'(test: Test) { + test('with a name, when used as an input without being used as an output first - should fail validation', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); const pipeline 
= new codepipeline.Pipeline(stack, 'Pipeline', { @@ -79,15 +78,15 @@ nodeunitShim({ const errors = validate(stack); - test.equal(errors.length, 1); + expect(errors.length).toEqual(1); const error = errors[0]; - test.same(error.source, pipeline); - test.equal(error.message, "Action 'Build' is using input Artifact 'named', which is not being produced in this pipeline"); + expect(error.source).toEqual(pipeline); + expect(error.message).toEqual("Action 'Build' is using input Artifact 'named', which is not being produced in this pipeline"); - test.done(); - }, - 'without a name, when used as an output multiple times - should fail validation'(test: Test) { + }); + + test('without a name, when used as an output multiple times - should fail validation', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline', { @@ -116,15 +115,15 @@ nodeunitShim({ const errors = validate(stack); - test.equal(errors.length, 1); + expect(errors.length).toEqual(1); const error = errors[0]; - test.same(error.source, pipeline); - test.equal(error.message, "Both Actions 'Source' and 'Build' are producting Artifact 'Artifact_Source_Source'. Every artifact can only be produced once."); + expect(error.source).toEqual(pipeline); + expect(error.message).toEqual("Both Actions 'Source' and 'Build' are producting Artifact 'Artifact_Source_Source'. Every artifact can only be produced once."); + - test.done(); - }, + }); - "an Action's output can be used as input for an Action in the same Stage with a higher runOrder"(test: Test) { + test("an Action's output can be used as input for an Action in the same Stage with a higher runOrder", () => { const stack = new cdk.Stack(); const sourceOutput1 = new codepipeline.Artifact('sourceOutput1'); @@ -166,14 +165,12 @@ nodeunitShim({ ], }); - expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', { - // - })); + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline'); - test.done(); - }, - 'violation of runOrder constraints is detected and reported'(test: Test) { + }); + + test('violation of runOrder constraints is detected and reported', () => { const stack = new cdk.Stack(); const sourceOutput1 = new codepipeline.Artifact('sourceOutput1'); @@ -218,15 +215,15 @@ nodeunitShim({ const errors = validate(stack); - test.equal(errors.length, 1); + expect(errors.length).toEqual(1); const error = errors[0]; - test.same(error.source, pipeline); - test.equal(error.message, "Stage 2 Action 2 ('Build'/'build2') is consuming input Artifact 'buildOutput1' before it is being produced at Stage 2 Action 3 ('Build'/'build1')"); + expect(error.source).toEqual(pipeline); + expect(error.message).toEqual("Stage 2 Action 2 ('Build'/'build2') is consuming input Artifact 'buildOutput1' before it is being produced at Stage 2 Action 3 ('Build'/'build1')"); + - test.done(); - }, + }); - 'without a name, sanitize the auto stage-action derived name'(test: Test) { + test('without a name, sanitize the auto stage-action derived name', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); @@ -253,7 +250,7 @@ nodeunitShim({ ], }); - expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', { + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { 'Stages': [ { 'Name': 'Source.@', @@ -278,11 +275,11 @@ nodeunitShim({ ], }, ], - })); + }); + - test.done(); - }, - }, + }); + }); }); /* eslint-disable cdk/no-core-construct */ diff --git 
a/packages/@aws-cdk/aws-codepipeline/test/general-validation.test.ts b/packages/@aws-cdk/aws-codepipeline/test/general-validation.test.ts index 76cd9b71a6941..d8186532d7cea 100644 --- a/packages/@aws-cdk/aws-codepipeline/test/general-validation.test.ts +++ b/packages/@aws-cdk/aws-codepipeline/test/general-validation.test.ts @@ -1,5 +1,5 @@ +import '@aws-cdk/assert-internal/jest'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { IStage } from '../lib/action'; import { Artifact } from '../lib/artifact'; import { Pipeline } from '../lib/pipeline'; @@ -12,8 +12,8 @@ interface NameValidationTestCase { explanation: string; } -nodeunitShim({ - 'name validation'(test: Test) { +describe('general validation', () => { + test('name validation', () => { const cases: NameValidationTestCase[] = [ { name: 'BlahBleep123.@-_', shouldPassValidation: true, explanation: 'should be valid' }, { name: '', shouldPassValidation: false, explanation: 'the empty string should be invalid' }, @@ -25,36 +25,36 @@ nodeunitShim({ const name = testCase.name; const validationBlock = () => { validateName('test thing', name); }; if (testCase.shouldPassValidation) { - test.doesNotThrow(validationBlock, Error, `${name} failed validation but ${testCase.explanation}`); + expect(validationBlock).not.toThrow(); } else { - test.throws(validationBlock, Error, `${name} passed validation but ${testCase.explanation}`); + expect(validationBlock).toThrow(); } }); - test.done(); - }, - 'Stage validation': { - 'should fail if Stage has no Actions'(test: Test) { + }); + + describe('Stage validation', () => { + test('should fail if Stage has no Actions', () => { const stage = stageForTesting(); - test.deepEqual((stage as any).validate().length, 1); + expect((stage as any).validate().length).toEqual(1); + - test.done(); - }, - }, + }); + }); - 'Pipeline validation': { - 'should fail if Pipeline has no Stages'(test: Test) { + describe('Pipeline validation', () => { + test('should fail if Pipeline has no Stages', () => { const stack = new cdk.Stack(); const pipeline = new Pipeline(stack, 'Pipeline'); - test.deepEqual(cdk.ConstructNode.validate(pipeline.node).length, 1); + expect(cdk.ConstructNode.validate(pipeline.node).length).toEqual(1); - test.done(); - }, - 'should fail if Pipeline has a Source Action in a non-first Stage'(test: Test) { + }); + + test('should fail if Pipeline has a Source Action in a non-first Stage', () => { const stack = new cdk.Stack(); const pipeline = new Pipeline(stack, 'Pipeline'); @@ -68,11 +68,11 @@ nodeunitShim({ ], }); - test.deepEqual(cdk.ConstructNode.validate(pipeline.node).length, 1); + expect(cdk.ConstructNode.validate(pipeline.node).length).toEqual(1); + - test.done(); - }, - }, + }); + }); }); function stageForTesting(): IStage { diff --git a/packages/@aws-cdk/aws-codepipeline/test/stages.test.ts b/packages/@aws-cdk/aws-codepipeline/test/stages.test.ts index 3cbb290187436..d7931a23a531e 100644 --- a/packages/@aws-cdk/aws-codepipeline/test/stages.test.ts +++ b/packages/@aws-cdk/aws-codepipeline/test/stages.test.ts @@ -1,101 +1,130 @@ -import { expect, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as codepipeline from '../lib'; import { Stage } from '../lib/private/stage'; +import { FakeBuildAction } from './fake-build-action'; +import { FakeSourceAction } from './fake-source-action'; /* eslint-disable quote-props 
*/ -nodeunitShim({ - 'Pipeline Stages': { - 'can be inserted before another Stage'(test: Test) { +describe('stages', () => { + describe('Pipeline Stages', () => { + test('can be inserted before another Stage', () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline'); const secondStage = pipeline.addStage({ stageName: 'SecondStage' }); - pipeline.addStage({ + const firstStage = pipeline.addStage({ stageName: 'FirstStage', placement: { rightBefore: secondStage, }, }); - expect(stack, true).to(haveResourceLike('AWS::CodePipeline::Pipeline', { + // -- dummy actions here are needed to satisfy validation rules + const sourceArtifact = new codepipeline.Artifact(); + firstStage.addAction(new FakeSourceAction({ + actionName: 'dummyAction', + output: sourceArtifact, + })); + secondStage.addAction(new FakeBuildAction({ + actionName: 'dummyAction', + input: sourceArtifact, + })); + // -- + + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { 'Stages': [ { 'Name': 'FirstStage' }, { 'Name': 'SecondStage' }, ], - })); + }); - test.done(); - }, - 'can be inserted after another Stage'(test: Test) { + }); + + test('can be inserted after another Stage', () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline'); const firstStage = pipeline.addStage({ stageName: 'FirstStage' }); - pipeline.addStage({ stageName: 'ThirdStage' }); - pipeline.addStage({ + const thirdStage = pipeline.addStage({ stageName: 'ThirdStage' }); + const secondStage = pipeline.addStage({ stageName: 'SecondStage', placement: { justAfter: firstStage, }, }); - expect(stack, true).to(haveResourceLike('AWS::CodePipeline::Pipeline', { + // -- dummy actions here are needed to satisfy validation rules + const sourceArtifact = new codepipeline.Artifact(); + firstStage.addAction(new FakeSourceAction({ + actionName: 'dummyAction', + output: sourceArtifact, + })); + secondStage.addAction(new FakeBuildAction({ + actionName: 'dummyAction', + input: sourceArtifact, + })); + thirdStage.addAction(new FakeBuildAction({ + actionName: 'dummyAction', + input: sourceArtifact, + })); + // -- + + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { 'Stages': [ { 'Name': 'FirstStage' }, { 'Name': 'SecondStage' }, { 'Name': 'ThirdStage' }, ], - })); + }); + - test.done(); - }, + }); - "attempting to insert a Stage before a Stage that doesn't exist results in an error"(test: Test) { + test("attempting to insert a Stage before a Stage that doesn't exist results in an error", () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline'); const stage = pipeline.addStage({ stageName: 'Stage' }); const anotherPipeline = new codepipeline.Pipeline(stack, 'AnotherPipeline'); - test.throws(() => { + expect(() => { anotherPipeline.addStage({ stageName: 'AnotherStage', placement: { rightBefore: stage, }, }); - }, /before/i); + }).toThrow(/before/i); - test.done(); - }, - "attempting to insert a Stage after a Stage that doesn't exist results in an error"(test: Test) { + }); + + test("attempting to insert a Stage after a Stage that doesn't exist results in an error", () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline'); const stage = pipeline.addStage({ stageName: 'Stage' }); const anotherPipeline = new codepipeline.Pipeline(stack, 'AnotherPipeline'); - test.throws(() => { + expect(() => { anotherPipeline.addStage({ stageName: 'AnotherStage', placement: { justAfter: stage, }, }); 
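// Migration note: the hunks in this file follow the same mechanical mapping as the
// rest of the series. A minimal before/after sketch of one case, using names from
// the surrounding tests:
//
//   // nodeunit-shim
//   'can be inserted before another Stage'(test: Test) {
//     test.throws(() => { /* ... */ }, /before/i);
//     test.done();
//   },
//
//   // jest
//   test('can be inserted before another Stage', () => {
//     expect(() => { /* ... */ }).toThrow(/before/i);
//   });
//
// The dummy FakeSourceAction/FakeBuildAction additions are needed because the old
// expect(stack, true) form passed skipValidation to assert-internal, while the
// jest matchers always synthesize (and therefore validate) the stack.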
- }, /after/i); + }).toThrow(/after/i); + - test.done(); - }, + }); - 'providing more than one placement value results in an error'(test: Test) { + test('providing more than one placement value results in an error', () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline'); const stage = pipeline.addStage({ stageName: 'Stage' }); - test.throws(() => { + expect(() => { pipeline.addStage({ stageName: 'SecondStage', placement: { @@ -105,12 +134,12 @@ nodeunitShim({ }); // incredibly, an arrow function below causes nodeunit to crap out with: // "TypeError: Function has non-object prototype 'undefined' in instanceof check" - }, /(rightBefore.*justAfter)|(justAfter.*rightBefore)/); + }).toThrow(/(rightBefore.*justAfter)|(justAfter.*rightBefore)/); - test.done(); - }, - 'can be retrieved from a pipeline after it has been created'(test: Test) { + }); + + test('can be retrieved from a pipeline after it has been created', () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline', { stages: [ @@ -122,17 +151,17 @@ nodeunitShim({ pipeline.addStage({ stageName: 'SecondStage' }); - test.equal(pipeline.stages.length, 2); - test.equal(pipeline.stages[0].stageName, 'FirstStage'); - test.equal(pipeline.stages[1].stageName, 'SecondStage'); + expect(pipeline.stages.length).toEqual(2); + expect(pipeline.stages[0].stageName).toEqual('FirstStage'); + expect(pipeline.stages[1].stageName).toEqual('SecondStage'); // adding stages to the returned array should have no effect pipeline.stages.push(new Stage({ stageName: 'ThirdStage', }, pipeline)); - test.equal(pipeline.stageCount, 2); + expect(pipeline.stageCount).toEqual(2); + - test.done(); - }, - }, + }); + }); }); diff --git a/packages/@aws-cdk/aws-codepipeline/test/variables.test.ts b/packages/@aws-cdk/aws-codepipeline/test/variables.test.ts index 265ddd09b0843..537b108df805a 100644 --- a/packages/@aws-cdk/aws-codepipeline/test/variables.test.ts +++ b/packages/@aws-cdk/aws-codepipeline/test/variables.test.ts @@ -1,30 +1,44 @@ -import { expect, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; +import { arrayWith, objectLike } from '@aws-cdk/assert-internal'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as codepipeline from '../lib'; import { FakeBuildAction } from './fake-build-action'; import { FakeSourceAction } from './fake-source-action'; /* eslint-disable quote-props */ -nodeunitShim({ - 'Pipeline Variables': { - 'uses the passed namespace when its passed when constructing the Action'(test: Test) { +describe('variables', () => { + describe('Pipeline Variables', () => { + test('uses the passed namespace when its passed when constructing the Action', () => { const stack = new cdk.Stack(); - new codepipeline.Pipeline(stack, 'Pipeline', { + const sourceArtifact = new codepipeline.Artifact(); + const pipeline = new codepipeline.Pipeline(stack, 'Pipeline', { stages: [ { stageName: 'Source', actions: [new FakeSourceAction({ actionName: 'Source', - output: new codepipeline.Artifact(), + output: sourceArtifact, variablesNamespace: 'MyNamespace', })], }, ], }); - expect(stack, true).to(haveResourceLike('AWS::CodePipeline::Pipeline', { + // -- stages and actions here are needed to satisfy validation rules + const first = pipeline.addStage({ stageName: 'FirstStage' }); + first.addAction(new FakeBuildAction({ + actionName: 'dummyAction', + input: sourceArtifact, + })); + const second = 
pipeline.addStage({ stageName: 'SecondStage' }); + second.addAction(new FakeBuildAction({ + actionName: 'dummyAction', + input: sourceArtifact, + })); + // -- + + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { 'Stages': [ { 'Name': 'Source', @@ -36,12 +50,12 @@ nodeunitShim({ ], }, ], - })); + }); + - test.done(); - }, + }); - 'allows using the variable in the configuration of a different action'(test: Test) { + test('allows using the variable in the configuration of a different action', () => { const stack = new cdk.Stack(); const sourceOutput = new codepipeline.Artifact(); const fakeSourceAction = new FakeSourceAction({ @@ -66,7 +80,7 @@ nodeunitShim({ ], }); - expect(stack).to(haveResourceLike('AWS::CodePipeline::Pipeline', { + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { 'Stages': [ { 'Name': 'Source', @@ -83,12 +97,12 @@ nodeunitShim({ ], }, ], - })); + }); + - test.done(); - }, + }); - 'fails when trying add an action using variables with an empty string for the namespace to a pipeline'(test: Test) { + test('fails when trying add an action using variables with an empty string for the namespace to a pipeline', () => { const stack = new cdk.Stack(); const pipeline = new codepipeline.Pipeline(stack, 'Pipeline'); const sourceStage = pipeline.addStage({ stageName: 'Source' }); @@ -99,46 +113,54 @@ nodeunitShim({ variablesNamespace: '', }); - test.throws(() => { + expect(() => { sourceStage.addAction(sourceAction); - }, /Namespace name must match regular expression:/); + }).toThrow(/Namespace name must match regular expression:/); - test.done(); - }, - 'can use global variables'(test: Test) { + }); + + test('can use global variables', () => { const stack = new cdk.Stack(); + const sourceArtifact = new codepipeline.Artifact(); new codepipeline.Pipeline(stack, 'Pipeline', { stages: [ { stageName: 'Source', + actions: [new FakeSourceAction({ + actionName: 'Source', + output: sourceArtifact, + })], + }, + { + stageName: 'Build', actions: [new FakeBuildAction({ actionName: 'Build', - input: new codepipeline.Artifact(), + input: sourceArtifact, customConfigKey: codepipeline.GlobalVariables.executionId, })], }, ], }); - expect(stack, true).to(haveResourceLike('AWS::CodePipeline::Pipeline', { - 'Stages': [ + expect(stack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + 'Stages': arrayWith( { - 'Name': 'Source', + 'Name': 'Build', 'Actions': [ - { + objectLike({ 'Name': 'Build', 'Configuration': { 'CustomConfigKey': '#{codepipeline.PipelineExecutionId}', }, - }, + }), ], }, - ], - })); + ), + }); + - test.done(); - }, - }, + }); + }); }); diff --git a/packages/@aws-cdk/aws-ec2/package.json b/packages/@aws-cdk/aws-ec2/package.json index d7c8591357c7e..6bb38a38ece94 100644 --- a/packages/@aws-cdk/aws-ec2/package.json +++ b/packages/@aws-cdk/aws-ec2/package.json @@ -79,7 +79,7 @@ "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", "cfn2ts": "0.0.0", - "nodeunit-shim": "0.0.0", + "jest": "^26.6.3", "pkglint": "0.0.0", "@aws-cdk/cloud-assembly-schema": "0.0.0", "@aws-cdk/assert-internal": "0.0.0" diff --git a/packages/@aws-cdk/aws-ec2/test/bastion-host.test.ts b/packages/@aws-cdk/aws-ec2/test/bastion-host.test.ts index 2bacb5e918920..42e2dd8cd7e0a 100644 --- a/packages/@aws-cdk/aws-ec2/test/bastion-host.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/bastion-host.test.ts @@ -1,10 +1,9 @@ -import { expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import { Stack } from '@aws-cdk/core'; -import { nodeunitShim, 
Test } from 'nodeunit-shim'; import { BastionHostLinux, BlockDeviceVolume, InstanceClass, InstanceSize, InstanceType, SubnetType, Vpc } from '../lib'; -nodeunitShim({ - 'default instance is created in basic'(test: Test) { +describe('bastion host', () => { + test('default instance is created in basic', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -15,14 +14,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { InstanceType: 't3.nano', SubnetId: { Ref: 'VPCPrivateSubnet1Subnet8BCA10E0' }, - })); + }); + - test.done(); - }, - 'default instance is created in isolated vpc'(test: Test) { + }); + test('default instance is created in isolated vpc', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC', { @@ -40,14 +39,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { InstanceType: 't3.nano', SubnetId: { Ref: 'VPCIsolatedSubnet1SubnetEBD00FC6' }, - })); + }); - test.done(); - }, - 'ebs volume is encrypted'(test: Test) { + + }); + test('ebs volume is encrypted', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC', { @@ -71,7 +70,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { BlockDeviceMappings: [ { DeviceName: 'EBSBastionHost', @@ -81,11 +80,11 @@ nodeunitShim({ }, }, ], - })); + }); - test.done(); - }, - 'x86-64 instances use x86-64 image by default'(test: Test) { + + }); + test('x86-64 instances use x86-64 image by default', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -96,15 +95,15 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { ImageId: { Ref: 'SsmParameterValueawsserviceamiamazonlinuxlatestamzn2amihvmx8664gp2C96584B6F00A464EAD1953AFF4B05118Parameter', }, - })); + }); + - test.done(); - }, - 'arm instances use arm image by default'(test: Test) { + }); + test('arm instances use arm image by default', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -116,12 +115,12 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { ImageId: { Ref: 'SsmParameterValueawsserviceamiamazonlinuxlatestamzn2amihvmarm64gp2C96584B6F00A464EAD1953AFF4B05118Parameter', }, - })); + }); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/aws-ec2/test/connections.test.ts b/packages/@aws-cdk/aws-ec2/test/connections.test.ts index 2adc0ce242db8..e5c48f2740f57 100644 --- a/packages/@aws-cdk/aws-ec2/test/connections.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/connections.test.ts @@ -1,6 +1,5 @@ -import { expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import { App, Stack } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Connections, @@ -10,8 +9,8 @@ import { Vpc, } from '../lib'; -nodeunitShim({ - 'peering between two security groups does not recursive infinitely'(test: Test) { +describe('connections', () => { + test('peering between two security groups does not recursive infinitely', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '12345678', region: 'dummy' } }); @@ -26,10 +25,10 @@ 
nodeunitShim({ conn1.connections.allowTo(conn2, Port.tcp(80), 'Test'); // THEN -- it finishes! - test.done(); - }, - '(imported) SecurityGroup can be used as target of .allowTo()'(test: Test) { + }); + + test('(imported) SecurityGroup can be used as target of .allowTo()', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -42,29 +41,29 @@ nodeunitShim({ somethingConnectable.connections.allowTo(securityGroup, Port.allTcp(), 'Connect there'); // THEN: rule to generated security group to connect to imported - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { GroupId: { 'Fn::GetAtt': ['SomeSecurityGroupEF219AD6', 'GroupId'] }, IpProtocol: 'tcp', Description: 'Connect there', DestinationSecurityGroupId: 'sg-12345', FromPort: 0, ToPort: 65535, - })); + }); // THEN: rule to imported security group to allow connections from generated - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { IpProtocol: 'tcp', Description: 'Connect there', FromPort: 0, GroupId: 'sg-12345', SourceSecurityGroupId: { 'Fn::GetAtt': ['SomeSecurityGroupEF219AD6', 'GroupId'] }, ToPort: 65535, - })); + }); + - test.done(); - }, + }); - 'security groups added to connections after rule still gets rule'(test: Test) { + test('security groups added to connections after rule still gets rule', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -77,7 +76,7 @@ nodeunitShim({ connections.addSecurityGroup(sg2); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { GroupDescription: 'Default/SecurityGroup1', SecurityGroupIngress: [ { @@ -88,9 +87,9 @@ nodeunitShim({ IpProtocol: 'tcp', }, ], - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { GroupDescription: 'Default/SecurityGroup2', SecurityGroupIngress: [ { @@ -101,12 +100,12 @@ nodeunitShim({ IpProtocol: 'tcp', }, ], - })); + }); + - test.done(); - }, + }); - 'when security groups are added to target they also get the rule'(test: Test) { + test('when security groups are added to target they also get the rule', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -122,24 +121,24 @@ nodeunitShim({ connections2.addSecurityGroup(sg3); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: { 'Fn::GetAtt': ['SecurityGroup23BE86BB7', 'GroupId'] }, SourceSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroup1F554B36F', 'GroupId'] }, FromPort: 88, ToPort: 88, - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: { 'Fn::GetAtt': ['SecurityGroup3E5E374B9', 'GroupId'] }, SourceSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroup1F554B36F', 'GroupId'] }, FromPort: 88, ToPort: 88, - })); + }); - test.done(); - }, - 'multiple security groups allows internally between them'(test: Test) { + }); + + test('multiple security groups allows internally between them', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -152,24 +151,24 @@ nodeunitShim({ connections.addSecurityGroup(sg2); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { + 
expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: { 'Fn::GetAtt': ['SecurityGroup1F554B36F', 'GroupId'] }, SourceSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroup1F554B36F', 'GroupId'] }, FromPort: 88, ToPort: 88, - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { DestinationSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroup1F554B36F', 'GroupId'] }, GroupId: { 'Fn::GetAtt': ['SecurityGroup1F554B36F', 'GroupId'] }, FromPort: 88, ToPort: 88, - })); + }); + - test.done(); - }, + }); - 'can establish cross stack Security Group connections - allowFrom'(test: Test) { + test('can establish cross stack Security Group connections - allowFrom', () => { // GIVEN const app = new App(); @@ -187,20 +186,20 @@ nodeunitShim({ // THEN -- both rules are in Stack2 app.synth(); - expect(stack2).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack2).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: { 'Fn::GetAtt': ['SecurityGroupDD263621', 'GroupId'] }, SourceSecurityGroupId: { 'Fn::ImportValue': 'Stack1:ExportsOutputFnGetAttSecurityGroupDD263621GroupIdDF6F8B09' }, - })); + }); - expect(stack2).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack2).toHaveResource('AWS::EC2::SecurityGroupEgress', { GroupId: { 'Fn::ImportValue': 'Stack1:ExportsOutputFnGetAttSecurityGroupDD263621GroupIdDF6F8B09' }, DestinationSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroupDD263621', 'GroupId'] }, - })); + }); + - test.done(); - }, + }); - 'can establish cross stack Security Group connections - allowTo'(test: Test) { + test('can establish cross stack Security Group connections - allowTo', () => { // GIVEN const app = new App(); @@ -218,20 +217,20 @@ nodeunitShim({ // THEN -- both rules are in Stack2 app.synth(); - expect(stack2).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack2).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: { 'Fn::ImportValue': 'Stack1:ExportsOutputFnGetAttSecurityGroupDD263621GroupIdDF6F8B09' }, SourceSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroupDD263621', 'GroupId'] }, - })); + }); - expect(stack2).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack2).toHaveResource('AWS::EC2::SecurityGroupEgress', { GroupId: { 'Fn::GetAtt': ['SecurityGroupDD263621', 'GroupId'] }, DestinationSecurityGroupId: { 'Fn::ImportValue': 'Stack1:ExportsOutputFnGetAttSecurityGroupDD263621GroupIdDF6F8B09' }, - })); + }); - test.done(); - }, - 'can establish multiple cross-stack SGs'(test: Test) { + }); + + test('can establish multiple cross-stack SGs', () => { // GIVEN const app = new App(); @@ -251,19 +250,19 @@ nodeunitShim({ // THEN -- both egress rules are in Stack2 app.synth(); - expect(stack2).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack2).toHaveResource('AWS::EC2::SecurityGroupEgress', { GroupId: { 'Fn::ImportValue': 'Stack1:ExportsOutputFnGetAttSecurityGroupAED40ADC5GroupId1D10C76A' }, DestinationSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroupDD263621', 'GroupId'] }, - })); + }); - expect(stack2).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack2).toHaveResource('AWS::EC2::SecurityGroupEgress', { GroupId: { 'Fn::ImportValue': 'Stack1:ExportsOutputFnGetAttSecurityGroupB04591F90GroupIdFA7208D5' }, DestinationSecurityGroupId: { 'Fn::GetAtt': ['SecurityGroupDD263621', 'GroupId'] }, - })); + }); - test.done(); - }, - 'Imported SecurityGroup does not create egress rule'(test: Test) { + + }); + 
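// Migration note: negative assertions change shape as well. assert-internal's
//
//   expect(stack).notTo(haveResource('AWS::EC2::SecurityGroupEgress'));
//
// becomes jest's .not modifier chained onto the custom matcher, as the next hunk
// shows:
//
//   expect(stack).not.toHaveResource('AWS::EC2::SecurityGroupEgress');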
test('Imported SecurityGroup does not create egress rule', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -276,21 +275,21 @@ nodeunitShim({ somethingConnectable.connections.allowFrom(securityGroup, Port.allTcp(), 'Connect there'); // THEN: rule to generated security group to connect to imported - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: { 'Fn::GetAtt': ['SomeSecurityGroupEF219AD6', 'GroupId'] }, IpProtocol: 'tcp', Description: 'Connect there', SourceSecurityGroupId: 'sg-12345', FromPort: 0, ToPort: 65535, - })); + }); // THEN: rule to imported security group to allow connections from generated - expect(stack).notTo(haveResource('AWS::EC2::SecurityGroupEgress')); + expect(stack).not.toHaveResource('AWS::EC2::SecurityGroupEgress'); + - test.done(); - }, - 'Imported SecurityGroup with allowAllOutbound: false DOES create egress rule'(test: Test) { + }); + test('Imported SecurityGroup with allowAllOutbound: false DOES create egress rule', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -305,27 +304,27 @@ nodeunitShim({ somethingConnectable.connections.allowFrom(securityGroup, Port.allTcp(), 'Connect there'); // THEN: rule to generated security group to connect to imported - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: { 'Fn::GetAtt': ['SomeSecurityGroupEF219AD6', 'GroupId'] }, IpProtocol: 'tcp', Description: 'Connect there', SourceSecurityGroupId: 'sg-12345', FromPort: 0, ToPort: 65535, - })); + }); // THEN: rule to imported security group to allow connections from generated - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { IpProtocol: 'tcp', Description: 'Connect there', FromPort: 0, GroupId: 'sg-12345', DestinationSecurityGroupId: { 'Fn::GetAtt': ['SomeSecurityGroupEF219AD6', 'GroupId'] }, ToPort: 65535, - })); + }); + - test.done(); - }, + }); }); class SomethingConnectable implements IConnectable { diff --git a/packages/@aws-cdk/aws-ec2/test/instance.test.ts b/packages/@aws-cdk/aws-ec2/test/instance.test.ts index 392aeb8ec22d9..884021f518a84 100644 --- a/packages/@aws-cdk/aws-ec2/test/instance.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/instance.test.ts @@ -1,11 +1,10 @@ import * as path from 'path'; -import { arrayWith, expect as cdkExpect, haveResource, ResourcePart, stringLike, SynthUtils } from '@aws-cdk/assert-internal'; import '@aws-cdk/assert-internal/jest'; +import { arrayWith, ResourcePart, stringLike, SynthUtils } from '@aws-cdk/assert-internal'; import { Asset } from '@aws-cdk/aws-s3-assets'; import { StringParameter } from '@aws-cdk/aws-ssm'; import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import { Stack } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { AmazonLinuxImage, BlockDeviceVolume, CloudFormationInit, EbsDeviceVolumeType, InitCommand, Instance, InstanceArchitecture, InstanceClass, InstanceSize, InstanceType, UserData, Vpc, @@ -19,8 +18,8 @@ beforeEach(() => { vpc = new Vpc(stack, 'VPC'); }); -nodeunitShim({ - 'instance is created correctly'(test: Test) { +describe('instance', () => { + test('instance is created correctly', () => { // GIVEN const sampleInstances = [{ instanceClass: InstanceClass.BURSTABLE4_GRAVITON, @@ -41,14 +40,14 @@ nodeunitShim({ }); // THEN - 
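// Migration note: import '@aws-cdk/assert-internal/jest' is a side-effect import;
// it registers the CloudFormation matchers (toHaveResource, toHaveResourceLike,
// toCountResources) on jest's global expect, which is why the aliased
// cdkExpect/haveResource imports can be dropped here. A simplified sketch of how
// such a matcher is registered via jest's expect.extend (the matcher body is an
// illustrative stand-in, not the real implementation):
//
//   expect.extend({
//     toHaveResource(received: any, resourceType: string) {
//       // naive check: does any resource in the synthesized template have this type?
//       const resources = received?.Resources ?? {};
//       const pass = Object.values(resources).some((r: any) => r.Type === resourceType);
//       return {
//         pass,
//         message: () => `expected template ${pass ? 'not ' : ''}to contain a ${resourceType}`,
//       };
//     },
//   });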
cdkExpect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { InstanceType: sampleInstance.instanceType, - })); + }); } - test.done(); - }, - 'instance is created with source/dest check switched off'(test: Test) { + + }); + test('instance is created with source/dest check switched off', () => { // WHEN new Instance(stack, 'Instance', { vpc, @@ -58,14 +57,14 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { InstanceType: 't3.large', SourceDestCheck: false, - })); + }); + - test.done(); - }, - 'instance is grantable'(test: Test) { + }); + test('instance is grantable', () => { // GIVEN const param = new StringParameter(stack, 'Param', { stringValue: 'Foobar' }); const instance = new Instance(stack, 'Instance', { @@ -78,7 +77,7 @@ nodeunitShim({ param.grantRead(instance); // THEN - cdkExpect(stack).to(haveResource('AWS::IAM::Policy', { + expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -116,11 +115,11 @@ nodeunitShim({ ], Version: '2012-10-17', }, - })); + }); + - test.done(); - }, - 'instance architecture is correctly discerned for arm instances'(test: Test) { + }); + test('instance architecture is correctly discerned for arm instances', () => { // GIVEN const sampleInstanceClasses = [ 'a1', 't4g', 'c6g', 'c6gd', 'c6gn', 'm6g', 'm6gd', 'r6g', 'r6gd', // current Graviton-based instance classes @@ -135,9 +134,9 @@ nodeunitShim({ expect(instanceType.architecture).toBe(InstanceArchitecture.ARM_64); } - test.done(); - }, - 'instance architecture is correctly discerned for x86-64 instance'(test: Test) { + + }); + test('instance architecture is correctly discerned for x86-64 instance', () => { // GIVEN const sampleInstanceClasses = ['c5', 'm5ad', 'r5n', 'm6', 't3a']; // A sample of x86-64 instance classes @@ -149,9 +148,9 @@ nodeunitShim({ expect(instanceType.architecture).toBe(InstanceArchitecture.X86_64); } - test.done(); - }, - 'instances with local NVME drive are correctly named'(test: Test) { + + }); + test('instances with local NVME drive are correctly named', () => { // GIVEN const sampleInstanceClassKeys = [{ key: 'R5D', @@ -182,9 +181,9 @@ nodeunitShim({ expect(instanceType).toBe(instanceClass.value); } - test.done(); - }, - 'instance architecture throws an error when instance type is invalid'(test: Test) { + + }); + test('instance architecture throws an error when instance type is invalid', () => { // GIVEN const malformedInstanceTypes = ['t4', 't4g.nano.', 't4gnano', '']; @@ -193,18 +192,13 @@ nodeunitShim({ const instanceType = new InstanceType(malformedInstanceType); // THEN - try { - instanceType.architecture; - expect(true).toBe(false); // The line above should have thrown an error - } catch (err) { - expect(err.message).toBe('Malformed instance type identifier'); - } + expect(() => instanceType.architecture).toThrow('Malformed instance type identifier'); } - test.done(); - }, - blockDeviceMappings: { - 'can set blockDeviceMappings'(test: Test) { + + }); + describe('blockDeviceMappings', () => { + test('can set blockDeviceMappings', () => { // WHEN new Instance(stack, 'Instance', { vpc, @@ -234,7 +228,7 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { BlockDeviceMappings: [ { DeviceName: 'ebs', @@ -261,14 +255,14 @@ nodeunitShim({ VirtualName: 'ephemeral0', }, ], - })); + }); - test.done(); - }, - 'throws if 
ephemeral volumeIndex < 0'(test: Test) { + }); + + test('throws if ephemeral volumeIndex < 0', () => { // THEN - test.throws(() => { + expect(() => { new Instance(stack, 'Instance', { vpc, machineImage: new AmazonLinuxImage(), @@ -278,14 +272,14 @@ nodeunitShim({ volume: BlockDeviceVolume.ephemeral(-1), }], }); - }, /volumeIndex must be a number starting from 0/); + }).toThrow(/volumeIndex must be a number starting from 0/); - test.done(); - }, - 'throws if volumeType === IO1 without iops'(test: Test) { + }); + + test('throws if volumeType === IO1 without iops', () => { // THEN - test.throws(() => { + expect(() => { new Instance(stack, 'Instance', { vpc, machineImage: new AmazonLinuxImage(), @@ -299,12 +293,12 @@ nodeunitShim({ }), }], }); - }, /ops property is required with volumeType: EbsDeviceVolumeType.IO1/); + }).toThrow(/ops property is required with volumeType: EbsDeviceVolumeType.IO1/); - test.done(); - }, - 'warning if iops without volumeType'(test: Test) { + }); + + test('warning if iops without volumeType', () => { const instance = new Instance(stack, 'Instance', { vpc, machineImage: new AmazonLinuxImage(), @@ -320,13 +314,13 @@ nodeunitShim({ }); // THEN - test.deepEqual(instance.node.metadata[0].type, cxschema.ArtifactMetadataEntryType.WARN); - test.deepEqual(instance.node.metadata[0].data, 'iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); + expect(instance.node.metadata[0].type).toEqual(cxschema.ArtifactMetadataEntryType.WARN); + expect(instance.node.metadata[0].data).toEqual('iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); - test.done(); - }, - 'warning if iops and volumeType !== IO1'(test: Test) { + }); + + test('warning if iops and volumeType !== IO1', () => { const instance = new Instance(stack, 'Instance', { vpc, machineImage: new AmazonLinuxImage(), @@ -343,14 +337,14 @@ nodeunitShim({ }); // THEN - test.deepEqual(instance.node.metadata[0].type, cxschema.ArtifactMetadataEntryType.WARN); - test.deepEqual(instance.node.metadata[0].data, 'iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); + expect(instance.node.metadata[0].type).toEqual(cxschema.ArtifactMetadataEntryType.WARN); + expect(instance.node.metadata[0].data).toEqual('iops will be ignored without volumeType: EbsDeviceVolumeType.IO1'); - test.done(); - }, - }, - 'instance can be created with Private IP Address'(test: Test) { + }); + }); + + test('instance can be created with Private IP Address', () => { // WHEN new Instance(stack, 'Instance', { vpc, @@ -360,15 +354,16 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { InstanceType: 't3.large', PrivateIpAddress: '10.0.0.2', - })); + }); - test.done(); - }, + + }); }); + test('add CloudFormation Init to instance', () => { // GIVEN new Instance(stack, 'Instance', { @@ -408,14 +403,14 @@ test('add CloudFormation Init to instance', () => { Version: '2012-10-17', }, }); - cdkExpect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { CreationPolicy: { ResourceSignal: { Count: 1, Timeout: 'PT5M', }, }, - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); }); test('cause replacement from s3 asset in userdata', () => { diff --git a/packages/@aws-cdk/aws-ec2/test/launch-template.test.ts b/packages/@aws-cdk/aws-ec2/test/launch-template.test.ts index 4d033a0cad78c..27399affe8149 100644 --- a/packages/@aws-cdk/aws-ec2/test/launch-template.test.ts +++ 
b/packages/@aws-cdk/aws-ec2/test/launch-template.test.ts @@ -1,8 +1,5 @@ +import '@aws-cdk/assert-internal/jest'; import { - countResources, - expect as expectCDK, - haveResource, - haveResourceLike, stringLike, } from '@aws-cdk/assert-internal'; import { @@ -55,7 +52,7 @@ describe('LaunchTemplate', () => { // Note: The following is intentionally a haveResource instead of haveResourceLike // to ensure that only the bare minimum of properties have values when no properties // are given to a LaunchTemplate. - expectCDK(stack).to(haveResource('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResource('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { TagSpecifications: [ { @@ -78,8 +75,8 @@ describe('LaunchTemplate', () => { }, ], }, - })); - expectCDK(stack).notTo(haveResource('AWS::IAM::InstanceProfile')); + }); + expect(stack).not.toHaveResource('AWS::IAM::InstanceProfile'); expect(() => { template.grantPrincipal; }).toThrow(); expect(() => { template.connections; }).toThrow(); expect(template.osType).toBeUndefined(); @@ -130,9 +127,9 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateName: 'LTName', - })); + }); }); test('Given instanceType', () => { @@ -142,11 +139,11 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceType: 'tt.test', }, - })); + }); }); test('Given machineImage (Linux)', () => { @@ -156,13 +153,13 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { ImageId: { Ref: stringLike('SsmParameterValueawsserviceamiamazonlinuxlatestamznami*Parameter'), }, }, - })); + }); expect(template.osType).toBe(OperatingSystemType.LINUX); expect(template.userData).toBeUndefined(); }); @@ -174,13 +171,13 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { ImageId: { Ref: stringLike('SsmParameterValueawsserviceamiwindowslatestWindowsServer2019EnglishFullBase*Parameter'), }, }, - })); + }); expect(template.osType).toBe(OperatingSystemType.WINDOWS); expect(template.userData).toBeUndefined(); }); @@ -196,13 +193,13 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { UserData: { 'Fn::Base64': '#!/bin/bash\necho Test', }, }, - })); + }); expect(template.userData).toBeDefined(); }); @@ -218,15 +215,15 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(countResources('AWS::IAM::Role', 1)); - expectCDK(stack).to(haveResourceLike('AWS::IAM::InstanceProfile', { + expect(stack).toCountResources('AWS::IAM::Role', 1); + expect(stack).toHaveResourceLike('AWS::IAM::InstanceProfile', { Roles: [ { Ref: 'TestRole6C9272DF', }, ], - })); - expectCDK(stack).to(haveResource('AWS::EC2::LaunchTemplate', { + }); + expect(stack).toHaveResource('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { IamInstanceProfile: { Arn: stack.resolve((template.node.findChild('Profile') as CfnInstanceProfile).getAtt('Arn')), @@ -252,7 +249,7 @@ 
describe('LaunchTemplate', () => { }, ], }, - })); + }); expect(template.role).toBeDefined(); expect(template.grantPrincipal).toBeDefined(); }); @@ -289,7 +286,7 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { BlockDeviceMappings: [ { @@ -318,7 +315,7 @@ describe('LaunchTemplate', () => { }, ], }, - })); + }); }); test.each([ @@ -331,13 +328,13 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { CreditSpecification: { CpuCredits: expected, }, }, - })); + }); }); test.each([ @@ -350,11 +347,11 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { DisableApiTermination: expected, }, - })); + }); }); test.each([ @@ -367,11 +364,11 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { EbsOptimized: expected, }, - })); + }); }); test.each([ @@ -384,13 +381,13 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { EnclaveOptions: { Enabled: expected, }, }, - })); + }); }); test.each([ @@ -403,11 +400,11 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceInitiatedShutdownBehavior: expected, }, - })); + }); }); test('Given keyName', () => { @@ -417,11 +414,11 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { KeyName: 'TestKeyname', }, - })); + }); }); test.each([ @@ -434,13 +431,13 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { Monitoring: { Enabled: expected, }, }, - })); + }); }); test('Given securityGroup', () => { @@ -454,7 +451,7 @@ describe('LaunchTemplate', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { SecurityGroupIds: [ { @@ -465,7 +462,7 @@ describe('LaunchTemplate', () => { }, ], }, - })); + }); expect(template.connections).toBeDefined(); expect(template.connections.securityGroups).toHaveLength(1); expect(template.connections.securityGroups[0]).toBe(sg); @@ -479,7 +476,7 @@ describe('LaunchTemplate', () => { Tags.of(template).add('TestKey', 'TestValue'); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { TagSpecifications: [ { @@ -510,7 +507,7 @@ describe('LaunchTemplate', () => { }, ], }, - })); + }); }); }); @@ -530,13 +527,13 @@ describe('LaunchTemplate marketOptions', () => { }); // THEN - 
expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceMarketOptions: { MarketType: 'spot', }, }, - })); + }); }); test.each([ @@ -576,7 +573,7 @@ describe('LaunchTemplate marketOptions', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceMarketOptions: { MarketType: 'spot', @@ -585,7 +582,7 @@ describe('LaunchTemplate marketOptions', () => { }, }, }, - })); + }); }); test.each([ @@ -601,7 +598,7 @@ describe('LaunchTemplate marketOptions', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceMarketOptions: { MarketType: 'spot', @@ -610,7 +607,7 @@ describe('LaunchTemplate marketOptions', () => { }, }, }, - })); + }); }); test.each([ @@ -626,7 +623,7 @@ describe('LaunchTemplate marketOptions', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceMarketOptions: { MarketType: 'spot', @@ -635,7 +632,7 @@ describe('LaunchTemplate marketOptions', () => { }, }, }, - })); + }); }); test.each([ @@ -650,7 +647,7 @@ describe('LaunchTemplate marketOptions', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceMarketOptions: { MarketType: 'spot', @@ -659,7 +656,7 @@ describe('LaunchTemplate marketOptions', () => { }, }, }, - })); + }); }); test('given validUntil', () => { @@ -671,7 +668,7 @@ describe('LaunchTemplate marketOptions', () => { }); // THEN - expectCDK(stack).to(haveResourceLike('AWS::EC2::LaunchTemplate', { + expect(stack).toHaveResourceLike('AWS::EC2::LaunchTemplate', { LaunchTemplateData: { InstanceMarketOptions: { MarketType: 'spot', @@ -680,6 +677,6 @@ describe('LaunchTemplate marketOptions', () => { }, }, }, - })); + }); }); }); diff --git a/packages/@aws-cdk/aws-ec2/test/network-utils.test.ts b/packages/@aws-cdk/aws-ec2/test/network-utils.test.ts index b1904331644f2..f6ed377869fed 100644 --- a/packages/@aws-cdk/aws-ec2/test/network-utils.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/network-utils.test.ts @@ -1,4 +1,3 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CidrBlock, InvalidCidrRangeError, @@ -6,65 +5,65 @@ import { NetworkUtils, } from '../lib/network-util'; -nodeunitShim({ - IP: { - 'should convert a valid IP Address to an integer'(test: Test) { - test.strictEqual(NetworkUtils.ipToNum('174.66.173.168'), 2923605416); - test.done(); - }, - 'should throw on invalid IP Address'(test: Test) { - test.throws(() => { +describe('network utils', () => { + describe('IP', () => { + test('should convert a valid IP Address to an integer', () => { + expect(NetworkUtils.ipToNum('174.66.173.168')).toEqual(2923605416); + + }); + test('should throw on invalid IP Address', () => { + expect(() => { NetworkUtils.ipToNum('174.266.173.168'); - }, 'is not valid'); - test.done(); - }, - 'should convert a valid IP integer to a staring'(test: Test) { - test.strictEqual(NetworkUtils.numToIp(2923605416), '174.66.173.168'); - test.done(); - }, - 'should throw an error for invalid IP'(test: Test) { - test.throws(() => { + }).toThrow('is not valid'); + + }); + 
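// Migration note: test.strictEqual(actual, expected) maps onto
// expect(actual).toBe(expected), or toEqual for structural equality. Several of
// the network-utils conversions below preserve the original argument order, e.g.
//
//   expect(true).toEqual(NetworkUtils.validIp(ip));
//   expect(27).toEqual(builder.maskForRemainingSubnets(3));
//
// These pass, but they invert jest's actual-vs-expected failure reporting; the
// idiomatic form puts the value under test inside expect():
//
//   expect(NetworkUtils.validIp(ip)).toBe(true);
//   expect(builder.maskForRemainingSubnets(3)).toBe(27);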
test('should convert a valid IP integer to a staring', () => { + expect(NetworkUtils.numToIp(2923605416)).toEqual('174.66.173.168'); + + }); + test('should throw an error for invalid IP', () => { + expect(() => { NetworkUtils.numToIp(2923605416 * 5); - }, /is not a valid/); - test.throws(() => { + }).toThrow(/is not a valid/); + expect(() => { NetworkUtils.numToIp(-1); - }, /is not a valid/); - test.done(); - }, - 'validIp returns true if octect is in 0-255'(test: Test) { + }).toThrow(/is not a valid/); + + }); + test('validIp returns true if octect is in 0-255', () => { const invalidIps = ['255.255.0.0', '0.0.0.0', '1.2.3.4', '10.0.0.0', '255.01.01.255']; for (const ip of invalidIps) { - test.strictEqual(true, NetworkUtils.validIp(ip)); + expect(true).toEqual(NetworkUtils.validIp(ip)); } - test.done(); - }, - 'validIp returns false if octect is not in 0-255'(test: Test) { + + }); + test('validIp returns false if octect is not in 0-255', () => { const invalidIps = ['1.2.3.4.689', '-1.55.22.22', '', ' ', '255.264.1.01']; for (const ip of invalidIps) { - test.strictEqual(false, NetworkUtils.validIp(ip)); + expect(false).toEqual(NetworkUtils.validIp(ip)); } - test.done(); - }, - }, - CidrBlock: { - 'should return the next valid subnet from offset IP'(test: Test) { + + }); + }); + describe('CidrBlock', () => { + test('should return the next valid subnet from offset IP', () => { const num = NetworkUtils.ipToNum('10.0.1.255'); const newBlock = new CidrBlock(num, 24); - test.strictEqual(newBlock.cidr, '10.0.2.0/24'); - test.done(); - }, - 'nextBlock() returns the next higher CIDR space'(test: Test) { + expect(newBlock.cidr).toEqual('10.0.2.0/24'); + + }); + test('nextBlock() returns the next higher CIDR space', () => { const testValues = [ ['192.168.0.0/24', '192.168.1.0/24'], ['10.85.7.0/28', '10.85.7.16/28'], ]; for (const value of testValues) { const block = new CidrBlock(value[0]); - test.strictEqual(block.nextBlock().cidr, value[1]); + expect(block.nextBlock().cidr).toEqual(value[1]); } - test.done(); - }, - 'maxIp() should return the last usable IP from the CidrBlock'(test: Test) { + + }); + test('maxIp() should return the last usable IP from the CidrBlock', () => { const testValues = [ ['10.0.3.0/28', '10.0.3.15'], ['10.0.3.1/28', '10.0.3.31'], @@ -72,42 +71,42 @@ nodeunitShim({ ]; for (const value of testValues) { const block = new CidrBlock(value[0]); - test.strictEqual(block.maxIp(), value[1]); + expect(block.maxIp()).toEqual(value[1]); } - test.done(); - }, - 'minIp() should return the first usable IP from the CidrBlock'(test: Test) { + + }); + test('minIp() should return the first usable IP from the CidrBlock', () => { const testValues = [ ['192.168.0.0/18', '192.168.0.0'], ['10.0.3.0/24', '10.0.3.0'], ]; for (const answer of testValues) { const block = new CidrBlock(answer[0]); - test.strictEqual(block.minIp(), answer[1]); + expect(block.minIp()).toEqual(answer[1]); } - test.done(); - }, - 'containsCidr returns true if fully contained'(test: Test) { + + }); + test('containsCidr returns true if fully contained', () => { const block = new CidrBlock('10.0.3.0/24'); const contained = new CidrBlock('10.0.3.0/26'); - test.strictEqual(block.containsCidr(contained), true); - test.done(); - }, - 'containsCidr returns false if not fully contained'(test: Test) { + expect(block.containsCidr(contained)).toEqual(true); + + }); + test('containsCidr returns false if not fully contained', () => { const block = new CidrBlock('10.0.3.0/26'); const notContained = new CidrBlock('10.0.3.0/25'); - 
test.strictEqual(block.containsCidr(notContained), false); - test.done(); - }, - 'calculateNetmask returns the ip string mask'(test: Test) { + expect(block.containsCidr(notContained)).toEqual(false); + + }); + test('calculateNetmask returns the ip string mask', () => { const netmask = CidrBlock.calculateNetmask(27); - test.strictEqual(netmask, '255.255.255.224'); - test.done(); - }, + expect(netmask).toEqual('255.255.255.224'); - }, - NetworkBuilder: { - 'allows you to carve subnets our of CIDR network'(test: Test) { + }); + + }); + describe('NetworkBuilder', () => { + test('allows you to carve subnets our of CIDR network', () => { const answers = [ [ '192.168.0.0/28', @@ -142,49 +141,49 @@ nodeunitShim({ efficient.cidrStrings.sort(), ]; for (let i = 0; i < answers.length; i++) { - test.deepEqual(answers[i].sort(), expected[i]); + expect(answers[i].sort()).toEqual(expected[i]); } - test.done(); - }, - 'throws on subnets < 16 or > 28'(test: Test) { + + }); + test('throws on subnets < 16 or > 28', () => { const builder = new NetworkBuilder('192.168.0.0/18'); - test.throws(() => { + expect(() => { builder.addSubnet(15); - }, InvalidCidrRangeError); - test.throws(() => { + }).toThrow(InvalidCidrRangeError); + expect(() => { builder.addSubnet(29); - }, InvalidCidrRangeError); - test.done(); - }, - 'throws if you add a subnet outside of the cidr'(test: Test) { + }).toThrow(InvalidCidrRangeError); + + }); + test('throws if you add a subnet outside of the cidr', () => { const builder = new NetworkBuilder('192.168.0.0/18'); const builder2 = new NetworkBuilder('10.0.0.0/21'); builder.addSubnets(19, 1); builder2.addSubnets(24, 8); - test.throws(() => { + expect(() => { builder.addSubnet(19); builder.addSubnet(28); - }, /exceeds remaining space/); - test.throws(() => { + }).toThrow(/exceeds remaining space/); + expect(() => { builder2.addSubnet(28); - }, /exceeds remaining space/); - test.done(); - }, - 'maskForRemainingSubnets calcs mask for even split of remaining'(test: Test) { + }).toThrow(/exceeds remaining space/); + + }); + test('maskForRemainingSubnets calcs mask for even split of remaining', () => { const builder = new NetworkBuilder('10.0.0.0/24'); builder.addSubnet(25); - test.strictEqual(27, builder.maskForRemainingSubnets(3)); + expect(27).toEqual(builder.maskForRemainingSubnets(3)); const builder2 = new NetworkBuilder('192.168.176.0/20'); builder2.addSubnets(22, 2); - test.strictEqual(22, builder2.maskForRemainingSubnets(2)); + expect(22).toEqual(builder2.maskForRemainingSubnets(2)); const builder3 = new NetworkBuilder('192.168.0.0/16'); - test.strictEqual(17, builder3.maskForRemainingSubnets(2)); + expect(17).toEqual(builder3.maskForRemainingSubnets(2)); const builder4 = new NetworkBuilder('10.0.0.0/16'); - test.strictEqual(18, builder4.maskForRemainingSubnets(4)); + expect(18).toEqual(builder4.maskForRemainingSubnets(4)); const builder5 = new NetworkBuilder('10.0.0.0/16'); builder5.addSubnets(26, 3); builder5.addSubnets(27, 3); - test.strictEqual(18, builder5.maskForRemainingSubnets(3)); test.done(); - }, - }, + expect(18).toEqual(builder5.maskForRemainingSubnets(3)); + }); + }); }); diff --git a/packages/@aws-cdk/aws-ec2/test/security-group.test.ts b/packages/@aws-cdk/aws-ec2/test/security-group.test.ts index 1a7cb23b51493..09ccc6cdc682e 100644 --- a/packages/@aws-cdk/aws-ec2/test/security-group.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/security-group.test.ts @@ -1,12 +1,11 @@ -import { expect, haveResource, haveResourceLike, not } from '@aws-cdk/assert-internal'; +import 
'@aws-cdk/assert-internal/jest'; import { App, Intrinsic, Lazy, Stack, Token } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Peer, Port, SecurityGroup, SecurityGroupProps, Vpc } from '../lib'; const SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY = '@aws-cdk/aws-ec2.securityGroupDisableInlineRules'; -nodeunitShim({ - 'security group can allows all outbound traffic by default'(test: Test) { +describe('security group', () => { + test('security group can allows all outbound traffic by default', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -15,7 +14,7 @@ nodeunitShim({ new SecurityGroup(stack, 'SG1', { vpc, allowAllOutbound: true }); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -23,12 +22,12 @@ nodeunitShim({ IpProtocol: '-1', }, ], - })); + }); + - test.done(); - }, + }); - 'no new outbound rule is added if we are allowing all traffic anyway'(test: Test) { + test('no new outbound rule is added if we are allowing all traffic anyway', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -38,7 +37,7 @@ nodeunitShim({ sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'This does not show up'); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -46,12 +45,12 @@ nodeunitShim({ IpProtocol: '-1', }, ], - })); + }); + - test.done(); - }, + }); - 'security group disallow outbound traffic by default'(test: Test) { + test('security group disallow outbound traffic by default', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -60,7 +59,7 @@ nodeunitShim({ new SecurityGroup(stack, 'SG1', { vpc, allowAllOutbound: false }); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '255.255.255.255/32', @@ -70,12 +69,12 @@ nodeunitShim({ ToPort: 86, }, ], - })); + }); + - test.done(); - }, + }); - 'bogus outbound rule disappears if another rule is added'(test: Test) { + test('bogus outbound rule disappears if another rule is added', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); @@ -85,7 +84,7 @@ nodeunitShim({ sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'This replaces the other one'); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -95,26 +94,26 @@ nodeunitShim({ ToPort: 86, }, ], - })); + }); + - test.done(); - }, + }); - 'all outbound rule cannot be added after creation'(test: Test) { + test('all outbound rule cannot be added after creation', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); // WHEN const sg = new SecurityGroup(stack, 'SG1', { vpc, allowAllOutbound: false }); - test.throws(() => { + expect(() => { sg.addEgressRule(Peer.anyIpv4(), Port.allTraffic(), 'All traffic'); - }, /Cannot add/); + }).toThrow(/Cannot add/); - test.done(); - }, - 'immutable imports do not add rules'(test: Test) { + }); + + test('immutable imports do not add rules', () => { // GIVEN const stack = new Stack(); @@ -123,7 +122,7 @@ nodeunitShim({ sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'This rule was not added'); 
sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'This rule was not added'); - expect(stack).to(not(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).not.toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -133,9 +132,9 @@ nodeunitShim({ ToPort: 86, }, ], - }))); + }); - expect(stack).to(not(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).not.toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupIngress: [ { CidrIp: '0.0.0.0/0', @@ -145,29 +144,29 @@ nodeunitShim({ ToPort: 86, }, ], - }))); + }); + - test.done(); - }, + }); - 'Inline Rule Control': { + describe('Inline Rule Control', () => { //Not inlined - 'When props.disableInlineRules is true': testRulesAreNotInlined(undefined, true), - 'When context.disableInlineRules is true': testRulesAreNotInlined(true, undefined), - 'When context.disableInlineRules is true and props.disableInlineRules is true': testRulesAreNotInlined(true, true), - 'When context.disableInlineRules is false and props.disableInlineRules is true': testRulesAreNotInlined(false, true), - 'When props.disableInlineRules is true and context.disableInlineRules is null': testRulesAreNotInlined(null, true), + describe('When props.disableInlineRules is true', () => { testRulesAreNotInlined(undefined, true); }); + describe('When context.disableInlineRules is true', () => { testRulesAreNotInlined(true, undefined); }); + describe('When context.disableInlineRules is true and props.disableInlineRules is true', () => { testRulesAreNotInlined(true, true); }); + describe('When context.disableInlineRules is false and props.disableInlineRules is true', () => { testRulesAreNotInlined(false, true); }); + describe('When props.disableInlineRules is true and context.disableInlineRules is null', () => { testRulesAreNotInlined(null, true); }); //Inlined - 'When context.disableInlineRules is false and props.disableInlineRules is false': testRulesAreInlined(false, false), - 'When context.disableInlineRules is true and props.disableInlineRules is false': testRulesAreInlined(true, false), - 'When context.disableInlineRules is false': testRulesAreInlined(false, undefined), - 'When props.disableInlineRules is false': testRulesAreInlined(undefined, false), - 'When neither props.disableInlineRules nor context.disableInlineRules are defined': testRulesAreInlined(undefined, undefined), - 'When props.disableInlineRules is undefined and context.disableInlineRules is null': testRulesAreInlined(null, undefined), - 'When props.disableInlineRules is false and context.disableInlineRules is null': testRulesAreInlined(null, false), - }, - - 'peer between all types of peers and port range types'(test: Test) { + describe('When context.disableInlineRules is false and props.disableInlineRules is false', () => { testRulesAreInlined(false, false); }); + describe('When context.disableInlineRules is true and props.disableInlineRules is false', () => { testRulesAreInlined(true, false); }); + describe('When context.disableInlineRules is false', () => { testRulesAreInlined(false, undefined); }); + describe('When props.disableInlineRules is false', () => { testRulesAreInlined(undefined, false); }); + describe('When neither props.disableInlineRules nor context.disableInlineRules are defined', () => { testRulesAreInlined(undefined, undefined); }); + describe('When props.disableInlineRules is undefined and context.disableInlineRules is null', () => { testRulesAreInlined(null, undefined); }); + describe('When props.disableInlineRules is false and 
context.disableInlineRules is null', () => { testRulesAreInlined(null, false); }); + }); + + test('peer between all types of peers and port range types', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '12345678', region: 'dummy' } }); const vpc = new Vpc(stack, 'VPC'); @@ -206,10 +205,10 @@ nodeunitShim({ // THEN -- no crash - test.done(); - }, - 'if tokens are used in ports, `canInlineRule` should be false to avoid cycles'(test: Test) { + }); + + test('if tokens are used in ports, `canInlineRule` should be false to avoid cycles', () => { // GIVEN const p1 = Lazy.number({ produce: () => 80 }); const p2 = Lazy.number({ produce: () => 5000 }); @@ -233,52 +232,52 @@ nodeunitShim({ // THEN for (const range of ports) { - test.equal(range.canInlineRule, false, range.toString()); + expect(range.canInlineRule).toEqual(false); } - test.done(); - }, - 'Peer IP CIDR validation': { - 'passes with valid IPv4 CIDR block'(test: Test) { + }); + + describe('Peer IP CIDR validation', () => { + test('passes with valid IPv4 CIDR block', () => { // GIVEN const cidrIps = ['0.0.0.0/0', '192.168.255.255/24']; // THEN for (const cidrIp of cidrIps) { - test.equal(Peer.ipv4(cidrIp).uniqueId, cidrIp); + expect(Peer.ipv4(cidrIp).uniqueId).toEqual(cidrIp); } - test.done(); - }, - 'passes with unresolved IP CIDR token'(test: Test) { + }); + + test('passes with unresolved IP CIDR token', () => { // GIVEN Token.asString(new Intrinsic('ip')); // THEN: don't throw - test.done(); - }, - 'throws if invalid IPv4 CIDR block'(test: Test) { + }); + + test('throws if invalid IPv4 CIDR block', () => { // THEN - test.throws(() => { + expect(() => { Peer.ipv4('invalid'); - }, /Invalid IPv4 CIDR/); + }).toThrow(/Invalid IPv4 CIDR/); - test.done(); - }, - 'throws if missing mask in IPv4 CIDR block'(test: Test) { - test.throws(() => { + }); + + test('throws if missing mask in IPv4 CIDR block', () => { + expect(() => { Peer.ipv4('0.0.0.0'); - }, /CIDR mask is missing in IPv4/); + }).toThrow(/CIDR mask is missing in IPv4/); + - test.done(); - }, + }); - 'passes with valid IPv6 CIDR block'(test: Test) { + test('passes with valid IPv6 CIDR block', () => { // GIVEN const cidrIps = [ '::/0', @@ -289,31 +288,31 @@ nodeunitShim({ // THEN for (const cidrIp of cidrIps) { - test.equal(Peer.ipv6(cidrIp).uniqueId, cidrIp); + expect(Peer.ipv6(cidrIp).uniqueId).toEqual(cidrIp); } - test.done(); - }, - 'throws if invalid IPv6 CIDR block'(test: Test) { + }); + + test('throws if invalid IPv6 CIDR block', () => { // THEN - test.throws(() => { + expect(() => { Peer.ipv6('invalid'); - }, /Invalid IPv6 CIDR/); + }).toThrow(/Invalid IPv6 CIDR/); - test.done(); - }, - 'throws if missing mask in IPv6 CIDR block'(test: Test) { - test.throws(() => { + }); + + test('throws if missing mask in IPv6 CIDR block', () => { + expect(() => { Peer.ipv6('::'); - }, /IDR mask is missing in IPv6/); + }).toThrow(/IDR mask is missing in IPv6/); - test.done(); - }, - }, - 'can look up a security group'(test: Test) { + }); + }); + + test('can look up a security group', () => { const app = new App(); const stack = new Stack(app, 'stack', { env: { @@ -324,438 +323,435 @@ nodeunitShim({ const securityGroup = SecurityGroup.fromLookup(stack, 'stack', 'sg-1234'); - test.equal(securityGroup.securityGroupId, 'sg-12345'); - test.equal(securityGroup.allowAllOutbound, true); + expect(securityGroup.securityGroupId).toEqual('sg-12345'); + expect(securityGroup.allowAllOutbound).toEqual(true); + - test.done(); - }, + }); }); function 
testRulesAreInlined(contextDisableInlineRules: boolean | undefined | null, optionsDisableInlineRules: boolean | undefined) { - return { - 'When allowAllOutbound': { - 'new SecurityGroup will create an inline SecurityGroupEgress rule to allow all traffic'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - new SecurityGroup(stack, 'SG1', props); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - SecurityGroupEgress: [ - { - CidrIp: '0.0.0.0/0', - Description: 'Allow all outbound traffic by default', - IpProtocol: '-1', - }, - ], - })); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupEgress', {}))); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, - 'addEgressRule rule will not modify egress rules'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - SecurityGroupEgress: [ - { - CidrIp: '0.0.0.0/0', - Description: 'Allow all outbound traffic by default', - IpProtocol: '-1', - }, - ], - })); - - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupEgress', {}))); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, + describe('When allowAllOutbound', () => { + test('new SecurityGroup will create an inline SecurityGroupEgress rule to allow all traffic', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + new SecurityGroup(stack, 'SG1', props); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + SecurityGroupEgress: [ + { + CidrIp: '0.0.0.0/0', + Description: 'Allow all outbound traffic by default', + IpProtocol: '-1', + }, + ], + }); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupEgress', {}); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); - 'addIngressRule will add a new ingress rule'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - 
VpcId: stack.resolve(vpc.vpcId), - SecurityGroupIngress: [ - { - CidrIp: '0.0.0.0/0', - Description: 'An external Rule', - FromPort: 86, - IpProtocol: 'tcp', - ToPort: 86, - }, - ], - SecurityGroupEgress: [ - { - CidrIp: '0.0.0.0/0', - Description: 'Allow all outbound traffic by default', - IpProtocol: '-1', - }, - ], - })); - - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupEgress', {}))); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, - }, - - 'When do not allowAllOutbound': { - 'new SecurityGroup rule will create an egress rule that denies all traffic'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - new SecurityGroup(stack, 'SG1', props); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - SecurityGroupEgress: [ - { - CidrIp: '255.255.255.255/32', - Description: 'Disallow all traffic', - IpProtocol: 'icmp', - FromPort: 252, - ToPort: 86, - }, - ], - })); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - - test.done(); - }, - 'addEgressRule rule will add a new inline egress rule and remove the denyAllTraffic rule'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An inline Rule'); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - SecurityGroupEgress: [ - { - CidrIp: '0.0.0.0/0', - Description: 'An inline Rule', - FromPort: 86, - IpProtocol: 'tcp', - ToPort: 86, - }, - ], - })); - - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupEgress', {}))); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, + }); - 'addIngressRule will add a new ingress rule'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - SecurityGroupIngress: [ - { - CidrIp: '0.0.0.0/0', - Description: 'An external Rule', - FromPort: 86, - IpProtocol: 'tcp', - ToPort: 86, - }, - ], - SecurityGroupEgress: [ - { - CidrIp: '255.255.255.255/32', - Description: 'Disallow all traffic', - IpProtocol: 'icmp', - FromPort: 252, - ToPort: 86, - }, - ], - })); - - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupEgress', {}))); - 
expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, - }, - }; -} + test('addEgressRule rule will not modify egress rules', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + SecurityGroupEgress: [ + { + CidrIp: '0.0.0.0/0', + Description: 'Allow all outbound traffic by default', + IpProtocol: '-1', + }, + ], + }); + + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupEgress', {}); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + + }); + + test('addIngressRule will add a new ingress rule', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + SecurityGroupIngress: [ + { + CidrIp: '0.0.0.0/0', + Description: 'An external Rule', + FromPort: 86, + IpProtocol: 'tcp', + ToPort: 86, + }, + ], + SecurityGroupEgress: [ + { + CidrIp: '0.0.0.0/0', + Description: 'Allow all outbound traffic by default', + IpProtocol: '-1', + }, + ], + }); + + }); + }); + + describe('When do not allowAllOutbound', () => { + test('new SecurityGroup rule will create an egress rule that denies all traffic', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + new SecurityGroup(stack, 'SG1', props); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + SecurityGroupEgress: [ + { + CidrIp: '255.255.255.255/32', + Description: 'Disallow all traffic', + IpProtocol: 'icmp', + FromPort: 252, + ToPort: 86, + }, + ], + }); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + + + }); + test('addEgressRule rule will add a new inline egress rule and remove the denyAllTraffic rule', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An inline Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + 
GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + SecurityGroupEgress: [ + { + CidrIp: '0.0.0.0/0', + Description: 'An inline Rule', + FromPort: 86, + IpProtocol: 'tcp', + ToPort: 86, + }, + ], + }); + + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupEgress', {}); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + + }); + + test('addIngressRule will add a new ingress rule', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + SecurityGroupIngress: [ + { + CidrIp: '0.0.0.0/0', + Description: 'An external Rule', + FromPort: 86, + IpProtocol: 'tcp', + ToPort: 86, + }, + ], + SecurityGroupEgress: [ + { + CidrIp: '255.255.255.255/32', + Description: 'Disallow all traffic', + IpProtocol: 'icmp', + FromPort: 252, + ToPort: 86, + }, + ], + }); + + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupEgress', {}); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + + }); + }); + +}; function testRulesAreNotInlined(contextDisableInlineRules: boolean | undefined | null, optionsDisableInlineRules: boolean | undefined) { - return { - 'When allowAllOutbound': { - 'new SecurityGroup will create an external SecurityGroupEgress rule'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '0.0.0.0/0', - Description: 'Allow all outbound traffic by default', - IpProtocol: '-1', - })); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, - 'addIngressRule rule will not remove external allowAllOutbound rule'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; + describe('When allowAllOutbound', () => { + test('new SecurityGroup will create an external SecurityGroupEgress rule', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + 
VpcId: stack.resolve(vpc.vpcId), + }); + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '0.0.0.0/0', + Description: 'Allow all outbound traffic by default', + IpProtocol: '-1', + }); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); + test('addIngressRule rule will not remove external allowAllOutbound rule', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '0.0.0.0/0', - Description: 'Allow all outbound traffic by default', - IpProtocol: '-1', - })); + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + }); - 'addIngressRule rule will not add a new egress rule'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); - - expect(stack).to(not(haveResource('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - Description: 'An external Rule', - }))); - - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '0.0.0.0/0', + Description: 'Allow all outbound traffic by default', + IpProtocol: '-1', + }); - 'addIngressRule rule will add a new external ingress rule even if it could have been inlined'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); + test('addIngressRule rule will not add a new egress rule', () => { + // GIVEN + const stack = 
new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '0.0.0.0/0', - Description: 'An external Rule', - FromPort: 86, - IpProtocol: 'tcp', - ToPort: 86, - })); + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '0.0.0.0/0', - Description: 'Allow all outbound traffic by default', - IpProtocol: '-1', - })); - test.done(); - }, - }, - - 'When do not allowAllOutbound': { - 'new SecurityGroup rule will create an external egress rule that denies all traffic'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '255.255.255.255/32', - Description: 'Disallow all traffic', - IpProtocol: 'icmp', - FromPort: 252, - ToPort: 86, - })); - test.done(); - }, + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + }); - 'addEgressRule rule will remove the rule that denies all traffic if another egress rule is added'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; - - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); - - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '255.255.255.255/32', - }))); - test.done(); - }, + expect(stack).not.toHaveResource('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + Description: 'An external Rule', + }); - 'addEgressRule rule will add a new external egress rule even if it could have been inlined'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + 
expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); + test('addIngressRule rule will add a new external ingress rule even if it could have been inlined', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: true, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + }); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '0.0.0.0/0', + Description: 'An external Rule', + FromPort: 86, + IpProtocol: 'tcp', + ToPort: 86, + }); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '0.0.0.0/0', + Description: 'Allow all outbound traffic by default', + IpProtocol: '-1', + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '0.0.0.0/0', - Description: 'An external Rule', - FromPort: 86, - IpProtocol: 'tcp', - ToPort: 86, - })); + }); + }); - expect(stack).to(not(haveResourceLike('AWS::EC2::SecurityGroupIngress', {}))); - test.done(); - }, + describe('When do not allowAllOutbound', () => { + test('new SecurityGroup rule will create an external egress rule that denies all traffic', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + }); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '255.255.255.255/32', + Description: 'Disallow all traffic', + IpProtocol: 'icmp', + FromPort: 252, + ToPort: 86, + }); - 'addIngressRule will add a new external ingress rule even if it could have been inlined'(test: Test) { - // GIVEN - const stack = new Stack(); - stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); - const vpc = new Vpc(stack, 'VPC'); - const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + }); - // WHEN - const sg = new SecurityGroup(stack, 'SG1', props); - sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + test('addEgressRule rule will remove the rule that denies all traffic if another egress rule is added', () => { + // GIVEN + const stack = new Stack(); + 
stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + }); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '255.255.255.255/32', + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { - GroupDescription: 'Default/SG1', - VpcId: stack.resolve(vpc.vpcId), - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupIngress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '0.0.0.0/0', - Description: 'An external Rule', - FromPort: 86, - IpProtocol: 'tcp', - ToPort: 86, - })); + test('addEgressRule rule will add a new external egress rule even if it could have been inlined', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addEgressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + }); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '0.0.0.0/0', + Description: 'An external Rule', + FromPort: 86, + IpProtocol: 'tcp', + ToPort: 86, + }); + + expect(stack).not.toHaveResourceLike('AWS::EC2::SecurityGroupIngress', {}); + + }); + + test('addIngressRule will add a new external ingress rule even if it could have been inlined', () => { + // GIVEN + const stack = new Stack(); + stack.node.setContext(SECURITY_GROUP_DISABLE_INLINE_RULES_CONTEXT_KEY, contextDisableInlineRules); + const vpc = new Vpc(stack, 'VPC'); + const props: SecurityGroupProps = { vpc, allowAllOutbound: false, disableInlineRules: optionsDisableInlineRules }; + + // WHEN + const sg = new SecurityGroup(stack, 'SG1', props); + sg.addIngressRule(Peer.anyIpv4(), Port.tcp(86), 'An external Rule'); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { + GroupDescription: 'Default/SG1', + VpcId: stack.resolve(vpc.vpcId), + }); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroupIngress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '0.0.0.0/0', + Description: 'An external Rule', + FromPort: 86, + IpProtocol: 'tcp', + ToPort: 86, + }); + + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { + GroupId: stack.resolve(sg.securityGroupId), + CidrIp: '255.255.255.255/32', + Description: 'Disallow all traffic', + IpProtocol: 'icmp', + FromPort: 252, + ToPort: 86, + }); + + }); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { - GroupId: stack.resolve(sg.securityGroupId), - CidrIp: '255.255.255.255/32', - Description: 'Disallow all traffic', - IpProtocol: 'icmp', - FromPort: 252, - ToPort: 86, - })); - test.done(); - 
}, - }, - }; } \ No newline at end of file diff --git a/packages/@aws-cdk/aws-ec2/test/userdata.test.ts b/packages/@aws-cdk/aws-ec2/test/userdata.test.ts index c3a7538c07f66..272ad60a84b00 100644 --- a/packages/@aws-cdk/aws-ec2/test/userdata.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/userdata.test.ts @@ -1,10 +1,9 @@ import { Bucket } from '@aws-cdk/aws-s3'; import { Aws, Stack } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as ec2 from '../lib'; -nodeunitShim({ - 'can create Windows user data'(test: Test) { +describe('user data', () => { + test('can create Windows user data', () => { // GIVEN // WHEN @@ -13,10 +12,10 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, 'command1\ncommand2'); - test.done(); - }, - 'can create Windows user data with commands on exit'(test: Test) { + expect(rendered).toEqual('command1\ncommand2'); + + }); + test('can create Windows user data with commands on exit', () => { // GIVEN const userData = ec2.UserData.forWindows(); @@ -26,7 +25,7 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, 'trap {\n' + + expect(rendered).toEqual('trap {\n' + '$success=($PSItem.Exception.Message -eq "Success")\n' + 'onexit1\n' + 'onexit2\n' + @@ -35,9 +34,9 @@ nodeunitShim({ 'command1\n' + 'command2\n' + 'throw "Success"'); - test.done(); - }, - 'can create Windows with Signal Command'(test: Test) { + + }); + test('can create Windows with Signal Command', () => { // GIVEN const stack = new Stack(); const resource = new ec2.Vpc(stack, 'RESOURCE'); @@ -50,7 +49,7 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, 'trap {\n' + + expect(rendered).toEqual('trap {\n' + '$success=($PSItem.Exception.Message -eq "Success")\n' + `cfn-signal --stack Default --resource RESOURCE1989552F --region ${Aws.REGION} --success ($success.ToString().ToLower())\n` + 'break\n' + @@ -58,9 +57,9 @@ nodeunitShim({ 'command1\n' + 'throw "Success"', ); - test.done(); - }, - 'can windows userdata download S3 files'(test: Test) { + + }); + test('can windows userdata download S3 files', () => { // GIVEN const stack = new Stack(); const userData = ec2.UserData.forWindows(); @@ -80,14 +79,14 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, 'mkdir (Split-Path -Path \'C:/temp/filename.bat\' ) -ea 0\n' + + expect(rendered).toEqual('mkdir (Split-Path -Path \'C:/temp/filename.bat\' ) -ea 0\n' + 'Read-S3Object -BucketName \'test\' -key \'filename.bat\' -file \'C:/temp/filename.bat\' -ErrorAction Stop\n' + 'mkdir (Split-Path -Path \'c:\\test\\location\\otherScript.bat\' ) -ea 0\n' + 'Read-S3Object -BucketName \'test2\' -key \'filename2.bat\' -file \'c:\\test\\location\\otherScript.bat\' -ErrorAction Stop', ); - test.done(); - }, - 'can windows userdata execute files'(test: Test) { + + }); + test('can windows userdata execute files', () => { // GIVEN const userData = ec2.UserData.forWindows(); @@ -102,14 +101,14 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, '&\'C:\\test\\filename.bat\'\n' + + expect(rendered).toEqual('&\'C:\\test\\filename.bat\'\n' + 'if (!$?) { Write-Error \'Failed to execute the file "C:\\test\\filename.bat"\' -ErrorAction Stop }\n' + '&\'C:\\test\\filename2.bat\' arg1 arg2 -arg $variable\n' + 'if (!$?) 
{ Write-Error \'Failed to execute the file "C:\\test\\filename2.bat"\' -ErrorAction Stop }', ); - test.done(); - }, - 'can create Linux user data'(test: Test) { + + }); + test('can create Linux user data', () => { // GIVEN // WHEN @@ -118,10 +117,10 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, '#!/bin/bash\ncommand1\ncommand2'); - test.done(); - }, - 'can create Linux user data with commands on exit'(test: Test) { + expect(rendered).toEqual('#!/bin/bash\ncommand1\ncommand2'); + + }); + test('can create Linux user data with commands on exit', () => { // GIVEN const userData = ec2.UserData.forLinux(); @@ -131,7 +130,7 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, '#!/bin/bash\n' + + expect(rendered).toEqual('#!/bin/bash\n' + 'function exitTrap(){\n' + 'exitCode=$?\n' + 'onexit1\n' + @@ -140,9 +139,9 @@ nodeunitShim({ 'trap exitTrap EXIT\n' + 'command1\n' + 'command2'); - test.done(); - }, - 'can create Linux with Signal Command'(test: Test) { + + }); + test('can create Linux with Signal Command', () => { // GIVEN const stack = new Stack(); const resource = new ec2.Vpc(stack, 'RESOURCE'); @@ -154,16 +153,16 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, '#!/bin/bash\n' + + expect(rendered).toEqual('#!/bin/bash\n' + 'function exitTrap(){\n' + 'exitCode=$?\n' + `/opt/aws/bin/cfn-signal --stack Default --resource RESOURCE1989552F --region ${Aws.REGION} -e $exitCode || echo \'Failed to send Cloudformation Signal\'\n` + '}\n' + 'trap exitTrap EXIT\n' + 'command1'); - test.done(); - }, - 'can linux userdata download S3 files'(test: Test) { + + }); + test('can linux userdata download S3 files', () => { // GIVEN const stack = new Stack(); const userData = ec2.UserData.forLinux(); @@ -183,15 +182,15 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, '#!/bin/bash\n' + + expect(rendered).toEqual('#!/bin/bash\n' + 'mkdir -p $(dirname \'/tmp/filename.sh\')\n' + 'aws s3 cp \'s3://test/filename.sh\' \'/tmp/filename.sh\'\n' + 'mkdir -p $(dirname \'c:\\test\\location\\otherScript.sh\')\n' + 'aws s3 cp \'s3://test2/filename2.sh\' \'c:\\test\\location\\otherScript.sh\'', ); - test.done(); - }, - 'can linux userdata execute files'(test: Test) { + + }); + test('can linux userdata execute files', () => { // GIVEN const userData = ec2.UserData.forLinux(); @@ -206,7 +205,7 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, '#!/bin/bash\n' + + expect(rendered).toEqual('#!/bin/bash\n' + 'set -e\n' + 'chmod +x \'/tmp/filename.sh\'\n' + '\'/tmp/filename.sh\'\n' + @@ -214,9 +213,9 @@ nodeunitShim({ 'chmod +x \'/test/filename2.sh\'\n' + '\'/test/filename2.sh\' arg1 arg2 -arg $variable', ); - test.done(); - }, - 'can create Custom user data'(test: Test) { + + }); + test('can create Custom user data', () => { // GIVEN // WHEN @@ -224,19 +223,19 @@ nodeunitShim({ // THEN const rendered = userData.render(); - test.equals(rendered, 'Some\nmultiline\ncontent'); - test.done(); - }, - 'Custom user data throws when adding on exit commands'(test: Test) { + expect(rendered).toEqual('Some\nmultiline\ncontent'); + + }); + test('Custom user data throws when adding on exit commands', () => { // GIVEN // WHEN const userData = ec2.UserData.custom(''); // THEN - test.throws(() => userData.addOnExitCommands( 'a command goes here' )); - test.done(); - }, - 'Custom user data throws when adding signal command'(test: Test) { + expect(() => 
userData.addOnExitCommands( 'a command goes here' )).toThrow(); + + }); + test('Custom user data throws when adding signal command', () => { // GIVEN const stack = new Stack(); const resource = new ec2.Vpc(stack, 'RESOURCE'); @@ -245,35 +244,35 @@ nodeunitShim({ const userData = ec2.UserData.custom(''); // THEN - test.throws(() => userData.addSignalOnExitCommand( resource )); - test.done(); - }, - 'Custom user data throws when downloading file'(test: Test) { + expect(() => userData.addSignalOnExitCommand( resource )).toThrow(); + + }); + test('Custom user data throws when downloading file', () => { // GIVEN const stack = new Stack(); const userData = ec2.UserData.custom(''); const bucket = Bucket.fromBucketName( stack, 'testBucket', 'test' ); // WHEN // THEN - test.throws(() => userData.addS3DownloadCommand({ + expect(() => userData.addS3DownloadCommand({ bucket, bucketKey: 'filename.sh', - } )); - test.done(); - }, - 'Custom user data throws when executing file'(test: Test) { + })).toThrow(); + + }); + test('Custom user data throws when executing file', () => { // GIVEN const userData = ec2.UserData.custom(''); // WHEN // THEN - test.throws(() => + expect(() => userData.addExecuteFileCommand({ filePath: '/tmp/filename.sh', - } )); - test.done(); - }, + })).toThrow(); - 'Linux user rendering multipart headers'(test: Test) { + }); + + test('Linux user rendering multipart headers', () => { // GIVEN const stack = new Stack(); const linuxUserData = ec2.UserData.forLinux(); @@ -297,10 +296,10 @@ nodeunitShim({ { 'Fn::Base64': '#!/bin/bash\necho \"Hello world\"' }, ]); - test.done(); - }, - 'Default parts separator used, if not specified'(test: Test) { + }); + + test('Default parts separator used, if not specified', () => { // GIVEN const multipart = new ec2.MultipartUserData(); @@ -312,7 +311,7 @@ nodeunitShim({ const out = multipart.render(); // WHEN - test.equals(out, [ + expect(out).toEqual([ 'Content-Type: multipart/mixed; boundary=\"+AWS+CDK+User+Data+Separator==\"', 'MIME-Version: 1.0', '', @@ -323,10 +322,10 @@ nodeunitShim({ '', ].join('\n')); - test.done(); - }, - 'Non-default parts separator used, if not specified'(test: Test) { + }); + + test('Non-default parts separator used, if not specified', () => { // GIVEN const multipart = new ec2.MultipartUserData({ partsSeparator: '//', @@ -340,7 +339,7 @@ nodeunitShim({ const out = multipart.render(); // WHEN - test.equals(out, [ + expect(out).toEqual([ 'Content-Type: multipart/mixed; boundary=\"//\"', 'MIME-Version: 1.0', '', @@ -351,35 +350,35 @@ nodeunitShim({ '', ].join('\n')); - test.done(); - }, - 'Multipart separator validation'(test: Test) { + }); + + test('Multipart separator validation', () => { // Happy path new ec2.MultipartUserData(); new ec2.MultipartUserData({ partsSeparator: 'a-zA-Z0-9()+,-./:=?', }); - [' ', '\n', '\r', '[', ']', '<', '>', '違う'].forEach(s => test.throws(() => { + [' ', '\n', '\r', '[', ']', '<', '>', '違う'].forEach(s => expect(() => { new ec2.MultipartUserData({ partsSeparator: s, }); - }, /Invalid characters in separator/)); + }).toThrow(/Invalid characters in separator/)); - test.done(); - }, - 'Multipart user data throws when adding on exit commands'(test: Test) { + }); + + test('Multipart user data throws when adding on exit commands', () => { // GIVEN // WHEN const userData = new ec2.MultipartUserData(); // THEN - test.throws(() => userData.addOnExitCommands( 'a command goes here' )); - test.done(); - }, - 'Multipart user data throws when adding signal command'(test: Test) { + expect(() => 
userData.addOnExitCommands( 'a command goes here' )).toThrow(); + + }); + test('Multipart user data throws when adding signal command', () => { // GIVEN const stack = new Stack(); const resource = new ec2.Vpc(stack, 'RESOURCE'); @@ -388,36 +387,36 @@ nodeunitShim({ const userData = new ec2.MultipartUserData(); // THEN - test.throws(() => userData.addSignalOnExitCommand( resource )); - test.done(); - }, - 'Multipart user data throws when downloading file'(test: Test) { + expect(() => userData.addSignalOnExitCommand( resource )).toThrow(); + + }); + test('Multipart user data throws when downloading file', () => { // GIVEN const stack = new Stack(); const userData = new ec2.MultipartUserData(); const bucket = Bucket.fromBucketName( stack, 'testBucket', 'test' ); // WHEN // THEN - test.throws(() => userData.addS3DownloadCommand({ + expect(() => userData.addS3DownloadCommand({ bucket, bucketKey: 'filename.sh', - } )); - test.done(); - }, - 'Multipart user data throws when executing file'(test: Test) { + } )).toThrow(); + + }); + test('Multipart user data throws when executing file', () => { // GIVEN const userData = new ec2.MultipartUserData(); // WHEN // THEN - test.throws(() => + expect(() => userData.addExecuteFileCommand({ filePath: '/tmp/filename.sh', - } )); - test.done(); - }, + } )).toThrow(); + + }); - 'can add commands to Multipart user data'(test: Test) { + test('can add commands to Multipart user data', () => { // GIVEN const stack = new Stack(); const innerUserData = ec2.UserData.forLinux(); @@ -430,9 +429,9 @@ nodeunitShim({ // THEN const expectedInner = '#!/bin/bash\ncommand1\ncommand2'; const rendered = innerUserData.render(); - test.equals(rendered, expectedInner); + expect(rendered).toEqual(expectedInner); const out = stack.resolve(userData.render()); - test.equals(out, { + expect(out).toEqual({ 'Fn::Join': [ '', [ @@ -453,9 +452,9 @@ nodeunitShim({ ], ], }); - test.done(); - }, - 'can add commands on exit to Multipart user data'(test: Test) { + + }); + test('can add commands on exit to Multipart user data', () => { // GIVEN const stack = new Stack(); const innerUserData = ec2.UserData.forLinux(); @@ -477,9 +476,9 @@ nodeunitShim({ 'command1\n' + 'command2'; const rendered = stack.resolve(innerUserData.render()); - test.equals(rendered, expectedInner); + expect(rendered).toEqual(expectedInner); const out = stack.resolve(userData.render()); - test.equals(out, { + expect(out).toEqual({ 'Fn::Join': [ '', [ @@ -500,9 +499,9 @@ nodeunitShim({ ], ], }); - test.done(); - }, - 'can add Signal Command to Multipart user data'(test: Test) { + + }); + test('can add Signal Command to Multipart user data', () => { // GIVEN const stack = new Stack(); const resource = new ec2.Vpc(stack, 'RESOURCE'); @@ -523,9 +522,9 @@ nodeunitShim({ 'trap exitTrap EXIT\n' + 'command1'); const rendered = stack.resolve(innerUserData.render()); - test.equals(rendered, expectedInner); + expect(rendered).toEqual(expectedInner); const out = stack.resolve(userData.render()); - test.equals(out, { + expect(out).toEqual({ 'Fn::Join': [ '', [ @@ -546,9 +545,9 @@ nodeunitShim({ ], ], }); - test.done(); - }, - 'can add download S3 files to Multipart user data'(test: Test) { + + }); + test('can add download S3 files to Multipart user data', () => { // GIVEN const stack = new Stack(); const innerUserData = ec2.UserData.forLinux(); @@ -575,9 +574,9 @@ nodeunitShim({ 'mkdir -p $(dirname \'c:\\test\\location\\otherScript.sh\')\n' + 'aws s3 cp \'s3://test2/filename2.sh\' \'c:\\test\\location\\otherScript.sh\''; const rendered 
= stack.resolve(innerUserData.render()); - test.equals(rendered, expectedInner); + expect(rendered).toEqual(expectedInner); const out = stack.resolve(userData.render()); - test.equals(out, { + expect(out).toEqual({ 'Fn::Join': [ '', [ @@ -598,9 +597,9 @@ nodeunitShim({ ], ], }); - test.done(); - }, - 'can add execute files to Multipart user data'(test: Test) { + + }); + test('can add execute files to Multipart user data', () => { // GIVEN const stack = new Stack(); const innerUserData = ec2.UserData.forLinux(); @@ -625,9 +624,9 @@ nodeunitShim({ 'chmod +x \'/test/filename2.sh\'\n' + '\'/test/filename2.sh\' arg1 arg2 -arg $variable'; const rendered = stack.resolve(innerUserData.render()); - test.equals(rendered, expectedInner); + expect(rendered).toEqual(expectedInner); const out = stack.resolve(userData.render()); - test.equals(out, { + expect(out).toEqual({ 'Fn::Join': [ '', [ @@ -648,6 +647,6 @@ nodeunitShim({ ], ], }); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/aws-ec2/test/volume.test.ts b/packages/@aws-cdk/aws-ec2/test/volume.test.ts index af88164fcc439..d33acadc3f8dc 100644 --- a/packages/@aws-cdk/aws-ec2/test/volume.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/volume.test.ts @@ -1,8 +1,6 @@ +import '@aws-cdk/assert-internal/jest'; import { arrayWith, - expect as cdkExpect, - haveResource, - haveResourceLike, ResourcePart, } from '@aws-cdk/assert-internal'; import { @@ -36,7 +34,7 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::Volume', { + expect(stack).toHaveResource('AWS::EC2::Volume', { AvailabilityZone: 'us-east-1a', MultiAttachEnabled: false, Size: 8, @@ -47,11 +45,11 @@ describe('volume', () => { Value: 'MyVolume', }, ], - }, ResourcePart.Properties)); + }, ResourcePart.Properties); - cdkExpect(stack).to(haveResource('AWS::EC2::Volume', { + expect(stack).toHaveResource('AWS::EC2::Volume', { DeletionPolicy: 'Retain', - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); }); test('fromVolumeAttributes', () => { @@ -87,7 +85,7 @@ describe('volume', () => { cdk.Tags.of(volume).add('TagKey', 'TagValue'); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::Volume', { + expect(stack).toHaveResource('AWS::EC2::Volume', { AvailabilityZone: 'us-east-1a', MultiAttachEnabled: false, Size: 8, @@ -96,7 +94,7 @@ describe('volume', () => { Key: 'TagKey', Value: 'TagValue', }], - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -113,9 +111,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { AutoEnableIO: true, - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -132,9 +130,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { Encrypted: true, - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -153,7 +151,7 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { Encrypted: true, KmsKeyId: { 'Fn::GetAtt': [ @@ -161,8 +159,8 @@ describe('volume', () => { 'Arn', ], }, - }, ResourcePart.Properties)); - cdkExpect(stack).to(haveResourceLike('AWS::KMS::Key', { + }, ResourcePart.Properties); + expect(stack).toHaveResourceLike('AWS::KMS::Key', { KeyPolicy: { Statement: [ {}, @@ -213,7 +211,7 @@ describe('volume', () => { }, ], }, - })); + }); }); 
@@ -235,7 +233,7 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::KMS::Key', { + expect(stack).toHaveResourceLike('AWS::KMS::Key', { KeyPolicy: { Statement: [ {}, @@ -248,7 +246,7 @@ describe('volume', () => { }, ], }, - })); + }); }); @@ -266,10 +264,10 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { Iops: 500, VolumeType: 'io1', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -288,9 +286,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { MultiAttachEnabled: true, - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -306,9 +304,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { SnapshotId: 'snap-00000000', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -325,9 +323,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { VolumeType: 'standard', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -345,9 +343,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { VolumeType: 'io1', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -365,9 +363,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { VolumeType: 'io2', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -384,9 +382,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { VolumeType: 'gp2', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -403,9 +401,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { VolumeType: 'gp3', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -422,9 +420,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { VolumeType: 'st1', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -441,9 +439,9 @@ describe('volume', () => { }); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { VolumeType: 'sc1', - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -461,7 +459,7 @@ describe('volume', () => { volume.grantAttachVolume(role); // THEN - cdkExpect(stack).to(haveResource('AWS::IAM::Policy', { + expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [{ @@ -514,7 +512,7 @@ describe('volume', () => { ], }], }, - })); + }); }); @@ -538,7 +536,7 @@ describe('volume', () => { volume.grantAttachVolume(role); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::KMS::Key', { + expect(stack).toHaveResourceLike('AWS::KMS::Key', { KeyPolicy: { Statement: [ {}, @@ -578,7 +576,7 @@ describe('volume', () => { }, ], }, - 
})); + }); }); @@ -599,7 +597,7 @@ describe('volume', () => { volume.grantAttachVolume(role); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Statement: arrayWith({ Effect: 'Allow', @@ -632,7 +630,7 @@ describe('volume', () => { }, }), }, - })); + }); }); @@ -672,7 +670,7 @@ describe('volume', () => { volume.grantAttachVolume(role); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [ @@ -709,7 +707,7 @@ describe('volume', () => { }, ], }, - })); + }); }); @@ -740,7 +738,7 @@ describe('volume', () => { volume.grantAttachVolume(role, [instance1, instance2]); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [{ @@ -797,7 +795,7 @@ describe('volume', () => { ], }], }, - })); + }); }); @@ -821,7 +819,7 @@ describe('volume', () => { volume.grantAttachVolumeByResourceTag(instance.grantPrincipal, [instance]); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [{ @@ -857,16 +855,16 @@ describe('volume', () => { }, }], }, - })); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + }); + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { Tags: [ { Key: 'VolumeGrantAttach-B2376B2BDA', Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Instance', { + }, ResourcePart.Properties); + expect(stack).toHaveResourceLike('AWS::EC2::Instance', { Tags: [ {}, { @@ -874,7 +872,7 @@ describe('volume', () => { Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -898,7 +896,7 @@ describe('volume', () => { volume.grantAttachVolumeByResourceTag(instance.grantPrincipal, [instance], 'TestSuffix'); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [{ @@ -934,16 +932,16 @@ describe('volume', () => { }, }], }, - })); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + }); + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { Tags: [ { Key: 'VolumeGrantAttach-TestSuffix', Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Instance', { + }, ResourcePart.Properties); + expect(stack).toHaveResourceLike('AWS::EC2::Instance', { Tags: [ {}, { @@ -951,7 +949,7 @@ describe('volume', () => { Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -968,7 +966,7 @@ describe('volume', () => { volume.grantDetachVolume(role); // THEN - cdkExpect(stack).to(haveResource('AWS::IAM::Policy', { + expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [{ @@ -1021,7 +1019,7 @@ describe('volume', () => { ], }], }, - })); + }); }); @@ -1051,7 +1049,7 @@ describe('volume', () => { volume.grantDetachVolume(role, [instance1, instance2]); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Version: 
'2012-10-17', Statement: [{ @@ -1108,7 +1106,7 @@ describe('volume', () => { ], }], }, - })); + }); }); @@ -1132,7 +1130,7 @@ describe('volume', () => { volume.grantDetachVolumeByResourceTag(instance.grantPrincipal, [instance]); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [{ @@ -1168,16 +1166,16 @@ describe('volume', () => { }, }], }, - })); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + }); + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { Tags: [ { Key: 'VolumeGrantDetach-B2376B2BDA', Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Instance', { + }, ResourcePart.Properties); + expect(stack).toHaveResourceLike('AWS::EC2::Instance', { Tags: [ {}, { @@ -1185,7 +1183,7 @@ describe('volume', () => { Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); @@ -1209,7 +1207,7 @@ describe('volume', () => { volume.grantDetachVolumeByResourceTag(instance.grantPrincipal, [instance], 'TestSuffix'); // THEN - cdkExpect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Version: '2012-10-17', Statement: [{ @@ -1245,16 +1243,16 @@ describe('volume', () => { }, }], }, - })); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Volume', { + }); + expect(stack).toHaveResourceLike('AWS::EC2::Volume', { Tags: [ { Key: 'VolumeGrantDetach-TestSuffix', Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Instance', { + }, ResourcePart.Properties); + expect(stack).toHaveResourceLike('AWS::EC2::Instance', { Tags: [ {}, { @@ -1262,7 +1260,7 @@ describe('volume', () => { Value: 'b2376b2bda65cb40f83c290dd844c4aa', }, ], - }, ResourcePart.Properties)); + }, ResourcePart.Properties); }); diff --git a/packages/@aws-cdk/aws-ec2/test/vpc-endpoint-service.test.ts b/packages/@aws-cdk/aws-ec2/test/vpc-endpoint-service.test.ts index 744e32953cc15..2b00460ec793b 100644 --- a/packages/@aws-cdk/aws-ec2/test/vpc-endpoint-service.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/vpc-endpoint-service.test.ts @@ -1,7 +1,6 @@ -import { expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import { ArnPrincipal } from '@aws-cdk/aws-iam'; import { Stack } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; // eslint-disable-next-line max-len import { IVpcEndpointServiceLoadBalancer, Vpc, VpcEndpointService } from '../lib'; @@ -19,9 +18,9 @@ class DummyEndpointLoadBalacer implements IVpcEndpointServiceLoadBalancer { } } -nodeunitShim({ - 'test vpc endpoint service': { - 'create endpoint service with no principals'(test: Test) { +describe('vpc endpoint service', () => { + describe('test vpc endpoint service', () => { + test('create endpoint service with no principals', () => { // GIVEN const stack = new Stack(); new Vpc(stack, 'MyVPC'); @@ -34,21 +33,21 @@ nodeunitShim({ allowedPrincipals: [new ArnPrincipal('arn:aws:iam::123456789012:root')], }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpointService', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpointService', { NetworkLoadBalancerArns: ['arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/Test/9bn6qkf4e9jrw77a'], AcceptanceRequired: false, - })); + 
});
-    expect(stack).notTo(haveResource('AWS::EC2::VPCEndpointServicePermissions', {
+    expect(stack).not.toHaveResource('AWS::EC2::VPCEndpointServicePermissions', {
       ServiceId: {
         Ref: 'EndpointServiceED36BE1F',
       },
       AllowedPrincipals: [],
-    }));
+    });
+

-      test.done();
-    },
-    'create endpoint service with a principal'(test: Test) {
+    });
+    test('create endpoint service with a principal', () => {
       // GIVEN
       const stack = new Stack();

@@ -61,22 +60,22 @@ nodeunitShim({
       });

       // THEN
-      expect(stack).to(haveResource('AWS::EC2::VPCEndpointService', {
+      expect(stack).toHaveResource('AWS::EC2::VPCEndpointService', {
         NetworkLoadBalancerArns: ['arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/Test/9bn6qkf4e9jrw77a'],
         AcceptanceRequired: false,
-      }));
+      });

-      expect(stack).to(haveResource('AWS::EC2::VPCEndpointServicePermissions', {
+      expect(stack).toHaveResource('AWS::EC2::VPCEndpointServicePermissions', {
         ServiceId: {
           Ref: 'EndpointServiceED36BE1F',
         },
         AllowedPrincipals: ['arn:aws:iam::123456789012:root'],
-      }));
+      });

-      test.done();
-    },
-    'with acceptance requried'(test: Test) {
+    });
+
+    test('with acceptance required', () => {
       // GIVEN
       const stack = new Stack();

@@ -89,19 +88,19 @@ nodeunitShim({
       });

       // THEN
-      expect(stack).to(haveResource('AWS::EC2::VPCEndpointService', {
+      expect(stack).toHaveResource('AWS::EC2::VPCEndpointService', {
         NetworkLoadBalancerArns: ['arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/Test/9bn6qkf4e9jrw77a'],
         AcceptanceRequired: true,
-      }));
+      });

-      expect(stack).to(haveResource('AWS::EC2::VPCEndpointServicePermissions', {
+      expect(stack).toHaveResource('AWS::EC2::VPCEndpointServicePermissions', {
         ServiceId: {
           Ref: 'EndpointServiceED36BE1F',
         },
         AllowedPrincipals: ['arn:aws:iam::123456789012:root'],
-      }));
+      });
+

-      test.done();
-    },
-  },
+    });
+  });
 });
diff --git a/packages/@aws-cdk/aws-ec2/test/vpc-endpoint.test.ts b/packages/@aws-cdk/aws-ec2/test/vpc-endpoint.test.ts
index 79a9f3594479e..c5724e91c2bcc 100644
--- a/packages/@aws-cdk/aws-ec2/test/vpc-endpoint.test.ts
+++ b/packages/@aws-cdk/aws-ec2/test/vpc-endpoint.test.ts
@@ -1,14 +1,13 @@
-import { expect, haveResource, haveResourceLike } from '@aws-cdk/assert-internal';
+import '@aws-cdk/assert-internal/jest';
 import { AnyPrincipal, PolicyStatement } from '@aws-cdk/aws-iam';
 import * as cxschema from '@aws-cdk/cloud-assembly-schema';
 import { ContextProvider, Fn, Stack } from '@aws-cdk/core';
-import { nodeunitShim, Test } from 'nodeunit-shim';
 // eslint-disable-next-line max-len
 import { GatewayVpcEndpoint, GatewayVpcEndpointAwsService, InterfaceVpcEndpoint, InterfaceVpcEndpointAwsService, InterfaceVpcEndpointService, SecurityGroup, SubnetType, Vpc } from '../lib';

-nodeunitShim({
-  'gateway endpoint': {
-    'add an endpoint to a vpc'(test: Test) {
+describe('vpc endpoint', () => {
+  describe('gateway endpoint', () => {
+    test('add an endpoint to a vpc', () => {
       // GIVEN
       const stack = new Stack();

@@ -22,7 +21,7 @@ nodeunitShim({
       });

       // THEN
-      expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', {
+      expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', {
         ServiceName: {
           'Fn::Join': [
             '',
@@ -45,12 +44,12 @@ nodeunitShim({
           { Ref: 'VpcNetworkPublicSubnet2RouteTableE5F348DF' },
         ],
         VpcEndpointType: 'Gateway',
-      }));
+      });
+

-      test.done();
-    },
+    });

-    'routing on private and public subnets'(test: Test) {
+    test('routing on private and public subnets', () => {
       // GIVEN
       const stack = new Stack();

@@ -72,7 +71,7 @@ nodeunitShim({
       });

       // THEN
-      
expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: { 'Fn::Join': [ '', @@ -103,12 +102,12 @@ nodeunitShim({ }, ], VpcEndpointType: 'Gateway', - })); + }); - test.done(); - }, - 'add statements to policy'(test: Test) { + }); + + test('add statements to policy', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -124,7 +123,7 @@ nodeunitShim({ })); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { PolicyDocument: { Statement: [ { @@ -139,12 +138,12 @@ nodeunitShim({ ], Version: '2012-10-17', }, - })); + }); - test.done(); - }, - 'throws when adding a statement without a principal'(test: Test) { + }); + + test('throws when adding a statement without a principal', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -153,15 +152,15 @@ nodeunitShim({ }); // THEN - test.throws(() => endpoint.addToPolicy(new PolicyStatement({ + expect(() => endpoint.addToPolicy(new PolicyStatement({ actions: ['s3:GetObject', 's3:ListBucket'], resources: ['*'], - })), /`Principal`/); + }))).toThrow(/`Principal`/); + - test.done(); - }, + }); - 'import/export'(test: Test) { + test('import/export', () => { // GIVEN const stack2 = new Stack(); @@ -169,11 +168,11 @@ nodeunitShim({ const ep = GatewayVpcEndpoint.fromGatewayVpcEndpointId(stack2, 'ImportedEndpoint', 'endpoint-id'); // THEN - test.deepEqual(ep.vpcEndpointId, 'endpoint-id'); - test.done(); - }, + expect(ep.vpcEndpointId).toEqual('endpoint-id'); - 'works with an imported vpc'(test: Test) { + }); + + test('works with an imported vpc', () => { // GIVEN const stack = new Stack(); const vpc = Vpc.fromVpcAttributes(stack, 'VPC', { @@ -186,17 +185,17 @@ nodeunitShim({ // THEN vpc.addGatewayEndpoint('Gateway', { service: GatewayVpcEndpointAwsService.S3 }); - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: { 'Fn::Join': ['', ['com.amazonaws.', { Ref: 'AWS::Region' }, '.s3']] }, VpcId: 'id', RouteTableIds: ['rt1', 'rt2', 'rt3'], VpcEndpointType: 'Gateway', - })); + }); - test.done(); - }, - 'throws with an imported vpc without route tables ids'(test: Test) { + }); + + test('throws with an imported vpc without route tables ids', () => { // GIVEN const stack = new Stack(); const vpc = Vpc.fromVpcAttributes(stack, 'VPC', { @@ -205,14 +204,14 @@ nodeunitShim({ availabilityZones: ['a', 'b', 'c'], }); - test.throws(() => vpc.addGatewayEndpoint('Gateway', { service: GatewayVpcEndpointAwsService.S3 }), /route table/); + expect(() => vpc.addGatewayEndpoint('Gateway', { service: GatewayVpcEndpointAwsService.S3 })).toThrow(/route table/); + - test.done(); - }, - }, + }); + }); - 'interface endpoint': { - 'add an endpoint to a vpc'(test: Test) { + describe('interface endpoint', () => { + test('add an endpoint to a vpc', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -223,7 +222,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: { 'Fn::Join': [ '', @@ -257,19 +256,19 @@ nodeunitShim({ }, ], VpcEndpointType: 'Interface', - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { GroupDescription: 'Default/VpcNetwork/EcrDocker/SecurityGroup', VpcId: { Ref: 
'VpcNetworkB258E83A', }, - })); + }); - test.done(); - }, - 'import/export'(test: Test) { + }); + + test('import/export', () => { // GIVEN const stack2 = new Stack(); @@ -282,15 +281,15 @@ nodeunitShim({ importedEndpoint.connections.allowDefaultPortFromAnyIpv4(); // THEN - expect(stack2).to(haveResource('AWS::EC2::SecurityGroupIngress', { + expect(stack2).toHaveResource('AWS::EC2::SecurityGroupIngress', { GroupId: 'security-group-id', - })); - test.deepEqual(importedEndpoint.vpcEndpointId, 'vpc-endpoint-id'); + }); + expect(importedEndpoint.vpcEndpointId).toEqual('vpc-endpoint-id'); - test.done(); - }, - 'import/export without security group'(test: Test) { + }); + + test('import/export without security group', () => { // GIVEN const stack2 = new Stack(); @@ -302,13 +301,13 @@ nodeunitShim({ importedEndpoint.connections.allowDefaultPortFromAnyIpv4(); // THEN - test.deepEqual(importedEndpoint.vpcEndpointId, 'vpc-endpoint-id'); - test.deepEqual(importedEndpoint.connections.securityGroups.length, 0); + expect(importedEndpoint.vpcEndpointId).toEqual('vpc-endpoint-id'); + expect(importedEndpoint.connections.securityGroups.length).toEqual(0); + - test.done(); - }, + }); - 'with existing security groups'(test: Test) { + test('with existing security groups', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -320,13 +319,13 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { SecurityGroupIds: ['existing-id'], - })); + }); + - test.done(); - }, - 'with existing security groups for efs'(test: Test) { + }); + test('with existing security groups for efs', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -338,13 +337,13 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { SecurityGroupIds: ['existing-id'], - })); + }); - test.done(); - }, - 'security group has ingress by default'(test: Test) { + + }); + test('security group has ingress by default', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -355,7 +354,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResourceLike('AWS::EC2::SecurityGroup', { SecurityGroupIngress: [ { CidrIp: { 'Fn::GetAtt': ['VpcNetworkB258E83A', 'CidrBlock'] }, @@ -364,11 +363,11 @@ nodeunitShim({ ToPort: 443, }, ], - } )); + }); + - test.done(); - }, - 'non-AWS service interface endpoint'(test: Test) { + }); + test('non-AWS service interface endpoint', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -379,14 +378,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', PrivateDnsEnabled: false, - })); + }); - test.done(); - }, - 'marketplace partner service interface endpoint'(test: Test) { + + }); + test('marketplace partner service interface endpoint', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); @@ -401,14 +400,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-mktplacesvcwprdns', PrivateDnsEnabled: true, - })); + 
}); + - test.done(); - }, - 'test endpoint service context azs discovered'(test: Test) { + }); + test('test endpoint service context azs discovered', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'us-east-1' } }); @@ -440,7 +439,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', SubnetIds: [ { @@ -450,11 +449,11 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet3Subnet3EDCD457', }, ], - })); + }); + - test.done(); - }, - 'endpoint service setup with stack AZ context but no endpoint context'(test: Test) { + }); + test('endpoint service setup with stack AZ context but no endpoint context', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'us-east-1' } }); @@ -477,7 +476,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', SubnetIds: [ { @@ -490,11 +489,11 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet3Subnet3EDCD457', }, ], - })); + }); - test.done(); - }, - 'test endpoint service context with aws service'(test: Test) { + + }); + test('test endpoint service context with aws service', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'us-east-1' } }); @@ -523,7 +522,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.us-east-1.execute-api', SubnetIds: [ { @@ -533,39 +532,39 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet3Subnet3EDCD457', }, ], - })); + }); - test.done(); - }, - 'lookupSupportedAzs fails if account is unresolved'(test: Test) { + + }); + test('lookupSupportedAzs fails if account is unresolved', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { region: 'us-east-1' } }); const vpc = new Vpc(stack, 'VPC'); // WHEN - test.throws(() =>vpc.addInterfaceEndpoint('YourService', { + expect(() =>vpc.addInterfaceEndpoint('YourService', { service: { name: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', port: 443, }, lookupSupportedAzs: true, - })); - test.done(); - }, - 'lookupSupportedAzs fails if region is unresolved'(test: Test) { + })).toThrow(); + + }); + test('lookupSupportedAzs fails if region is unresolved', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012' } }); const vpc = new Vpc(stack, 'VPC'); // WHEN - test.throws(() =>vpc.addInterfaceEndpoint('YourService', { + expect(() =>vpc.addInterfaceEndpoint('YourService', { service: { name: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', port: 443, }, lookupSupportedAzs: true, - })); - test.done(); - }, - 'lookupSupportedAzs fails if subnet AZs are tokens'(test: Test) { + })).toThrow(); + + }); + test('lookupSupportedAzs fails if subnet AZs are tokens', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'us-east-1' } }); const tokenAZs = [ @@ -582,21 +581,21 @@ nodeunitShim({ const vpc = new Vpc(stack, 'VPC'); // WHEN - test.throws(() =>vpc.addInterfaceEndpoint('YourService', { + expect(() =>vpc.addInterfaceEndpoint('YourService', { service: { name: 
'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', port: 443, }, lookupSupportedAzs: true, - })); - test.done(); - }, - 'vpc endpoint fails if no subnets provided'(test: Test) { + })).toThrow(); + + }); + test('vpc endpoint fails if no subnets provided', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'us-east-1' } }); const vpc = new Vpc(stack, 'VPC'); // WHEN - test.throws(() =>vpc.addInterfaceEndpoint('YourService', { + expect(() =>vpc.addInterfaceEndpoint('YourService', { service: { name: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', port: 443, @@ -604,10 +603,10 @@ nodeunitShim({ subnets: vpc.selectSubnets({ subnets: [], }), - })); - test.done(); - }, - 'test vpc interface endpoint with cn.com.amazonaws prefix can be created correctly in cn-north-1'(test: Test) { + })).toThrow(); + + }); + test('test vpc interface endpoint with cn.com.amazonaws prefix can be created correctly in cn-north-1', () => { //GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'cn-north-1' } }); const vpc = new Vpc(stack, 'VPC'); @@ -618,13 +617,13 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'cn.com.amazonaws.cn-north-1.ecr.api', - })); + }); - test.done(); - }, - 'test vpc interface endpoint with cn.com.amazonaws prefix can be created correctly in cn-northwest-1'(test: Test) { + + }); + test('test vpc interface endpoint with cn.com.amazonaws prefix can be created correctly in cn-northwest-1', () => { //GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'cn-northwest-1' } }); const vpc = new Vpc(stack, 'VPC'); @@ -635,13 +634,13 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'cn.com.amazonaws.cn-northwest-1.lambda', - })); + }); - test.done(); - }, - 'test vpc interface endpoint without cn.com.amazonaws prefix can be created correctly in cn-north-1'(test: Test) { + + }); + test('test vpc interface endpoint without cn.com.amazonaws prefix can be created correctly in cn-north-1', () => { //GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'cn-north-1' } }); const vpc = new Vpc(stack, 'VPC'); @@ -652,13 +651,13 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.cn-north-1.ecs', - })); + }); + - test.done(); - }, - 'test vpc interface endpoint without cn.com.amazonaws prefix can be created correctly in cn-northwest-1'(test: Test) { + }); + test('test vpc interface endpoint without cn.com.amazonaws prefix can be created correctly in cn-northwest-1', () => { //GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'cn-northwest-1' } }); const vpc = new Vpc(stack, 'VPC'); @@ -669,13 +668,13 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.cn-northwest-1.glue', - })); + }); + - test.done(); - }, - 'test vpc interface endpoint for transcribe can be created correctly in non-china regions'(test: Test) { + }); + test('test vpc interface endpoint for transcribe can be created correctly in non-china 
regions', () => { //GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'us-east-1' } }); const vpc = new Vpc(stack, 'VPC'); @@ -686,13 +685,13 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.us-east-1.transcribe', - })); + }); + - test.done(); - }, - 'test vpc interface endpoint for transcribe can be created correctly in cn-north-1'(test: Test) { + }); + test('test vpc interface endpoint for transcribe can be created correctly in cn-north-1', () => { //GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'cn-north-1' } }); const vpc = new Vpc(stack, 'VPC'); @@ -703,13 +702,13 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'cn.com.amazonaws.cn-north-1.transcribe.cn', - })); + }); - test.done(); - }, - 'test vpc interface endpoint for transcribe can be created correctly in cn-northwest-1'(test: Test) { + + }); + test('test vpc interface endpoint for transcribe can be created correctly in cn-northwest-1', () => { //GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '123456789012', region: 'cn-northwest-1' } }); const vpc = new Vpc(stack, 'VPC'); @@ -720,11 +719,11 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'cn.com.amazonaws.cn-northwest-1.transcribe.cn', - })); + }); + - test.done(); - }, - }, + }); + }); }); diff --git a/packages/@aws-cdk/aws-ec2/test/vpc-flow-logs.test.ts b/packages/@aws-cdk/aws-ec2/test/vpc-flow-logs.test.ts index 243d5fef0b46c..956632bf1a0a5 100644 --- a/packages/@aws-cdk/aws-ec2/test/vpc-flow-logs.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/vpc-flow-logs.test.ts @@ -1,23 +1,20 @@ -import { countResources, expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as iam from '@aws-cdk/aws-iam'; import * as logs from '@aws-cdk/aws-logs'; import * as s3 from '@aws-cdk/aws-s3'; import { Stack } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { FlowLog, FlowLogDestination, FlowLogResourceType, Vpc } from '../lib'; -nodeunitShim({ - 'with defaults set, it successfully creates with cloudwatch logs destination'( - test: Test, - ) { +describe('vpc flow logs', () => { + test('with defaults set, it successfully creates with cloudwatch logs destination', () => { const stack = getTestStack(); new FlowLog(stack, 'FlowLogs', { resourceType: FlowLogResourceType.fromNetworkInterfaceId('eni-123455'), }); - expect(stack).to( - haveResource('AWS::EC2::FlowLog', { + expect(stack). 
+ toHaveResource('AWS::EC2::FlowLog', { ResourceType: 'NetworkInterface', TrafficType: 'ALL', ResourceId: 'eni-123455', @@ -27,15 +24,15 @@ nodeunitShim({ LogGroupName: { Ref: 'FlowLogsLogGroup9853A85F', }, - }), - ); - - expect(stack).to(countResources('AWS::Logs::LogGroup', 1)); - expect(stack).to(countResources('AWS::IAM::Role', 1)); - expect(stack).notTo(haveResource('AWS::S3::Bucket')); - test.done(); - }, - 'with cloudwatch logs as the destination, allows use of existing resources'(test: Test) { + }, + ); + + expect(stack).toCountResources('AWS::Logs::LogGroup', 1); + expect(stack).toCountResources('AWS::IAM::Role', 1); + expect(stack).not.toHaveResource('AWS::S3::Bucket'); + + }); + test('with cloudwatch logs as the destination, allows use of existing resources', () => { const stack = getTestStack(); new FlowLog(stack, 'FlowLogs', { @@ -51,16 +48,16 @@ nodeunitShim({ ), }); - expect(stack).to(haveResource('AWS::Logs::LogGroup', { + expect(stack).toHaveResource('AWS::Logs::LogGroup', { RetentionInDays: 5, - })); - expect(stack).to(haveResource('AWS::IAM::Role', { + }); + expect(stack).toHaveResource('AWS::IAM::Role', { RoleName: 'TestName', - })); - expect(stack).notTo(haveResource('AWS::S3::Bucket')); - test.done(); - }, - 'with s3 as the destination, allows use of existing resources'(test: Test) { + }); + expect(stack).not.toHaveResource('AWS::S3::Bucket'); + + }); + test('with s3 as the destination, allows use of existing resources', () => { const stack = getTestStack(); new FlowLog(stack, 'FlowLogs', { @@ -72,14 +69,14 @@ nodeunitShim({ ), }); - expect(stack).notTo(haveResource('AWS::Logs::LogGroup')); - expect(stack).notTo(haveResource('AWS::IAM::Role')); - expect(stack).to(haveResource('AWS::S3::Bucket', { + expect(stack).not.toHaveResource('AWS::Logs::LogGroup'); + expect(stack).not.toHaveResource('AWS::IAM::Role'); + expect(stack).toHaveResource('AWS::S3::Bucket', { BucketName: 'testbucket', - })); - test.done(); - }, - 'with s3 as the destination, allows use of key prefix'(test: Test) { + }); + + }); + test('with s3 as the destination, allows use of key prefix', () => { const stack = getTestStack(); new FlowLog(stack, 'FlowLogs', { @@ -92,16 +89,14 @@ nodeunitShim({ ), }); - expect(stack).notTo(haveResource('AWS::Logs::LogGroup')); - expect(stack).notTo(haveResource('AWS::IAM::Role')); - expect(stack).to(haveResource('AWS::S3::Bucket', { + expect(stack).not.toHaveResource('AWS::Logs::LogGroup'); + expect(stack).not.toHaveResource('AWS::IAM::Role'); + expect(stack).toHaveResource('AWS::S3::Bucket', { BucketName: 'testbucket', - })); - test.done(); - }, - 'with s3 as the destination and all the defaults set, it successfully creates all the resources'( - test: Test, - ) { + }); + + }); + test('with s3 as the destination and all the defaults set, it successfully creates all the resources', () => { const stack = getTestStack(); new FlowLog(stack, 'FlowLogs', { @@ -109,22 +104,22 @@ nodeunitShim({ destination: FlowLogDestination.toS3(), }); - expect(stack).to( - haveResource('AWS::EC2::FlowLog', { + expect(stack). 
+ toHaveResource('AWS::EC2::FlowLog', { ResourceType: 'NetworkInterface', TrafficType: 'ALL', ResourceId: 'eni-123456', LogDestination: { 'Fn::GetAtt': ['FlowLogsBucket87F67F60', 'Arn'], }, - }), - ); - expect(stack).notTo(haveResource('AWS::Logs::LogGroup')); - expect(stack).notTo(haveResource('AWS::IAM::Role')); - expect(stack).to(countResources('AWS::S3::Bucket', 1)); - test.done(); - }, - 'create with vpc'(test: Test) { + }, + ); + expect(stack).not.toHaveResource('AWS::Logs::LogGroup'); + expect(stack).not.toHaveResource('AWS::IAM::Role'); + expect(stack).toCountResources('AWS::S3::Bucket', 1); + + }); + test('create with vpc', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { @@ -133,9 +128,9 @@ nodeunitShim({ }, }); - expect(stack).to(haveResource('AWS::EC2::VPC')); - expect(stack).to( - haveResource('AWS::EC2::FlowLog', { + expect(stack).toHaveResource('AWS::EC2::VPC'); + expect(stack). + toHaveResource('AWS::EC2::FlowLog', { ResourceType: 'VPC', TrafficType: 'ALL', ResourceId: { @@ -147,19 +142,19 @@ nodeunitShim({ LogGroupName: { Ref: 'VPCflowLogsLogGroupE900F980', }, - }), - ); - test.done(); - }, - 'add to vpc'(test: Test) { + }, + ); + + }); + test('add to vpc', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'VPC'); vpc.addFlowLog('FlowLogs'); - expect(stack).to(haveResource('AWS::EC2::VPC')); - expect(stack).to( - haveResource('AWS::EC2::FlowLog', { + expect(stack).toHaveResource('AWS::EC2::VPC'); + expect(stack). + toHaveResource('AWS::EC2::FlowLog', { ResourceType: 'VPC', TrafficType: 'ALL', ResourceId: { @@ -171,10 +166,10 @@ nodeunitShim({ LogGroupName: { Ref: 'VPCFlowLogsLogGroupF48E1B0A', }, - }), - ); - test.done(); - }, + }, + ); + + }); }); function getTestStack(): Stack { diff --git a/packages/@aws-cdk/aws-ec2/test/vpc.from-lookup.test.ts b/packages/@aws-cdk/aws-ec2/test/vpc.from-lookup.test.ts index 65e4b071a0b53..4ad08203a525e 100644 --- a/packages/@aws-cdk/aws-ec2/test/vpc.from-lookup.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/vpc.from-lookup.test.ts @@ -2,26 +2,25 @@ import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import { ContextProvider, GetContextValueOptions, GetContextValueResult, Lazy, Stack } from '@aws-cdk/core'; import * as cxapi from '@aws-cdk/cx-api'; import { Construct } from 'constructs'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { GenericLinuxImage, Instance, InstanceType, SubnetType, Vpc } from '../lib'; -nodeunitShim({ - 'Vpc.fromLookup()': { - 'requires concrete values'(test: Test) { +describe('vpc from lookup', () => { + describe('Vpc.fromLookup()', () => { + test('requires concrete values', () => { // GIVEN const stack = new Stack(); - test.throws(() => { + expect(() => { Vpc.fromLookup(stack, 'Vpc', { vpcId: Lazy.string({ produce: () => 'some-id' }), }); - }, 'All arguments to Vpc.fromLookup() must be concrete'); + }).toThrow('All arguments to Vpc.fromLookup() must be concrete'); - test.done(); - }, - 'selecting subnets by name from a looked-up VPC does not throw'(test: Test) { + }); + + test('selecting subnets by name from a looked-up VPC does not throw', () => { // GIVEN const stack = new Stack(undefined, undefined, { env: { region: 'us-east-1', account: '123456789012' } }); const vpc = Vpc.fromLookup(stack, 'VPC', { @@ -33,11 +32,11 @@ nodeunitShim({ // THEN: no exception - test.done(); - }, - 'accepts asymmetric subnets'(test: Test) { - const previous = mockVpcContextProviderWith(test, { + }); + + test('accepts asymmetric subnets', () => { + const previous = 
mockVpcContextProviderWith({ vpcId: 'vpc-1234', subnetGroups: [ { @@ -84,11 +83,11 @@ nodeunitShim({ }, ], }, options => { - test.deepEqual(options.filter, { + expect(options.filter).toEqual({ isDefault: 'true', }); - test.equal(options.subnetGroupNameTag, undefined); + expect(options.subnetGroupNameTag).toEqual(undefined); }); const stack = new Stack(); @@ -96,17 +95,17 @@ nodeunitShim({ isDefault: true, }); - test.deepEqual(vpc.availabilityZones, ['us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1d']); - test.equal(vpc.publicSubnets.length, 2); - test.equal(vpc.privateSubnets.length, 4); - test.equal(vpc.isolatedSubnets.length, 0); + expect(vpc.availabilityZones).toEqual(['us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1d']); + expect(vpc.publicSubnets.length).toEqual(2); + expect(vpc.privateSubnets.length).toEqual(4); + expect(vpc.isolatedSubnets.length).toEqual(0); restoreContextProvider(previous); - test.done(); - }, - 'selectSubnets onePerAz works on imported VPC'(test: Test) { - const previous = mockVpcContextProviderWith(test, { + }); + + test('selectSubnets onePerAz works on imported VPC', () => { + const previous = mockVpcContextProviderWith({ vpcId: 'vpc-1234', subnetGroups: [ { @@ -153,11 +152,11 @@ nodeunitShim({ }, ], }, options => { - test.deepEqual(options.filter, { + expect(options.filter).toEqual({ isDefault: 'true', }); - test.equal(options.subnetGroupNameTag, undefined); + expect(options.subnetGroupNameTag).toEqual(undefined); }); const stack = new Stack(); @@ -169,13 +168,13 @@ nodeunitShim({ const subnets = vpc.selectSubnets({ subnetType: SubnetType.PRIVATE_WITH_NAT, onePerAz: true }); // THEN: we got 2 subnets and not 4 - test.deepEqual(subnets.subnets.map(s => s.availabilityZone), ['us-east-1c', 'us-east-1d']); + expect(subnets.subnets.map(s => s.availabilityZone)).toEqual(['us-east-1c', 'us-east-1d']); restoreContextProvider(previous); - test.done(); - }, - 'AZ in dummy lookup VPC matches AZ in Stack'(test: Test) { + }); + + test('AZ in dummy lookup VPC matches AZ in Stack', () => { // GIVEN const stack = new Stack(undefined, 'MyTestStack', { env: { account: '1234567890', region: 'dummy' } }); const vpc = Vpc.fromLookup(stack, 'vpc', { isDefault: true }); @@ -186,12 +185,12 @@ nodeunitShim({ }); // THEN - test.equals(subnets.subnets.length, 2); + expect(subnets.subnets.length).toEqual(2); - test.done(); - }, - 'don\'t crash when using subnetgroup name in lookup VPC'(test: Test) { + }); + + test('don\'t crash when using subnetgroup name in lookup VPC', () => { // GIVEN const stack = new Stack(undefined, 'MyTestStack', { env: { account: '1234567890', region: 'dummy' } }); const vpc = Vpc.fromLookup(stack, 'vpc', { isDefault: true }); @@ -208,10 +207,10 @@ nodeunitShim({ // THEN -- no exception occurred - test.done(); - }, - 'subnets in imported VPC has all expected attributes'(test: Test) { - const previous = mockVpcContextProviderWith(test, { + + }); + test('subnets in imported VPC has all expected attributes', () => { + const previous = mockVpcContextProviderWith({ vpcId: 'vpc-1234', subnetGroups: [ { @@ -228,11 +227,11 @@ nodeunitShim({ }, ], }, options => { - test.deepEqual(options.filter, { + expect(options.filter).toEqual({ isDefault: 'true', }); - test.equal(options.subnetGroupNameTag, undefined); + expect(options.subnetGroupNameTag).toEqual(undefined); }); const stack = new Stack(); @@ -242,16 +241,16 @@ nodeunitShim({ let subnet = vpc.publicSubnets[0]; - test.equal(subnet.availabilityZone, 'us-east-1a'); - test.equal(subnet.subnetId, 
'pub-sub-in-us-east-1a'); - test.equal(subnet.routeTable.routeTableId, 'rt-123'); - test.equal(subnet.ipv4CidrBlock, '10.100.0.0/24'); + expect(subnet.availabilityZone).toEqual('us-east-1a'); + expect(subnet.subnetId).toEqual('pub-sub-in-us-east-1a'); + expect(subnet.routeTable.routeTableId).toEqual('rt-123'); + expect(subnet.ipv4CidrBlock).toEqual('10.100.0.0/24'); restoreContextProvider(previous); - test.done(); - }, - }, + + }); + }); }); interface MockVcpContextResponse { @@ -260,15 +259,13 @@ interface MockVcpContextResponse { } function mockVpcContextProviderWith( - test: Test, response: MockVcpContextResponse, + response: MockVcpContextResponse, paramValidator?: (options: cxschema.VpcContextQuery) => void) { const previous = ContextProvider.getValue; ContextProvider.getValue = (_scope: Construct, options: GetContextValueOptions) => { // do some basic sanity checks - test.equal(options.provider, cxschema.ContextProvider.VPC_PROVIDER, - `Expected provider to be: '${cxschema.ContextProvider.VPC_PROVIDER}', got: '${options.provider}'`); - test.equal((options.props || {}).returnAsymmetricSubnets, true, - `Expected options.props.returnAsymmetricSubnets to be true, got: '${(options.props || {}).returnAsymmetricSubnets}'`); + expect(options.provider).toEqual(cxschema.ContextProvider.VPC_PROVIDER); + expect((options.props || {}).returnAsymmetricSubnets).toEqual(true); if (paramValidator) { paramValidator(options.props as any); diff --git a/packages/@aws-cdk/aws-ec2/test/vpc.test.ts b/packages/@aws-cdk/aws-ec2/test/vpc.test.ts index 355f4fd3662c7..ca375dc7fe321 100644 --- a/packages/@aws-cdk/aws-ec2/test/vpc.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/vpc.test.ts @@ -1,6 +1,6 @@ -import { countResources, expect as cdkExpect, haveResource, haveResourceLike, isSuperObject, MatchStyle, SynthUtils } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; +import { isSuperObject, MatchStyle, SynthUtils } from '@aws-cdk/assert-internal'; import { CfnOutput, CfnResource, Fn, Lazy, Stack, Tags } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { AclCidr, AclTraffic, @@ -28,45 +28,45 @@ import { Vpc, } from '../lib'; -nodeunitShim({ - 'When creating a VPC': { - 'with the default CIDR range': { +describe('vpc', () => { + describe('When creating a VPC', () => { + describe('with the default CIDR range', () => { - 'vpc.vpcId returns a token to the VPC ID'(test: Test) { + test('vpc.vpcId returns a token to the VPC ID', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC'); - test.deepEqual(stack.resolve(vpc.vpcId), { Ref: 'TheVPC92636AB0' }); - test.done(); - }, + expect(stack.resolve(vpc.vpcId)).toEqual({ Ref: 'TheVPC92636AB0' }); - 'it uses the correct network range'(test: Test) { + }); + + test('it uses the correct network range', () => { const stack = getTestStack(); new Vpc(stack, 'TheVPC'); - cdkExpect(stack).to(haveResource('AWS::EC2::VPC', { + expect(stack).toHaveResource('AWS::EC2::VPC', { CidrBlock: Vpc.DEFAULT_CIDR_RANGE, EnableDnsHostnames: true, EnableDnsSupport: true, InstanceTenancy: DefaultInstanceTenancy.DEFAULT, - })); - test.done(); - }, - 'the Name tag is defaulted to path'(test: Test) { + }); + + }); + test('the Name tag is defaulted to path', () => { const stack = getTestStack(); new Vpc(stack, 'TheVPC'); - cdkExpect(stack).to( - haveResource('AWS::EC2::VPC', - hasTags([{ Key: 'Name', Value: 'TestStack/TheVPC' }])), - ); - cdkExpect(stack).to( - haveResource('AWS::EC2::InternetGateway', - hasTags([{ Key: 'Name', 
Value: 'TestStack/TheVPC' }])), - ); - test.done(); - }, - - }, - - 'with all of the properties set, it successfully sets the correct VPC properties'(test: Test) { + expect(stack). + toHaveResource('AWS::EC2::VPC', + hasTags([{ Key: 'Name', Value: 'TestStack/TheVPC' }]), + ); + expect(stack). + toHaveResource('AWS::EC2::InternetGateway', + hasTags([{ Key: 'Name', Value: 'TestStack/TheVPC' }]), + ); + + }); + + }); + + test('with all of the properties set, it successfully sets the correct VPC properties', () => { const stack = getTestStack(); new Vpc(stack, 'TheVPC', { cidr: '192.168.0.0/16', @@ -75,18 +75,17 @@ nodeunitShim({ defaultInstanceTenancy: DefaultInstanceTenancy.DEDICATED, }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPC', { + expect(stack).toHaveResource('AWS::EC2::VPC', { CidrBlock: '192.168.0.0/16', EnableDnsHostnames: false, EnableDnsSupport: false, InstanceTenancy: DefaultInstanceTenancy.DEDICATED, - })); - test.done(); - }, + }); + + }); - 'dns getters correspond to CFN properties': (() => { + describe('dns getters correspond to CFN properties', () => { - const tests: any = {}; const inputs = [ { dnsSupport: false, dnsHostnames: false }, @@ -97,7 +96,7 @@ nodeunitShim({ for (const input of inputs) { - tests[`[dnsSupport=${input.dnsSupport},dnsHostnames=${input.dnsHostnames}]`] = (test: Test) => { + test(`[dnsSupport=${input.dnsSupport},dnsHostnames=${input.dnsHostnames}]`, () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC', { @@ -107,41 +106,41 @@ nodeunitShim({ defaultInstanceTenancy: DefaultInstanceTenancy.DEDICATED, }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPC', { + expect(stack).toHaveResource('AWS::EC2::VPC', { CidrBlock: '192.168.0.0/16', EnableDnsHostnames: input.dnsHostnames, EnableDnsSupport: input.dnsSupport, InstanceTenancy: DefaultInstanceTenancy.DEDICATED, - })); + }); - test.equal(input.dnsSupport, vpc.dnsSupportEnabled); - test.equal(input.dnsHostnames, vpc.dnsHostnamesEnabled); - test.done(); + expect(input.dnsSupport).toEqual(vpc.dnsSupportEnabled); + expect(input.dnsHostnames).toEqual(vpc.dnsHostnamesEnabled); - }; + + }); } - return tests; - })(), - 'contains the correct number of subnets'(test: Test) { + }); + + test('contains the correct number of subnets', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC'); const zones = stack.availabilityZones.length; - test.equal(vpc.publicSubnets.length, zones); - test.equal(vpc.privateSubnets.length, zones); - test.deepEqual(stack.resolve(vpc.vpcId), { Ref: 'TheVPC92636AB0' }); - test.done(); - }, + expect(vpc.publicSubnets.length).toEqual(zones); + expect(vpc.privateSubnets.length).toEqual(zones); + expect(stack.resolve(vpc.vpcId)).toEqual({ Ref: 'TheVPC92636AB0' }); + + }); - 'can refer to the internet gateway'(test: Test) { + test('can refer to the internet gateway', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC'); - test.deepEqual(stack.resolve(vpc.internetGatewayId), { Ref: 'TheVPCIGWFA25CC08' }); - test.done(); - }, + expect(stack.resolve(vpc.internetGatewayId)).toEqual({ Ref: 'TheVPCIGWFA25CC08' }); - 'with only isolated subnets, the VPC should not contain an IGW or NAT Gateways'(test: Test) { + }); + + test('with only isolated subnets, the VPC should not contain an IGW or NAT Gateways', () => { const stack = getTestStack(); new Vpc(stack, 'TheVPC', { subnetConfiguration: [ @@ -151,15 +150,15 @@ nodeunitShim({ }, ], }); - cdkExpect(stack).notTo(haveResource('AWS::EC2::InternetGateway')); - 
cdkExpect(stack).notTo(haveResource('AWS::EC2::NatGateway')); - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', { + expect(stack).not.toHaveResource('AWS::EC2::InternetGateway'); + expect(stack).not.toHaveResource('AWS::EC2::NatGateway'); + expect(stack).toHaveResource('AWS::EC2::Subnet', { MapPublicIpOnLaunch: false, - })); - test.done(); - }, + }); - 'with no private subnets, the VPC should have an IGW but no NAT Gateways'(test: Test) { + }); + + test('with no private subnets, the VPC should have an IGW but no NAT Gateways', () => { const stack = getTestStack(); new Vpc(stack, 'TheVPC', { subnetConfiguration: [ @@ -173,11 +172,11 @@ nodeunitShim({ }, ], }); - cdkExpect(stack).to(countResources('AWS::EC2::InternetGateway', 1)); - cdkExpect(stack).notTo(haveResource('AWS::EC2::NatGateway')); - test.done(); - }, - 'with private subnets and custom networkAcl.'(test: Test) { + expect(stack).toCountResources('AWS::EC2::InternetGateway', 1); + expect(stack).not.toHaveResource('AWS::EC2::NatGateway'); + + }); + test('with private subnets and custom networkAcl.', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC', { subnetConfiguration: [ @@ -213,22 +212,22 @@ nodeunitShim({ cidr: AclCidr.anyIpv4(), }); - cdkExpect(stack).to(countResources('AWS::EC2::NetworkAcl', 1)); - cdkExpect(stack).to(countResources('AWS::EC2::NetworkAclEntry', 2)); - cdkExpect(stack).to(countResources('AWS::EC2::SubnetNetworkAclAssociation', 3)); - test.done(); - }, + expect(stack).toCountResources('AWS::EC2::NetworkAcl', 1); + expect(stack).toCountResources('AWS::EC2::NetworkAclEntry', 2); + expect(stack).toCountResources('AWS::EC2::SubnetNetworkAclAssociation', 3); + + }); - 'with no subnets defined, the VPC should have an IGW, and a NAT Gateway per AZ'(test: Test) { + test('with no subnets defined, the VPC should have an IGW, and a NAT Gateway per AZ', () => { const stack = getTestStack(); const zones = stack.availabilityZones.length; new Vpc(stack, 'TheVPC', {}); - cdkExpect(stack).to(countResources('AWS::EC2::InternetGateway', 1)); - cdkExpect(stack).to(countResources('AWS::EC2::NatGateway', zones)); - test.done(); - }, + expect(stack).toCountResources('AWS::EC2::InternetGateway', 1); + expect(stack).toCountResources('AWS::EC2::NatGateway', zones); - 'with isolated and public subnet, should be able to use the internet gateway to define routes'(test: Test) { + }); + + test('with isolated and public subnet, should be able to use the internet gateway to define routes', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC', { subnetConfiguration: [ @@ -247,30 +246,15 @@ nodeunitShim({ routerType: RouterType.GATEWAY, destinationCidrBlock: '8.8.8.8/32', }); - cdkExpect(stack).to(haveResource('AWS::EC2::InternetGateway')); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Route', { + expect(stack).toHaveResource('AWS::EC2::InternetGateway'); + expect(stack).toHaveResourceLike('AWS::EC2::Route', { DestinationCidrBlock: '8.8.8.8/32', GatewayId: {}, - })); - test.done(); - }, - - 'with only isolated subnets the internet gateway should be undefined'(test: Test) { - const stack = getTestStack(); - const vpc = new Vpc(stack, 'TheVPC', { - subnetConfiguration: [ - { - subnetType: SubnetType.PRIVATE_ISOLATED, - name: 'isolated', - }, - ], }); - test.equal(vpc.internetGatewayId, undefined); - cdkExpect(stack).notTo(haveResource('AWS::EC2::InternetGateway')); - test.done(); - }, - 'with subnets and reserved subnets defined, VPC subnet count should not contain reserved subnets '(test: 
Test) {
       const stack = getTestStack();
       new Vpc(stack, 'TheVPC', {
         cidr: '10.0.0.0/16',
@@ -294,10 +278,10 @@ nodeunitShim({
         ],
         maxAzs: 3,
       });
-      cdkExpect(stack).to(countResources('AWS::EC2::Subnet', 6));
-      test.done();
-    },
+      expect(stack).toCountResources('AWS::EC2::Subnet', 6);
+
+    });
-    'with reserved subnets, any other subnets should not have cidrBlock from within reserved space'(test: Test) {
+    test('with reserved subnets, any other subnets should not have cidrBlock from within reserved space', () => {
       const stack = getTestStack();
       new Vpc(stack, 'TheVPC', {
         cidr: '10.0.0.0/16',
@@ -322,23 +306,23 @@ nodeunitShim({
         maxAzs: 3,
       });
       for (let i = 0; i < 3; i++) {
-        cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', {
+        expect(stack).toHaveResource('AWS::EC2::Subnet', {
           CidrBlock: `10.0.${i}.0/24`,
-        }));
+        });
       }
       for (let i = 3; i < 6; i++) {
-        cdkExpect(stack).notTo(haveResource('AWS::EC2::Subnet', {
+        expect(stack).not.toHaveResource('AWS::EC2::Subnet', {
           CidrBlock: `10.0.${i}.0/24`,
-        }));
+        });
       }
       for (let i = 6; i < 9; i++) {
-        cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', {
+        expect(stack).toHaveResource('AWS::EC2::Subnet', {
           CidrBlock: `10.0.${i}.0/24`,
-        }));
+        });
       }
-      test.done();
-    },
+
+    });
-    'with custom subnets, the VPC should have the right number of subnets, an IGW, and a NAT Gateway per AZ'(test: Test) {
+    test('with custom subnets, the VPC should have the right number of subnets, an IGW, and a NAT Gateway per AZ', () => {
       const stack = getTestStack();
       const zones = stack.availabilityZones.length;
       new Vpc(stack, 'TheVPC', {
@@ -362,22 +346,22 @@ nodeunitShim({
         ],
         maxAzs: 3,
       });
-      cdkExpect(stack).to(countResources('AWS::EC2::InternetGateway', 1));
-      cdkExpect(stack).to(countResources('AWS::EC2::NatGateway', zones));
-      cdkExpect(stack).to(countResources('AWS::EC2::Subnet', 9));
+      expect(stack).toCountResources('AWS::EC2::InternetGateway', 1);
+      expect(stack).toCountResources('AWS::EC2::NatGateway', zones);
+      expect(stack).toCountResources('AWS::EC2::Subnet', 9);
       for (let i = 0; i < 6; i++) {
-        cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', {
+        expect(stack).toHaveResource('AWS::EC2::Subnet', {
           CidrBlock: `10.0.${i}.0/24`,
-        }));
+        });
       }
       for (let i = 0; i < 3; i++) {
-        cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', {
+        expect(stack).toHaveResource('AWS::EC2::Subnet', {
           CidrBlock: `10.0.6.${i * 16}/28`,
-        }));
+        });
       }
-      test.done();
-    },
+
+    });
-    'with custom subents and natGateways = 2 there should be only two NATGW'(test: Test) {
+    test('with custom subnets and natGateways = 2 there should be only two NATGW', () => {
       const stack = getTestStack();
       new Vpc(stack, 'TheVPC', {
         cidr: '10.0.0.0/21',
@@ -401,30 +385,30 @@ nodeunitShim({
         ],
         maxAzs: 3,
       });
-      cdkExpect(stack).to(countResources('AWS::EC2::InternetGateway', 1));
-      cdkExpect(stack).to(countResources('AWS::EC2::NatGateway', 2));
-      cdkExpect(stack).to(countResources('AWS::EC2::Subnet', 9));
+      expect(stack).toCountResources('AWS::EC2::InternetGateway', 1);
+      expect(stack).toCountResources('AWS::EC2::NatGateway', 2);
+      expect(stack).toCountResources('AWS::EC2::Subnet', 9);
       for (let i = 0; i < 6; i++) {
-        cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', {
+        expect(stack).toHaveResource('AWS::EC2::Subnet', {
          CidrBlock: `10.0.${i}.0/24`,
-        }));
+        });
       }
       for (let i = 0; i < 3; i++) {
-        cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', {
+        
expect(stack).toHaveResource('AWS::EC2::Subnet', { CidrBlock: `10.0.6.${i * 16}/28`, - })); + }); } - test.done(); - }, - 'with enableDnsHostnames enabled but enableDnsSupport disabled, should throw an Error'(test: Test) { + + }); + test('with enableDnsHostnames enabled but enableDnsSupport disabled, should throw an Error', () => { const stack = getTestStack(); - test.throws(() => new Vpc(stack, 'TheVPC', { + expect(() => new Vpc(stack, 'TheVPC', { enableDnsHostnames: true, enableDnsSupport: false, - })); - test.done(); - }, - 'with public subnets MapPublicIpOnLaunch is true'(test: Test) { + })).toThrow(); + + }); + test('with public subnets MapPublicIpOnLaunch is true', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { maxAzs: 1, @@ -436,63 +420,63 @@ nodeunitShim({ }, ], }); - cdkExpect(stack).to(countResources('AWS::EC2::Subnet', 1)); - cdkExpect(stack).notTo(haveResource('AWS::EC2::NatGateway')); - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', { + expect(stack).toCountResources('AWS::EC2::Subnet', 1); + expect(stack).not.toHaveResource('AWS::EC2::NatGateway'); + expect(stack).toHaveResource('AWS::EC2::Subnet', { MapPublicIpOnLaunch: true, - })); - test.done(); - }, + }); - 'maxAZs defaults to 3 if unset'(test: Test) { + }); + + test('maxAZs defaults to 3 if unset', () => { const stack = getTestStack(); new Vpc(stack, 'VPC'); - cdkExpect(stack).to(countResources('AWS::EC2::Subnet', 6)); - cdkExpect(stack).to(countResources('AWS::EC2::Route', 6)); + expect(stack).toCountResources('AWS::EC2::Subnet', 6); + expect(stack).toCountResources('AWS::EC2::Route', 6); for (let i = 0; i < 6; i++) { - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', { + expect(stack).toHaveResource('AWS::EC2::Subnet', { CidrBlock: `10.0.${i * 32}.0/19`, - })); + }); } - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Route', { + expect(stack).toHaveResourceLike('AWS::EC2::Route', { DestinationCidrBlock: '0.0.0.0/0', NatGatewayId: {}, - })); + }); - test.done(); - }, - 'with maxAZs set to 2'(test: Test) { + }); + + test('with maxAZs set to 2', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { maxAzs: 2 }); - cdkExpect(stack).to(countResources('AWS::EC2::Subnet', 4)); - cdkExpect(stack).to(countResources('AWS::EC2::Route', 4)); + expect(stack).toCountResources('AWS::EC2::Subnet', 4); + expect(stack).toCountResources('AWS::EC2::Route', 4); for (let i = 0; i < 4; i++) { - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', { + expect(stack).toHaveResource('AWS::EC2::Subnet', { CidrBlock: `10.0.${i * 64}.0/18`, - })); + }); } - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Route', { + expect(stack).toHaveResourceLike('AWS::EC2::Route', { DestinationCidrBlock: '0.0.0.0/0', NatGatewayId: {}, - })); - test.done(); - }, - 'with natGateway set to 1'(test: Test) { + }); + + }); + test('with natGateway set to 1', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { natGateways: 1, }); - cdkExpect(stack).to(countResources('AWS::EC2::Subnet', 6)); - cdkExpect(stack).to(countResources('AWS::EC2::Route', 6)); - cdkExpect(stack).to(countResources('AWS::EC2::NatGateway', 1)); - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Route', { + expect(stack).toCountResources('AWS::EC2::Subnet', 6); + expect(stack).toCountResources('AWS::EC2::Route', 6); + expect(stack).toCountResources('AWS::EC2::NatGateway', 1); + expect(stack).toHaveResourceLike('AWS::EC2::Route', { DestinationCidrBlock: '0.0.0.0/0', NatGatewayId: {}, - })); - test.done(); - }, - 'with natGateway subnets defined'(test: Test) { 
+ }); + + }); + test('with natGateway subnets defined', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { subnetConfiguration: [ @@ -516,22 +500,22 @@ nodeunitShim({ subnetGroupName: 'egress', }, }); - cdkExpect(stack).to(countResources('AWS::EC2::NatGateway', 3)); + expect(stack).toCountResources('AWS::EC2::NatGateway', 3); for (let i = 1; i < 4; i++) { - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', hasTags([{ + expect(stack).toHaveResource('AWS::EC2::Subnet', hasTags([{ Key: 'Name', Value: `TestStack/VPC/egressSubnet${i}`, }, { Key: 'aws-cdk:subnet-name', Value: 'egress', - }]))); + }])); } - test.done(); - }, - 'natGateways = 0 throws if no PRIVATE subnets configured'(test: Test) { + }); + + test('natGateways = 0 throws if no PRIVATE subnets configured', () => { const stack = getTestStack(); - test.throws(() => { + expect(() => { new Vpc(stack, 'VPC', { natGateways: 0, subnetConfiguration: [ @@ -545,34 +529,34 @@ nodeunitShim({ }, ], }); - }, /make sure you don't configure any PRIVATE subnets/); - test.done(); + }).toThrow(/make sure you don't configure any PRIVATE subnets/); + - }, + }); - 'natGateway = 0 defaults with ISOLATED subnet'(test: Test) { + test('natGateway = 0 defaults with ISOLATED subnet', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { natGateways: 0, }); - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', hasTags([{ + expect(stack).toHaveResource('AWS::EC2::Subnet', hasTags([{ Key: 'aws-cdk:subnet-type', Value: 'Isolated', - }]))); - test.done(); - }, + }])); + + }); - 'unspecified natGateways constructs with PRIVATE subnet'(test: Test) { + test('unspecified natGateways constructs with PRIVATE subnet', () => { const stack = getTestStack(); new Vpc(stack, 'VPC'); - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', hasTags([{ + expect(stack).toHaveResource('AWS::EC2::Subnet', hasTags([{ Key: 'aws-cdk:subnet-type', Value: 'Private', - }]))); - test.done(); - }, + }])); - 'natGateways = 0 allows RESERVED PRIVATE subnets'(test: Test) { + }); + + test('natGateways = 0 allows RESERVED PRIVATE subnets', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { cidr: '10.0.0.0/16', @@ -589,16 +573,16 @@ nodeunitShim({ ], natGateways: 0, }); - cdkExpect(stack).to(haveResource('AWS::EC2::Subnet', hasTags([{ + expect(stack).toHaveResource('AWS::EC2::Subnet', hasTags([{ Key: 'aws-cdk:subnet-name', Value: 'ingress', - }]))); - test.done(); - }, + }])); + + }); - 'with mis-matched nat and subnet configs it throws'(test: Test) { + test('with mis-matched nat and subnet configs it throws', () => { const stack = getTestStack(); - test.throws(() => new Vpc(stack, 'VPC', { + expect(() => new Vpc(stack, 'VPC', { subnetConfiguration: [ { cidrMask: 24, @@ -614,31 +598,31 @@ nodeunitShim({ natGatewaySubnets: { subnetGroupName: 'notthere', }, - })); - test.done(); - }, - 'with a vpn gateway'(test: Test) { + })).toThrow(); + + }); + test('with a vpn gateway', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { vpnGateway: true, vpnGatewayAsn: 65000, }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPNGateway', { + expect(stack).toHaveResource('AWS::EC2::VPNGateway', { AmazonSideAsn: 65000, Type: 'ipsec.1', - })); + }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPCGatewayAttachment', { + expect(stack).toHaveResource('AWS::EC2::VPCGatewayAttachment', { VpcId: { Ref: 'VPCB9E5F0B4', }, VpnGatewayId: { Ref: 'VPCVpnGatewayB5ABAE68', }, - })); + }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPNGatewayRoutePropagation', { + 
expect(stack).toHaveResource('AWS::EC2::VPNGatewayRoutePropagation', { RouteTableIds: [ { Ref: 'VPCPrivateSubnet1RouteTableBE8A6027', @@ -653,11 +637,11 @@ nodeunitShim({ VpnGatewayId: { Ref: 'VPCVpnGatewayB5ABAE68', }, - })); + }); - test.done(); - }, - 'with a vpn gateway and route propagation on isolated subnets'(test: Test) { + + }); + test('with a vpn gateway and route propagation on isolated subnets', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { subnetConfiguration: [ @@ -672,7 +656,7 @@ nodeunitShim({ ], }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPNGatewayRoutePropagation', { + expect(stack).toHaveResource('AWS::EC2::VPNGatewayRoutePropagation', { RouteTableIds: [ { Ref: 'VPCIsolatedSubnet1RouteTableEB156210', @@ -687,11 +671,11 @@ nodeunitShim({ VpnGatewayId: { Ref: 'VPCVpnGatewayB5ABAE68', }, - })); + }); + - test.done(); - }, - 'with a vpn gateway and route propagation on private and isolated subnets'(test: Test) { + }); + test('with a vpn gateway and route propagation on private and isolated subnets', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { subnetConfiguration: [ @@ -710,7 +694,7 @@ nodeunitShim({ ], }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPNGatewayRoutePropagation', { + expect(stack).toHaveResource('AWS::EC2::VPNGatewayRoutePropagation', { RouteTableIds: [ { Ref: 'VPCPrivateSubnet1RouteTableBE8A6027', @@ -734,11 +718,11 @@ nodeunitShim({ VpnGatewayId: { Ref: 'VPCVpnGatewayB5ABAE68', }, - })); + }); + - test.done(); - }, - 'route propagation defaults to isolated subnets when there are no private subnets'(test: Test) { + }); + test('route propagation defaults to isolated subnets when there are no private subnets', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { subnetConfiguration: [ @@ -748,7 +732,7 @@ nodeunitShim({ vpnGateway: true, }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPNGatewayRoutePropagation', { + expect(stack).toHaveResource('AWS::EC2::VPNGatewayRoutePropagation', { RouteTableIds: [ { Ref: 'VPCIsolatedSubnet1RouteTableEB156210', @@ -763,11 +747,11 @@ nodeunitShim({ VpnGatewayId: { Ref: 'VPCVpnGatewayB5ABAE68', }, - })); + }); + - test.done(); - }, - 'route propagation defaults to public subnets when there are no private/isolated subnets'(test: Test) { + }); + test('route propagation defaults to public subnets when there are no private/isolated subnets', () => { const stack = getTestStack(); new Vpc(stack, 'VPC', { subnetConfiguration: [ @@ -776,7 +760,7 @@ nodeunitShim({ vpnGateway: true, }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPNGatewayRoutePropagation', { + expect(stack).toHaveResource('AWS::EC2::VPNGatewayRoutePropagation', { RouteTableIds: [ { Ref: 'VPCPublicSubnet1RouteTableFEE4B781', @@ -791,15 +775,15 @@ nodeunitShim({ VpnGatewayId: { Ref: 'VPCVpnGatewayB5ABAE68', }, - })); + }); + - test.done(); - }, - 'fails when specifying vpnConnections with vpnGateway set to false'(test: Test) { + }); + test('fails when specifying vpnConnections with vpnGateway set to false', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnGateway: false, vpnConnections: { VpnConnection: { @@ -807,97 +791,97 @@ nodeunitShim({ ip: '192.0.2.1', }, }, - }), /`vpnConnections`.+`vpnGateway`.+false/); + })).toThrow(/`vpnConnections`.+`vpnGateway`.+false/); - test.done(); - }, - 'fails when specifying vpnGatewayAsn with vpnGateway set to false'(test: Test) { + + }); + test('fails when specifying vpnGatewayAsn 
with vpnGateway set to false', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnGateway: false, vpnGatewayAsn: 65000, - }), /`vpnGatewayAsn`.+`vpnGateway`.+false/); + })).toThrow(/`vpnGatewayAsn`.+`vpnGateway`.+false/); + - test.done(); - }, + }); - 'Subnets have a defaultChild'(test: Test) { + test('Subnets have a defaultChild', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VpcNetwork'); - test.ok(vpc.publicSubnets[0].node.defaultChild instanceof CfnSubnet); + expect(vpc.publicSubnets[0].node.defaultChild instanceof CfnSubnet).toEqual(true); + - test.done(); - }, + }); - 'CIDR cannot be a Token'(test: Test) { + test('CIDR cannot be a Token', () => { const stack = new Stack(); - test.throws(() => { + expect(() => { new Vpc(stack, 'Vpc', { cidr: Lazy.string({ produce: () => 'abc' }), }); - }, /property must be a concrete CIDR string/); + }).toThrow(/property must be a concrete CIDR string/); - test.done(); - }, - 'Default NAT gateway provider'(test: Test) { + }); + + test('Default NAT gateway provider', () => { const stack = new Stack(); const natGatewayProvider = NatProvider.gateway(); new Vpc(stack, 'VpcNetwork', { natGatewayProvider }); - test.ok(natGatewayProvider.configuredGateways.length > 0); + expect(natGatewayProvider.configuredGateways.length).toBeGreaterThan(0); + - test.done(); - }, + }); - 'NAT gateway provider with EIP allocations'(test: Test) { + test('NAT gateway provider with EIP allocations', () => { const stack = new Stack(); const natGatewayProvider = NatProvider.gateway({ eipAllocationIds: ['a', 'b', 'c', 'd'], }); new Vpc(stack, 'VpcNetwork', { natGatewayProvider }); - cdkExpect(stack).to(haveResource('AWS::EC2::NatGateway', { + expect(stack).toHaveResource('AWS::EC2::NatGateway', { AllocationId: 'a', - })); - cdkExpect(stack).to(haveResource('AWS::EC2::NatGateway', { + }); + expect(stack).toHaveResource('AWS::EC2::NatGateway', { AllocationId: 'b', - })); + }); - test.done(); - }, - 'NAT gateway provider with insufficient EIP allocations'(test: Test) { + }); + + test('NAT gateway provider with insufficient EIP allocations', () => { const stack = new Stack(); const natGatewayProvider = NatProvider.gateway({ eipAllocationIds: ['a'] }); expect(() => new Vpc(stack, 'VpcNetwork', { natGatewayProvider })) .toThrow(/Not enough NAT gateway EIP allocation IDs \(1 provided\) for the requested subnet count \(\d+ needed\)/); - test.done(); - }, - 'NAT gateway provider with token EIP allocations'(test: Test) { + }); + + test('NAT gateway provider with token EIP allocations', () => { const stack = new Stack(); const eipAllocationIds = Fn.split(',', Fn.importValue('myVpcId')); const natGatewayProvider = NatProvider.gateway({ eipAllocationIds }); new Vpc(stack, 'VpcNetwork', { natGatewayProvider }); - cdkExpect(stack).to(haveResource('AWS::EC2::NatGateway', { + expect(stack).toHaveResource('AWS::EC2::NatGateway', { AllocationId: stack.resolve(Fn.select(0, eipAllocationIds)), - })); - cdkExpect(stack).to(haveResource('AWS::EC2::NatGateway', { + }); + expect(stack).toHaveResource('AWS::EC2::NatGateway', { AllocationId: stack.resolve(Fn.select(1, eipAllocationIds)), - })); + }); + - test.done(); - }, + }); - 'Can add an IPv6 route'(test: Test) { + test('Can add an IPv6 route', () => { // GIVEN const stack = getTestStack(); @@ -911,14 +895,14 @@ nodeunitShim({ // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Route', { + 
expect(stack).toHaveResourceLike('AWS::EC2::Route', { DestinationIpv6CidrBlock: '2001:4860:4860::8888/32', NetworkInterfaceId: 'router-1', - })); + }); + - test.done(); - }, - 'Can add an IPv4 route'(test: Test) { + }); + test('Can add an IPv4 route', () => { // GIVEN const stack = getTestStack(); @@ -932,17 +916,17 @@ nodeunitShim({ // THEN - cdkExpect(stack).to(haveResourceLike('AWS::EC2::Route', { + expect(stack).toHaveResourceLike('AWS::EC2::Route', { DestinationCidrBlock: '0.0.0.0/0', NetworkInterfaceId: 'router-1', - })); + }); + - test.done(); - }, - }, + }); + }); - 'NAT instances': { - 'Can configure NAT instances instead of NAT gateways'(test: Test) { + describe('NAT instances', () => { + test('Can configure NAT instances instead of NAT gateways', () => { // GIVEN const stack = getTestStack(); @@ -956,18 +940,18 @@ nodeunitShim({ new Vpc(stack, 'TheVPC', { natGatewayProvider }); // THEN - cdkExpect(stack).to(countResources('AWS::EC2::Instance', 3)); - cdkExpect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toCountResources('AWS::EC2::Instance', 3); + expect(stack).toHaveResource('AWS::EC2::Instance', { ImageId: 'ami-1', InstanceType: 'q86.mega', SourceDestCheck: false, - })); - cdkExpect(stack).to(haveResource('AWS::EC2::Route', { + }); + expect(stack).toHaveResource('AWS::EC2::Route', { RouteTableId: { Ref: 'TheVPCPrivateSubnet1RouteTableF6513BC2' }, DestinationCidrBlock: '0.0.0.0/0', InstanceId: { Ref: 'TheVPCPublicSubnet1NatInstanceCC514192' }, - })); - cdkExpect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + }); + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -982,12 +966,12 @@ nodeunitShim({ IpProtocol: '-1', }, ], - })); + }); + - test.done(); - }, + }); - 'natGateways controls amount of NAT instances'(test: Test) { + test('natGateways controls amount of NAT instances', () => { // GIVEN const stack = getTestStack(); @@ -1003,12 +987,12 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(countResources('AWS::EC2::Instance', 1)); + expect(stack).toCountResources('AWS::EC2::Instance', 1); - test.done(); - }, - 'can configure Security Groups of NAT instances with allowAllTraffic false'(test: Test) { + }); + + test('can configure Security Groups of NAT instances with allowAllTraffic false', () => { // GIVEN const stack = getTestStack(); @@ -1026,7 +1010,7 @@ nodeunitShim({ provider.connections.allowFrom(Peer.ipv4('1.2.3.4/32'), Port.tcp(86)); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -1043,12 +1027,12 @@ nodeunitShim({ ToPort: 86, }, ], - })); + }); - test.done(); - }, - 'can configure Security Groups of NAT instances with defaultAllowAll INBOUND_AND_OUTBOUND'(test: Test) { + }); + + test('can configure Security Groups of NAT instances with defaultAllowAll INBOUND_AND_OUTBOUND', () => { // GIVEN const stack = getTestStack(); @@ -1065,7 +1049,7 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -1080,12 +1064,12 @@ nodeunitShim({ IpProtocol: '-1', }, ], - })); + }); - test.done(); - }, - 'can configure Security Groups of NAT instances with defaultAllowAll OUTBOUND_ONLY'(test: Test) { + }); + + test('can configure Security Groups of NAT instances with defaultAllowAll OUTBOUND_ONLY', () => { // GIVEN const stack 
= getTestStack(); @@ -1102,7 +1086,7 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '0.0.0.0/0', @@ -1110,12 +1094,12 @@ nodeunitShim({ IpProtocol: '-1', }, ], - })); + }); - test.done(); - }, - 'can configure Security Groups of NAT instances with defaultAllowAll NONE'(test: Test) { + }); + + test('can configure Security Groups of NAT instances with defaultAllowAll NONE', () => { // GIVEN const stack = getTestStack(); @@ -1132,7 +1116,7 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { SecurityGroupEgress: [ { CidrIp: '255.255.255.255/32', @@ -1142,15 +1126,15 @@ nodeunitShim({ ToPort: 86, }, ], - })); + }); - test.done(); - }, - }, + }); - 'Network ACL association': { - 'by default uses default ACL reference'(test: Test) { + }); + + describe('Network ACL association', () => { + test('by default uses default ACL reference', () => { // GIVEN const stack = getTestStack(); @@ -1160,7 +1144,7 @@ nodeunitShim({ value: (vpc.publicSubnets[0] as Subnet).subnetNetworkAclAssociationId, }); - cdkExpect(stack).toMatch({ + expect(stack).toMatchTemplate({ Outputs: { Output: { Value: { 'Fn::GetAtt': ['TheVPCPublicSubnet1Subnet770D4FF2', 'NetworkAclAssociationId'] }, @@ -1168,10 +1152,10 @@ nodeunitShim({ }, }, MatchStyle.SUPERSET); - test.done(); - }, - 'if ACL is replaced new ACL reference is returned'(test: Test) { + }); + + test('if ACL is replaced new ACL reference is returned', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC', { cidr: '192.168.0.0/16' }); @@ -1185,7 +1169,7 @@ nodeunitShim({ subnetSelection: { subnetType: SubnetType.PUBLIC }, }); - cdkExpect(stack).toMatch({ + expect(stack).toMatchTemplate({ Outputs: { Output: { Value: { Ref: 'ACLDBD1BB49' }, @@ -1193,22 +1177,22 @@ nodeunitShim({ }, }, MatchStyle.SUPERSET); - test.done(); - }, - }, - 'When creating a VPC with a custom CIDR range': { - 'vpc.vpcCidrBlock is the correct network range'(test: Test) { + }); + }); + + describe('When creating a VPC with a custom CIDR range', () => { + test('vpc.vpcCidrBlock is the correct network range', () => { const stack = getTestStack(); new Vpc(stack, 'TheVPC', { cidr: '192.168.0.0/16' }); - cdkExpect(stack).to(haveResource('AWS::EC2::VPC', { + expect(stack).toHaveResource('AWS::EC2::VPC', { CidrBlock: '192.168.0.0/16', - })); - test.done(); - }, - }, - 'When tagging': { - 'VPC propagated tags will be on subnet, IGW, routetables, NATGW'(test: Test) { + }); + + }); + }); + describe('When tagging', () => { + test('VPC propagated tags will be on subnet, IGW, routetables, NATGW', () => { const stack = getTestStack(); const tags = { VpcType: 'Good', @@ -1222,44 +1206,44 @@ nodeunitShim({ // overwrite to set propagate Tags.of(vpc).add('BusinessUnit', 'Marketing', { includeResourceTypes: [CfnVPC.CFN_RESOURCE_TYPE_NAME] }); Tags.of(vpc).add('VpcType', 'Good'); - cdkExpect(stack).to(haveResource('AWS::EC2::VPC', hasTags(toCfnTags(allTags)))); + expect(stack).toHaveResource('AWS::EC2::VPC', hasTags(toCfnTags(allTags))); const taggables = ['Subnet', 'InternetGateway', 'NatGateway', 'RouteTable']; const propTags = toCfnTags(tags); const noProp = toCfnTags(noPropTags); for (const resource of taggables) { - cdkExpect(stack).to(haveResource(`AWS::EC2::${resource}`, hasTags(propTags))); - 
cdkExpect(stack).notTo(haveResource(`AWS::EC2::${resource}`, hasTags(noProp))); + expect(stack).toHaveResource(`AWS::EC2::${resource}`, hasTags(propTags)); + expect(stack).not.toHaveResource(`AWS::EC2::${resource}`, hasTags(noProp)); } - test.done(); - }, - 'Subnet Name will propagate to route tables and NATGW'(test: Test) { + + }); + test('Subnet Name will propagate to route tables and NATGW', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC'); for (const subnet of vpc.publicSubnets) { const tag = { Key: 'Name', Value: subnet.node.path }; - cdkExpect(stack).to(haveResource('AWS::EC2::NatGateway', hasTags([tag]))); - cdkExpect(stack).to(haveResource('AWS::EC2::RouteTable', hasTags([tag]))); + expect(stack).toHaveResource('AWS::EC2::NatGateway', hasTags([tag])); + expect(stack).toHaveResource('AWS::EC2::RouteTable', hasTags([tag])); } for (const subnet of vpc.privateSubnets) { const tag = { Key: 'Name', Value: subnet.node.path }; - cdkExpect(stack).to(haveResource('AWS::EC2::RouteTable', hasTags([tag]))); + expect(stack).toHaveResource('AWS::EC2::RouteTable', hasTags([tag])); } - test.done(); - }, - 'Tags can be added after the Vpc is created with `vpc.tags.setTag(...)`'(test: Test) { + + }); + test('Tags can be added after the Vpc is created with `vpc.tags.setTag(...)`', () => { const stack = getTestStack(); const vpc = new Vpc(stack, 'TheVPC'); const tag = { Key: 'Late', Value: 'Adder' }; - cdkExpect(stack).notTo(haveResource('AWS::EC2::VPC', hasTags([tag]))); + expect(stack).not.toHaveResource('AWS::EC2::VPC', hasTags([tag])); Tags.of(vpc).add(tag.Key, tag.Value); - cdkExpect(stack).to(haveResource('AWS::EC2::VPC', hasTags([tag]))); - test.done(); - }, - }, + expect(stack).toHaveResource('AWS::EC2::VPC', hasTags([tag])); + + }); + }); - 'subnet selection': { - 'selecting default subnets returns the private ones'(test: Test) { + describe('subnet selection', () => { + test('selecting default subnets returns the private ones', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'VPC'); @@ -1268,11 +1252,11 @@ nodeunitShim({ const { subnetIds } = vpc.selectSubnets(); // THEN - test.deepEqual(subnetIds, vpc.privateSubnets.map(s => s.subnetId)); - test.done(); - }, + expect(subnetIds).toEqual(vpc.privateSubnets.map(s => s.subnetId)); - 'can select public subnets'(test: Test) { + }); + + test('can select public subnets', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'VPC'); @@ -1281,12 +1265,12 @@ nodeunitShim({ const { subnetIds } = vpc.selectSubnets({ subnetType: SubnetType.PUBLIC }); // THEN - test.deepEqual(subnetIds, vpc.publicSubnets.map(s => s.subnetId)); + expect(subnetIds).toEqual(vpc.publicSubnets.map(s => s.subnetId)); + - test.done(); - }, + }); - 'can select isolated subnets'(test: Test) { + test('can select isolated subnets', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'VPC', { @@ -1300,12 +1284,12 @@ nodeunitShim({ const { subnetIds } = vpc.selectSubnets({ subnetType: SubnetType.PRIVATE_ISOLATED }); // THEN - test.deepEqual(subnetIds, vpc.isolatedSubnets.map(s => s.subnetId)); + expect(subnetIds).toEqual(vpc.isolatedSubnets.map(s => s.subnetId)); - test.done(); - }, - 'can select subnets by name'(test: Test) { + }); + + test('can select subnets by name', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'VPC', { @@ -1320,11 +1304,11 @@ nodeunitShim({ const { subnetIds } = vpc.selectSubnets({ subnetGroupName: 'DontTalkToMe' }); // THEN - 
test.deepEqual(subnetIds, vpc.privateSubnets.map(s => s.subnetId)); - test.done(); - }, + expect(subnetIds).toEqual(vpc.privateSubnets.map(s => s.subnetId)); + + }); - 'subnetName is an alias for subnetGroupName (backwards compat)'(test: Test) { + test('subnetName is an alias for subnetGroupName (backwards compat)', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'VPC', { @@ -1339,11 +1323,11 @@ nodeunitShim({ const { subnetIds } = vpc.selectSubnets({ subnetName: 'DontTalkToMe' }); // THEN - test.deepEqual(subnetIds, vpc.privateSubnets.map(s => s.subnetId)); - test.done(); - }, + expect(subnetIds).toEqual(vpc.privateSubnets.map(s => s.subnetId)); - 'selecting default subnets in a VPC with only isolated subnets returns the isolateds'(test: Test) { + }); + + test('selecting default subnets in a VPC with only isolated subnets returns the isolateds', () => { // GIVEN const stack = new Stack(); const vpc = Vpc.fromVpcAttributes(stack, 'VPC', { @@ -1357,11 +1341,11 @@ nodeunitShim({ const subnets = vpc.selectSubnets(); // THEN - test.deepEqual(subnets.subnetIds, ['iso-1', 'iso-2', 'iso-3']); - test.done(); - }, + expect(subnets.subnetIds).toEqual(['iso-1', 'iso-2', 'iso-3']); + + }); - 'selecting default subnets in a VPC with only public subnets returns the publics'(test: Test) { + test('selecting default subnets in a VPC with only public subnets returns the publics', () => { // GIVEN const stack = new Stack(); const vpc = Vpc.fromVpcAttributes(stack, 'VPC', { @@ -1375,23 +1359,23 @@ nodeunitShim({ const subnets = vpc.selectSubnets(); // THEN - test.deepEqual(subnets.subnetIds, ['pub-1', 'pub-2', 'pub-3']); - test.done(); - }, + expect(subnets.subnetIds).toEqual(['pub-1', 'pub-2', 'pub-3']); + + }); - 'selecting subnets by name fails if the name is unknown'(test: Test) { + test('selecting subnets by name fails if the name is unknown', () => { // GIVEN const stack = new Stack(); const vpc = new Vpc(stack, 'VPC'); - test.throws(() => { + expect(() => { vpc.selectSubnets({ subnetGroupName: 'Toot' }); - }, /There are no subnet groups with name 'Toot' in this VPC. Available names: Public,Private/); + }).toThrow(/There are no subnet groups with name 'Toot' in this VPC. 
Available names: Public,Private/); - test.done(); - }, - 'select subnets with az restriction'(test: Test) { + }); + + test('select subnets with az restriction', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'VpcNetwork', { @@ -1407,12 +1391,12 @@ nodeunitShim({ const { subnetIds } = vpc.selectSubnets({ onePerAz: true }); // THEN - test.deepEqual(subnetIds.length, 1); - test.deepEqual(subnetIds[0], vpc.privateSubnets[0].subnetId); - test.done(); - }, + expect(subnetIds.length).toEqual(1); + expect(subnetIds[0]).toEqual(vpc.privateSubnets[0].subnetId); + + }); - 'fromVpcAttributes using unknown-length list tokens'(test: Test) { + test('fromVpcAttributes using unknown-length list tokens', () => { // GIVEN const stack = getTestStack(); @@ -1435,9 +1419,9 @@ nodeunitShim({ }); // THEN - No exception - cdkExpect(stack).to(haveResource('Some::Resource', { + expect(stack).toHaveResource('Some::Resource', { subnetIds: { 'Fn::Split': [',', { 'Fn::ImportValue': 'myPublicSubnetIds' }] }, - })); + }); // THEN - Warnings have been added to the stack metadata const asm = SynthUtils.synthesize(stack); @@ -1452,10 +1436,10 @@ nodeunitShim({ ), ])); - test.done(); - }, - 'fromVpcAttributes using fixed-length list tokens'(test: Test) { + }); + + test('fromVpcAttributes using fixed-length list tokens', () => { // GIVEN const stack = getTestStack(); @@ -1480,17 +1464,17 @@ nodeunitShim({ // THEN - No exception const publicSubnetList = { 'Fn::Split': [',', { 'Fn::ImportValue': 'myPublicSubnetIds' }] }; - cdkExpect(stack).to(haveResource('Some::Resource', { + expect(stack).toHaveResource('Some::Resource', { subnetIds: [ { 'Fn::Select': [0, publicSubnetList] }, { 'Fn::Select': [1, publicSubnetList] }, ], - })); + }); + - test.done(); - }, + }); - 'select explicitly defined subnets'(test: Test) { + test('select explicitly defined subnets', () => { // GIVEN const stack = getTestStack(); const vpc = Vpc.fromVpcAttributes(stack, 'VPC', { @@ -1509,12 +1493,12 @@ nodeunitShim({ const { subnetIds } = vpc.selectSubnets({ subnets: [subnet] }); // THEN - test.deepEqual(subnetIds.length, 1); - test.deepEqual(subnetIds[0], subnet.subnetId); - test.done(); - }, + expect(subnetIds.length).toEqual(1); + expect(subnetIds[0]).toEqual(subnet.subnetId); - 'subnet created from subnetId'(test: Test) { + }); + + test('subnet created from subnetId', () => { // GIVEN const stack = getTestStack(); @@ -1522,11 +1506,11 @@ nodeunitShim({ const subnet = Subnet.fromSubnetId(stack, 'subnet1', 'pub-1'); // THEN - test.deepEqual(subnet.subnetId, 'pub-1'); - test.done(); - }, + expect(subnet.subnetId).toEqual('pub-1'); + + }); - 'Referencing AZ throws error when subnet created from subnetId'(test: Test) { + test('Referencing AZ throws error when subnet created from subnetId', () => { // GIVEN const stack = getTestStack(); @@ -1535,11 +1519,11 @@ nodeunitShim({ // THEN // eslint-disable-next-line max-len - test.throws(() => subnet.availabilityZone, "You cannot reference a Subnet's availability zone if it was not supplied. Add the availabilityZone when importing using Subnet.fromSubnetAttributes()"); - test.done(); - }, + expect(() => subnet.availabilityZone).toThrow("You cannot reference a Subnet's availability zone if it was not supplied. 
Add the availabilityZone when importing using Subnet.fromSubnetAttributes()");
+
+  });
 
-  'Referencing AZ throws error when subnet created from attributes without az'(test: Test) {
+  test('Referencing AZ throws error when subnet created from attributes without az', () => {
     // GIVEN
     const stack = getTestStack();
 
@@ -1547,13 +1531,13 @@ nodeunitShim({
     const subnet = Subnet.fromSubnetAttributes(stack, 'subnet1', { subnetId: 'pub-1', availabilityZone: '' });
 
     // THEN
-    test.deepEqual(subnet.subnetId, 'pub-1');
+    expect(subnet.subnetId).toEqual('pub-1');
     // eslint-disable-next-line max-len
-    test.throws(() => subnet.availabilityZone, "You cannot reference a Subnet's availability zone if it was not supplied. Add the availabilityZone when importing using Subnet.fromSubnetAttributes()");
-    test.done();
-  },
+    expect(() => subnet.availabilityZone).toThrow("You cannot reference a Subnet's availability zone if it was not supplied. Add the availabilityZone when importing using Subnet.fromSubnetAttributes()");
 
-  'AZ have value when subnet created from attributes with az'(test: Test) {
+  });
+
+  test('AZ has value when subnet created from attributes with az', () => {
     // GIVEN
     const stack = getTestStack();
 
@@ -1561,12 +1545,12 @@ nodeunitShim({
     const subnet = Subnet.fromSubnetAttributes(stack, 'subnet1', { subnetId: 'pub-1', availabilityZone: 'az-1234' });
 
     // THEN
-    test.deepEqual(subnet.subnetId, 'pub-1');
-    test.deepEqual(subnet.availabilityZone, 'az-1234');
-    test.done();
-  },
+    expect(subnet.subnetId).toEqual('pub-1');
+    expect(subnet.availabilityZone).toEqual('az-1234');
+
+  });
 
-  'Can select subnets by type and AZ'(test: Test) {
+  test('Can select subnets by type and AZ', () => {
     // GIVEN
     const stack = getTestStack();
     const vpc = new Vpc(stack, 'VPC', {
@@ -1585,7 +1569,7 @@ nodeunitShim({
     });
 
     // THEN
-    cdkExpect(stack).to(haveResource('AWS::EC2::VPCEndpoint', {
+    expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', {
       ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc',
       SubnetIds: [
         {
@@ -1595,11 +1579,11 @@ nodeunitShim({
           Ref: 'VPCPrivateSubnet3Subnet3EDCD457',
         },
       ],
-    }));
-    test.done();
-  },
+    });
+
+  });
 
-  'SubnetSelection filtered on az uses default subnetType when no subnet type specified'(test: Test) {
+  test('SubnetSelection filtered on az uses default subnetType when no subnet type specified', () => {
     // GIVEN
     const stack = getTestStack();
     const vpc = new Vpc(stack, 'VPC', {
@@ -1616,7 +1600,7 @@ nodeunitShim({
     });
 
     // THEN
-    cdkExpect(stack).to(haveResource('AWS::EC2::VPCEndpoint', {
+    expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', {
       ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc',
       SubnetIds: [
         {
@@ -1626,10 +1610,10 @@ nodeunitShim({
          Ref: 'VPCPrivateSubnet3Subnet3EDCD457',
         },
       ],
-    }));
-    test.done();
-  },
-  'SubnetSelection doesnt throw error when selecting imported subnets'(test: Test) {
+    });
+
+  });
+  test('SubnetSelection does not throw error when selecting imported subnets', () => {
     // GIVEN
     const stack = getTestStack();
 
@@ -1637,15 +1621,15 @@ nodeunitShim({
     const vpc = new Vpc(stack, 'VPC');
 
     // THEN
-    test.doesNotThrow(() => vpc.selectSubnets({
+    expect(() => vpc.selectSubnets({
       subnets: [
         Subnet.fromSubnetId(stack, 'Subnet', 'sub-1'),
       ],
-    }));
-    test.done();
-  },
+    })).not.toThrow();
 
-  'can filter by single IP address'(test: Test) {
+  });
+
+  test('can filter by single IP address', () => {
    // GIVEN
    const stack = getTestStack();
 
@@ -1668,15 +1652,15 @@ nodeunitShim({
 
    // THEN
    // 10.0.160.0/19 is the third subnet, sequentially, if you 
split // 10.0.0.0/16 into 6 pieces - cdkExpect(stack).to(haveResource('AWS::EC2::Instance', { + expect(stack).toHaveResource('AWS::EC2::Instance', { SubnetId: { Ref: 'VPCPrivateSubnet3Subnet3EDCD457', }, - })); - test.done(); - }, + }); + + }); - 'can filter by multiple IP addresses'(test: Test) { + test('can filter by multiple IP addresses', () => { // GIVEN const stack = getTestStack(); @@ -1699,7 +1683,7 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', SubnetIds: [ { @@ -1709,11 +1693,11 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet3Subnet3EDCD457', }, ], - })); - test.done(); - }, + }); + + }); - 'can filter by Subnet Ids'(test: Test) { + test('can filter by Subnet Ids', () => { // GIVEN const stack = getTestStack(); @@ -1734,14 +1718,14 @@ nodeunitShim({ }); // THEN - cdkExpect(stack).to(haveResource('AWS::EC2::VPCEndpoint', { + expect(stack).toHaveResource('AWS::EC2::VPCEndpoint', { ServiceName: 'com.amazonaws.vpce.us-east-1.vpce-svc-uuddlrlrbastrtsvc', SubnetIds: ['priv-1', 'priv-2'], - })); - test.done(); - }, + }); + + }); - 'can filter by Cidr Netmask'(test: Test) { + test('can filter by Cidr Netmask', () => { // GIVEN const stack = getTestStack(); const vpc = new Vpc(stack, 'VpcNetwork', { @@ -1759,12 +1743,12 @@ nodeunitShim({ ); // THEN - test.deepEqual(subnetIds.length, 2); + expect(subnetIds.length).toEqual(2); const expected = vpc.publicSubnets.filter(s => s.ipv4CidrBlock.endsWith('/20')); - test.deepEqual(subnetIds, expected.map(s => s.subnetId)); - test.done(); - }, - }, + expect(subnetIds).toEqual(expected.map(s => s.subnetId)); + + }); + }); }); function getTestStack(): Stack { diff --git a/packages/@aws-cdk/aws-ec2/test/vpn.test.ts b/packages/@aws-cdk/aws-ec2/test/vpn.test.ts index 3240c59a5c9e0..8a4c99159e5ec 100644 --- a/packages/@aws-cdk/aws-ec2/test/vpn.test.ts +++ b/packages/@aws-cdk/aws-ec2/test/vpn.test.ts @@ -1,10 +1,9 @@ -import { expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import { Duration, Stack, Token } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { PublicSubnet, Vpc, VpnConnection } from '../lib'; -nodeunitShim({ - 'can add a vpn connection to a vpc with a vpn gateway'(test: Test) { +describe('vpn', () => { + test('can add a vpn connection to a vpc with a vpn gateway', () => { // GIVEN const stack = new Stack(); @@ -19,13 +18,13 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::CustomerGateway', { + expect(stack).toHaveResource('AWS::EC2::CustomerGateway', { BgpAsn: 65001, IpAddress: '192.0.2.1', Type: 'ipsec.1', - })); + }); - expect(stack).to(haveResource('AWS::EC2::VPNConnection', { + expect(stack).toHaveResource('AWS::EC2::VPNConnection', { CustomerGatewayId: { Ref: 'VpcNetworkVpnConnectionCustomerGateway8B56D9AF', }, @@ -34,12 +33,12 @@ nodeunitShim({ Ref: 'VpcNetworkVpnGateway501295FA', }, StaticRoutesOnly: false, - })); + }); + - test.done(); - }, + }); - 'with static routing'(test: Test) { + test('with static routing', () => { // GIVEN const stack = new Stack(); @@ -57,7 +56,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::VPNConnection', { + expect(stack).toHaveResource('AWS::EC2::VPNConnection', { CustomerGatewayId: { Ref: 'VpcNetworkstaticCustomerGatewayAF2651CC', }, @@ -66,26 +65,26 @@ nodeunitShim({ Ref: 'VpcNetworkVpnGateway501295FA', }, 
StaticRoutesOnly: true, - })); + }); - expect(stack).to(haveResource('AWS::EC2::VPNConnectionRoute', { + expect(stack).toHaveResource('AWS::EC2::VPNConnectionRoute', { DestinationCidrBlock: '192.168.10.0/24', VpnConnectionId: { Ref: 'VpcNetworkstaticE33EA98C', }, - })); + }); - expect(stack).to(haveResource('AWS::EC2::VPNConnectionRoute', { + expect(stack).toHaveResource('AWS::EC2::VPNConnectionRoute', { DestinationCidrBlock: '192.168.20.0/24', VpnConnectionId: { Ref: 'VpcNetworkstaticE33EA98C', }, - })); + }); - test.done(); - }, - 'with tunnel options'(test: Test) { + }); + + test('with tunnel options', () => { // GIVEN const stack = new Stack(); @@ -103,7 +102,7 @@ nodeunitShim({ }, }); - expect(stack).to(haveResource('AWS::EC2::VPNConnection', { + expect(stack).toHaveResource('AWS::EC2::VPNConnection', { CustomerGatewayId: { Ref: 'VpcNetworkVpnConnectionCustomerGateway8B56D9AF', }, @@ -118,31 +117,31 @@ nodeunitShim({ TunnelInsideCidr: '169.254.10.0/30', }, ], - })); + }); + - test.done(); - }, + }); - 'fails when ip is invalid'(test: Test) { + test('fails when ip is invalid', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnConnections: { VpnConnection: { ip: '192.0.2.256', }, }, - }), /`ip`.+IPv4/); + })).toThrow(/`ip`.+IPv4/); - test.done(); - }, - 'fails when specifying more than two tunnel options'(test: Test) { + }); + + test('fails when specifying more than two tunnel options', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnConnections: { VpnConnection: { ip: '192.0.2.1', @@ -159,16 +158,16 @@ nodeunitShim({ ], }, }, - }), /two.+`tunnelOptions`/); + })).toThrow(/two.+`tunnelOptions`/); + - test.done(); - }, + }); - 'fails with duplicate tunnel inside cidr'(test: Test) { + test('fails with duplicate tunnel inside cidr', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnConnections: { VpnConnection: { ip: '192.0.2.1', @@ -182,16 +181,16 @@ nodeunitShim({ ], }, }, - }), /`tunnelInsideCidr`.+both tunnels/); + })).toThrow(/`tunnelInsideCidr`.+both tunnels/); - test.done(); - }, - 'fails when specifying an invalid pre-shared key'(test: Test) { + }); + + test('fails when specifying an invalid pre-shared key', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnConnections: { VpnConnection: { ip: '192.0.2.1', @@ -202,16 +201,16 @@ nodeunitShim({ ], }, }, - }), /`preSharedKey`/); + })).toThrow(/`preSharedKey`/); + - test.done(); - }, + }); - 'fails when specifying a reserved tunnel inside cidr'(test: Test) { + test('fails when specifying a reserved tunnel inside cidr', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnConnections: { VpnConnection: { ip: '192.0.2.1', @@ -222,16 +221,16 @@ nodeunitShim({ ], }, }, - }), /`tunnelInsideCidr`.+reserved/); + })).toThrow(/`tunnelInsideCidr`.+reserved/); + - test.done(); - }, + }); - 'fails when specifying an invalid tunnel inside cidr'(test: Test) { + test('fails when specifying an invalid tunnel inside cidr', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnConnections: { VpnConnection: { 
ip: '192.0.2.1', @@ -242,12 +241,12 @@ nodeunitShim({ ], }, }, - }), /`tunnelInsideCidr`.+size/); + })).toThrow(/`tunnelInsideCidr`.+size/); - test.done(); - }, - 'can use metricTunnelState on a vpn connection'(test: Test) { + }); + + test('can use metricTunnelState on a vpn connection', () => { // GIVEN const stack = new Stack(); @@ -260,7 +259,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(vpn.metricTunnelState()), { + expect(stack.resolve(vpn.metricTunnelState())).toEqual({ dimensions: { VpnId: { Ref: 'VpcNetworkVpnA476C58D' } }, namespace: 'AWS/VPN', metricName: 'TunnelState', @@ -268,37 +267,37 @@ nodeunitShim({ statistic: 'Average', }); - test.done(); - }, - 'can use metricAllTunnelDataOut'(test: Test) { + }); + + test('can use metricAllTunnelDataOut', () => { // GIVEN const stack = new Stack(); // THEN - test.deepEqual(stack.resolve(VpnConnection.metricAllTunnelDataOut()), { + expect(stack.resolve(VpnConnection.metricAllTunnelDataOut())).toEqual({ namespace: 'AWS/VPN', metricName: 'TunnelDataOut', period: Duration.minutes(5), statistic: 'Sum', }); - test.done(); - }, - 'fails when enabling vpnGateway without having subnets'(test: Test) { + }); + + test('fails when enabling vpnGateway without having subnets', () => { // GIVEN const stack = new Stack(); - test.throws(() => new Vpc(stack, 'VpcNetwork', { + expect(() => new Vpc(stack, 'VpcNetwork', { vpnGateway: true, subnetConfiguration: [], - }), /VPN gateway/); + })).toThrow(/VPN gateway/); - test.done(); - }, - 'can add a vpn connection later to a vpc that initially had no subnets'(test: Test) { + }); + + test('can add a vpn connection later to a vpc that initially had no subnets', () => { // GIVEN const stack = new Stack(); @@ -317,12 +316,12 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::CustomerGateway', { + expect(stack).toHaveResource('AWS::EC2::CustomerGateway', { Type: 'ipsec.1', - })); - test.done(); - }, - 'can add a vpn connection with a Token as customer gateway ip'(test:Test) { + }); + + }); + test('can add a vpn connection with a Token as customer gateway ip', () => { // GIVEN const stack = new Stack(); const token = Token.asAny('192.0.2.1'); @@ -337,9 +336,9 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::EC2::CustomerGateway', { + expect(stack).toHaveResource('AWS::EC2::CustomerGateway', { IpAddress: '192.0.2.1', - })); - test.done(); - }, + }); + + }); }); diff --git a/packages/@aws-cdk/aws-ecr-assets/package.json b/packages/@aws-cdk/aws-ecr-assets/package.json index edf101cdf28b2..44f7a2d4ffa7c 100644 --- a/packages/@aws-cdk/aws-ecr-assets/package.json +++ b/packages/@aws-cdk/aws-ecr-assets/package.json @@ -70,7 +70,7 @@ "aws-cdk": "0.0.0", "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", - "nodeunit-shim": "0.0.0", + "jest": "^26.6.3", "pkglint": "0.0.0", "proxyquire": "^2.1.3", "@aws-cdk/cloud-assembly-schema": "0.0.0", diff --git a/packages/@aws-cdk/aws-rds/package.json b/packages/@aws-cdk/aws-rds/package.json index 6144a8f13493b..d3fcb9f70771c 100644 --- a/packages/@aws-cdk/aws-rds/package.json +++ b/packages/@aws-cdk/aws-rds/package.json @@ -81,7 +81,7 @@ "cdk-integ-tools": "0.0.0", "cfn2ts": "0.0.0", "pkglint": "0.0.0", - "nodeunit-shim": "0.0.0", + "jest": "^26.6.3", "@aws-cdk/assert-internal": "0.0.0" }, "dependencies": { diff --git a/packages/@aws-cdk/aws-rds/test/cluster-engine.test.ts b/packages/@aws-cdk/aws-rds/test/cluster-engine.test.ts index 54a02441fb9c7..9784dfe949473 100644 --- 
a/packages/@aws-cdk/aws-rds/test/cluster-engine.test.ts +++ b/packages/@aws-cdk/aws-rds/test/cluster-engine.test.ts @@ -1,8 +1,8 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; +import '@aws-cdk/assert-internal/jest'; import { AuroraEngineVersion, AuroraMysqlEngineVersion, AuroraPostgresEngineVersion, DatabaseClusterEngine } from '../lib'; -nodeunitShim({ - "default parameterGroupFamily for versionless Aurora cluster engine is 'aurora5.6'"(test: Test) { +describe('cluster engine', () => { + test("default parameterGroupFamily for versionless Aurora cluster engine is 'aurora5.6'", () => { // GIVEN const engine = DatabaseClusterEngine.AURORA; @@ -10,12 +10,12 @@ nodeunitShim({ const family = engine.parameterGroupFamily; // THEN - test.equals(family, 'aurora5.6'); + expect(family).toEqual('aurora5.6'); - test.done(); - }, - "default parameterGroupFamily for versionless Aurora MySQL cluster engine is 'aurora-mysql5.7'"(test: Test) { + }); + + test("default parameterGroupFamily for versionless Aurora MySQL cluster engine is 'aurora-mysql5.7'", () => { // GIVEN const engine = DatabaseClusterEngine.AURORA_MYSQL; @@ -23,12 +23,12 @@ nodeunitShim({ const family = engine.parameterGroupFamily; // THEN - test.equals(family, 'aurora-mysql5.7'); + expect(family).toEqual('aurora-mysql5.7'); + - test.done(); - }, + }); - 'default parameterGroupFamily for versionless Aurora PostgreSQL is not defined'(test: Test) { + test('default parameterGroupFamily for versionless Aurora PostgreSQL is not defined', () => { // GIVEN const engine = DatabaseClusterEngine.AURORA_POSTGRESQL; @@ -36,12 +36,12 @@ nodeunitShim({ const family = engine.parameterGroupFamily; // THEN - test.equals(family, undefined); + expect(family).toEqual(undefined); - test.done(); - }, - 'cluster parameter group correctly determined for AURORA and given version'(test: Test) { + }); + + test('cluster parameter group correctly determined for AURORA and given version', () => { // GIVEN const engine = DatabaseClusterEngine.aurora({ version: AuroraEngineVersion.VER_1_22_2, @@ -51,12 +51,12 @@ nodeunitShim({ const family = engine.parameterGroupFamily; // THEN - test.equals(family, 'aurora5.6'); + expect(family).toEqual('aurora5.6'); + - test.done(); - }, + }); - 'cluster parameter group correctly determined for AURORA_MYSQL and given version'(test: Test) { + test('cluster parameter group correctly determined for AURORA_MYSQL and given version', () => { // GIVEN const engine = DatabaseClusterEngine.auroraMysql({ version: AuroraMysqlEngineVersion.VER_2_07_1, @@ -66,12 +66,12 @@ nodeunitShim({ const family = engine.parameterGroupFamily; // THEN - test.equals(family, 'aurora-mysql5.7'); + expect(family).toEqual('aurora-mysql5.7'); - test.done(); - }, - 'cluster parameter group correctly determined for AURORA_POSTGRESQL and given version'(test: Test) { + }); + + test('cluster parameter group correctly determined for AURORA_POSTGRESQL and given version', () => { // GIVEN const engine = DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.VER_11_6, @@ -81,38 +81,38 @@ nodeunitShim({ const family = engine.parameterGroupFamily; // THEN - test.equals(family, 'aurora-postgresql11'); + expect(family).toEqual('aurora-postgresql11'); + - test.done(); - }, + }); - 'parameter group family'(test: Test) { + test('parameter group family', () => { // the PostgreSQL engine knows about the following major versions: 9.6, 10 and 11 - test.equals(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('8', '8') 
}).parameterGroupFamily, + expect(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('8', '8') }).parameterGroupFamily).toEqual( 'aurora-postgresql8'); - test.equals(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9', '9') }).parameterGroupFamily, + expect(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9', '9') }).parameterGroupFamily).toEqual( 'aurora-postgresql9'); - test.equals(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9.7', '9.7') }).parameterGroupFamily, + expect(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9.7', '9.7') }).parameterGroupFamily).toEqual( 'aurora-postgresql9.7'); - test.equals(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9.6', '9.6') }).parameterGroupFamily, + expect(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9.6', '9.6') }).parameterGroupFamily).toEqual( 'aurora-postgresql9.6'); - test.equals(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9.6.1', '9.6') }).parameterGroupFamily, + expect(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('9.6.1', '9.6') }).parameterGroupFamily).toEqual( 'aurora-postgresql9.6'); - test.equals(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('10.0', '10') }).parameterGroupFamily, + expect(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.of('10.0', '10') }).parameterGroupFamily).toEqual( 'aurora-postgresql10'); - test.done(); - }, - 'supported log types'(test: Test) { + }); + + test('supported log types', () => { const mysqlLogTypes = ['error', 'general', 'slowquery', 'audit']; - test.deepEqual(DatabaseClusterEngine.aurora({ version: AuroraEngineVersion.VER_1_22_2 }).supportedLogTypes, mysqlLogTypes); - test.deepEqual(DatabaseClusterEngine.auroraMysql({ version: AuroraMysqlEngineVersion.VER_2_08_1 }).supportedLogTypes, mysqlLogTypes); - test.deepEqual(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.VER_9_6_9 }).supportedLogTypes, ['postgresql']); - test.done(); - }, + expect(DatabaseClusterEngine.aurora({ version: AuroraEngineVersion.VER_1_22_2 }).supportedLogTypes).toEqual(mysqlLogTypes); + expect(DatabaseClusterEngine.auroraMysql({ version: AuroraMysqlEngineVersion.VER_2_08_1 }).supportedLogTypes).toEqual(mysqlLogTypes); + expect(DatabaseClusterEngine.auroraPostgres({ version: AuroraPostgresEngineVersion.VER_9_6_9 }).supportedLogTypes).toEqual(['postgresql']); + + }); }); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-rds/test/database-secret.test.ts b/packages/@aws-cdk/aws-rds/test/database-secret.test.ts index c05e07e73a0bb..9fe7793536a1c 100644 --- a/packages/@aws-cdk/aws-rds/test/database-secret.test.ts +++ b/packages/@aws-cdk/aws-rds/test/database-secret.test.ts @@ -1,11 +1,10 @@ -import { expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import { CfnResource, Stack } from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { DatabaseSecret } from '../lib'; import { DEFAULT_PASSWORD_EXCLUDE_CHARS } from '../lib/private/util'; -nodeunitShim({ - 'create a database secret'(test: Test) { +describe('database secret', () => { + test('create a database secret', () => { // GIVEN const stack = new Stack(); @@ -15,7 +14,7 @@ nodeunitShim({ }); // THEN - 
expect(stack).to(haveResource('AWS::SecretsManager::Secret', {
+    expect(stack).toHaveResource('AWS::SecretsManager::Secret', {
       Description: {
         'Fn::Join': [
           '',
           [
@@ -33,14 +32,14 @@ nodeunitShim({
           PasswordLength: 30,
           SecretStringTemplate: '{"username":"admin-username"}',
         },
-    }));
+    });
+
+    expect(getSecretLogicalId(dbSecret, stack)).toEqual('SecretA720EF05');
 
-    test.equal(getSecretLogicalId(dbSecret, stack), 'SecretA720EF05');
-    test.done();
-  },
+  });
 
-  'with master secret'(test: Test) {
+  test('with master secret', () => {
     // GIVEN
     const stack = new Stack();
     const masterSecret = new DatabaseSecret(stack, 'MasterSecret', {
@@ -55,7 +54,7 @@ nodeunitShim({
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::SecretsManager::Secret', {
+    expect(stack).toHaveResource('AWS::SecretsManager::Secret', {
       GenerateSecretString: {
         ExcludeCharacters: '"@/\\',
         GenerateStringKey: 'password',
@@ -73,12 +72,12 @@ nodeunitShim({
           ],
         },
       },
-    }));
+    });
 
-    test.done();
-  },
 
-  'replace on password critera change'(test: Test) {
+  });
+
+  test('replace on password criteria change', () => {
     // GIVEN
     const stack = new Stack();
 
@@ -90,7 +89,7 @@ nodeunitShim({
 
     // THEN
     const dbSecretlogicalId = getSecretLogicalId(dbSecret, stack);
-    test.equal(dbSecretlogicalId, 'Secret3fdaad7efa858a3daf9490cf0a702aeb');
+    expect(dbSecretlogicalId).toEqual('Secret3fdaad7efa858a3daf9490cf0a702aeb');
 
     // same node path but other excluded characters
     stack.node.tryRemoveChild('Secret');
@@ -99,17 +98,17 @@ nodeunitShim({
       replaceOnPasswordCriteriaChanges: true,
       excludeCharacters: '@!()[]',
     });
-    test.notEqual(dbSecretlogicalId, getSecretLogicalId(otherSecret1, stack));
+    expect(dbSecretlogicalId).not.toEqual(getSecretLogicalId(otherSecret1, stack));
 
     // other node path but same excluded characters
     const otherSecret2 = new DatabaseSecret(stack, 'Secret2', {
       username: 'admin',
       replaceOnPasswordCriteriaChanges: true,
     });
-    test.notEqual(dbSecretlogicalId, getSecretLogicalId(otherSecret2, stack));
+    expect(dbSecretlogicalId).not.toEqual(getSecretLogicalId(otherSecret2, stack));
+
-    test.done();
-  },
+  });
 });
 
 function getSecretLogicalId(dbSecret: DatabaseSecret, stack: Stack): string {
diff --git a/packages/@aws-cdk/aws-rds/test/database-secretmanager.test.ts b/packages/@aws-cdk/aws-rds/test/database-secretmanager.test.ts
index 8914401e189f6..cb8057cc464f5 100644
--- a/packages/@aws-cdk/aws-rds/test/database-secretmanager.test.ts
+++ b/packages/@aws-cdk/aws-rds/test/database-secretmanager.test.ts
@@ -1,12 +1,12 @@
-import { expect, haveResource, ResourcePart } from '@aws-cdk/assert-internal';
+import '@aws-cdk/assert-internal/jest';
+import { ResourcePart } from '@aws-cdk/assert-internal';
 import * as ec2 from '@aws-cdk/aws-ec2';
 import * as secretsmanager from '@aws-cdk/aws-secretsmanager';
 import * as cdk from '@aws-cdk/core';
-import { nodeunitShim, Test } from 'nodeunit-shim';
 import { ServerlessCluster, DatabaseClusterEngine, ParameterGroup, Credentials } from '../lib';
 
-nodeunitShim({
-  'can create a Serverless Cluster using an existing secret from secretmanager'(test: Test) {
+describe('database secret manager', () => {
+  test('can create a Serverless Cluster using an existing secret from secretmanager', () => {
     // GIVEN
     const stack = testStack();
     const vpc = new ec2.Vpc(stack, 'VPC');
@@ -21,7 +21,7 @@ nodeunitShim({
     });
 
     // THEN
-    expect(stack).to(haveResource('AWS::RDS::DBCluster', {
+    expect(stack).toHaveResource('AWS::RDS::DBCluster', {
       Properties: {
         Engine: 'aurora-postgresql',
         DBClusterParameterGroupName: 'default.aurora-postgresql10', 
@@ -43,10 +43,10 @@ nodeunitShim({ }, DeletionPolicy: 'Snapshot', UpdateReplacePolicy: 'Snapshot', - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); - test.done(); - }, + + }); }); function testStack() { diff --git a/packages/@aws-cdk/aws-rds/test/instance-engine.test.ts b/packages/@aws-cdk/aws-rds/test/instance-engine.test.ts index e3b02c48770d4..99ed162c4f5eb 100644 --- a/packages/@aws-cdk/aws-rds/test/instance-engine.test.ts +++ b/packages/@aws-cdk/aws-rds/test/instance-engine.test.ts @@ -1,133 +1,132 @@ -import { expect, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as iam from '@aws-cdk/aws-iam'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as rds from '../lib'; -nodeunitShim({ - 'default parameterGroupFamily for versionless MariaDB instance engine is not defined'(test: Test) { +describe('instance engine', () => { + test('default parameterGroupFamily for versionless MariaDB instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.MARIADB; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); - test.done(); - }, - 'default parameterGroupFamily for versionless MySQL instance engine is not defined'(test: Test) { + }); + + test('default parameterGroupFamily for versionless MySQL instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.MYSQL; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); + - test.done(); - }, + }); - 'default parameterGroupFamily for versionless PostgreSQL instance engine is not defined'(test: Test) { + test('default parameterGroupFamily for versionless PostgreSQL instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.POSTGRES; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); + - test.done(); - }, + }); - "default parameterGroupFamily for versionless Oracle SE instance engine is 'oracle-se-11.2'"(test: Test) { + test("default parameterGroupFamily for versionless Oracle SE instance engine is 'oracle-se-11.2'", () => { const engine = rds.DatabaseInstanceEngine.ORACLE_SE; const family = engine.parameterGroupFamily; - test.equals(family, 'oracle-se-11.2'); + expect(family).toEqual('oracle-se-11.2'); - test.done(); - }, - "default parameterGroupFamily for versionless Oracle SE 1 instance engine is 'oracle-se1-11.2'"(test: Test) { + }); + + test("default parameterGroupFamily for versionless Oracle SE 1 instance engine is 'oracle-se1-11.2'", () => { const engine = rds.DatabaseInstanceEngine.ORACLE_SE1; const family = engine.parameterGroupFamily; - test.equals(family, 'oracle-se1-11.2'); + expect(family).toEqual('oracle-se1-11.2'); + - test.done(); - }, + }); - 'default parameterGroupFamily for versionless Oracle SE 2 instance engine is not defined'(test: Test) { + test('default parameterGroupFamily for versionless Oracle SE 2 instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.ORACLE_SE2; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); + - test.done(); - }, + }); - 'default parameterGroupFamily for versionless Oracle EE instance engine is not defined'(test: Test) { + test('default parameterGroupFamily for versionless Oracle EE instance engine is not defined', () => { const 
engine = rds.DatabaseInstanceEngine.ORACLE_EE; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); - test.done(); - }, - 'default parameterGroupFamily for versionless SQL Server SE instance engine is not defined'(test: Test) { + }); + + test('default parameterGroupFamily for versionless SQL Server SE instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.SQL_SERVER_SE; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); + - test.done(); - }, + }); - 'default parameterGroupFamily for versionless SQL Server EX instance engine is not defined'(test: Test) { + test('default parameterGroupFamily for versionless SQL Server EX instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.SQL_SERVER_EX; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); + - test.done(); - }, + }); - 'default parameterGroupFamily for versionless SQL Server Web instance engine is not defined'(test: Test) { + test('default parameterGroupFamily for versionless SQL Server Web instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.SQL_SERVER_WEB; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); - test.done(); - }, - 'default parameterGroupFamily for versionless SQL Server EE instance engine is not defined'(test: Test) { + }); + + test('default parameterGroupFamily for versionless SQL Server EE instance engine is not defined', () => { const engine = rds.DatabaseInstanceEngine.SQL_SERVER_EE; const family = engine.parameterGroupFamily; - test.equals(family, undefined); + expect(family).toEqual(undefined); + - test.done(); - }, + }); - 'Oracle engine bindToInstance': { + describe('Oracle engine bindToInstance', () => { - 'returns s3 integration feature'(test: Test) { + test('returns s3 integration feature', () => { const engine = rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }); const engineConfig = engine.bindToInstance(new cdk.Stack(), {}); - test.equals(engineConfig.features?.s3Import, 'S3_INTEGRATION'); - test.equals(engineConfig.features?.s3Export, 'S3_INTEGRATION'); + expect(engineConfig.features?.s3Import).toEqual('S3_INTEGRATION'); + expect(engineConfig.features?.s3Export).toEqual('S3_INTEGRATION'); + - test.done(); - }, + }); - 's3 import/export - creates an option group if needed'(test: Test) { + test('s3 import/export - creates an option group if needed', () => { const stack = new cdk.Stack(); const engine = rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }); @@ -136,19 +135,19 @@ nodeunitShim({ s3ImportRole: new iam.Role(stack, 'ImportRole', { assumedBy: new iam.AccountRootPrincipal() }), }); - test.ok(engineConfig.optionGroup); - expect(stack).to(haveResourceLike('AWS::RDS::OptionGroup', { + expect(engineConfig.optionGroup).toBeDefined(); + expect(stack).toHaveResourceLike('AWS::RDS::OptionGroup', { EngineName: 'oracle-se2', OptionConfigurations: [{ OptionName: 'S3_INTEGRATION', OptionVersion: '1.0', }], - })); + }); + - test.done(); - }, + }); - 's3 import/export - appends to an existing option group if it exists'(test: Test) { + test('s3 import/export - appends to an existing option group if it exists', () => { const stack = new cdk.Stack(); const engine = 
rds.DatabaseInstanceEngine.oracleSe2({ version: rds.OracleEngineVersion.VER_19_0_0_0_2020_04_R1 }); const optionGroup = new rds.OptionGroup(stack, 'OptionGroup', { @@ -163,8 +162,8 @@ nodeunitShim({ s3ImportRole: new iam.Role(stack, 'ImportRole', { assumedBy: new iam.AccountRootPrincipal() }), }); - test.equals(engineConfig.optionGroup, optionGroup); - expect(stack).to(haveResourceLike('AWS::RDS::OptionGroup', { + expect(engineConfig.optionGroup).toEqual(optionGroup); + expect(stack).toHaveResourceLike('AWS::RDS::OptionGroup', { EngineName: 'oracle-se2', OptionConfigurations: [{ OptionName: 'MY_OPTION_CONFIG', @@ -173,39 +172,39 @@ nodeunitShim({ OptionName: 'S3_INTEGRATION', OptionVersion: '1.0', }], - })); + }); - test.done(); - }, - }, - 'SQL Server engine bindToInstance': { - 'returns s3 integration feature'(test: Test) { + }); + }); + + describe('SQL Server engine bindToInstance', () => { + test('returns s3 integration feature', () => { const engine = rds.DatabaseInstanceEngine.sqlServerSe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }); const engineConfig = engine.bindToInstance(new cdk.Stack(), {}); - test.equals(engineConfig.features?.s3Import, 'S3_INTEGRATION'); - test.equals(engineConfig.features?.s3Export, 'S3_INTEGRATION'); + expect(engineConfig.features?.s3Import).toEqual('S3_INTEGRATION'); + expect(engineConfig.features?.s3Export).toEqual('S3_INTEGRATION'); + - test.done(); - }, + }); - 's3 import/export - throws if roles are not equal'(test: Test) { + test('s3 import/export - throws if roles are not equal', () => { const stack = new cdk.Stack(); const engine = rds.DatabaseInstanceEngine.sqlServerSe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }); const s3ImportRole = new iam.Role(stack, 'ImportRole', { assumedBy: new iam.AccountRootPrincipal() }); const s3ExportRole = new iam.Role(stack, 'ExportRole', { assumedBy: new iam.AccountRootPrincipal() }); - test.throws(() => engine.bindToInstance(new cdk.Stack(), { s3ImportRole, s3ExportRole }), /S3 import and export roles must be the same/); - test.doesNotThrow(() => engine.bindToInstance(new cdk.Stack(), { s3ImportRole })); - test.doesNotThrow(() => engine.bindToInstance(new cdk.Stack(), { s3ExportRole })); - test.doesNotThrow(() => engine.bindToInstance(new cdk.Stack(), { s3ImportRole, s3ExportRole: s3ImportRole })); + expect(() => engine.bindToInstance(new cdk.Stack(), { s3ImportRole, s3ExportRole })).toThrow(/S3 import and export roles must be the same/); + expect(() => engine.bindToInstance(new cdk.Stack(), { s3ImportRole })).not.toThrow(); + expect(() => engine.bindToInstance(new cdk.Stack(), { s3ExportRole })).not.toThrow(); + expect(() => engine.bindToInstance(new cdk.Stack(), { s3ImportRole, s3ExportRole: s3ImportRole })).not.toThrow(); + - test.done(); - }, + }); - 's3 import/export - creates an option group if needed'(test: Test) { + test('s3 import/export - creates an option group if needed', () => { const stack = new cdk.Stack(); const engine = rds.DatabaseInstanceEngine.sqlServerSe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }); @@ -214,8 +213,8 @@ nodeunitShim({ s3ImportRole: new iam.Role(stack, 'ImportRole', { assumedBy: new iam.AccountRootPrincipal() }), }); - test.ok(engineConfig.optionGroup); - expect(stack).to(haveResourceLike('AWS::RDS::OptionGroup', { + expect(engineConfig.optionGroup).toBeDefined(); + expect(stack).toHaveResourceLike('AWS::RDS::OptionGroup', { EngineName: 'sqlserver-se', OptionConfigurations: [{ OptionName: 'SQLSERVER_BACKUP_RESTORE', @@ -224,12 
+223,12 @@ nodeunitShim({ Value: { 'Fn::GetAtt': ['ImportRole0C9E6F9A', 'Arn'] }, }], }], - })); + }); + - test.done(); - }, + }); - 's3 import/export - appends to an existing option group if it exists'(test: Test) { + test('s3 import/export - appends to an existing option group if it exists', () => { const stack = new cdk.Stack(); const engine = rds.DatabaseInstanceEngine.sqlServerSe({ version: rds.SqlServerEngineVersion.VER_14_00_3192_2_V1 }); const optionGroup = new rds.OptionGroup(stack, 'OptionGroup', { @@ -244,8 +243,8 @@ nodeunitShim({ s3ImportRole: new iam.Role(stack, 'ImportRole', { assumedBy: new iam.AccountRootPrincipal() }), }); - test.equals(engineConfig.optionGroup, optionGroup); - expect(stack).to(haveResourceLike('AWS::RDS::OptionGroup', { + expect(engineConfig.optionGroup).toEqual(optionGroup); + expect(stack).toHaveResourceLike('AWS::RDS::OptionGroup', { EngineName: 'sqlserver-se', OptionConfigurations: [{ OptionName: 'MY_OPTION_CONFIG', @@ -257,31 +256,31 @@ nodeunitShim({ Value: { 'Fn::GetAtt': ['ImportRole0C9E6F9A', 'Arn'] }, }], }], - })); + }); - test.done(); - }, - }, - 'PostgreSQL engine bindToInstance': { - 'returns no features for older versions'(test: Test) { + }); + }); + + describe('PostgreSQL engine bindToInstance', () => { + test('returns no features for older versions', () => { const engineNewerVersion = rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_10_6 }); const engineConfig = engineNewerVersion.bindToInstance(new cdk.Stack(), {}); - test.equals(engineConfig.features?.s3Import, undefined); - test.equals(engineConfig.features?.s3Export, undefined); + expect(engineConfig.features?.s3Import).toEqual(undefined); + expect(engineConfig.features?.s3Export).toEqual(undefined); + - test.done(); - }, + }); - 'returns s3 import/export feature if the version supports it'(test: Test) { + test('returns s3 import/export feature if the version supports it', () => { const engineNewerVersion = rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_13_3 }); const engineConfig = engineNewerVersion.bindToInstance(new cdk.Stack(), {}); - test.equals(engineConfig.features?.s3Import, 's3Import'); - test.equals(engineConfig.features?.s3Export, 's3Export'); + expect(engineConfig.features?.s3Import).toEqual('s3Import'); + expect(engineConfig.features?.s3Export).toEqual('s3Export'); + - test.done(); - }, - }, + }); + }); }); diff --git a/packages/@aws-cdk/aws-rds/test/option-group.test.ts b/packages/@aws-cdk/aws-rds/test/option-group.test.ts index 2a058411a72ab..77a394a1fa6a7 100644 --- a/packages/@aws-cdk/aws-rds/test/option-group.test.ts +++ b/packages/@aws-cdk/aws-rds/test/option-group.test.ts @@ -1,11 +1,10 @@ -import { expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { DatabaseInstanceEngine, OptionGroup, OracleEngineVersion, OracleLegacyEngineVersion } from '../lib'; -nodeunitShim({ - 'create an option group'(test: Test) { +describe('option group', () => { + test('create an option group', () => { // GIVEN const stack = new cdk.Stack(); @@ -22,7 +21,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::OptionGroup', { + expect(stack).toHaveResource('AWS::RDS::OptionGroup', { EngineName: 'oracle-se1', MajorEngineVersion: '11.2', OptionGroupDescription: 'Option group for oracle-se1 11.2', @@ -31,12 +30,12 @@ nodeunitShim({ 
OptionName: 'XMLDB', }, ], - })); + }); + - test.done(); - }, + }); - 'option group with new security group'(test: Test) { + test('option group with new security group', () => { // GIVEN const stack = new cdk.Stack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -57,7 +56,7 @@ nodeunitShim({ optionGroup.optionConnections.OEM.connections.allowDefaultPortFromAnyIpv4(); // THEN - expect(stack).to(haveResource('AWS::RDS::OptionGroup', { + expect(stack).toHaveResource('AWS::RDS::OptionGroup', { EngineName: 'oracle-se', MajorEngineVersion: '11.2', OptionGroupDescription: 'Option group for oracle-se 11.2', @@ -75,9 +74,9 @@ nodeunitShim({ ], }, ], - })); + }); - expect(stack).to(haveResource('AWS::EC2::SecurityGroup', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroup', { GroupDescription: 'Security group for OEM option', SecurityGroupIngress: [ { @@ -91,12 +90,12 @@ nodeunitShim({ VpcId: { Ref: 'VPCB9E5F0B4', }, - })); + }); - test.done(); - }, - 'option group with existing security group'(test: Test) { + }); + + test('option group with existing security group', () => { // GIVEN const stack = new cdk.Stack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -118,7 +117,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::OptionGroup', { + expect(stack).toHaveResource('AWS::RDS::OptionGroup', { EngineName: 'oracle-se', MajorEngineVersion: '11.2', OptionGroupDescription: 'Option group for oracle-se 11.2', @@ -136,17 +135,17 @@ nodeunitShim({ ], }, ], - })); + }); - test.done(); - }, - 'throws when using an option with port and no vpc'(test: Test) { + }); + + test('throws when using an option with port and no vpc', () => { // GIVEN const stack = new cdk.Stack(); // THEN - test.throws(() => new OptionGroup(stack, 'Options', { + expect(() => new OptionGroup(stack, 'Options', { engine: DatabaseInstanceEngine.oracleSe2({ version: OracleEngineVersion.VER_12_1, }), @@ -156,8 +155,8 @@ nodeunitShim({ port: 1158, }, ], - }), /`port`.*`vpc`/); + })).toThrow(/`port`.*`vpc`/); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/aws-rds/test/parameter-group.test.ts b/packages/@aws-cdk/aws-rds/test/parameter-group.test.ts index aedfe0a005da6..bf8e789aee0ad 100644 --- a/packages/@aws-cdk/aws-rds/test/parameter-group.test.ts +++ b/packages/@aws-cdk/aws-rds/test/parameter-group.test.ts @@ -1,10 +1,9 @@ -import { countResources, expect, haveResource } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { DatabaseClusterEngine, ParameterGroup } from '../lib'; -nodeunitShim({ - "does not create a parameter group if it wasn't bound to a cluster or instance"(test: Test) { +describe('parameter group', () => { + test("does not create a parameter group if it wasn't bound to a cluster or instance", () => { // GIVEN const stack = new cdk.Stack(); @@ -18,13 +17,13 @@ nodeunitShim({ }); // THEN - expect(stack).to(countResources('AWS::RDS::DBParameterGroup', 0)); - expect(stack).to(countResources('AWS::RDS::DBClusterParameterGroup', 0)); + expect(stack).toCountResources('AWS::RDS::DBParameterGroup', 0); + expect(stack).toCountResources('AWS::RDS::DBClusterParameterGroup', 0); - test.done(); - }, - 'create a parameter group when bound to an instance'(test: Test) { + }); + + test('create a parameter group when bound to an instance', () => { // GIVEN const stack = new cdk.Stack(); @@ -39,18 +38,18 @@ nodeunitShim({ parameterGroup.bindToInstance({}); // THEN - 
expect(stack).to(haveResource('AWS::RDS::DBParameterGroup', { + expect(stack).toHaveResource('AWS::RDS::DBParameterGroup', { Description: 'desc', Family: 'aurora5.6', Parameters: { key: 'value', }, - })); + }); - test.done(); - }, - 'create a parameter group when bound to a cluster'(test: Test) { + }); + + test('create a parameter group when bound to a cluster', () => { // GIVEN const stack = new cdk.Stack(); @@ -65,18 +64,18 @@ nodeunitShim({ parameterGroup.bindToCluster({}); // THEN - expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', { + expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', { Description: 'desc', Family: 'aurora5.6', Parameters: { key: 'value', }, - })); + }); - test.done(); - }, - 'creates 2 parameter groups when bound to a cluster and an instance'(test: Test) { + }); + + test('creates 2 parameter groups when bound to a cluster and an instance', () => { // GIVEN const stack = new cdk.Stack(); @@ -92,13 +91,13 @@ nodeunitShim({ parameterGroup.bindToInstance({}); // THEN - expect(stack).to(countResources('AWS::RDS::DBParameterGroup', 1)); - expect(stack).to(countResources('AWS::RDS::DBClusterParameterGroup', 1)); + expect(stack).toCountResources('AWS::RDS::DBParameterGroup', 1); + expect(stack).toCountResources('AWS::RDS::DBClusterParameterGroup', 1); + - test.done(); - }, + }); - 'Add an additional parameter to an existing parameter group'(test: Test) { + test('Add an additional parameter to an existing parameter group', () => { // GIVEN const stack = new cdk.Stack(); @@ -115,15 +114,15 @@ nodeunitShim({ clusterParameterGroup.addParameter('key2', 'value2'); // THEN - expect(stack).to(haveResource('AWS::RDS::DBClusterParameterGroup', { + expect(stack).toHaveResource('AWS::RDS::DBClusterParameterGroup', { Description: 'desc', Family: 'aurora5.6', Parameters: { key1: 'value1', key2: 'value2', }, - })); + }); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/aws-rds/test/proxy.test.ts b/packages/@aws-cdk/aws-rds/test/proxy.test.ts index 5a4a2c2606959..5e2a65f1f0352 100644 --- a/packages/@aws-cdk/aws-rds/test/proxy.test.ts +++ b/packages/@aws-cdk/aws-rds/test/proxy.test.ts @@ -1,9 +1,9 @@ -import { ABSENT, expect, haveResourceLike, ResourcePart } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; +import { ABSENT, ResourcePart } from '@aws-cdk/assert-internal'; import * as ec2 from '@aws-cdk/aws-ec2'; import { AccountPrincipal, Role } from '@aws-cdk/aws-iam'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as rds from '../lib'; let stack: cdk.Stack; @@ -11,15 +11,15 @@ let vpc: ec2.IVpc; let importedDbProxy: rds.IDatabaseProxy; -nodeunitShim({ - 'setUp'(cb: () => void) { +describe('proxy', () => { + beforeEach(() => { stack = new cdk.Stack(); vpc = new ec2.Vpc(stack, 'VPC'); - cb(); - }, - 'create a DB proxy from an instance'(test: Test) { + }); + + test('create a DB proxy from an instance', () => { // GIVEN const instance = new rds.DatabaseInstance(stack, 'Instance', { engine: rds.DatabaseInstanceEngine.MYSQL, @@ -34,7 +34,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBProxy', { + expect(stack).toHaveResourceLike('AWS::RDS::DBProxy', { Auth: [ { AuthScheme: 'SECRETS', @@ -61,10 +61,10 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', }, ], - })); + }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBProxyTargetGroup', { + 
expect(stack).toHaveResourceLike('AWS::RDS::DBProxyTargetGroup', { DBProxyName: { Ref: 'ProxyCB0DFB71', }, @@ -75,12 +75,12 @@ nodeunitShim({ }, ], TargetGroupName: 'default', - })); + }); + - test.done(); - }, + }); - 'create a DB proxy from a cluster'(test: Test) { + test('create a DB proxy from a cluster', () => { // GIVEN const cluster = new rds.DatabaseCluster(stack, 'Database', { engine: rds.DatabaseClusterEngine.auroraPostgres({ @@ -97,7 +97,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBProxy', { + expect(stack).toHaveResourceLike('AWS::RDS::DBProxy', { Auth: [ { AuthScheme: 'SECRETS', @@ -124,8 +124,8 @@ nodeunitShim({ Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A', }, ], - })); - expect(stack).to(haveResourceLike('AWS::RDS::DBProxyTargetGroup', { + }); + expect(stack).toHaveResourceLike('AWS::RDS::DBProxyTargetGroup', { DBProxyName: { Ref: 'ProxyCB0DFB71', }, @@ -137,8 +137,8 @@ nodeunitShim({ ], DBInstanceIdentifiers: ABSENT, TargetGroupName: 'default', - })); - expect(stack).to(haveResourceLike('AWS::EC2::SecurityGroupIngress', { + }); + expect(stack).toHaveResourceLike('AWS::EC2::SecurityGroupIngress', { IpProtocol: 'tcp', Description: 'Allow connections to the database Cluster from the Proxy', FromPort: { @@ -153,12 +153,12 @@ nodeunitShim({ ToPort: { 'Fn::GetAtt': ['DatabaseB269D8BB', 'Endpoint.Port'], }, - })); + }); - test.done(); - }, - 'One or more secrets are required.'(test: Test) { + }); + + test('One or more secrets are required.', () => { // GIVEN const cluster = new rds.DatabaseCluster(stack, 'Database', { engine: rds.DatabaseClusterEngine.auroraPostgres({ version: rds.AuroraPostgresEngineVersion.VER_10_7 }), @@ -166,34 +166,34 @@ nodeunitShim({ }); // WHEN - test.throws(() => { + expect(() => { new rds.DatabaseProxy(stack, 'Proxy', { proxyTarget: rds.ProxyTarget.fromCluster(cluster), secrets: [], // No secret vpc, }); - }, 'One or more secrets are required.'); + }).toThrow('One or more secrets are required.'); + - test.done(); - }, + }); - 'fails when trying to create a proxy for a target without an engine'(test: Test) { + test('fails when trying to create a proxy for a target without an engine', () => { const importedCluster = rds.DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Cluster', { clusterIdentifier: 'my-cluster', }); - test.throws(() => { + expect(() => { new rds.DatabaseProxy(stack, 'Proxy', { proxyTarget: rds.ProxyTarget.fromCluster(importedCluster), vpc, secrets: [new secretsmanager.Secret(stack, 'Secret')], }); - }, /Could not determine engine for proxy target 'Default\/Cluster'\. Please provide it explicitly when importing the resource/); + }).toThrow(/Could not determine engine for proxy target 'Default\/Cluster'\. 
Please provide it explicitly when importing the resource/); + - test.done(); - }, + }); - "fails when trying to create a proxy for a target with an engine that doesn't have engineFamily"(test: Test) { + test("fails when trying to create a proxy for a target with an engine that doesn't have engineFamily", () => { const importedInstance = rds.DatabaseInstance.fromDatabaseInstanceAttributes(stack, 'Cluster', { instanceIdentifier: 'my-instance', instanceEndpointAddress: 'instance-address', @@ -204,18 +204,18 @@ nodeunitShim({ }), }); - test.throws(() => { + expect(() => { new rds.DatabaseProxy(stack, 'Proxy', { proxyTarget: rds.ProxyTarget.fromInstance(importedInstance), vpc, secrets: [new secretsmanager.Secret(stack, 'Secret')], }); - }, /Engine 'mariadb-10\.0\.24' does not support proxies/); + }).toThrow(/Engine 'mariadb-10\.0\.24' does not support proxies/); - test.done(); - }, - 'correctly creates a proxy for an imported Cluster if its engine is known'(test: Test) { + }); + + test('correctly creates a proxy for an imported Cluster if its engine is known', () => { const importedCluster = rds.DatabaseCluster.fromDatabaseClusterAttributes(stack, 'Cluster', { clusterIdentifier: 'my-cluster', engine: rds.DatabaseClusterEngine.auroraPostgres({ @@ -230,24 +230,24 @@ nodeunitShim({ secrets: [new secretsmanager.Secret(stack, 'Secret')], }); - expect(stack).to(haveResourceLike('AWS::RDS::DBProxy', { + expect(stack).toHaveResourceLike('AWS::RDS::DBProxy', { EngineFamily: 'POSTGRESQL', - })); - expect(stack).to(haveResourceLike('AWS::RDS::DBProxyTargetGroup', { + }); + expect(stack).toHaveResourceLike('AWS::RDS::DBProxyTargetGroup', { DBClusterIdentifiers: [ 'my-cluster', ], - })); - expect(stack).to(haveResourceLike('AWS::EC2::SecurityGroup', { + }); + expect(stack).toHaveResourceLike('AWS::EC2::SecurityGroup', { GroupDescription: 'SecurityGroup for Database Proxy', VpcId: { Ref: 'VPCB9E5F0B4' }, - })); + }); + - test.done(); - }, + }); - 'imported Proxies': { - 'setUp'(cb: () => void) { + describe('imported Proxies', () => { + beforeEach(() => { importedDbProxy = rds.DatabaseProxy.fromDatabaseProxyAttributes(stack, 'Proxy', { dbProxyName: 'my-proxy', dbProxyArn: 'arn:aws:rds:us-east-1:123456789012:db-proxy:prx-1234abcd', @@ -255,10 +255,10 @@ nodeunitShim({ securityGroups: [], }); - cb(); - }, - 'grant rds-db:connect in grantConnect() with a dbUser explicitly passed'(test: Test) { + }); + + test('grant rds-db:connect in grantConnect() with a dbUser explicitly passed', () => { // WHEN const role = new Role(stack, 'DBProxyRole', { assumedBy: new AccountPrincipal(stack.account), @@ -267,7 +267,7 @@ nodeunitShim({ importedDbProxy.grantConnect(role, databaseUser); // THEN - expect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Effect: 'Allow', @@ -286,27 +286,27 @@ nodeunitShim({ }], Version: '2012-10-17', }, - })); + }); + - test.done(); - }, + }); - 'throws when grantConnect() is used without a dbUser'(test: Test) { + test('throws when grantConnect() is used without a dbUser', () => { // WHEN const role = new Role(stack, 'DBProxyRole', { assumedBy: new AccountPrincipal(stack.account), }); // THEN - test.throws(() => { + expect(() => { importedDbProxy.grantConnect(role); - }, /For imported Database Proxies, the dbUser is required in grantConnect/); + }).toThrow(/For imported Database Proxies, the dbUser is required in grantConnect/); - test.done(); - }, - }, - 'new Proxy with a single Secret can use grantConnect() 
without a dbUser passed'(test: Test) { + }); + }); + + test('new Proxy with a single Secret can use grantConnect() without a dbUser passed', () => { // GIVEN const cluster = new rds.DatabaseCluster(stack, 'Database', { engine: rds.DatabaseClusterEngine.AURORA, @@ -326,7 +326,7 @@ nodeunitShim({ proxy.grantConnect(role); // THEN - expect(stack).to(haveResourceLike('AWS::IAM::Policy', { + expect(stack).toHaveResourceLike('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Effect: 'Allow', @@ -359,12 +359,12 @@ nodeunitShim({ }], Version: '2012-10-17', }, - })); + }); - test.done(); - }, - 'new Proxy with multiple Secrets cannot use grantConnect() without a dbUser passed'(test: Test) { + }); + + test('new Proxy with multiple Secrets cannot use grantConnect() without a dbUser passed', () => { // GIVEN const cluster = new rds.DatabaseCluster(stack, 'Database', { engine: rds.DatabaseClusterEngine.AURORA, @@ -386,14 +386,14 @@ nodeunitShim({ }); // THEN - test.throws(() => { + expect(() => { proxy.grantConnect(role); - }, /When the Proxy contains multiple Secrets, you must pass a dbUser explicitly to grantConnect/); + }).toThrow(/When the Proxy contains multiple Secrets, you must pass a dbUser explicitly to grantConnect/); + - test.done(); - }, + }); - 'DBProxyTargetGroup should have dependency on the proxy targets'(test: Test) { + test('DBProxyTargetGroup should have dependency on the proxy targets', () => { // GIVEN const cluster = new rds.DatabaseCluster(stack, 'cluster', { engine: rds.DatabaseClusterEngine.AURORA, @@ -410,7 +410,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBProxyTargetGroup', { + expect(stack).toHaveResourceLike('AWS::RDS::DBProxyTargetGroup', { Properties: { DBProxyName: { Ref: 'proxy3A1DA9C7', @@ -427,8 +427,8 @@ nodeunitShim({ 'clusterSecurityGroupF441DCEA', 'clusterSubnets81E3593F', ], - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/aws-rds/test/serverless-cluster.test.ts b/packages/@aws-cdk/aws-rds/test/serverless-cluster.test.ts index ba157ef9aa6b1..dff8d035b78a3 100644 --- a/packages/@aws-cdk/aws-rds/test/serverless-cluster.test.ts +++ b/packages/@aws-cdk/aws-rds/test/serverless-cluster.test.ts @@ -1,14 +1,14 @@ -import { expect, haveResource, haveResourceLike, ResourcePart, SynthUtils } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; +import { ResourcePart, SynthUtils } from '@aws-cdk/assert-internal'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as iam from '@aws-cdk/aws-iam'; import * as kms from '@aws-cdk/aws-kms'; import * as cdk from '@aws-cdk/core'; import * as cxapi from '@aws-cdk/cx-api'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { AuroraPostgresEngineVersion, ServerlessCluster, DatabaseClusterEngine, ParameterGroup, AuroraCapacityUnit, DatabaseSecret } from '../lib'; -nodeunitShim({ - 'can create a Serverless Cluster with Aurora Postgres database engine'(test: Test) { +describe('serverless cluster', () => { + test('can create a Serverless Cluster with Aurora Postgres database engine', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -25,7 +25,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { Properties: { Engine: 'aurora-postgresql', DBClusterParameterGroupName: 'default.aurora-postgresql10', @@ -47,12 +47,12 @@ nodeunitShim({ }, DeletionPolicy: 
'Snapshot', UpdateReplacePolicy: 'Snapshot', - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); - test.done(); - }, - 'can create a Serverless Cluster with Aurora Mysql database engine'(test: Test) { + }); + + test('can create a Serverless Cluster with Aurora Mysql database engine', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -64,7 +64,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { Properties: { Engine: 'aurora-mysql', DBClusterParameterGroupName: 'default.aurora-mysql5.7', @@ -108,11 +108,11 @@ nodeunitShim({ }, DeletionPolicy: 'Snapshot', UpdateReplacePolicy: 'Snapshot', - }, ResourcePart.CompleteDefinition)); - test.done(); - }, + }, ResourcePart.CompleteDefinition); + + }); - 'can create a Serverless cluster with imported vpc and security group'(test: Test) { + test('can create a Serverless cluster with imported vpc and security group', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { @@ -129,7 +129,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { Engine: 'aurora-postgresql', DBClusterParameterGroupName: 'default.aurora-postgresql10', EngineMode: 'serverless', @@ -159,12 +159,12 @@ nodeunitShim({ ], }, VpcSecurityGroupIds: ['SecurityGroupId12345'], - })); + }); - test.done(); - }, - "sets the retention policy of the SubnetGroup to 'Retain' if the Serverless Cluster is created with 'Retain'"(test: Test) { + }); + + test("sets the retention policy of the SubnetGroup to 'Retain' if the Serverless Cluster is created with 'Retain'", () => { const stack = new cdk.Stack(); const vpc = new ec2.Vpc(stack, 'Vpc'); @@ -174,15 +174,15 @@ nodeunitShim({ removalPolicy: cdk.RemovalPolicy.RETAIN, }); - expect(stack).to(haveResourceLike('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResourceLike('AWS::RDS::DBSubnetGroup', { DeletionPolicy: 'Retain', UpdateReplacePolicy: 'Retain', - }, ResourcePart.CompleteDefinition)); + }, ResourcePart.CompleteDefinition); + - test.done(); - }, + }); - 'creates a secret when master credentials are not specified'(test: Test) { + test('creates a secret when master credentials are not specified', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -198,7 +198,7 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { MasterUsername: { 'Fn::Join': [ '', @@ -223,21 +223,21 @@ nodeunitShim({ ], ], }, - })); + }); - expect(stack).to(haveResource('AWS::SecretsManager::Secret', { + expect(stack).toHaveResource('AWS::SecretsManager::Secret', { GenerateSecretString: { ExcludeCharacters: '"@/\\', GenerateStringKey: 'password', PasswordLength: 30, SecretStringTemplate: '{"username":"myuser"}', }, - })); + }); + - test.done(); - }, + }); - 'create an Serverless cluster with custom KMS key for storage'(test: Test) { + test('create an Serverless cluster with custom KMS key for storage', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -250,19 +250,19 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { KmsKeyId: { 'Fn::GetAtt': [ 'Key961B73FD', 'Arn', ], }, - })); + }); - test.done(); - }, - 'create a cluster using a specific version of 
Postgresql'(test: Test) { + }); + + test('create a cluster using a specific version of Postgresql', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -276,16 +276,16 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { Engine: 'aurora-postgresql', EngineMode: 'serverless', EngineVersion: '10.7', - })); + }); + - test.done(); - }, + }); - 'cluster exposes different read and write endpoints'(test: Test) { + test('cluster exposes different read and write endpoints', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -300,15 +300,13 @@ nodeunitShim({ }); // THEN - test.notDeepEqual( - stack.resolve(cluster.clusterEndpoint), - stack.resolve(cluster.clusterReadEndpoint), - ); + expect(stack.resolve(cluster.clusterEndpoint)).not + .toEqual(stack.resolve(cluster.clusterReadEndpoint)); - test.done(); - }, - 'imported cluster with imported security group honors allowAllOutbound'(test: Test) { + }); + + test('imported cluster with imported security group honors allowAllOutbound', () => { // GIVEN const stack = testStack(); @@ -326,39 +324,39 @@ nodeunitShim({ cluster.connections.allowToAnyIpv4(ec2.Port.tcp(443)); // THEN - expect(stack).to(haveResource('AWS::EC2::SecurityGroupEgress', { + expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', { GroupId: 'sg-123456789', - })); + }); + - test.done(); - }, + }); - 'can import a serverless cluster with minimal attributes'(test: Test) { + test('can import a serverless cluster with minimal attributes', () => { const stack = testStack(); const cluster = ServerlessCluster.fromServerlessClusterAttributes(stack, 'Database', { clusterIdentifier: 'identifier', }); - test.equals(cluster.clusterIdentifier, 'identifier'); + expect(cluster.clusterIdentifier).toEqual('identifier'); - test.done(); - }, - 'minimal imported cluster throws on accessing attributes for missing parameters'(test: Test) { + }); + + test('minimal imported cluster throws on accessing attributes for missing parameters', () => { const stack = testStack(); const cluster = ServerlessCluster.fromServerlessClusterAttributes(stack, 'Database', { clusterIdentifier: 'identifier', }); - test.throws(() => cluster.clusterEndpoint, /Cannot access `clusterEndpoint` of an imported cluster/); - test.throws(() => cluster.clusterReadEndpoint, /Cannot access `clusterReadEndpoint` of an imported cluster/); + expect(() => cluster.clusterEndpoint).toThrow(/Cannot access `clusterEndpoint` of an imported cluster/); + expect(() => cluster.clusterReadEndpoint).toThrow(/Cannot access `clusterReadEndpoint` of an imported cluster/); + - test.done(); - }, + }); - 'imported cluster can access properties if attributes are provided'(test: Test) { + test('imported cluster can access properties if attributes are provided', () => { const stack = testStack(); const cluster = ServerlessCluster.fromServerlessClusterAttributes(stack, 'Database', { @@ -371,13 +369,13 @@ nodeunitShim({ })], }); - test.equals(cluster.clusterEndpoint.socketAddress, 'addr:3306'); - test.equals(cluster.clusterReadEndpoint.socketAddress, 'reader-address:3306'); + expect(cluster.clusterEndpoint.socketAddress).toEqual('addr:3306'); + expect(cluster.clusterReadEndpoint.socketAddress).toEqual('reader-address:3306'); + - test.done(); - }, + }); - 'throws when trying to add rotation to a serverless cluster without secret'(test: Test) { + test('throws when trying to add rotation to a serverless 
cluster without secret', () => { // GIVEN const stack = new cdk.Stack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -393,12 +391,12 @@ nodeunitShim({ }); // THEN - test.throws(() => cluster.addRotationSingleUser(), /without secret/); + expect(() => cluster.addRotationSingleUser()).toThrow(/without secret/); - test.done(); - }, - 'throws when trying to add single user rotation multiple times'(test: Test) { + }); + + test('throws when trying to add single user rotation multiple times', () => { // GIVEN const stack = new cdk.Stack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -412,12 +410,12 @@ nodeunitShim({ cluster.addRotationSingleUser(); // THEN - test.throws(() => cluster.addRotationSingleUser(), /A single user rotation was already added to this cluster/); + expect(() => cluster.addRotationSingleUser()).toThrow(/A single user rotation was already added to this cluster/); + - test.done(); - }, + }); - 'can set deletion protection'(test: Test) { + test('can set deletion protection', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -430,14 +428,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', { + expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', { DeletionProtection: true, - })); + }); - test.done(); - }, - 'can set backup retention'(test: Test) { + }); + + test('can set backup retention', () => { // GIVEN const stack = testStack(); const vpc = new ec2.Vpc(stack, 'VPC'); @@ -450,14 +448,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', { + expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', { BackupRetentionPeriod: 2, - })); + }); + - test.done(); - }, + }); - 'does not throw (but adds a node error) if a (dummy) VPC does not have sufficient subnets'(test: Test) { + test('does not throw (but adds a node error) if a (dummy) VPC does not have sufficient subnets', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -474,12 +472,12 @@ nodeunitShim({ // THEN const art = SynthUtils.synthesize(stack); const meta = art.findMetadataByType('aws:cdk:error'); - test.equal(meta[0].data, 'Cluster requires at least 2 subnets, got 0'); + expect(meta[0].data).toEqual('Cluster requires at least 2 subnets, got 0'); - test.done(); - }, - 'can set scaling configuration'(test: Test) { + }); + + test('can set scaling configuration', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -496,19 +494,19 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { ScalingConfiguration: { AutoPause: true, MaxCapacity: 128, MinCapacity: 1, SecondsUntilAutoPause: 600, }, - })); + }); + - test.done(); - }, + }); - 'can enable Data API'(test: Test) { + test('can enable Data API', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -521,14 +519,14 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { EnableHttpEndpoint: true, - })); + }); + - test.done(); - }, + }); - 'default scaling options'(test: Test) { + test('default scaling options', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -541,16 +539,16 @@ nodeunitShim({ }); //THEN - 
expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { ScalingConfiguration: { AutoPause: true, }, - })); + }); - test.done(); - }, - 'auto pause is disabled if a time of zero is specified'(test: Test) { + }); + + test('auto pause is disabled if a time of zero is specified', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -565,72 +563,72 @@ nodeunitShim({ }); //THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { ScalingConfiguration: { AutoPause: false, }, - })); + }); + - test.done(); - }, + }); - 'throws when invalid auto pause time is specified'(test: Test) { + test('throws when invalid auto pause time is specified', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); // WHEN - test.throws(() => + expect(() => new ServerlessCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, vpc, scaling: { autoPause: cdk.Duration.seconds(30), }, - }), /auto pause time must be between 5 minutes and 1 day./); + })).toThrow(/auto pause time must be between 5 minutes and 1 day./); - test.throws(() => + expect(() => new ServerlessCluster(stack, 'Another Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, vpc, scaling: { autoPause: cdk.Duration.days(2), }, - }), /auto pause time must be between 5 minutes and 1 day./); + })).toThrow(/auto pause time must be between 5 minutes and 1 day./); - test.done(); - }, - 'throws when invalid backup retention period is specified'(test: Test) { + }); + + test('throws when invalid backup retention period is specified', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); // WHEN - test.throws(() => + expect(() => new ServerlessCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, vpc, backupRetention: cdk.Duration.days(0), - }), /backup retention period must be between 1 and 35 days. received: 0/); + })).toThrow(/backup retention period must be between 1 and 35 days. received: 0/); - test.throws(() => + expect(() => new ServerlessCluster(stack, 'Another Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, vpc, backupRetention: cdk.Duration.days(36), - }), /backup retention period must be between 1 and 35 days. received: 36/); + })).toThrow(/backup retention period must be between 1 and 35 days. 
received: 36/); + - test.done(); - }, + }); - 'throws error when min capacity is greater than max capacity'(test: Test) { + test('throws error when min capacity is greater than max capacity', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); // WHEN - test.throws(() => + expect(() => new ServerlessCluster(stack, 'Database', { engine: DatabaseClusterEngine.AURORA_MYSQL, vpc, @@ -638,12 +636,12 @@ nodeunitShim({ minCapacity: AuroraCapacityUnit.ACU_2, maxCapacity: AuroraCapacityUnit.ACU_1, }, - }), /maximum capacity must be greater than or equal to minimum capacity./); + })).toThrow(/maximum capacity must be greater than or equal to minimum capacity./); + - test.done(); - }, + }); - 'check that clusterArn property works'(test: Test) { + test('check that clusterArn property works', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -660,7 +658,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(cluster.clusterArn), { + expect(stack.resolve(cluster.clusterArn)).toEqual({ 'Fn::Join': [ '', [ @@ -671,10 +669,10 @@ nodeunitShim({ ], ], }); - test.done(); - }, - 'can grant Data API access'(test: Test) { + }); + + test('can grant Data API access', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -689,7 +687,7 @@ nodeunitShim({ cluster.grantDataApiAccess(user); // THEN - expect(stack).to(haveResource('AWS::IAM::Policy', { + expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -722,12 +720,12 @@ nodeunitShim({ Ref: 'User00B015A1', }, ], - })); + }); + - test.done(); - }, + }); - 'can grant Data API access on imported cluster with given secret'(test: Test) { + test('can grant Data API access on imported cluster with given secret', () => { // GIVEN const stack = testStack(); const secret = new DatabaseSecret(stack, 'Secret', { @@ -744,7 +742,7 @@ nodeunitShim({ cluster.grantDataApiAccess(user); // THEN - expect(stack).to(haveResource('AWS::IAM::Policy', { + expect(stack).toHaveResource('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -777,12 +775,12 @@ nodeunitShim({ Ref: 'User00B015A1', }, ], - })); + }); + - test.done(); - }, + }); - 'grant Data API access enables the Data API'(test: Test) { + test('grant Data API access enables the Data API', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -796,14 +794,14 @@ nodeunitShim({ cluster.grantDataApiAccess(user); //THEN - expect(stack).to(haveResource('AWS::RDS::DBCluster', { + expect(stack).toHaveResource('AWS::RDS::DBCluster', { EnableHttpEndpoint: true, - })); + }); - test.done(); - }, - 'grant Data API access throws if the Data API is disabled'(test: Test) { + }); + + test('grant Data API access throws if the Data API is disabled', () => { // GIVEN const stack = testStack(); const vpc = ec2.Vpc.fromLookup(stack, 'VPC', { isDefault: true }); @@ -815,12 +813,12 @@ nodeunitShim({ const user = new iam.User(stack, 'User'); // WHEN - test.throws(() => cluster.grantDataApiAccess(user), /Cannot grant Data API access when the Data API is disabled/); + expect(() => cluster.grantDataApiAccess(user)).toThrow(/Cannot grant Data API access when the Data API is disabled/); + - test.done(); - }, + }); - 'changes the case of the cluster identifier if the lowercaseDbIdentifier feature flag is enabled'(test: Test) { + test('changes the case of the cluster identifier if 
the lowercaseDbIdentifier feature flag is enabled', () => { // GIVEN const app = new cdk.App({ context: { [cxapi.RDS_LOWERCASE_DB_IDENTIFIER]: true }, @@ -837,14 +835,14 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', { + expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', { DBClusterIdentifier: clusterIdentifier.toLowerCase(), - })); + }); - test.done(); - }, - 'does not change the case of the cluster identifier if the lowercaseDbIdentifier feature flag is disabled'(test: Test) { + }); + + test('does not change the case of the cluster identifier if the lowercaseDbIdentifier feature flag is disabled', () => { // GIVEN const app = new cdk.App({ context: { '@aws-cdk/aws-rds:lowercaseDbIdentifier': false } }); const stack = testStack(app); @@ -859,12 +857,12 @@ nodeunitShim({ }); // THEN - expect(stack).to(haveResourceLike('AWS::RDS::DBCluster', { + expect(stack).toHaveResourceLike('AWS::RDS::DBCluster', { DBClusterIdentifier: clusterIdentifier, - })); + }); + - test.done(); - }, + }); }); function testStack(app?: cdk.App, id?: string): cdk.Stack { diff --git a/packages/@aws-cdk/aws-rds/test/sql-server/sql-server.instance-engine.test.ts b/packages/@aws-cdk/aws-rds/test/sql-server/sql-server.instance-engine.test.ts index fb815c95ebee6..72615d08f2093 100644 --- a/packages/@aws-cdk/aws-rds/test/sql-server/sql-server.instance-engine.test.ts +++ b/packages/@aws-cdk/aws-rds/test/sql-server/sql-server.instance-engine.test.ts @@ -1,11 +1,10 @@ -import { expect, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as core from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as rds from '../../lib'; -nodeunitShim({ - 'SQL Server instance engine': { - "has ParameterGroup family ending in '11.0' for major version 11"(test: Test) { +describe('sql server instance engine', () => { + describe('SQL Server instance engine', () => { + test("has ParameterGroup family ending in '11.0' for major version 11", () => { const stack = new core.Stack(); new rds.ParameterGroup(stack, 'ParameterGroup', { engine: rds.DatabaseInstanceEngine.sqlServerWeb({ @@ -13,14 +12,14 @@ nodeunitShim({ }), }).bindToInstance({}); - expect(stack).to(haveResourceLike('AWS::RDS::DBParameterGroup', { + expect(stack).toHaveResourceLike('AWS::RDS::DBParameterGroup', { Family: 'sqlserver-web-11.0', - })); + }); + - test.done(); - }, + }); - "has MajorEngineVersion ending in '11.00' for major version 11"(test: Test) { + test("has MajorEngineVersion ending in '11.00' for major version 11", () => { const stack = new core.Stack(); new rds.OptionGroup(stack, 'OptionGroup', { engine: rds.DatabaseInstanceEngine.sqlServerWeb({ @@ -36,11 +35,11 @@ nodeunitShim({ ], }); - expect(stack).to(haveResourceLike('AWS::RDS::OptionGroup', { + expect(stack).toHaveResourceLike('AWS::RDS::OptionGroup', { MajorEngineVersion: '11.00', - })); + }); + - test.done(); - }, - }, + }); + }); }); diff --git a/packages/@aws-cdk/aws-rds/test/subnet-group.test.ts b/packages/@aws-cdk/aws-rds/test/subnet-group.test.ts index 565c10c6b6d92..1dc9718258deb 100644 --- a/packages/@aws-cdk/aws-rds/test/subnet-group.test.ts +++ b/packages/@aws-cdk/aws-rds/test/subnet-group.test.ts @@ -1,37 +1,36 @@ -import { expect, haveResource, haveResourceLike } from '@aws-cdk/assert-internal'; +import '@aws-cdk/assert-internal/jest'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as cdk from '@aws-cdk/core'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as 
rds from '../lib'; let stack: cdk.Stack; let vpc: ec2.IVpc; -nodeunitShim({ - 'setUp'(cb: () => void) { +describe('subnet group', () => { + beforeEach(() => { stack = new cdk.Stack(); vpc = new ec2.Vpc(stack, 'VPC'); - cb(); - }, - 'creates a subnet group from minimal properties'(test: Test) { + }); + + test('creates a subnet group from minimal properties', () => { new rds.SubnetGroup(stack, 'Group', { description: 'MyGroup', vpc, }); - expect(stack).to(haveResource('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResource('AWS::RDS::DBSubnetGroup', { DBSubnetGroupDescription: 'MyGroup', SubnetIds: [ { Ref: 'VPCPrivateSubnet1Subnet8BCA10E0' }, { Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A' }, ], - })); + }); - test.done(); - }, - 'creates a subnet group from all properties'(test: Test) { + }); + + test('creates a subnet group from all properties', () => { new rds.SubnetGroup(stack, 'Group', { description: 'My Shared Group', subnetGroupName: 'SharedGroup', @@ -39,19 +38,19 @@ nodeunitShim({ vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE }, }); - expect(stack).to(haveResource('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResource('AWS::RDS::DBSubnetGroup', { DBSubnetGroupDescription: 'My Shared Group', DBSubnetGroupName: 'sharedgroup', SubnetIds: [ { Ref: 'VPCPrivateSubnet1Subnet8BCA10E0' }, { Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A' }, ], - })); + }); - test.done(); - }, - 'correctly creates a subnet group with a deploy-time value for its name'(test: Test) { + }); + + test('correctly creates a subnet group with a deploy-time value for its name', () => { const parameter = new cdk.CfnParameter(stack, 'Parameter'); new rds.SubnetGroup(stack, 'Group', { description: 'My Shared Group', @@ -60,57 +59,57 @@ nodeunitShim({ vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE }, }); - expect(stack).to(haveResourceLike('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResourceLike('AWS::RDS::DBSubnetGroup', { DBSubnetGroupName: { Ref: 'Parameter', }, - })); + }); + - test.done(); - }, + }); - 'subnet selection': { - 'defaults to private subnets'(test: Test) { + describe('subnet selection', () => { + test('defaults to private subnets', () => { new rds.SubnetGroup(stack, 'Group', { description: 'MyGroup', vpc, }); - expect(stack).to(haveResource('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResource('AWS::RDS::DBSubnetGroup', { DBSubnetGroupDescription: 'MyGroup', SubnetIds: [ { Ref: 'VPCPrivateSubnet1Subnet8BCA10E0' }, { Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A' }, ], - })); + }); + - test.done(); - }, + }); - 'can specify subnet type'(test: Test) { + test('can specify subnet type', () => { new rds.SubnetGroup(stack, 'Group', { description: 'MyGroup', vpc, vpcSubnets: { subnetType: ec2.SubnetType.PUBLIC }, }); - expect(stack).to(haveResource('AWS::RDS::DBSubnetGroup', { + expect(stack).toHaveResource('AWS::RDS::DBSubnetGroup', { DBSubnetGroupDescription: 'MyGroup', SubnetIds: [ { Ref: 'VPCPublicSubnet1SubnetB4246D30' }, { Ref: 'VPCPublicSubnet2Subnet74179F39' }, ], - })); - test.done(); - }, - }, + }); - 'import group by name'(test: Test) { + }); + }); + + test('import group by name', () => { const subnetGroup = rds.SubnetGroup.fromSubnetGroupName(stack, 'Group', 'my-subnet-group'); - test.equals(subnetGroup.subnetGroupName, 'my-subnet-group'); + expect(subnetGroup.subnetGroupName).toEqual('my-subnet-group'); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/core/package.json b/packages/@aws-cdk/core/package.json index f4bfcb141730e..e8865fafd3dc2 100644 --- 
a/packages/@aws-cdk/core/package.json +++ b/packages/@aws-cdk/core/package.json @@ -180,7 +180,7 @@ "cfn2ts": "0.0.0", "fast-check": "^2.17.0", "lodash": "^4.17.21", - "nodeunit-shim": "0.0.0", + "jest": "^26.6.3", "pkglint": "0.0.0", "sinon": "^9.2.4", "ts-mock-imports": "^1.3.7" diff --git a/packages/@aws-cdk/core/test/annotations.test.ts b/packages/@aws-cdk/core/test/annotations.test.ts index 1b0cbf209c528..46e91cf78761a 100644 --- a/packages/@aws-cdk/core/test/annotations.test.ts +++ b/packages/@aws-cdk/core/test/annotations.test.ts @@ -1,17 +1,16 @@ import { CloudAssembly } from '@aws-cdk/cx-api'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Construct, App, Stack } from '../lib'; import { Annotations } from '../lib/annotations'; const restore = process.env.CDK_BLOCK_DEPRECATIONS; -nodeunitShim({ - 'tearDown'(cb: any) { +describe('annotations', () => { + afterEach(() => { process.env.CDK_BLOCK_DEPRECATIONS = restore; // restore to the original value - cb(); - }, - 'addDeprecation() annotates the usage of a deprecated API'(test: Test) { + }); + + test('addDeprecation() annotates the usage of a deprecated API', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'MyStack'); @@ -22,16 +21,16 @@ nodeunitShim({ Annotations.of(c1).addDeprecation('@aws-cdk/core.Construct.node', 'use @aws-cdk.Construct.construct instead'); // THEN - test.deepEqual(getWarnings(app.synth()), [ + expect(getWarnings(app.synth())).toEqual([ { path: '/MyStack/Hello', message: 'The API @aws-cdk/core.Construct.node is deprecated: use @aws-cdk.Construct.construct instead. This API will be removed in the next major release', }, ]); - test.done(); - }, - 'deduplicated per node based on "api"'(test: Test) { + }); + + test('deduplicated per node based on "api"', () => { // GIVEN const app = new App(); const stack1 = new Stack(app, 'MyStack1'); @@ -50,7 +49,7 @@ nodeunitShim({ Annotations.of(c1).addDeprecation('@aws-cdk/core.Construct.node', 'use @aws-cdk.Construct.construct instead'); // THEN - test.deepEqual(getWarnings(app.synth()), [ + expect(getWarnings(app.synth())).toEqual([ { path: '/MyStack1/Hello', message: 'The API @aws-cdk/core.Construct.node is deprecated: use @aws-cdk.Construct.construct instead. This API will be removed in the next major release', @@ -64,10 +63,10 @@ nodeunitShim({ message: 'The API @aws-cdk/core.Construct.node is deprecated: use @aws-cdk.Construct.construct instead. This API will be removed in the next major release', }, ]); - test.done(); - }, - 'CDK_BLOCK_DEPRECATIONS will throw if a deprecated API is used'(test: Test) { + }); + + test('CDK_BLOCK_DEPRECATIONS will throw if a deprecated API is used', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'MyStack'); @@ -75,9 +74,9 @@ nodeunitShim({ // THEN process.env.CDK_BLOCK_DEPRECATIONS = '1'; - test.throws(() => Annotations.of(c1).addDeprecation('foo', 'bar'), /MyStack\/Hello: The API foo is deprecated: bar\. This API will be removed in the next major release/); - test.done(); - }, + expect(() => Annotations.of(c1).addDeprecation('foo', 'bar')).toThrow(/MyStack\/Hello: The API foo is deprecated: bar\. 
This API will be removed in the next major release/); + + }); }); function getWarnings(casm: CloudAssembly) { diff --git a/packages/@aws-cdk/core/test/app.test.ts b/packages/@aws-cdk/core/test/app.test.ts index 3bb178edef56a..c73538067f9ef 100644 --- a/packages/@aws-cdk/core/test/app.test.ts +++ b/packages/@aws-cdk/core/test/app.test.ts @@ -1,6 +1,5 @@ import { ContextProvider } from '@aws-cdk/cloud-assembly-schema'; import * as cxapi from '@aws-cdk/cx-api'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnResource, Construct, DefaultStackSynthesizer, Stack, StackProps } from '../lib'; import { Annotations } from '../lib/annotations'; import { App, AppProps } from '../lib/app'; @@ -46,27 +45,27 @@ function synthStack(name: string, includeMetadata: boolean = false, context?: an return stack; } -nodeunitShim({ - 'synthesizes all stacks and returns synthesis result'(test: Test) { +describe('app', () => { + test('synthesizes all stacks and returns synthesis result', () => { const response = synth(); delete (response as any).dir; - test.deepEqual(response.stacks.length, 2); + expect(response.stacks.length).toEqual(2); const stack1 = response.stacks[0]; - test.deepEqual(stack1.stackName, 'stack1'); - test.deepEqual(stack1.id, 'stack1'); - test.deepEqual(stack1.environment.account, '12345'); - test.deepEqual(stack1.environment.region, 'us-east-1'); - test.deepEqual(stack1.environment.name, 'aws://12345/us-east-1'); - test.deepEqual(stack1.template, { + expect(stack1.stackName).toEqual('stack1'); + expect(stack1.id).toEqual('stack1'); + expect(stack1.environment.account).toEqual('12345'); + expect(stack1.environment.region).toEqual('us-east-1'); + expect(stack1.environment.name).toEqual('aws://12345/us-east-1'); + expect(stack1.template).toEqual({ Resources: { s1c1: { Type: 'DummyResource', Properties: { Prop1: 'Prop1' } }, s1c2: { Type: 'DummyResource', Properties: { Foo: 123 } }, }, }); - test.deepEqual(stack1.manifest.metadata, { + expect(stack1.manifest.metadata).toEqual({ '/stack1': [{ type: 'meta', data: 111 }], '/stack1/s1c1': [{ type: 'aws:cdk:logicalId', data: 's1c1' }], '/stack1/s1c2': @@ -76,10 +75,10 @@ nodeunitShim({ }); const stack2 = response.stacks[1]; - test.deepEqual(stack2.stackName, 'stack2'); - test.deepEqual(stack2.id, 'stack2'); - test.deepEqual(stack2.environment.name, 'aws://unknown-account/unknown-region'); - test.deepEqual(stack2.template, { + expect(stack2.stackName).toEqual('stack2'); + expect(stack2.id).toEqual('stack2'); + expect(stack2.environment.name).toEqual('aws://unknown-account/unknown-region'); + expect(stack2.template).toEqual({ Resources: { s2c1: { Type: 'DummyResource', Properties: { Prog2: 'Prog2' } }, @@ -87,7 +86,7 @@ nodeunitShim({ s1c2r25F685FFF: { Type: 'ResourceType2' }, }, }); - test.deepEqual(stack2.manifest.metadata, { + expect(stack2.manifest.metadata).toEqual({ '/stack2/s2c1': [{ type: 'aws:cdk:logicalId', data: 's2c1' }], '/stack2/s1c2': [{ type: 'meta', data: { key: 'value' } }], '/stack2/s1c2/r1': @@ -96,21 +95,21 @@ nodeunitShim({ [{ type: 'aws:cdk:logicalId', data: 's1c2r25F685FFF' }], }); - test.done(); - }, - 'context can be passed through CDK_CONTEXT'(test: Test) { + }); + + test('context can be passed through CDK_CONTEXT', () => { process.env[cxapi.CONTEXT_ENV] = JSON.stringify({ key1: 'val1', key2: 'val2', }); const prog = new App(); - test.deepEqual(prog.node.tryGetContext('key1'), 'val1'); - test.deepEqual(prog.node.tryGetContext('key2'), 'val2'); - test.done(); - }, + 
expect(prog.node.tryGetContext('key1')).toEqual('val1'); + expect(prog.node.tryGetContext('key2')).toEqual('val2'); + + }); - 'context passed through CDK_CONTEXT has precedence'(test: Test) { + test('context passed through CDK_CONTEXT has precedence', () => { process.env[cxapi.CONTEXT_ENV] = JSON.stringify({ key1: 'val1', key2: 'val2', @@ -121,15 +120,15 @@ nodeunitShim({ key2: 'val4', }, }); - test.deepEqual(prog.node.tryGetContext('key1'), 'val1'); - test.deepEqual(prog.node.tryGetContext('key2'), 'val2'); - test.done(); - }, + expect(prog.node.tryGetContext('key1')).toEqual('val1'); + expect(prog.node.tryGetContext('key2')).toEqual('val2'); + + }); - 'context from the command line can be used when creating the stack'(test: Test) { + test('context from the command line can be used when creating the stack', () => { const output = synthStack('stack2', false, { ctx1: 'HELLO' }); - test.deepEqual(output.template, { + expect(output.template).toEqual({ Resources: { s2c1: { Type: 'DummyResource', @@ -148,27 +147,27 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'setContext(k,v) can be used to set context programmatically'(test: Test) { + }); + + test('setContext(k,v) can be used to set context programmatically', () => { const prog = new App({ context: { foo: 'bar', }, }); - test.deepEqual(prog.node.tryGetContext('foo'), 'bar'); - test.done(); - }, + expect(prog.node.tryGetContext('foo')).toEqual('bar'); + + }); - 'setContext(k,v) cannot be called after stacks have been added because stacks may use the context'(test: Test) { + test('setContext(k,v) cannot be called after stacks have been added because stacks may use the context', () => { const prog = new App(); new Stack(prog, 's1'); - test.throws(() => prog.node.setContext('foo', 'bar')); - test.done(); - }, + expect(() => prog.node.setContext('foo', 'bar')).toThrow(); + + }); - 'app.synth() performs validation first and if there are errors, it returns the errors'(test: Test) { + test('app.synth() performs validation first and if there are errors, it returns the errors', () => { class Child extends Construct { protected validate() { @@ -186,12 +185,12 @@ nodeunitShim({ new Child(parent, 'C1'); new Child(parent, 'C2'); - test.throws(() => app.synth(), /Validation failed with the following errors/); + expect(() => app.synth()).toThrow(/Validation failed with the following errors/); - test.done(); - }, - 'app.synthesizeStack(stack) will return a list of missing contextual information'(test: Test) { + }); + + test('app.synthesizeStack(stack) will return a list of missing contextual information', () => { class MyStack extends Stack { constructor(scope: App, id: string, props?: StackProps) { super(scope, id, props); @@ -222,7 +221,7 @@ nodeunitShim({ new MyStack(app, 'MyStack', { synthesizer: new DefaultStackSynthesizer() }); }); - test.deepEqual(assembly.manifest.missing, [ + expect(assembly.manifest.missing).toEqual([ { key: 'missing-context-key', provider: ContextProvider.AVAILABILITY_ZONE_PROVIDER, @@ -243,25 +242,25 @@ nodeunitShim({ }, ]); - test.done(); - }, + + }); /** * Runtime library versions are now synthesized into the Stack templates directly * * The are not emitted into Cloud Assembly metadata anymore */ - 'runtime library versions are not emitted in asm anymore'(test: Test) { + test('runtime library versions are not emitted in asm anymore', () => { const assembly = withApp({ analyticsReporting: true }, app => { const stack = new Stack(app, 'stack1'); new CfnResource(stack, 'MyResource', { type: 'Resource::Type' }); }); - 
test.deepEqual(assembly.runtime, { libraries: {} }); - test.done(); - }, + expect(assembly.runtime).toEqual({ libraries: {} }); + + }); - 'deep stack is shown and synthesized properly'(test: Test) { + test('deep stack is shown and synthesized properly', () => { // WHEN const response = withApp({}, (app) => { const topStack = new Stack(app, 'Stack'); @@ -272,7 +271,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(response.stacks.map(s => ({ name: s.stackName, template: s.template })), [ + expect(response.stacks.map(s => ({ name: s.stackName, template: s.template }))).toEqual([ { name: 'Stack', template: { Resources: { Res: { Type: 'CDK::TopStack::Resource' } } }, @@ -283,10 +282,10 @@ nodeunitShim({ }, ]); - test.done(); - }, - 'stacks are written to the assembly file in a topological order'(test: Test) { + }); + + test('stacks are written to the assembly file in a topological order', () => { // WHEN const assembly = withApp({}, (app) => { const stackC = new Stack(app, 'StackC'); @@ -305,14 +304,14 @@ nodeunitShim({ // THEN const artifactsIds = assembly.artifacts.map(a => a.id); - test.ok(artifactsIds.indexOf('StackA') < artifactsIds.indexOf('StackC')); - test.ok(artifactsIds.indexOf('StackB') < artifactsIds.indexOf('StackC')); - test.ok(artifactsIds.indexOf('StackC') < artifactsIds.indexOf('StackD')); + expect(artifactsIds.indexOf('StackA')).toBeLessThan(artifactsIds.indexOf('StackC')); + expect(artifactsIds.indexOf('StackB')).toBeLessThan(artifactsIds.indexOf('StackC')); + expect(artifactsIds.indexOf('StackC')).toBeLessThan(artifactsIds.indexOf('StackD')); + - test.done(); - }, + }); - 'application support any type in context'(test: Test) { + test('application support any type in context', () => { const app = new App({ context: { isString: 'string', @@ -321,12 +320,12 @@ nodeunitShim({ }, }); - test.ok(app.node.tryGetContext('isString') === 'string'); - test.ok(app.node.tryGetContext('isNumber') === 10); - test.deepEqual(app.node.tryGetContext('isObject'), { isString: 'string', isNumber: 10 }); + expect(app.node.tryGetContext('isString')).toEqual('string'); + expect(app.node.tryGetContext('isNumber')).toEqual(10); + expect(app.node.tryGetContext('isObject')).toEqual({ isString: 'string', isNumber: 10 }); + - test.done(); - }, + }); }); class MyConstruct extends Construct { diff --git a/packages/@aws-cdk/core/test/arn.test.ts b/packages/@aws-cdk/core/test/arn.test.ts index 8f1a4d451a111..f1d1c862a5ca4 100644 --- a/packages/@aws-cdk/core/test/arn.test.ts +++ b/packages/@aws-cdk/core/test/arn.test.ts @@ -1,11 +1,10 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Arn, ArnComponents, ArnFormat, Aws, CfnOutput, ScopedAws, Stack, Token } from '../lib'; import { Intrinsic } from '../lib/private/intrinsic'; import { evaluateCFN } from './evaluate-cfn'; import { toCloudFormation } from './util'; -nodeunitShim({ - 'create from components with defaults'(test: Test) { +describe('arn', () => { + test('create from components with defaults', () => { const stack = new Stack(); const arn = stack.formatArn({ @@ -15,12 +14,12 @@ nodeunitShim({ const pseudo = new ScopedAws(stack); - test.deepEqual(stack.resolve(arn), + expect(stack.resolve(arn)).toEqual( stack.resolve(`arn:${pseudo.partition}:sqs:${pseudo.region}:${pseudo.accountId}:myqueuename`)); - test.done(); - }, - 'create from components with specific values for the various components'(test: Test) { + }); + + test('create from components with specific values for the various components', () => { const stack = new Stack(); const arn = 
stack.formatArn({ @@ -32,12 +31,12 @@ nodeunitShim({ resourceName: 'mytable/stream/label', }); - test.deepEqual(stack.resolve(arn), + expect(stack.resolve(arn)).toEqual( 'arn:aws-cn:dynamodb:us-east-1:123456789012:table/mytable/stream/label'); - test.done(); - }, - 'allow empty string in components'(test: Test) { + }); + + test('allow empty string in components', () => { const stack = new Stack(); const arn = stack.formatArn({ @@ -48,13 +47,13 @@ nodeunitShim({ partition: 'aws-cn', }); - test.deepEqual(stack.resolve(arn), + expect(stack.resolve(arn)).toEqual( 'arn:aws-cn:s3:::my-bucket'); - test.done(); - }, - 'resourcePathSep can be set to ":" instead of the default "/"'(test: Test) { + }); + + test('resourcePathSep can be set to ":" instead of the default "/"', () => { const stack = new Stack(); const arn = stack.formatArn({ @@ -66,12 +65,12 @@ nodeunitShim({ const pseudo = new ScopedAws(stack); - test.deepEqual(stack.resolve(arn), + expect(stack.resolve(arn)).toEqual( stack.resolve(`arn:${pseudo.partition}:codedeploy:${pseudo.region}:${pseudo.accountId}:application:WordPress_App`)); - test.done(); - }, - 'resourcePathSep can be set to "" instead of the default "/"'(test: Test) { + }); + + test('resourcePathSep can be set to "" instead of the default "/"', () => { const stack = new Stack(); const arn = stack.formatArn({ @@ -83,51 +82,51 @@ nodeunitShim({ const pseudo = new ScopedAws(stack); - test.deepEqual(stack.resolve(arn), + expect(stack.resolve(arn)).toEqual( stack.resolve(`arn:${pseudo.partition}:ssm:${pseudo.region}:${pseudo.accountId}:parameter/parameter-name`)); - test.done(); - }, - 'fails if resourcePathSep is neither ":" nor "/"'(test: Test) { + }); + + test('fails if resourcePathSep is neither ":" nor "/"', () => { const stack = new Stack(); - test.throws(() => stack.formatArn({ + expect(() => stack.formatArn({ service: 'foo', resource: 'bar', sep: 'x', - })); - test.done(); - }, + })).toThrow(); + + }); - 'Arn.parse(s)': { + describe('Arn.parse(s)', () => { - fails: { - 'if doesn\'t start with "arn:"'(test: Test) { + describe('fails', () => { + test('if doesn\'t start with "arn:"', () => { const stack = new Stack(); - test.throws(() => stack.parseArn('barn:foo:x:a:1:2'), /ARNs must start with "arn:".*barn:foo/); - test.done(); - }, + expect(() => stack.parseArn('barn:foo:x:a:1:2')).toThrow(/ARNs must start with "arn:".*barn:foo/); + + }); - 'if the ARN doesnt have enough components'(test: Test) { + test('if the ARN doesnt have enough components', () => { const stack = new Stack(); - test.throws(() => stack.parseArn('arn:is:too:short'), /The `resource` component \(6th component\) of an ARN is required/); - test.done(); - }, + expect(() => stack.parseArn('arn:is:too:short')).toThrow(/The `resource` component \(6th component\) of an ARN is required/); + + }); - 'if "service" is not specified'(test: Test) { + test('if "service" is not specified', () => { const stack = new Stack(); - test.throws(() => stack.parseArn('arn:aws::4:5:6'), /The `service` component \(3rd component\) of an ARN is required/); - test.done(); - }, + expect(() => stack.parseArn('arn:aws::4:5:6')).toThrow(/The `service` component \(3rd component\) of an ARN is required/); + + }); - 'if "resource" is not specified'(test: Test) { + test('if "resource" is not specified', () => { const stack = new Stack(); - test.throws(() => stack.parseArn('arn:aws:service:::'), /The `resource` component \(6th component\) of an ARN is required/); - test.done(); - }, - }, + expect(() => 
stack.parseArn('arn:aws:service:::')).toThrow(/The `resource` component \(6th component\) of an ARN is required/); + + }); + }); - 'various successful parses'(test: Test) { + test('various successful parses', () => { interface TestArnComponents extends ArnComponents { /** @default true */ checkCfnEncoding?: boolean; @@ -212,10 +211,10 @@ nodeunitShim({ // test the basic case const parsedComponents = stack.splitArn(arn, expectedComponents.arnFormat!); - test.deepEqual(parsedComponents, expectedComponents, arn); + expect(parsedComponents).toEqual(expectedComponents); // test the round-trip - test.equal(stack.formatArn(parsedComponents), arn); + expect(stack.formatArn(parsedComponents)).toEqual(arn); // test that the CloudFormation functions we generate evaluate to the correct value if (skipCheckingCfnEncoding) { @@ -226,44 +225,44 @@ nodeunitShim({ parsedComponents.arnFormat!); const cfnArnComponents = stack.resolve(tokenArnComponents); const evaluatedArnComponents = evaluateCFN(cfnArnComponents, { TheArn: arn }); - test.deepEqual(evaluatedArnComponents, parsedComponents); + expect(evaluatedArnComponents).toEqual(parsedComponents); } - test.done(); - }, - 'a Token with : separator'(test: Test) { + }); + + test('a Token with : separator', () => { const stack = new Stack(); const theToken = { Ref: 'SomeParameter' }; const parsed = stack.parseArn(new Intrinsic(theToken).toString(), ':'); - test.deepEqual(stack.resolve(parsed.partition), { 'Fn::Select': [1, { 'Fn::Split': [':', theToken] }] }); - test.deepEqual(stack.resolve(parsed.service), { 'Fn::Select': [2, { 'Fn::Split': [':', theToken] }] }); - test.deepEqual(stack.resolve(parsed.region), { 'Fn::Select': [3, { 'Fn::Split': [':', theToken] }] }); - test.deepEqual(stack.resolve(parsed.account), { 'Fn::Select': [4, { 'Fn::Split': [':', theToken] }] }); - test.deepEqual(stack.resolve(parsed.resource), { 'Fn::Select': [5, { 'Fn::Split': [':', theToken] }] }); - test.deepEqual(stack.resolve(parsed.resourceName), { 'Fn::Select': [6, { 'Fn::Split': [':', theToken] }] }); - test.equal(parsed.sep, ':'); + expect(stack.resolve(parsed.partition)).toEqual({ 'Fn::Select': [1, { 'Fn::Split': [':', theToken] }] }); + expect(stack.resolve(parsed.service)).toEqual({ 'Fn::Select': [2, { 'Fn::Split': [':', theToken] }] }); + expect(stack.resolve(parsed.region)).toEqual({ 'Fn::Select': [3, { 'Fn::Split': [':', theToken] }] }); + expect(stack.resolve(parsed.account)).toEqual({ 'Fn::Select': [4, { 'Fn::Split': [':', theToken] }] }); + expect(stack.resolve(parsed.resource)).toEqual({ 'Fn::Select': [5, { 'Fn::Split': [':', theToken] }] }); + expect(stack.resolve(parsed.resourceName)).toEqual({ 'Fn::Select': [6, { 'Fn::Split': [':', theToken] }] }); + expect(parsed.sep).toEqual(':'); - test.done(); - }, - 'a Token with / separator'(test: Test) { + }); + + test('a Token with / separator', () => { const stack = new Stack(); const theToken = { Ref: 'SomeParameter' }; const parsed = stack.parseArn(new Intrinsic(theToken).toString()); - test.equal(parsed.sep, '/'); + expect(parsed.sep).toEqual('/'); // eslint-disable-next-line max-len - test.deepEqual(stack.resolve(parsed.resource), { 'Fn::Select': [0, { 'Fn::Split': ['/', { 'Fn::Select': [5, { 'Fn::Split': [':', theToken] }] }] }] }); + expect(stack.resolve(parsed.resource)).toEqual({ 'Fn::Select': [0, { 'Fn::Split': ['/', { 'Fn::Select': [5, { 'Fn::Split': [':', theToken] }] }] }] }); // eslint-disable-next-line max-len - test.deepEqual(stack.resolve(parsed.resourceName), { 'Fn::Select': [1, { 'Fn::Split': ['/', { 
'Fn::Select': [5, { 'Fn::Split': [':', theToken] }] }] }] }); + expect(stack.resolve(parsed.resourceName)).toEqual({ 'Fn::Select': [1, { 'Fn::Split': ['/', { 'Fn::Select': [5, { 'Fn::Split': [':', theToken] }] }] }] }); - test.done(); - }, - 'extracting resource name from a complex ARN'(test: Test) { + }); + + test('extracting resource name from a complex ARN', () => { // GIVEN const stack = new Stack(); const theToken = Token.asString({ Ref: 'SomeParameter' }); @@ -272,23 +271,23 @@ nodeunitShim({ const parsed = Arn.extractResourceName(theToken, 'role'); // THEN - test.deepEqual(evaluateCFN(stack.resolve(parsed), { + expect(evaluateCFN(stack.resolve(parsed), { SomeParameter: 'arn:aws:iam::111111111111:role/path/to/role/name', - }), 'path/to/role/name'); + })).toEqual('path/to/role/name'); - test.done(); - }, - 'extractResourceName validates resource type if possible'(test: Test) { + }); + + test('extractResourceName validates resource type if possible', () => { // WHEN - test.throws(() => { + expect(() => { Arn.extractResourceName('arn:aws:iam::111111111111:banana/rama', 'role'); - }, /Expected resource type/); + }).toThrow(/Expected resource type/); - test.done(); - }, - 'returns empty string ARN components'(test: Test) { + }); + + test('returns empty string ARN components', () => { const stack = new Stack(); const arn = 'arn:aws:iam::123456789012:role/abc123'; const expected: ArnComponents = { @@ -302,12 +301,12 @@ nodeunitShim({ arnFormat: ArnFormat.SLASH_RESOURCE_NAME, }; - test.deepEqual(stack.parseArn(arn), expected, arn); - test.done(); - }, - }, + expect(stack.parseArn(arn)).toEqual(expected); + + }); + }); - 'can use a fully specified ARN from a different stack without incurring an import'(test: Test) { + test('can use a fully specified ARN from a different stack without incurring an import', () => { // GIVEN const stack1 = new Stack(undefined, 'Stack1', { env: { account: '12345678', region: 'us-turbo-5' } }); const stack2 = new Stack(undefined, 'Stack2', { env: { account: '87654321', region: 'us-turbo-1' } }); @@ -322,7 +321,7 @@ nodeunitShim({ new CfnOutput(stack2, 'SomeValue', { value: arn }); // THEN - test.deepEqual(toCloudFormation(stack2), { + expect(toCloudFormation(stack2)).toEqual({ Outputs: { SomeValue: { Value: { @@ -333,10 +332,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'parse other fields if only some are tokens'(test: Test) { + }); + + test('parse other fields if only some are tokens', () => { // GIVEN const stack = new Stack(); @@ -344,14 +343,14 @@ nodeunitShim({ const parsed = stack.parseArn(`arn:${Aws.PARTITION}:iam::123456789012:role/S3Access`); // THEN - test.deepEqual(stack.resolve(parsed.partition), { Ref: 'AWS::Partition' }); - test.deepEqual(stack.resolve(parsed.service), 'iam'); - test.equal(stack.resolve(parsed.region), ''); - test.deepEqual(stack.resolve(parsed.account), '123456789012'); - test.deepEqual(stack.resolve(parsed.resource), 'role'); - test.deepEqual(stack.resolve(parsed.resourceName), 'S3Access'); - test.equal(parsed.sep, '/'); - - test.done(); - }, + expect(stack.resolve(parsed.partition)).toEqual({ Ref: 'AWS::Partition' }); + expect(stack.resolve(parsed.service)).toEqual('iam'); + expect(stack.resolve(parsed.region)).toEqual(''); + expect(stack.resolve(parsed.account)).toEqual('123456789012'); + expect(stack.resolve(parsed.resource)).toEqual('role'); + expect(stack.resolve(parsed.resourceName)).toEqual('S3Access'); + expect(parsed.sep).toEqual('/'); + + + }); }); diff --git a/packages/@aws-cdk/core/test/aspect.test.ts 
b/packages/@aws-cdk/core/test/aspect.test.ts index 3fc2d2031111a..0ed634cf5e58c 100644 --- a/packages/@aws-cdk/core/test/aspect.test.ts +++ b/packages/@aws-cdk/core/test/aspect.test.ts @@ -1,5 +1,4 @@ import * as cxschema from '@aws-cdk/cloud-assembly-schema'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App } from '../lib'; import { IAspect, Aspects } from '../lib/aspect'; import { Construct, IConstruct } from '../lib/construct-compat'; @@ -25,19 +24,19 @@ class MyAspect implements IAspect { } } -nodeunitShim({ - 'Aspects are invoked only once'(test: Test) { +describe('aspect', () => { + test('Aspects are invoked only once', () => { const app = new App(); const root = new MyConstruct(app, 'MyConstruct'); Aspects.of(root).add(new VisitOnce()); app.synth(); - test.deepEqual(root.visitCounter, 1); + expect(root.visitCounter).toEqual(1); app.synth(); - test.deepEqual(root.visitCounter, 1); - test.done(); - }, + expect(root.visitCounter).toEqual(1); - 'Warn if an Aspect is added via another Aspect'(test: Test) { + }); + + test('Warn if an Aspect is added via another Aspect', () => { const app = new App(); const root = new MyConstruct(app, 'MyConstruct'); const child = new MyConstruct(root, 'ChildConstruct'); @@ -51,27 +50,27 @@ nodeunitShim({ }, }); app.synth(); - test.deepEqual(root.node.metadata[0].type, cxschema.ArtifactMetadataEntryType.WARN); - test.deepEqual(root.node.metadata[0].data, 'We detected an Aspect was added via another Aspect, and will not be applied'); + expect(root.node.metadata[0].type).toEqual(cxschema.ArtifactMetadataEntryType.WARN); + expect(root.node.metadata[0].data).toEqual('We detected an Aspect was added via another Aspect, and will not be applied'); // warning is not added to child construct - test.equal(child.node.metadata.length, 0); - test.done(); - }, + expect(child.node.metadata.length).toEqual(0); + + }); - 'Do not warn if an Aspect is added directly (not by another aspect)'(test: Test) { + test('Do not warn if an Aspect is added directly (not by another aspect)', () => { const app = new App(); const root = new MyConstruct(app, 'Construct'); const child = new MyConstruct(root, 'ChildConstruct'); Aspects.of(root).add(new MyAspect()); app.synth(); - test.deepEqual(root.node.metadata[0].type, 'foo'); - test.deepEqual(root.node.metadata[0].data, 'bar'); - test.deepEqual(child.node.metadata[0].type, 'foo'); - test.deepEqual(child.node.metadata[0].data, 'bar'); + expect(root.node.metadata[0].type).toEqual('foo'); + expect(root.node.metadata[0].data).toEqual('bar'); + expect(child.node.metadata[0].type).toEqual('foo'); + expect(child.node.metadata[0].data).toEqual('bar'); // no warning is added - test.equal(root.node.metadata.length, 1); - test.equal(child.node.metadata.length, 1); - test.done(); - }, + expect(root.node.metadata.length).toEqual(1); + expect(child.node.metadata.length).toEqual(1); + + }); }); diff --git a/packages/@aws-cdk/core/test/assets.test.ts b/packages/@aws-cdk/core/test/assets.test.ts index fcfae3a9d147f..206362c79e809 100644 --- a/packages/@aws-cdk/core/test/assets.test.ts +++ b/packages/@aws-cdk/core/test/assets.test.ts @@ -1,10 +1,9 @@ import * as cxschema from '@aws-cdk/cloud-assembly-schema'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { FileAssetPackaging, Stack } from '../lib'; import { toCloudFormation } from './util'; -nodeunitShim({ - 'addFileAsset correctly sets metadata and creates S3 parameters'(test: Test) { +describe('assets', () => { + test('addFileAsset correctly sets metadata and creates S3 
parameters', () => { // GIVEN const stack = new Stack(); @@ -18,17 +17,17 @@ nodeunitShim({ // THEN const assetMetadata = stack.node.metadata.find(({ type }) => type === cxschema.ArtifactMetadataEntryType.ASSET); - test.ok(assetMetadata && assetMetadata.data); + expect(assetMetadata && assetMetadata.data).toBeDefined(); if (assetMetadata && assetMetadata.data) { const data = assetMetadata.data as cxschema.AssetMetadataEntry; - test.equal(data.path, 'file-name'); - test.equal(data.id, 'source-hash'); - test.equal(data.packaging, FileAssetPackaging.ZIP_DIRECTORY); - test.equal(data.sourceHash, 'source-hash'); + expect(data.path).toEqual('file-name'); + expect(data.id).toEqual('source-hash'); + expect(data.packaging).toEqual(FileAssetPackaging.ZIP_DIRECTORY); + expect(data.sourceHash).toEqual('source-hash'); } - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Parameters: { AssetParameterssourcehashS3BucketE6E91E3E: { Type: 'String', @@ -45,10 +44,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'addFileAsset correctly sets object urls'(test: Test) { + }); + + test('addFileAsset correctly sets object urls', () => { // GIVEN const stack = new Stack(); @@ -63,15 +62,15 @@ nodeunitShim({ const expectedS3UrlPrefix = 's3://'; const expectedHttpUrlPrefix = `https://s3.${stack.region}.${stack.urlSuffix}/`; - test.equal( - assetLocation.s3ObjectUrl.replace(expectedS3UrlPrefix, ''), + expect( + assetLocation.s3ObjectUrl.replace(expectedS3UrlPrefix, '')).toEqual( assetLocation.httpUrl.replace(expectedHttpUrlPrefix, ''), ); - test.done(); - }, - 'addDockerImageAsset correctly sets metadata'(test: Test) { + }); + + test('addDockerImageAsset correctly sets metadata', () => { // GIVEN const stack = new Stack(); @@ -85,22 +84,22 @@ nodeunitShim({ // THEN const assetMetadata = stack.node.metadata.find(({ type }) => type === cxschema.ArtifactMetadataEntryType.ASSET); - test.ok(assetMetadata && assetMetadata.data); + expect(assetMetadata && assetMetadata.data).toBeDefined(); if (assetMetadata && assetMetadata.data) { const data = assetMetadata.data as cxschema.ContainerImageAssetMetadataEntry; - test.equal(data.packaging, 'container-image'); - test.equal(data.path, 'directory-name'); - test.equal(data.sourceHash, 'source-hash'); - test.equal(data.repositoryName, 'repository-name'); - test.equal(data.imageTag, 'source-hash'); + expect(data.packaging).toEqual('container-image'); + expect(data.path).toEqual('directory-name'); + expect(data.sourceHash).toEqual('source-hash'); + expect(data.repositoryName).toEqual('repository-name'); + expect(data.imageTag).toEqual('source-hash'); } - test.deepEqual(toCloudFormation(stack), { }); - test.done(); - }, + expect(toCloudFormation(stack)).toEqual({ }); + + }); - 'addDockerImageAsset uses the default repository name'(test: Test) { + test('addDockerImageAsset uses the default repository name', () => { // GIVEN const stack = new Stack(); @@ -113,22 +112,22 @@ nodeunitShim({ // THEN const assetMetadata = stack.node.metadata.find(({ type }) => type === cxschema.ArtifactMetadataEntryType.ASSET); - test.ok(assetMetadata && assetMetadata.data); + expect(assetMetadata && assetMetadata.data).toBeDefined(); if (assetMetadata && assetMetadata.data) { const data = assetMetadata.data as cxschema.ContainerImageAssetMetadataEntry; - test.equal(data.packaging, 'container-image'); - test.equal(data.path, 'directory-name'); - test.equal(data.sourceHash, 'source-hash'); - test.equal(data.repositoryName, 'aws-cdk/assets'); - test.equal(data.imageTag, 
'source-hash'); + expect(data.packaging).toEqual('container-image'); + expect(data.path).toEqual('directory-name'); + expect(data.sourceHash).toEqual('source-hash'); + expect(data.repositoryName).toEqual('aws-cdk/assets'); + expect(data.imageTag).toEqual('source-hash'); } - test.deepEqual(toCloudFormation(stack), { }); - test.done(); - }, + expect(toCloudFormation(stack)).toEqual({ }); - 'addDockerImageAsset supports overriding repository name through a context key as a workaround until we have API for that'(test: Test) { + }); + + test('addDockerImageAsset supports overriding repository name through a context key as a workaround until we have API for that', () => { // GIVEN const stack = new Stack(); stack.node.setContext('assets-ecr-repository-name', 'my-custom-repo-name'); @@ -142,18 +141,18 @@ nodeunitShim({ // THEN const assetMetadata = stack.node.metadata.find(({ type }) => type === cxschema.ArtifactMetadataEntryType.ASSET); - test.ok(assetMetadata && assetMetadata.data); + expect(assetMetadata && assetMetadata.data).toBeDefined(); if (assetMetadata && assetMetadata.data) { const data = assetMetadata.data as cxschema.ContainerImageAssetMetadataEntry; - test.equal(data.packaging, 'container-image'); - test.equal(data.path, 'directory-name'); - test.equal(data.sourceHash, 'source-hash'); - test.equal(data.repositoryName, 'my-custom-repo-name'); - test.equal(data.imageTag, 'source-hash'); + expect(data.packaging).toEqual('container-image'); + expect(data.path).toEqual('directory-name'); + expect(data.sourceHash).toEqual('source-hash'); + expect(data.repositoryName).toEqual('my-custom-repo-name'); + expect(data.imageTag).toEqual('source-hash'); } - test.deepEqual(toCloudFormation(stack), { }); - test.done(); - }, + expect(toCloudFormation(stack)).toEqual({ }); + + }); }); diff --git a/packages/@aws-cdk/core/test/bundling.test.ts b/packages/@aws-cdk/core/test/bundling.test.ts index 48522347e698c..a22968f4d17a0 100644 --- a/packages/@aws-cdk/core/test/bundling.test.ts +++ b/packages/@aws-cdk/core/test/bundling.test.ts @@ -1,17 +1,16 @@ import * as child_process from 'child_process'; import * as crypto from 'crypto'; import * as path from 'path'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as sinon from 'sinon'; import { BundlingDockerImage, DockerImage, FileSystem } from '../lib'; -nodeunitShim({ - 'tearDown'(callback: any) { +describe('bundling', () => { + afterEach(() => { sinon.restore(); - callback(); - }, - 'bundling with image from registry'(test: Test) { + }); + + test('bundling with image from registry', () => { sinon.stub(process, 'platform').value('darwin'); const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ status: 0, @@ -34,7 +33,7 @@ nodeunitShim({ user: 'user:group', }); - test.ok(spawnSyncStub.calledWith('docker', [ + expect(spawnSyncStub.calledWith('docker', [ 'run', '--rm', '-u', 'user:group', '-v', '/host-path:/container-path:delegated', @@ -43,11 +42,11 @@ nodeunitShim({ '-w', '/working-directory', 'alpine', 'cool', 'command', - ], { stdio: ['ignore', process.stderr, 'inherit'] })); - test.done(); - }, + ], { stdio: ['ignore', process.stderr, 'inherit'] })).toEqual(true); + + }); - 'bundling with image from asset'(test: Test) { + test('bundling with image from asset', () => { const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ status: 0, stderr: Buffer.from('stderr'), @@ -76,20 +75,20 @@ nodeunitShim({ })).digest('hex'); const tag = `cdk-${tagHash}`; - test.ok(spawnSyncStub.firstCall.calledWith('docker', [ + 
expect(spawnSyncStub.firstCall.calledWith('docker', [ 'build', '-t', tag, '--build-arg', 'TEST_ARG=cdk-test', 'docker-path', - ])); + ])).toEqual(true); - test.ok(spawnSyncStub.secondCall.calledWith('docker', [ + expect(spawnSyncStub.secondCall.calledWith('docker', [ 'run', '--rm', tag, - ])); - test.done(); - }, + ])).toEqual(true); + + }); - 'bundling with image from asset with platform'(test: Test) { + test('bundling with image from asset with platform', () => { const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ status: 0, stderr: Buffer.from('stderr'), @@ -113,20 +112,20 @@ nodeunitShim({ })).digest('hex'); const tag = `cdk-${tagHash}`; - test.ok(spawnSyncStub.firstCall.calledWith('docker', [ + expect(spawnSyncStub.firstCall.calledWith('docker', [ 'build', '-t', tag, '--platform', platform, 'docker-path', - ])); + ])).toEqual(true); - test.ok(spawnSyncStub.secondCall.calledWith('docker', [ + expect(spawnSyncStub.secondCall.calledWith('docker', [ 'run', '--rm', tag, - ])); - test.done(); - }, + ])).toEqual(true); - 'throws in case of spawnSync error'(test: Test) { + }); + + test('throws in case of spawnSync error', () => { sinon.stub(child_process, 'spawnSync').returns({ status: 0, stderr: Buffer.from('stderr'), @@ -138,11 +137,11 @@ nodeunitShim({ }); const image = BundlingDockerImage.fromRegistry('alpine'); - test.throws(() => image.run(), /UnknownError/); - test.done(); - }, + expect(() => image.run()).toThrow(/UnknownError/); + + }); - 'throws if status is not 0'(test: Test) { + test('throws if status is not 0', () => { sinon.stub(child_process, 'spawnSync').returns({ status: -1, stderr: Buffer.from('stderr'), @@ -153,18 +152,18 @@ nodeunitShim({ }); const image = BundlingDockerImage.fromRegistry('alpine'); - test.throws(() => image.run(), /\[Status -1\]/); - test.done(); - }, + expect(() => image.run()).toThrow(/\[Status -1\]/); - 'BundlerDockerImage json is the bundler image name by default'(test: Test) { + }); + + test('BundlerDockerImage json is the bundler image name by default', () => { const image = BundlingDockerImage.fromRegistry('alpine'); - test.equals(image.toJSON(), 'alpine'); - test.done(); - }, + expect(image.toJSON()).toEqual('alpine'); + + }); - 'BundlerDockerImage json is the bundler image if building an image'(test: Test) { + test('BundlerDockerImage json is the bundler image if building an image', () => { sinon.stub(child_process, 'spawnSync').returns({ status: 0, stderr: Buffer.from('stderr'), @@ -183,13 +182,13 @@ nodeunitShim({ path: 'docker-path', })).digest('hex'); - test.equals(image.image, `cdk-${tagHash}`); - test.equals(image.toJSON(), imageHash); - test.ok(fingerprintStub.calledWith('docker-path', sinon.match({ extraHash: JSON.stringify({}) }))); - test.done(); - }, + expect(image.image).toEqual(`cdk-${tagHash}`); + expect(image.toJSON()).toEqual(imageHash); + expect(fingerprintStub.calledWith('docker-path', sinon.match({ extraHash: JSON.stringify({}) }))).toEqual(true); - 'custom dockerfile is passed through to docker exec'(test: Test) { + }); + + test('custom dockerfile is passed through to docker exec', () => { const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ status: 0, stderr: Buffer.from('stderr'), @@ -204,14 +203,14 @@ nodeunitShim({ file: 'my-dockerfile', }); - test.ok(spawnSyncStub.calledOnce); + expect(spawnSyncStub.calledOnce).toEqual(true); const expected = path.join(imagePath, 'my-dockerfile'); - test.ok(new RegExp(`-f ${expected}`).test(spawnSyncStub.firstCall.args[1]?.join(' ') ?? 
'')); + expect(new RegExp(`-f ${expected}`).test(spawnSyncStub.firstCall.args[1]?.join(' ') ?? '')).toEqual(true); + - test.done(); - }, + }); - 'fromAsset'(test: Test) { + test('fromAsset', () => { sinon.stub(child_process, 'spawnSync').returns({ status: 0, stderr: Buffer.from('stderr'), @@ -225,12 +224,12 @@ nodeunitShim({ const image = BundlingDockerImage.fromAsset(imagePath, { file: 'my-dockerfile', }); - test.ok(image); - test.ok(image.image); - test.done(); - }, + expect(image).toBeDefined(); + expect(image.image).toBeDefined(); - 'custom entrypoint is passed through to docker exec'(test: Test) { + }); + + test('custom entrypoint is passed through to docker exec', () => { sinon.stub(process, 'platform').value('darwin'); const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ status: 0, @@ -254,7 +253,7 @@ nodeunitShim({ user: 'user:group', }); - test.ok(spawnSyncStub.calledWith('docker', [ + expect(spawnSyncStub.calledWith('docker', [ 'run', '--rm', '-u', 'user:group', '-v', '/host-path:/container-path:delegated', @@ -265,11 +264,11 @@ nodeunitShim({ 'alpine', '--cool-entrypoint-arg', 'cool', 'command', - ], { stdio: ['ignore', process.stderr, 'inherit'] })); - test.done(); - }, + ], { stdio: ['ignore', process.stderr, 'inherit'] })).toEqual(true); + + }); - 'cp utility copies from an image'(test: Test) { + test('cp utility copies from an image', () => { // GIVEN const containerId = '1234567890abcdef1234567890abcdef'; const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ @@ -285,14 +284,14 @@ nodeunitShim({ BundlingDockerImage.fromRegistry('alpine').cp('/foo/bar', '/baz'); // THEN - test.ok(spawnSyncStub.calledWith(sinon.match.any, ['create', 'alpine'], sinon.match.any)); - test.ok(spawnSyncStub.calledWith(sinon.match.any, ['cp', `${containerId}:/foo/bar`, '/baz'], sinon.match.any)); - test.ok(spawnSyncStub.calledWith(sinon.match.any, ['rm', '-v', containerId])); + expect(spawnSyncStub.calledWith(sinon.match.any, ['create', 'alpine'], sinon.match.any)).toEqual(true); + expect(spawnSyncStub.calledWith(sinon.match.any, ['cp', `${containerId}:/foo/bar`, '/baz'], sinon.match.any)).toEqual(true); + expect(spawnSyncStub.calledWith(sinon.match.any, ['rm', '-v', containerId])).toEqual(true); + - test.done(); - }, + }); - 'cp utility cleans up after itself'(test: Test) { + test('cp utility cleans up after itself', () => { // GIVEN const containerId = '1234567890abcdef1234567890abcdef'; const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ @@ -315,16 +314,16 @@ nodeunitShim({ }); // WHEN - test.throws(() => { + expect(() => { BundlingDockerImage.fromRegistry('alpine').cp('/foo/bar', '/baz'); - }, /Failed.*copy/i); + }).toThrow(/Failed.*copy/i); // THEN - test.ok(spawnSyncStub.calledWith(sinon.match.any, ['rm', '-v', containerId])); - test.done(); - }, + expect(spawnSyncStub.calledWith(sinon.match.any, ['rm', '-v', containerId])).toEqual(true); - 'cp utility copies to a temp dir of outputPath is omitted'(test: Test) { + }); + + test('cp utility copies to a temp dir of outputPath is omitted', () => { // GIVEN const containerId = '1234567890abcdef1234567890abcdef'; sinon.stub(child_process, 'spawnSync').returns({ @@ -340,12 +339,12 @@ nodeunitShim({ const tempPath = DockerImage.fromRegistry('alpine').cp('/foo/bar'); // THEN - test.ok(/cdk-docker-cp-/.test(tempPath)); + expect(/cdk-docker-cp-/.test(tempPath)).toEqual(true); + - test.done(); - }, + }); - 'adding user provided security-opt'(test: Test) { + test('adding user provided security-opt', () 
=> { // GIVEN sinon.stub(process, 'platform').value('darwin'); const spawnSyncStub = sinon.stub(child_process, 'spawnSync').returns({ @@ -371,7 +370,7 @@ nodeunitShim({ user: 'user:group', }); - test.ok(spawnSyncStub.calledWith('docker', [ + expect(spawnSyncStub.calledWith('docker', [ 'run', '--rm', '--security-opt', 'no-new-privileges', '-u', 'user:group', @@ -381,11 +380,11 @@ nodeunitShim({ '-w', '/working-directory', 'alpine', 'cool', 'command', - ], { stdio: ['ignore', process.stderr, 'inherit'] })); - test.done(); - }, + ], { stdio: ['ignore', process.stderr, 'inherit'] })).toEqual(true); - 'ensure selinux docker mount'(test: Test) { + }); + + test('ensure selinux docker mount', () => { // GIVEN sinon.stub(process, 'platform').value('linux'); const spawnSyncStub = sinon.stub(child_process, 'spawnSync'); @@ -416,18 +415,18 @@ nodeunitShim({ }); // THEN - test.ok(spawnSyncStub.secondCall.calledWith('docker', [ + expect(spawnSyncStub.secondCall.calledWith('docker', [ 'run', '--rm', '-u', 'user:group', '-v', '/host-path:/container-path:z,delegated', '-w', '/working-directory', 'alpine', 'cool', 'command', - ], { stdio: ['ignore', process.stderr, 'inherit'] })); - test.done(); - }, + ], { stdio: ['ignore', process.stderr, 'inherit'] })).toEqual(true); + + }); - 'ensure selinux docker mount on linux with selinux disabled'(test: Test) { + test('ensure selinux docker mount on linux with selinux disabled', () => { // GIVEN sinon.stub(process, 'platform').value('linux'); const spawnSyncStub = sinon.stub(child_process, 'spawnSync'); @@ -458,17 +457,17 @@ nodeunitShim({ }); // THEN - test.ok(spawnSyncStub.secondCall.calledWith('docker', [ + expect(spawnSyncStub.secondCall.calledWith('docker', [ 'run', '--rm', '-u', 'user:group', '-v', '/host-path:/container-path:delegated', '-w', '/working-directory', 'alpine', 'cool', 'command', - ], { stdio: ['ignore', process.stderr, 'inherit'] })); - test.done(); - }, - 'ensure no selinux docker mount if selinuxenabled isn\'t an available command'(test: Test) { + ], { stdio: ['ignore', process.stderr, 'inherit'] })).toEqual(true); + + }); + test('ensure no selinux docker mount if selinuxenabled isn\'t an available command', () => { // GIVEN sinon.stub(process, 'platform').value('linux'); const spawnSyncStub = sinon.stub(child_process, 'spawnSync'); @@ -499,14 +498,14 @@ nodeunitShim({ }); // THEN - test.ok(spawnSyncStub.secondCall.calledWith('docker', [ + expect(spawnSyncStub.secondCall.calledWith('docker', [ 'run', '--rm', '-u', 'user:group', '-v', '/host-path:/container-path:delegated', '-w', '/working-directory', 'alpine', 'cool', 'command', - ], { stdio: ['ignore', process.stderr, 'inherit'] })); - test.done(); - }, + ], { stdio: ['ignore', process.stderr, 'inherit'] })).toEqual(true); + + }); }); diff --git a/packages/@aws-cdk/core/test/cfn-json.test.ts b/packages/@aws-cdk/core/test/cfn-json.test.ts index 150d1971cc39f..ff072b9c2c6b1 100644 --- a/packages/@aws-cdk/core/test/cfn-json.test.ts +++ b/packages/@aws-cdk/core/test/cfn-json.test.ts @@ -1,12 +1,11 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, CfnResource, Lazy, Stack } from '../lib'; import { CfnJson } from '../lib/cfn-json'; import { CfnUtilsResourceType } from '../lib/private/cfn-utils-provider/consts'; import { handler } from '../lib/private/cfn-utils-provider/index'; -nodeunitShim({ +describe('cfn json', () => { - 'resolves to a fn::getatt'(test: Test) { + test('resolves to a fn::getatt', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'test'); @@ 
-23,15 +22,15 @@ nodeunitShim({ const template = app.synth().getStackArtifact(stack.artifactId).template; // input is stringified - test.deepEqual(template.Resources.MyCfnJson248769BB.Properties.Value, '{"hello":1234,"world":{"bar":1234}}'); + expect(template.Resources.MyCfnJson248769BB.Properties.Value).toEqual('{"hello":1234,"world":{"bar":1234}}'); // output is basically an Fn::GetAtt - test.deepEqual(stack.resolve(json), { 'Fn::GetAtt': ['MyCfnJson248769BB', 'Value'] }); + expect(stack.resolve(json)).toEqual({ 'Fn::GetAtt': ['MyCfnJson248769BB', 'Value'] }); - test.done(); - }, - 'tokens and intrinsics can be used freely in keys or values'(test: Test) { + }); + + test('tokens and intrinsics can be used freely in keys or values', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'test'); @@ -50,13 +49,13 @@ nodeunitShim({ // THEN const template = app.synth().getStackArtifact(stack.artifactId).template; - test.deepEqual(template.Resources.MyCfnJson248769BB.Properties.Value, { + expect(template.Resources.MyCfnJson248769BB.Properties.Value).toEqual({ 'Fn::Join': ['', ['{"', { Ref: 'Other' }, '":1234,"world":{"bar":"this is a I am lazy"}}']], }); - test.done(); - }, - 'JSON.stringify() will return the CFN-stringified value to avoid circular references'(test: Test) { + }); + + test('JSON.stringify() will return the CFN-stringified value to avoid circular references', () => { // GIVEN const stack = new Stack(); const res = new CfnResource(stack, 'MyResource', { type: 'Foo' }); @@ -70,15 +69,15 @@ nodeunitShim({ const str = JSON.stringify(cfnjson); // THEN - test.ok(typeof(str) === 'string'); - test.deepEqual(stack.resolve(str), { + expect(typeof(str)).toEqual('string'); + expect(stack.resolve(str)).toEqual({ 'Fn::Join': ['', ['"{"ref=', { Ref: 'MyResource' }, '":"this is a I am lazy"}"']], }); - test.done(); - }, - async 'resource provider simply parses json and reflects back as an attribute'(test: Test) { + }); + + test('resource provider simply parses json and reflects back as an attribute', async () => { const input = { foo: 1234 }; const response = await handler({ ResourceType: CfnUtilsResourceType.CFN_JSON, @@ -86,7 +85,7 @@ nodeunitShim({ Value: JSON.stringify(input), }, } as any); - test.deepEqual(input, response.Data.Value); - test.done(); - }, + expect(input).toEqual(response.Data.Value); + + }); }); diff --git a/packages/@aws-cdk/core/test/cfn-parameter.test.ts b/packages/@aws-cdk/core/test/cfn-parameter.test.ts index 7441e4548cf98..a93b83f973522 100644 --- a/packages/@aws-cdk/core/test/cfn-parameter.test.ts +++ b/packages/@aws-cdk/core/test/cfn-parameter.test.ts @@ -1,8 +1,7 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnParameter, Stack } from '../lib'; -nodeunitShim({ - 'valueAsString supports both string and number types'(test: Test) { +describe('cfn parameter', () => { + test('valueAsString supports both string and number types', () => { // GIVEN const stack = new Stack(); const numberParam = new CfnParameter(stack, 'numberParam', { type: 'Number', default: 10 }); @@ -13,20 +12,20 @@ nodeunitShim({ const strVal = stringParam.valueAsString; // THEN - test.deepEqual(stack.resolve(numVal), { Ref: 'numberParam' }); - test.deepEqual(stack.resolve(strVal), { Ref: 'stringParam' }); + expect(stack.resolve(numVal)).toEqual({ Ref: 'numberParam' }); + expect(stack.resolve(strVal)).toEqual({ Ref: 'stringParam' }); - test.done(); - }, - 'valueAsString fails for unsupported types'(test: Test) { + }); + + test('valueAsString fails for unsupported 
types', () => { // GIVEN const stack = new Stack(); const listParam = new CfnParameter(stack, 'listParam', { type: 'List', default: 10 }); // WHEN - THEN - test.throws(() => listParam.valueAsList, /Parameter type \(List\)/); + expect(() => listParam.valueAsList).toThrow(/Parameter type \(List\)/); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/core/test/cfn-resource.test.ts b/packages/@aws-cdk/core/test/cfn-resource.test.ts index c625d477179b6..83f9f634edb48 100644 --- a/packages/@aws-cdk/core/test/cfn-resource.test.ts +++ b/packages/@aws-cdk/core/test/cfn-resource.test.ts @@ -1,9 +1,8 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as core from '../lib'; -nodeunitShim({ - '._toCloudFormation': { - 'does not call renderProperties with an undefined value'(test: Test) { +describe('cfn resource', () => { + describe('._toCloudFormation', () => { + test('does not call renderProperties with an undefined value', () => { const app = new core.App(); const stack = new core.Stack(app, 'TestStack'); const resource = new core.CfnResource(stack, 'DefaultResource', { type: 'Test::Resource::Fake' }); @@ -11,22 +10,22 @@ nodeunitShim({ let called = false; (resource as any).renderProperties = (val: any) => { called = true; - test.notEqual(val, null); + expect(val).not.toBeNull(); }; - test.deepEqual(app.synth().getStackByName(stack.stackName).template, { + expect(app.synth().getStackByName(stack.stackName).template).toEqual({ Resources: { DefaultResource: { Type: 'Test::Resource::Fake', }, }, }); - test.ok(called, 'renderProperties must be called called'); + expect(called).toEqual(true); - test.done(); - }, - 'renders "Properties" for a resource that has only properties set to "false"'(test: Test) { + }); + + test('renders "Properties" for a resource that has only properties set to "false"', () => { const app = new core.App(); const stack = new core.Stack(app, 'TestStack'); new core.CfnResource(stack, 'Resource', { @@ -36,7 +35,7 @@ nodeunitShim({ }, }); - test.deepEqual(app.synth().getStackByName(stack.stackName).template, { + expect(app.synth().getStackByName(stack.stackName).template).toEqual({ Resources: { Resource: { Type: 'Test::Resource::Fake', @@ -47,11 +46,11 @@ nodeunitShim({ }, }); - test.done(); - }, - }, - 'applyRemovalPolicy default includes Update policy'(test: Test) { + }); + }); + + test('applyRemovalPolicy default includes Update policy', () => { // GIVEN const app = new core.App(); const stack = new core.Stack(app, 'TestStack'); @@ -61,7 +60,7 @@ nodeunitShim({ resource.applyRemovalPolicy(core.RemovalPolicy.RETAIN); // THEN - test.deepEqual(app.synth().getStackByName(stack.stackName).template, { + expect(app.synth().getStackByName(stack.stackName).template).toEqual({ Resources: { DefaultResource: { Type: 'Test::Resource::Fake', @@ -71,10 +70,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'can switch off updating Update policy'(test: Test) { + }); + + test('can switch off updating Update policy', () => { // GIVEN const app = new core.App(); const stack = new core.Stack(app, 'TestStack'); @@ -86,7 +85,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(app.synth().getStackByName(stack.stackName).template, { + expect(app.synth().getStackByName(stack.stackName).template).toEqual({ Resources: { DefaultResource: { Type: 'Test::Resource::Fake', @@ -95,10 +94,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'can add metadata'(test: Test) { + }); + + test('can add metadata', () => { // GIVEN const app = new core.App(); const stack = new core.Stack(app, 
'TestStack'); @@ -108,7 +107,7 @@ nodeunitShim({ resource.addMetadata('Beep', 'Boop'); // THEN - test.deepEqual(app.synth().getStackByName(stack.stackName).template, { + expect(app.synth().getStackByName(stack.stackName).template).toEqual({ Resources: { DefaultResource: { Type: 'Test::Resource::Fake', @@ -119,10 +118,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'can read metadata'(test: Test) { + }); + + test('can read metadata', () => { // GIVEN const app = new core.App(); const stack = new core.Stack(app, 'TestStack'); @@ -132,10 +131,10 @@ nodeunitShim({ // THEN expect(resource.getMetadata('Beep')).toEqual('Boop'); - test.done(); - }, - 'subclasses can override "shouldSynthesize" to lazy-determine if the resource should be included'(test: Test) { + }); + + test('subclasses can override "shouldSynthesize" to lazy-determine if the resource should be included', () => { // GIVEN class HiddenCfnResource extends core.CfnResource { protected shouldSynthesize() { @@ -155,41 +154,39 @@ nodeunitShim({ r2.node.addDependency(subtree); // THEN - only R2 is synthesized - test.deepEqual(app.synth().getStackByName(stack.stackName).template, { + expect(app.synth().getStackByName(stack.stackName).template).toEqual({ Resources: { R2: { Type: 'Foo::R2' } }, // No DependsOn! }); - test.done(); - }, - 'CfnResource cannot be created outside Stack'(test: Test) { + }); + + test('CfnResource cannot be created outside Stack', () => { const app = new core.App(); - test.throws(() => { + expect(() => { new core.CfnResource(app, 'Resource', { type: 'Some::Resource', }); - }, /should be created in the scope of a Stack, but no Stack found/); + }).toThrow(/should be created in the scope of a Stack, but no Stack found/); - test.done(); - }, + }); /** * Stages start a new scope, which does not count as a Stack anymore */ - 'CfnResource cannot be in Stage in Stack'(test: Test) { + test('CfnResource cannot be in Stage in Stack', () => { const app = new core.App(); const stack = new core.Stack(app, 'Stack'); const stage = new core.Stage(stack, 'Stage'); - test.throws(() => { + expect(() => { new core.CfnResource(stage, 'Resource', { type: 'Some::Resource', }); - }, /should be created in the scope of a Stack, but no Stack found/); + }).toThrow(/should be created in the scope of a Stack, but no Stack found/); - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/core/test/condition.test.ts b/packages/@aws-cdk/core/test/condition.test.ts index 8c43f9eb1507c..54b5a90fea515 100644 --- a/packages/@aws-cdk/core/test/condition.test.ts +++ b/packages/@aws-cdk/core/test/condition.test.ts @@ -1,9 +1,8 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as cdk from '../lib'; import { toCloudFormation } from './util'; -nodeunitShim({ - 'chain conditions'(test: Test) { +describe('condition', () => { + test('chain conditions', () => { // GIVEN const stack = new cdk.Stack(); const param = new cdk.CfnParameter(stack, 'Param1', { type: 'String' }); @@ -17,7 +16,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Parameters: { Param1: { Type: 'String' } }, Conditions: { Condition1: { 'Fn::Equals': ['a', 'b'] }, @@ -33,10 +32,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'condition expressions can be embedded as strings'(test: Test) { + }); + + test('condition expressions can be embedded as strings', () => { // GIVEN const stack = new cdk.Stack(); const propValue: string = cdk.Fn.conditionIf('Cond', 'A', 'B').toString(); @@ -50,8 +49,8 @@ 
nodeunitShim({ }); // THEN - test.ok(cdk.Token.isUnresolved(propValue)); - test.deepEqual(toCloudFormation(stack), { + expect(cdk.Token.isUnresolved(propValue)).toEqual(true); + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: { Type: 'AWS::Foo::Bar', @@ -61,6 +60,6 @@ nodeunitShim({ }, }, }); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/core/test/construct.test.ts b/packages/@aws-cdk/core/test/construct.test.ts index a190ebaac7327..e8b7c5f00435d 100644 --- a/packages/@aws-cdk/core/test/construct.test.ts +++ b/packages/@aws-cdk/core/test/construct.test.ts @@ -1,40 +1,39 @@ import * as cxschema from '@aws-cdk/cloud-assembly-schema'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App as Root, Aws, Construct, ConstructNode, ConstructOrder, IConstruct, Lazy, ValidationError } from '../lib'; import { Annotations } from '../lib/annotations'; import { reEnableStackTraceCollection, restoreStackTraceColection } from './util'; /* eslint-disable @typescript-eslint/naming-convention */ -nodeunitShim({ - 'the "Root" construct is a special construct which can be used as the root of the tree'(test: Test) { +describe('construct', () => { + test('the "Root" construct is a special construct which can be used as the root of the tree', () => { const root = new Root(); - test.equal(root.node.id, '', 'if not specified, name of a root construct is an empty string'); - test.ok(!root.node.scope, 'no parent'); - test.equal(root.node.children.length, 1); - test.done(); - }, + expect(root.node.id).toEqual(''); + expect(root.node.scope).toBeUndefined(); + expect(root.node.children.length).toEqual(1); - 'constructs cannot be created with an empty name unless they are root'(test: Test) { + }); + + test('constructs cannot be created with an empty name unless they are root', () => { const root = new Root(); - test.throws(() => new Construct(root, '')); - test.done(); - }, + expect(() => new Construct(root, '')).toThrow(); + + }); - 'construct.name returns the name of the construct'(test: Test) { + test('construct.name returns the name of the construct', () => { const t = createTree(); - test.equal(t.child1.node.id, 'Child1'); - test.equal(t.child2.node.id, 'Child2'); - test.equal(t.child1_1.node.id, 'Child11'); - test.equal(t.child1_2.node.id, 'Child12'); - test.equal(t.child1_1_1.node.id, 'Child111'); - test.equal(t.child2_1.node.id, 'Child21'); + expect(t.child1.node.id).toEqual('Child1'); + expect(t.child2.node.id).toEqual('Child2'); + expect(t.child1_1.node.id).toEqual('Child11'); + expect(t.child1_2.node.id).toEqual('Child12'); + expect(t.child1_1_1.node.id).toEqual('Child111'); + expect(t.child2_1.node.id).toEqual('Child21'); + - test.done(); - }, + }); - 'construct id can use any character except the path separator'(test: Test) { + test('construct id can use any character except the path separator', () => { const root = new Root(); new Construct(root, 'valid'); new Construct(root, 'ValiD'); @@ -48,33 +47,33 @@ nodeunitShim({ new Construct(root, 'in-Valid' ); new Construct(root, 'in\\Valid' ); new Construct(root, 'in.Valid' ); - test.done(); - }, - 'if construct id contains path seperators, they will be replaced by double-dash'(test: Test) { + }); + + test('if construct id contains path seperators, they will be replaced by double-dash', () => { const root = new Root(); const c = new Construct(root, 'Boom/Boom/Bam'); - test.deepEqual(c.node.id, 'Boom--Boom--Bam'); - test.done(); - }, + expect(c.node.id).toEqual('Boom--Boom--Bam'); + + }); - 'if "undefined" is 
forcefully used as an "id", it will be treated as an empty string'(test: Test) { + test('if "undefined" is forcefully used as an "id", it will be treated as an empty string', () => { const c = new Construct(undefined as any, undefined as any); - test.deepEqual(c.node.id, ''); - test.done(); - }, + expect(c.node.id).toEqual(''); - 'dont allow unresolved tokens to be used in construct IDs'(test: Test) { + }); + + test('dont allow unresolved tokens to be used in construct IDs', () => { // GIVEN const root = new Root(); const token = Lazy.string({ produce: () => 'lazy' }); // WHEN + THEN - test.throws(() => new Construct(root, `MyID: ${token}`), /Cannot use tokens in construct ID: MyID: \${Token/); - test.done(); - }, + expect(() => new Construct(root, `MyID: ${token}`)).toThrow(/Cannot use tokens in construct ID: MyID: \${Token/); + + }); - 'construct.uniqueId returns a tree-unique alphanumeric id of this construct'(test: Test) { + test('construct.uniqueId returns a tree-unique alphanumeric id of this construct', () => { const root = new Root(); const child1 = new Construct(root, 'This is the first child'); @@ -82,85 +81,85 @@ nodeunitShim({ const c1 = new Construct(child2, 'My construct'); const c2 = new Construct(child1, 'My construct'); - test.deepEqual(c1.node.path, 'This is the first child/Second level/My construct'); - test.deepEqual(c2.node.path, 'This is the first child/My construct'); - test.deepEqual(c1.node.uniqueId, 'ThisisthefirstchildSecondlevelMyconstruct202131E0'); - test.deepEqual(c2.node.uniqueId, 'ThisisthefirstchildMyconstruct8C288DF9'); - test.done(); - }, + expect(c1.node.path).toEqual('This is the first child/Second level/My construct'); + expect(c2.node.path).toEqual('This is the first child/My construct'); + expect(c1.node.uniqueId).toEqual('ThisisthefirstchildSecondlevelMyconstruct202131E0'); + expect(c2.node.uniqueId).toEqual('ThisisthefirstchildMyconstruct8C288DF9'); + + }); - 'cannot calculate uniqueId if the construct path is ["Default"]'(test: Test) { + test('cannot calculate uniqueId if the construct path is ["Default"]', () => { const root = new Root(); const c = new Construct(root, 'Default'); - test.throws(() => c.node.uniqueId, /Unable to calculate a unique id for an empty set of components/); - test.done(); - }, + expect(() => c.node.uniqueId).toThrow(/Unable to calculate a unique id for an empty set of components/); - 'construct.getChildren() returns an array of all children'(test: Test) { + }); + + test('construct.getChildren() returns an array of all children', () => { const root = new Root(); const child = new Construct(root, 'Child1'); new Construct(root, 'Child2'); - test.equal(child.node.children.length, 0, 'no children'); - test.equal(root.node.children.length, 3, 'three children are expected'); - test.done(); - }, + expect(child.node.children.length).toEqual(0); + expect(root.node.children.length).toEqual(3); + + }); - 'construct.findChild(name) can be used to retrieve a child from a parent'(test: Test) { + test('construct.findChild(name) can be used to retrieve a child from a parent', () => { const root = new Root(); const child = new Construct(root, 'Contruct'); - test.strictEqual(root.node.tryFindChild(child.node.id), child, 'findChild(name) can be used to retrieve the child from a parent'); - test.ok(!root.node.tryFindChild('NotFound'), 'findChild(name) returns undefined if the child is not found'); - test.done(); - }, + expect(root.node.tryFindChild(child.node.id)).toEqual(child); + expect(root.node.tryFindChild('NotFound')).toBeUndefined(); 
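// Editor's sketch: every hunk in these test-file diffs applies the same
// mechanical nodeunit-shim -> jest translation. A minimal, self-contained
// illustration of that mapping (the suite name and values here are
// hypothetical, not taken from any migrated file):
describe('nodeunit-shim to jest mapping (sketch)', () => {
  // 'name'(test: Test) { ...; test.done(); }  becomes  test('name', () => { ... });
  test('equality and truthiness matchers', () => {
    expect({ id: 'Child1' }).toEqual({ id: 'Child1' }); // was: test.deepEqual(a, b)
    expect('something').toBeDefined();                  // was: test.ok(x)
  });
  test('throws becomes expect(fn).toThrow', () => {
    const fn = () => { throw new Error('boom'); };      // hypothetical thrower
    expect(fn).toThrow(/boom/);                         // was: test.throws(fn, /boom/)
  });
  // test.done() is simply dropped: a synchronous jest test ends on return.
});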
- 'construct.getChild(name) can be used to retrieve a child from a parent'(test: Test) { + }); + + test('construct.getChild(name) can be used to retrieve a child from a parent', () => { const root = new Root(); const child = new Construct(root, 'Contruct'); - test.strictEqual(root.node.findChild(child.node.id), child, 'getChild(name) can be used to retrieve the child from a parent'); - test.throws(() => { + expect(root.node.findChild(child.node.id)).toEqual(child); + expect(() => { root.node.findChild('NotFound'); - }, '', 'getChild(name) returns undefined if the child is not found'); - test.done(); - }, + }).toThrow(); + + }); - 'can remove children from the tree using tryRemoveChild()'(test: Test) { + test('can remove children from the tree using tryRemoveChild()', () => { const root = new Root(); const childrenBeforeAdding = root.node.children.length; // Invariant to adding 'Metadata' resource or not // Add & remove const child = new Construct(root, 'Construct'); - test.equals(true, root.node.tryRemoveChild(child.node.id)); - test.equals(false, root.node.tryRemoveChild(child.node.id)); // Second time does nothing + expect(true).toEqual(root.node.tryRemoveChild(child.node.id)); + expect(false).toEqual(root.node.tryRemoveChild(child.node.id)); // Second time does nothing + + expect(undefined).toEqual(root.node.tryFindChild(child.node.id)); + expect(childrenBeforeAdding).toEqual(root.node.children.length); - test.equals(undefined, root.node.tryFindChild(child.node.id)); - test.equals(childrenBeforeAdding, root.node.children.length); - test.done(); - }, + }); - 'construct.toString() and construct.toTreeString() can be used for diagnostics'(test: Test) { + test('construct.toString() and construct.toTreeString() can be used for diagnostics', () => { const t = createTree(); - test.equal(t.root.toString(), ''); - test.equal(t.child1_1_1.toString(), 'HighChild/Child1/Child11/Child111'); - test.equal(t.child2.toString(), 'HighChild/Child2'); - test.equal(toTreeString(t.root), 'App\n TreeMetadata [Tree]\n Construct [HighChild]\n Construct [Child1]\n Construct [Child11]\n Construct [Child111]\n Construct [Child12]\n Construct [Child2]\n Construct [Child21]\n'); - test.done(); - }, + expect(t.root.toString()).toEqual(''); + expect(t.child1_1_1.toString()).toEqual('HighChild/Child1/Child11/Child111'); + expect(t.child2.toString()).toEqual('HighChild/Child2'); + expect(toTreeString(t.root)).toEqual('App\n TreeMetadata [Tree]\n Construct [HighChild]\n Construct [Child1]\n Construct [Child11]\n Construct [Child111]\n Construct [Child12]\n Construct [Child2]\n Construct [Child21]\n'); - 'construct.getContext(key) can be used to read a value from context defined at the root level'(test: Test) { + }); + + test('construct.getContext(key) can be used to read a value from context defined at the root level', () => { const context = { ctx1: 12, ctx2: 'hello', }; const t = createTree(context); - test.equal(t.child1_2.node.tryGetContext('ctx1'), 12); - test.equal(t.child1_1_1.node.tryGetContext('ctx2'), 'hello'); - test.done(); - }, + expect(t.child1_2.node.tryGetContext('ctx1')).toEqual(12); + expect(t.child1_1_1.node.tryGetContext('ctx2')).toEqual('hello'); + + }); // eslint-disable-next-line max-len - 'construct.setContext(k,v) sets context at some level and construct.getContext(key) will return the lowermost value defined in the stack'(test: Test) { + test('construct.setContext(k,v) sets context at some level and construct.getContext(key) will return the lowermost value defined in the stack', () => { const 
root = new Root(); const highChild = new Construct(root, 'highChild'); highChild.node.setContext('c1', 'root'); @@ -175,98 +174,98 @@ nodeunitShim({ child3.node.setContext('c1', 'child3'); child3.node.setContext('c4', 'child3'); - test.equal(highChild.node.tryGetContext('c1'), 'root'); - test.equal(highChild.node.tryGetContext('c2'), 'root'); - test.equal(highChild.node.tryGetContext('c3'), undefined); + expect(highChild.node.tryGetContext('c1')).toEqual('root'); + expect(highChild.node.tryGetContext('c2')).toEqual('root'); + expect(highChild.node.tryGetContext('c3')).toEqual(undefined); - test.equal(child1.node.tryGetContext('c1'), 'root'); - test.equal(child1.node.tryGetContext('c2'), 'child1'); - test.equal(child1.node.tryGetContext('c3'), 'child1'); + expect(child1.node.tryGetContext('c1')).toEqual('root'); + expect(child1.node.tryGetContext('c2')).toEqual('child1'); + expect(child1.node.tryGetContext('c3')).toEqual('child1'); - test.equal(child2.node.tryGetContext('c1'), 'root'); - test.equal(child2.node.tryGetContext('c2'), 'root'); - test.equal(child2.node.tryGetContext('c3'), undefined); + expect(child2.node.tryGetContext('c1')).toEqual('root'); + expect(child2.node.tryGetContext('c2')).toEqual('root'); + expect(child2.node.tryGetContext('c3')).toEqual(undefined); - test.equal(child3.node.tryGetContext('c1'), 'child3'); - test.equal(child3.node.tryGetContext('c2'), 'child1'); - test.equal(child3.node.tryGetContext('c3'), 'child1'); - test.equal(child3.node.tryGetContext('c4'), 'child3'); + expect(child3.node.tryGetContext('c1')).toEqual('child3'); + expect(child3.node.tryGetContext('c2')).toEqual('child1'); + expect(child3.node.tryGetContext('c3')).toEqual('child1'); + expect(child3.node.tryGetContext('c4')).toEqual('child3'); - test.done(); - }, - 'construct.setContext(key, value) can only be called before adding any children'(test: Test) { + }); + + test('construct.setContext(key, value) can only be called before adding any children', () => { const root = new Root(); new Construct(root, 'child1'); - test.throws(() => root.node.setContext('k', 'v')); - test.done(); - }, + expect(() => root.node.setContext('k', 'v')).toThrow(); + + }); - 'fails if context key contains unresolved tokens'(test: Test) { + test('fails if context key contains unresolved tokens', () => { const root = new Root(); - test.throws(() => root.node.setContext(`my-${Aws.REGION}`, 'foo'), /Invalid context key/); - test.throws(() => root.node.tryGetContext(Aws.REGION), /Invalid context key/); - test.done(); - }, + expect(() => root.node.setContext(`my-${Aws.REGION}`, 'foo')).toThrow(/Invalid context key/); + expect(() => root.node.tryGetContext(Aws.REGION)).toThrow(/Invalid context key/); - 'construct.pathParts returns an array of strings of all names from root to node'(test: Test) { + }); + + test('construct.pathParts returns an array of strings of all names from root to node', () => { const tree = createTree(); - test.deepEqual(tree.root.node.path, ''); - test.deepEqual(tree.child1_1_1.node.path, 'HighChild/Child1/Child11/Child111'); - test.done(); - }, + expect(tree.root.node.path).toEqual(''); + expect(tree.child1_1_1.node.path).toEqual('HighChild/Child1/Child11/Child111'); + + }); - 'if a root construct has a name, it should be included in the path'(test: Test) { + test('if a root construct has a name, it should be included in the path', () => { const tree = createTree({}); -
test.deepEqual(tree.root.node.path, ''); - test.deepEqual(tree.child1_1_1.node.path, 'HighChild/Child1/Child11/Child111'); - test.done(); - }, + expect(tree.root.node.path).toEqual(''); + expect(tree.child1_1_1.node.path).toEqual('HighChild/Child1/Child11/Child111'); - 'construct can not be created with the name of a sibling'(test: Test) { + }); + + test('construct can not be created with the name of a sibling', () => { const root = new Root(); // WHEN new Construct(root, 'SameName'); // THEN: They have different paths - test.throws(() => { + expect(() => { new Construct(root, 'SameName'); - }, /There is already a Construct with name 'SameName' in App/); + }).toThrow(/There is already a Construct with name 'SameName' in App/); // WHEN const c0 = new Construct(root, 'c0'); new Construct(c0, 'SameName'); // THEN: They have different paths - test.throws(() => { + expect(() => { new Construct(c0, 'SameName'); - }, /There is already a Construct with name 'SameName' in Construct \[c0\]/); + }).toThrow(/There is already a Construct with name 'SameName' in Construct \[c0\]/); + - test.done(); - }, + }); - 'addMetadata(type, data) can be used to attach metadata to constructs FIND_ME'(test: Test) { + test('addMetadata(type, data) can be used to attach metadata to constructs FIND_ME', () => { const previousValue = reEnableStackTraceCollection(); const root = new Root(); const con = new Construct(root, 'MyConstruct'); - test.deepEqual(con.node.metadata, [], 'starts empty'); + expect(con.node.metadata).toEqual([]); con.node.addMetadata('key', 'value'); con.node.addMetadata('number', 103); con.node.addMetadata('array', [123, 456]); restoreStackTraceColection(previousValue); - test.deepEqual(con.node.metadata[0].type, 'key'); - test.deepEqual(con.node.metadata[0].data, 'value'); - test.deepEqual(con.node.metadata[1].data, 103); - test.deepEqual(con.node.metadata[2].data, [123, 456]); - test.ok(con.node.metadata[0].trace && con.node.metadata[0].trace[1].indexOf('FIND_ME') !== -1, 'First stack line should include this function\'s name'); - test.done(); - }, + expect(con.node.metadata[0].type).toEqual('key'); + expect(con.node.metadata[0].data).toEqual('value'); + expect(con.node.metadata[1].data).toEqual(103); + expect(con.node.metadata[2].data).toEqual([123, 456]); + expect(con.node.metadata[0].trace && con.node.metadata[0].trace[1].indexOf('FIND_ME')).not.toEqual(-1); + + }); - 'addMetadata(type, undefined/null) is ignored'(test: Test) { + test('addMetadata(type, undefined/null) is ignored', () => { const root = new Root(); const con = new Construct(root, 'Foo'); con.node.addMetadata('Null', null); @@ -277,65 +276,65 @@ nodeunitShim({ const exists = (key: string) => con.node.metadata.find(x => x.type === key); - test.ok(!exists('Null')); - test.ok(!exists('Undefined')); - test.ok(exists('True')); - test.ok(exists('False')); - test.ok(exists('Empty')); - test.done(); - }, + expect(exists('Null')).toBeUndefined(); + expect(exists('Undefined')).toBeUndefined(); + expect(exists('True')).toBeDefined(); + expect(exists('False')).toBeDefined(); + expect(exists('Empty')).toBeDefined(); - 'addWarning(message) can be used to add a "WARNING" message entry to the construct'(test: Test) { + }); + + test('addWarning(message) can be used to add a "WARNING" message entry to the construct', () => { const previousValue = reEnableStackTraceCollection(); const root = new Root(); const con = new Construct(root, 'MyConstruct'); Annotations.of(con).addWarning('This construct is deprecated, use the other one instead'); 
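// Editor's illustration (hypothetical values): two easy-to-miss pitfalls in
// this kind of mechanical migration, corrected in the hunks above. A bare
// expect() with no matcher asserts nothing, and nodeunit checks written as
// `x !== -1` need an explicit .not on the jest side.
describe('migration pitfalls (sketch)', () => {
  test('keep the matcher that test.throws() implied', () => {
    const fn = () => { throw new Error('nope'); }; // hypothetical thrower
    // expect(fn);              <- would pass even if fn never threw
    expect(fn).toThrow(/nope/); // faithful translation of test.throws(fn, /nope/)
  });
  test('negated assertions need .not', () => {
    const trace = 'at FIND_ME (construct.test.ts:1:1)'; // hypothetical stack line
    // test.ok(trace.indexOf('FIND_ME') !== -1) becomes:
    expect(trace.indexOf('FIND_ME')).not.toEqual(-1);
  });
});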
restoreStackTraceColection(previousValue); - test.deepEqual(con.node.metadata[0].type, cxschema.ArtifactMetadataEntryType.WARN); - test.deepEqual(con.node.metadata[0].data, 'This construct is deprecated, use the other one instead'); - test.ok(con.node.metadata[0].trace && con.node.metadata[0].trace.length > 0); - test.done(); - }, + expect(con.node.metadata[0].type).toEqual(cxschema.ArtifactMetadataEntryType.WARN); + expect(con.node.metadata[0].data).toEqual('This construct is deprecated, use the other one instead'); + expect(con.node.metadata[0].trace && con.node.metadata[0].trace.length > 0).toEqual(true); + + }); - 'addError(message) can be used to add a "ERROR" message entry to the construct'(test: Test) { + test('addError(message) can be used to add a "ERROR" message entry to the construct', () => { const previousValue = reEnableStackTraceCollection(); const root = new Root(); const con = new Construct(root, 'MyConstruct'); Annotations.of(con).addError('Stop!'); restoreStackTraceColection(previousValue); - test.deepEqual(con.node.metadata[0].type, cxschema.ArtifactMetadataEntryType.ERROR); - test.deepEqual(con.node.metadata[0].data, 'Stop!'); - test.ok(con.node.metadata[0].trace && con.node.metadata[0].trace.length > 0); - test.done(); - }, + expect(con.node.metadata[0].type).toEqual(cxschema.ArtifactMetadataEntryType.ERROR); + expect(con.node.metadata[0].data).toEqual('Stop!'); + expect(con.node.metadata[0].trace && con.node.metadata[0].trace.length > 0).toEqual(true); - 'addInfo(message) can be used to add an "INFO" message entry to the construct'(test: Test) { + }); + + test('addInfo(message) can be used to add an "INFO" message entry to the construct', () => { const previousValue = reEnableStackTraceCollection(); const root = new Root(); const con = new Construct(root, 'MyConstruct'); Annotations.of(con).addInfo('Hey there, how do you do?'); restoreStackTraceColection(previousValue); - test.deepEqual(con.node.metadata[0].type, cxschema.ArtifactMetadataEntryType.INFO); - test.deepEqual(con.node.metadata[0].data, 'Hey there, how do you do?'); - test.ok(con.node.metadata[0].trace && con.node.metadata[0].trace.length > 0); - test.done(); - }, + expect(con.node.metadata[0].type).toEqual(cxschema.ArtifactMetadataEntryType.INFO); + expect(con.node.metadata[0].data).toEqual('Hey there, how do you do?'); + expect(con.node.metadata[0].trace && con.node.metadata[0].trace.length > 0).toEqual(true); + + }); - 'multiple children of the same type, with explicit names are welcome'(test: Test) { + test('multiple children of the same type, with explicit names are welcome', () => { const root = new Root(); new MyBeautifulConstruct(root, 'mbc1'); new MyBeautifulConstruct(root, 'mbc2'); new MyBeautifulConstruct(root, 'mbc3'); new MyBeautifulConstruct(root, 'mbc4'); - test.ok(root.node.children.length >= 4); - test.done(); - }, + expect(root.node.children.length).toBeGreaterThanOrEqual(4); + + }); // eslint-disable-next-line max-len - 'construct.validate() can be implemented to perform validation, ConstructNode.validate(construct.node) will return all errors from the subtree (DFS)'(test: Test) { + test('construct.validate() can be implemented to perform validation, ConstructNode.validate(construct.node) will return all errors from the subtree (DFS)', () => { class MyConstruct extends Construct { protected validate() { @@ -379,7 +378,7 @@ nodeunitShim({ const errors = ConstructNode.validate(stack.node).map((v: ValidationError) => ({ path: v.source.node.path, message: v.message })); // validate DFS - 
test.deepEqual(errors, [ + expect(errors).toEqual([ { path: 'MyConstruct', message: 'my-error1' }, { path: 'MyConstruct', message: 'my-error2' }, { path: 'TheirConstruct/YourConstruct', message: 'your-error1' }, @@ -387,10 +386,10 @@ nodeunitShim({ { path: '', message: 'stack-error' }, ]); - test.done(); - }, - 'construct.lock() protects against adding children anywhere under this construct (direct or indirect)'(test: Test) { + }); + + test('construct.lock() protects against adding children anywhere under this construct (direct or indirect)', () => { class LockableConstruct extends Construct { public lockMe() { @@ -414,9 +413,9 @@ nodeunitShim({ // now we should still be able to add children to c0b, but not to c0a or any its children new Construct(c0b, 'c1a'); - test.throws(() => new Construct(c0a, 'fail1'), /Cannot add children to "c0a" during synthesis/); - test.throws(() => new Construct(c1a, 'fail2'), /Cannot add children to "c0a\/c1a" during synthesis/); - test.throws(() => new Construct(c1b, 'fail3'), /Cannot add children to "c0a\/c1b" during synthesis/); + expect(() => new Construct(c0a, 'fail1')).toThrow(/Cannot add children to "c0a" during synthesis/); + expect(() => new Construct(c1a, 'fail2')).toThrow(/Cannot add children to "c0a\/c1a" during synthesis/); + expect(() => new Construct(c1b, 'fail3')).toThrow(/Cannot add children to "c0a\/c1b" during synthesis/); c0a.unlockMe(); @@ -424,10 +423,10 @@ nodeunitShim({ new Construct(c1a, 'c1aZ'); new Construct(c1b, 'c1bZ'); - test.done(); - }, - 'findAll returns a list of all children in either DFS or BFS'(test: Test) { + }); + + test('findAll returns a list of all children in either DFS or BFS', () => { // GIVEN const c1 = new Construct(undefined as any, '1'); const c2 = new Construct(c1, '2'); @@ -436,75 +435,75 @@ nodeunitShim({ new Construct(c2, '5'); // THEN - test.deepEqual(c1.node.findAll().map(x => x.node.id), c1.node.findAll(ConstructOrder.PREORDER).map(x => x.node.id)); // default is PreOrder - test.deepEqual(c1.node.findAll(ConstructOrder.PREORDER).map(x => x.node.id), ['1', '2', '4', '5', '3']); - test.deepEqual(c1.node.findAll(ConstructOrder.POSTORDER).map(x => x.node.id), ['4', '5', '2', '3', '1']); - test.done(); - }, + expect(c1.node.findAll().map(x => x.node.id)).toEqual(c1.node.findAll(ConstructOrder.PREORDER).map(x => x.node.id)); // default is PreOrder + expect(c1.node.findAll(ConstructOrder.PREORDER).map(x => x.node.id)).toEqual(['1', '2', '4', '5', '3']); + expect(c1.node.findAll(ConstructOrder.POSTORDER).map(x => x.node.id)).toEqual(['4', '5', '2', '3', '1']); - 'ancestors returns a list of parents up to root'(test: Test) { + }); + + test('ancestors returns a list of parents up to root', () => { const { child1_1_1 } = createTree(); - test.deepEqual(child1_1_1.node.scopes.map(x => x.node.id), ['', 'HighChild', 'Child1', 'Child11', 'Child111']); - test.done(); - }, + expect(child1_1_1.node.scopes.map(x => x.node.id)).toEqual(['', 'HighChild', 'Child1', 'Child11', 'Child111']); + + }); - '"root" returns the root construct'(test: Test) { + test('"root" returns the root construct', () => { const { child1, child2, child1_1_1, root } = createTree(); - test.ok(child1.node.root === root); - test.ok(child2.node.root === root); - test.ok(child1_1_1.node.root === root); - test.done(); - }, - - defaultChild: { - 'returns the child with id "Resource"'(test: Test) { + expect(child1.node.root).toEqual(root); + expect(child2.node.root).toEqual(root); + expect(child1_1_1.node.root).toEqual(root); + + }); + + 
describe('defaultChild', () => { + test('returns the child with id "Resource"', () => { const root = new Root(); new Construct(root, 'child1'); const defaultChild = new Construct(root, 'Resource'); new Construct(root, 'child2'); - test.same(root.node.defaultChild, defaultChild); - test.done(); - }, - 'returns the child with id "Default"'(test: Test) { + expect(root.node.defaultChild).toEqual(defaultChild); + + }); + test('returns the child with id "Default"', () => { const root = new Root(); new Construct(root, 'child1'); const defaultChild = new Construct(root, 'Default'); new Construct(root, 'child2'); - test.same(root.node.defaultChild, defaultChild); - test.done(); - }, - 'can override defaultChild'(test: Test) { + expect(root.node.defaultChild).toEqual(defaultChild); + + }); + test('can override defaultChild', () => { const root = new Root(); new Construct(root, 'Resource'); const defaultChild = new Construct(root, 'OtherResource'); root.node.defaultChild = defaultChild; - test.same(root.node.defaultChild, defaultChild); - test.done(); - }, - 'returns "undefined" if there is no default'(test: Test) { + expect(root.node.defaultChild).toEqual(defaultChild); + + }); + test('returns "undefined" if there is no default', () => { const root = new Root(); new Construct(root, 'child1'); new Construct(root, 'child2'); - test.equal(root.node.defaultChild, undefined); - test.done(); - }, - 'fails if there are both "Resource" and "Default"'(test: Test) { + expect(root.node.defaultChild).toEqual(undefined); + + }); + test('fails if there are both "Resource" and "Default"', () => { const root = new Root(); new Construct(root, 'child1'); new Construct(root, 'Default'); new Construct(root, 'child2'); new Construct(root, 'Resource'); - test.throws(() => root.node.defaultChild, + expect(() => root.node.defaultChild).toThrow( /Cannot determine default child for . 
There is both a child with id "Resource" and id "Default"/); - test.done(); - }, - }, + + }); + }); }); function createTree(context?: any) { diff --git a/packages/@aws-cdk/core/test/context.test.ts b/packages/@aws-cdk/core/test/context.test.ts index 89c720e735999..b8e0e85c169a4 100644 --- a/packages/@aws-cdk/core/test/context.test.ts +++ b/packages/@aws-cdk/core/test/context.test.ts @@ -1,47 +1,46 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Construct, Stack } from '../lib'; import { ContextProvider } from '../lib/context-provider'; import { synthesize } from '../lib/private/synthesis'; -nodeunitShim({ - 'AvailabilityZoneProvider returns a list with dummy values if the context is not available'(test: Test) { +describe('context', () => { + test('AvailabilityZoneProvider returns a list with dummy values if the context is not available', () => { const stack = new Stack(undefined, 'TestStack', { env: { account: '12345', region: 'us-east-1' } }); const azs = stack.availabilityZones; - test.deepEqual(azs, ['dummy1a', 'dummy1b', 'dummy1c']); - test.done(); - }, + expect(azs).toEqual(['dummy1a', 'dummy1b', 'dummy1c']); - 'AvailabilityZoneProvider will return context list if available'(test: Test) { + }); + + test('AvailabilityZoneProvider will return context list if available', () => { const stack = new Stack(undefined, 'TestStack', { env: { account: '12345', region: 'us-east-1' } }); const before = stack.availabilityZones; - test.deepEqual(before, ['dummy1a', 'dummy1b', 'dummy1c']); + expect(before).toEqual(['dummy1a', 'dummy1b', 'dummy1c']); const key = expectedContextKey(stack); stack.node.setContext(key, ['us-east-1a', 'us-east-1b']); const azs = stack.availabilityZones; - test.deepEqual(azs, ['us-east-1a', 'us-east-1b']); + expect(azs).toEqual(['us-east-1a', 'us-east-1b']); + - test.done(); - }, + }); - 'AvailabilityZoneProvider will complain if not given a list'(test: Test) { + test('AvailabilityZoneProvider will complain if not given a list', () => { const stack = new Stack(undefined, 'TestStack', { env: { account: '12345', region: 'us-east-1' } }); const before = stack.availabilityZones; - test.deepEqual(before, ['dummy1a', 'dummy1b', 'dummy1c']); + expect(before).toEqual(['dummy1a', 'dummy1b', 'dummy1c']); const key = expectedContextKey(stack); stack.node.setContext(key, 'not-a-list'); - test.throws( + expect( () => stack.availabilityZones, - ); + ).toThrow(); - test.done(); - }, - 'ContextProvider consistently generates a key'(test: Test) { + }); + + test('ContextProvider consistently generates a key', () => { const stack = new Stack(undefined, 'TestStack', { env: { account: '12345', region: 'us-east-1' } }); const key = ContextProvider.getKey(stack, { provider: 'ssm', @@ -51,7 +50,7 @@ nodeunitShim({ }, }); - test.deepEqual(key, { + expect(key).toEqual({ key: 'ssm:account=12345:anyStringParam=bar:parameterName=foo:region=us-east-1', props: { account: '12345', @@ -69,7 +68,7 @@ nodeunitShim({ igw: false, }, }); - test.deepEqual(complexKey, { + expect(complexKey).toEqual({ key: 'vpc:account=12345:cidrBlock=192.168.0.16:igw=false:region=us-east-1:tags.Env=Preprod:tags.Name=MyVPC', props: { account: '12345', @@ -79,10 +78,10 @@ nodeunitShim({ igw: false, }, }); - test.done(); - }, - 'Key generation can contain arbitrarily deep structures'(test: Test) { + }); + + test('Key generation can contain arbitrarily deep structures', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '12345', region: 'us-east-1' } }); @@ -98,7 +97,7 @@ 
nodeunitShim({ }); // THEN - test.deepEqual(key, { + expect(key).toEqual({ key: 'provider:account=12345:list.0.key=key1:list.0.value=value1:list.1.key=key2:list.1.value=value2:region=us-east-1', props: { account: '12345', @@ -110,10 +109,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'Keys with undefined values are not serialized'(test: Test) { + }); + + test('Keys with undefined values are not serialized', () => { // GIVEN const stack = new Stack(undefined, 'TestStack', { env: { account: '12345', region: 'us-east-1' } }); @@ -127,7 +126,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(result, { + expect(result).toEqual({ key: 'provider:account=12345:p1=42:region=us-east-1', props: { account: '12345', @@ -137,10 +136,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'context provider errors are attached to tree'(test: Test) { + }); + + test('context provider errors are attached to tree', () => { const contextProps = { provider: 'availability-zones' }; const contextKey = 'availability-zones:account=12345:region=us-east-1'; // Depends on the mangling algo @@ -153,7 +152,7 @@ nodeunitShim({ const construct = new Construct(stack, 'Child'); // Verify that we got the right hardcoded key above, give a descriptive error if not - test.equals(ContextProvider.getKey(construct, contextProps).key, contextKey); + expect(ContextProvider.getKey(construct, contextProps).key).toEqual(contextKey); // WHEN ContextProvider.getValue(construct, { @@ -163,10 +162,10 @@ nodeunitShim({ // THEN const error = construct.node.metadata.find(m => m.type === 'aws:cdk:error'); - test.equals(error && error.data, 'I had a boo-boo'); + expect(error && error.data).toEqual('I had a boo-boo'); + - test.done(); - }, + }); }); /** diff --git a/packages/@aws-cdk/core/test/cross-environment-token.test.ts b/packages/@aws-cdk/core/test/cross-environment-token.test.ts index a0d833996121c..e44d4d8af2c0d 100644 --- a/packages/@aws-cdk/core/test/cross-environment-token.test.ts +++ b/packages/@aws-cdk/core/test/cross-environment-token.test.ts @@ -1,12 +1,11 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, CfnOutput, CfnResource, Construct, PhysicalName, Resource, Stack } from '../lib'; import { toCloudFormation } from './util'; /* eslint-disable quote-props */ -nodeunitShim({ - 'CrossEnvironmentToken': { - 'can reference an ARN with a fixed physical name directly in a different account'(test: Test) { +describe('cross environment', () => { + describe('CrossEnvironmentToken', () => { + test('can reference an ARN with a fixed physical name directly in a different account', () => { // GIVEN const app = new App(); const stack1 = new Stack(app, 'Stack1', { @@ -30,7 +29,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack2), { + expect(toCloudFormation(stack2)).toEqual({ Outputs: { Output: { Value: { @@ -49,10 +48,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'can reference a fixed physical name directly in a different account'(test: Test) { + }); + + test('can reference a fixed physical name directly in a different account', () => { // GIVEN const app = new App(); const stack1 = new Stack(app, 'Stack1', { @@ -75,7 +74,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack2), { + expect(toCloudFormation(stack2)).toEqual({ Outputs: { Output: { Value: 'PhysicalName', @@ -83,10 +82,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'can reference an ARN with an assigned physical name directly in a different account'(test: Test) { + }); + + test('can reference an ARN with an assigned 
physical name directly in a different account', () => { // GIVEN const app = new App(); const stack1 = new Stack(app, 'Stack1', { @@ -110,7 +109,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack2), { + expect(toCloudFormation(stack2)).toEqual({ Outputs: { Output: { Value: { @@ -129,10 +128,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'can reference an assigned physical name directly in a different account'(test: Test) { + }); + + test('can reference an assigned physical name directly in a different account', () => { // GIVEN const app = new App(); const stack1 = new Stack(app, 'Stack1', { @@ -155,7 +154,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack2), { + expect(toCloudFormation(stack2)).toEqual({ Outputs: { Output: { Value: 'stack1stack1myresourcec54ced43683ebf9a3c4c', @@ -163,11 +162,11 @@ nodeunitShim({ }, }); - test.done(); - }, - }, - 'cannot reference a deploy-time physical name across environments'(test: Test) { + }); + }); + + test('cannot reference a deploy-time physical name across environments', () => { // GIVEN const app = new App(); const stack1 = new Stack(app, 'Stack1', { @@ -190,13 +189,13 @@ nodeunitShim({ }); // THEN - test.throws(() => toCloudFormation(stack2), + expect(() => toCloudFormation(stack2)).toThrow( /Cannot use resource 'Stack1\/MyResource' in a cross-environment fashion/); - test.done(); - }, - 'cross environment when stack is a substack'(test: Test) { + }); + + test('cross environment when stack is a substack', () => { const app = new App(); const parentStack = new Stack(app, 'ParentStack', { @@ -218,7 +217,7 @@ nodeunitShim({ const assembly = app.synth(); - test.deepEqual(assembly.getStackByName(parentStack.stackName).template, { + expect(assembly.getStackByName(parentStack.stackName).template).toEqual({ Resources: { ParentResource: { Type: 'Parent::Resource', @@ -229,7 +228,7 @@ nodeunitShim({ }, }); - test.deepEqual(assembly.getStackByName(childStack.stackName).template, { + expect(assembly.getStackByName(childStack.stackName).template).toEqual({ Resources: { ChildResource8C37244D: { Type: 'My::Resource', @@ -240,8 +239,8 @@ nodeunitShim({ }, }); - test.done(); - }, + + }); }); test.each([undefined, 'SomeName'])('stack.exportValue() on name attributes with PhysicalName=%s', physicalName => { diff --git a/packages/@aws-cdk/core/test/custom-resource-provider/custom-resource-provider.test.ts b/packages/@aws-cdk/core/test/custom-resource-provider/custom-resource-provider.test.ts index 5fc12ecc17c2f..c8d9082447f54 100644 --- a/packages/@aws-cdk/core/test/custom-resource-provider/custom-resource-provider.test.ts +++ b/packages/@aws-cdk/core/test/custom-resource-provider/custom-resource-provider.test.ts @@ -1,13 +1,12 @@ import * as fs from 'fs'; import * as path from 'path'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, AssetStaging, CustomResourceProvider, CustomResourceProviderRuntime, DockerImageAssetLocation, DockerImageAssetSource, Duration, FileAssetLocation, FileAssetSource, ISynthesisSession, Size, Stack } from '../../lib'; import { toCloudFormation } from '../util'; const TEST_HANDLER = `${__dirname}/mock-provider`; -nodeunitShim({ - 'minimal configuration'(test: Test) { +describe('custom resource provider', () => { + test('minimal configuration', () => { // GIVEN const stack = new Stack(); @@ -18,7 +17,7 @@ nodeunitShim({ }); // THEN - test.ok(fs.existsSync(path.join(TEST_HANDLER, '__entrypoint__.js')), 'expecting entrypoint to be copied to the handler directory'); + 
expect(fs.existsSync(path.join(TEST_HANDLER, '__entrypoint__.js'))).toEqual(true); const cfn = toCloudFormation(stack); // The asset hash constantly changes, so in order to not have to chase it, just look @@ -30,7 +29,7 @@ nodeunitShim({ const keyParam = paramNames[1]; const hashParam = paramNames[2]; - test.deepEqual(cfn, { + expect(cfn).toEqual({ Resources: { CustomMyResourceTypeCustomResourceProviderRoleBD5E655F: { Type: 'AWS::IAM::Role', @@ -120,10 +119,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'custom resource provided creates asset in new-style synthesis with relative path'(test: Test) { + }); + + test('custom resource provided creates asset in new-style synthesis with relative path', () => { // GIVEN let assetFilename : string | undefined; @@ -157,10 +156,10 @@ nodeunitShim({ throw new Error(`Asset filename must be a relative path, got: ${assetFilename}`); } - test.done(); - }, - 'policyStatements can be used to add statements to the inline policy'(test: Test) { + }); + + test('policyStatements can be used to add statements to the inline policy', () => { // GIVEN const stack = new Stack(); @@ -177,17 +176,17 @@ nodeunitShim({ // THEN const template = toCloudFormation(stack); const role = template.Resources.CustomMyResourceTypeCustomResourceProviderRoleBD5E655F; - test.deepEqual(role.Properties.Policies, [{ + expect(role.Properties.Policies).toEqual([{ PolicyName: 'Inline', PolicyDocument: { Version: '2012-10-17', Statement: [{ statement1: 123 }, { statement2: { foo: 111 } }], }, }]); - test.done(); - }, - 'memorySize, timeout and description'(test: Test) { + }); + + test('memorySize, timeout and description', () => { // GIVEN const stack = new Stack(); @@ -203,13 +202,13 @@ nodeunitShim({ // THEN const template = toCloudFormation(stack); const lambda = template.Resources.CustomMyResourceTypeCustomResourceProviderHandler29FBDD2A; - test.deepEqual(lambda.Properties.MemorySize, 2048); - test.deepEqual(lambda.Properties.Timeout, 300); - test.deepEqual(lambda.Properties.Description, 'veni vidi vici'); - test.done(); - }, + expect(lambda.Properties.MemorySize).toEqual(2048); + expect(lambda.Properties.Timeout).toEqual(300); + expect(lambda.Properties.Description).toEqual('veni vidi vici'); - 'environment variables'(test: Test) { + }); + + test('environment variables', () => { // GIVEN const stack = new Stack(); @@ -226,16 +225,16 @@ nodeunitShim({ // THEN const template = toCloudFormation(stack); const lambda = template.Resources.CustomMyResourceTypeCustomResourceProviderHandler29FBDD2A; - test.deepEqual(lambda.Properties.Environment, { + expect(lambda.Properties.Environment).toEqual({ Variables: { A: 'a', B: 'b', }, }); - test.done(); - }, - 'roleArn'(test: Test) { + }); + + test('roleArn', () => { // GIVEN const stack = new Stack(); @@ -246,13 +245,13 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(cr.roleArn), { + expect(stack.resolve(cr.roleArn)).toEqual({ 'Fn::GetAtt': [ 'CustomMyResourceTypeCustomResourceProviderRoleBD5E655F', 'Arn', ], }); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/core/test/custom-resource-provider/nodejs-entrypoint.test.ts b/packages/@aws-cdk/core/test/custom-resource-provider/nodejs-entrypoint.test.ts index 18e6dfe053ce7..98b2ee9d41924 100644 --- a/packages/@aws-cdk/core/test/custom-resource-provider/nodejs-entrypoint.test.ts +++ b/packages/@aws-cdk/core/test/custom-resource-provider/nodejs-entrypoint.test.ts @@ -4,13 +4,12 @@ import * as https from 'https'; import * as os from 'os'; import * as path from 'path'; 
import * as url from 'url'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as entrypoint from '../../lib/custom-resource-provider/nodejs-entrypoint'; -nodeunitShim({ - 'handler return value is sent back to cloudformation as a success response': { +describe('nodejs entrypoint', () => { + describe('handler return value is sent back to cloudformation as a success response', () => { - async 'physical resource id (ref)'(test: Test) { + test('physical resource id (ref)', async () => { // GIVEN const createEvent = makeEvent({ RequestType: 'Create' }); @@ -18,12 +17,12 @@ nodeunitShim({ const response = await invokeHandler(createEvent, async _ => ({ PhysicalResourceId: 'returned-from-handler' })); // THEN - test.deepEqual(response.Status, 'SUCCESS'); - test.deepEqual(response.PhysicalResourceId, 'returned-from-handler'); - test.done(); - }, + expect(response.Status).toEqual('SUCCESS'); + expect(response.PhysicalResourceId).toEqual('returned-from-handler'); + + }); - async 'data (attributes)'(test: Test) { + test('data (attributes)', async () => { // GIVEN const createEvent = makeEvent({ RequestType: 'Create' }); @@ -40,18 +39,18 @@ nodeunitShim({ }); // THEN - test.deepEqual(response.Status, 'SUCCESS'); - test.deepEqual(response.PhysicalResourceId, '', 'physical id defaults to request id'); - test.deepEqual(response.Data, { + expect(response.Status).toEqual('SUCCESS'); + expect(response.PhysicalResourceId).toEqual(''); + expect(response.Data).toEqual({ Attribute1: 'hello', Attribute2: { Foo: 1111, }, }); - test.done(); - }, - async 'no echo'(test: Test) { + }); + + test('no echo', async () => { // GIVEN const createEvent = makeEvent({ RequestType: 'Create' }); @@ -59,12 +58,12 @@ nodeunitShim({ const response = await invokeHandler(createEvent, async _ => ({ NoEcho: true })); // THEN - test.deepEqual(response.Status, 'SUCCESS'); - test.deepEqual(response.NoEcho, true); - test.done(); - }, + expect(response.Status).toEqual('SUCCESS'); + expect(response.NoEcho).toEqual(true); - async 'reason'(test: Test) { + }); + + test('reason', async () => { // GIVEN const createEvent = makeEvent({ RequestType: 'Create' }); @@ -72,13 +71,13 @@ nodeunitShim({ const response = await invokeHandler(createEvent, async _ => ({ Reason: 'hello, reason' })); // THEN - test.deepEqual(response.Status, 'SUCCESS'); - test.deepEqual(response.Reason, 'hello, reason'); - test.done(); - }, - }, + expect(response.Status).toEqual('SUCCESS'); + expect(response.Reason).toEqual('hello, reason'); - async 'an error thrown by the handler is sent as a failure response to cloudformation'(test: Test) { + }); + }); + + test('an error thrown by the handler is sent as a failure response to cloudformation', async () => { // GIVEN const createEvent = makeEvent({ RequestType: 'Create' }); @@ -88,7 +87,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(response, { + expect(response).toEqual({ Status: 'FAILED', Reason: 'this is an error', StackId: '', @@ -97,10 +96,10 @@ nodeunitShim({ LogicalResourceId: '', }); - test.done(); - }, - async 'physical resource id cannot be changed in DELETE'(test: Test) { + }); + + test('physical resource id cannot be changed in DELETE', async () => { // GIVEN const event = makeEvent({ RequestType: 'Delete' }); @@ -110,7 +109,7 @@ nodeunitShim({ })); // THEN - test.deepEqual(response, { + expect(response).toEqual({ Status: 'FAILED', Reason: 'DELETE: cannot change the physical resource ID from "undefined" to "Changed" during deletion', StackId: '', @@ -119,10 +118,10 @@ nodeunitShim({ LogicalResourceId: 
'',
     });
 
-    test.done();
-  },
 
-  async 'DELETE after CREATE is ignored with success'(test: Test) {
+  });
+
+  test('DELETE after CREATE is ignored with success', async () => {
     // GIVEN
     const event = makeEvent({
       RequestType: 'Delete',
@@ -131,11 +130,11 @@ nodeunitShim({
 
     // WHEN
     const response = await invokeHandler(event, async _ => {
-      test.ok(false, 'handler should not be called');
+      throw new Error('handler should not be called');
     });
 
     // THEN
-    test.deepEqual(response, {
+    expect(response).toEqual({
       Status: 'SUCCESS',
       Reason: 'SUCCESS',
       StackId: '',
@@ -144,9 +143,9 @@ nodeunitShim({
       LogicalResourceId: '',
     });
 
-    test.done();
+
-  },
+  });
 });
 
 function makeEvent(req: Partial<AWSLambda.CloudFormationCustomResourceEvent>): AWSLambda.CloudFormationCustomResourceEvent {
diff --git a/packages/@aws-cdk/core/test/custom-resource.test.ts b/packages/@aws-cdk/core/test/custom-resource.test.ts
index 619a46c062f6b..6f35597864f1b 100644
--- a/packages/@aws-cdk/core/test/custom-resource.test.ts
+++ b/packages/@aws-cdk/core/test/custom-resource.test.ts
@@ -1,9 +1,8 @@
-import { nodeunitShim, Test } from 'nodeunit-shim';
 import { CustomResource, RemovalPolicy, Stack } from '../lib';
 import { toCloudFormation } from './util';
 
-nodeunitShim({
-  'simple case provider identified by service token'(test: Test) {
+describe('custom resource', () => {
+  test('simple case provider identified by service token', () => {
     // GIVEN
     const stack = new Stack();
 
@@ -17,7 +16,7 @@ nodeunitShim({
     });
 
     // THEN
-    test.deepEqual(toCloudFormation(stack), {
+    expect(toCloudFormation(stack)).toEqual({
       Resources: {
         MyCustomResource: {
           Type: 'AWS::CloudFormation::CustomResource',
@@ -31,10 +30,10 @@ nodeunitShim({
         },
       },
     });
-    test.done();
-  },
 
-  'resource type can be specified'(test: Test) {
+  });
+
+  test('resource type can be specified', () => {
     // GIVEN
     const stack = new Stack();
 
@@ -45,7 +44,7 @@ nodeunitShim({
     });
 
     // THEN
-    test.deepEqual(toCloudFormation(stack), {
+    expect(toCloudFormation(stack)).toEqual({
       Resources: {
         MyCustomResource: {
           Type: 'Custom::MyResourceType',
@@ -57,10 +56,10 @@ nodeunitShim({
         },
       },
     });
-    test.done();
-  },
 
-  'removal policy'(test: Test) {
+  });
+
+  test('removal policy', () => {
     // GIVEN
     const stack = new Stack();
 
@@ -71,7 +70,7 @@ nodeunitShim({
     });
 
     // THEN
-    test.deepEqual(toCloudFormation(stack), {
+    expect(toCloudFormation(stack)).toEqual({
       Resources: {
         MyCustomResource: {
           Type: 'AWS::CloudFormation::CustomResource',
@@ -83,23 +82,23 @@ nodeunitShim({
         },
       },
     });
-    test.done();
-  },
 
-  'resource type must begin with "Custom::"'(test: Test) {
+  });
+
+  test('resource type must begin with "Custom::"', () => {
     // GIVEN
     const stack = new Stack();
 
     // THEN
-    test.throws(() => new CustomResource(stack, 'MyCustomResource', {
+    expect(() => new CustomResource(stack, 'MyCustomResource', {
       resourceType: 'MyResourceType',
       serviceToken: 'FooBar',
-    }), /Custom resource type must begin with "Custom::"/);
+    })).toThrow(/Custom resource type must begin with "Custom::"/);
 
-    test.done();
-  },
 
-  'properties can be pascal-cased'(test: Test) {
+  });
+
+  test('properties can be pascal-cased', () => {
     // GIVEN
     const stack = new Stack();
 
@@ -116,7 +115,7 @@ nodeunitShim({
     });
 
     // THEN
-    test.deepEqual(toCloudFormation(stack), {
+    expect(toCloudFormation(stack)).toEqual({
       Resources: {
         MyCustomResource: {
           Type: 'AWS::CloudFormation::CustomResource',
@@ -132,10 +131,10 @@ nodeunitShim({
         },
       },
     });
-    test.done();
-  },
 
-  'pascal-casing of props is disabled by default'(test: Test) {
+  });
+
+  test('pascal-casing of props is disabled by default', () => {
     // GIVEN
     const 
stack = new Stack(); @@ -151,7 +150,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyCustomResource: { Type: 'AWS::CloudFormation::CustomResource', @@ -167,7 +166,7 @@ nodeunitShim({ }, }, }); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/core/test/duration.test.ts b/packages/@aws-cdk/core/test/duration.test.ts index 5b04827d72f07..68da12881d3ba 100644 --- a/packages/@aws-cdk/core/test/duration.test.ts +++ b/packages/@aws-cdk/core/test/duration.test.ts @@ -1,210 +1,209 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Duration, Lazy, Stack, Token } from '../lib'; -nodeunitShim({ - 'negative amount'(test: Test) { - test.throws(() => Duration.seconds(-1), /negative/); +describe('duration', () => { + test('negative amount', () => { + expect(() => Duration.seconds(-1)).toThrow(/negative/); - test.done(); - }, - 'unresolved amount'(test: Test) { + }); + + test('unresolved amount', () => { const stack = new Stack(); const lazyDuration = Duration.seconds(Token.asNumber({ resolve: () => 1337 })); - test.equals(stack.resolve(lazyDuration.toSeconds()), 1337); - test.throws( - () => stack.resolve(lazyDuration.toMinutes()), + expect(stack.resolve(lazyDuration.toSeconds())).toEqual(1337); + expect( + () => stack.resolve(lazyDuration.toMinutes())).toThrow( /Unable to perform time unit conversion on un-resolved token/, ); - test.done(); - }, - 'Duration in seconds'(test: Test) { + }); + + test('Duration in seconds', () => { const duration = Duration.seconds(300); - test.equal(duration.toSeconds(), 300); - test.equal(duration.toMinutes(), 5); - test.throws(() => duration.toDays(), /'300 seconds' cannot be converted into a whole number of days/); - floatEqual(test, duration.toDays({ integral: false }), 300 / 86_400); + expect(duration.toSeconds()).toEqual(300); + expect(duration.toMinutes()).toEqual(5); + expect(() => duration.toDays()).toThrow(/'300 seconds' cannot be converted into a whole number of days/); + floatEqual(duration.toDays({ integral: false }), 300 / 86_400); - test.equal(Duration.seconds(60 * 60 * 24).toDays(), 1); + expect(Duration.seconds(60 * 60 * 24).toDays()).toEqual(1); - test.done(); - }, - 'Duration in minutes'(test: Test) { + }); + + test('Duration in minutes', () => { const duration = Duration.minutes(5); - test.equal(duration.toSeconds(), 300); - test.equal(duration.toMinutes(), 5); - test.throws(() => duration.toDays(), /'5 minutes' cannot be converted into a whole number of days/); - floatEqual(test, duration.toDays({ integral: false }), 300 / 86_400); + expect(duration.toSeconds()).toEqual(300); + expect(duration.toMinutes()).toEqual(5); + expect(() => duration.toDays()).toThrow(/'5 minutes' cannot be converted into a whole number of days/); + floatEqual(duration.toDays({ integral: false }), 300 / 86_400); + + expect(Duration.minutes(60 * 24).toDays()).toEqual(1); - test.equal(Duration.minutes(60 * 24).toDays(), 1); - test.done(); - }, + }); - 'Duration in hours'(test: Test) { + test('Duration in hours', () => { const duration = Duration.hours(5); - test.equal(duration.toSeconds(), 18_000); - test.equal(duration.toMinutes(), 300); - test.throws(() => duration.toDays(), /'5 hours' cannot be converted into a whole number of days/); - floatEqual(test, duration.toDays({ integral: false }), 5 / 24); + expect(duration.toSeconds()).toEqual(18_000); + expect(duration.toMinutes()).toEqual(300); + expect(() => duration.toDays()).toThrow(/'5 hours' cannot be converted 
into a whole number of days/); + floatEqual(duration.toDays({ integral: false }), 5 / 24); - test.equal(Duration.hours(24).toDays(), 1); + expect(Duration.hours(24).toDays()).toEqual(1); - test.done(); - }, - 'seconds to milliseconds'(test: Test) { + }); + + test('seconds to milliseconds', () => { const duration = Duration.seconds(5); - test.equal(duration.toMilliseconds(), 5_000); + expect(duration.toMilliseconds()).toEqual(5_000); + - test.done(); - }, + }); - 'Duration in days'(test: Test) { + test('Duration in days', () => { const duration = Duration.days(1); - test.equal(duration.toSeconds(), 86_400); - test.equal(duration.toMinutes(), 1_440); - test.equal(duration.toDays(), 1); + expect(duration.toSeconds()).toEqual(86_400); + expect(duration.toMinutes()).toEqual(1_440); + expect(duration.toDays()).toEqual(1); + + + }); - test.done(); - }, + test('toISOString', () => { + expect(Duration.millis(0).toISOString()).toEqual('PT0S'); + expect(Duration.seconds(0).toISOString()).toEqual('PT0S'); + expect(Duration.minutes(0).toISOString()).toEqual('PT0S'); + expect(Duration.hours(0).toISOString()).toEqual('PT0S'); + expect(Duration.days(0).toISOString()).toEqual('PT0S'); - 'toISOString'(test: Test) { - test.equal(Duration.millis(0).toISOString(), 'PT0S'); - test.equal(Duration.seconds(0).toISOString(), 'PT0S'); - test.equal(Duration.minutes(0).toISOString(), 'PT0S'); - test.equal(Duration.hours(0).toISOString(), 'PT0S'); - test.equal(Duration.days(0).toISOString(), 'PT0S'); + expect(Duration.millis(5).toISOString()).toEqual('PT0.005S'); + expect(Duration.seconds(5).toISOString()).toEqual('PT5S'); + expect(Duration.minutes(5).toISOString()).toEqual('PT5M'); + expect(Duration.hours(5).toISOString()).toEqual('PT5H'); + expect(Duration.days(5).toISOString()).toEqual('P5D'); - test.equal(Duration.millis(5).toISOString(), 'PT0.005S'); - test.equal(Duration.seconds(5).toISOString(), 'PT5S'); - test.equal(Duration.minutes(5).toISOString(), 'PT5M'); - test.equal(Duration.hours(5).toISOString(), 'PT5H'); - test.equal(Duration.days(5).toISOString(), 'P5D'); + expect(Duration.seconds(1 + 60 * (1 + 60 * (1 + 24))).toISOString()).toEqual('P1DT1H1M1S'); - test.equal(Duration.seconds(1 + 60 * (1 + 60 * (1 + 24))).toISOString(), 'P1DT1H1M1S'); - test.done(); - }, + }); - 'toIsoString'(test: Test) { - test.equal(Duration.millis(0).toIsoString(), 'PT0S'); - test.equal(Duration.seconds(0).toIsoString(), 'PT0S'); - test.equal(Duration.minutes(0).toIsoString(), 'PT0S'); - test.equal(Duration.hours(0).toIsoString(), 'PT0S'); - test.equal(Duration.days(0).toIsoString(), 'PT0S'); + test('toIsoString', () => { + expect(Duration.millis(0).toIsoString()).toEqual('PT0S'); + expect(Duration.seconds(0).toIsoString()).toEqual('PT0S'); + expect(Duration.minutes(0).toIsoString()).toEqual('PT0S'); + expect(Duration.hours(0).toIsoString()).toEqual('PT0S'); + expect(Duration.days(0).toIsoString()).toEqual('PT0S'); - test.equal(Duration.millis(5).toIsoString(), 'PT0.005S'); - test.equal(Duration.seconds(5).toIsoString(), 'PT5S'); - test.equal(Duration.minutes(5).toIsoString(), 'PT5M'); - test.equal(Duration.hours(5).toIsoString(), 'PT5H'); - test.equal(Duration.days(5).toIsoString(), 'P5D'); + expect(Duration.millis(5).toIsoString()).toEqual('PT0.005S'); + expect(Duration.seconds(5).toIsoString()).toEqual('PT5S'); + expect(Duration.minutes(5).toIsoString()).toEqual('PT5M'); + expect(Duration.hours(5).toIsoString()).toEqual('PT5H'); + expect(Duration.days(5).toIsoString()).toEqual('P5D'); - 
test.equal(Duration.seconds(65).toIsoString(), 'PT1M5S');
-    test.equal(Duration.seconds(1 + 60 * (1 + 60 * (1 + 24))).toIsoString(), 'P1DT1H1M1S');
+    expect(Duration.seconds(65).toIsoString()).toEqual('PT1M5S');
+    expect(Duration.seconds(1 + 60 * (1 + 60 * (1 + 24))).toIsoString()).toEqual('P1DT1H1M1S');
 
-    test.done();
-  },
 
-  'parse'(test: Test) {
-    test.equal(Duration.parse('PT0S').toSeconds(), 0);
-    test.equal(Duration.parse('PT0M').toSeconds(), 0);
-    test.equal(Duration.parse('PT0H').toSeconds(), 0);
-    test.equal(Duration.parse('P0D').toSeconds(), 0);
+  });
 
-    test.equal(Duration.parse('PT5S').toSeconds(), 5);
-    test.equal(Duration.parse('PT5M').toSeconds(), 300);
-    test.equal(Duration.parse('PT5H').toSeconds(), 18_000);
-    test.equal(Duration.parse('P5D').toSeconds(), 432_000);
+  test('parse', () => {
+    expect(Duration.parse('PT0S').toSeconds()).toEqual(0);
+    expect(Duration.parse('PT0M').toSeconds()).toEqual(0);
+    expect(Duration.parse('PT0H').toSeconds()).toEqual(0);
+    expect(Duration.parse('P0D').toSeconds()).toEqual(0);
 
-    test.equal(Duration.parse('P1DT1H1M1S').toSeconds(), 1 + 60 * (1 + 60 * (1 + 24)));
+    expect(Duration.parse('PT5S').toSeconds()).toEqual(5);
+    expect(Duration.parse('PT5M').toSeconds()).toEqual(300);
+    expect(Duration.parse('PT5H').toSeconds()).toEqual(18_000);
+    expect(Duration.parse('P5D').toSeconds()).toEqual(432_000);
 
-    test.done();
-  },
+    expect(Duration.parse('P1DT1H1M1S').toSeconds()).toEqual(1 + 60 * (1 + 60 * (1 + 24)));
 
-  'reject illegal parses'(test: Test) {
+
+  });
+
+  test('reject illegal parses', () => {
     const err = 'Not a valid ISO duration';
-    test.throws(() => {
+    expect(() => {
       Duration.parse('PT1D');
-    }, err);
+    }).toThrow(err);
 
-    test.throws(() => {
+    expect(() => {
       Duration.parse('P5S');
-    }, err);
+    }).toThrow(err);
+
+
+  });
 
-    test.done();
-  },
+  test('to human string', () => {
+    expect(Duration.minutes(0).toHumanString()).toEqual('0 minutes');
+    expect(Duration.minutes(Lazy.number({ produce: () => 5 })).toHumanString()).toEqual('<token> minutes');
 
-  'to human string'(test: Test) {
-    test.equal(Duration.minutes(0).toHumanString(), '0 minutes');
-    test.equal(Duration.minutes(Lazy.number({ produce: () => 5 })).toHumanString(), '<token> minutes');
+    expect(Duration.days(1).toHumanString()).toEqual('1 day');
+    expect(Duration.hours(1).toHumanString()).toEqual('1 hour');
+    expect(Duration.minutes(1).toHumanString()).toEqual('1 minute');
+    expect(Duration.seconds(1).toHumanString()).toEqual('1 second');
+    expect(Duration.millis(1).toHumanString()).toEqual('1 milli');
 
-    test.equal(Duration.days(1).toHumanString(), '1 day');
-    test.equal(Duration.hours(1).toHumanString(), '1 hour');
-    test.equal(Duration.minutes(1).toHumanString(), '1 minute');
-    test.equal(Duration.seconds(1).toHumanString(), '1 second');
-    test.equal(Duration.millis(1).toHumanString(), '1 milli');
+    expect(Duration.minutes(10).toHumanString()).toEqual('10 minutes');
 
-    test.equal(Duration.minutes(10).toHumanString(), '10 minutes');
+    expect(Duration.minutes(62).toHumanString()).toEqual('1 hour 2 minutes');
 
-    test.equal(Duration.minutes(62).toHumanString(), '1 hour 2 minutes');
+    expect(Duration.seconds(3666).toHumanString()).toEqual('1 hour 1 minute');
 
-    test.equal(Duration.seconds(3666).toHumanString(), '1 hour 1 minute');
+    expect(Duration.millis(3000).toHumanString()).toEqual('3 seconds');
+    expect(Duration.millis(3666).toHumanString()).toEqual('3 seconds 666 millis');
 
-    test.equal(Duration.millis(3000).toHumanString(), '3 seconds');
-    test.equal(Duration.millis(3666).toHumanString(), '3 
seconds 666 millis'); + expect(Duration.millis(3.6).toHumanString()).toEqual('3.6 millis'); - test.equal(Duration.millis(3.6).toHumanString(), '3.6 millis'); - test.done(); - }, + }); - 'add two durations'(test: Test) { - test.equal(Duration.minutes(1).plus(Duration.seconds(30)).toSeconds(), Duration.seconds(90).toSeconds()); - test.equal(Duration.minutes(1).plus(Duration.seconds(30)).toMinutes({ integral: false }), Duration.seconds(90).toMinutes({ integral: false })); + test('add two durations', () => { + expect(Duration.minutes(1).plus(Duration.seconds(30)).toSeconds()).toEqual(Duration.seconds(90).toSeconds()); + expect(Duration.minutes(1).plus(Duration.seconds(30)).toMinutes({ integral: false })) + .toEqual(Duration.seconds(90).toMinutes({ integral: false })); - test.done(); - }, - 'get unit label from duration'(test: Test) { - test.equal(Duration.minutes(Lazy.number({ produce: () => 10 })).unitLabel(), 'minutes'); - test.equal(Duration.minutes(62).unitLabel(), 'minutes'); - test.equal(Duration.seconds(10).unitLabel(), 'seconds'); - test.equal(Duration.millis(1).unitLabel(), 'millis'); - test.equal(Duration.hours(1000).unitLabel(), 'hours'); - test.equal(Duration.days(2).unitLabel(), 'days'); - test.done(); - }, + }); - 'format number token to number'(test: Test) { + test('get unit label from duration', () => { + expect(Duration.minutes(Lazy.number({ produce: () => 10 })).unitLabel()).toEqual('minutes'); + expect(Duration.minutes(62).unitLabel()).toEqual('minutes'); + expect(Duration.seconds(10).unitLabel()).toEqual('seconds'); + expect(Duration.millis(1).unitLabel()).toEqual('millis'); + expect(Duration.hours(1000).unitLabel()).toEqual('hours'); + expect(Duration.days(2).unitLabel()).toEqual('days'); + + }); + + test('format number token to number', () => { const stack = new Stack(); const lazyDuration = Duration.minutes(Lazy.number({ produce: () => 10 })); - test.equal(stack.resolve(lazyDuration.formatTokenToNumber()), '10 minutes'); - test.equal(Duration.hours(10).formatTokenToNumber(), '10 hours'); - test.equal(Duration.days(5).formatTokenToNumber(), '5 days'); - test.done(); - }, + expect(stack.resolve(lazyDuration.formatTokenToNumber())).toEqual('10 minutes'); + expect(Duration.hours(10).formatTokenToNumber()).toEqual('10 hours'); + expect(Duration.days(5).formatTokenToNumber()).toEqual('5 days'); - 'duration is unresolved'(test: Test) { + }); + + test('duration is unresolved', () => { const lazyDuration = Duration.minutes(Lazy.number({ produce: () => 10 })); - test.equal(lazyDuration.isUnresolved(), true); - test.equal(Duration.hours(10).isUnresolved(), false); - test.done(); - }, + expect(lazyDuration.isUnresolved()).toEqual(true); + expect(Duration.hours(10).isUnresolved()).toEqual(false); + + }); }); -function floatEqual(test: Test, actual: number, expected: number) { - test.ok( +function floatEqual(actual: number, expected: number) { + expect( // Floats are subject to rounding errors up to Number.ESPILON actual >= expected - Number.EPSILON && actual <= expected + Number.EPSILON, - `${actual} == ${expected}`, - ); + ).toEqual(true); } diff --git a/packages/@aws-cdk/core/test/dynamic-reference.test.ts b/packages/@aws-cdk/core/test/dynamic-reference.test.ts index e9da4b23b837d..894339d231d7b 100644 --- a/packages/@aws-cdk/core/test/dynamic-reference.test.ts +++ b/packages/@aws-cdk/core/test/dynamic-reference.test.ts @@ -1,8 +1,7 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnDynamicReference, CfnDynamicReferenceService, Stack } from '../lib'; 
-nodeunitShim({ - 'can create dynamic references with service and key with colons'(test: Test) { +describe('dynamic reference', () => { + test('can create dynamic references with service and key with colons', () => { // GIVEN const stack = new Stack(); @@ -10,8 +9,8 @@ nodeunitShim({ const ref = new CfnDynamicReference(CfnDynamicReferenceService.SSM, 'a:b:c'); // THEN - test.equal(stack.resolve(ref), '{{resolve:ssm:a:b:c}}'); + expect(stack.resolve(ref)).toEqual('{{resolve:ssm:a:b:c}}'); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/core/test/environment.test.ts b/packages/@aws-cdk/core/test/environment.test.ts index 5432fd178f1f8..01a122a825f77 100644 --- a/packages/@aws-cdk/core/test/environment.test.ts +++ b/packages/@aws-cdk/core/test/environment.test.ts @@ -1,33 +1,32 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, Aws, Stack, Token } from '../lib'; -nodeunitShim({ - 'By default, environment region and account are not defined and resolve to intrinsics'(test: Test) { +describe('environment', () => { + test('By default, environment region and account are not defined and resolve to intrinsics', () => { const stack = new Stack(); - test.ok(Token.isUnresolved(stack.account)); - test.ok(Token.isUnresolved(stack.region)); - test.deepEqual(stack.resolve(stack.account), { Ref: 'AWS::AccountId' }); - test.deepEqual(stack.resolve(stack.region), { Ref: 'AWS::Region' }); - test.done(); - }, - - 'If only `env.region` or `env.account` are specified, Refs will be used for the other'(test: Test) { + expect(Token.isUnresolved(stack.account)).toEqual(true); + expect(Token.isUnresolved(stack.region)).toEqual(true); + expect(stack.resolve(stack.account)).toEqual({ Ref: 'AWS::AccountId' }); + expect(stack.resolve(stack.region)).toEqual({ Ref: 'AWS::Region' }); + + }); + + test('If only `env.region` or `env.account` are specified, Refs will be used for the other', () => { const app = new App(); const stack1 = new Stack(app, 'S1', { env: { region: 'only-region' } }); const stack2 = new Stack(app, 'S2', { env: { account: 'only-account' } }); - test.deepEqual(stack1.resolve(stack1.account), { Ref: 'AWS::AccountId' }); - test.deepEqual(stack1.resolve(stack1.region), 'only-region'); + expect(stack1.resolve(stack1.account)).toEqual({ Ref: 'AWS::AccountId' }); + expect(stack1.resolve(stack1.region)).toEqual('only-region'); - test.deepEqual(stack2.resolve(stack2.account), 'only-account'); - test.deepEqual(stack2.resolve(stack2.region), { Ref: 'AWS::Region' }); + expect(stack2.resolve(stack2.account)).toEqual('only-account'); + expect(stack2.resolve(stack2.region)).toEqual({ Ref: 'AWS::Region' }); - test.done(); - }, - 'environment defaults': { - 'if "env" is not specified, it implies account/region agnostic'(test: Test) { + }); + + describe('environment defaults', () => { + test('if "env" is not specified, it implies account/region agnostic', () => { // GIVEN const app = new App(); @@ -35,18 +34,18 @@ nodeunitShim({ const stack = new Stack(app, 'stack'); // THEN - test.deepEqual(stack.resolve(stack.account), { Ref: 'AWS::AccountId' }); - test.deepEqual(stack.resolve(stack.region), { Ref: 'AWS::Region' }); - test.deepEqual(app.synth().getStackByName(stack.stackName).environment, { + expect(stack.resolve(stack.account)).toEqual({ Ref: 'AWS::AccountId' }); + expect(stack.resolve(stack.region)).toEqual({ Ref: 'AWS::Region' }); + expect(app.synth().getStackByName(stack.stackName).environment).toEqual({ account: 'unknown-account', region: 'unknown-region', name: 
'aws://unknown-account/unknown-region', }); - test.done(); - }, - 'only region is set'(test: Test) { + }); + + test('only region is set', () => { // GIVEN const app = new App(); @@ -54,18 +53,18 @@ nodeunitShim({ const stack = new Stack(app, 'stack', { env: { region: 'explicit-region' } }); // THEN - test.deepEqual(stack.resolve(stack.account), { Ref: 'AWS::AccountId' }); - test.deepEqual(stack.resolve(stack.region), 'explicit-region'); - test.deepEqual(app.synth().getStackByName(stack.stackName).environment, { + expect(stack.resolve(stack.account)).toEqual({ Ref: 'AWS::AccountId' }); + expect(stack.resolve(stack.region)).toEqual('explicit-region'); + expect(app.synth().getStackByName(stack.stackName).environment).toEqual({ account: 'unknown-account', region: 'explicit-region', name: 'aws://unknown-account/explicit-region', }); - test.done(); - }, - 'both "region" and "account" are set'(test: Test) { + }); + + test('both "region" and "account" are set', () => { // GIVEN const app = new App(); @@ -78,18 +77,18 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(stack.account), 'explicit-account'); - test.deepEqual(stack.resolve(stack.region), 'explicit-region'); - test.deepEqual(app.synth().getStackByName(stack.stackName).environment, { + expect(stack.resolve(stack.account)).toEqual('explicit-account'); + expect(stack.resolve(stack.region)).toEqual('explicit-region'); + expect(app.synth().getStackByName(stack.stackName).environment).toEqual({ account: 'explicit-account', region: 'explicit-region', name: 'aws://explicit-account/explicit-region', }); - test.done(); - }, - 'token-account and token-region'(test: Test) { + }); + + test('token-account and token-region', () => { // GIVEN const app = new App(); @@ -102,18 +101,18 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(stack.account), { Ref: 'AWS::AccountId' }); - test.deepEqual(stack.resolve(stack.region), { Ref: 'AWS::Region' }); - test.deepEqual(app.synth().getStackByName(stack.stackName).environment, { + expect(stack.resolve(stack.account)).toEqual({ Ref: 'AWS::AccountId' }); + expect(stack.resolve(stack.region)).toEqual({ Ref: 'AWS::Region' }); + expect(app.synth().getStackByName(stack.stackName).environment).toEqual({ account: 'unknown-account', region: 'unknown-region', name: 'aws://unknown-account/unknown-region', }); - test.done(); - }, - 'token-account explicit region'(test: Test) { + }); + + test('token-account explicit region', () => { // GIVEN const app = new App(); @@ -126,15 +125,15 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(stack.account), { Ref: 'AWS::AccountId' }); - test.deepEqual(stack.resolve(stack.region), 'us-east-2'); - test.deepEqual(app.synth().getStackByName(stack.stackName).environment, { + expect(stack.resolve(stack.account)).toEqual({ Ref: 'AWS::AccountId' }); + expect(stack.resolve(stack.region)).toEqual('us-east-2'); + expect(app.synth().getStackByName(stack.stackName).environment).toEqual({ account: 'unknown-account', region: 'us-east-2', name: 'aws://unknown-account/us-east-2', }); - test.done(); - }, - }, + + }); + }); }); diff --git a/packages/@aws-cdk/core/test/expiration.test.ts b/packages/@aws-cdk/core/test/expiration.test.ts index de17852f4308c..f1770994dfbea 100644 --- a/packages/@aws-cdk/core/test/expiration.test.ts +++ b/packages/@aws-cdk/core/test/expiration.test.ts @@ -1,49 +1,48 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Duration, Expiration } from '../lib'; -nodeunitShim({ - 'from string'(test: Test) { +describe('expiration', () => { 
+  test('from string', () => {
     const date = new Date('Sun, 26 Jan 2020 00:53:20 GMT');
-    test.equal(Expiration.fromString('Sun, 26 Jan 2020 00:53:20 GMT').date.getDate(), date.getDate());
-    test.done();
-  },
+    expect(Expiration.fromString('Sun, 26 Jan 2020 00:53:20 GMT').date.getDate()).toEqual(date.getDate());
 
-  'at specified date'(test: Test) {
+  });
+
+  test('at specified date', () => {
     const date = new Date('Sun, 26 Jan 2020 00:53:20 GMT');
-    test.equal(Expiration.atDate(new Date('Sun, 26 Jan 2020 00:53:20 GMT')).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT');
-    test.equal(Expiration.atDate(new Date(1580000000000)).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT');
-    test.equal(Expiration.atDate(new Date(date)).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT');
-    test.done();
-  },
-
-  'at time stamp'(test: Test) {
-    test.equal(Expiration.atDate(new Date(1580000000000)).date.toUTCString(), 'Sun, 26 Jan 2020 00:53:20 GMT');
-    test.done();
-  },
-
-  'after'(test: Test) {
-    test.ok(Math.abs(new Date(Expiration.after(Duration.minutes(10)).date.toUTCString()).getTime() - (Date.now() + 600000)) < 15000);
-    test.done();
-  },
-
-  'toEpoch returns correct value'(test: Test) {
+    expect(Expiration.atDate(new Date('Sun, 26 Jan 2020 00:53:20 GMT')).date.toUTCString()).toEqual('Sun, 26 Jan 2020 00:53:20 GMT');
+    expect(Expiration.atDate(new Date(1580000000000)).date.toUTCString()).toEqual('Sun, 26 Jan 2020 00:53:20 GMT');
+    expect(Expiration.atDate(new Date(date)).date.toUTCString()).toEqual('Sun, 26 Jan 2020 00:53:20 GMT');
+
+  });
+
+  test('at time stamp', () => {
+    expect(Expiration.atDate(new Date(1580000000000)).date.toUTCString()).toEqual('Sun, 26 Jan 2020 00:53:20 GMT');
+
+  });
+
+  test('after', () => {
+    expect(Math.abs(new Date(Expiration.after(Duration.minutes(10)).date.toUTCString()).getTime() - (Date.now() + 600000)) < 15000).toEqual(true);
+
+  });
+
+  test('toEpoch returns correct value', () => {
     const date = new Date('Sun, 26 Jan 2020 00:53:20 GMT');
-    test.equal(Expiration.atDate(date).toEpoch(), 1580000000);
-    test.done();
-  },
+    expect(Expiration.atDate(date).toEpoch()).toEqual(1580000000);
+
+  });
 
-  'isBefore'(test: Test) {
+  test('isBefore', () => {
     const expire = Expiration.after(Duration.days(2));
-    test.ok(!expire.isBefore(Duration.days(1)));
-    test.ok(expire.isBefore(Duration.days(3)));
-    test.done();
-  },
+    expect(expire.isBefore(Duration.days(1))).toEqual(false);
+    expect(expire.isBefore(Duration.days(3))).toEqual(true);
 
-  'isAfter'(test: Test) {
+  });
+
+  test('isAfter', () => {
     const expire = Expiration.after(Duration.days(2));
-    test.ok(expire.isAfter(Duration.days(1)));
-    test.ok(!expire.isAfter(Duration.days(3)));
-    test.done();
-  },
+    expect(expire.isAfter(Duration.days(1))).toEqual(true);
+    expect(expire.isAfter(Duration.days(3))).toEqual(false);
+
+  });
 });
diff --git a/packages/@aws-cdk/core/test/feature-flags.test.ts b/packages/@aws-cdk/core/test/feature-flags.test.ts
index b3ca21c45bea8..09b79b7c26123 100644
--- a/packages/@aws-cdk/core/test/feature-flags.test.ts
+++ b/packages/@aws-cdk/core/test/feature-flags.test.ts
@@ -1,31 +1,30 @@
 import * as cxapi from '@aws-cdk/cx-api';
-import { nodeunitShim, Test } from 'nodeunit-shim';
 import { FeatureFlags, Stack } from '../lib';
 
-nodeunitShim({
-  isEnabled: {
-    'returns true when the flag is enabled'(test: Test) {
+describe('feature flags', () => {
+  describe('isEnabled', () => {
+    test('returns true when the flag is enabled', () => {
       const stack = new Stack();
stack.node.setContext(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT, true);
 
       const actual = FeatureFlags.of(stack).isEnabled(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT);
-      test.equals(actual, true);
-      test.done();
-    },
+      expect(actual).toEqual(true);
 
-    'falls back to the default'(test: Test) {
+    });
+
+    test('falls back to the default', () => {
       const stack = new Stack();
-      test.equals(FeatureFlags.of(stack).isEnabled(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT),
+      expect(FeatureFlags.of(stack).isEnabled(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT)).toEqual(
         cxapi.futureFlagDefault(cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT));
-      test.done();
-    },
 
-    'invalid flag'(test: Test) {
+    });
+
+    test('invalid flag', () => {
       const stack = new Stack();
-      test.equals(FeatureFlags.of(stack).isEnabled('non-existent-flag'), undefined);
-      test.done();
-    },
-  },
+      expect(FeatureFlags.of(stack).isEnabled('non-existent-flag')).toEqual(undefined);
+
+    });
+  });
 });
diff --git a/packages/@aws-cdk/core/test/fn.test.ts b/packages/@aws-cdk/core/test/fn.test.ts
index 4168f7d6daada..221a7b6e811a1 100644
--- a/packages/@aws-cdk/core/test/fn.test.ts
+++ b/packages/@aws-cdk/core/test/fn.test.ts
@@ -1,21 +1,20 @@
 import * as fc from 'fast-check';
 import * as _ from 'lodash';
-import { nodeunitShim, Test } from 'nodeunit-shim';
 import { App, CfnOutput, Fn, Stack, Token } from '../lib';
 import { Intrinsic } from '../lib/private/intrinsic';
 
-function asyncTest(cb: (test: Test) => Promise<void>): (test: Test) => void {
-  return async (test: Test) => {
-    let error: Error;
+function asyncTest(cb: () => Promise<void>): () => void {
+  return async () => {
+    let error: any;
     try {
-      await cb(test);
+      await cb();
     } catch (e) {
       error = e;
     } finally {
-      test.doesNotThrow(() => {
+      expect(() => {
         if (error) {
           throw error;
         }
-      });
-      test.done();
+      }).not.toThrow();
+
     }
   };
 }
@@ -24,23 +23,23 @@ const nonEmptyString = fc.string(1, 16);
 const tokenish = fc.array(nonEmptyString, 2, 2).map(arr => ({ [arr[0]]: arr[1] }));
 const anyValue = fc.oneof(nonEmptyString, tokenish);
 
-nodeunitShim({
-  'eager resolution for non-tokens': {
-    'Fn.select'(test: Test) {
-      test.deepEqual(Fn.select(2, ['hello', 'you', 'dude']), 'dude');
-      test.done();
-    },
-    'Fn.split'(test: Test) {
-      test.deepEqual(Fn.split(':', 'hello:world:yeah'), ['hello', 'world', 'yeah']);
-      test.done();
-    },
-  },
-  'FnParseDomainName': {
-    'parse domain name from resolved url'(test: Test) {
-      test.deepEqual(Fn.parseDomainName('https://test.com/'), 'test.com');
-      test.done();
-    },
-    'parse domain name on token'(test: Test) {
+describe('fn', () => {
+  describe('eager resolution for non-tokens', () => {
+    test('Fn.select', () => {
+      expect(Fn.select(2, ['hello', 'you', 'dude'])).toEqual('dude');
+
+    });
+    test('Fn.split', () => {
+      expect(Fn.split(':', 'hello:world:yeah')).toEqual(['hello', 'world', 'yeah']);
+
+    });
+  });
+  describe('FnParseDomainName', () => {
+    test('parse domain name from resolved url', () => {
+      expect(Fn.parseDomainName('https://test.com/')).toEqual('test.com');
+
+    });
+    test('parse domain name on token', () => {
       const stack = new Stack();
       const url = Fn.join('//', [
         'https:',
@@ -49,16 +48,16 @@ nodeunitShim({
         'graphql',
       ]),
     ]);
-      test.deepEqual(Fn.parseDomainName(stack.resolve(url)), 'test.com');
-      test.done();
-    },
-  },
-  'FnJoin': {
-    'rejects empty list of arguments to join'(test: Test) {
-      test.throws(() => Fn.join('.', []));
-      test.done();
-    },
-    'collapse nested FnJoins even if they contain tokens'(test: Test) {
+      expect(Fn.parseDomainName(stack.resolve(url))).toEqual('test.com');
+
+ 
}); + }); + describe('FnJoin', () => { + test('rejects empty list of arguments to join', () => { + expect(() => Fn.join('.', [])).toThrow(); + + }); + test('collapse nested FnJoins even if they contain tokens', () => { const stack = new Stack(); const obj = Fn.join('', [ @@ -67,7 +66,7 @@ nodeunitShim({ 'd', ]); - test.deepEqual(stack.resolve(obj), { + expect(stack.resolve(obj)).toEqual({ 'Fn::Join': ['', [ 'a', @@ -76,9 +75,9 @@ nodeunitShim({ ]], }); - test.done(); - }, - 'resolves to the value if only one value is joined': asyncTest(async () => { + + }); + test('resolves to the value if only one value is joined', asyncTest(async () => { const stack = new Stack(); fc.assert( fc.property( @@ -87,8 +86,8 @@ nodeunitShim({ ), { verbose: true }, ); - }), - 'pre-concatenates string literals': asyncTest(async () => { + })); + test('pre-concatenates string literals', asyncTest(async () => { const stack = new Stack(); fc.assert( fc.property( @@ -97,8 +96,8 @@ nodeunitShim({ ), { verbose: true }, ); - }), - 'pre-concatenates around tokens': asyncTest(async () => { + })); + test('pre-concatenates around tokens', asyncTest(async () => { const stack = new Stack(); fc.assert( fc.property( @@ -109,8 +108,8 @@ nodeunitShim({ ), { verbose: true, seed: 1539874645005, path: '0:0:0:0:0:0:0:0:0' }, ); - }), - 'flattens joins nested under joins with same delimiter': asyncTest(async () => { + })); + test('flattens joins nested under joins with same delimiter', asyncTest(async () => { const stack = new Stack(); fc.assert( fc.property( @@ -124,8 +123,8 @@ nodeunitShim({ ), { verbose: true }, ); - }), - 'does not flatten joins nested under joins with different delimiter': asyncTest(async () => { + })); + test('does not flatten joins nested under joins with different delimiter', asyncTest(async () => { const stack = new Stack(); fc.assert( fc.property( @@ -144,22 +143,22 @@ nodeunitShim({ ), { verbose: true }, ); - }), - 'Fn::EachMemberIn': asyncTest(async (test) => { + })); + test('Fn::EachMemberIn', asyncTest(async () => { const stack = new Stack(); const eachMemberIn = Fn.conditionEachMemberIn( Fn.valueOfAll('AWS::EC2::Subnet::Id', 'VpcId'), Fn.refAll('AWS::EC2::VPC::Id'), ); - test.deepEqual(stack.resolve(eachMemberIn), { + expect(stack.resolve(eachMemberIn)).toEqual({ 'Fn::EachMemberIn': [ { 'Fn::ValueOfAll': ['AWS::EC2::Subnet::Id', 'VpcId'] }, { 'Fn::RefAll': 'AWS::EC2::VPC::Id' }, ], }); - }), + })); - 'cross-stack FnJoin elements are properly resolved': asyncTest(async (test) => { + test('cross-stack FnJoin elements are properly resolved', asyncTest(async () => { // GIVEN const app = new App(); const stack1 = new Stack(app, 'Stack1'); @@ -173,7 +172,7 @@ nodeunitShim({ // THEN const template = app.synth().getStackByName('Stack2').template; - test.deepEqual(template, { + expect(template).toEqual({ Outputs: { Stack1Id: { Value: { @@ -185,22 +184,22 @@ nodeunitShim({ }, }, }); - }), - }, - 'Ref': { - 'returns a reference given a logical name'(test: Test) { + })); + }); + describe('Ref', () => { + test('returns a reference given a logical name', () => { const stack = new Stack(); - test.deepEqual(stack.resolve(Fn.ref('hello')), { + expect(stack.resolve(Fn.ref('hello'))).toEqual({ Ref: 'hello', }); - test.done(); - }, - }, - 'nested Fn::Join with list token'(test: Test) { + + }); + }); + test('nested Fn::Join with list token', () => { const stack = new Stack(); const inner = Fn.join(',', Token.asList({ NotReallyList: true })); const outer = Fn.join(',', [inner, 'Foo']); - test.deepEqual(stack.resolve(outer), 
{ + expect(stack.resolve(outer)).toEqual({ 'Fn::Join': [ ',', [ @@ -209,8 +208,8 @@ nodeunitShim({ ], ], }); - test.done(); - }, + + }); }); test('Fn.split with an unknown length resolves to simple {Fn::Split}', () => { diff --git a/packages/@aws-cdk/core/test/fs/fs-copy.test.ts b/packages/@aws-cdk/core/test/fs/fs-copy.test.ts index 03ffc770b870c..77e62d7dc58c6 100644 --- a/packages/@aws-cdk/core/test/fs/fs-copy.test.ts +++ b/packages/@aws-cdk/core/test/fs/fs-copy.test.ts @@ -1,11 +1,10 @@ import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { FileSystem, SymlinkFollowMode } from '../../lib/fs'; -nodeunitShim({ - 'Default: copies all files and subdirectories, with default follow mode is "External"'(test: Test) { +describe('fs copy', () => { + test('Default: copies all files and subdirectories, with default follow mode is "External"', () => { // GIVEN const outdir = fs.mkdtempSync(path.join(os.tmpdir(), 'copy-tests')); @@ -13,7 +12,7 @@ nodeunitShim({ FileSystem.copyDirectory(path.join(__dirname, 'fixtures', 'test1'), outdir); // THEN - test.deepEqual(tree(outdir), [ + expect(tree(outdir)).toEqual([ 'external-link.txt', 'file1.txt', 'local-link.txt => file1.txt', @@ -25,10 +24,10 @@ nodeunitShim({ ' subdir3 (D)', ' file3.txt', ]); - test.done(); - }, - 'Always: follow all symlinks'(test: Test) { + }); + + test('Always: follow all symlinks', () => { // GIVEN const outdir = fs.mkdtempSync(path.join(os.tmpdir(), 'copy-tests')); @@ -38,7 +37,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(tree(outdir), [ + expect(tree(outdir)).toEqual([ 'external-dir-link (D)', ' file2.txt', 'external-link.txt', @@ -50,10 +49,10 @@ nodeunitShim({ ' file-in-subdir.txt', 'normal-file.txt', ]); - test.done(); - }, - 'Never: do not follow all symlinks'(test: Test) { + }); + + test('Never: do not follow all symlinks', () => { // GIVEN const outdir = fs.mkdtempSync(path.join(os.tmpdir(), 'copy-tests')); @@ -63,7 +62,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(tree(outdir), [ + expect(tree(outdir)).toEqual([ 'external-dir-link => ../test1/subdir', 'external-link.txt => ../test1/subdir2/subdir3/file3.txt', 'indirect-external-link.txt => external-link.txt', @@ -73,10 +72,10 @@ nodeunitShim({ ' file-in-subdir.txt', 'normal-file.txt', ]); - test.done(); - }, - 'External: follow only external symlinks'(test: Test) { + }); + + test('External: follow only external symlinks', () => { // GIVEN const outdir = fs.mkdtempSync(path.join(os.tmpdir(), 'copy-tests')); @@ -86,7 +85,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(tree(outdir), [ + expect(tree(outdir)).toEqual([ 'external-dir-link (D)', ' file2.txt', 'external-link.txt', @@ -98,10 +97,10 @@ nodeunitShim({ 'normal-file.txt', ]); - test.done(); - }, - 'exclude'(test: Test) { + }); + + test('exclude', () => { // GIVEN const outdir = fs.mkdtempSync(path.join(os.tmpdir(), 'copy-tests')); @@ -116,14 +115,14 @@ nodeunitShim({ }); // THEN - test.deepEqual(tree(outdir), [ + expect(tree(outdir)).toEqual([ 'subdir2 (D)', ' empty-subdir (D)', ' subdir3 (D)', ' file3.txt', ]); - test.done(); - }, + + }); }); function tree(dir: string, depth = ''): string[] { diff --git a/packages/@aws-cdk/core/test/fs/fs-fingerprint.test.ts b/packages/@aws-cdk/core/test/fs/fs-fingerprint.test.ts index be093c32cbffc..8187778de5453 100644 --- a/packages/@aws-cdk/core/test/fs/fs-fingerprint.test.ts +++ b/packages/@aws-cdk/core/test/fs/fs-fingerprint.test.ts @@ -1,13 +1,12 @@ import * as fs from 'fs'; import 
* as os from 'os'; import * as path from 'path'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { FileSystem, SymlinkFollowMode } from '../../lib/fs'; import { contentFingerprint } from '../../lib/fs/fingerprint'; -nodeunitShim({ - files: { - 'does not change with the file name'(test: Test) { +describe('fs fingerprint', () => { + describe('files', () => { + test('does not change with the file name', () => { // GIVEN const workdir = fs.mkdtempSync(path.join(os.tmpdir(), 'hash-tests')); const content = 'Hello, world!'; @@ -24,12 +23,12 @@ nodeunitShim({ const hash3 = FileSystem.fingerprint(input3); // THEN - test.deepEqual(hash1, hash2); - test.notDeepEqual(hash3, hash1); - test.done(); - }, + expect(hash1).toEqual(hash2); + expect(hash3).not.toEqual(hash1); - 'works on empty files'(test: Test) { + }); + + test('works on empty files', () => { // GIVEN const workdir = fs.mkdtempSync(path.join(os.tmpdir(), 'hash-tests')); const input1 = path.join(workdir, 'empty'); @@ -42,13 +41,13 @@ nodeunitShim({ const hash2 = FileSystem.fingerprint(input2); // THEN - test.deepEqual(hash1, hash2); - test.done(); - }, - }, + expect(hash1).toEqual(hash2); + + }); + }); - directories: { - 'works on directories'(test: Test) { + describe('directories', () => { + test('works on directories', () => { // GIVEN const srcdir = path.join(__dirname, 'fixtures', 'symlinks'); const outdir = fs.mkdtempSync(path.join(os.tmpdir(), 'copy-tests')); @@ -59,11 +58,11 @@ nodeunitShim({ const hashCopy = FileSystem.fingerprint(outdir); // THEN - test.deepEqual(hashSrc, hashCopy); - test.done(); - }, + expect(hashSrc).toEqual(hashCopy); + + }); - 'ignores requested files'(test: Test) { + test('ignores requested files', () => { // GIVEN const srcdir = path.join(__dirname, 'fixtures', 'symlinks'); const outdir = fs.mkdtempSync(path.join(os.tmpdir(), 'copy-tests')); @@ -76,11 +75,11 @@ nodeunitShim({ const hashCopy = FileSystem.fingerprint(outdir, { exclude: ['*.ignoreme'] }); // THEN - test.deepEqual(hashSrc, hashCopy); - test.done(); - }, + expect(hashSrc).toEqual(hashCopy); - 'changes with file names'(test: Test) { + }); + + test('changes with file names', () => { // GIVEN const srcdir = path.join(__dirname, 'fixtures', 'symlinks'); const cpydir = fs.mkdtempSync(path.join(os.tmpdir(), 'fingerprint-tests')); @@ -94,13 +93,13 @@ nodeunitShim({ const hashCopy = FileSystem.fingerprint(cpydir); // THEN - test.notDeepEqual(hashSrc, hashCopy); - test.done(); - }, - }, + expect(hashSrc).not.toEqual(hashCopy); + + }); + }); - symlinks: { - 'changes with the contents of followed symlink referent'(test: Test) { + describe('symlinks', () => { + test('changes with the contents of followed symlink referent', () => { // GIVEN const dir1 = fs.mkdtempSync(path.join(os.tmpdir(), 'fingerprint-tests')); const dir2 = fs.mkdtempSync(path.join(os.tmpdir(), 'fingerprint-tests')); @@ -123,12 +122,12 @@ nodeunitShim({ const afterRevert = FileSystem.fingerprint(dir2); // THEN - test.notDeepEqual(original, afterChange); - test.deepEqual(afterRevert, original); - test.done(); - }, + expect(original).not.toEqual(afterChange); + expect(afterRevert).toEqual(original); - 'does not change with the contents of un-followed symlink referent'(test: Test) { + }); + + test('does not change with the contents of un-followed symlink referent', () => { // GIVEN const dir1 = fs.mkdtempSync(path.join(os.tmpdir(), 'fingerprint-tests')); const dir2 = fs.mkdtempSync(path.join(os.tmpdir(), 'fingerprint-tests')); @@ -151,14 +150,14 @@ nodeunitShim({ const afterRevert = 
FileSystem.fingerprint(dir2, { follow: SymlinkFollowMode.NEVER }); // THEN - test.deepEqual(original, afterChange); - test.deepEqual(afterRevert, original); - test.done(); - }, - }, - - eol: { - 'normalizes line endings'(test: Test) { + expect(original).toEqual(afterChange); + expect(afterRevert).toEqual(original); + + }); + }); + + describe('eol', () => { + test('normalizes line endings', () => { // GIVEN const lf = path.join(__dirname, 'eol', 'lf.txt'); const crlf = path.join(__dirname, 'eol', 'crlf.txt'); @@ -172,11 +171,11 @@ nodeunitShim({ const lfHash = contentFingerprint(lf); // THEN - test.notEqual(crlfStat.size, lfStat.size); // Difference in size due to different line endings - test.deepEqual(crlfHash, lfHash); // Same hash + expect(crlfStat.size).not.toEqual(lfStat.size); // Difference in size due to different line endings + expect(crlfHash).toEqual(lfHash); // Same hash fs.unlinkSync(crlf); - test.done(); - }, - }, + + }); + }); }); diff --git a/packages/@aws-cdk/core/test/fs/fs.test.ts b/packages/@aws-cdk/core/test/fs/fs.test.ts index e1e195daa1005..22260f3ec6e27 100644 --- a/packages/@aws-cdk/core/test/fs/fs.test.ts +++ b/packages/@aws-cdk/core/test/fs/fs.test.ts @@ -1,17 +1,16 @@ import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as sinon from 'sinon'; import { FileSystem } from '../../lib/fs'; -nodeunitShim({ - 'tearDown'(callback: any) { +describe('fs', () => { + afterEach(() => { sinon.restore(); - callback(); - }, - 'tmpdir returns a real path and is cached'(test: Test) { + }); + + test('tmpdir returns a real path and is cached', () => { // Create symlink that points to /tmp const symlinkTmp = path.join(__dirname, 'tmp-link'); fs.symlinkSync(os.tmpdir(), symlinkTmp); @@ -19,32 +18,32 @@ nodeunitShim({ // Now stub os.tmpdir() to return this link instead of /tmp const tmpdirStub = sinon.stub(os, 'tmpdir').returns(symlinkTmp); - test.ok(path.isAbsolute(FileSystem.tmpdir)); + expect(path.isAbsolute(FileSystem.tmpdir)).toEqual(true); const p = path.join(FileSystem.tmpdir, 'tmpdir-test.txt'); fs.writeFileSync(p, 'tmpdir-test'); - test.equal(p, fs.realpathSync(p)); - test.equal(fs.readFileSync(p, 'utf8'), 'tmpdir-test'); + expect(p).toEqual(fs.realpathSync(p)); + expect(fs.readFileSync(p, 'utf8')).toEqual('tmpdir-test'); // check that tmpdir() is called either 0 times (in which case it was // probably cached from before) or once (for this test). 
- test.ok(tmpdirStub.callCount < 2); + expect(tmpdirStub.callCount).toBeLessThan(2); fs.unlinkSync(p); fs.unlinkSync(symlinkTmp); - test.done(); - }, - 'mkdtemp creates a temporary directory in the system temp'(test: Test) { + }); + + test('mkdtemp creates a temporary directory in the system temp', () => { const tmpdir = FileSystem.mkdtemp('cdk-mkdtemp-'); - test.equal(path.dirname(tmpdir), FileSystem.tmpdir); - test.ok(fs.existsSync(tmpdir)); + expect(path.dirname(tmpdir)).toEqual(FileSystem.tmpdir); + expect(fs.existsSync(tmpdir)).toEqual(true); fs.rmdirSync(tmpdir); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/core/test/fs/utils.test.ts b/packages/@aws-cdk/core/test/fs/utils.test.ts index 1eef1b6c83573..898f3368ec6ee 100644 --- a/packages/@aws-cdk/core/test/fs/utils.test.ts +++ b/packages/@aws-cdk/core/test/fs/utils.test.ts @@ -1,175 +1,174 @@ import * as fs from 'fs'; import * as path from 'path'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { ImportMock } from 'ts-mock-imports'; import { SymlinkFollowMode } from '../../lib/fs'; import * as util from '../../lib/fs/utils'; -nodeunitShim({ - shouldFollow: { - always: { - 'follows internal'(test: Test) { +describe('utils', () => { + describe('shouldFollow', () => { + describe('always', () => { + test('follows internal', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join(sourceRoot, 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', true); try { - test.ok(util.shouldFollow(SymlinkFollowMode.ALWAYS, sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.ALWAYS, sourceRoot, linkTarget)).toEqual(true); + expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'follows external'(test: Test) { + test('follows external', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join('alternate', 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', true); try { - test.ok(util.shouldFollow(SymlinkFollowMode.ALWAYS, sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.ALWAYS, sourceRoot, linkTarget)).toEqual(true); + expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'does not follow internal when the referent does not exist'(test: Test) { + test('does not follow internal when the referent does not exist', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join(sourceRoot, 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', false); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.ALWAYS, sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.ALWAYS, sourceRoot, linkTarget)).toEqual(false); + expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'does not follow external when the referent does not exist'(test: Test) { + test('does not follow external when the referent does not exist', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join('alternate', 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', false); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.ALWAYS, 
sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.ALWAYS, sourceRoot, linkTarget)).toEqual(false); + expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, - }, + }); + }); - external: { - 'does not follow internal'(test: Test) { + describe('external', () => { + test('does not follow internal', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join(sourceRoot, 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync'); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.EXTERNAL, sourceRoot, linkTarget)); - test.ok(mockFsExists.notCalled); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.EXTERNAL, sourceRoot, linkTarget)).toEqual(false); + expect(mockFsExists.notCalled).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'follows external'(test: Test) { + test('follows external', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join('alternate', 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', true); try { - test.ok(util.shouldFollow(SymlinkFollowMode.EXTERNAL, sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.EXTERNAL, sourceRoot, linkTarget)).toEqual(true); + expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'does not follow external when referent does not exist'(test: Test) { + test('does not follow external when referent does not exist', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join('alternate', 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', false); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.EXTERNAL, sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.EXTERNAL, sourceRoot, linkTarget)).toEqual(false); + expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, - }, + }); + }); - blockExternal: { - 'follows internal'(test: Test) { + describe('blockExternal', () => { + test('follows internal', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join(sourceRoot, 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', true); try { - test.ok(util.shouldFollow(SymlinkFollowMode.BLOCK_EXTERNAL, sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.BLOCK_EXTERNAL, sourceRoot, linkTarget)).toEqual(true); + expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'does not follow internal when referent does not exist'(test: Test) { + test('does not follow internal when referent does not exist', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join(sourceRoot, 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync', false); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.BLOCK_EXTERNAL, sourceRoot, linkTarget)); - test.ok(mockFsExists.calledOnceWith(linkTarget)); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.BLOCK_EXTERNAL, sourceRoot, linkTarget)).toEqual(false); + 
expect(mockFsExists.calledOnceWith(linkTarget)).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'does not follow external'(test: Test) { + test('does not follow external', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join('alternate', 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync'); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.BLOCK_EXTERNAL, sourceRoot, linkTarget)); - test.ok(mockFsExists.notCalled); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.BLOCK_EXTERNAL, sourceRoot, linkTarget)).toEqual(false); + expect(mockFsExists.notCalled).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, - }, + }); + }); - never: { - 'does not follow internal'(test: Test) { + describe('never', () => { + test('does not follow internal', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join(sourceRoot, 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync'); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.NEVER, sourceRoot, linkTarget)); - test.ok(mockFsExists.notCalled); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.NEVER, sourceRoot, linkTarget)).toEqual(false); + expect(mockFsExists.notCalled).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, + }); - 'does not follow external'(test: Test) { + test('does not follow external', () => { const sourceRoot = path.join('source', 'root'); const linkTarget = path.join('alternate', 'referent'); const mockFsExists = ImportMock.mockFunction(fs, 'existsSync'); try { - test.ok(!util.shouldFollow(SymlinkFollowMode.NEVER, sourceRoot, linkTarget)); - test.ok(mockFsExists.notCalled); - test.done(); + expect(util.shouldFollow(SymlinkFollowMode.NEVER, sourceRoot, linkTarget)).toEqual(false); + expect(mockFsExists.notCalled).toEqual(true); + ; } finally { mockFsExists.restore(); } - }, - }, - }, + }); + }); + }); }); diff --git a/packages/@aws-cdk/core/test/include.test.ts b/packages/@aws-cdk/core/test/include.test.ts index d36dbf8739d19..3973107536bf6 100644 --- a/packages/@aws-cdk/core/test/include.test.ts +++ b/packages/@aws-cdk/core/test/include.test.ts @@ -1,14 +1,13 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnInclude, CfnOutput, CfnParameter, CfnResource, Stack } from '../lib'; import { toCloudFormation } from './util'; -nodeunitShim({ - 'the Include construct can be used to embed an existing template as-is into a stack'(test: Test) { +describe('include', () => { + test('the Include construct can be used to embed an existing template as-is into a stack', () => { const stack = new Stack(); new CfnInclude(stack, 'T1', { template: clone(template) }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Parameters: { MyParam: { Type: 'String', Default: 'Hello' } }, Resources: { MyResource1: { Type: 'ResourceType1', Properties: { P1: 1, P2: 2 } }, @@ -16,10 +15,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'included templates can co-exist with elements created programmatically'(test: Test) { + }); + + test('included templates can co-exist with elements created programmatically', () => { const stack = new Stack(); new CfnInclude(stack, 'T1', { template: clone(template) }); @@ -27,7 +26,7 @@ nodeunitShim({ new CfnOutput(stack, 'MyOutput', { description: 'Out!', value: 'hey' }); new CfnParameter(stack, 'MyParam2', { type: 'Integer' }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ 
Parameters: { MyParam: { Type: 'String', Default: 'Hello' }, MyParam2: { Type: 'Integer' }, @@ -40,10 +39,10 @@ nodeunitShim({ Outputs: { MyOutput: { Description: 'Out!', Value: 'hey' } }, }); - test.done(); - }, - 'exception is thrown in construction if an entity from an included template has the same id as a programmatic entity'(test: Test) { + }); + + test('exception is thrown in construction if an entity from an included template has the same id as a programmatic entity', () => { const stack = new Stack(); new CfnInclude(stack, 'T1', { template }); @@ -51,11 +50,11 @@ nodeunitShim({ new CfnOutput(stack, 'MyOutput', { description: 'Out!', value: 'in' }); new CfnParameter(stack, 'MyParam', { type: 'Integer' }); // duplicate! - test.throws(() => toCloudFormation(stack)); - test.done(); - }, + expect(() => toCloudFormation(stack)).toThrow(); + + }); - 'correctly merges template sections that contain strings'(test: Test) { + test('correctly merges template sections that contain strings', () => { const stack = new Stack(); new CfnInclude(stack, 'T1', { @@ -71,13 +70,13 @@ nodeunitShim({ }, }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ AWSTemplateFormatVersion: '2010-09-09', Description: 'Test 1\nTest 2', }); - test.done(); - }, + + }); }); const template = { diff --git a/packages/@aws-cdk/core/test/logical-id.test.ts b/packages/@aws-cdk/core/test/logical-id.test.ts index f4ab0f8eb63d9..bfd2381dc3461 100644 --- a/packages/@aws-cdk/core/test/logical-id.test.ts +++ b/packages/@aws-cdk/core/test/logical-id.test.ts @@ -1,12 +1,11 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnElement, CfnResource, Construct, Stack } from '../lib'; import { toCloudFormation } from './util'; /** * These tests are executed once (for specific ID schemes) */ -nodeunitShim({ - 'if the naming scheme uniquifies with a hash we can have the same concatenated identifier'(test: Test) { +describe('logical id', () => { + test('if the naming scheme uniquifies with a hash we can have the same concatenated identifier', () => { // GIVEN const stack = new Stack(undefined, 'TestStack'); @@ -19,10 +18,10 @@ nodeunitShim({ // THEN: no exception - test.done(); - }, - 'special case: if the resource is top-level, a hash is not added'(test: Test) { + }); + + test('special case: if the resource is top-level, a hash is not added', () => { // GIVEN const stack = new Stack(undefined, 'TestStack'); @@ -32,14 +31,14 @@ nodeunitShim({ const r3 = new CfnResource(stack, '*y-'.repeat(255), { type: 'Resource' }); // non-alpha are filtered out (yes, I know it might conflict) // THEN - test.equal(stack.resolve(r.logicalId), 'MyAwesomeness'); - test.equal(stack.resolve(r2.logicalId), 'x'.repeat(255)); - test.equal(stack.resolve(r3.logicalId), 'y'.repeat(255)); + expect(stack.resolve(r.logicalId)).toEqual('MyAwesomeness'); + expect(stack.resolve(r2.logicalId)).toEqual('x'.repeat(255)); + expect(stack.resolve(r3.logicalId)).toEqual('y'.repeat(255)); + - test.done(); - }, + }); - 'if resource is top-level and logical id is longer than allowed, it is trimmed with a hash'(test: Test) { + test('if resource is top-level and logical id is longer than allowed, it is trimmed with a hash', () => { // GIVEN const stack = new Stack(undefined, 'TestStack'); @@ -47,11 +46,11 @@ nodeunitShim({ const r = new CfnResource(stack, 'x'.repeat(256), { type: 'Resource' }); // THEN - test.equals(stack.resolve(r.logicalId), 'x'.repeat(240) + 'C7A139A2'); - test.done(); - }, + 
expect(stack.resolve(r.logicalId)).toEqual('x'.repeat(240) + 'C7A139A2'); - 'Logical IDs can be renamed at the stack level'(test: Test) { + }); + + test('Logical IDs can be renamed at the stack level', () => { // GIVEN const stack = new Stack(); @@ -62,12 +61,12 @@ nodeunitShim({ // THEN const template = toCloudFormation(stack); - test.ok('Renamed' in template.Resources); + expect('Renamed' in template.Resources).toEqual(true); + - test.done(); - }, + }); - 'Renames for objects that don\'t exist fail'(test: Test) { + test('Renames for objects that don\'t exist fail', () => { // GIVEN const stack = new Stack(); new Construct(stack, 'Parent'); @@ -76,12 +75,12 @@ nodeunitShim({ stack.renameLogicalId('DOESNOTEXIST', 'Renamed'); // THEN - test.throws(() => toCloudFormation(stack)); + expect(() => toCloudFormation(stack)).toThrow(); - test.done(); - }, - 'ID Renames that collide with existing IDs should fail'(test: Test) { + }); + + test('ID Renames that collide with existing IDs should fail', () => { // GIVEN const stack = new Stack(); stack.renameLogicalId('ParentThingResource1916E7808', 'ParentThingResource2F19948CB'); @@ -92,11 +91,11 @@ nodeunitShim({ new CfnResource(parent, 'ThingResource2', { type: 'AWS::TAAS::Thing' }); // THEN - test.throws(() => toCloudFormation(stack), /Two objects have been assigned the same Logical ID/); - test.done(); - }, + expect(() => toCloudFormation(stack)).toThrow(/Two objects have been assigned the same Logical ID/); + + }); - 'hashed naming scheme filters constructs named "Resource" from the human portion'(test: Test) { + test('hashed naming scheme filters constructs named "Resource" from the human portion', () => { // GIVEN const stack = new Stack(); @@ -109,7 +108,7 @@ nodeunitShim({ // THEN const template = toCloudFormation(stack); - test.deepEqual(template, { + expect(template).toEqual({ Resources: { ParentChildHeyThere35220347: { Type: 'AWS::TAAS::Thing', @@ -117,10 +116,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'can transparently wrap constructs using "Default" id'(test: Test) { + }); + + test('can transparently wrap constructs using "Default" id', () => { // GIVEN const stack1 = new Stack(); const parent1 = new Construct(stack1, 'Parent'); @@ -129,7 +128,7 @@ nodeunitShim({ // AND const theId1 = Object.keys(template1.Resources)[0]; - test.equal('AWS::TAAS::Thing', template1.Resources[theId1].Type); + expect('AWS::TAAS::Thing').toEqual(template1.Resources[theId1].Type); // WHEN const stack2 = new Stack(); @@ -139,33 +138,33 @@ nodeunitShim({ const template2 = toCloudFormation(stack1); const theId2 = Object.keys(template2.Resources)[0]; - test.equal('AWS::TAAS::Thing', template2.Resources[theId2].Type); + expect('AWS::TAAS::Thing').toEqual(template2.Resources[theId2].Type); // THEN: same ID, same object - test.equal(theId1, theId2); + expect(theId1).toEqual(theId2); + - test.done(); - }, + }); - 'non-alphanumeric characters are removed from the human part of the logical ID'(test: Test) { + test('non-alphanumeric characters are removed from the human part of the logical ID', () => { const val1 = logicalForElementInPath(['Foo-bar', 'B00m', 'Hello_World', '&&Horray Horray.']); const val2 = logicalForElementInPath(['Foobar', 'B00m', 'HelloWorld', 'HorrayHorray']); // same human part, different hash - test.deepEqual(val1, 'FoobarB00mHelloWorldHorrayHorray640E99FB'); - test.deepEqual(val2, 'FoobarB00mHelloWorldHorrayHorray744334FD'); - test.done(); - }, + expect(val1).toEqual('FoobarB00mHelloWorldHorrayHorray640E99FB'); + 
expect(val2).toEqual('FoobarB00mHelloWorldHorrayHorray744334FD'); - 'non-alphanumeric characters are removed even if the ID has only one component'(test: Test) { + }); + + test('non-alphanumeric characters are removed even if the ID has only one component', () => { const val1 = logicalForElementInPath(['Foo-bar']); // same human part, different hash - test.deepEqual(val1, 'Foobar'); - test.done(); - }, + expect(val1).toEqual('Foobar'); + + }); - 'empty identifiers are not allowed'(test: Test) { + test('empty identifiers are not allowed', () => { // GIVEN const stack = new Stack(); @@ -173,11 +172,11 @@ nodeunitShim({ new CfnResource(stack, '.', { type: 'R' }); // THEN - test.throws(() => toCloudFormation(stack), /Logical ID must adhere to the regular expression/); - test.done(); - }, + expect(() => toCloudFormation(stack)).toThrow(/Logical ID must adhere to the regular expression/); - 'too large identifiers are truncated yet still remain unique'(test: Test) { + }); + + test('too large identifiers are truncated yet still remain unique', () => { // GIVEN const stack = new Stack(); const A = new Construct(stack, generateString(100)); @@ -192,14 +191,14 @@ nodeunitShim({ const C2 = new CfnResource(B, firstPart + generateString(40), { type: 'Resource' }); // THEN - test.ok(C1.logicalId.length <= 255); - test.ok(C2.logicalId.length <= 255); - test.notEqual(C1, C2); + expect(C1.logicalId.length).toBeLessThanOrEqual(255); + expect(C2.logicalId.length).toBeLessThanOrEqual(255); + expect(C1).not.toEqual(C2); + - test.done(); - }, + }); - 'Refs and dependencies will correctly reflect renames done at the stack level'(test: Test) { + test('Refs and dependencies will correctly reflect renames done at the stack level', () => { // GIVEN const stack = new Stack(); stack.renameLogicalId('OriginalName', 'NewName'); @@ -212,7 +211,7 @@ nodeunitShim({ c2.node.addDependency(c1); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { NewName: { Type: 'R1' }, Construct2: { @@ -223,10 +222,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'customize logical id allocation behavior by overriding `Stack.allocateLogicalId`'(test: Test) { + }); + + test('customize logical id allocation behavior by overriding `Stack.allocateLogicalId`', () => { class MyStack extends Stack { protected allocateLogicalId(element: CfnElement): string { if (element.node.id === 'A') { return 'LogicalIdOfA'; } @@ -246,17 +245,17 @@ nodeunitShim({ const c = new CfnResource(stack, 'B', { type: 'Type::Of::C' }); c.overrideLogicalId('TheC'); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { LogicalIdOfA: { Type: 'Type::Of::A' }, BoomBoomB: { Type: 'Type::Of::B' }, TheC: { Type: 'Type::Of::C' }, }, }); - test.done(); - }, - 'detects duplicate logical IDs in the same Stack caused by overrideLogicalId'(test: Test) { + }); + + test('detects duplicate logical IDs in the same Stack caused by overrideLogicalId', () => { const stack = new Stack(); const resource1 = new CfnResource(stack, 'A', { type: 'Type::Of::A' }); const resource2 = new CfnResource(stack, 'B', { type: 'Type::Of::B' }); @@ -264,12 +263,12 @@ nodeunitShim({ resource1.overrideLogicalId('C'); resource2.overrideLogicalId('C'); - test.throws(() => { + expect(() => { toCloudFormation(stack); - }, /section 'Resources' already contains 'C'/); + }).toThrow(/section 'Resources' already contains 'C'/); + - test.done(); - }, + }); }); function generateString(chars: number) { diff --git 
a/packages/@aws-cdk/core/test/mappings.test.ts b/packages/@aws-cdk/core/test/mappings.test.ts index 4b2e15c4d4bf2..7d1964f60bfe8 100644 --- a/packages/@aws-cdk/core/test/mappings.test.ts +++ b/packages/@aws-cdk/core/test/mappings.test.ts @@ -1,11 +1,10 @@ import { ArtifactMetadataEntryType } from '@aws-cdk/cloud-assembly-schema'; import { CloudAssembly } from '@aws-cdk/cx-api'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, Aws, CfnMapping, CfnResource, Fn, Stack } from '../lib'; import { toCloudFormation } from './util'; -nodeunitShim({ - 'mappings can be added as another type of entity, and mapping.findInMap can be used to get a token'(test: Test) { +describe('mappings', () => { + test('mappings can be added as another type of entity, and mapping.findInMap can be used to get a token', () => { const stack = new Stack(); const mapping = new CfnMapping(stack, 'MyMapping', { mapping: { @@ -26,14 +25,14 @@ nodeunitShim({ RefToValueInMap: mapping.findInMap('TopLevelKey1', 'SecondLevelKey1'), }, }); - test.throws(() => mapping.findInMap('NotFoundTopLevel', 'NotFound'), 'Mapping doesn\'t contain top-level key \'NotFoundTopLevel\''); - test.throws(() => mapping.findInMap('TopLevelKey1', 'NotFound'), 'Mapping doesn\'t contain second-level key \'NotFound\''); + expect(() => mapping.findInMap('NotFoundTopLevel', 'NotFound')).toThrow('Mapping doesn\'t contain top-level key \'NotFoundTopLevel\''); + expect(() => mapping.findInMap('TopLevelKey1', 'NotFound')).toThrow('Mapping doesn\'t contain second-level key \'NotFound\''); // set value can be used to set/modify a specific value mapping.setValue('TopLevelKey2', 'SecondLevelKey2', 'Hi'); mapping.setValue('TopLevelKey1', 'SecondLevelKey1', [1, 2, 3, 4]); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Mappings: { MyMapping: @@ -60,10 +59,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'allow using unresolved tokens in find-in-map'(test: Test) { + }); + + test('allow using unresolved tokens in find-in-map', () => { const stack = new Stack(); const mapping = new CfnMapping(stack, 'mapping', { @@ -78,19 +77,19 @@ nodeunitShim({ const v2 = Fn.findInMap(mapping.logicalId, 'instanceCount', Aws.REGION); const expected = { 'Fn::FindInMap': ['mapping', 'instanceCount', { Ref: 'AWS::Region' }] }; - test.deepEqual(stack.resolve(v1), expected); - test.deepEqual(stack.resolve(v2), expected); - test.deepEqual(toCloudFormation(stack).Mappings, { + expect(stack.resolve(v1)).toEqual(expected); + expect(stack.resolve(v2)).toEqual(expected); + expect(toCloudFormation(stack).Mappings).toEqual({ mapping: { instanceCount: { 'us-east-1': 12, }, }, }); - test.done(); - }, - 'no validation if first key is token and second is a static string'(test: Test) { + }); + + test('no validation if first key is token and second is a static string', () => { // GIVEN const stack = new Stack(); const mapping = new CfnMapping(stack, 'mapping', { @@ -105,20 +104,20 @@ nodeunitShim({ const v = mapping.findInMap(Aws.REGION, 'size'); // THEN - test.deepEqual(stack.resolve(v), { + expect(stack.resolve(v)).toEqual({ 'Fn::FindInMap': ['mapping', { Ref: 'AWS::Region' }, 'size'], }); - test.deepEqual(toCloudFormation(stack).Mappings, { + expect(toCloudFormation(stack).Mappings).toEqual({ mapping: { 'us-east-1': { size: 12, }, }, }); - test.done(); - }, - 'validate first key if it is a string and second is a token'(test: Test) { + }); + + test('validate first key if it is a string and second is a token', () => { // GIVEN const stack = new 
Stack(); const mapping = new CfnMapping(stack, 'mapping', { @@ -133,17 +132,17 @@ nodeunitShim({ const v = mapping.findInMap('size', Aws.REGION); // THEN - test.throws(() => mapping.findInMap('not-found', Aws.REGION), /Mapping doesn't contain top-level key 'not-found'/); - test.deepEqual(stack.resolve(v), { 'Fn::FindInMap': ['mapping', 'size', { Ref: 'AWS::Region' }] }); - test.deepEqual(toCloudFormation(stack).Mappings, { + expect(() => mapping.findInMap('not-found', Aws.REGION)).toThrow(/Mapping doesn't contain top-level key 'not-found'/); + expect(stack.resolve(v)).toEqual({ 'Fn::FindInMap': ['mapping', 'size', { Ref: 'AWS::Region' }] }); + expect(toCloudFormation(stack).Mappings).toEqual({ mapping: { size: { 'us-east-1': 12, }, }, }); - test.done(); - }, + + }); }); describe('lazy mapping', () => { diff --git a/packages/@aws-cdk/core/test/output.test.ts b/packages/@aws-cdk/core/test/output.test.ts index 0359049756946..5cc22d474cb2f 100644 --- a/packages/@aws-cdk/core/test/output.test.ts +++ b/packages/@aws-cdk/core/test/output.test.ts @@ -1,4 +1,3 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, CfnOutput, CfnResource, ConstructNode, Stack, ValidationError } from '../lib'; import { toCloudFormation } from './util'; @@ -9,8 +8,8 @@ beforeEach(() => { stack = new Stack(app, 'Stack'); }); -nodeunitShim({ - 'outputs can be added to the stack'(test: Test) { +describe('output', () => { + test('outputs can be added to the stack', () => { const res = new CfnResource(stack, 'MyResource', { type: 'R' }); const ref = res.ref; @@ -19,7 +18,7 @@ nodeunitShim({ value: ref, description: 'CfnOutput properties', }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: { Type: 'R' } }, Outputs: { @@ -31,15 +30,15 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'No export is created by default'(test: Test) { + }); + + test('No export is created by default', () => { // WHEN new CfnOutput(stack, 'SomeOutput', { value: 'x' }); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Outputs: { SomeOutput: { Value: 'x', @@ -47,10 +46,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'importValue can be used to obtain a Fn::ImportValue expression'(test: Test) { + }); + + test('importValue can be used to obtain a Fn::ImportValue expression', () => { // GIVEN const stack2 = new Stack(app, 'Stack2'); @@ -64,7 +63,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack2), { + expect(toCloudFormation(stack2)).toEqual({ Resources: { Resource: { Type: 'Some::Resource', @@ -75,10 +74,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'importValue used inside the same stack produces an error'(test: Test) { + }); + + test('importValue used inside the same stack produces an error', () => { // WHEN const output = new CfnOutput(stack, 'SomeOutput', { value: 'x', exportName: 'asdf' }); new CfnResource(stack, 'Resource', { @@ -91,10 +90,10 @@ nodeunitShim({ // THEN expect(() => toCloudFormation(stack)).toThrow(/should only be used in a different Stack/); - test.done(); - }, - 'error message if importValue is used and Output is not exported'(test: Test) { + }); + + test('error message if importValue is used and Output is not exported', () => { // GIVEN const stack2 = new Stack(app, 'Stack2'); @@ -107,14 +106,14 @@ nodeunitShim({ }, }); - test.throws(() => { + expect(() => { toCloudFormation(stack2); - }, /Add an exportName to the CfnOutput/); + }).toThrow(/Add an exportName to the 
CfnOutput/); + - test.done(); - }, + }); - 'Verify maximum length of export name'(test: Test) { + test('Verify maximum length of export name', () => { new CfnOutput(stack, 'SomeOutput', { value: 'x', exportName: 'x'.repeat(260) }); const errors = ConstructNode.validate(stack.node).map((v: ValidationError) => v.message); @@ -123,6 +122,6 @@ nodeunitShim({ expect.stringContaining('Export name cannot exceed 255 characters'), ]); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/core/test/parameter.test.ts b/packages/@aws-cdk/core/test/parameter.test.ts index d13edbffddb0a..ff5015717713b 100644 --- a/packages/@aws-cdk/core/test/parameter.test.ts +++ b/packages/@aws-cdk/core/test/parameter.test.ts @@ -1,9 +1,8 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnParameter, CfnResource, Construct, Stack } from '../lib'; import { toCloudFormation } from './util'; -nodeunitShim({ - 'parameters can be used and referenced using param.ref'(test: Test) { +describe('parameter', () => { + test('parameters can be used and referenced using param.ref', () => { const stack = new Stack(); const child = new Construct(stack, 'Child'); @@ -15,7 +14,7 @@ nodeunitShim({ new CfnResource(stack, 'Resource', { type: 'Type', properties: { ReferenceToParam: param.value } }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Parameters: { ChildMyParam3161BF5D: { Default: 10, @@ -31,14 +30,14 @@ nodeunitShim({ }, }); - test.done(); - }, - 'parameters are tokens, so they can be assigned without .ref and their Ref will be taken'(test: Test) { + }); + + test('parameters are tokens, so they can be assigned without .ref and their Ref will be taken', () => { const stack = new Stack(); const param = new CfnParameter(stack, 'MyParam', { type: 'String' }); - test.deepEqual(stack.resolve(param), { Ref: 'MyParam' }); - test.done(); - }, + expect(stack.resolve(param)).toEqual({ Ref: 'MyParam' }); + + }); }); diff --git a/packages/@aws-cdk/core/test/private/physical-name-generator.test.ts b/packages/@aws-cdk/core/test/private/physical-name-generator.test.ts index 22539e435b214..87412c7074188 100644 --- a/packages/@aws-cdk/core/test/private/physical-name-generator.test.ts +++ b/packages/@aws-cdk/core/test/private/physical-name-generator.test.ts @@ -1,23 +1,22 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, Aws, Lazy, Resource, Stack, Token } from '../../lib'; import { GeneratedWhenNeededMarker, generatePhysicalName, isGeneratedWhenNeededMarker } from '../../lib/private/physical-name-generator'; -nodeunitShim({ - generatePhysicalName: { - 'generates correct physical names'(test: Test) { +describe('physical name generator', () => { + describe('generatePhysicalName', () => { + test('generates correct physical names', () => { const app = new App(); const stack = new Stack(app, 'TestStack', { env: { account: '012345678912', region: 'bermuda-triangle-1' } }); const testResourceA = new TestResource(stack, 'A'); const testResourceB = new TestResource(testResourceA, 'B'); - test.equal(generatePhysicalName(testResourceA), 'teststackteststackaa164c141d59b37c1b663'); - test.equal(generatePhysicalName(testResourceB), 'teststackteststackab27595cd34d8188283a1f'); + expect(generatePhysicalName(testResourceA)).toEqual('teststackteststackaa164c141d59b37c1b663'); + expect(generatePhysicalName(testResourceB)).toEqual('teststackteststackab27595cd34d8188283a1f'); - test.done(); - }, - 'generates different names in different accounts'(test: Test) { + }); + + 
test('generates different names in different accounts', () => { const appA = new App(); const stackA = new Stack(appA, 'TestStack', { env: { account: '012345678912', region: 'bermuda-triangle-1' } }); const resourceA = new TestResource(stackA, 'Resource'); @@ -26,12 +25,12 @@ nodeunitShim({ const stackB = new Stack(appB, 'TestStack', { env: { account: '012345678913', region: 'bermuda-triangle-1' } }); const resourceB = new TestResource(stackB, 'Resource'); - test.notEqual(generatePhysicalName(resourceA), generatePhysicalName(resourceB)); + expect(generatePhysicalName(resourceA)).not.toEqual(generatePhysicalName(resourceB)); + - test.done(); - }, + }); - 'generates different names in different regions'(test: Test) { + test('generates different names in different regions', () => { const appA = new App(); const stackA = new Stack(appA, 'TestStack', { env: { account: '012345678912', region: 'bermuda-triangle-1' } }); const resourceA = new TestResource(stackA, 'Resource'); @@ -40,84 +39,84 @@ nodeunitShim({ const stackB = new Stack(appB, 'TestStack', { env: { account: '012345678912', region: 'bermuda-triangle-2' } }); const resourceB = new TestResource(stackB, 'Resource'); - test.notEqual(generatePhysicalName(resourceA), generatePhysicalName(resourceB)); + expect(generatePhysicalName(resourceA)).not.toEqual(generatePhysicalName(resourceB)); + - test.done(); - }, + }); - 'fails when the region is an unresolved token'(test: Test) { + test('fails when the region is an unresolved token', () => { const app = new App(); const stack = new Stack(app, 'TestStack', { env: { account: '012345678912', region: Aws.REGION } }); const testResource = new TestResource(stack, 'A'); - test.throws(() => generatePhysicalName(testResource), + expect(() => generatePhysicalName(testResource)).toThrow( /Cannot generate a physical name for TestStack\/A, because the region is un-resolved or missing/); - test.done(); - }, - 'fails when the region is not provided'(test: Test) { + }); + + test('fails when the region is not provided', () => { const app = new App(); const stack = new Stack(app, 'TestStack', { env: { account: '012345678912' } }); const testResource = new TestResource(stack, 'A'); - test.throws(() => generatePhysicalName(testResource), + expect(() => generatePhysicalName(testResource)).toThrow( /Cannot generate a physical name for TestStack\/A, because the region is un-resolved or missing/); - test.done(); - }, - 'fails when the account is an unresolved token'(test: Test) { + }); + + test('fails when the account is an unresolved token', () => { const app = new App(); const stack = new Stack(app, 'TestStack', { env: { account: Aws.ACCOUNT_ID, region: 'bermuda-triangle-1' } }); const testResource = new TestResource(stack, 'A'); - test.throws(() => generatePhysicalName(testResource), + expect(() => generatePhysicalName(testResource)).toThrow( /Cannot generate a physical name for TestStack\/A, because the account is un-resolved or missing/); - test.done(); - }, - 'fails when the account is not provided'(test: Test) { + }); + + test('fails when the account is not provided', () => { const app = new App(); const stack = new Stack(app, 'TestStack', { env: { region: 'bermuda-triangle-1' } }); const testResource = new TestResource(stack, 'A'); - test.throws(() => generatePhysicalName(testResource), + expect(() => generatePhysicalName(testResource)).toThrow( /Cannot generate a physical name for TestStack\/A, because the account is un-resolved or missing/); - test.done(); - }, - }, - GeneratedWhenNeededMarker: { - 'is 
correctly recognized'(test: Test) { + }); + }); + + describe('GeneratedWhenNeededMarker', () => { + test('is correctly recognized', () => { const marker = new GeneratedWhenNeededMarker(); const asString = Token.asString(marker); - test.ok(isGeneratedWhenNeededMarker(asString)); + expect(isGeneratedWhenNeededMarker(asString)).toEqual(true); + - test.done(); - }, + }); - 'throws when resolved'(test: Test) { + test('throws when resolved', () => { const marker = new GeneratedWhenNeededMarker(); const asString = Token.asString(marker); - test.throws(() => new Stack().resolve(asString), /Use "this.physicalName" instead/); + expect(() => new Stack().resolve(asString)).toThrow(/Use "this.physicalName" instead/); + + + }); + }); - test.done(); - }, - }, + describe('isGeneratedWhenNeededMarker', () => { + test('responds correctly for other tokens', () => { + expect(isGeneratedWhenNeededMarker('this is not even a token!')).toEqual(false); + expect(isGeneratedWhenNeededMarker(Lazy.string({ produce: () => 'Bazinga!' }))).toEqual(false); - isGeneratedWhenNeededMarker: { - 'correctly response for other tokens'(test: Test) { - test.ok(!isGeneratedWhenNeededMarker('this is not even a token!')); - test.ok(!isGeneratedWhenNeededMarker(Lazy.string({ produce: () => 'Bazinga!' }))); - test.done(); - }, - }, + }); + }); }); class TestResource extends Resource {} diff --git a/packages/@aws-cdk/core/test/private/tree-metadata.test.ts b/packages/@aws-cdk/core/test/private/tree-metadata.test.ts index b7173aca585ec..752d6f3fc5f42 100644 --- a/packages/@aws-cdk/core/test/private/tree-metadata.test.ts +++ b/packages/@aws-cdk/core/test/private/tree-metadata.test.ts @@ -1,7 +1,6 @@ import * as fs from 'fs'; import * as path from 'path'; import * as cxschema from '@aws-cdk/cloud-assembly-schema'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, CfnParameter, CfnResource, Construct as CfnConstruct, Lazy, Stack, TreeInspector } from '../../lib/index'; abstract class AbstractCfnResource extends CfnResource { @@ -19,8 +18,8 @@ abstract class AbstractCfnResource extends CfnResource { protected abstract get cfnProperties(): { [key: string]: any }; } -nodeunitShim({ - 'tree metadata is generated as expected'(test: Test) { +describe('tree metadata', () => { + test('tree metadata is generated as expected', () => { const app = new App(); const stack = new Stack(app, 'mystack'); @@ -28,9 +27,9 @@ nodeunitShim({ const assembly = app.synth(); const treeArtifact = assembly.tree(); - test.ok(treeArtifact); + expect(treeArtifact).toBeDefined(); - test.deepEqual(readJson(assembly.directory, treeArtifact!.file), { + expect(readJson(assembly.directory, treeArtifact!.file)).toEqual({ version: 'tree-0.1', tree: expect.objectContaining({ id: 'App', @@ -53,10 +52,10 @@ nodeunitShim({ }, }), }); - test.done(); - }, - 'tree metadata for a Cfn resource'(test: Test) { + }); + + test('tree metadata for a Cfn resource', () => { class MyCfnResource extends AbstractCfnResource { protected get cfnProperties(): { [key: string]: any } { return { @@ -76,9 +75,9 @@ nodeunitShim({ const assembly = app.synth(); const treeArtifact = assembly.tree(); - test.ok(treeArtifact); + expect(treeArtifact).toBeDefined(); - test.deepEqual(readJson(assembly.directory, treeArtifact!.file), { + expect(readJson(assembly.directory, treeArtifact!.file)).toEqual({ version: 'tree-0.1', tree: expect.objectContaining({ id: 'App', @@ -112,10 +111,10 @@ nodeunitShim({ }, }), }); - test.done(); - }, - 'tree metadata has construct class & version in there'(test: 
Test) { + }); + + test('tree metadata has construct class & version in there', () => { // The runtime metadata this test relies on is only available if the most // recent compile has happened using 'jsii', as the jsii compiler injects // this metadata. @@ -143,11 +142,11 @@ nodeunitShim({ const assembly = app.synth(); const treeArtifact = assembly.tree(); - test.ok(treeArtifact); + expect(treeArtifact).toBeDefined(); const codeBuild = !!process.env.CODEBUILD_BUILD_ID; - test.deepEqual(readJson(assembly.directory, treeArtifact!.file), { + expect(readJson(assembly.directory, treeArtifact!.file)).toEqual({ version: 'tree-0.1', tree: expect.objectContaining({ children: expect.objectContaining({ @@ -169,10 +168,10 @@ nodeunitShim({ }), }); - test.done(); - }, - 'token resolution & cfn parameter'(test: Test) { + }); + + test('token resolution & cfn parameter', () => { const app = new App(); const stack = new Stack(app, 'mystack'); const cfnparam = new CfnParameter(stack, 'mycfnparam'); @@ -190,9 +189,9 @@ nodeunitShim({ const assembly = app.synth(); const treeArtifact = assembly.tree(); - test.ok(treeArtifact); + expect(treeArtifact).toBeDefined(); - test.deepEqual(readJson(assembly.directory, treeArtifact!.file), { + expect(readJson(assembly.directory, treeArtifact!.file)).toEqual({ version: 'tree-0.1', tree: expect.objectContaining({ id: 'App', @@ -226,10 +225,10 @@ nodeunitShim({ }, }), }); - test.done(); - }, - 'cross-stack tokens'(test: Test) { + }); + + test('cross-stack tokens', () => { class MyFirstResource extends AbstractCfnResource { public readonly lazykey: string; @@ -268,9 +267,9 @@ nodeunitShim({ const assembly = app.synth(); const treeArtifact = assembly.tree(); - test.ok(treeArtifact); + expect(treeArtifact).toBeDefined(); - test.deepEqual(readJson(assembly.directory, treeArtifact!.file), { + expect(readJson(assembly.directory, treeArtifact!.file)).toEqual({ version: 'tree-0.1', tree: expect.objectContaining({ id: 'App', @@ -316,10 +315,10 @@ nodeunitShim({ }), }); - test.done(); - }, - 'failing nodes'(test: Test) { + }); + + test('failing nodes', () => { class MyCfnResource extends CfnResource { public inspect(_: TreeInspector) { throw new Error('Forcing an inspect error'); @@ -334,7 +333,7 @@ nodeunitShim({ const assembly = app.synth(); const treeArtifact = assembly.tree(); - test.ok(treeArtifact); + expect(treeArtifact).toBeDefined(); const treenode = app.node.findChild('Tree'); @@ -343,10 +342,10 @@ nodeunitShim({ && /Forcing an inspect error/.test(md.data as string) && /mycfnresource/.test(md.data as string); }); - test.ok(warn); + expect(warn).toBeDefined(); // assert that the rest of the construct tree is rendered - test.deepEqual(readJson(assembly.directory, treeArtifact!.file), { + expect(readJson(assembly.directory, treeArtifact!.file)).toEqual({ version: 'tree-0.1', tree: expect.objectContaining({ id: 'App', @@ -364,8 +363,8 @@ nodeunitShim({ }), }); - test.done(); - }, + + }); }); function readJson(outdir: string, file: string) { diff --git a/packages/@aws-cdk/core/test/resource.test.ts b/packages/@aws-cdk/core/test/resource.test.ts index 4fbe41692756d..5e46f9918c2a8 100644 --- a/packages/@aws-cdk/core/test/resource.test.ts +++ b/packages/@aws-cdk/core/test/resource.test.ts @@ -1,5 +1,4 @@ import * as cxapi from '@aws-cdk/cx-api'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, App as Root, CfnCondition, CfnDeletionPolicy, CfnResource, Construct, @@ -8,8 +7,8 @@ import { import { synthesize } from '../lib/private/synthesis'; import { 
toCloudFormation } from './util'; -nodeunitShim({ - 'all resources derive from Resource, which derives from Entity'(test: Test) { +describe('resource', () => { + test('all resources derive from Resource, which derives from Entity', () => { const stack = new Stack(); new CfnResource(stack, 'MyResource', { @@ -19,7 +18,7 @@ nodeunitShim({ }, }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: { Type: 'MyResourceType', @@ -31,16 +30,16 @@ nodeunitShim({ }, }); - test.done(); - }, - 'resources must reside within a Stack and fail upon creation if not'(test: Test) { + }); + + test('resources must reside within a Stack and fail upon creation if not', () => { const root = new Root(); - test.throws(() => new CfnResource(root, 'R1', { type: 'ResourceType' })); - test.done(); - }, + expect(() => new CfnResource(root, 'R1', { type: 'ResourceType' })).toThrow(); - 'all entities have a logical ID calculated based on their full path in the tree'(test: Test) { + }); + + test('all entities have a logical ID calculated based on their full path in the tree', () => { const stack = new Stack(undefined, 'TestStack'); const level1 = new Construct(stack, 'level1'); const level2 = new Construct(level1, 'level2'); @@ -48,28 +47,28 @@ nodeunitShim({ const res1 = new CfnResource(level1, 'childoflevel1', { type: 'MyResourceType1' }); const res2 = new CfnResource(level3, 'childoflevel3', { type: 'MyResourceType2' }); - test.equal(withoutHash(stack.resolve(res1.logicalId)), 'level1childoflevel1'); - test.equal(withoutHash(stack.resolve(res2.logicalId)), 'level1level2level3childoflevel3'); + expect(withoutHash(stack.resolve(res1.logicalId))).toEqual('level1childoflevel1'); + expect(withoutHash(stack.resolve(res2.logicalId))).toEqual('level1level2level3childoflevel3'); - test.done(); - }, - 'resource.props can only be accessed by derived classes'(test: Test) { + }); + + test('resource.props can only be accessed by derived classes', () => { const stack = new Stack(); const res = new Counter(stack, 'MyResource', { Count: 10 }); res.increment(); res.increment(2); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: { Type: 'My::Counter', Properties: { Count: 13 } }, }, }); - test.done(); - }, - 'resource attributes can be retrieved using getAtt(s) or attribute properties'(test: Test) { + }); + + test('resource attributes can be retrieved using getAtt(s) or attribute properties', () => { const stack = new Stack(); const res = new Counter(stack, 'MyResource', { Count: 10 }); @@ -82,7 +81,7 @@ nodeunitShim({ }, }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: { Type: 'My::Counter', Properties: { Count: 10 } }, YourResource: { @@ -96,10 +95,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'ARN-type resource attributes have some common functionality'(test: Test) { + }); + + test('ARN-type resource attributes have some common functionality', () => { const stack = new Stack(); const res = new Counter(stack, 'MyResource', { Count: 1 }); new CfnResource(stack, 'MyResource2', { @@ -109,7 +108,7 @@ nodeunitShim({ }, }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: { Type: 'My::Counter', Properties: { Count: 1 } }, MyResource2: { @@ -123,10 +122,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'resource.addDependency(e) can be used to add a DependsOn on another resource'(test: 
Test) { + }); + + test('resource.addDependency(e) can be used to add a DependsOn on another resource', () => { const stack = new Stack(); const r1 = new Counter(stack, 'Counter1', { Count: 1 }); const r2 = new Counter(stack, 'Counter2', { Count: 1 }); @@ -136,7 +135,7 @@ nodeunitShim({ synthesize(stack); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Counter1: { Type: 'My::Counter', @@ -154,10 +153,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'if addDependency is called multiple times with the same resource, it will only appear once'(test: Test) { + }); + + test('if addDependency is called multiple times with the same resource, it will only appear once', () => { // GIVEN const stack = new Stack(); const r1 = new Counter(stack, 'Counter1', { Count: 1 }); @@ -171,7 +170,7 @@ nodeunitShim({ dependent.addDependsOn(r1); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Counter1: { Type: 'My::Counter', @@ -187,24 +186,24 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'conditions can be attached to a resource'(test: Test) { + }); + + test('conditions can be attached to a resource', () => { const stack = new Stack(); const r1 = new CfnResource(stack, 'Resource', { type: 'Type' }); const cond = new CfnCondition(stack, 'MyCondition', { expression: Fn.conditionNot(Fn.conditionEquals('a', 'b')) }); r1.cfnOptions.condition = cond; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Resource: { Type: 'Type', Condition: 'MyCondition' } }, Conditions: { MyCondition: { 'Fn::Not': [{ 'Fn::Equals': ['a', 'b'] }] } }, }); - test.done(); - }, - 'creation/update/updateReplace/deletion policies can be set on a resource'(test: Test) { + }); + + test('creation/update/updateReplace/deletion policies can be set on a resource', () => { const stack = new Stack(); const r1 = new CfnResource(stack, 'Resource', { type: 'Type' }); @@ -222,7 +221,7 @@ nodeunitShim({ r1.cfnOptions.deletionPolicy = CfnDeletionPolicy.RETAIN; r1.cfnOptions.updateReplacePolicy = CfnDeletionPolicy.SNAPSHOT; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Resource: { Type: 'Type', @@ -242,16 +241,16 @@ nodeunitShim({ }, }); - test.done(); - }, - 'update policies UseOnlineResharding flag'(test: Test) { + }); + + test('update policies UseOnlineResharding flag', () => { const stack = new Stack(); const r1 = new CfnResource(stack, 'Resource', { type: 'Type' }); r1.cfnOptions.updatePolicy = { useOnlineResharding: true }; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Resource: { Type: 'Type', @@ -262,10 +261,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'metadata can be set on a resource'(test: Test) { + }); + + test('metadata can be set on a resource', () => { const stack = new Stack(); const r1 = new CfnResource(stack, 'Resource', { type: 'Type' }); @@ -274,7 +273,7 @@ nodeunitShim({ MyValue: 99, }; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Resource: { Type: 'Type', @@ -286,16 +285,16 @@ nodeunitShim({ }, }); - test.done(); - }, - 'the "type" property is required when creating a resource'(test: Test) { + }); + + test('the "type" property is required when creating a resource', () => { const stack = new Stack(); - test.throws(() => new CfnResource(stack, 'Resource', { notypehere: true } as any)); - test.done(); - }, + 
expect(() => new CfnResource(stack, 'Resource', { notypehere: true } as any)).toThrow(); + + }); - 'removal policy is a high level abstraction of deletion policy used by l2'(test: Test) { + test('removal policy is a high level abstraction of deletion policy used by l2', () => { const stack = new Stack(); const retain = new CfnResource(stack, 'Retain', { type: 'T1' }); @@ -308,7 +307,7 @@ nodeunitShim({ def.applyRemovalPolicy(undefined, { default: RemovalPolicy.DESTROY }); def2.applyRemovalPolicy(undefined); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Retain: { Type: 'T1', DeletionPolicy: 'Retain', UpdateReplacePolicy: 'Retain' }, Destroy: { Type: 'T3', DeletionPolicy: 'Delete', UpdateReplacePolicy: 'Delete' }, @@ -316,10 +315,10 @@ nodeunitShim({ Default2: { Type: 'T4', DeletionPolicy: 'Retain', UpdateReplacePolicy: 'Retain' }, // implicit default }, }); - test.done(); - }, - 'addDependency adds all dependencyElements of dependent constructs'(test: Test) { + }); + + test('addDependency adds all dependencyElements of dependent constructs', () => { class C1 extends Construct { public readonly r1: CfnResource; @@ -364,7 +363,7 @@ nodeunitShim({ synthesize(stack); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyC1R1FB2A562F: { Type: 'T1' }, @@ -382,19 +381,19 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'resource.ref returns the {Ref} token'(test: Test) { + }); + + test('resource.ref returns the {Ref} token', () => { const stack = new Stack(); const r = new CfnResource(stack, 'MyResource', { type: 'R' }); - test.deepEqual(stack.resolve(r.ref), { Ref: 'MyResource' }); - test.done(); - }, + expect(stack.resolve(r.ref)).toEqual({ Ref: 'MyResource' }); + + }); - overrides: { - 'addOverride(p, v) allows assigning arbitrary values to synthesized resource definitions'(test: Test) { + describe('overrides', () => { + test('addOverride(p, v) allows assigning arbitrary values to synthesized resource definitions', () => { // GIVEN const stack = new Stack(); const r = new CfnResource(stack, 'MyResource', { type: 'AWS::Resource::Type' }); @@ -405,7 +404,7 @@ nodeunitShim({ r.addOverride('Use.Dot.Notation', 'To create subtrees'); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -417,10 +416,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'addPropertyOverride() allows assigning an attribute of a different resource'(test: Test) { + }); + + test('addPropertyOverride() allows assigning an attribute of a different resource', () => { // GIVEN const stack = new Stack(); const r1 = new CfnResource(stack, 'MyResource1', { type: 'AWS::Resource::Type' }); @@ -432,7 +431,7 @@ nodeunitShim({ }); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource1: { Type: 'AWS::Resource::Type', @@ -448,10 +447,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'addOverride(p, null) will assign an "null" value'(test: Test) { + }); + + test('addOverride(p, null) will assign an "null" value', () => { // GIVEN const stack = new Stack(); @@ -471,7 +470,7 @@ nodeunitShim({ r.addOverride('Properties.Hello.World.Value2', null); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -482,10 +481,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'addOverride(p, undefined) can be used to delete a value'(test: Test) { + }); 
+ + test('addOverride(p, undefined) can be used to delete a value', () => { // GIVEN const stack = new Stack(); @@ -505,7 +504,7 @@ nodeunitShim({ r.addOverride('Properties.Hello.World.Value2', undefined); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -516,10 +515,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'addOverride(p, undefined) will not create empty trees'(test: Test) { + }); + + test('addOverride(p, undefined) will not create empty trees', () => { // GIVEN const stack = new Stack(); @@ -530,7 +529,7 @@ nodeunitShim({ r.addPropertyOverride('Tree.Does.Not.Exist', undefined); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -541,10 +540,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'addDeletionOverride(p) and addPropertyDeletionOverride(pp) are sugar for `undefined`'(test: Test) { + }); + + test('addDeletionOverride(p) and addPropertyDeletionOverride(pp) are sugar for `undefined`', () => { // GIVEN const stack = new Stack(); @@ -566,7 +565,7 @@ nodeunitShim({ r.addPropertyDeletionOverride('Hello.World.Value3'); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -577,10 +576,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'addOverride(p, v) will overwrite any non-objects along the path'(test: Test) { + }); + + test('addOverride(p, v) will overwrite any non-objects along the path', () => { // GIVEN const stack = new Stack(); const r = new CfnResource(stack, 'MyResource', { @@ -598,7 +597,7 @@ nodeunitShim({ r.addOverride('Properties.Hello.World.Foo.Bar', 42); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -614,10 +613,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'addOverride(p, v) will not split on escaped dots'(test: Test) { + }); + + test('addOverride(p, v) will not split on escaped dots', () => { // GIVEN const stack = new Stack(); const r = new CfnResource(stack, 'MyResource', { type: 'AWS::Resource::Type' }); @@ -630,7 +629,7 @@ nodeunitShim({ r.addOverride('Properties.EndWith\\', 42); // Raw string cannot end with a backslash // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -647,10 +646,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'addPropertyOverride(pp, v) is a sugar for overriding properties'(test: Test) { + }); + + test('addPropertyOverride(pp, v) is a sugar for overriding properties', () => { // GIVEN const stack = new Stack(); const r = new CfnResource(stack, 'MyResource', { @@ -662,7 +661,7 @@ nodeunitShim({ r.addPropertyOverride('Hello.World', { Hey: 'Jude' }); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -672,10 +671,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'overrides are applied after render'(test: Test) { + }); + + test('overrides are applied after render', () => { // GIVEN class MyResource extends CfnResource { public renderProperties() { @@ -690,7 +689,7 @@ nodeunitShim({ cfn.addOverride('Properties.Foo.Bar', 'Bar'); // THEN - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { rr: { Type: 'AWS::Resource::Type', @@ -704,12 +703,12 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'using mutable properties': { + }); + + describe('using 
mutable properties', () => { - 'can be used by derived classes to specify overrides before render()'(test: Test) { + test('can be used by derived classes to specify overrides before render()', () => { const stack = new Stack(); const r = new CustomizableResource(stack, 'MyResource', { @@ -718,7 +717,7 @@ nodeunitShim({ r.prop2 = 'bar'; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -728,17 +727,17 @@ nodeunitShim({ }, }, }); - test.done(); - }, - '"properties" is undefined'(test: Test) { + }); + + test('"properties" is undefined', () => { const stack = new Stack(); const r = new CustomizableResource(stack, 'MyResource'); r.prop3 = 'zoo'; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -748,10 +747,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - '"properties" is empty'(test: Test) { + }); + + test('"properties" is empty', () => { const stack = new Stack(); const r = new CustomizableResource(stack, 'MyResource', { }); @@ -759,7 +758,7 @@ nodeunitShim({ r.prop3 = 'zoo'; r.prop2 = 'hey'; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { MyResource: @@ -769,12 +768,12 @@ nodeunitShim({ }, }, }); - test.done(); - }, - }, - }, - '"aws:cdk:path" metadata is added if "aws:cdk:path-metadata" context is set to true'(test: Test) { + }); + }); + }); + + test('"aws:cdk:path" metadata is added if "aws:cdk:path-metadata" context is set to true', () => { const stack = new Stack(); stack.node.setContext(cxapi.PATH_METADATA_ENABLE_CONTEXT, true); @@ -784,7 +783,7 @@ nodeunitShim({ type: 'MyResourceType', }); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { ParentMyResource4B1FDBCC: @@ -795,10 +794,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'cross-stack construct dependencies are not rendered but turned into stack dependencies'(test: Test) { + }); + + test('cross-stack construct dependencies are not rendered but turned into stack dependencies', () => { // GIVEN const app = new App(); const stackA = new Stack(app, 'StackA'); @@ -813,7 +812,7 @@ nodeunitShim({ const assembly = app.synth(); const templateB = assembly.getStackByName(stackB.stackName).template; - test.deepEqual(templateB, { + expect(templateB).toEqual({ Resources: { Resource: { Type: 'R', @@ -821,12 +820,12 @@ nodeunitShim({ }, }, }); - test.deepEqual(stackB.dependencies.map(s => s.node.id), ['StackA']); + expect(stackB.dependencies.map(s => s.node.id)).toEqual(['StackA']); - test.done(); - }, - 'enableVersionUpgrade can be set on a resource'(test: Test) { + }); + + test('enableVersionUpgrade can be set on a resource', () => { const stack = new Stack(); const r1 = new CfnResource(stack, 'Resource', { type: 'Type' }); @@ -834,7 +833,7 @@ nodeunitShim({ enableVersionUpgrade: true, }; - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Resources: { Resource: { Type: 'Type', @@ -845,8 +844,8 @@ nodeunitShim({ }, }); - test.done(); - }, + + }); }); test('Resource can get account and Region from ARN', () => { diff --git a/packages/@aws-cdk/core/test/rule.test.ts b/packages/@aws-cdk/core/test/rule.test.ts index 637f204235f83..4914e7a7d271a 100644 --- a/packages/@aws-cdk/core/test/rule.test.ts +++ b/packages/@aws-cdk/core/test/rule.test.ts @@ -1,16 +1,15 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnRule, Fn, Stack } from '../lib'; import { 
toCloudFormation } from './util'; -nodeunitShim({ - 'Rule can be used to create rules'(test: Test) { +describe('rule', () => { + test('Rule can be used to create rules', () => { const stack = new Stack(); const rule = new CfnRule(stack, 'MyRule'); rule.addAssertion(Fn.conditionEquals('lhs', 'rhs'), 'lhs equals rhs'); rule.addAssertion(Fn.conditionNot(Fn.conditionAnd(Fn.conditionContains(['hello', 'world'], 'world'))), 'some assertion'); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Rules: { MyRule: { Assertions: [ @@ -27,22 +26,22 @@ nodeunitShim({ }, }); - test.done(); - }, - 'a template can contain multiple Rules'(test: Test) { + }); + + test('a template can contain multiple Rules', () => { const stack = new Stack(); new CfnRule(stack, 'Rule1'); new CfnRule(stack, 'Rule2'); - test.deepEqual(toCloudFormation(stack), { + expect(toCloudFormation(stack)).toEqual({ Rules: { Rule1: {}, Rule2: {}, }, }); - test.done(); - }, + + }); }); diff --git a/packages/@aws-cdk/core/test/secret-value.test.ts b/packages/@aws-cdk/core/test/secret-value.test.ts index 7a119d3534a9b..a32702b98771d 100644 --- a/packages/@aws-cdk/core/test/secret-value.test.ts +++ b/packages/@aws-cdk/core/test/secret-value.test.ts @@ -1,8 +1,7 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnDynamicReference, CfnDynamicReferenceService, CfnParameter, SecretValue, Stack } from '../lib'; -nodeunitShim({ - 'plainText'(test: Test) { +describe('secret value', () => { + test('plainText', () => { // GIVEN const stack = new Stack(); @@ -10,11 +9,11 @@ nodeunitShim({ const v = SecretValue.plainText('this just resolves to a string'); // THEN - test.deepEqual(stack.resolve(v), 'this just resolves to a string'); - test.done(); - }, + expect(stack.resolve(v)).toEqual('this just resolves to a string'); - 'secretsManager'(test: Test) { + }); + + test('secretsManager', () => { // GIVEN const stack = new Stack(); @@ -25,11 +24,11 @@ nodeunitShim({ }); // THEN - test.deepEqual(stack.resolve(v), '{{resolve:secretsmanager:secret-id:SecretString:json-key:version-stage:}}'); - test.done(); - }, + expect(stack.resolve(v)).toEqual('{{resolve:secretsmanager:secret-id:SecretString:json-key:version-stage:}}'); + + }); - 'secretsManager with defaults'(test: Test) { + test('secretsManager with defaults', () => { // GIVEN const stack = new Stack(); @@ -37,33 +36,33 @@ nodeunitShim({ const v = SecretValue.secretsManager('secret-id'); // THEN - test.deepEqual(stack.resolve(v), '{{resolve:secretsmanager:secret-id:SecretString:::}}'); - test.done(); - }, + expect(stack.resolve(v)).toEqual('{{resolve:secretsmanager:secret-id:SecretString:::}}'); + + }); - 'secretsManager with an empty ID'(test: Test) { - test.throws(() => SecretValue.secretsManager(''), /secretId cannot be empty/); - test.done(); - }, + test('secretsManager with an empty ID', () => { + expect(() => SecretValue.secretsManager('')).toThrow(/secretId cannot be empty/); - 'secretsManager with versionStage and versionId'(test: Test) { - test.throws(() => { + }); + + test('secretsManager with versionStage and versionId', () => { + expect(() => { SecretValue.secretsManager('secret-id', { versionStage: 'version-stage', versionId: 'version-id', }); - }, /were both provided but only one is allowed/); + }).toThrow(/were both provided but only one is allowed/); + - test.done(); - }, + }); - 'secretsManager with a non-ARN ID that has colon'(test: Test) { - test.throws(() => SecretValue.secretsManager('not:an:arn'), /is not an ARN but contains ":"/); 
- test.done(); - }, + test('secretsManager with a non-ARN ID that has colon', () => { + expect(() => SecretValue.secretsManager('not:an:arn')).toThrow(/is not an ARN but contains ":"/); - 'ssmSecure'(test: Test) { + }); + + test('ssmSecure', () => { // GIVEN const stack = new Stack(); @@ -71,11 +70,11 @@ nodeunitShim({ const v = SecretValue.ssmSecure('param-name', 'param-version'); // THEN - test.deepEqual(stack.resolve(v), '{{resolve:ssm-secure:param-name:param-version}}'); - test.done(); - }, + expect(stack.resolve(v)).toEqual('{{resolve:ssm-secure:param-name:param-version}}'); + + }); - 'cfnDynamicReference'(test: Test) { + test('cfnDynamicReference', () => { // GIVEN const stack = new Stack(); @@ -83,11 +82,11 @@ nodeunitShim({ const v = SecretValue.cfnDynamicReference(new CfnDynamicReference(CfnDynamicReferenceService.SSM, 'foo:bar')); // THEN - test.deepEqual(stack.resolve(v), '{{resolve:ssm:foo:bar}}'); - test.done(); - }, + expect(stack.resolve(v)).toEqual('{{resolve:ssm:foo:bar}}'); + + }); - 'cfnParameter (with NoEcho)'(test: Test) { + test('cfnParameter (with NoEcho)', () => { // GIVEN const stack = new Stack(); const p = new CfnParameter(stack, 'MyParam', { type: 'String', noEcho: true }); @@ -96,17 +95,17 @@ nodeunitShim({ const v = SecretValue.cfnParameter(p); // THEN - test.deepEqual(stack.resolve(v), { Ref: 'MyParam' }); - test.done(); - }, + expect(stack.resolve(v)).toEqual({ Ref: 'MyParam' }); - 'fails if cfnParameter does not have NoEcho'(test: Test) { + }); + + test('fails if cfnParameter does not have NoEcho', () => { // GIVEN const stack = new Stack(); const p = new CfnParameter(stack, 'MyParam', { type: 'String' }); // THEN - test.throws(() => SecretValue.cfnParameter(p), /CloudFormation parameter must be configured with "NoEcho"/); - test.done(); - }, + expect(() => SecretValue.cfnParameter(p)).toThrow(/CloudFormation parameter must be configured with "NoEcho"/); + + }); }); diff --git a/packages/@aws-cdk/core/test/size.test.ts b/packages/@aws-cdk/core/test/size.test.ts index 288db78304f2a..9801fc8f53acf 100644 --- a/packages/@aws-cdk/core/test/size.test.ts +++ b/packages/@aws-cdk/core/test/size.test.ts @@ -1,120 +1,118 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Size, SizeRoundingBehavior, Stack, Token } from '../lib'; -nodeunitShim({ - 'negative amount'(test: Test) { - test.throws(() => Size.kibibytes(-1), /negative/); +describe('size', () => { + test('negative amount', () => { + expect(() => Size.kibibytes(-1)).toThrow(/negative/); - test.done(); - }, - 'unresolved amount'(test: Test) { + }); + + test('unresolved amount', () => { const stack = new Stack(); const lazySize = Size.kibibytes(Token.asNumber({ resolve: () => 1337 })); - test.equals(stack.resolve(lazySize.toKibibytes()), 1337); - test.throws( - () => stack.resolve(lazySize.toMebibytes()), + expect(stack.resolve(lazySize.toKibibytes())).toEqual(1337); + expect( + () => stack.resolve(lazySize.toMebibytes())).toThrow( /Unable to perform time unit conversion on un-resolved token/, ); - test.done(); - }, - 'Size in kibibytes'(test: Test) { + }); + + test('Size in kibibytes', () => { const size = Size.kibibytes(4_294_967_296); - test.equal(size.toKibibytes(), 4_294_967_296); - test.equal(size.toMebibytes(), 4_194_304); - test.equal(size.toGibibytes(), 4_096); - test.equal(size.toTebibytes(), 4); - test.throws(() => size.toPebibytes(), /'4294967296 kibibytes' cannot be converted into a whole number/); - floatEqual(test, size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 
4_294_967_296 / (1024 * 1024 * 1024 * 1024)); + expect(size.toKibibytes()).toEqual(4_294_967_296); + expect(size.toMebibytes()).toEqual(4_194_304); + expect(size.toGibibytes()).toEqual(4_096); + expect(size.toTebibytes()).toEqual(4); + expect(() => size.toPebibytes()).toThrow(/'4294967296 kibibytes' cannot be converted into a whole number/); + floatEqual(size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 4_294_967_296 / (1024 * 1024 * 1024 * 1024)); - test.equal(Size.kibibytes(4 * 1024 * 1024).toGibibytes(), 4); + expect(Size.kibibytes(4 * 1024 * 1024).toGibibytes()).toEqual(4); - test.done(); - }, - 'Size in mebibytes'(test: Test) { + }); + + test('Size in mebibytes', () => { const size = Size.mebibytes(4_194_304); - test.equal(size.toKibibytes(), 4_294_967_296); - test.equal(size.toMebibytes(), 4_194_304); - test.equal(size.toGibibytes(), 4_096); - test.equal(size.toTebibytes(), 4); - test.throws(() => size.toPebibytes(), /'4194304 mebibytes' cannot be converted into a whole number/); - floatEqual(test, size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 4_194_304 / (1024 * 1024 * 1024)); + expect(size.toKibibytes()).toEqual(4_294_967_296); + expect(size.toMebibytes()).toEqual(4_194_304); + expect(size.toGibibytes()).toEqual(4_096); + expect(size.toTebibytes()).toEqual(4); + expect(() => size.toPebibytes()).toThrow(/'4194304 mebibytes' cannot be converted into a whole number/); + floatEqual(size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 4_194_304 / (1024 * 1024 * 1024)); + + expect(Size.mebibytes(4 * 1024).toGibibytes()).toEqual(4); - test.equal(Size.mebibytes(4 * 1024).toGibibytes(), 4); - test.done(); - }, + }); - 'Size in gibibyte'(test: Test) { + test('Size in gibibyte', () => { const size = Size.gibibytes(5); - test.equal(size.toKibibytes(), 5_242_880); - test.equal(size.toMebibytes(), 5_120); - test.equal(size.toGibibytes(), 5); - test.throws(() => size.toTebibytes(), /'5 gibibytes' cannot be converted into a whole number/); - floatEqual(test, size.toTebibytes({ rounding: SizeRoundingBehavior.NONE }), 5 / 1024); - test.throws(() => size.toPebibytes(), /'5 gibibytes' cannot be converted into a whole number/); - floatEqual(test, size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 5 / (1024 * 1024)); + expect(size.toKibibytes()).toEqual(5_242_880); + expect(size.toMebibytes()).toEqual(5_120); + expect(size.toGibibytes()).toEqual(5); + expect(() => size.toTebibytes()).toThrow(/'5 gibibytes' cannot be converted into a whole number/); + floatEqual(size.toTebibytes({ rounding: SizeRoundingBehavior.NONE }), 5 / 1024); + expect(() => size.toPebibytes()).toThrow(/'5 gibibytes' cannot be converted into a whole number/); + floatEqual(size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 5 / (1024 * 1024)); - test.equal(Size.gibibytes(4096).toTebibytes(), 4); + expect(Size.gibibytes(4096).toTebibytes()).toEqual(4); - test.done(); - }, - 'Size in tebibyte'(test: Test) { + }); + + test('Size in tebibyte', () => { const size = Size.tebibytes(5); - test.equal(size.toKibibytes(), 5_368_709_120); - test.equal(size.toMebibytes(), 5_242_880); - test.equal(size.toGibibytes(), 5_120); - test.equal(size.toTebibytes(), 5); - test.throws(() => size.toPebibytes(), /'5 tebibytes' cannot be converted into a whole number/); - floatEqual(test, size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 5 / 1024); + expect(size.toKibibytes()).toEqual(5_368_709_120); + expect(size.toMebibytes()).toEqual(5_242_880); + expect(size.toGibibytes()).toEqual(5_120); + 
expect(size.toTebibytes()).toEqual(5);
+ expect(() => size.toPebibytes()).toThrow(/'5 tebibytes' cannot be converted into a whole number/);
+ floatEqual(size.toPebibytes({ rounding: SizeRoundingBehavior.NONE }), 5 / 1024);
+
+ expect(Size.tebibytes(4096).toPebibytes()).toEqual(4);
 
- test.equal(Size.tebibytes(4096).toPebibytes(), 4);
- test.done();
- },
+ });
 
- 'Size in pebibytes'(test: Test) {
+ test('Size in pebibytes', () => {
 const size = Size.pebibytes(5);
 
- test.equal(size.toKibibytes(), 5_497_558_138_880);
- test.equal(size.toMebibytes(), 5_368_709_120);
- test.equal(size.toGibibytes(), 5_242_880);
- test.equal(size.toTebibytes(), 5_120);
- test.equal(size.toPebibytes(), 5);
+ expect(size.toKibibytes()).toEqual(5_497_558_138_880);
+ expect(size.toMebibytes()).toEqual(5_368_709_120);
+ expect(size.toGibibytes()).toEqual(5_242_880);
+ expect(size.toTebibytes()).toEqual(5_120);
+ expect(size.toPebibytes()).toEqual(5);
 
- test.done();
- },
 
- 'rounding behavior'(test: Test) {
+ });
+
+ test('rounding behavior', () => {
 const size = Size.mebibytes(5_200);
 
- test.throws(() => size.toGibibytes(), /cannot be converted into a whole number/);
- test.throws(() => size.toGibibytes({ rounding: SizeRoundingBehavior.FAIL }), /cannot be converted into a whole number/);
+ expect(() => size.toGibibytes()).toThrow(/cannot be converted into a whole number/);
+ expect(() => size.toGibibytes({ rounding: SizeRoundingBehavior.FAIL })).toThrow(/cannot be converted into a whole number/);
+
+ expect(size.toGibibytes({ rounding: SizeRoundingBehavior.FLOOR })).toEqual(5);
+ expect(size.toTebibytes({ rounding: SizeRoundingBehavior.FLOOR })).toEqual(0);
+ floatEqual(size.toKibibytes({ rounding: SizeRoundingBehavior.FLOOR }), 5_324_800);
 
- test.equals(size.toGibibytes({ rounding: SizeRoundingBehavior.FLOOR }), 5);
- test.equals(size.toTebibytes({ rounding: SizeRoundingBehavior.FLOOR }), 0);
- floatEqual(test, size.toKibibytes({ rounding: SizeRoundingBehavior.FLOOR }), 5_324_800);
+ expect(size.toGibibytes({ rounding: SizeRoundingBehavior.NONE })).toEqual(5.078125);
+ expect(size.toTebibytes({ rounding: SizeRoundingBehavior.NONE })).toEqual(5200 / (1024 * 1024));
+ expect(size.toKibibytes({ rounding: SizeRoundingBehavior.NONE })).toEqual(5_324_800);
 
- test.equals(size.toGibibytes({ rounding: SizeRoundingBehavior.NONE }), 5.078125);
- test.equals(size.toTebibytes({ rounding: SizeRoundingBehavior.NONE }), 5200 / (1024 * 1024));
- test.equals(size.toKibibytes({ rounding: SizeRoundingBehavior.NONE }), 5_324_800);
- test.done();
- },
+ });
 });
 
-function floatEqual(test: Test, actual: number, expected: number) {
- test.ok(
+function floatEqual(actual: number, expected: number) {
+ expect(
 // Floats are subject to rounding errors up to Number.EPSILON
 actual >= expected - Number.EPSILON && actual <= expected + Number.EPSILON,
- `${actual} == ${expected}`,
- );
+ ).toEqual(true);
 }
diff --git a/packages/@aws-cdk/core/test/stack-synthesis/new-style-synthesis.test.ts b/packages/@aws-cdk/core/test/stack-synthesis/new-style-synthesis.test.ts
index 67e8fcbfaae17..1528e91205f80 100644
--- a/packages/@aws-cdk/core/test/stack-synthesis/new-style-synthesis.test.ts
+++ b/packages/@aws-cdk/core/test/stack-synthesis/new-style-synthesis.test.ts
@@ -1,7 +1,6 @@
 import * as fs from 'fs';
 import * as cxschema from '@aws-cdk/cloud-assembly-schema';
 import * as cxapi from '@aws-cdk/cx-api';
-import { nodeunitShim, Test } from 'nodeunit-shim';
 import { App, Aws, CfnResource, ContextProvider, DefaultStackSynthesizer, FileAssetPackaging, Stack }
from '../../lib'; import { evaluateCFN } from '../evaluate-cfn'; @@ -13,18 +12,18 @@ const CFN_CONTEXT = { let app: App; let stack: Stack; -nodeunitShim({ - 'setUp'(cb: () => void) { +describe('new style synthesis', () => { + beforeEach(() => { app = new App({ context: { [cxapi.NEW_STYLE_STACK_SYNTHESIS_CONTEXT]: 'true', }, }); stack = new Stack(app, 'Stack'); - cb(); - }, - 'stack template is in asset manifest'(test: Test) { + }); + + test('stack template is in asset manifest', () => { // GIVEN new CfnResource(stack, 'Resource', { type: 'Some::Resource', @@ -38,16 +37,16 @@ nodeunitShim({ const templateObjectKey = last(stackArtifact.stackTemplateAssetObjectUrl?.split('/')); - test.equals(stackArtifact.stackTemplateAssetObjectUrl, `s3://cdk-hnb659fds-assets-\${AWS::AccountId}-\${AWS::Region}/${templateObjectKey}`); + expect(stackArtifact.stackTemplateAssetObjectUrl).toEqual(`s3://cdk-hnb659fds-assets-\${AWS::AccountId}-\${AWS::Region}/${templateObjectKey}`); // THEN - the template is in the asset manifest const manifestArtifact = asm.artifacts.filter(isAssetManifest)[0]; - test.ok(manifestArtifact); + expect(manifestArtifact).toBeDefined(); const manifest: cxschema.AssetManifest = JSON.parse(fs.readFileSync(manifestArtifact.file, { encoding: 'utf-8' })); const firstFile = (manifest.files ? manifest.files[Object.keys(manifest.files)[0]] : undefined) ?? {}; - test.deepEqual(firstFile, { + expect(firstFile).toEqual({ source: { path: 'Stack.template.json', packaging: 'file' }, destinations: { 'current_account-current_region': { @@ -58,10 +57,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'version check is added to template'(test: Test) { + }); + + test('version check is added to template', () => { // GIVEN new CfnResource(stack, 'Resource', { type: 'Some::Resource', @@ -69,21 +68,21 @@ nodeunitShim({ // THEN const template = app.synth().getStackByName('Stack').template; - test.deepEqual(template?.Parameters?.BootstrapVersion?.Type, 'AWS::SSM::Parameter::Value'); - test.deepEqual(template?.Parameters?.BootstrapVersion?.Default, '/cdk-bootstrap/hnb659fds/version'); + expect(template?.Parameters?.BootstrapVersion?.Type).toEqual('AWS::SSM::Parameter::Value'); + expect(template?.Parameters?.BootstrapVersion?.Default).toEqual('/cdk-bootstrap/hnb659fds/version'); const assertions = template?.Rules?.CheckBootstrapVersion?.Assertions ?? 
[]; - test.deepEqual(assertions.length, 1); - test.deepEqual(assertions[0].Assert, { + expect(assertions.length).toEqual(1); + expect(assertions[0].Assert).toEqual({ 'Fn::Not': [ { 'Fn::Contains': [['1', '2', '3', '4', '5'], { Ref: 'BootstrapVersion' }] }, ], }); - test.done(); - }, - 'version check is not added to template if disabled'(test: Test) { + }); + + test('version check is not added to template if disabled', () => { // GIVEN stack = new Stack(app, 'Stack2', { synthesizer: new DefaultStackSynthesizer({ @@ -96,12 +95,12 @@ nodeunitShim({ // THEN const template = app.synth().getStackByName('Stack2').template; - test.equal(template?.Rules?.CheckBootstrapVersion, undefined); + expect(template?.Rules?.CheckBootstrapVersion).toEqual(undefined); + - test.done(); - }, + }); - 'customize version parameter'(test: Test) { + test('customize version parameter', () => { // GIVEN const myapp = new App(); @@ -125,10 +124,10 @@ nodeunitShim({ // THEN - the asset manifest has an SSM parameter entry expect(manifestArtifact.bootstrapStackVersionSsmParameter).toEqual('stack-version-parameter'); - test.done(); - }, - 'generates missing context with the lookup role ARN as one of the missing context properties'(test: Test) { + }); + + test('generates missing context with the lookup role ARN as one of the missing context properties', () => { // GIVEN stack = new Stack(app, 'Stack2', { synthesizer: new DefaultStackSynthesizer({ @@ -146,12 +145,12 @@ nodeunitShim({ // THEN const assembly = app.synth(); - test.equal(assembly.manifest.missing![0].props.lookupRoleArn, 'arn:${AWS::Partition}:iam::111111111111:role/cdk-hnb659fds-lookup-role-111111111111-us-east-1'); + expect(assembly.manifest.missing![0].props.lookupRoleArn).toEqual('arn:${AWS::Partition}:iam::111111111111:role/cdk-hnb659fds-lookup-role-111111111111-us-east-1'); + - test.done(); - }, + }); - 'add file asset'(test: Test) { + test('add file asset', () => { // WHEN const location = stack.synthesizer.addFileAsset({ fileName: __filename, @@ -160,16 +159,16 @@ nodeunitShim({ }); // THEN - we have a fixed asset location with region placeholders - test.equals(evalCFN(location.bucketName), 'cdk-hnb659fds-assets-the_account-the_region'); - test.equals(evalCFN(location.s3Url), 'https://s3.the_region.domain.aws/cdk-hnb659fds-assets-the_account-the_region/abcdef.js'); + expect(evalCFN(location.bucketName)).toEqual('cdk-hnb659fds-assets-the_account-the_region'); + expect(evalCFN(location.s3Url)).toEqual('https://s3.the_region.domain.aws/cdk-hnb659fds-assets-the_account-the_region/abcdef.js'); // THEN - object key contains source hash somewhere - test.ok(location.objectKey.indexOf('abcdef') > -1); + expect(location.objectKey.indexOf('abcdef')).toBeGreaterThan(-1); + - test.done(); - }, + }); - 'add docker image asset'(test: Test) { + test('add docker image asset', () => { // WHEN const location = stack.synthesizer.addDockerImageAsset({ directoryName: '.', @@ -177,13 +176,13 @@ nodeunitShim({ }); // THEN - we have a fixed asset location with region placeholders - test.equals(evalCFN(location.repositoryName), 'cdk-hnb659fds-container-assets-the_account-the_region'); - test.equals(evalCFN(location.imageUri), 'the_account.dkr.ecr.the_region.domain.aws/cdk-hnb659fds-container-assets-the_account-the_region:abcdef'); + expect(evalCFN(location.repositoryName)).toEqual('cdk-hnb659fds-container-assets-the_account-the_region'); + 
expect(evalCFN(location.imageUri)).toEqual('the_account.dkr.ecr.the_region.domain.aws/cdk-hnb659fds-container-assets-the_account-the_region:abcdef'); - test.done(); - }, - 'synthesis'(test: Test) { + }); + + test('synthesis', () => { // GIVEN stack.synthesizer.addFileAsset({ fileName: __filename, @@ -202,8 +201,8 @@ nodeunitShim({ const manifestArtifact = getAssetManifest(asm); const manifest = readAssetManifest(manifestArtifact); - test.equals(Object.keys(manifest.files || {}).length, 2); - test.equals(Object.keys(manifest.dockerImages || {}).length, 1); + expect(Object.keys(manifest.files || {}).length).toEqual(2); + expect(Object.keys(manifest.dockerImages || {}).length).toEqual(1); // THEN - the asset manifest has an SSM parameter entry expect(manifestArtifact.bootstrapStackVersionSsmParameter).toEqual('/cdk-bootstrap/hnb659fds/version'); @@ -211,20 +210,20 @@ nodeunitShim({ // THEN - every artifact has an assumeRoleArn for (const file of Object.values(manifest.files ?? {})) { for (const destination of Object.values(file.destinations)) { - test.deepEqual(destination.assumeRoleArn, 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}'); + expect(destination.assumeRoleArn).toEqual('arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}'); } } for (const file of Object.values(manifest.dockerImages ?? {})) { for (const destination of Object.values(file.destinations)) { - test.deepEqual(destination.assumeRoleArn, 'arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-image-publishing-role-${AWS::AccountId}-${AWS::Region}'); + expect(destination.assumeRoleArn).toEqual('arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-image-publishing-role-${AWS::AccountId}-${AWS::Region}'); } } - test.done(); - }, - 'customize publishing resources'(test: Test) { + }); + + test('customize publishing resources', () => { // GIVEN const myapp = new App(); @@ -256,24 +255,24 @@ nodeunitShim({ const asm = myapp.synth(); const manifest = readAssetManifest(getAssetManifest(asm)); - test.deepEqual(manifest.files?.['file-asset-hash']?.destinations?.['current_account-current_region'], { + expect(manifest.files?.['file-asset-hash']?.destinations?.['current_account-current_region']).toEqual({ bucketName: 'file-asset-bucket', objectKey: 'file-asset-hash.js', assumeRoleArn: 'file:role:arn', assumeRoleExternalId: 'file-external-id', }); - test.deepEqual(manifest.dockerImages?.['docker-asset-hash']?.destinations?.['current_account-current_region'], { + expect(manifest.dockerImages?.['docker-asset-hash']?.destinations?.['current_account-current_region']).toEqual({ repositoryName: 'image-ecr-repository', imageTag: 'docker-asset-hash', assumeRoleArn: 'image:role:arn', assumeRoleExternalId: 'image-external-id', }); - test.done(); - }, - 'customize deploy role externalId'(test: Test) { + }); + + test('customize deploy role externalId', () => { // GIVEN const myapp = new App(); @@ -290,10 +289,10 @@ nodeunitShim({ const stackArtifact = asm.getStack(mystack.stackName); expect(stackArtifact.assumeRoleExternalId).toEqual('deploy-external-id'); - test.done(); - }, - 'synthesis with bucketPrefix'(test: Test) { + }); + + test('synthesis with bucketPrefix', () => { // GIVEN const myapp = new App(); @@ -323,7 +322,7 @@ nodeunitShim({ const manifest = readAssetManifest(getAssetManifest(asm)); // THEN - 
test.deepEqual(manifest.files?.['file-asset-hash-with-prefix']?.destinations?.['current_account-current_region'], { + expect(manifest.files?.['file-asset-hash-with-prefix']?.destinations?.['current_account-current_region']).toEqual({ bucketName: 'file-asset-bucket', objectKey: '000000000000/file-asset-hash-with-prefix.js', assumeRoleArn: 'file:role:arn', @@ -332,22 +331,22 @@ nodeunitShim({ const templateHash = last(stackArtifact.stackTemplateAssetObjectUrl?.split('/')); - test.equals(stackArtifact.stackTemplateAssetObjectUrl, `s3://file-asset-bucket/000000000000/${templateHash}`); + expect(stackArtifact.stackTemplateAssetObjectUrl).toEqual(`s3://file-asset-bucket/000000000000/${templateHash}`); + - test.done(); - }, + }); - 'cannot use same synthesizer for multiple stacks'(test: Test) { + test('cannot use same synthesizer for multiple stacks', () => { // GIVEN const synthesizer = new DefaultStackSynthesizer(); // WHEN new Stack(app, 'Stack2', { synthesizer }); - test.throws(() => { + expect(() => { new Stack(app, 'Stack3', { synthesizer }); - }, /A StackSynthesizer can only be used for one Stack/); - test.done(); - }, + }).toThrow(/A StackSynthesizer can only be used for one Stack/); + + }); }); test('get an exception when using tokens for parameters', () => { diff --git a/packages/@aws-cdk/core/test/stage.test.ts b/packages/@aws-cdk/core/test/stage.test.ts index 897b3513d6163..4178060822d68 100644 --- a/packages/@aws-cdk/core/test/stage.test.ts +++ b/packages/@aws-cdk/core/test/stage.test.ts @@ -1,10 +1,9 @@ import * as cxschema from '@aws-cdk/cloud-assembly-schema'; import * as cxapi from '@aws-cdk/cx-api'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import { App, CfnResource, Construct, IAspect, IConstruct, Stack, Stage, Aspects } from '../lib'; -nodeunitShim({ - 'Stack inherits unspecified part of the env from Stage'(test: Test) { +describe('stage', () => { + test('Stack inherits unspecified part of the env from Stage', () => { // GIVEN const app = new App(); const stage = new Stage(app, 'Stage', { @@ -16,13 +15,13 @@ nodeunitShim({ const stack2 = new Stack(stage, 'Stack2', { env: { account: 'tnuocca' } }); // THEN - test.deepEqual(acctRegion(stack1), ['account', 'elsewhere']); - test.deepEqual(acctRegion(stack2), ['tnuocca', 'region']); + expect(acctRegion(stack1)).toEqual(['account', 'elsewhere']); + expect(acctRegion(stack2)).toEqual(['tnuocca', 'region']); - test.done(); - }, - 'envs are inherited deeply'(test: Test) { + }); + + test('envs are inherited deeply', () => { // GIVEN const app = new App(); const outer = new Stage(app, 'Stage', { @@ -35,14 +34,14 @@ nodeunitShim({ const innerNeither = new Stage(outer, 'Neither'); // THEN - test.deepEqual(acctRegion(new Stack(innerAcct, 'Stack')), ['tnuocca', 'region']); - test.deepEqual(acctRegion(new Stack(innerRegion, 'Stack')), ['account', 'elsewhere']); - test.deepEqual(acctRegion(new Stack(innerNeither, 'Stack')), ['account', 'region']); + expect(acctRegion(new Stack(innerAcct, 'Stack'))).toEqual(['tnuocca', 'region']); + expect(acctRegion(new Stack(innerRegion, 'Stack'))).toEqual(['account', 'elsewhere']); + expect(acctRegion(new Stack(innerNeither, 'Stack'))).toEqual(['account', 'region']); - test.done(); - }, - 'The Stage Assembly is in the app Assembly\'s manifest'(test: Test) { + }); + + test('The Stage Assembly is in the app Assembly\'s manifest', () => { // WHEN const app = new App(); const stage = new Stage(app, 'Stage'); @@ -52,12 +51,12 @@ nodeunitShim({ const appAsm = app.synth(); const artifact = 
appAsm.artifacts.find(x => x instanceof cxapi.NestedCloudAssemblyArtifact); - test.ok(artifact); + expect(artifact).toBeDefined(); - test.done(); - }, - 'Stacks in Stage are in a different cxasm than Stacks in App'(test: Test) { + }); + + test('Stacks in Stage are in a different cxasm than Stacks in App', () => { // WHEN const app = new App(); const stack1 = new BogusStack(app, 'Stack1'); @@ -66,15 +65,15 @@ nodeunitShim({ // THEN const stageAsm = stage.synth(); - test.deepEqual(stageAsm.stacks.map(s => s.stackName), [stack2.stackName]); + expect(stageAsm.stacks.map(s => s.stackName)).toEqual([stack2.stackName]); const appAsm = app.synth(); - test.deepEqual(appAsm.stacks.map(s => s.stackName), [stack1.stackName]); + expect(appAsm.stacks.map(s => s.stackName)).toEqual([stack1.stackName]); - test.done(); - }, - 'Can nest Stages inside other Stages'(test: Test) { + }); + + test('Can nest Stages inside other Stages', () => { // WHEN const app = new App(); const outer = new Stage(app, 'Outer'); @@ -86,25 +85,25 @@ nodeunitShim({ const outerAsm = appAsm.getNestedAssembly(outer.artifactId); const innerAsm = outerAsm.getNestedAssembly(inner.artifactId); - test.ok(innerAsm.tryGetArtifact(stack.artifactId)); + expect(innerAsm.tryGetArtifact(stack.artifactId)).toBeDefined(); - test.done(); - }, - 'Default stack name in Stage objects incorporates the Stage name and no hash'(test: Test) { + }); + + test('Default stack name in Stage objects incorporates the Stage name and no hash', () => { // WHEN const app = new App(); const stage = new Stage(app, 'MyStage'); const stack = new BogusStack(stage, 'MyStack'); // THEN - test.equal(stage.stageName, 'MyStage'); - test.equal(stack.stackName, 'MyStage-MyStack'); + expect(stage.stageName).toEqual('MyStage'); + expect(stack.stackName).toEqual('MyStage-MyStack'); - test.done(); - }, - 'Can not have dependencies to stacks outside the nested asm'(test: Test) { + }); + + test('Can not have dependencies to stacks outside the nested asm', () => { // GIVEN const app = new App(); const stack1 = new BogusStack(app, 'Stack1'); @@ -112,14 +111,14 @@ nodeunitShim({ const stack2 = new BogusStack(stage, 'Stack2'); // WHEN - test.throws(() => { + expect(() => { stack2.addDependency(stack1); - }, /dependency cannot cross stage boundaries/); + }).toThrow(/dependency cannot cross stage boundaries/); - test.done(); - }, - 'When we synth() a stage, prepare must be called on constructs in the stage'(test: Test) { + }); + + test('When we synth() a stage, prepare must be called on constructs in the stage', () => { // GIVEN const app = new App(); let prepared = false; @@ -136,12 +135,12 @@ nodeunitShim({ stage.synth(); // THEN - test.equals(prepared, true); + expect(prepared).toEqual(true); - test.done(); - }, - 'When we synth() a stage, aspects inside it must have been applied'(test: Test) { + }); + + test('When we synth() a stage, aspects inside it must have been applied', () => { // GIVEN const app = new App(); const stage = new Stage(app, 'MyStage'); @@ -153,15 +152,15 @@ nodeunitShim({ // THEN app.synth(); - test.deepEqual(aspect.visits.map(c => c.node.path), [ + expect(aspect.visits.map(c => c.node.path)).toEqual([ 'MyStage/Stack', 'MyStage/Stack/Resource', ]); - test.done(); - }, - 'Aspects do not apply inside a Stage'(test: Test) { + }); + + test('Aspects do not apply inside a Stage', () => { // GIVEN const app = new App(); const stage = new Stage(app, 'MyStage'); @@ -173,14 +172,14 @@ nodeunitShim({ // THEN app.synth(); - test.deepEqual(aspect.visits.map(c => c.node.path), [ + 
expect(aspect.visits.map(c => c.node.path)).toEqual([ '', 'Tree', ]); - test.done(); - }, - 'Automatic dependencies inside a stage are available immediately after synth'(test: Test) { + }); + + test('Automatic dependencies inside a stage are available immediately after synth', () => { // GIVEN const app = new App(); const stage = new Stage(app, 'MyStage'); @@ -201,14 +200,14 @@ nodeunitShim({ const asm = stage.synth(); // THEN - test.deepEqual( - asm.getStackArtifact(stack2.artifactId).dependencies.map(d => d.id), + expect( + asm.getStackArtifact(stack2.artifactId).dependencies.map(d => d.id)).toEqual( [stack1.artifactId]); - test.done(); - }, - 'Assemblies can be deeply nested'(test: Test) { + }); + + test('Assemblies can be deeply nested', () => { // GIVEN const app = new App({ treeMetadata: false }); @@ -220,7 +219,7 @@ nodeunitShim({ const rootAssembly = app.synth(); // THEN - test.deepEqual(rootAssembly.manifest.artifacts, { + expect(rootAssembly.manifest.artifacts).toEqual({ 'assembly-StageLevel1': { type: 'cdk:cloud-assembly', properties: { @@ -231,7 +230,7 @@ nodeunitShim({ }); const assemblyLevel1 = rootAssembly.getNestedAssembly('assembly-StageLevel1'); - test.deepEqual(assemblyLevel1.manifest.artifacts, { + expect(assemblyLevel1.manifest.artifacts).toEqual({ 'assembly-StageLevel1-StageLevel2': { type: 'cdk:cloud-assembly', properties: { @@ -242,7 +241,7 @@ nodeunitShim({ }); const assemblyLevel2 = assemblyLevel1.getNestedAssembly('assembly-StageLevel1-StageLevel2'); - test.deepEqual(assemblyLevel2.manifest.artifacts, { + expect(assemblyLevel2.manifest.artifacts).toEqual({ 'assembly-StageLevel1-StageLevel2-StageLevel3': { type: 'cdk:cloud-assembly', properties: { @@ -252,10 +251,10 @@ nodeunitShim({ }, }); - test.done(); - }, - 'stage name validation'(test: Test) { + }); + + test('stage name validation', () => { const app = new App(); new Stage(app, 'abcd'); @@ -264,38 +263,38 @@ nodeunitShim({ new Stage(app, 'abcd123-588dfjjk.sss'); new Stage(app, 'abcd123-588dfjjk.sss_ajsid'); - test.throws(() => new Stage(app, 'abcd123-588dfjjk.sss_ajsid '), /invalid stage name "abcd123-588dfjjk.sss_ajsid "/); - test.throws(() => new Stage(app, 'abcd123-588dfjjk.sss_ajsid/dfo'), /invalid stage name "abcd123-588dfjjk.sss_ajsid\/dfo"/); - test.throws(() => new Stage(app, '&'), /invalid stage name "&"/); - test.throws(() => new Stage(app, '45hello'), /invalid stage name "45hello"/); - test.throws(() => new Stage(app, 'f'), /invalid stage name "f"/); + expect(() => new Stage(app, 'abcd123-588dfjjk.sss_ajsid ')).toThrow(/invalid stage name "abcd123-588dfjjk.sss_ajsid "/); + expect(() => new Stage(app, 'abcd123-588dfjjk.sss_ajsid/dfo')).toThrow(/invalid stage name "abcd123-588dfjjk.sss_ajsid\/dfo"/); + expect(() => new Stage(app, '&')).toThrow(/invalid stage name "&"/); + expect(() => new Stage(app, '45hello')).toThrow(/invalid stage name "45hello"/); + expect(() => new Stage(app, 'f')).toThrow(/invalid stage name "f"/); - test.done(); - }, - 'outdir cannot be specified for nested stages'(test: Test) { + }); + + test('outdir cannot be specified for nested stages', () => { // WHEN const app = new App(); // THEN - test.throws(() => new Stage(app, 'mystage', { outdir: '/tmp/foo/bar' }), /"outdir" cannot be specified for nested stages/); - test.done(); - }, + expect(() => new Stage(app, 'mystage', { outdir: '/tmp/foo/bar' })).toThrow(/"outdir" cannot be specified for nested stages/); + + }); - 'Stage.isStage indicates that a construct is a stage'(test: Test) { + test('Stage.isStage indicates that a 
construct is a stage', () => { // WHEN const app = new App(); const stack = new Stack(); const stage = new Stage(app, 'Stage'); // THEN - test.ok(Stage.isStage(stage)); - test.ok(Stage.isStage(app)); - test.ok(!Stage.isStage(stack)); - test.done(); - }, + expect(Stage.isStage(stage)).toEqual(true); + expect(Stage.isStage(app)).toEqual(true); + expect(Stage.isStage(stack)).toEqual(false); - 'Stage.isStage indicates that a construct is a stage based on symbol'(test: Test) { + }); + + test('Stage.isStage indicates that a construct is a stage based on symbol', () => { // WHEN const app = new App(); const stage = new Stage(app, 'Stage'); @@ -305,11 +304,11 @@ nodeunitShim({ Object.defineProperty(externalStage, STAGE_SYMBOL, { value: true }); // THEN - test.ok(Stage.isStage(stage)); - test.ok(Stage.isStage(app)); - test.ok(Stage.isStage(externalStage)); - test.done(); - }, + expect(Stage.isStage(stage)).toEqual(true); + expect(Stage.isStage(app)).toEqual(true); + expect(Stage.isStage(externalStage)).toEqual(true); + + }); }); test('missing context in Stages is propagated up to root assembly', () => { diff --git a/packages/@aws-cdk/core/test/staging.test.ts b/packages/@aws-cdk/core/test/staging.test.ts index c55e7a9286326..edd8d124ba978 100644 --- a/packages/@aws-cdk/core/test/staging.test.ts +++ b/packages/@aws-cdk/core/test/staging.test.ts @@ -3,7 +3,6 @@ import * as path from 'path'; import { FileAssetPackaging } from '@aws-cdk/cloud-assembly-schema'; import * as cxapi from '@aws-cdk/cx-api'; import * as fs from 'fs-extra'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as sinon from 'sinon'; import { App, AssetHashType, AssetStaging, BundlingDockerImage, BundlingOptions, BundlingOutput, FileSystem, Stack, Stage } from '../lib'; @@ -27,9 +26,9 @@ const USER_ARG = `-u ${userInfo.uid}:${userInfo.gid}`; // this is a way to provide a custom "docker" command for staging. 
process.env.CDK_DOCKER = `${__dirname}/docker-stub.sh`; -nodeunitShim({ +describe('staging', () => { - 'tearDown'(cb: any) { + afterEach(() => { AssetStaging.clearAssetHashCache(); if (fs.existsSync(STUB_INPUT_FILE)) { fs.unlinkSync(STUB_INPUT_FILE); @@ -37,11 +36,10 @@ nodeunitShim({ if (fs.existsSync(STUB_INPUT_CONCAT_FILE)) { fs.unlinkSync(STUB_INPUT_CONCAT_FILE); } - cb(); sinon.restore(); - }, + }); - 'base case'(test: Test) { + test('base case', () => { // GIVEN const stack = new Stack(); const sourcePath = path.join(__dirname, 'fs', 'fixtures', 'test1'); @@ -49,16 +47,16 @@ nodeunitShim({ // WHEN const staging = new AssetStaging(stack, 's1', { sourcePath }); - test.deepEqual(staging.sourceHash, '2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); - test.deepEqual(staging.sourcePath, sourcePath); - test.deepEqual(path.basename(staging.stagedPath), 'asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); - test.deepEqual(path.basename(staging.relativeStagedPath(stack)), 'asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); - test.deepEqual(staging.packaging, FileAssetPackaging.ZIP_DIRECTORY); - test.deepEqual(staging.isArchive, true); - test.done(); - }, + expect(staging.sourceHash).toEqual('2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(staging.sourcePath).toEqual(sourcePath); + expect(path.basename(staging.stagedPath)).toEqual('asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(path.basename(staging.relativeStagedPath(stack))).toEqual('asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(staging.packaging).toEqual(FileAssetPackaging.ZIP_DIRECTORY); + expect(staging.isArchive).toEqual(true); - 'staging of an archive file correctly sets packaging and isArchive'(test: Test) { + }); + + test('staging of an archive file correctly sets packaging and isArchive', () => { // GIVEN const stack = new Stack(); const sourcePath = path.join(__dirname, 'archive', 'archive.zip'); @@ -66,12 +64,12 @@ nodeunitShim({ // WHEN const staging = new AssetStaging(stack, 's1', { sourcePath }); - test.deepEqual(staging.packaging, FileAssetPackaging.FILE); - test.deepEqual(staging.isArchive, true); - test.done(); - }, + expect(staging.packaging).toEqual(FileAssetPackaging.FILE); + expect(staging.isArchive).toEqual(true); + + }); - 'asset packaging type is correct when staging is skipped because of memory cache'(test: Test) { + test('asset packaging type is correct when staging is skipped because of memory cache', () => { // GIVEN const stack = new Stack(); const sourcePath = path.join(__dirname, 'archive', 'archive.zip'); @@ -80,14 +78,14 @@ nodeunitShim({ const staging1 = new AssetStaging(stack, 's1', { sourcePath }); const staging2 = new AssetStaging(stack, 's2', { sourcePath }); - test.deepEqual(staging1.packaging, FileAssetPackaging.FILE); - test.deepEqual(staging1.isArchive, true); - test.deepEqual(staging2.packaging, staging1.packaging); - test.deepEqual(staging2.isArchive, staging1.isArchive); - test.done(); - }, + expect(staging1.packaging).toEqual(FileAssetPackaging.FILE); + expect(staging1.isArchive).toEqual(true); + expect(staging2.packaging).toEqual(staging1.packaging); + expect(staging2.isArchive).toEqual(staging1.isArchive); + + }); - 'asset packaging type is correct when staging is skipped because of disk cache'(test: Test) { + test('asset packaging type is correct when staging is skipped because of disk cache', () => { // GIVEN const TEST_OUTDIR = 
path.join(__dirname, 'cdk.out'); if (fs.existsSync(TEST_OUTDIR)) { @@ -113,15 +111,15 @@ nodeunitShim({ const staging2 = new AssetStaging(stack2, 'Asset', { sourcePath }); // THEN - test.deepEqual(staging1.packaging, FileAssetPackaging.FILE); - test.deepEqual(staging1.isArchive, true); - test.deepEqual(staging2.packaging, staging1.packaging); - test.deepEqual(staging2.isArchive, staging1.isArchive); + expect(staging1.packaging).toEqual(FileAssetPackaging.FILE); + expect(staging1.isArchive).toEqual(true); + expect(staging2.packaging).toEqual(staging1.packaging); + expect(staging2.isArchive).toEqual(staging1.isArchive); - test.done(); - }, - 'staging of a non-archive file correctly sets packaging and isArchive'(test: Test) { + }); + + test('staging of a non-archive file correctly sets packaging and isArchive', () => { // GIVEN const stack = new Stack(); const sourcePath = __filename; @@ -129,12 +127,12 @@ nodeunitShim({ // WHEN const staging = new AssetStaging(stack, 's1', { sourcePath }); - test.deepEqual(staging.packaging, FileAssetPackaging.FILE); - test.deepEqual(staging.isArchive, false); - test.done(); - }, + expect(staging.packaging).toEqual(FileAssetPackaging.FILE); + expect(staging.isArchive).toEqual(false); + + }); - 'staging can be disabled through context'(test: Test) { + test('staging can be disabled through context', () => { // GIVEN const stack = new Stack(); stack.node.setContext(cxapi.DISABLE_ASSET_STAGING_CONTEXT, true); @@ -143,14 +141,14 @@ nodeunitShim({ // WHEN const staging = new AssetStaging(stack, 's1', { sourcePath }); - test.deepEqual(staging.sourceHash, '2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); - test.deepEqual(staging.sourcePath, sourcePath); - test.deepEqual(staging.stagedPath, sourcePath); - test.deepEqual(staging.relativeStagedPath(stack), sourcePath); - test.done(); - }, + expect(staging.sourceHash).toEqual('2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(staging.sourcePath).toEqual(sourcePath); + expect(staging.stagedPath).toEqual(sourcePath); + expect(staging.relativeStagedPath(stack)).toEqual(sourcePath); - 'files are copied to the output directory during synth'(test: Test) { + }); + + test('files are copied to the output directory during synth', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -161,7 +159,7 @@ nodeunitShim({ // THEN const assembly = app.synth(); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00', 'asset.af10ac04b3b607b0f8659c8f0cee8c343025ee75baf0b146f10f0e5311d2c46b.gz', 'cdk.out', @@ -169,10 +167,10 @@ nodeunitShim({ 'stack.template.json', 'tree.json', ]); - test.done(); - }, - 'assets in nested assemblies get staged into assembly root directory'(test: Test) { + }); + + test('assets in nested assemblies get staged into assembly root directory', () => { // GIVEN const app = new App(); const stack1 = new Stack(new Stage(app, 'Stage1'), 'Stack'); @@ -186,7 +184,7 @@ nodeunitShim({ const assembly = app.synth(); // One asset directory at the top - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'assembly-Stage1', 'assembly-Stage2', 'asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00', @@ -194,10 +192,10 @@ nodeunitShim({ 'manifest.json', 'tree.json', ]); - test.done(); - }, - 'allow specifying extra data to include in the source hash'(test: Test) 
{ + }); + + test('allow specifying extra data to include in the source hash', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -208,13 +206,13 @@ nodeunitShim({ const withExtra = new AssetStaging(stack, 'withExtra', { sourcePath: directory, extraHash: 'boom' }); // THEN - test.notEqual(withoutExtra.sourceHash, withExtra.sourceHash); - test.deepEqual(withoutExtra.sourceHash, '2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); - test.deepEqual(withExtra.sourceHash, 'c95c915a5722bb9019e2c725d11868e5a619b55f36172f76bcbcaa8bb2d10c5f'); - test.done(); - }, + expect(withoutExtra.sourceHash).not.toEqual(withExtra.sourceHash); + expect(withoutExtra.sourceHash).toEqual('2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(withExtra.sourceHash).toEqual('c95c915a5722bb9019e2c725d11868e5a619b55f36172f76bcbcaa8bb2d10c5f'); + + }); - 'with bundling'(test: Test) { + test('with bundling', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -232,11 +230,11 @@ nodeunitShim({ // THEN const assembly = app.synth(); - test.deepEqual( - readDockerStubInput(), + expect( + readDockerStubInput()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS`, ); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4', 'cdk.out', 'manifest.json', @@ -245,12 +243,12 @@ nodeunitShim({ ]); // shows a message before bundling - test.ok(processStdErrWriteSpy.calledWith('Bundling asset stack/Asset...\n')); + expect(processStdErrWriteSpy.calledWith('Bundling asset stack/Asset...\n')).toEqual(true); - test.done(); - }, - 'bundled resources have absolute path when staging is disabled'(test: Test) { + }); + + test('bundled resources have absolute path when staging is disabled', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -269,7 +267,7 @@ nodeunitShim({ // THEN const assembly = app.synth(); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4', 'cdk.out', 'manifest.json', @@ -277,18 +275,18 @@ nodeunitShim({ 'tree.json', ]); - test.equal(asset.sourceHash, 'b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4'); - test.equal(asset.sourcePath, directory); + expect(asset.sourceHash).toEqual('b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4'); + expect(asset.sourcePath).toEqual(directory); const resolvedStagePath = asset.relativeStagedPath(stack); // absolute path ending with bundling dir - test.ok(path.isAbsolute(resolvedStagePath)); - test.ok(new RegExp('asset.b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4$').test(resolvedStagePath)); + expect(path.isAbsolute(resolvedStagePath)).toEqual(true); + expect(new RegExp('asset.b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4$').test(resolvedStagePath)).toEqual(true); + - test.done(); - }, + }); - 'bundler reuses its output when it can'(test: Test) { + test('bundler reuses its output when it can', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -315,12 +313,12 @@ nodeunitShim({ const assembly = app.synth(); // We're testing that docker was run exactly once even though there are two bundling assets. 
- test.deepEqual( - readDockerStubInputConcat(), + expect( + readDockerStubInputConcat()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS`, ); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4', 'cdk.out', 'manifest.json', @@ -328,10 +326,10 @@ nodeunitShim({ 'tree.json', ]); - test.done(); - }, - 'uses asset hash cache with AssetHashType.OUTPUT'(test: Test) { + }); + + test('uses asset hash cache with AssetHashType.OUTPUT', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -362,12 +360,12 @@ nodeunitShim({ // We're testing that docker was run exactly once even though there are two bundling assets // and that the hash is based on the output - test.deepEqual( - readDockerStubInputConcat(), + expect( + readDockerStubInputConcat()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS`, ); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f', 'cdk.out', 'manifest.json', @@ -376,12 +374,12 @@ nodeunitShim({ ]); // Only one fingerprinting - test.ok(fingerPrintSpy.calledOnce); + expect(fingerPrintSpy.calledOnce).toEqual(true); - test.done(); - }, - 'bundler considers its options when reusing bundle output'(test: Test) { + }); + + test('bundler considers its options when reusing bundle output', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -412,13 +410,13 @@ nodeunitShim({ // We're testing that docker was run twice - once for each set of bundler options // operating on the same source asset. 
- test.deepEqual( - readDockerStubInputConcat(), + expect( + readDockerStubInputConcat()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS\n` + `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated --env UNIQUE_ENV_VAR=SOMEVALUE -w /asset-input alpine DOCKER_STUB_SUCCESS`, ); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4', // 'Asset' 'asset.e80bb8f931b87e84975de193f5a7ecddd7558d3caf3d35d3a536d9ae6539234f', // 'AssetWithDifferentBundlingOptions' 'cdk.out', @@ -427,10 +425,10 @@ nodeunitShim({ 'tree.json', ]); - test.done(); - }, - 'bundler outputs to intermediate dir and renames to asset'(test: Test) { + }); + + test('bundler outputs to intermediate dir and renames to asset', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -452,11 +450,11 @@ nodeunitShim({ // THEN const assembly = app.synth(); - test.ok(ensureDirSync.calledWith(sinon.match(path.join(assembly.directory, 'bundling-temp-')))); - test.ok(chmodSyncSpy.calledWith(sinon.match(path.join(assembly.directory, 'bundling-temp-')), 0o777)); - test.ok(renameSyncSpy.calledWith(sinon.match(path.join(assembly.directory, 'bundling-temp-')), sinon.match(path.join(assembly.directory, 'asset.')))); + expect(ensureDirSync.calledWith(sinon.match(path.join(assembly.directory, 'bundling-temp-')))).toEqual(true); + expect(chmodSyncSpy.calledWith(sinon.match(path.join(assembly.directory, 'bundling-temp-')), 0o777)).toEqual(true); + expect(renameSyncSpy.calledWith(sinon.match(path.join(assembly.directory, 'bundling-temp-')), sinon.match(path.join(assembly.directory, 'asset.')))).toEqual(true); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f', // 'Asset' 'cdk.out', 'manifest.json', @@ -464,34 +462,34 @@ nodeunitShim({ 'tree.json', ]); - test.done(); - }, - 'bundling failure preserves the bundleDir for diagnosability'(test: Test) { + }); + + test('bundling failure preserves the bundleDir for diagnosability', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // WHEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, bundling: { image: BundlingDockerImage.fromRegistry('alpine'), command: [DockerStubCommand.FAIL], }, - }), /Failed.*bundl.*asset.*-error/); + })).toThrow(/Failed.*bundl.*asset.*-error/); // THEN const assembly = app.synth(); const dir = fs.readdirSync(assembly.directory); - test.ok(dir.some(entry => entry.match(/asset.*-error/))); + expect(dir.some(entry => entry.match(/asset.*-error/))).toEqual(true); + - test.done(); - }, + }); - 'bundler re-uses assets from previous synths'(test: Test) { + test('bundler re-uses assets from previous synths', () => { // GIVEN const TEST_OUTDIR = path.join(__dirname, 'cdk.out'); if (fs.existsSync(TEST_OUTDIR)) { @@ -532,13 +530,13 @@ nodeunitShim({ const appAssembly = app.synth(); const app2Assembly = app2.synth(); - test.deepEqual( - readDockerStubInputConcat(), + expect( + readDockerStubInputConcat()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input 
alpine DOCKER_STUB_SUCCESS`, ); - test.equals(appAssembly.directory, app2Assembly.directory); - test.deepEqual(fs.readdirSync(appAssembly.directory), [ + expect(appAssembly.directory).toEqual(app2Assembly.directory); + expect(fs.readdirSync(appAssembly.directory)).toEqual([ 'asset.b1e32e86b3523f2fa512eb99180ee2975a50a4439e63e8badd153f2a68d61aa4', 'cdk.out', 'manifest.json', @@ -546,32 +544,32 @@ nodeunitShim({ 'tree.json', ]); - test.done(); - }, - 'bundling throws when /asset-ouput is empty'(test: Test) { + }); + + test('bundling throws when /asset-ouput is empty', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // THEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, bundling: { image: BundlingDockerImage.fromRegistry('alpine'), command: [DockerStubCommand.SUCCESS_NO_OUTPUT], }, - }), /Bundling did not produce any output/); + })).toThrow(/Bundling did not produce any output/); - test.equal( - readDockerStubInput(), + expect( + readDockerStubInput()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS_NO_OUTPUT`, ); - test.done(); - }, - 'bundling with BUNDLE asset hash type'(test: Test) { + }); + + test('bundling with BUNDLE asset hash type', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -588,17 +586,17 @@ nodeunitShim({ }); // THEN - test.equal( - readDockerStubInput(), + expect( + readDockerStubInput()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS`, ); - test.equal(asset.assetHash, '33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); + expect(asset.assetHash).toEqual('33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); - test.done(); - }, + }); - 'bundling with docker security option'(test: Test) { + + test('bundling with docker security option', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -616,16 +614,16 @@ nodeunitShim({ }); // THEN - test.equal( - readDockerStubInput(), + expect( + readDockerStubInput()).toEqual( `run --rm --security-opt no-new-privileges ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS`, ); - test.equal(asset.assetHash, '33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); + expect(asset.assetHash).toEqual('33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); + - test.done(); - }, + }); - 'bundling with OUTPUT asset hash type'(test: Test) { + test('bundling with OUTPUT asset hash type', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -642,12 +640,12 @@ nodeunitShim({ }); // THEN - test.equal(asset.assetHash, '33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); + expect(asset.assetHash).toEqual('33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); + - test.done(); - }, + }); - 'custom hash'(test: Test) { + test('custom hash', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -660,20 +658,20 @@ nodeunitShim({ }); // THEN - test.equal(fs.existsSync(STUB_INPUT_FILE), false); - test.equal(asset.assetHash, 'b9c77053f5b83bbe5ba343bc18e92db939a49017010813225fea91fa892c4823'); // hash of 'my-custom-hash' + 
expect(fs.existsSync(STUB_INPUT_FILE)).toEqual(false); + expect(asset.assetHash).toEqual('b9c77053f5b83bbe5ba343bc18e92db939a49017010813225fea91fa892c4823'); // hash of 'my-custom-hash' - test.done(); - }, - 'throws with assetHash and not CUSTOM hash type'(test: Test) { + }); + + test('throws with assetHash and not CUSTOM hash type', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // THEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, bundling: { image: BundlingDockerImage.fromRegistry('alpine'), @@ -681,82 +679,82 @@ nodeunitShim({ }, assetHash: 'my-custom-hash', assetHashType: AssetHashType.BUNDLE, - }), /Cannot specify `bundle` for `assetHashType`/); + })).toThrow(/Cannot specify `bundle` for `assetHashType`/); + - test.done(); - }, + }); - 'throws with BUNDLE hash type and no bundling'(test: Test) { + test('throws with BUNDLE hash type and no bundling', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // THEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, assetHashType: AssetHashType.BUNDLE, - }), /Cannot use `bundle` hash type when `bundling` is not specified/); - test.equal(fs.existsSync(STUB_INPUT_FILE), false); + })).toThrow(/Cannot use `bundle` hash type when `bundling` is not specified/); + expect(fs.existsSync(STUB_INPUT_FILE)).toEqual(false); - test.done(); - }, - 'throws with OUTPUT hash type and no bundling'(test: Test) { + }); + + test('throws with OUTPUT hash type and no bundling', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // THEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, assetHashType: AssetHashType.OUTPUT, - }), /Cannot use `output` hash type when `bundling` is not specified/); - test.equal(fs.existsSync(STUB_INPUT_FILE), false); + })).toThrow(/Cannot use `output` hash type when `bundling` is not specified/); + expect(fs.existsSync(STUB_INPUT_FILE)).toEqual(false); + - test.done(); - }, + }); - 'throws with CUSTOM and no hash'(test: Test) { + test('throws with CUSTOM and no hash', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // THEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, assetHashType: AssetHashType.CUSTOM, - }), /`assetHash` must be specified when `assetHashType` is set to `AssetHashType.CUSTOM`/); - test.equal(fs.existsSync(STUB_INPUT_FILE), false); // "docker" not executed + })).toThrow(/`assetHash` must be specified when `assetHashType` is set to `AssetHashType.CUSTOM`/); + expect(fs.existsSync(STUB_INPUT_FILE)).toEqual(false); // "docker" not executed - test.done(); - }, - 'throws when bundling fails'(test: Test) { + }); + + test('throws when bundling fails', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // THEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new AssetStaging(stack, 'Asset', { sourcePath: directory, 
bundling: { image: BundlingDockerImage.fromRegistry('this-is-an-invalid-docker-image'), command: [DockerStubCommand.FAIL], }, - }), /Failed to bundle asset stack\/Asset/); - test.equal( - readDockerStubInput(), + })).toThrow(/Failed to bundle asset stack\/Asset/); + expect( + readDockerStubInput()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input this-is-an-invalid-docker-image DOCKER_STUB_FAIL`, ); - test.done(); - }, - 'with local bundling'(test: Test) { + }); + + test('with local bundling', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -782,18 +780,18 @@ nodeunitShim({ }); // THEN - test.ok(dir && /asset.[0-9a-f]{16,}/.test(dir)); - test.equals(opts?.command?.[0], DockerStubCommand.SUCCESS); - test.throws(() => readDockerStubInput()); + expect(dir && /asset.[0-9a-f]{16,}/.test(dir)).toEqual(true); + expect(opts?.command?.[0]).toEqual(DockerStubCommand.SUCCESS); + expect(() => readDockerStubInput()).toThrow(); if (dir) { fs.removeSync(path.join(dir, 'hello.txt')); } - test.done(); - }, - 'with local bundling returning false'(test: Test) { + }); + + test('with local bundling returning false', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -814,12 +812,12 @@ nodeunitShim({ }); // THEN - test.ok(readDockerStubInput()); + expect(readDockerStubInput()).toBeDefined(); + - test.done(); - }, + }); - 'bundling can be skipped by setting context'(test: Test) { + test('bundling can be skipped by setting context', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'MyStack'); @@ -836,16 +834,16 @@ nodeunitShim({ }, }); - test.throws(() => readDockerStubInput()); // Bundling did not run - test.equal(asset.sourcePath, directory); - test.equal(asset.stagedPath, directory); - test.equal(asset.relativeStagedPath(stack), directory); - test.equal(asset.assetHash, 'f66d7421aa2d044a6c1f60ddfc76dc78571fcd8bd228eb48eb394e2dbad94a5c'); + expect(() => readDockerStubInput()).toThrow(); // Bundling did not run + expect(asset.sourcePath).toEqual(directory); + expect(asset.stagedPath).toEqual(directory); + expect(asset.relativeStagedPath(stack)).toEqual(directory); + expect(asset.assetHash).toEqual('f66d7421aa2d044a6c1f60ddfc76dc78571fcd8bd228eb48eb394e2dbad94a5c'); - test.done(); - }, - 'bundling still occurs with partial wildcard'(test: Test) { + }); + + test('bundling still occurs with partial wildcard', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'MyStack'); @@ -862,16 +860,16 @@ nodeunitShim({ }, }); - test.equal( - readDockerStubInput(), + expect( + readDockerStubInput()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS`, ); - test.equal(asset.assetHash, '33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); // hash of MyStack/Asset + expect(asset.assetHash).toEqual('33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); // hash of MyStack/Asset + - test.done(); - }, + }); - 'bundling still occurs with full wildcard'(test: Test) { + test('bundling still occurs with full wildcard', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'MyStack'); @@ -888,16 +886,16 @@ nodeunitShim({ }, }); - test.equal( - readDockerStubInput(), + expect( + readDockerStubInput()).toEqual( `run --rm ${USER_ARG} -v /input:/asset-input:delegated -v /output:/asset-output:delegated -w /asset-input alpine DOCKER_STUB_SUCCESS`, 
); - test.equal(asset.assetHash, '33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); // hash of MyStack/Asset + expect(asset.assetHash).toEqual('33cbf2cae5432438e0f046bc45ba8c3cef7b6afcf47b59d1c183775c1918fb1f'); // hash of MyStack/Asset - test.done(); - }, - 'bundling that produces a single archive file is autodiscovered'(test: Test) { + }); + + test('bundling that produces a single archive file is autodiscovered', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -914,7 +912,7 @@ nodeunitShim({ // THEN const assembly = app.synth(); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.f43148c61174f444925231b5849b468f21e93b5d1469cd07c53625ffd039ef48', // this is the bundle dir 'asset.f43148c61174f444925231b5849b468f21e93b5d1469cd07c53625ffd039ef48.zip', 'cdk.out', @@ -922,16 +920,16 @@ nodeunitShim({ 'stack.template.json', 'tree.json', ]); - test.deepEqual(fs.readdirSync(path.join(assembly.directory, 'asset.f43148c61174f444925231b5849b468f21e93b5d1469cd07c53625ffd039ef48')), [ + expect(fs.readdirSync(path.join(assembly.directory, 'asset.f43148c61174f444925231b5849b468f21e93b5d1469cd07c53625ffd039ef48'))).toEqual([ 'test.zip', // bundle dir with "touched" bundled output file ]); - test.deepEqual(staging.packaging, FileAssetPackaging.FILE); - test.deepEqual(staging.isArchive, true); + expect(staging.packaging).toEqual(FileAssetPackaging.FILE); + expect(staging.isArchive).toEqual(true); + - test.done(); - }, + }); - 'bundling that produces a single archive file with disk cache'(test: Test) { + test('bundling that produces a single archive file with disk cache', () => { // GIVEN const TEST_OUTDIR = path.join(__dirname, 'cdk.out'); if (fs.existsSync(TEST_OUTDIR)) { @@ -971,15 +969,15 @@ nodeunitShim({ }); // THEN - test.deepEqual(staging1.packaging, FileAssetPackaging.FILE); - test.deepEqual(staging1.isArchive, true); - test.deepEqual(staging2.packaging, staging1.packaging); - test.deepEqual(staging2.isArchive, staging1.isArchive); + expect(staging1.packaging).toEqual(FileAssetPackaging.FILE); + expect(staging1.isArchive).toEqual(true); + expect(staging2.packaging).toEqual(staging1.packaging); + expect(staging2.isArchive).toEqual(staging1.isArchive); - test.done(); - }, - 'bundling that produces a single archive file with NOT_ARCHIVED'(test: Test) { + }); + + test('bundling that produces a single archive file with NOT_ARCHIVED', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); @@ -997,38 +995,37 @@ nodeunitShim({ // THEN const assembly = app.synth(); - test.deepEqual(fs.readdirSync(assembly.directory), [ + expect(fs.readdirSync(assembly.directory)).toEqual([ 'asset.86ec07746e1d859290cfd8b9c648e581555649c75f51f741f11e22cab6775abc', 'cdk.out', 'manifest.json', 'stack.template.json', 'tree.json', ]); - test.deepEqual(staging.packaging, FileAssetPackaging.ZIP_DIRECTORY); - test.deepEqual(staging.isArchive, true); + expect(staging.packaging).toEqual(FileAssetPackaging.ZIP_DIRECTORY); + expect(staging.isArchive).toEqual(true); + - test.done(); - }, + }); - 'throws with ARCHIVED and bundling that does not produce a single archive file'(test: Test) { + test('throws with ARCHIVED and bundling that does not produce a single archive file', () => { // GIVEN const app = new App(); const stack = new Stack(app, 'stack'); const directory = path.join(__dirname, 'fs', 'fixtures', 'test1'); // WHEN - test.throws(() => new AssetStaging(stack, 'Asset', { + expect(() => new 
AssetStaging(stack, 'Asset', { sourcePath: directory, bundling: { image: BundlingDockerImage.fromRegistry('alpine'), command: [DockerStubCommand.MULTIPLE_FILES], outputType: BundlingOutput.ARCHIVED, }, - }), /Bundling output directory is expected to include only a single .zip or .jar file when `output` is set to `ARCHIVED`/); + })).toThrow(/Bundling output directory is expected to include only a single .zip or .jar file when `output` is set to `ARCHIVED`/); - test.done(); - }, + }); }); // Reads a docker stub and cleans the volume paths out of the stub. diff --git a/packages/@aws-cdk/core/test/synthesis.test.ts b/packages/@aws-cdk/core/test/synthesis.test.ts index b1dd35cec52fe..42dbb2f3fbc4f 100644 --- a/packages/@aws-cdk/core/test/synthesis.test.ts +++ b/packages/@aws-cdk/core/test/synthesis.test.ts @@ -2,15 +2,14 @@ import * as fs from 'fs'; import * as os from 'os'; import * as path from 'path'; import * as cxschema from '@aws-cdk/cloud-assembly-schema'; -import { nodeunitShim, Test } from 'nodeunit-shim'; import * as cdk from '../lib'; function createModernApp() { return new cdk.App(); } -nodeunitShim({ - 'synthesis with an empty app'(test: Test) { +describe('synthesis', () => { + test('synthesis with an empty app', () => { // GIVEN const app = createModernApp(); @@ -18,15 +17,15 @@ nodeunitShim({ const session = app.synth(); // THEN - test.same(app.synth(), session); // same session if we synth() again - test.deepEqual(list(session.directory), ['cdk.out', 'manifest.json', 'tree.json']); - test.deepEqual(readJson(session.directory, 'manifest.json').artifacts, { + expect(app.synth()).toEqual(session); // same session if we synth() again + expect(list(session.directory)).toEqual(['cdk.out', 'manifest.json', 'tree.json']); + expect(readJson(session.directory, 'manifest.json').artifacts).toEqual({ Tree: { type: 'cdk:tree', properties: { file: 'tree.json' }, }, }); - test.deepEqual(readJson(session.directory, 'tree.json'), { + expect(readJson(session.directory, 'tree.json')).toEqual({ version: 'tree-0.1', tree: expect.objectContaining({ id: 'App', @@ -36,19 +35,19 @@ nodeunitShim({ }, }), }); - test.done(); - }, - 'synthesis respects disabling tree metadata'(test: Test) { + }); + + test('synthesis respects disabling tree metadata', () => { const app = new cdk.App({ treeMetadata: false, }); const assembly = app.synth(); - test.deepEqual(list(assembly.directory), ['cdk.out', 'manifest.json']); - test.done(); - }, + expect(list(assembly.directory)).toEqual(['cdk.out', 'manifest.json']); + + }); - 'single empty stack'(test: Test) { + test('single empty stack', () => { // GIVEN const app = createModernApp(); new cdk.Stack(app, 'one-stack'); @@ -57,11 +56,11 @@ nodeunitShim({ const session = app.synth(); // THEN - test.ok(list(session.directory).includes('one-stack.template.json')); - test.done(); - }, + expect(list(session.directory).includes('one-stack.template.json')).toEqual(true); + + }); - 'some random construct implements "synthesize"'(test: Test) { + test('some random construct implements "synthesize"', () => { // GIVEN const app = createModernApp(); const stack = new cdk.Stack(app, 'one-stack'); @@ -85,11 +84,11 @@ nodeunitShim({ const session = app.synth(); // THEN - test.ok(list(session.directory).includes('one-stack.template.json')); - test.ok(list(session.directory).includes('foo.json')); + expect(list(session.directory).includes('one-stack.template.json')).toEqual(true); + expect(list(session.directory).includes('foo.json')).toEqual(true); - 
test.deepEqual(readJson(session.directory, 'foo.json'), { bar: 123 }); - test.deepEqual(session.manifest, { + expect(readJson(session.directory, 'foo.json')).toEqual({ bar: 123 }); + expect(session.manifest).toEqual({ version: cxschema.Manifest.version(), artifacts: { 'Tree': { @@ -112,10 +111,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'random construct uses addCustomSynthesis'(test: Test) { + }); + + test('random construct uses addCustomSynthesis', () => { // GIVEN const app = createModernApp(); const stack = new cdk.Stack(app, 'one-stack'); @@ -145,11 +144,11 @@ nodeunitShim({ const session = app.synth(); // THEN - test.ok(list(session.directory).includes('one-stack.template.json')); - test.ok(list(session.directory).includes('foo.json')); + expect(list(session.directory).includes('one-stack.template.json')).toEqual(true); + expect(list(session.directory).includes('foo.json')).toEqual(true); - test.deepEqual(readJson(session.directory, 'foo.json'), { bar: 123 }); - test.deepEqual(session.manifest, { + expect(readJson(session.directory, 'foo.json')).toEqual({ bar: 123 }); + expect(session.manifest).toEqual({ version: cxschema.Manifest.version(), artifacts: { 'Tree': { @@ -172,10 +171,10 @@ nodeunitShim({ }, }, }); - test.done(); - }, - 'it should be possible to synthesize without an app'(test: Test) { + }); + + test('it should be possible to synthesize without an app', () => { const calls = new Array(); class SynthesizeMe extends cdk.Construct { @@ -214,14 +213,14 @@ nodeunitShim({ const root = new SynthesizeMe(); const assembly = cdk.ConstructNode.synth(root.node, { outdir: fs.mkdtempSync(path.join(os.tmpdir(), 'outdir')) }); - test.deepEqual(calls, ['prepare', 'validate', 'synthesize']); + expect(calls).toEqual(['prepare', 'validate', 'synthesize']); const stack = assembly.getStackByName('art'); - test.deepEqual(stack.template, { hello: 123 }); - test.deepEqual(stack.templateFile, 'hey.json'); - test.deepEqual(stack.parameters, { paramId: 'paramValue', paramId2: 'paramValue2' }); - test.deepEqual(stack.environment, { region: 'us-east-1', account: 'unknown-account', name: 'aws://unknown-account/us-east-1' }); - test.done(); - }, + expect(stack.template).toEqual({ hello: 123 }); + expect(stack.templateFile).toEqual('hey.json'); + expect(stack.parameters).toEqual({ paramId: 'paramValue', paramId2: 'paramValue2' }); + expect(stack.environment).toEqual({ region: 'us-east-1', account: 'unknown-account', name: 'aws://unknown-account/us-east-1' }); + + }); }); function list(outdir: string) { diff --git a/packages/@aws-cdk/core/test/tag-aspect.test.ts b/packages/@aws-cdk/core/test/tag-aspect.test.ts index b0871e2d13c02..3667484d8f44d 100644 --- a/packages/@aws-cdk/core/test/tag-aspect.test.ts +++ b/packages/@aws-cdk/core/test/tag-aspect.test.ts @@ -1,4 +1,3 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnResource, CfnResourceProps, Construct, RemoveTag, Stack, Tag, TagManager, TagType, Aspects, Tags } from '../lib'; import { synthesize } from '../lib/private/synthesis'; @@ -38,8 +37,8 @@ class MapTaggableResource extends CfnResource { } } -nodeunitShim({ - 'Tag visit all children of the applied node'(test: Test) { +describe('tag aspect', () => { + test('Tag visit all children of the applied node', () => { const root = new Stack(); const res = new TaggableResource(root, 'FakeResource', { type: 'AWS::Fake::Thing', @@ -58,13 +57,13 @@ nodeunitShim({ synthesize(root); - test.deepEqual(res.tags.renderTags(), [{ key: 'foo', value: 'bar' }]); - 
test.deepEqual(res2.tags.renderTags(), [{ key: 'foo', value: 'bar' }]); - test.deepEqual(map.tags.renderTags(), { foo: 'bar' }); - test.deepEqual(asg.tags.renderTags(), [{ key: 'foo', value: 'bar', propagateAtLaunch: true }]); - test.done(); - }, - 'The last aspect applied takes precedence'(test: Test) { + expect(res.tags.renderTags()).toEqual([{ key: 'foo', value: 'bar' }]); + expect(res2.tags.renderTags()).toEqual([{ key: 'foo', value: 'bar' }]); + expect(map.tags.renderTags()).toEqual({ foo: 'bar' }); + expect(asg.tags.renderTags()).toEqual([{ key: 'foo', value: 'bar', propagateAtLaunch: true }]); + + }); + test('The last aspect applied takes precedence', () => { const root = new Stack(); const res = new TaggableResource(root, 'FakeResource', { type: 'AWS::Fake::Thing', @@ -77,11 +76,11 @@ nodeunitShim({ Aspects.of(res).add(new Tag('foo', 'baz')); Aspects.of(res2).add(new Tag('foo', 'good')); synthesize(root); - test.deepEqual(res.tags.renderTags(), [{ key: 'foo', value: 'baz' }]); - test.deepEqual(res2.tags.renderTags(), [{ key: 'foo', value: 'good' }]); - test.done(); - }, - 'RemoveTag will remove a tag if it exists'(test: Test) { + expect(res.tags.renderTags()).toEqual([{ key: 'foo', value: 'baz' }]); + expect(res2.tags.renderTags()).toEqual([{ key: 'foo', value: 'good' }]); + + }); + test('RemoveTag will remove a tag if it exists', () => { const root = new Stack(); const res = new TaggableResource(root, 'FakeResource', { type: 'AWS::Fake::Thing', @@ -102,13 +101,13 @@ nodeunitShim({ Aspects.of(res).add(new RemoveTag('doesnotexist')); synthesize(root); - test.deepEqual(res.tags.renderTags(), [{ key: 'first', value: 'there is only 1' }]); - test.deepEqual(map.tags.renderTags(), { first: 'there is only 1' }); - test.deepEqual(asg.tags.renderTags(), [{ key: 'first', value: 'there is only 1', propagateAtLaunch: true }]); - test.deepEqual(res2.tags.renderTags(), [{ key: 'first', value: 'there is only 1' }]); - test.done(); - }, - 'add will add a tag and remove will remove a tag if it exists'(test: Test) { + expect(res.tags.renderTags()).toEqual([{ key: 'first', value: 'there is only 1' }]); + expect(map.tags.renderTags()).toEqual({ first: 'there is only 1' }); + expect(asg.tags.renderTags()).toEqual([{ key: 'first', value: 'there is only 1', propagateAtLaunch: true }]); + expect(res2.tags.renderTags()).toEqual([{ key: 'first', value: 'there is only 1' }]); + + }); + test('add will add a tag and remove will remove a tag if it exists', () => { const root = new Stack(); const res = new TaggableResource(root, 'FakeResource', { type: 'AWS::Fake::Thing', @@ -130,13 +129,13 @@ nodeunitShim({ synthesize(root); - test.deepEqual(res.tags.renderTags(), [{ key: 'first', value: 'there is only 1' }]); - test.deepEqual(map.tags.renderTags(), { first: 'there is only 1' }); - test.deepEqual(asg.tags.renderTags(), [{ key: 'first', value: 'there is only 1', propagateAtLaunch: true }]); - test.deepEqual(res2.tags.renderTags(), [{ key: 'first', value: 'there is only 1' }]); - test.done(); - }, - 'the #visit function is idempotent'(test: Test) { + expect(res.tags.renderTags()).toEqual([{ key: 'first', value: 'there is only 1' }]); + expect(map.tags.renderTags()).toEqual({ first: 'there is only 1' }); + expect(asg.tags.renderTags()).toEqual([{ key: 'first', value: 'there is only 1', propagateAtLaunch: true }]); + expect(res2.tags.renderTags()).toEqual([{ key: 'first', value: 'there is only 1' }]); + + }); + test('the #visit function is idempotent', () => { const root = new Stack(); const res = new 
TaggableResource(root, 'FakeResource', { type: 'AWS::Fake::Thing', @@ -144,14 +143,14 @@ nodeunitShim({ Aspects.of(res).add(new Tag('foo', 'bar')); synthesize(root); - test.deepEqual(res.tags.renderTags(), [{ key: 'foo', value: 'bar' }]); + expect(res.tags.renderTags()).toEqual([{ key: 'foo', value: 'bar' }]); synthesize(root); - test.deepEqual(res.tags.renderTags(), [{ key: 'foo', value: 'bar' }]); + expect(res.tags.renderTags()).toEqual([{ key: 'foo', value: 'bar' }]); synthesize(root); - test.deepEqual(res.tags.renderTags(), [{ key: 'foo', value: 'bar' }]); - test.done(); - }, - 'removeTag Aspects by default will override child Tag Aspects'(test: Test) { + expect(res.tags.renderTags()).toEqual([{ key: 'foo', value: 'bar' }]); + + }); + test('removeTag Aspects by default will override child Tag Aspects', () => { const root = new Stack(); const res = new TaggableResource(root, 'FakeResource', { type: 'AWS::Fake::Thing', @@ -162,11 +161,11 @@ nodeunitShim({ Aspects.of(res).add(new RemoveTag('key')); Aspects.of(res2).add(new Tag('key', 'value')); synthesize(root); - test.deepEqual(res.tags.renderTags(), undefined); - test.deepEqual(res2.tags.renderTags(), undefined); - test.done(); - }, - 'removeTag Aspects with priority 0 will not override child Tag Aspects'(test: Test) { + expect(res.tags.renderTags()).toEqual(undefined); + expect(res2.tags.renderTags()).toEqual(undefined); + + }); + test('removeTag Aspects with priority 0 will not override child Tag Aspects', () => { const root = new Stack(); const res = new TaggableResource(root, 'FakeResource', { type: 'AWS::Fake::Thing', @@ -177,11 +176,11 @@ nodeunitShim({ Aspects.of(res).add(new RemoveTag('key', { priority: 0 })); Aspects.of(res2).add(new Tag('key', 'value')); synthesize(root); - test.deepEqual(res.tags.renderTags(), undefined); - test.deepEqual(res2.tags.renderTags(), [{ key: 'key', value: 'value' }]); - test.done(); - }, - 'Aspects are merged with tags created by L1 Constructor'(test: Test) { + expect(res.tags.renderTags()).toEqual(undefined); + expect(res2.tags.renderTags()).toEqual([{ key: 'key', value: 'value' }]); + + }); + test('Aspects are merged with tags created by L1 Constructor', () => { const root = new Stack(); const aspectBranch = new TaggableResource(root, 'FakeBranchA', { type: 'AWS::Fake::Thing', @@ -220,22 +219,22 @@ nodeunitShim({ }); Aspects.of(aspectBranch).add(new Tag('aspects', 'rule')); synthesize(root); - test.deepEqual(aspectBranch.testProperties().tags, [{ key: 'aspects', value: 'rule' }, { key: 'cfn', value: 'is cool' }]); - test.deepEqual(asgResource.testProperties().tags, [ + expect(aspectBranch.testProperties().tags).toEqual([{ key: 'aspects', value: 'rule' }, { key: 'cfn', value: 'is cool' }]); + expect(asgResource.testProperties().tags).toEqual([ { key: 'aspects', value: 'rule', propagateAtLaunch: true }, { key: 'cfn', value: 'is cool', propagateAtLaunch: true }, ]); - test.deepEqual(mapTaggable.testProperties().tags, { + expect(mapTaggable.testProperties().tags).toEqual({ aspects: 'rule', cfn: 'is cool', }); - test.deepEqual(cfnBranch.testProperties().tags, [{ key: 'cfn', value: 'is cool' }]); - test.done(); - }, - 'when invalid tag properties are passed from L1s': { - 'map passed instead of array it raises'(test: Test) { + expect(cfnBranch.testProperties().tags).toEqual([{ key: 'cfn', value: 'is cool' }]); + + }); + describe('when invalid tag properties are passed from L1s', () => { + test('map passed instead of array it raises', () => { const root = new Stack(); - test.throws(() => { + expect(() 
=> { new TaggableResource(root, 'FakeBranchA', { type: 'AWS::Fake::Thing', properties: { @@ -245,8 +244,8 @@ nodeunitShim({ }, }, }); - }); - test.throws(() => { + }).toThrow(); + expect(() => { new AsgTaggableResource(root, 'FakeBranchA', { type: 'AWS::Fake::Thing', properties: { @@ -257,12 +256,12 @@ nodeunitShim({ }, }, }); - }); - test.done(); - }, - 'if array is passed instead of map it raises'(test: Test) { + }).toThrow(); + + }); + test('if array is passed instead of map it raises', () => { const root = new Stack(); - test.throws(() => { + expect(() => { new MapTaggableResource(root, 'FakeSam', { type: 'AWS::Fake::Thing', properties: { @@ -272,8 +271,8 @@ nodeunitShim({ ], }, }); - }); - test.done(); - }, - }, + }).toThrow(); + + }); + }); }); diff --git a/packages/@aws-cdk/core/test/tag-manager.test.ts b/packages/@aws-cdk/core/test/tag-manager.test.ts index 7e9e448e97434..79b764f6c13b0 100644 --- a/packages/@aws-cdk/core/test/tag-manager.test.ts +++ b/packages/@aws-cdk/core/test/tag-manager.test.ts @@ -1,63 +1,62 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { TagType } from '../lib/cfn-resource'; import { TagManager } from '../lib/tag-manager'; -nodeunitShim({ - 'TagManagerOptions can set tagPropertyName'(test: Test) { +describe('tag manager', () => { + test('TagManagerOptions can set tagPropertyName', () => { const tagPropName = 'specialName'; const mgr = new TagManager(TagType.MAP, 'Foo', undefined, { tagPropertyName: tagPropName }); - test.deepEqual(mgr.tagPropertyName, tagPropName); - test.done(); - }, - '#setTag() supports setting a tag regardless of Type'(test: Test) { + expect(mgr.tagPropertyName).toEqual(tagPropName); + + }); + test('#setTag() supports setting a tag regardless of Type', () => { const notTaggable = new TagManager(TagType.NOT_TAGGABLE, 'AWS::Resource::Type'); notTaggable.setTag('key', 'value'); - test.deepEqual(notTaggable.renderTags(), undefined); - test.done(); - }, - 'when a tag does not exist': { - '#removeTag() does not throw an error'(test: Test) { + expect(notTaggable.renderTags()).toEqual(undefined); + + }); + describe('when a tag does not exist', () => { + test('#removeTag() does not throw an error', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); - test.doesNotThrow(() => (mgr.removeTag('dne', 0))); - test.done(); - }, - '#setTag() creates the tag'(test: Test) { + expect(() => (mgr.removeTag('dne', 0))).not.toThrow(); + + }); + test('#setTag() creates the tag', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); mgr.setTag('dne', 'notanymore'); - test.deepEqual(mgr.renderTags(), [{ key: 'dne', value: 'notanymore' }]); - test.done(); - }, - }, - 'when a tag does exist': { - '#removeTag() deletes the tag'(test: Test) { + expect(mgr.renderTags()).toEqual([{ key: 'dne', value: 'notanymore' }]); + + }); + }); + describe('when a tag does exist', () => { + test('#removeTag() deletes the tag', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); mgr.setTag('dne', 'notanymore', 0); mgr.removeTag('dne', 0); - test.deepEqual(mgr.renderTags(), undefined); - test.done(); - }, - '#setTag() overwrites the tag'(test: Test) { + expect(mgr.renderTags()).toEqual(undefined); + + }); + test('#setTag() overwrites the tag', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); mgr.setTag('dne', 'notanymore'); mgr.setTag('dne', 'iwin'); - test.deepEqual(mgr.renderTags(), [{ key: 'dne', value: 'iwin' }]); - test.done(); - }, - }, - 'when there are no tags': 
{ - '#renderTags() returns undefined'(test: Test) { + expect(mgr.renderTags()).toEqual([{ key: 'dne', value: 'iwin' }]); + + }); + }); + describe('when there are no tags', () => { + test('#renderTags() returns undefined', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); - test.deepEqual(mgr.renderTags(), undefined); - test.done(); - }, - '#hasTags() returns false'(test: Test) { + expect(mgr.renderTags()).toEqual(undefined); + + }); + test('#hasTags() returns false', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); - test.equal(mgr.hasTags(), false); - test.done(); - }, - }, - '#renderTags() handles standard, map, keyValue, and ASG tag formats'(test: Test) { + expect(mgr.hasTags()).toEqual(false); + + }); + }); + test('#renderTags() handles standard, map, keyValue, and ASG tag formats', () => { const tagged: TagManager[] = []; const standard = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); const asg = new TagManager(TagType.AUTOSCALING_GROUP, 'AWS::Resource::Type'); @@ -71,79 +70,79 @@ nodeunitShim({ res.setTag('foo', 'bar'); res.setTag('asg', 'only', 0, false); } - test.deepEqual(standard.renderTags(), [ + expect(standard.renderTags()).toEqual([ { key: 'asg', value: 'only' }, { key: 'foo', value: 'bar' }, ]); - test.deepEqual(asg.renderTags(), [ + expect(asg.renderTags()).toEqual([ { key: 'asg', value: 'only', propagateAtLaunch: false }, { key: 'foo', value: 'bar', propagateAtLaunch: true }, ]); - test.deepEqual(keyValue.renderTags(), [ + expect(keyValue.renderTags()).toEqual([ { Key: 'asg', Value: 'only' }, { Key: 'foo', Value: 'bar' }, ]); - test.deepEqual(mapper.renderTags(), { + expect(mapper.renderTags()).toEqual({ foo: 'bar', asg: 'only', }); - test.done(); - }, - 'when there are tags it hasTags returns true'(test: Test) { + + }); + test('when there are tags it hasTags returns true', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); mgr.setTag('key', 'myVal', 2); mgr.setTag('key', 'newVal', 1); - test.equal(mgr.hasTags(), true); - test.done(); - }, - 'tags with higher or equal priority always take precedence'(test: Test) { + expect(mgr.hasTags()).toEqual(true); + + }); + test('tags with higher or equal priority always take precedence', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); mgr.setTag('key', 'myVal', 2); mgr.setTag('key', 'newVal', 1); - test.deepEqual(mgr.renderTags(), [ + expect(mgr.renderTags()).toEqual([ { key: 'key', value: 'myVal' }, ]); mgr.removeTag('key', 1); - test.deepEqual(mgr.renderTags(), [ + expect(mgr.renderTags()).toEqual([ { key: 'key', value: 'myVal' }, ]); mgr.removeTag('key', 2); - test.deepEqual(mgr.renderTags(), undefined); - test.done(); - }, - 'tags are always ordered by key name'(test: Test) { + expect(mgr.renderTags()).toEqual(undefined); + + }); + test('tags are always ordered by key name', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Resource::Type'); mgr.setTag('key', 'foo'); mgr.setTag('aardvark', 'zebra'); mgr.setTag('name', 'test'); - test.deepEqual(mgr.renderTags(), [ + expect(mgr.renderTags()).toEqual([ { key: 'aardvark', value: 'zebra' }, { key: 'key', value: 'foo' }, { key: 'name', value: 'test' }, ]); mgr.setTag('myKey', 'myVal'); - test.deepEqual(mgr.renderTags(), [ + expect(mgr.renderTags()).toEqual([ { key: 'aardvark', value: 'zebra' }, { key: 'key', value: 'foo' }, { key: 'myKey', value: 'myVal' }, { key: 'name', value: 'test' }, ]); - test.done(); - }, - 'excludeResourceTypes only tags resources 
that do not match'(test: Test) { + + }); + test('excludeResourceTypes only tags resources that do not match', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Fake::Resource'); - test.equal(false, mgr.applyTagAspectHere([], ['AWS::Fake::Resource'])); - test.equal(true, mgr.applyTagAspectHere([], ['AWS::Wrong::Resource'])); + expect(false).toEqual(mgr.applyTagAspectHere([], ['AWS::Fake::Resource'])); + expect(true).toEqual(mgr.applyTagAspectHere([], ['AWS::Wrong::Resource'])); - test.done(); - }, - 'includeResourceTypes only tags resources that match'(test: Test) { + + }); + test('includeResourceTypes only tags resources that match', () => { const mgr = new TagManager(TagType.STANDARD, 'AWS::Fake::Resource'); - test.equal(true, mgr.applyTagAspectHere(['AWS::Fake::Resource'], [])); - test.equal(false, mgr.applyTagAspectHere(['AWS::Wrong::Resource'], [])); + expect(true).toEqual(mgr.applyTagAspectHere(['AWS::Fake::Resource'], [])); + expect(false).toEqual(mgr.applyTagAspectHere(['AWS::Wrong::Resource'], [])); + - test.done(); - }, + }); }); diff --git a/packages/@aws-cdk/core/test/tokens.test.ts b/packages/@aws-cdk/core/test/tokens.test.ts index eba1e75686048..1aca7312357fa 100644 --- a/packages/@aws-cdk/core/test/tokens.test.ts +++ b/packages/@aws-cdk/core/test/tokens.test.ts @@ -1,4 +1,3 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { Fn, isResolvableObject, Lazy, Stack, Token, Tokenization } from '../lib'; import { createTokenDouble, extractTokenDouble } from '../lib/private/encoding'; import { Intrinsic } from '../lib/private/intrinsic'; @@ -7,32 +6,32 @@ import { IResolvable } from '../lib/resolvable'; import { evaluateCFN } from './evaluate-cfn'; import { reEnableStackTraceCollection, restoreStackTraceColection } from './util'; -nodeunitShim({ - 'resolve a plain old object should just return the object'(test: Test) { +describe('tokens', () => { + test('resolve a plain old object should just return the object', () => { const obj = { PlainOldObject: 123, Array: [1, 2, 3] }; - test.deepEqual(resolve(obj), obj); - test.done(); - }, + expect(resolve(obj)).toEqual(obj); - 'if a value is an object with a token value, it will be evaluated'(test: Test) { + }); + + test('if a value is an object with a token value, it will be evaluated', () => { const obj = { RegularValue: 'hello', LazyValue: new Intrinsic('World'), }; - test.deepEqual(resolve(obj), { + expect(resolve(obj)).toEqual({ RegularValue: 'hello', LazyValue: 'World', }); - test.done(); - }, - 'tokens are evaluated anywhere in the object tree'(test: Test) { + }); + + test('tokens are evaluated anywhere in the object tree', () => { const obj = new Promise1(); const actual = resolve({ Obj: obj }); - test.deepEqual(actual, { + expect(actual).toEqual({ Obj: [ { Data: { @@ -51,14 +50,14 @@ nodeunitShim({ ], }); - test.done(); - }, - 'tokens are evaluated recursively'(test: Test) { + }); + + test('tokens are evaluated recursively', () => { const obj = new Promise1(); const actual = resolve(new Intrinsic({ Obj: obj })); - test.deepEqual(actual, { + expect(actual).toEqual({ Obj: [ { Data: { @@ -77,12 +76,12 @@ nodeunitShim({ ], }); - test.done(); - }, - 'empty arrays or objects are kept'(test: Test) { - test.deepEqual(resolve({ }), { }); - test.deepEqual(resolve([]), []); + }); + + test('empty arrays or objects are kept', () => { + expect(resolve({ })).toEqual({ }); + expect(resolve([])).toEqual([]); const obj = { Prop1: 1234, @@ -98,7 +97,7 @@ nodeunitShim({ }, }; - test.deepEqual(resolve(obj), { + 
expect(resolve(obj)).toEqual({ Prop1: 1234, Prop2: { }, Prop3: [], @@ -112,44 +111,44 @@ nodeunitShim({ }, }); - test.done(); - }, - 'if an object has a "resolve" property that is not a function, it is not considered a token'(test: Test) { - test.deepEqual(resolve({ a_token: { resolve: () => 78787 } }), { a_token: 78787 }); - test.deepEqual(resolve({ not_a_token: { resolve: 12 } }), { not_a_token: { resolve: 12 } }); - test.done(); - }, + }); + + test('if an object has a "resolve" property that is not a function, it is not considered a token', () => { + expect(resolve({ a_token: { resolve: () => 78787 } })).toEqual({ a_token: 78787 }); + expect(resolve({ not_a_token: { resolve: 12 } })).toEqual({ not_a_token: { resolve: 12 } }); + + }); // eslint-disable-next-line max-len - 'if a resolvable object inherits from a class that is also resolvable, the "constructor" function will not get in the way (uses Object.keys instead of "for in")'(test: Test) { - test.deepEqual(resolve({ prop: new DataType() }), { prop: { foo: 12, goo: 'hello' } }); - test.done(); - }, - - 'isToken(obj) can be used to determine if an object is a token'(test: Test) { - test.ok(isResolvableObject({ resolve: () => 123 })); - test.ok(isResolvableObject({ a: 1, b: 2, resolve: () => 'hello' })); - test.ok(!isResolvableObject({ a: 1, b: 2, resolve: 3 })); - test.done(); - }, - - 'Token can be used to create tokens that contain a constant value'(test: Test) { - test.equal(resolve(new Intrinsic(12)), 12); - test.equal(resolve(new Intrinsic('hello')), 'hello'); - test.deepEqual(resolve(new Intrinsic(['hi', 'there'])), ['hi', 'there']); - test.done(); - }, - - 'resolving leaves a Date object in working order'(test: Test) { + test('if a resolvable object inherits from a class that is also resolvable, the "constructor" function will not get in the way (uses Object.keys instead of "for in")', () => { + expect(resolve({ prop: new DataType() })).toEqual({ prop: { foo: 12, goo: 'hello' } }); + + }); + + test('isToken(obj) can be used to determine if an object is a token', () => { + expect(isResolvableObject({ resolve: () => 123 })).toEqual(true); + expect(isResolvableObject({ a: 1, b: 2, resolve: () => 'hello' })).toEqual(true); + expect(isResolvableObject({ a: 1, b: 2, resolve: 3 })).toEqual(false); + + }); + + test('Token can be used to create tokens that contain a constant value', () => { + expect(resolve(new Intrinsic(12))).toEqual(12); + expect(resolve(new Intrinsic('hello'))).toEqual('hello'); + expect(resolve(new Intrinsic(['hi', 'there']))).toEqual(['hi', 'there']); + + }); + + test('resolving leaves a Date object in working order', () => { const date = new Date('2000-01-01'); const resolved = resolve(date); - test.equal(date.toString(), resolved.toString()); - test.done(); - }, + expect(date.toString()).toEqual(resolved.toString()); - 'tokens can be stringified and evaluated to conceptual value'(test: Test) { + }); + + test('tokens can be stringified and evaluated to conceptual value', () => { // GIVEN const token = new Intrinsic('woof woof'); @@ -158,20 +157,20 @@ nodeunitShim({ const resolved = resolve(stringified); // THEN - test.deepEqual(evaluateCFN(resolved), 'The dog says: woof woof'); - test.done(); - }, + expect(evaluateCFN(resolved)).toEqual('The dog says: woof woof'); + + }); - 'tokens stringification can be reversed'(test: Test) { + test('tokens stringification can be reversed', () => { // GIVEN const token = new Intrinsic('woof woof'); // THEN - test.equal(token, Tokenization.reverseString(`${token}`).firstToken); - 
test.done(); - }, + expect(token).toEqual(Tokenization.reverseString(`${token}`).firstToken); + + }); - 'Tokens stringification and reversing of CloudFormation Tokens is implemented using Fn::Join'(test: Test) { + test('Tokens stringification and reversing of CloudFormation Tokens is implemented using Fn::Join', () => { // GIVEN const token = new Intrinsic( ({ woof: 'woof' })); @@ -180,13 +179,13 @@ nodeunitShim({ const resolved = resolve(stringified); // THEN - test.deepEqual(resolved, { + expect(resolved).toEqual({ 'Fn::Join': ['', ['The dog says: ', { woof: 'woof' }]], }); - test.done(); - }, - 'Doubly nested strings evaluate correctly in scalar context'(test: Test) { + }); + + test('Doubly nested strings evaluate correctly in scalar context', () => { // GIVEN const token1 = new Intrinsic( 'world'); const token2 = new Intrinsic( `hello ${token1}`); @@ -196,13 +195,13 @@ nodeunitShim({ const resolved2 = resolve(token2); // THEN - test.deepEqual(evaluateCFN(resolved1), 'hello world'); - test.deepEqual(evaluateCFN(resolved2), 'hello world'); + expect(evaluateCFN(resolved1)).toEqual('hello world'); + expect(evaluateCFN(resolved2)).toEqual('hello world'); + - test.done(); - }, + }); - 'integer Tokens can be stringified and evaluate to conceptual value'(test: Test) { + test('integer Tokens can be stringified and evaluate to conceptual value', () => { // GIVEN for (const token of tokensThatResolveTo(1)) { // WHEN @@ -210,12 +209,12 @@ nodeunitShim({ const resolved = resolve(stringified); // THEN - test.deepEqual(evaluateCFN(resolved), 'the number is 1'); + expect(evaluateCFN(resolved)).toEqual('the number is 1'); } - test.done(); - }, - 'intrinsic Tokens can be stringified and evaluate to conceptual value'(test: Test) { + }); + + test('intrinsic Tokens can be stringified and evaluate to conceptual value', () => { // GIVEN for (const bucketName of tokensThatResolveTo({ Ref: 'MyBucket' })) { // WHEN @@ -223,26 +222,26 @@ nodeunitShim({ // THEN const context = { MyBucket: 'TheName' }; - test.equal(evaluateCFN(resolved, context), 'my bucket is named TheName'); + expect(evaluateCFN(resolved, context)).toEqual('my bucket is named TheName'); } - test.done(); - }, - 'tokens resolve properly in initial position'(test: Test) { + }); + + test('tokens resolve properly in initial position', () => { // GIVEN for (const token of tokensThatResolveTo('Hello')) { // WHEN const resolved = resolve(`${token} world`); // THEN - test.equal(evaluateCFN(resolved), 'Hello world'); + expect(evaluateCFN(resolved)).toEqual('Hello world'); } - test.done(); - }, - 'side-by-side Tokens resolve correctly'(test: Test) { + }); + + test('side-by-side Tokens resolve correctly', () => { // GIVEN for (const token1 of tokensThatResolveTo('Hello ')) { for (const token2 of tokensThatResolveTo('world')) { @@ -250,14 +249,14 @@ nodeunitShim({ const resolved = resolve(`${token1}${token2}`); // THEN - test.equal(evaluateCFN(resolved), 'Hello world'); + expect(evaluateCFN(resolved)).toEqual('Hello world'); } } - test.done(); - }, - 'tokens can be used in hash keys but must resolve to a string'(test: Test) { + }); + + test('tokens can be used in hash keys but must resolve to a string', () => { // GIVEN const token = new Intrinsic( 'I am a string'); @@ -267,11 +266,11 @@ nodeunitShim({ }; // THEN - test.deepEqual(resolve(s), { 'I am a string': 'boom I am a string' }); - test.done(); - }, + expect(resolve(s)).toEqual({ 'I am a string': 'boom I am a string' }); - 'tokens can be nested in hash keys'(test: Test) { + }); + + test('tokens can be 
nested in hash keys', () => { // GIVEN const token = new Intrinsic(Lazy.string({ produce: () => Lazy.string({ produce: (() => 'I am a string') }) })); @@ -281,11 +280,11 @@ nodeunitShim({ }; // THEN - test.deepEqual(resolve(s), { 'I am a string': 'boom I am a string' }); - test.done(); - }, + expect(resolve(s)).toEqual({ 'I am a string': 'boom I am a string' }); + + }); - 'Function passed to Lazy.uncachedString() is evaluated multiple times'(test: Test) { + test('Function passed to Lazy.uncachedString() is evaluated multiple times', () => { // GIVEN let counter = 0; const counterString = Lazy.uncachedString({ produce: () => `${++counter}` }); @@ -294,10 +293,10 @@ nodeunitShim({ expect(resolve(counterString)).toEqual('1'); expect(resolve(counterString)).toEqual('2'); - test.done(); - }, - 'Function passed to Lazy.string() is only evaluated once'(test: Test) { + }); + + test('Function passed to Lazy.string() is only evaluated once', () => { // GIVEN let counter = 0; const counterString = Lazy.string({ produce: () => `${++counter}` }); @@ -306,10 +305,10 @@ nodeunitShim({ expect(resolve(counterString)).toEqual('1'); expect(resolve(counterString)).toEqual('1'); - test.done(); - }, - 'Uncached tokens returned by cached tokens are still evaluated multiple times'(test: Test) { + }); + + test('Uncached tokens returned by cached tokens are still evaluated multiple times', () => { // Check that nested token returns aren't accidentally fully resolved by the // first resolution. On every evaluation, Tokens referenced inside the // structure should be given a chance to be either cached or uncached. @@ -331,10 +330,10 @@ nodeunitShim({ expect(resolve(counterString2)).toEqual('->4'); expect(resolve(counterObject)).toEqual({ finalCount: '5' }); - test.done(); - }, - 'tokens can be nested and concatenated in hash keys'(test: Test) { + }); + + test('tokens can be nested and concatenated in hash keys', () => { // GIVEN const innerToken = new Intrinsic( 'toot'); const token = new Intrinsic( `${innerToken} the woot`); @@ -345,11 +344,11 @@ nodeunitShim({ }; // THEN - test.deepEqual(resolve(s), { 'toot the woot': 'boom chicago' }); - test.done(); - }, + expect(resolve(s)).toEqual({ 'toot the woot': 'boom chicago' }); + + }); - 'can find nested tokens in hash keys'(test: Test) { + test('can find nested tokens in hash keys', () => { // GIVEN const innerToken = new Intrinsic( 'toot'); const token = new Intrinsic( `${innerToken} the woot`); @@ -361,12 +360,12 @@ nodeunitShim({ // THEN const tokens = findTokens(new Stack(), () => s); - test.ok(tokens.some(t => t === innerToken), 'Cannot find innerToken'); - test.ok(tokens.some(t => t === token), 'Cannot find token'); - test.done(); - }, + expect(tokens.some(t => t === innerToken)).toEqual(true); + expect(tokens.some(t => t === token)).toEqual(true); - 'fails if token in a hash key resolves to a non-string'(test: Test) { + }); + + test('fails if token in a hash key resolves to a non-string', () => { // GIVEN const token = new Intrinsic({ Ref: 'Other' }); @@ -376,12 +375,12 @@ nodeunitShim({ }; // THEN - test.throws(() => resolve(s), 'is used as the key in a map so must resolve to a string, but it resolves to:'); - test.done(); - }, + expect(() => resolve(s)).toThrow('is used as the key in a map so must resolve to a string, but it resolves to:'); + + }); - 'list encoding': { - 'can encode Token to string and resolve the encoding'(test: Test) { + describe('list encoding', () => { + test('can encode Token to string and resolve the encoding', () => { // GIVEN const 
token = new Intrinsic({ Ref: 'Other' }); @@ -391,14 +390,14 @@ nodeunitShim({ }; // THEN - test.deepEqual(resolve(struct), { + expect(resolve(struct)).toEqual({ XYZ: { Ref: 'Other' }, }); - test.done(); - }, - 'cannot add to encoded list'(test: Test) { + }); + + test('cannot add to encoded list', () => { // GIVEN const token = new Intrinsic({ Ref: 'Other' }); @@ -407,14 +406,14 @@ nodeunitShim({ encoded.push('hello'); // THEN - test.throws(() => { + expect(() => { resolve(encoded); - }, /Cannot add elements to list token/); + }).toThrow(/Cannot add elements to list token/); + - test.done(); - }, + }); - 'cannot add to strings in encoded list'(test: Test) { + test('cannot add to strings in encoded list', () => { // GIVEN const token = new Intrinsic({ Ref: 'Other' }); @@ -423,14 +422,14 @@ nodeunitShim({ encoded[0] += 'hello'; // THEN - test.throws(() => { + expect(() => { resolve(encoded); - }, /concatenate strings in/); + }).toThrow(/concatenate strings in/); - test.done(); - }, - 'can pass encoded lists to FnSelect'(test: Test) { + }); + + test('can pass encoded lists to FnSelect', () => { // GIVEN const encoded: string[] = Token.asList(new Intrinsic({ Ref: 'Other' })); @@ -438,14 +437,14 @@ nodeunitShim({ const struct = Fn.select(1, encoded); // THEN - test.deepEqual(resolve(struct), { + expect(resolve(struct)).toEqual({ 'Fn::Select': [1, { Ref: 'Other' }], }); - test.done(); - }, - 'can pass encoded lists to FnJoin'(test: Test) { + }); + + test('can pass encoded lists to FnJoin', () => { // GIVEN const encoded: string[] = Token.asList(new Intrinsic({ Ref: 'Other' })); @@ -453,14 +452,14 @@ nodeunitShim({ const struct = Fn.join('/', encoded); // THEN - test.deepEqual(resolve(struct), { + expect(resolve(struct)).toEqual({ 'Fn::Join': ['/', { Ref: 'Other' }], }); - test.done(); - }, - 'can pass encoded lists to FnJoin, even if join is stringified'(test: Test) { + }); + + test('can pass encoded lists to FnJoin, even if join is stringified', () => { // GIVEN const encoded: string[] = Token.asList(new Intrinsic({ Ref: 'Other' })); @@ -468,33 +467,33 @@ nodeunitShim({ const struct = Fn.join('/', encoded).toString(); // THEN - test.deepEqual(resolve(struct), { + expect(resolve(struct)).toEqual({ 'Fn::Join': ['/', { Ref: 'Other' }], }); - test.done(); - }, - 'detect and error when list token values are illegally extracted'(test: Test) { + }); + + test('detect and error when list token values are illegally extracted', () => { // GIVEN const encoded: string[] = Token.asList({ Ref: 'Other' }); // THEN - test.throws(() => { + expect(() => { resolve({ value: encoded[0] }); - }, /Found an encoded list/); + }).toThrow(/Found an encoded list/); - test.done(); - }, - }, - 'number encoding': { - 'basic integer encoding works'(test: Test) { - test.equal(16, extractTokenDouble(createTokenDouble(16))); - test.done(); - }, + }); + }); - 'arbitrary integers can be encoded, stringified, and recovered'(test: Test) { + describe('number encoding', () => { + test('basic integer encoding works', () => { + expect(16).toEqual(extractTokenDouble(createTokenDouble(16))); + + }); + + test('arbitrary integers can be encoded, stringified, and recovered', () => { for (let i = 0; i < 100; i++) { // We can encode all numbers up to 2^48-1 const x = Math.floor(Math.random() * (Math.pow(2, 48) - 1)); @@ -503,38 +502,38 @@ nodeunitShim({ // Roundtrip through JSONification const roundtripped = JSON.parse(JSON.stringify({ theNumber: encoded })).theNumber; const decoded = extractTokenDouble(roundtripped); - test.equal(decoded, x, 
`Fail roundtrip encoding of ${x}`);
+      expect(decoded).toEqual(x);
       }
 
-      test.done();
-    },
 
-    'arbitrary numbers are correctly detected as not being tokens'(test: Test) {
-      test.equal(undefined, extractTokenDouble(0));
-      test.equal(undefined, extractTokenDouble(1243));
-      test.equal(undefined, extractTokenDouble(4835e+532));
+    });
+
+    test('arbitrary numbers are correctly detected as not being tokens', () => {
+      expect(undefined).toEqual(extractTokenDouble(0));
+      expect(undefined).toEqual(extractTokenDouble(1243));
+      expect(undefined).toEqual(extractTokenDouble(4835e+532));
+
 
-      test.done();
-    },
+    });
 
-    'can number-encode and resolve Token objects'(test: Test) {
+    test('can number-encode and resolve Token objects', () => {
       // GIVEN
       const x = new Intrinsic( 123);
 
       // THEN
       const encoded = Token.asNumber(x);
-      test.equal(false, isResolvableObject(encoded), 'encoded number does not test as token');
-      test.equal(true, Token.isUnresolved(encoded), 'encoded number does not test as token');
+      expect(false).toEqual(isResolvableObject(encoded));
+      expect(true).toEqual(Token.isUnresolved(encoded));
 
       // THEN
       const resolved = resolve({ value: encoded });
-      test.deepEqual(resolved, { value: 123 });
+      expect(resolved).toEqual({ value: 123 });
+
 
-      test.done();
-    },
-  },
+    });
+  });
 
-  'stack trace is captured at token creation'(test: Test) {
+  test('stack trace is captured at token creation', () => {
    function fn1() {
      function fn2() {
        class ExposeTrace extends Intrinsic {
@@ -552,12 +551,12 @@ nodeunitShim({
    const previousValue = reEnableStackTraceCollection();
    const token = fn1();
    restoreStackTraceColection(previousValue);
-    test.ok(token.creationTrace.find(x => x.includes('fn1')));
-    test.ok(token.creationTrace.find(x => x.includes('fn2')));
-    test.done();
-  },
+    expect(token.creationTrace.find(x => x.includes('fn1'))).toBeDefined();
+    expect(token.creationTrace.find(x => x.includes('fn2'))).toBeDefined();
+
+  });
 
-  'newError returns an error with the creation stack trace'(test: Test) {
+  test('newError returns an error with the creation stack trace', () => {
    function fn1() {
      function fn2() {
        function fn3() {
@@ -577,12 +576,11 @@ nodeunitShim({
    const previousValue = reEnableStackTraceCollection();
    const token = fn1();
    restoreStackTraceColection(previousValue);
-    test.throws(() => token.throwError('message!'), /Token created:/);
-    test.done();
-  },
+    expect(() => token.throwError('message!')).toThrow(/Token created:/);
+
+  });
 
-  'type coercion': (() => {
-    const tests: any = { };
+  describe('type coercion', () => {
 
    const inputs = [
      'a string',
@@ -601,56 +599,55 @@ nodeunitShim({
      // THEN
      const expected = input;
 
-      tests[`${input}.toNumber()`] = (test: Test) => {
-        test.deepEqual(resolve(Token.asNumber(new Intrinsic(stringToken))), expected);
-        test.done();
-      };
+      test(`${input}.toNumber()`, () => {
+        expect(resolve(Token.asNumber(new Intrinsic(stringToken)))).toEqual(expected);
 
-      tests[`${input}.toNumber()`] = (test: Test) => {
-        test.deepEqual(resolve(Token.asNumber(new Intrinsic(listToken))), expected);
-        test.done();
-      };
+      });
 
-      tests[`${input}.toNumber()`] = (test: Test) => {
-        test.deepEqual(resolve(Token.asNumber(new Intrinsic(numberToken))), expected);
-        test.done();
-      };
+      test(`${input}.toNumber()`, () => {
+        expect(resolve(Token.asNumber(new Intrinsic(listToken)))).toEqual(expected);
 
-      tests[`${input}.toString()`] = (test: Test) => {
-        test.deepEqual(resolve(new Intrinsic(stringToken).toString()), expected);
-        test.done();
-      };
+      });
 
-      tests[`${input}.toString()`] = (test: Test) => {
-        
test.deepEqual(resolve(new Intrinsic(listToken).toString()), expected); - test.done(); - }; + test(`${input}.toNumber()`, () => { + expect(resolve(Token.asNumber(new Intrinsic(numberToken)))).toEqual(expected); - tests[`${input}.toString()`] = (test: Test) => { - test.deepEqual(resolve(new Intrinsic(numberToken).toString()), expected); - test.done(); - }; + }); - tests[`${input}.toList()`] = (test: Test) => { - test.deepEqual(resolve(Token.asList(new Intrinsic(stringToken))), expected); - test.done(); - }; + test(`${input}.toString()`, () => { + expect(resolve(new Intrinsic(stringToken).toString())).toEqual(expected); - tests[`${input}.toList()`] = (test: Test) => { - test.deepEqual(resolve(Token.asList(new Intrinsic(listToken))), expected); - test.done(); - }; + }); - tests[`${input}.toList()`] = (test: Test) => { - test.deepEqual(resolve(Token.asList(new Intrinsic(numberToken))), expected); - test.done(); - }; + test(`${input}.toString()`, () => { + expect(resolve(new Intrinsic(listToken).toString())).toEqual(expected); + + }); + + test(`${input}.toString()`, () => { + expect(resolve(new Intrinsic(numberToken).toString())).toEqual(expected); + + }); + + test(`${input}.toList()`, () => { + expect(resolve(Token.asList(new Intrinsic(stringToken)))).toEqual(expected); + + }); + + test(`${input}.toList()`, () => { + expect(resolve(Token.asList(new Intrinsic(listToken)))).toEqual(expected); + + }); + + test(`${input}.toList()`, () => { + expect(resolve(Token.asList(new Intrinsic(numberToken)))).toEqual(expected); + + }); } - return tests; - })(), + }); - 'creation stack is attached to errors emitted during resolve with CDK_DEBUG=true'(test: Test) { + test('creation stack is attached to errors emitted during resolve with CDK_DEBUG=true', () => { function showMeInTheStackTrace() { return Lazy.string({ produce: () => { throw new Error('fooError'); } }); } @@ -663,16 +660,16 @@ nodeunitShim({ try { resolve(x); } catch (e) { - message = e.message; + message = (e as Error).message; } finally { process.env.CDK_DEBUG = previousValue; } - test.ok(message && message.includes('showMeInTheStackTrace')); - test.done(); - }, + expect(message && message.includes('showMeInTheStackTrace')).toEqual(true); - 'creation stack is omitted without CDK_DEBUG=true'(test: Test) { + }); + + test('creation stack is omitted without CDK_DEBUG=true', () => { function showMeInTheStackTrace() { return Lazy.stringValue({ produce: () => { throw new Error('fooError'); } }); } @@ -685,59 +682,59 @@ nodeunitShim({ try { resolve(x); } catch (e) { - message = e.message; + message = (e as Error).message; } finally { process.env.CDK_DEBUG = previousValue; } - test.ok(message && message.includes('Execute again with CDK_DEBUG=true')); - test.done(); - }, + expect(message && message.includes('Execute again with CDK_DEBUG=true')).toEqual(true); + + }); - 'stringifyNumber': { - 'converts number to string'(test: Test) { - test.equal(Tokenization.stringifyNumber(100), '100'); - test.done(); - }, + describe('stringifyNumber', () => { + test('converts number to string', () => { + expect(Tokenization.stringifyNumber(100)).toEqual('100'); - 'converts tokenized number to string'(test: Test) { - test.equal(resolve(Tokenization.stringifyNumber({ + }); + + test('converts tokenized number to string', () => { + expect(resolve(Tokenization.stringifyNumber({ resolve: () => 100, - } as any)), '100'); - test.done(); - }, + } as any))).toEqual('100'); + + }); - 'string remains the same'(test: Test) { - test.equal(Tokenization.stringifyNumber('123' as any), 
'123'); - test.done(); - }, + test('string remains the same', () => { + expect(Tokenization.stringifyNumber('123' as any)).toEqual('123'); - 'Ref remains the same'(test: Test) { + }); + + test('Ref remains the same', () => { const val = { Ref: 'SomeLogicalId' }; - test.deepEqual(Tokenization.stringifyNumber(val as any), val); - test.done(); - }, + expect(Tokenization.stringifyNumber(val as any)).toEqual(val); - 'lazy Ref remains the same'(test: Test) { + }); + + test('lazy Ref remains the same', () => { const resolvedVal = { Ref: 'SomeLogicalId' }; const tokenizedVal = Lazy.any({ produce: () => resolvedVal, }); const res = Tokenization.stringifyNumber(tokenizedVal as any) as any; - test.notDeepEqual(res, resolvedVal); - test.deepEqual(resolve(res), resolvedVal); - test.done(); - }, + expect(res).not.toEqual(resolvedVal); + expect(resolve(res)).toEqual(resolvedVal); + + }); - 'tokenized Ref remains the same'(test: Test) { + test('tokenized Ref remains the same', () => { const resolvedVal = { Ref: 'SomeLogicalId' }; const tokenizedVal = Token.asNumber(resolvedVal); const res = Tokenization.stringifyNumber(tokenizedVal) as any; - test.notDeepEqual(res, resolvedVal); - test.deepEqual(resolve(res), resolvedVal); - test.done(); - }, - }, + expect(res).not.toEqual(resolvedVal); + expect(resolve(res)).toEqual(resolvedVal); + + }); + }); }); class Promise2 implements IResolvable { diff --git a/packages/@aws-cdk/core/test/util.test.ts b/packages/@aws-cdk/core/test/util.test.ts index 3b9c33f14c9f4..e2074056dfa4f 100644 --- a/packages/@aws-cdk/core/test/util.test.ts +++ b/packages/@aws-cdk/core/test/util.test.ts @@ -1,106 +1,105 @@ -import { nodeunitShim, Test } from 'nodeunit-shim'; import { CfnResource, Construct, Stack } from '../lib'; import { capitalizePropertyNames, filterUndefined, findLastCommonElement, ignoreEmpty, pathToTopLevelStack } from '../lib/util'; -nodeunitShim({ - 'capitalizeResourceProperties capitalizes all keys of an object (recursively) from camelCase to PascalCase'(test: Test) { +describe('util', () => { + test('capitalizeResourceProperties capitalizes all keys of an object (recursively) from camelCase to PascalCase', () => { const c = new Stack(); - test.equal(capitalizePropertyNames(c, undefined), undefined); - test.equal(capitalizePropertyNames(c, 12), 12); - test.equal(capitalizePropertyNames(c, 'hello'), 'hello'); - test.deepEqual(capitalizePropertyNames(c, ['hello', 88]), ['hello', 88]); - test.deepEqual(capitalizePropertyNames(c, - { Hello: 'world', hey: 'dude' }), - { Hello: 'world', Hey: 'dude' }); - test.deepEqual(capitalizePropertyNames(c, - [1, 2, { three: 3 }]), - [1, 2, { Three: 3 }]); - test.deepEqual(capitalizePropertyNames(c, - { Hello: 'world', recursive: { foo: 123, there: { another: ['hello', { world: 123 }] } } }), - { Hello: 'world', Recursive: { Foo: 123, There: { Another: ['hello', { World: 123 }] } } }); + expect(capitalizePropertyNames(c, undefined)).toEqual(undefined); + expect(capitalizePropertyNames(c, 12)).toEqual(12); + expect(capitalizePropertyNames(c, 'hello')).toEqual('hello'); + expect(capitalizePropertyNames(c, ['hello', 88])).toEqual(['hello', 88]); + expect(capitalizePropertyNames(c, + { Hello: 'world', hey: 'dude' })).toEqual( + { Hello: 'world', Hey: 'dude' }); + expect(capitalizePropertyNames(c, + [1, 2, { three: 3 }])).toEqual( + [1, 2, { Three: 3 }]); + expect(capitalizePropertyNames(c, + { Hello: 'world', recursive: { foo: 123, there: { another: ['hello', { world: 123 }] } } })).toEqual( + { Hello: 'world', Recursive: { Foo: 123, 
There: { Another: ['hello', { World: 123 }] } } }); // make sure tokens are resolved and result is also capitalized - test.deepEqual(capitalizePropertyNames(c, - { hello: { resolve: () => ({ foo: 'bar' }) }, world: new SomeToken() }), - { Hello: { Foo: 'bar' }, World: 100 }); + expect(capitalizePropertyNames(c, + { hello: { resolve: () => ({ foo: 'bar' }) }, world: new SomeToken() })).toEqual( + { Hello: { Foo: 'bar' }, World: 100 }); - test.done(); - }, - ignoreEmpty: { + }); - '[]'(test: Test) { + describe('ignoreEmpty', () => { + + test('[]', () => { const stack = new Stack(); - test.strictEqual(stack.resolve(ignoreEmpty([])), undefined); - test.done(); - }, + expect(stack.resolve(ignoreEmpty([]))).toEqual(undefined); + + }); - '{}'(test: Test) { + test('{}', () => { const stack = new Stack(); - test.strictEqual(stack.resolve(ignoreEmpty({})), undefined); - test.done(); - }, + expect(stack.resolve(ignoreEmpty({}))).toEqual(undefined); - 'undefined/null'(test: Test) { + }); + + test('undefined/null', () => { const stack = new Stack(); - test.strictEqual(stack.resolve(ignoreEmpty(undefined)), undefined); - test.strictEqual(stack.resolve(ignoreEmpty(null)), null); - test.done(); - }, + expect(stack.resolve(ignoreEmpty(undefined))).toEqual(undefined); + expect(stack.resolve(ignoreEmpty(null))).toEqual(null); + + }); - 'primitives'(test: Test) { + test('primitives', () => { const stack = new Stack(); - test.strictEqual(stack.resolve(ignoreEmpty(12)), 12); - test.strictEqual(stack.resolve(ignoreEmpty('12')), '12'); - test.done(); - }, + expect(stack.resolve(ignoreEmpty(12))).toEqual(12); + expect(stack.resolve(ignoreEmpty('12'))).toEqual('12'); - 'non-empty arrays/objects'(test: Test) { + }); + + test('non-empty arrays/objects', () => { const stack = new Stack(); - test.deepEqual(stack.resolve(ignoreEmpty([1, 2, 3, undefined])), [1, 2, 3]); // undefined array values is cleaned up by "resolve" - test.deepEqual(stack.resolve(ignoreEmpty({ o: 1, b: 2, j: 3 })), { o: 1, b: 2, j: 3 }); - test.done(); - }, + expect(stack.resolve(ignoreEmpty([1, 2, 3, undefined]))).toEqual([1, 2, 3]); // undefined array values is cleaned up by "resolve" + expect(stack.resolve(ignoreEmpty({ o: 1, b: 2, j: 3 }))).toEqual({ o: 1, b: 2, j: 3 }); + + }); - 'resolve first'(test: Test) { + test('resolve first', () => { const stack = new Stack(); - test.deepEqual(stack.resolve(ignoreEmpty({ xoo: { resolve: () => 123 } })), { xoo: 123 }); - test.strictEqual(stack.resolve(ignoreEmpty({ xoo: { resolve: () => undefined } })), undefined); - test.deepEqual(stack.resolve(ignoreEmpty({ xoo: { resolve: () => [] } })), { xoo: [] }); - test.deepEqual(stack.resolve(ignoreEmpty({ xoo: { resolve: () => [undefined, undefined] } })), { xoo: [] }); - test.done(); - }, - }, - - filterUnderined: { - 'is null-safe (aka treats null and undefined the same)'(test: Test) { - test.deepEqual(filterUndefined({ 'a null': null, 'a not null': true }), { 'a not null': true }); - test.done(); - }, - - 'removes undefined, but leaves the rest'(test: Test) { - test.deepEqual(filterUndefined({ 'an undefined': undefined, 'yes': true }), { yes: true }); - test.done(); - }, - }, - - 'pathToTopLevelStack returns the array of stacks that lead to a stack'(test: Test) { + expect(stack.resolve(ignoreEmpty({ xoo: { resolve: () => 123 } }))).toEqual({ xoo: 123 }); + expect(stack.resolve(ignoreEmpty({ xoo: { resolve: () => undefined } }))).toEqual(undefined); + expect(stack.resolve(ignoreEmpty({ xoo: { resolve: () => [] } }))).toEqual({ xoo: [] }); + 
expect(stack.resolve(ignoreEmpty({ xoo: { resolve: () => [undefined, undefined] } }))).toEqual({ xoo: [] }); + + }); + }); + + describe('filterUnderined', () => { + test('is null-safe (aka treats null and undefined the same)', () => { + expect(filterUndefined({ 'a null': null, 'a not null': true })).toEqual({ 'a not null': true }); + + }); + + test('removes undefined, but leaves the rest', () => { + expect(filterUndefined({ 'an undefined': undefined, 'yes': true })).toEqual({ yes: true }); + + }); + }); + + test('pathToTopLevelStack returns the array of stacks that lead to a stack', () => { const a = new Stack(undefined, 'a'); const aa = new Nested(a, 'aa'); const aaa = new Nested(aa, 'aaa'); - test.deepEqual(path(aaa), ['a', 'aa', 'aaa']); - test.deepEqual(path(aa), ['a', 'aa']); - test.deepEqual(path(a), ['a']); - test.done(); + expect(path(aaa)).toEqual(['a', 'aa', 'aaa']); + expect(path(aa)).toEqual(['a', 'aa']); + expect(path(a)).toEqual(['a']); + function path(s: Stack) { return pathToTopLevelStack(s).map(x => x.node.id); } - }, + }); - 'findCommonStack returns the lowest common stack between two stacks or undefined'(test: Test) { + test('findCommonStack returns the lowest common stack between two stacks or undefined', () => { const a = new Stack(undefined, 'a'); const aa = new Nested(a, 'aa'); const ab = new Nested(a, 'ab'); @@ -112,29 +111,28 @@ nodeunitShim({ const ba = new Nested(b, 'ba'); const baa = new Nested(ba, 'baa'); - test.equal(lca(a, b), undefined); - test.equal(lca(aa, ab), 'a'); - test.equal(lca(ab, aa), 'a'); - test.equal(lca(aa, aba), 'a'); - test.equal(lca(aba, aa), 'a'); - test.equal(lca(ab, aba), 'ab'); - test.equal(lca(aba, ab), 'ab'); - test.equal(lca(aba, aba), 'aba'); - test.equal(lca(aa, aa), 'aa'); - test.equal(lca(a, aaa), 'a'); - test.equal(lca(aaa, aab), 'aa'); - test.equal(lca(aaa, b), undefined); - test.equal(lca(aaa, ba), undefined); - test.equal(lca(baa, ba), 'ba'); - - test.done(); + expect(lca(a, b)).toEqual(undefined); + expect(lca(aa, ab)).toEqual('a'); + expect(lca(ab, aa)).toEqual('a'); + expect(lca(aa, aba)).toEqual('a'); + expect(lca(aba, aa)).toEqual('a'); + expect(lca(ab, aba)).toEqual('ab'); + expect(lca(aba, ab)).toEqual('ab'); + expect(lca(aba, aba)).toEqual('aba'); + expect(lca(aa, aa)).toEqual('aa'); + expect(lca(a, aaa)).toEqual('a'); + expect(lca(aaa, aab)).toEqual('aa'); + expect(lca(aaa, b)).toEqual(undefined); + expect(lca(aaa, ba)).toEqual(undefined); + expect(lca(baa, ba)).toEqual('ba'); + function lca(s1: Stack, s2: Stack) { const res = findLastCommonElement(pathToTopLevelStack(s1), pathToTopLevelStack(s2)); if (!res) { return undefined; } return res.node.id; } - }, + }); }); class SomeToken { diff --git a/tools/nodeunit-shim/.gitignore b/tools/nodeunit-shim/.gitignore deleted file mode 100644 index 2d8e8a2d36377..0000000000000 --- a/tools/nodeunit-shim/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ -*.js -*.js.map -*.d.ts -dist - -.LAST_BUILD -*.snk -!jest.config.js - -.nyc_output -coverage -nyc.config.js -!.eslintrc.js \ No newline at end of file diff --git a/tools/nodeunit-shim/README.md b/tools/nodeunit-shim/README.md deleted file mode 100644 index 163e24b3b1eb7..0000000000000 --- a/tools/nodeunit-shim/README.md +++ /dev/null @@ -1,80 +0,0 @@ -nodeunit-shim -============== - -Tiny helper library to move from nodeunit tests to Jest. - -Why? ----- - -Jest tests have a better runner, better error reporting, better matchers, -and nicer syntax. They're just all around nicer. Plus, nodeunit has long -since been deprecated. 
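To make the mechanical translation concrete, here is a minimal before/after sketch of the conversion these patches apply, using the `stringifyNumber` case that appears in the `token.test.ts` diff above:

```ts
// before (nodeunit):
'converts number to string'(test: Test) {
  test.equal(Tokenization.stringifyNumber(100), '100');
  test.done();
},

// after (jest):
test('converts number to string', () => {
  expect(Tokenization.stringifyNumber(100)).toEqual('100');
});
```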
- -Rewriting our existing codebase of nodeunit tests is kind of a hassle though. -Therefore, a tiny adapter layer between the 2 APIs. - -How to use ----------- - -### Update package.json - -```json -"devDependencies": { - // Remove these: - "@types/nodeunit": "...", - "nodeunit": "...", - - // Add this - "nodeunit-shim": "0.0.0", -}, -"cdk-build": { - // Add this - "jest": true -} -``` - -### Get jest.config.js - -Copy a `jest.config.js` from another package. - -### Update .gitignore/.npmignore - -Run `yarn pkglint --fix`. - -### Rename tests - -Rename all test files `test.*.ts` -> `*.test.ts` (be sure to rename -the `.js` as well, or remove them). - -### Rewrite tests - -Inside every test file: - -Replace - -```ts -import { Test } from 'nodeunit'; -``` - -with - -```ts -import { nodeunitShim, Test } from 'nodeunit-shim'; -``` - -and replace: - -```ts -export = { - // ... -}; -``` - -with: - -```ts -nodeunitShim({ - // ... -}); -``` - diff --git a/tools/nodeunit-shim/index.ts b/tools/nodeunit-shim/index.ts deleted file mode 100644 index 79cd04f4e2e57..0000000000000 --- a/tools/nodeunit-shim/index.ts +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Jest/Nodeunit compatibility shim - * - * Use this to mass-convert Nodeunit tests to Jest tests. - */ - -/** - * Compatibility shim test - */ -export class Test { - constructor(private readonly cb: () => void) { - } - - public equal(actual: any, expected: any, _message?: string) { - expect(actual).toEqual(expected); - } - - public notEqual(actual: any, expected: any, _message?: string) { - expect(actual).not.toEqual(expected); - } - - public equals(actual: any, expected: any, _message?: string) { - expect(actual).toEqual(expected); - } - - public strictEqual(actual: any, expected: any, _message?: string) { - expect(actual).toEqual(expected); - } - - public deepEqual(actual: any, expected: any, _message?: string) { - expect(actual).toEqual(expected); - } - - public notDeepEqual(actual: any, expected: any, _message?: string) { - expect(actual).not.toEqual(expected); - } - - public ok(actual: any, _message?: string) { - expect(actual).toBeTruthy(); - } - - public same(actual: any, expected: any) { - expect(actual).toBe(expected); - } - - public throws(block: () => any, error?: string | RegExp | ErrorConstructor, _message?: string) { - expect(block).toThrow(error); - } - - public doesNotThrow(block: () => any, error?: string | RegExp | ErrorConstructor, _message?: string) { - expect(block).not.toThrow(error); - } - - public done() { - this.cb(); - } -} - -export function nodeunitShim(exports: Record) { - if (exports.setUp) { - beforeEach(() => { - return new Promise(ok => { - exports.setUp(ok); - }); - }); - } - if (exports.tearDown) { - afterEach(() => { - return new Promise(ok => { - exports.tearDown(ok); - }); - }); - } - for (const [testName, testObj] of Object.entries(exports)) { - if (testName === 'setUp' || testName === 'tearDown') { continue; } - - if (typeof testObj === 'object') { - // It's a suite - describe(testName, () => { - nodeunitShim(testObj); - }); - } else { - // It's a test - test(testName, () => new Promise(ok => { - testObj(new Test(ok)); - })); - } - } -} - -type ErrorConstructor = new (...args: any[]) => Error; diff --git a/tools/nodeunit-shim/package.json b/tools/nodeunit-shim/package.json deleted file mode 100644 index 2c2814cc08a08..0000000000000 --- a/tools/nodeunit-shim/package.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "name": "nodeunit-shim", - "private": true, - "version": "0.0.0", - "description": "A helper package to migrate 
tests from nodeunit to Jest", - "main": "index.js", - "scripts": { - "build": "tsc", - "watch": "tsc -w", - "test": "echo No tests", - "build+test+package": "npm run build+test", - "build+test": "npm run build && npm test", - "build+test+extract": "npm run build+test", - "build+extract": "npm run build" - }, - "devDependencies": { - "@types/jest": "^26.0.24", - "@types/node": "^10.17.60", - "typescript": "~3.9.10" - }, - "dependencies": { - "jest": "^26.6.3" - }, - "keywords": [], - "author": "", - "license": "ISC" -} diff --git a/tools/nodeunit-shim/tsconfig.json b/tools/nodeunit-shim/tsconfig.json deleted file mode 100644 index 14499cd2abfaf..0000000000000 --- a/tools/nodeunit-shim/tsconfig.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "compilerOptions": { - "target": "ES2018", - "module": "commonjs", - "lib": ["es2018"], - "strict": true, - "alwaysStrict": true, - "declaration": true, - "inlineSourceMap": true, - "inlineSources": true, - "noUnusedLocals": true, - "noUnusedParameters": true, - "noImplicitReturns": true, - "noFallthroughCasesInSwitch": true, - "resolveJsonModule": true, - "composite": true, - "incremental": true - }, - "include": ["**/*.ts"] -} From c2852c9c524a639a312bf296f7f23b0e3b112f6b Mon Sep 17 00:00:00 2001 From: Philipp Garbe Date: Tue, 7 Sep 2021 15:26:31 +0200 Subject: [PATCH 03/41] fix(assets): run executable command of container assets in cloud assembly root directory (#16094) Runs the executable command of container assets in the cloud assembly root directory and not the current directory. Could be a breaking change. fixes #15721 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../cdk-assets/lib/private/handlers/container-images.ts | 8 +++++--- packages/cdk-assets/test/docker-images.test.ts | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/packages/cdk-assets/lib/private/handlers/container-images.ts b/packages/cdk-assets/lib/private/handlers/container-images.ts index ec4327775c37c..d2fd7373eb1cf 100644 --- a/packages/cdk-assets/lib/private/handlers/container-images.ts +++ b/packages/cdk-assets/lib/private/handlers/container-images.ts @@ -83,14 +83,16 @@ export class ContainerImageAssetHandler implements IAssetHandler { * External command is responsible for deduplicating the build if possible, * and is expected to return the generated image identifier on stdout. */ - private async buildExternalAsset(executable: string[]): Promise { + private async buildExternalAsset(executable: string[], cwd?: string): Promise { + + const assetPath = cwd ?? 
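+    // when provided, `cwd` is the cloud assembly root directory in which
+    // the executable should run; otherwise fall back to this handler's
+    // work directory: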
this.workDir; + this.host.emitMessage(EventType.BUILD, `Building Docker image using command '${executable}'`); if (this.host.aborted) { return undefined; } - return (await shell(executable, { quiet: true })).trim(); + return (await shell(executable, { cwd: assetPath, quiet: true })).trim(); } - /** * Check whether the image already exists in the ECR repo * diff --git a/packages/cdk-assets/test/docker-images.test.ts b/packages/cdk-assets/test/docker-images.test.ts index 1f36b88025725..cfbb8fc70379b 100644 --- a/packages/cdk-assets/test/docker-images.test.ts +++ b/packages/cdk-assets/test/docker-images.test.ts @@ -183,7 +183,7 @@ describe('external assets', () => { const expectAllSpawns = mockSpawn( { commandLine: ['docker', 'login', '--username', 'user', '--password-stdin', 'https://proxy.com/'] }, - { commandLine: ['sometool'], stdout: externalTag }, + { commandLine: ['sometool'], stdout: externalTag, cwd: '/external/cdk.out' }, { commandLine: ['docker', 'tag', externalTag, '12345.amazonaws.com/repo:ghijkl'] }, { commandLine: ['docker', 'push', '12345.amazonaws.com/repo:ghijkl'] }, ); From 492d33b27bc5b935e3da75f0bddd875bb6f9c15d Mon Sep 17 00:00:00 2001 From: Julian Michel Date: Tue, 7 Sep 2021 16:08:08 +0200 Subject: [PATCH 04/41] fix(autoscaling): EbsDeviceVolumeType.IO2 is not a valid CloudFormation value (#16028) Remove value IO2 from enum EbsDeviceVolumeType because it is not supported in CloudFormation. Fixes #16027. Please double-check before approving: Value from enum is removed which could potentially be a breaking change. However, it's an invalid value. Therefore, it should be correct to remove it. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- allowed-breaking-changes.txt | 4 ++++ packages/@aws-cdk/aws-autoscaling/lib/volume.ts | 5 ----- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/allowed-breaking-changes.txt b/allowed-breaking-changes.txt index 47f19a6719d17..6def9776b6dc9 100644 --- a/allowed-breaking-changes.txt +++ b/allowed-breaking-changes.txt @@ -74,3 +74,7 @@ removed:@aws-cdk/aws-stepfunctions-tasks.BatchSubmitJobProps.jobDefinition strengthened:@aws-cdk/aws-stepfunctions-tasks.BatchSubmitJobProps removed:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps.cluster strengthened:@aws-cdk/aws-lambda-event-sources.ManagedKafkaEventSourceProps + +# Remove IO2 from autoscaling EbsDeviceVolumeType. This value is not supported +# at the moment and was not supported in the past. +removed:@aws-cdk/aws-autoscaling.EbsDeviceVolumeType.IO2 \ No newline at end of file diff --git a/packages/@aws-cdk/aws-autoscaling/lib/volume.ts b/packages/@aws-cdk/aws-autoscaling/lib/volume.ts index b22bb2c98e3f8..cbe08bac7c6ab 100644 --- a/packages/@aws-cdk/aws-autoscaling/lib/volume.ts +++ b/packages/@aws-cdk/aws-autoscaling/lib/volume.ts @@ -182,11 +182,6 @@ export enum EbsDeviceVolumeType { */ IO1 = 'io1', - /** - * Provisioned IOPS SSD - IO2 - */ - IO2 = 'io2', - /** * General Purpose SSD - GP2 */ From c8bfcf650070a0138b148645f997f542431f70cf Mon Sep 17 00:00:00 2001 From: Mark Nielsen Date: Tue, 7 Sep 2021 07:50:16 -0700 Subject: [PATCH 05/41] fix(iam): permissions boundary aspect doesn't always recognize roles (#16154) Using `instanceof` does not seem to work in all scenarios. Instead, use the `CfnResource.isCfnResource` method to find the L1 constructs. 
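For illustration, a minimal sketch of the duck-typed check this change switches to (all names appear in the diff below; the multi-copy scenario is one plausible way `instanceof` breaks — if a dependency tree contains two copies of `@aws-cdk/aws-iam`, a role built by one copy is never `instanceof` the other copy's `CfnRole`):

```ts
import { CfnResource, IConstruct } from '@aws-cdk/core';
import { CfnRole, CfnUser } from '@aws-cdk/aws-iam';

function isRoleOrUser(node: IConstruct): node is CfnResource {
  // CfnResource.isCfnResource uses a type marker rather than the prototype
  // chain, so it recognizes L1 constructs from any copy of the library.
  return CfnResource.isCfnResource(node)
    && (node.cfnResourceType === CfnRole.CFN_RESOURCE_TYPE_NAME
      || node.cfnResourceType === CfnUser.CFN_RESOURCE_TYPE_NAME);
}
```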
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-iam/lib/permissions-boundary.ts | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/packages/@aws-cdk/aws-iam/lib/permissions-boundary.ts b/packages/@aws-cdk/aws-iam/lib/permissions-boundary.ts index 94964918b8658..c1a3dde69a026 100644 --- a/packages/@aws-cdk/aws-iam/lib/permissions-boundary.ts +++ b/packages/@aws-cdk/aws-iam/lib/permissions-boundary.ts @@ -33,10 +33,8 @@ export class PermissionsBoundary { public apply(boundaryPolicy: IManagedPolicy) { Node.of(this.scope).applyAspect({ visit(node: IConstruct) { - if (node instanceof CfnRole || node instanceof CfnUser) { - node.permissionsBoundary = boundaryPolicy.managedPolicyArn; - } else if ( - node instanceof CfnResource && + if ( + CfnResource.isCfnResource(node) && (node.cfnResourceType == CfnRole.CFN_RESOURCE_TYPE_NAME || node.cfnResourceType == CfnUser.CFN_RESOURCE_TYPE_NAME) ) { node.addPropertyOverride('PermissionsBoundary', boundaryPolicy.managedPolicyArn); @@ -51,10 +49,8 @@ export class PermissionsBoundary { public clear() { Node.of(this.scope).applyAspect({ visit(node: IConstruct) { - if (node instanceof CfnRole || node instanceof CfnUser) { - node.permissionsBoundary = undefined; - } else if ( - node instanceof CfnResource && + if ( + CfnResource.isCfnResource(node) && (node.cfnResourceType == CfnRole.CFN_RESOURCE_TYPE_NAME || node.cfnResourceType == CfnUser.CFN_RESOURCE_TYPE_NAME) ) { node.addPropertyDeletionOverride('PermissionsBoundary'); From 816a31984b5c6e08c4c7dd740919e0c1f5d0e196 Mon Sep 17 00:00:00 2001 From: vincent-turato <39069200+vincent-turato@users.noreply.github.com> Date: Tue, 7 Sep 2021 08:31:54 -0700 Subject: [PATCH 06/41] feat(config): EC2_INSTANCE_PROFILE_ATTACHED managed rule (#16011) > Currently [ManagedRuleIdentifiers](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-config.ManagedRuleIdentifiers.html) doesn't support identifier for ec2-instance-profile-attached managed rule (EC2_INSTANCE_PROFILE_ATTACHED). The documentation for this rule is [here](https://docs.aws.amazon.com/config/latest/developerguide/ec2-instance-profile-attached.html). Copied from: https://github.com/aws/aws-cdk/issues/15898 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-config/lib/rule.ts | 7 +++++++ .../aws-config/test/test.managed-rules.ts | 20 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/packages/@aws-cdk/aws-config/lib/rule.ts b/packages/@aws-cdk/aws-config/lib/rule.ts index 2b6ad35d08859..1551d23e6d0c9 100644 --- a/packages/@aws-cdk/aws-config/lib/rule.ts +++ b/packages/@aws-cdk/aws-config/lib/rule.ts @@ -682,6 +682,13 @@ export class ManagedRuleIdentifiers { * @see https://docs.aws.amazon.com/config/latest/developerguide/ec2-instance-managed-by-systems-manager.html */ public static readonly EC2_INSTANCE_MANAGED_BY_SSM = 'EC2_INSTANCE_MANAGED_BY_SSM'; + /** + * Checks if an Amazon Elastic Compute Cloud (Amazon EC2) instance has an Identity and Access + * Management (IAM) profile attached to it. This rule is NON_COMPLIANT if no IAM profile is + * attached to the Amazon EC2 instance. 
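+   *
+   * For example (usage mirrored from the unit test added in this change;
+   * `stack` is assumed to be an existing `Stack`):
+   *
+   * ```
+   * new config.ManagedRule(stack, 'Ec2InstanceProfileAttached', {
+   *   identifier: config.ManagedRuleIdentifiers.EC2_INSTANCE_PROFILE_ATTACHED,
+   * });
+   * ```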
+ * @see https://docs.aws.amazon.com/config/latest/developerguide/ec2-instance-profile-attached.html + */ + public static readonly EC2_INSTANCE_PROFILE_ATTACHED = 'EC2_INSTANCE_PROFILE_ATTACHED'; /** * Checks whether Amazon Elastic Compute Cloud (Amazon EC2) instances have a public IP association. * @see https://docs.aws.amazon.com/config/latest/developerguide/ec2-instance-no-public-ip.html diff --git a/packages/@aws-cdk/aws-config/test/test.managed-rules.ts b/packages/@aws-cdk/aws-config/test/test.managed-rules.ts index 76e0790be3313..2c6c9c764fcdd 100644 --- a/packages/@aws-cdk/aws-config/test/test.managed-rules.ts +++ b/packages/@aws-cdk/aws-config/test/test.managed-rules.ts @@ -130,4 +130,24 @@ export = { test.done(); }, + + 'ec2 instance profile attached check'(test: Test) { + // GIVEN + const stack = new cdk.Stack(); + + // WHEN + new config.ManagedRule(stack, 'Ec2InstanceProfileAttached', { + identifier: config.ManagedRuleIdentifiers.EC2_INSTANCE_PROFILE_ATTACHED, + }); + + // THEN + expect(stack).to(haveResource('AWS::Config::ConfigRule', { + Source: { + Owner: 'AWS', + SourceIdentifier: config.ManagedRuleIdentifiers.EC2_INSTANCE_PROFILE_ATTACHED, + }, + })); + + test.done(); + }, }; From 86e63f8f33a386f5d2e4b51d4c0c3e55a3e6d611 Mon Sep 17 00:00:00 2001 From: Otavio Macedo Date: Tue, 7 Sep 2021 17:53:10 +0100 Subject: [PATCH 07/41] chore: bringing the changes from v2-main to master related to the experimental modules (#16401) Bringing the changes from `v2-main` to `master` related to the experimental modules. These changes were made directly in `v2-main` because the version of the `@aws-cdk/assert` library compatible with `aws-cdk-lib` only existed in `v2-main`. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- buildspec-pr.yaml | 4 +- buildspec.yaml | 1 + package.json | 10 + packages/@aws-cdk/assertions/vendor-in.sh | 12 +- .../test/integ.s3-bucket.lit.ts | 1 + packages/aws-cdk-migration/lib/rewrite.ts | 31 ++- packages/individual-packages/.gitignore | 1 + packages/individual-packages/README.md | 5 + packages/individual-packages/lerna.json | 9 + packages/individual-packages/package.json | 8 + scripts/transform.sh | 70 +++++ tools/individual-pkg-gen/.eslintrc.js | 3 + tools/individual-pkg-gen/.gitignore | 15 ++ tools/individual-pkg-gen/.npmignore | 16 ++ tools/individual-pkg-gen/LICENSE | 201 +++++++++++++++ tools/individual-pkg-gen/NOTICE | 2 + tools/individual-pkg-gen/README.md | 21 ++ .../individual-pkg-gen/bin/individual-pkg-gen | 8 + .../copy-files-removing-deps.ts | 244 ++++++++++++++++++ tools/individual-pkg-gen/jest.config.js | 10 + tools/individual-pkg-gen/package.json | 53 ++++ .../restore-package-jsons.ts | 20 ++ tools/individual-pkg-gen/test/gen.test.ts | 3 + tools/individual-pkg-gen/tsconfig.json | 20 ++ tools/pkglint/lib/rules.ts | 16 +- tools/pkglint/lib/util.ts | 2 +- 26 files changed, 773 insertions(+), 13 deletions(-) create mode 100644 packages/individual-packages/.gitignore create mode 100644 packages/individual-packages/README.md create mode 100644 packages/individual-packages/lerna.json create mode 100644 packages/individual-packages/package.json create mode 100755 scripts/transform.sh create mode 100644 tools/individual-pkg-gen/.eslintrc.js create mode 100644 tools/individual-pkg-gen/.gitignore create mode 100644 tools/individual-pkg-gen/.npmignore create mode 100644 tools/individual-pkg-gen/LICENSE create mode 100644 tools/individual-pkg-gen/NOTICE create mode 100644 
tools/individual-pkg-gen/README.md create mode 100755 tools/individual-pkg-gen/bin/individual-pkg-gen create mode 100644 tools/individual-pkg-gen/copy-files-removing-deps.ts create mode 100644 tools/individual-pkg-gen/jest.config.js create mode 100644 tools/individual-pkg-gen/package.json create mode 100644 tools/individual-pkg-gen/restore-package-jsons.ts create mode 100644 tools/individual-pkg-gen/test/gen.test.ts create mode 100644 tools/individual-pkg-gen/tsconfig.json diff --git a/buildspec-pr.yaml b/buildspec-pr.yaml index 017c7d84e9f61..3d6ee87c8d1ff 100644 --- a/buildspec-pr.yaml +++ b/buildspec-pr.yaml @@ -14,4 +14,6 @@ phases: - yarn --version || npm -g install yarn build: commands: - - /bin/bash ./build.sh --extract && git diff-index --exit-code --ignore-space-at-eol --stat HEAD + - /bin/bash ./build.sh --extract + - /bin/bash ./scripts/transform.sh --extract + - git diff-index --exit-code --ignore-space-at-eol --stat HEAD diff --git a/buildspec.yaml b/buildspec.yaml index 429c5719e05c3..3f2bb4e7e1102 100644 --- a/buildspec.yaml +++ b/buildspec.yaml @@ -17,6 +17,7 @@ phases: - 'if ${BUMP_CANDIDATE:-false}; then /bin/bash ./scripts/bump-candidate.sh; fi' - /bin/bash ./scripts/align-version.sh - /bin/bash ./build.sh + - /bin/bash ./scripts/transform.sh post_build: commands: - "[ -f .BUILD_COMPLETED ] && /bin/bash ./pack.sh" diff --git a/package.json b/package.json index 383ddf1c8e4c5..029b1989ea072 100644 --- a/package.json +++ b/package.json @@ -71,6 +71,16 @@ "nohoist": [ "**/jszip", "**/jszip/**", + "@aws-cdk/assertions-alpha/colors", + "@aws-cdk/assertions-alpha/colors/**", + "@aws-cdk/assertions-alpha/diff", + "@aws-cdk/assertions-alpha/diff/**", + "@aws-cdk/assertions-alpha/fast-deep-equal", + "@aws-cdk/assertions-alpha/fast-deep-equal/**", + "@aws-cdk/assertions-alpha/string-width", + "@aws-cdk/assertions-alpha/string-width/**", + "@aws-cdk/assertions-alpha/table", + "@aws-cdk/assertions-alpha/table/**", "@aws-cdk/assertions/colors", "@aws-cdk/assertions/colors/**", "@aws-cdk/assertions/diff", diff --git a/packages/@aws-cdk/assertions/vendor-in.sh b/packages/@aws-cdk/assertions/vendor-in.sh index 9cf5fcfc29d01..803ff1a7a28ff 100755 --- a/packages/@aws-cdk/assertions/vendor-in.sh +++ b/packages/@aws-cdk/assertions/vendor-in.sh @@ -17,6 +17,16 @@ echo "⏳ Vendoring in modules..." scriptdir=$(cd $(dirname $0) && pwd) cd $scriptdir + +if [[ "$PHASE" == "transform" ]]; then + # Make the script short-circuit when running in the packages/individual-packages build context. + # That's required because the build done by individual-pkg-gen does import re-writing when copying the TS files + # (because it needs to know which modules are also unstable to do the rewriting correctly), + # but the vendor.sh script runs only after the 'build' script for this package has been invoked, + # which means any TS files copied by it successfully would not have their imports re-written. + exit 0 +fi + set -euo pipefail dest="lib/vendored" mkdir -p $dest @@ -44,4 +54,4 @@ They also cannot be bundled since they are part of the monorepo. Instead vendor them directly into the assertv2 library. 
EOF -echo "✅ Vendoring complete" \ No newline at end of file +echo "✅ Vendoring complete" diff --git a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-bucket.lit.ts b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-bucket.lit.ts index 0527cbd26f7d7..5c6b35a748c98 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-bucket.lit.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose-destinations/test/integ.s3-bucket.lit.ts @@ -1,4 +1,5 @@ #!/usr/bin/env node +/// !cdk-integ pragma:ignore-assets import * as path from 'path'; import * as firehose from '@aws-cdk/aws-kinesisfirehose'; import * as kms from '@aws-cdk/aws-kms'; diff --git a/packages/aws-cdk-migration/lib/rewrite.ts b/packages/aws-cdk-migration/lib/rewrite.ts index f25ae7b6735f2..970627e361aa2 100644 --- a/packages/aws-cdk-migration/lib/rewrite.ts +++ b/packages/aws-cdk-migration/lib/rewrite.ts @@ -1,5 +1,15 @@ import * as ts from 'typescript'; +/** + * The options for rewriting the file. + */ +export interface RewriteOptions { + /** + * Optional module names that should result in replacing to something different than just 'aws-cdk-lib'. + */ + readonly customModules?: { [moduleName: string]: string }; +} + /** * Re-writes "hyper-modular" CDK imports (most packages in `@aws-cdk/*`) to the * relevant "mono" CDK import path. The re-writing will only modify the imported @@ -21,14 +31,14 @@ import * as ts from 'typescript'; * * @returns the updated source code. */ -export function rewriteImports(sourceText: string, fileName: string = 'index.ts'): string { +export function rewriteImports(sourceText: string, fileName: string = 'index.ts', options: RewriteOptions = {}): string { const sourceFile = ts.createSourceFile(fileName, sourceText, ts.ScriptTarget.ES2018); const replacements = new Array<{ original: ts.Node, updatedLocation: string }>(); const visitor = (node: T): ts.VisitResult => { const moduleSpecifier = getModuleSpecifier(node); - const newTarget = moduleSpecifier && updatedLocationOf(moduleSpecifier.text); + const newTarget = moduleSpecifier && updatedLocationOf(moduleSpecifier.text, options); if (moduleSpecifier != null && newTarget != null) { replacements.push({ original: moduleSpecifier, updatedLocation: newTarget }); @@ -92,11 +102,20 @@ const EXEMPTIONS = new Set([ '@aws-cdk/cloudformation-diff', ]); -function updatedLocationOf(modulePath: string): string | undefined { +function updatedLocationOf(modulePath: string, options: RewriteOptions): string | undefined { + const customModulePath = options.customModules?.[modulePath]; + if (customModulePath) { + return customModulePath; + } + if (!modulePath.startsWith('@aws-cdk/') || EXEMPTIONS.has(modulePath)) { return undefined; } + if (modulePath.startsWith('@aws-cdk/core/lib')) { + return `aws-cdk-lib/lib/core/lib/${modulePath.substring('@aws-cdk/core/lib/'.length)}`; + } + if (modulePath === '@aws-cdk/core') { return 'aws-cdk-lib'; } @@ -106,6 +125,12 @@ function updatedLocationOf(modulePath: string): string | undefined { return '@aws-cdk/assert'; } + // can't use simple equality here, + // because we have imports like "import '@aws-cdk/assert-internal/jest'" + if (modulePath.startsWith('@aws-cdk/assert-internal')) { + return modulePath.replace(/^@aws-cdk\/assert-internal/, '@aws-cdk/assert'); + } + if (modulePath === '@aws-cdk/assert/jest') { return '@aws-cdk/assert/jest'; } diff --git a/packages/individual-packages/.gitignore b/packages/individual-packages/.gitignore new file mode 100644 index 0000000000000..150f68c80f529 --- 
/dev/null +++ b/packages/individual-packages/.gitignore @@ -0,0 +1 @@ +*/* diff --git a/packages/individual-packages/README.md b/packages/individual-packages/README.md new file mode 100644 index 0000000000000..92537a80f3944 --- /dev/null +++ b/packages/individual-packages/README.md @@ -0,0 +1,5 @@ +In this directory we generate at build time all individual packages in V2 +that we release separately from `aws-cdk-lib` +(right now, those are the experimental and developer preview packages). + +The packages are generated by the [`individual-pkg-gen` tool](../../tools/individual-pkg-gen). diff --git a/packages/individual-packages/lerna.json b/packages/individual-packages/lerna.json new file mode 100644 index 0000000000000..2d9206447d084 --- /dev/null +++ b/packages/individual-packages/lerna.json @@ -0,0 +1,9 @@ +{ + "lerna": "3.15.0", + "npmClient": "yarn", + "packages": [ + "*" + ], + "rejectCycles": "true", + "version": "independent" +} diff --git a/packages/individual-packages/package.json b/packages/individual-packages/package.json new file mode 100644 index 0000000000000..48a5d51789dc5 --- /dev/null +++ b/packages/individual-packages/package.json @@ -0,0 +1,8 @@ +{ + "name": "individual-packages", + "version": "0.0.0", + "private": true, + "devDependencies": { + "lerna": "^4.0.0" + } +} diff --git a/scripts/transform.sh b/scripts/transform.sh new file mode 100755 index 0000000000000..bbbae4c275cf9 --- /dev/null +++ b/scripts/transform.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +# To run this script in development, first build the following packages: +# packages/@aws-cdk/assert +# packages/aws-cdk-lib +# tools/individual-pkg-gen + +set -euo pipefail +scriptdir=$(cd $(dirname $0) && pwd) + +# Creates a symlink in each individual package's node_modules folder pointing +# to the root folder's node_modules/.bin. This allows Yarn to find the executables +# it needs (e.g., jsii-rosetta) for the build. +# +# The reason Yarn doesn't find the executables in the first place is that they are +# not dependencies of each individual package -- nor should they be. They can't be +# found in the lerna workspace, either, since it only includes the individual +# packages. For potential alternatives to try out in the future, see +# https://github.com/cdklabs/cdk-ops/issues/1636 +createSymlinks() { + find "$1" ! 
-path "$1" -type d -maxdepth 1 \ + -exec mkdir -p {}/node_modules \; \ + -exec ln -sf "${scriptdir}"/../node_modules/.bin {}/node_modules \; +} + +runtarget="build" +run_tests="true" +extract_snippets="false" +skip_build="" +while [[ "${1:-}" != "" ]]; do + case $1 in + -h|--help) + echo "Usage: transform.sh [--skip-test/build] [--extract]" + exit 1 + ;; + --skip-test|--skip-tests) + run_tests="false" + ;; + --skip-build) + skip_build="true" + ;; + --extract) + extract_snippets="true" + ;; + *) + echo "Unrecognized options: $1" + exit 1 + ;; + esac + shift +done +if [ "$run_tests" == "true" ]; then + runtarget="$runtarget+test" +fi +if [ "$extract_snippets" == "true" ]; then + runtarget="$runtarget+extract" +fi + +export NODE_OPTIONS="--max-old-space-size=4096 --experimental-worker ${NODE_OPTIONS:-}" + +individual_packages_folder=${scriptdir}/../packages/individual-packages +# copy & build the packages that are individually released from 'aws-cdk-lib' +cd "$individual_packages_folder" +../../tools/individual-pkg-gen/bin/individual-pkg-gen + +createSymlinks "$individual_packages_folder" + +if [ "$skip_build" != "true" ]; then + PHASE=transform yarn lerna run --stream $runtarget +fi diff --git a/tools/individual-pkg-gen/.eslintrc.js b/tools/individual-pkg-gen/.eslintrc.js new file mode 100644 index 0000000000000..61dd8dd001f63 --- /dev/null +++ b/tools/individual-pkg-gen/.eslintrc.js @@ -0,0 +1,3 @@ +const baseConfig = require('cdk-build-tools/config/eslintrc'); +baseConfig.parserOptions.project = __dirname + '/tsconfig.json'; +module.exports = baseConfig; diff --git a/tools/individual-pkg-gen/.gitignore b/tools/individual-pkg-gen/.gitignore new file mode 100644 index 0000000000000..acdfee7f84c04 --- /dev/null +++ b/tools/individual-pkg-gen/.gitignore @@ -0,0 +1,15 @@ +*.js +node_modules +*.js.map +*.d.ts + +.LAST_BUILD +.nyc_output +coverage +nyc.config.js +*.snk +!.eslintrc.js + +junit.xml + +!jest.config.js \ No newline at end of file diff --git a/tools/individual-pkg-gen/.npmignore b/tools/individual-pkg-gen/.npmignore new file mode 100644 index 0000000000000..c480a1570dbe3 --- /dev/null +++ b/tools/individual-pkg-gen/.npmignore @@ -0,0 +1,16 @@ +# Don't include original .ts files when doing `npm pack` +*.ts +!*.d.ts +coverage +.nyc_output +*.tgz + +.LAST_BUILD +*.snk +.eslintrc.js + +# exclude cdk artifacts +**/cdk.out +junit.xml + +jest.config.js \ No newline at end of file diff --git a/tools/individual-pkg-gen/LICENSE b/tools/individual-pkg-gen/LICENSE new file mode 100644 index 0000000000000..28e4bdcec77ec --- /dev/null +++ b/tools/individual-pkg-gen/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/tools/individual-pkg-gen/NOTICE b/tools/individual-pkg-gen/NOTICE new file mode 100644 index 0000000000000..5fc3826926b5b --- /dev/null +++ b/tools/individual-pkg-gen/NOTICE @@ -0,0 +1,2 @@ +AWS Cloud Development Kit (AWS CDK) +Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
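Returning to `scripts/transform.sh` above: the `find ... -exec` one-liner in `createSymlinks` is dense, so here is a hedged TypeScript sketch of the equivalent logic (function and parameter names are illustrative only, not part of the repo):

```ts
import * as fs from 'fs';
import * as path from 'path';

// For each package directory directly under `root`, make sure node_modules
// exists and (re)point a node_modules/.bin symlink at the repo root's .bin,
// so Yarn can find executables like jsii-rosetta during the build.
function createSymlinks(root: string, repoRootBin: string): void {
  for (const entry of fs.readdirSync(root, { withFileTypes: true })) {
    if (!entry.isDirectory()) { continue; }
    const nodeModules = path.join(root, entry.name, 'node_modules');
    fs.mkdirSync(nodeModules, { recursive: true });
    const link = path.join(nodeModules, '.bin');
    try {
      fs.unlinkSync(link); // mimic `ln -sf`: replace an existing link
    } catch {
      // ignore: the link may not exist yet
    }
    fs.symlinkSync(repoRootBin, link);
  }
}
```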
diff --git a/tools/individual-pkg-gen/README.md b/tools/individual-pkg-gen/README.md new file mode 100644 index 0000000000000..a86d15f679815 --- /dev/null +++ b/tools/individual-pkg-gen/README.md @@ -0,0 +1,21 @@ +# `individual-pkg-gen` + +The tool contains the logic of the copying the packages we release individually, +instead of vending them inside `aws-cdk-lib`, +from their original V1 form in `packages/@aws-cdk/` to `packages/individual-packages`. +It's called from the [`transform.sh` script](../../scripts/transform.sh). + +We do the translation in 2 phases: + +1. Copy all files from the V1 versions of the modules, + and remove all dependencies from the packages besides other experimental ones. + Save the original dependencies in a `_package.json` files of the copied modules. + This is done in the [`copy-files-removing-deps.ts` file](copy-files-removing-deps.ts). +2. Run `lerna bootstrap`. +3. In phase 2, bring back the dependencies by renaming the `_package.json` files to `package.json`. + This is done in the [`restore-package-jsons.ts` file](restore-package-jsons.ts). + +We have to do it this way, +because otherwise `lerna bootstrap` would fail on the main monorepo packages like `cdk-build-tools`. + +The entrypoint of the package is the [`bin/individual-pkg-gen` file](bin/individual-pkg-gen). diff --git a/tools/individual-pkg-gen/bin/individual-pkg-gen b/tools/individual-pkg-gen/bin/individual-pkg-gen new file mode 100755 index 0000000000000..3a335a40c39f7 --- /dev/null +++ b/tools/individual-pkg-gen/bin/individual-pkg-gen @@ -0,0 +1,8 @@ +#!/bin/bash + +set -euo pipefail +scriptdir=$(cd $(dirname $0) && pwd) + +node $scriptdir/../copy-files-removing-deps.js +yarn lerna bootstrap +node $scriptdir/../restore-package-jsons.js diff --git a/tools/individual-pkg-gen/copy-files-removing-deps.ts b/tools/individual-pkg-gen/copy-files-removing-deps.ts new file mode 100644 index 0000000000000..e7576621492c2 --- /dev/null +++ b/tools/individual-pkg-gen/copy-files-removing-deps.ts @@ -0,0 +1,244 @@ +import * as path from 'path'; +import * as awsCdkMigration from 'aws-cdk-migration'; +import * as fs from 'fs-extra'; +// eslint-disable-next-line @typescript-eslint/no-require-imports +const lerna_project = require('@lerna/project'); + +transformPackages(); + +function transformPackages(): void { + // there is a lerna.json in the individual-packages directory, where this script executes + const project = new lerna_project.Project(__dirname); + const packages = project.getPackagesSync(); + const alphaPackages = getAlphaPackages(packages); + + for (const pkg of packages) { + if (!packageIsAlpha(pkg)) { + continue; + } + + const srcDir = pkg.location; + const packageUnscopedName = `${pkg.name.substring('@aws-cdk/'.length)}`; + const destDir = path.join('.', packageUnscopedName); + fs.mkdirpSync(destDir); + + copyOrTransformFiles(pkg, srcDir, destDir, [ + // list of files to _not_ copy from the V1 package root + // .gitignore is not on the list, because pkglint checks it + 'dist', + 'node_modules', + 'coverage', + '.nyc_output', + 'nyc.config.js', + '.jsii', + 'tsconfig.json', + 'tsconfig.tsbuildinfo', + ]); + } + + function copyOrTransformFiles(pkg: any, srcDir: string, destDir: string, ignoredFiles: string[]): void { + const sourceFiles = fs.readdirSync(srcDir); + for (const sourceFileName of sourceFiles) { + if (ignoredFiles.includes(sourceFileName)) { + continue; + } + + const source = path.join(srcDir, sourceFileName); + const destination = path.join(destDir, sourceFileName); + + if (srcDir === 
pkg.location && sourceFileName === 'package.json') { + // Only transform packageJsons at the package root, not in any nested packages. + transformPackageJson(pkg, source, destination, alphaPackages); + } else if (sourceFileName === '.gitignore') { + // ignore everything, otherwise there are uncommitted files present in testing, + // because the module's .gitignore file has entries like !.eslintrc.js + const gitIgnoreContents = fs.readFileSync(source); + fs.outputFileSync(destination, Buffer.concat([gitIgnoreContents, Buffer.from('\n*\n')])); + } else if (sourceFileName === '.eslintrc.js') { + // Change the default configuration of the import/no-extraneous-dependencies rule + // (as the unstable packages don't use direct dependencies, + // but instead a combination of devDependencies + peerDependencies) + const esLintRcLines = fs.readFileSync(source).toString().split('\n'); + const resultFileLines = []; + for (const line of esLintRcLines) { + resultFileLines.push(line); + // put our new line right after the parserOptions.project setting line, + // as some files export a copy of this object, + // in which case putting it at the end doesn't work + if (line.startsWith('baseConfig.parserOptions.project')) { + resultFileLines.push("\nbaseConfig.rules['import/no-extraneous-dependencies'] = ['error', " + + '{ devDependencies: true, peerDependencies: true } ];\n'); + } + } + fs.outputFileSync(destination, resultFileLines.join('\n')); + } else if (sourceFileName.endsWith('.ts') && !sourceFileName.endsWith('.d.ts')) { + const sourceCode = fs.readFileSync(source).toString(); + const sourceCodeOutput = awsCdkMigration.rewriteImports(sourceCode, sourceFileName, { + customModules: alphaPackages, + }); + fs.outputFileSync(destination, sourceCodeOutput); + } else { + const stat = fs.statSync(source); + if (stat.isDirectory()) { + // we only ignore files on the top level in the package, + // as some subdirectories we do want to copy over + // (for example, synthetics contains a node_modules/ in the test/ directory + // that is needed for running the tests) + copyOrTransformFiles(pkg, source, destination, []); + } else { + fs.copySync(source, destination); + } + } + } + } +} + +function transformPackageJson(pkg: any, source: string, destination: string, alphaPackages: { [dep: string]: string; }) { + const packageJson = fs.readJsonSync(source); + const pkgUnscopedName = `${pkg.name.substring('@aws-cdk/'.length)}`; + + packageJson.name += '-alpha'; + packageJson.repository.directory = `packages/individual-packages/${pkgUnscopedName}`; + + // disable awslint (some rules are hard-coded to @aws-cdk/core) + packageJson.awslint = { + exclude: ['*:*'], + }; + + // add a pkglint exemption for the 'package name = dir name' rule + const pkglint = packageJson.pkglint || {}; + pkglint.exclude = [ + ...(pkglint.exclude || []), + 'naming/package-matches-directory', + // the experimental packages need the "real" assert dependency + 'assert/assert-dependency', + ]; + packageJson.pkglint = pkglint; + + // turn off the L1 generation, which uses @aws-cdk/ modules + if (packageJson.scripts?.gen === 'cfn2ts') { + delete packageJson.scripts.gen; + } + + // https://github.com/aws/aws-cdk/issues/15576 + const jsiiTargets = packageJson.jsii.targets; + jsiiTargets.dotnet.namespace += '.Alpha'; + jsiiTargets.dotnet.packageId += '.Alpha'; + jsiiTargets.java.package += '.alpha'; + jsiiTargets.java.maven.artifactId += '-alpha'; + jsiiTargets.python.distName += '-alpha'; + jsiiTargets.python.module += '_alpha'; + // Typically, only our 
top-level packages have a Go target. + // moduleName is needed; packageName will be automatically derived by from the package name. + jsiiTargets.go = { + moduleName: 'github.com/aws/aws-cdk-go', + }; + + const finalPackageJson = transformPackageJsonDependencies(packageJson, pkg, alphaPackages); + + fs.writeJsonSync(destination, packageJson, { spaces: 2 }); + fs.writeJsonSync(path.join(path.dirname(destination), '_package.json'), finalPackageJson, { spaces: 2 }); +} + +function transformPackageJsonDependencies(packageJson: any, pkg: any, alphaPackages: { [dep: string]: string; }) { + // regular dependencies + const alphaDependencies: { [dep: string]: string; } = {}; + const constructsAndCdkLibDevDeps: { [dep: string]: string; } = {}; + const bundledDependencies: { [dep: string]: string } = {}; + const v1BundledDependencies: string[] = packageJson.bundledDependencies || []; + for (const dependency of Object.keys(packageJson.dependencies || {})) { + // all 'regular' dependencies on alpha modules will be converted to + // a pair of devDependency on '0.0.0' and peerDependency on '^0.0.0', + // and the package will have no regular dependencies anymore + switch (dependency) { + // @core corresponds to aws-cdk-lib + case '@aws-cdk/core': + constructsAndCdkLibDevDeps['aws-cdk-lib'] = pkg.version; + break; + case 'constructs': + constructsAndCdkLibDevDeps.constructs = packageJson.dependencies.constructs; + break; + default: + if (alphaPackages[dependency]) { + alphaDependencies[alphaPackages[dependency]] = pkg.version; + } else if (v1BundledDependencies.indexOf(dependency) !== -1) { + // ...other than third-party dependencies, which are in bundledDependencies + bundledDependencies[dependency] = packageJson.dependencies[dependency]; + } + } + } + packageJson.dependencies = bundledDependencies; + + // devDependencies + const alphaDevDependencies: { [dep: string]: string; } = {}; + const devDependencies: { [dep: string]: string; } = {}; + for (const v1DevDependency of Object.keys(packageJson.devDependencies || {})) { + switch (v1DevDependency) { + case '@aws-cdk/assert-internal': + case '@aws-cdk/assert': + devDependencies['@aws-cdk/assert'] = packageJson.devDependencies[v1DevDependency]; + break; + default: + if (alphaPackages[v1DevDependency]) { + alphaDevDependencies[alphaPackages[v1DevDependency]] = pkg.version; + } else if (!v1DevDependency.startsWith('@aws-cdk/')) { + devDependencies[v1DevDependency] = packageJson.devDependencies[v1DevDependency]; + } + } + } + const finalPackageJson = { ...packageJson }; + // we save the devDependencies in a temporary _package.json + finalPackageJson.devDependencies = { + ...devDependencies, + ...constructsAndCdkLibDevDeps, + ...alphaDevDependencies, + ...alphaDependencies, + }; + packageJson.devDependencies = { + ...alphaDevDependencies, + ...alphaDependencies, + }; + + // peer dependencies + finalPackageJson.peerDependencies = { + ...(Object.entries(alphaDependencies) + // for other alpha dependencies, we need to depend on exact versions + // (because of the braking changes between them) + .reduce((acc, [depName, depVersion]) => { + acc[depName] = depVersion; + return acc; + }, {} as { [dep: string]: string; })), + ...(Object.entries(constructsAndCdkLibDevDeps) + .reduce((acc, [depName, depVersion]) => { + acc[depName] = `${depVersion.startsWith('^') ? 
'' : '^'}${depVersion}`;
+        return acc;
+      }, {} as { [dep: string]: string; })),
+  };
+  packageJson.peerDependencies = undefined;
+  return finalPackageJson;
+}
+
+function getAlphaPackages(packages: any[]): { [dep: string]: string } {
+  return packages
+    .filter(packageIsAlpha)
+    .reduce((acc, pkg: any) => {
+      acc[pkg.name] = `${pkg.name}-alpha`;
+      return acc;
+    }, {});
+}
+
+function packageIsAlpha(pkg: any): boolean {
+  // allow modules to decide themselves whether they should be packaged separately
+  const separateModule = pkg.get('separate-module');
+  if (separateModule !== undefined) {
+    return separateModule;
+  }
+
+  const maturity = pkg.get('maturity');
+  if (maturity !== 'experimental' && maturity !== 'developer-preview') {
+    return false;
+  }
+  // we're only interested in '@aws-cdk/' packages,
+  // and those that are JSII-enabled (so no @aws-cdk/assert)
+  return pkg.name.startsWith('@aws-cdk/') && !!pkg.get('jsii');
+}
diff --git a/tools/individual-pkg-gen/jest.config.js b/tools/individual-pkg-gen/jest.config.js
new file mode 100644
index 0000000000000..07f5f6c432bb6
--- /dev/null
+++ b/tools/individual-pkg-gen/jest.config.js
@@ -0,0 +1,10 @@
+const baseConfig = require('../../tools/cdk-build-tools/config/jest.config');
+module.exports = {
+  ...baseConfig,
+  coverageThreshold: {
+    global: {
+      ...baseConfig.coverageThreshold.global,
+      branches: 60,
+    },
+  },
+};
diff --git a/tools/individual-pkg-gen/package.json b/tools/individual-pkg-gen/package.json
new file mode 100644
index 0000000000000..c28407673a32c
--- /dev/null
+++ b/tools/individual-pkg-gen/package.json
@@ -0,0 +1,53 @@
+{
+  "name": "individual-pkg-gen",
+  "version": "0.0.0",
+  "private": true,
+  "description": "A tool for copying V1 packages to be released separately in V2",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/aws/aws-cdk.git",
+    "directory": "tools/individual-pkg-gen"
+  },
+  "scripts": {
+    "build": "cdk-build",
+    "watch": "cdk-watch",
+    "lint": "cdk-lint",
+    "test": "cdk-test",
+    "pkglint": "pkglint -f",
+    "build+test+package": "yarn build+test",
+    "build+test": "yarn build && yarn test",
+    "build+extract": "yarn build",
+    "build+test+extract": "yarn build+test"
+  },
+  "author": {
+    "name": "Amazon Web Services",
+    "url": "https://aws.amazon.com",
+    "organization": true
+  },
+  "license": "Apache-2.0",
+  "devDependencies": {
+    "cdk-build-tools": "0.0.0",
+    "pkglint": "0.0.0",
+    "@types/jest": "^26.0.24",
+    "@types/fs-extra": "^8.1.2"
+  },
+  "dependencies": {
+    "aws-cdk-migration": "0.0.0",
+    "fs-extra": "^9.1.0",
+    "@lerna/project": "4.0.0"
+  },
+  "keywords": [
+    "aws",
+    "cdk"
+  ],
+  "homepage": "https://github.com/aws/aws-cdk",
+  "engines": {
+    "node": ">= 10.13.0 <13 || >=13.7.0"
+  },
+  "cdk-build": {
+    "jest": true
+  },
+  "ubergen": {
+    "exclude": true
+  }
+}
diff --git a/tools/individual-pkg-gen/restore-package-jsons.ts b/tools/individual-pkg-gen/restore-package-jsons.ts
new file mode 100644
index 0000000000000..bb87064cbc9da
--- /dev/null
+++ b/tools/individual-pkg-gen/restore-package-jsons.ts
@@ -0,0 +1,20 @@
+import * as path from 'path';
+import * as fs from 'fs-extra';
+// eslint-disable-next-line @typescript-eslint/no-require-imports
+const lerna_project = require('@lerna/project');
+
+bringBackDependencies().catch(e => {
+  // eslint-disable-next-line no-console
+  console.error(e);
+  process.exit(1);
+});
+
+async function bringBackDependencies(): Promise<void[]> {
+  const project = new lerna_project.Project();
+  const separatePackages = project.getPackagesSync();
+  const promises = new Array<Promise<void>>();
+  for (const separatePkg of separatePackages) {
+    promises.push(fs.rename(path.join(separatePkg.location, '_package.json'), separatePkg.manifestLocation));
+  }
+  return Promise.all(promises);
+}
diff --git a/tools/individual-pkg-gen/test/gen.test.ts b/tools/individual-pkg-gen/test/gen.test.ts
new file mode 100644
index 0000000000000..c0c8b49fba246
--- /dev/null
+++ b/tools/individual-pkg-gen/test/gen.test.ts
@@ -0,0 +1,3 @@
+test('No tests are specified for this package', () => {
+  expect(true).toBe(true);
+});
diff --git a/tools/individual-pkg-gen/tsconfig.json b/tools/individual-pkg-gen/tsconfig.json
new file mode 100644
index 0000000000000..14499cd2abfaf
--- /dev/null
+++ b/tools/individual-pkg-gen/tsconfig.json
@@ -0,0 +1,20 @@
+{
+  "compilerOptions": {
+    "target": "ES2018",
+    "module": "commonjs",
+    "lib": ["es2018"],
+    "strict": true,
+    "alwaysStrict": true,
+    "declaration": true,
+    "inlineSourceMap": true,
+    "inlineSources": true,
+    "noUnusedLocals": true,
+    "noUnusedParameters": true,
+    "noImplicitReturns": true,
+    "noFallthroughCasesInSwitch": true,
+    "resolveJsonModule": true,
+    "composite": true,
+    "incremental": true
+  },
+  "include": ["**/*.ts"]
+}
diff --git a/tools/pkglint/lib/rules.ts b/tools/pkglint/lib/rules.ts
index add3c58fee4b2..885c147bcdb82 100644
--- a/tools/pkglint/lib/rules.ts
+++ b/tools/pkglint/lib/rules.ts
@@ -839,13 +839,15 @@ function cdkModuleName(name: string) {
   const pythonName = name.replace(/^@/g, '').replace(/\//g, '.').split('.').map(caseUtils.kebab).join('.');
 
   return {
-    javaPackage: `software.amazon.awscdk${isLegacyCdkPkg ? '' : `.${name.replace(/^aws-/, 'services-').replace(/-/g, '.')}`}`,
+    javaPackage: `software.amazon.awscdk${isLegacyCdkPkg ? '' : `.${name.replace(/aws-/, 'services-').replace(/-/g, '.')}`}`,
     mavenArtifactId:
-      isLegacyCdkPkg ? 'cdk'
-        : isCdkPkg ? 'core'
-          : name.startsWith('aws-') || name.startsWith('alexa-') ? name.replace(/^aws-/, '')
-            : name.startsWith('cdk-') ? name
-              : `cdk-${name}`,
+      isLegacyCdkPkg
+        ? 'cdk'
+        : (isCdkPkg
+          ? 'core'
+          : (name.startsWith('aws-') || name.startsWith('alexa-')
+            ? name.replace(/aws-/, '')
+            : (name.startsWith('cdk-') ? name : `cdk-${name}`))),
     dotnetNamespace: `Amazon.CDK${isCdkPkg ? 
'' : `.${dotnetSuffix}`}`, python: { distName: `aws-cdk.${pythonName}`, @@ -1806,7 +1808,7 @@ function readBannerFile(file: string): string { return fs.readFileSync(path.join(__dirname, 'banners', file), { encoding: 'utf-8' }).trim(); } -function cdkMajorVersion() { +function cdkMajorVersion(): number { // eslint-disable-next-line @typescript-eslint/no-require-imports const releaseJson = require(`${monoRepoRoot()}/release.json`); return releaseJson.majorVersion as number; diff --git a/tools/pkglint/lib/util.ts b/tools/pkglint/lib/util.ts index 019dc244c4176..cbf782ae78f8c 100644 --- a/tools/pkglint/lib/util.ts +++ b/tools/pkglint/lib/util.ts @@ -164,7 +164,7 @@ export function findUpward(dir: string, pred: (x: string) => boolean): string | } export function monoRepoRoot() { - const ret = findUpward(process.cwd(), d => fs.existsSync(path.join(d, 'lerna.json')) || fs.existsSync(path.join(d, '.nzmroot'))); + const ret = findUpward(process.cwd(), d => fs.existsSync(path.join(d, 'release.json')) || fs.existsSync(path.join(d, '.nzmroot'))); if (!ret) { throw new Error('Could not find lerna.json'); } From 1ffd89714f8b1c1389d4e43383cc77d16d00ed9e Mon Sep 17 00:00:00 2001 From: kaizen3031593 <36202692+kaizen3031593@users.noreply.github.com> Date: Tue, 7 Sep 2021 15:12:17 -0400 Subject: [PATCH 08/41] fix(cloudwatch): cross account alarms does not support math expressions (#16333) Now they do. Closes #16331. I have also modified the comment to explain `returnData: entry.tag ? undefined : false` but I endeavor to explain even more here. Only 1 metric within an expression can have `returnData = true`; the rest must be `false`. Cloudformation also defaults an undefined return data to `true` as long as the rest are set to `false`, which is why this ternary operation works. The idea behind this line is that `entry.tag` is defined (with a default of `true`) for the top level expression only and thus every other metric within the expression has `returnData = false`. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-cloudwatch/lib/alarm.ts | 5 +- .../test/cross-environment.test.ts | 61 ++++++++++++++++++- 2 files changed, 62 insertions(+), 4 deletions(-) diff --git a/packages/@aws-cdk/aws-cloudwatch/lib/alarm.ts b/packages/@aws-cdk/aws-cloudwatch/lib/alarm.ts index 41928d0af7193..e0692478c8457 100644 --- a/packages/@aws-cdk/aws-cloudwatch/lib/alarm.ts +++ b/packages/@aws-cdk/aws-cloudwatch/lib/alarm.ts @@ -321,8 +321,9 @@ export class Alarm extends AlarmBase { unit: stat.unitFilter, }, id: entry.id || uniqueMetricId(), + accountId: stat.account, label: conf.renderingProperties?.label, - returnData: entry.tag ? undefined : false, // Tag stores "primary" attribute, default is "true" + returnData: entry.tag ? undefined : false, // entry.tag evaluates to true if the metric is the math expression the alarm is based on. }; }, withExpression(expr, conf) { @@ -338,7 +339,7 @@ export class Alarm extends AlarmBase { id: entry.id || uniqueMetricId(), label: conf.renderingProperties?.label, period: hasSubmetrics ? undefined : expr.period, - returnData: entry.tag ? undefined : false, // Tag stores "primary" attribute, default is "true" + returnData: entry.tag ? undefined : false, // entry.tag evaluates to true if the metric is the math expression the alarm is based on. 
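+        // Only one metric in the Metrics array may return data; CloudFormation defaults an undefined ReturnData to true as long as every other entry sets it to false.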
}; }, }) as CfnAlarm.MetricDataQueryProperty), diff --git a/packages/@aws-cdk/aws-cloudwatch/test/cross-environment.test.ts b/packages/@aws-cdk/aws-cloudwatch/test/cross-environment.test.ts index f17391591a8c5..50278d9a83d54 100644 --- a/packages/@aws-cdk/aws-cloudwatch/test/cross-environment.test.ts +++ b/packages/@aws-cdk/aws-cloudwatch/test/cross-environment.test.ts @@ -1,6 +1,6 @@ import { Template } from '@aws-cdk/assertions'; -import { Stack } from '@aws-cdk/core'; -import { Alarm, GraphWidget, IWidget, Metric } from '../lib'; +import { Duration, Stack } from '@aws-cdk/core'; +import { Alarm, GraphWidget, IWidget, MathExpression, Metric } from '../lib'; const a = new Metric({ namespace: 'Test', metricName: 'ACount' }); @@ -177,6 +177,63 @@ describe('cross environment', () => { ], }); }); + + test('math expression can render in a different account', () => { + // GIVEN + const b = new Metric({ + namespace: 'Test', + metricName: 'ACount', + account: '1234', + }); + + const c = new MathExpression({ + expression: 'a + b', + usingMetrics: { a: a.attachTo(stack3), b }, + period: Duration.minutes(1), + }); + + new Alarm(stack1, 'Alarm', { + threshold: 1, + evaluationPeriods: 1, + metric: c, + }); + + // THEN + Template.fromStack(stack1).hasResourceProperties('AWS::CloudWatch::Alarm', { + Metrics: [ + { + Expression: 'a + b', + Id: 'expr_1', + }, + { + AccountId: '0000', + Id: 'a', + MetricStat: { + Metric: { + MetricName: 'ACount', + Namespace: 'Test', + }, + Period: 60, + Stat: 'Average', + }, + ReturnData: false, + }, + { + AccountId: '1234', + Id: 'b', + MetricStat: { + Metric: { + MetricName: 'ACount', + Namespace: 'Test', + }, + Period: 60, + Stat: 'Average', + }, + ReturnData: false, + }, + ], + }); + }); }); }); From 0dc220fd1849702253643f23396dbc05740074f4 Mon Sep 17 00:00:00 2001 From: kaizen3031593 Date: Tue, 7 Sep 2021 16:45:46 -0400 Subject: [PATCH 09/41] outputName for has/find outputs and mapping --- .../assertions/lib/private/mappings.ts | 10 +- .../assertions/lib/private/outputs.ts | 14 +- .../assertions/lib/private/section.ts | 9 ++ packages/@aws-cdk/assertions/lib/template.ts | 17 ++- .../@aws-cdk/assertions/test/template.test.ts | 135 ++++++++++++++++-- 5 files changed, 155 insertions(+), 30 deletions(-) diff --git a/packages/@aws-cdk/assertions/lib/private/mappings.ts b/packages/@aws-cdk/assertions/lib/private/mappings.ts index 0def435cc0e1d..be83027e9276b 100644 --- a/packages/@aws-cdk/assertions/lib/private/mappings.ts +++ b/packages/@aws-cdk/assertions/lib/private/mappings.ts @@ -1,9 +1,9 @@ import { StackInspector } from '../vendored/assert'; -import { formatFailure, matchSection } from './section'; +import { filterLogicalId, formatFailure, matchSection } from './section'; -export function findMappings(inspector: StackInspector, props: any = {}): { [key: string]: any }[] { +export function findMappings(inspector: StackInspector, outputName: string, props: any = {}): { [key: string]: any }[] { const section: { [key: string] : {} } = inspector.value.Mappings; - const result = matchSection(section, props); + const result = matchSection(filterLogicalId(section,outputName), props); if (!result.match) { return []; @@ -12,9 +12,9 @@ export function findMappings(inspector: StackInspector, props: any = {}): { [key return result.matches; } -export function hasMapping(inspector: StackInspector, props: any): string | void { +export function hasMapping(inspector: StackInspector, outputName: string, props: any): string | void { const section: { [key: string]: {} } = 
inspector.value.Mappings; - const result = matchSection(section, props); + const result = matchSection(filterLogicalId(section, outputName), props); if (result.match) { return; diff --git a/packages/@aws-cdk/assertions/lib/private/outputs.ts b/packages/@aws-cdk/assertions/lib/private/outputs.ts index 0b328ffda7fcb..8d97353cf56d1 100644 --- a/packages/@aws-cdk/assertions/lib/private/outputs.ts +++ b/packages/@aws-cdk/assertions/lib/private/outputs.ts @@ -1,9 +1,9 @@ import { StackInspector } from '../vendored/assert'; -import { formatFailure, matchSection } from './section'; +import { filterLogicalId, formatFailure, matchSection } from './section'; -export function findOutputs(inspector: StackInspector, props: any = {}): { [key: string]: any }[] { +export function findOutputs(inspector: StackInspector, outputName: string, props: any = {}): { [key: string]: any }[] { const section: { [key: string] : {} } = inspector.value.Outputs; - const result = matchSection(section, props); + const result = matchSection(filterLogicalId(section, outputName), props); if (!result.match) { return []; @@ -14,7 +14,7 @@ export function findOutputs(inspector: StackInspector, props: any = {}): { [key: export function hasOutput(inspector: StackInspector, outputName: string, props: any): string | void { const section: { [key: string]: {} } = inspector.value.Outputs; - const result = matchSection(filterName(section, outputName), props); + const result = matchSection(filterLogicalId(section, outputName), props); if (result.match) { return; } @@ -28,9 +28,3 @@ export function hasOutput(inspector: StackInspector, outputName: string, props: formatFailure(result.closestResult), ].join('\n'); } - -function filterName(section: { [key: string]: {} }, outputName: string): { [key: string]: {} } { - return Object.entries(section ?? {}) - .filter(([k, _]) => k === outputName) - .reduce((agg, [k, v]) => { return { ...agg, [k]: v }; }, {}); -} \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/lib/private/section.ts b/packages/@aws-cdk/assertions/lib/private/section.ts index d8f0123de20d6..c7e1d93ecc0f7 100644 --- a/packages/@aws-cdk/assertions/lib/private/section.ts +++ b/packages/@aws-cdk/assertions/lib/private/section.ts @@ -55,4 +55,13 @@ export function formatFailure(closestResult: MatchResult): string { function leftPad(x: string, indent: number = 2): string { const pad = ' '.repeat(indent); return pad + x.split('\n').join(`\n${pad}`); +} + +export function filterLogicalId(section: { [key: string]: {} }, outputName: string): { [key: string]: {} } { + // default signal for all outputs is '*' + if (outputName === '*') return section; + + return Object.entries(section ?? {}) + .filter(([k, _]) => k === outputName) + .reduce((agg, [k, v]) => { return { ...agg, [k]: v }; }, {}); } \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/lib/template.ts b/packages/@aws-cdk/assertions/lib/template.ts index 2f9f84c54cabb..f67d7e64f3018 100644 --- a/packages/@aws-cdk/assertions/lib/template.ts +++ b/packages/@aws-cdk/assertions/lib/template.ts @@ -109,7 +109,7 @@ export class Template { * Assert that an Output with the given properties exists in the CloudFormation template. * By default, performs partial matching on the resource, via the `Match.objectLike()`. * To configure different behavour, use other matchers in the `Match` class. - * @param outputName the name of the output. + * @param outputName the name of the output. Provide '*' to match all Output names in the template. 
* @param props the output as should be expected in the template. */ public hasOutput(outputName: string, props: any): void { @@ -121,22 +121,24 @@ export class Template { /** * Get the set of matching Outputs that match the given properties in the CloudFormation template. + * @param outputName the name of the output. Provide '*' to match all Output names in the template. * @param props by default, matches all Outputs in the template. * When a literal object is provided, performs a partial match via `Match.objectLike()`. * Use the `Match` APIs to configure a different behaviour. */ - public findOutputs(props: any = {}): { [key: string]: any }[] { - return findOutputs(this.inspector, props); + public findOutputs(outputName: string, props: any = {}): { [key: string]: any }[] { + return findOutputs(this.inspector, outputName, props); } /** * Assert that a Mapping with the given properties exists in the CloudFormation template. * By default, performs partial matching on the resource, via the `Match.objectLike()`. * To configure different behavour, use other matchers in the `Match` class. + * @param outputName the name of the output. Provide '*' to match all Output names in the template. * @param props the output as should be expected in the template. */ - public hasMapping(props: any): void { - const matchError = hasMapping(this.inspector, props); + public hasMapping(outputName: string, props: any): void { + const matchError = hasMapping(this.inspector, outputName, props); if (matchError) { throw new Error(matchError); } @@ -144,12 +146,13 @@ export class Template { /** * Get the set of matching Mappings that match the given properties in the CloudFormation template. + * @param outputName the name of the output. Provide '*' to match all Output names in the template. * @param props by default, matches all Mappings in the template. * When a literal object is provided, performs a partial match via `Match.objectLike()`. * Use the `Match` APIs to configure a different behaviour. 
*/ - public findMappings(props: any = {}): { [key: string]: any }[] { - return findMappings(this.inspector, props); + public findMappings(outputName: string, props: any = {}): { [key: string]: any }[] { + return findMappings(this.inspector, outputName, props); } /** diff --git a/packages/@aws-cdk/assertions/test/template.test.ts b/packages/@aws-cdk/assertions/test/template.test.ts index fc3c34deecd57..6d255eac0c0b7 100644 --- a/packages/@aws-cdk/assertions/test/template.test.ts +++ b/packages/@aws-cdk/assertions/test/template.test.ts @@ -370,7 +370,7 @@ describe('Template', () => { done(); }); - test('outputName not matching', (done) => { + test('value not matching with outputName', (done) => { const stack = new Stack(); new CfnOutput(stack, 'Foo', { value: 'Bar', @@ -394,7 +394,7 @@ describe('Template', () => { }); }); - test('name not matching', (done) => { + test('outputName not matching', (done) => { const stack = new Stack(); new CfnOutput(stack, 'Foo', { value: 'Bar', @@ -432,7 +432,7 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - const result = inspect.findOutputs({ Value: 'Fred' }); + const result = inspect.findOutputs('*', { Value: 'Fred' }); expect(result).toEqual([ { Value: 'Fred', Description: 'FooFred' }, { Value: 'Fred', Description: 'BarFred' }, @@ -446,7 +446,37 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - const result = inspect.findOutputs({ Value: 'Waldo' }); + const result = inspect.findOutputs('*', { Value: 'Waldo' }); + expect(result.length).toEqual(0); + }); + + test('matching specific output', () => { + const stack = new Stack(); + new CfnOutput(stack, 'Foo', { + value: 'Fred', + }); + new CfnOutput(stack, 'Baz', { + value: 'Waldo', + }); + + const inspect = Template.fromStack(stack); + const result = inspect.findOutputs('Foo', { Value: 'Fred'}); + expect(result).toEqual([ + { Value: 'Fred' }, + ]); + }); + + test('not matching specific output', () => { + const stack = new Stack(); + new CfnOutput(stack, 'Foo', { + value: 'Fred', + }); + new CfnOutput(stack, 'Baz', { + value: 'Waldo', + }); + + const inspect = Template.fromStack(stack); + const result = inspect.findOutputs('Foo', { Value: 'Waldo'}); expect(result.length).toEqual(0); }); }); @@ -467,7 +497,7 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - expect(() => inspect.hasMapping({ Foo: { Bar: 'Lightning' } })).not.toThrow(); + expect(() => inspect.hasMapping('*', { Foo: { Bar: 'Lightning' } })).not.toThrow(); }); test('not matching', (done) => { @@ -486,7 +516,7 @@ describe('Template', () => { const inspect = Template.fromStack(stack); expectToThrow( - () => inspect.hasMapping({ + () => inspect.hasMapping('*',{ Foo: { Bar: 'Qux' }, }), [ @@ -497,6 +527,52 @@ describe('Template', () => { ); done(); }); + + test('matching specific outputName', () => { + const stack = new Stack(); + new CfnMapping(stack, 'Foo', { + mapping: { + Foo: { Bar: 'Lightning', Fred: 'Waldo' }, + Baz: { Bar: 'Qux' }, + }, + }); + new CfnMapping(stack, 'Fred', { + mapping: { + Foo: { Bar: 'Lightning' }, + }, + }); + + const inspect = Template.fromStack(stack); + expect(() => inspect.hasMapping('Foo', { Baz: { Bar: 'Qux' } })).not.toThrow(); + }); + + test('not matching specific outputName', (done) => { + const stack = new Stack(); + new CfnMapping(stack, 'Foo', { + mapping: { + Foo: { Bar: 'Fred', Baz: 'Waldo' }, + Qux: { Bar: 'Fred' }, + }, + }); + new CfnMapping(stack, 'Fred', { + mapping: { + Foo: { Baz: 'Baz' }, + }, + }); + + const 
inspect = Template.fromStack(stack); + expectToThrow( + () => inspect.hasMapping('Fred',{ + Foo: { Baz: 'Fred' }, + }), + [ + /1 mappings/, + /Expected Fred but received Baz/, + ], + done, + ); + done(); + }); }); describe('findMappings', () => { @@ -515,7 +591,7 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - const result = inspect.findMappings({ Foo: { Bar: 'Lightning' } }); + const result = inspect.findMappings('*', { Foo: { Bar: 'Lightning' } }); expect(result).toEqual([ { Foo: { Bar: 'Lightning', Fred: 'Waldo' }, @@ -534,7 +610,50 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - const result = inspect.findMappings({ Foo: { Bar: 'Waldo' } }); + const result = inspect.findMappings('*', { Foo: { Bar: 'Waldo' } }); + expect(result.length).toEqual(0); + }); + + test('matching with specific outputName', () => { + const stack = new Stack(); + new CfnMapping(stack, 'Foo', { + mapping: { + Foo: { Bar: 'Lightning', Fred: 'Waldo' }, + Baz: { Bar: 'Qux' }, + }, + }); + new CfnMapping(stack, 'Fred', { + mapping: { + Foo: { Bar: 'Lightning' }, + }, + }); + + const inspect = Template.fromStack(stack); + const result = inspect.findMappings('Foo', { Foo: { Bar: 'Lightning' } }); + expect(result).toEqual([ + { + Foo: { Bar: 'Lightning', Fred: 'Waldo' }, + Baz: { Bar: 'Qux' }, + }, + ]); + }); + + test('not matching', () => { + const stack = new Stack(); + new CfnMapping(stack, 'Foo', { + mapping: { + Foo: { Bar: 'Lightning', Fred: 'Waldo' }, + Baz: { Bar: 'Qux' }, + }, + }); + new CfnMapping(stack, 'Fred', { + mapping: { + Foo: { Bar: 'Lightning' }, + }, + }); + + const inspect = Template.fromStack(stack); + const result = inspect.findMappings('Fred', { Baz: { Bar: 'Qux' } }); expect(result.length).toEqual(0); }); }); From a86aea8d26de3900ea1bb3119fa9161de22f533a Mon Sep 17 00:00:00 2001 From: kaizen3031593 Date: Tue, 7 Sep 2021 16:47:36 -0400 Subject: [PATCH 10/41] linter --- packages/@aws-cdk/assertions/lib/private/mappings.ts | 2 +- packages/@aws-cdk/assertions/test/template.test.ts | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/@aws-cdk/assertions/lib/private/mappings.ts b/packages/@aws-cdk/assertions/lib/private/mappings.ts index be83027e9276b..0c5b3d459faeb 100644 --- a/packages/@aws-cdk/assertions/lib/private/mappings.ts +++ b/packages/@aws-cdk/assertions/lib/private/mappings.ts @@ -3,7 +3,7 @@ import { filterLogicalId, formatFailure, matchSection } from './section'; export function findMappings(inspector: StackInspector, outputName: string, props: any = {}): { [key: string]: any }[] { const section: { [key: string] : {} } = inspector.value.Mappings; - const result = matchSection(filterLogicalId(section,outputName), props); + const result = matchSection(filterLogicalId(section, outputName), props); if (!result.match) { return []; diff --git a/packages/@aws-cdk/assertions/test/template.test.ts b/packages/@aws-cdk/assertions/test/template.test.ts index 6d255eac0c0b7..ca5c5c5ea1e58 100644 --- a/packages/@aws-cdk/assertions/test/template.test.ts +++ b/packages/@aws-cdk/assertions/test/template.test.ts @@ -460,7 +460,7 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - const result = inspect.findOutputs('Foo', { Value: 'Fred'}); + const result = inspect.findOutputs('Foo', { Value: 'Fred' }); expect(result).toEqual([ { Value: 'Fred' }, ]); @@ -476,7 +476,7 @@ describe('Template', () => { }); const inspect = Template.fromStack(stack); - const result = 
inspect.findOutputs('Foo', { Value: 'Waldo'}); + const result = inspect.findOutputs('Foo', { Value: 'Waldo' }); expect(result.length).toEqual(0); }); }); @@ -516,7 +516,7 @@ describe('Template', () => { const inspect = Template.fromStack(stack); expectToThrow( - () => inspect.hasMapping('*',{ + () => inspect.hasMapping('*', { Foo: { Bar: 'Qux' }, }), [ @@ -562,7 +562,7 @@ describe('Template', () => { const inspect = Template.fromStack(stack); expectToThrow( - () => inspect.hasMapping('Fred',{ + () => inspect.hasMapping('Fred', { Foo: { Baz: 'Fred' }, }), [ From 8f9b74e10df6ab8e779bb03d8af644547e72ba36 Mon Sep 17 00:00:00 2001 From: kaizen3031593 Date: Tue, 7 Sep 2021 17:00:36 -0400 Subject: [PATCH 11/41] fix neptune tests --- packages/@aws-cdk/aws-neptune/test/instance.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/@aws-cdk/aws-neptune/test/instance.test.ts b/packages/@aws-cdk/aws-neptune/test/instance.test.ts index 38a6981ab2a78..ed83e1506496a 100644 --- a/packages/@aws-cdk/aws-neptune/test/instance.test.ts +++ b/packages/@aws-cdk/aws-neptune/test/instance.test.ts @@ -43,7 +43,7 @@ describe('DatabaseInstance', () => { }); // THEN - Template.fromStack(stack).hasOutput({ + Template.fromStack(stack).hasOutput(exportName, { Export: { Name: exportName }, Value: { 'Fn::Join': [ @@ -78,7 +78,7 @@ describe('DatabaseInstance', () => { }); // THEN - Template.fromStack(stack).hasOutput({ + Template.fromStack(stack).hasOutput('EndpointOutput', { Export: { Name: endpointExportName }, Value: `${instanceEndpointAddress}:${port}`, }); From 59691719500895e555b884bd216c1bd10a770ad3 Mon Sep 17 00:00:00 2001 From: kaizen3031593 Date: Tue, 7 Sep 2021 17:13:37 -0400 Subject: [PATCH 12/41] fix firehose tests --- .../@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts b/packages/@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts index 7c36a29e379b5..f716e56a8f326 100644 --- a/packages/@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts +++ b/packages/@aws-cdk/aws-kinesisfirehose/test/delivery-stream.test.ts @@ -491,7 +491,7 @@ describe('delivery stream', () => { destinations: [mockS3Destination], }); - Template.fromStack(stack).hasMapping({ + Template.fromStack(stack).hasMapping('*', { 'af-south-1': { FirehoseCidrBlock: '13.244.121.224/27', }, From d499c85e4c09cc00b457ca7f2f4611a925ca8aeb Mon Sep 17 00:00:00 2001 From: kaizen3031593 <36202692+kaizen3031593@users.noreply.github.com> Date: Wed, 8 Sep 2021 04:40:11 -0400 Subject: [PATCH 13/41] feat(pipelines): stack-level steps (#16215) Stack-level steps are available via the legacy API but not the modern API. This PR introduces the `stackSteps` property to `AddStageOpts`, allowing the user to specify additional steps at the stack level rather than the stage level. You can specify a step using the `pre`, `changeSet` or `post` options. `pre` steps happen before `stack.prepare`. `changeSet` steps happen between `stack.prepare` and `stack.deploy`. `post` steps happen after `stack.deploy`. A primary use case is to add a `ManualApprovalStep` in `stackSteps.changeSet` to verify changes to the stack before deployment. Closes #16148. 
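As a minimal sketch of the resulting API (assuming an existing `pipeline`, and using the
`AppWithExposedStacks` test helper added below as the stage; the step names are illustrative):

```ts
import * as cdkp from '@aws-cdk/pipelines';

declare const pipeline: cdkp.CodePipeline;
declare const prod: AppWithExposedStacks; // a Stage that exposes its `stacks` array

pipeline.addStage(prod, {
  stackSteps: [{
    stack: prod.stacks[0],
    pre: [new cdkp.ManualApprovalStep('PreCheck')],          // before stack.prepare
    changeSet: [new cdkp.ManualApprovalStep('ChangeSetOk')], // between stack.prepare and stack.deploy
    post: [new cdkp.ManualApprovalStep('PostCheck')],        // after stack.deploy
  }],
});
```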
----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*

---
 packages/@aws-cdk/pipelines/README.md         | 18 ++++-
 .../lib/blueprint/stack-deployment.ts         | 28 +++++++
 .../lib/blueprint/stage-deployment.ts         | 25 +++++-
 .../@aws-cdk/pipelines/lib/blueprint/step.ts  | 34 +++++++-
 .../@aws-cdk/pipelines/lib/blueprint/wave.ts  |  9 ++-
 .../lib/helpers-internal/pipeline-graph.ts    | 29 ++++++-
 .../pipelines/lib/main/pipeline-base.ts       |  1 -
 .../helpers-internal/pipeline-graph.test.ts   | 79 ++++++++++++++++++-
 .../pipelines/test/testhelpers/test-app.ts    | 10 +++
 9 files changed, 226 insertions(+), 7 deletions(-)

diff --git a/packages/@aws-cdk/pipelines/README.md b/packages/@aws-cdk/pipelines/README.md
index a63f1eab4d7c4..55cd50bfb12bf 100644
--- a/packages/@aws-cdk/pipelines/README.md
+++ b/packages/@aws-cdk/pipelines/README.md
@@ -460,7 +460,7 @@ manual or automated gates to your pipeline. We recommend putting manual approval
 the set of `post` steps.
 
 The following example shows both an automated approval in the form of a `ShellStep`, and
-a manual approvel in the form of a `ManualApprovalStep` added to the pipeline. Both must
+a manual approval in the form of a `ManualApprovalStep` added to the pipeline. Both must
 pass in order to promote from the `PreProd` to the `Prod` environment:
 
 ```ts
@@ -481,6 +481,22 @@ pipeline.addStage(prod, {
 });
 ```
 
+You can also specify steps to be executed at the stack level. To achieve this, you can specify the stack and step via the `stackSteps` property:
+
+```ts
+pipeline.addStage(prod, {
+  stackSteps: [{
+    stack: prod.stack1,
+    pre: [new ManualApprovalStep('Pre-Stack Check')], // Executed before stack is prepared
+    changeSet: [new ManualApprovalStep('ChangeSet Approval')], // Executed after stack is prepared but before the stack is deployed
+    post: [new ManualApprovalStep('Post-Deploy Check')], // Executed after stack is deployed
+  }, {
+    stack: prod.stack2,
+    post: [new ManualApprovalStep('Post-Deploy Check')], // Executed after stack is deployed
+  }],
+});
+```
+
 #### Using CloudFormation Stack Outputs in approvals
 
 Because many CloudFormation deployments result in the generation of resources with unpredictable
diff --git a/packages/@aws-cdk/pipelines/lib/blueprint/stack-deployment.ts b/packages/@aws-cdk/pipelines/lib/blueprint/stack-deployment.ts
index 2fe74ef15ccd3..488551f4eefb7 100644
--- a/packages/@aws-cdk/pipelines/lib/blueprint/stack-deployment.ts
+++ b/packages/@aws-cdk/pipelines/lib/blueprint/stack-deployment.ts
@@ -4,6 +4,7 @@ import * as cxapi from '@aws-cdk/cx-api';
 import { AssetManifestReader, DockerImageManifestEntry, FileManifestEntry } from '../private/asset-manifest';
 import { isAssetManifest } from '../private/cloud-assembly-internals';
 import { AssetType } from './asset-type';
+import { Step } from './step';
 
 /**
  * Properties for a `StackDeployment`
@@ -191,6 +192,21 @@ export class StackDeployment {
    */
   public readonly absoluteTemplatePath: string;
 
+  /**
+   * Steps that take place before stack is prepared. If your pipeline engine disables 'prepareStep', then this will happen before stack deploys
+   */
+  public readonly pre: Step[] = [];
+
+  /**
+   * Steps that take place after stack is prepared but before stack deploys. Your pipeline engine may not disable `prepareStep`.
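+   * A typical use is a `ManualApprovalStep` that requires the change set to be reviewed and approved before the stack deploys.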
+ */ + public readonly changeSet: Step[] = []; + + /** + * Steps to execute after stack deploys + */ + public readonly post: Step[] = []; + private constructor(props: StackDeploymentProps) { this.stackArtifactId = props.stackArtifactId; this.constructPath = props.constructPath; @@ -220,6 +236,18 @@ export class StackDeployment { public addStackDependency(stackDeployment: StackDeployment) { this.stackDependencies.push(stackDeployment); } + + /** + * Adds steps to each phase of the stack + * @param pre steps executed before stack.prepare + * @param changeSet steps executed after stack.prepare and before stack.deploy + * @param post steps executed after stack.deploy + */ + public addStackSteps(pre: Step[], changeSet: Step[], post: Step[]) { + this.pre.push(...pre); + this.changeSet.push(...changeSet); + this.post.push(...post); + } } /** diff --git a/packages/@aws-cdk/pipelines/lib/blueprint/stage-deployment.ts b/packages/@aws-cdk/pipelines/lib/blueprint/stage-deployment.ts index b8e3f0ea77536..651ccb3b46d53 100644 --- a/packages/@aws-cdk/pipelines/lib/blueprint/stage-deployment.ts +++ b/packages/@aws-cdk/pipelines/lib/blueprint/stage-deployment.ts @@ -3,7 +3,7 @@ import { CloudFormationStackArtifact } from '@aws-cdk/cx-api'; import { isStackArtifact } from '../private/cloud-assembly-internals'; import { pipelineSynth } from '../private/construct-internals'; import { StackDeployment } from './stack-deployment'; -import { Step } from './step'; +import { StackSteps, Step } from './step'; /** * Properties for a `StageDeployment` @@ -29,6 +29,13 @@ export interface StageDeploymentProps { * @default - No additional steps */ readonly post?: Step[]; + + /** + * Instructions for additional steps that are run at the stack level + * + * @default - No additional instructions + */ + readonly stackSteps?: StackSteps[]; } /** @@ -57,6 +64,16 @@ export class StageDeployment { const step = StackDeployment.fromArtifact(artifact); stepFromArtifact.set(artifact, step); } + if (props.stackSteps) { + for (const stackstep of props.stackSteps) { + const stackArtifact = assembly.getStackArtifact(stackstep.stack.artifactId); + const thisStep = stepFromArtifact.get(stackArtifact); + if (!thisStep) { + throw new Error('Logic error: we just added a step for this artifact but it disappeared.'); + } + thisStep.addStackSteps(stackstep.pre ?? [], stackstep.changeSet ?? [], stackstep.post ?? []); + } + } for (const artifact of assembly.stacks) { const thisStep = stepFromArtifact.get(artifact); @@ -95,12 +112,18 @@ export class StageDeployment { */ public readonly post: Step[]; + /** + * Instructions for additional steps that are run at stack level + */ + public readonly stackSteps: StackSteps[]; + private constructor( /** The stacks deployed in this stage */ public readonly stacks: StackDeployment[], props: StageDeploymentProps = {}) { this.stageName = props.stageName ?? ''; this.pre = props.pre ?? []; this.post = props.post ?? []; + this.stackSteps = props.stackSteps ?? 
[]; } /** diff --git a/packages/@aws-cdk/pipelines/lib/blueprint/step.ts b/packages/@aws-cdk/pipelines/lib/blueprint/step.ts index e04b79bdcd848..31fd96d2a8c32 100644 --- a/packages/@aws-cdk/pipelines/lib/blueprint/step.ts +++ b/packages/@aws-cdk/pipelines/lib/blueprint/step.ts @@ -1,4 +1,4 @@ -import { Token } from '@aws-cdk/core'; +import { Stack, Token } from '@aws-cdk/core'; import { FileSet, IFileSetProducer } from './file-set'; /** @@ -74,4 +74,36 @@ export abstract class Step implements IFileSetProducer { protected configurePrimaryOutput(fs: FileSet) { this._primaryOutput = fs; } +} + +/** + * Instructions for additional steps that are run at stack level + */ +export interface StackSteps { + /** + * The stack you want the steps to run in + */ + readonly stack: Stack; + + /** + * Steps that execute before stack is prepared + * + * @default - no additional steps + */ + readonly pre?: Step[]; + + /** + * Steps that execute after stack is prepared but before stack is deployed + * + * @default - no additional steps + */ + readonly changeSet?: Step[]; + + /** + * Steps that execute after stack is deployed + * + * @default - no additional steps + */ + readonly post?: Step[]; + } \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/lib/blueprint/wave.ts b/packages/@aws-cdk/pipelines/lib/blueprint/wave.ts index 709d43a1ed8bd..b419277c51132 100644 --- a/packages/@aws-cdk/pipelines/lib/blueprint/wave.ts +++ b/packages/@aws-cdk/pipelines/lib/blueprint/wave.ts @@ -1,6 +1,6 @@ import * as cdk from '@aws-cdk/core'; import { StageDeployment } from './stage-deployment'; -import { Step } from './step'; +import { StackSteps, Step } from './step'; /** * Construction properties for a `Wave` @@ -91,6 +91,13 @@ export interface AddStageOpts { * @default - No additional steps */ readonly post?: Step[]; + + /** + * Instructions for stack level steps + * + * @default - No additional instructions + */ + readonly stackSteps?: StackSteps[]; } /** diff --git a/packages/@aws-cdk/pipelines/lib/helpers-internal/pipeline-graph.ts b/packages/@aws-cdk/pipelines/lib/helpers-internal/pipeline-graph.ts index 8a7b198831808..738576017afb9 100644 --- a/packages/@aws-cdk/pipelines/lib/helpers-internal/pipeline-graph.ts +++ b/packages/@aws-cdk/pipelines/lib/helpers-internal/pipeline-graph.ts @@ -142,8 +142,9 @@ export class PipelineGraph { }); retGraph.add(stackGraph); - stackGraph.add(deployNode); + + // node or node collection that represents first point of contact in each stack let firstDeployNode; if (prepareNode) { stackGraph.add(prepareNode); @@ -153,6 +154,21 @@ export class PipelineGraph { firstDeployNode = deployNode; } + // add changeset steps at the stack level + if (stack.changeSet.length > 0) { + if (prepareNode) { + this.addChangeSet(stack.changeSet, prepareNode, deployNode, stackGraph); + } else { + throw new Error('Your pipeline engine does not support changeSet steps'); + } + } + + // add pre and post steps at the stack level + const preNodes = this.addPrePost(stack.pre, stack.post, stackGraph); + if (preNodes.nodes.length > 0) { + firstDeployNode = preNodes; + } + stackGraphs.set(stack, stackGraph); const cloudAssembly = this.cloudAssemblyFileSet; @@ -201,16 +217,27 @@ export class PipelineGraph { return retGraph; } + private addChangeSet(changeSet: Step[], prepareNode: AGraphNode, deployNode: AGraphNode, graph: AGraph) { + for (const c of changeSet) { + const changeSetNode = this.addAndRecurse(c, graph); + changeSetNode?.dependOn(prepareNode); + deployNode.dependOn(changeSetNode); + } + } + 
private addPrePost(pre: Step[], post: Step[], parent: AGraph) { const currentNodes = new GraphNodeCollection(parent.nodes); + const preNodes = new GraphNodeCollection(new Array()); for (const p of pre) { const preNode = this.addAndRecurse(p, parent); currentNodes.dependOn(preNode); + preNodes.nodes.push(preNode!); } for (const p of post) { const postNode = this.addAndRecurse(p, parent); postNode?.dependOn(...currentNodes.nodes); } + return preNodes; } private topLevelGraph(name: string): AGraph { diff --git a/packages/@aws-cdk/pipelines/lib/main/pipeline-base.ts b/packages/@aws-cdk/pipelines/lib/main/pipeline-base.ts index 6ff5a1be60853..d69c2a6c89ab1 100644 --- a/packages/@aws-cdk/pipelines/lib/main/pipeline-base.ts +++ b/packages/@aws-cdk/pipelines/lib/main/pipeline-base.ts @@ -84,7 +84,6 @@ export abstract class PipelineBase extends CoreConstruct { if (this.built) { throw new Error('addStage: can\'t add Stages anymore after buildPipeline() has been called'); } - return this.addWave(stage.stageName).addStage(stage, options); } diff --git a/packages/@aws-cdk/pipelines/test/blueprint/helpers-internal/pipeline-graph.test.ts b/packages/@aws-cdk/pipelines/test/blueprint/helpers-internal/pipeline-graph.test.ts index 3b28d4f410a61..92c3a3dcf2dfe 100644 --- a/packages/@aws-cdk/pipelines/test/blueprint/helpers-internal/pipeline-graph.test.ts +++ b/packages/@aws-cdk/pipelines/test/blueprint/helpers-internal/pipeline-graph.test.ts @@ -1,9 +1,10 @@ /* eslint-disable import/no-extraneous-dependencies */ import '@aws-cdk/assert-internal/jest'; import * as cdkp from '../../../lib'; +import { ManualApprovalStep } from '../../../lib'; import { Graph, GraphNode, PipelineGraph } from '../../../lib/helpers-internal'; import { flatten } from '../../../lib/private/javascript'; -import { AppWithOutput, OneStackApp, TestApp } from '../../testhelpers/test-app'; +import { AppWithOutput, AppWithExposedStacks, OneStackApp, TestApp } from '../../testhelpers/test-app'; let app: TestApp; @@ -113,6 +114,34 @@ describe('blueprint with wave and stage', () => { 'Stack', ]); }); + + test('pre, changeSet, and post are added correctly inside stack graph', () => { + // GIVEN + const appWithExposedStacks = new AppWithExposedStacks(app, 'Gamma'); + const stack = appWithExposedStacks.stacks[0]; + blueprint.waves[0].addStage(appWithExposedStacks, { + stackSteps: [{ + stack, + pre: [new cdkp.ManualApprovalStep('Step1'), new cdkp.ManualApprovalStep('Step2'), new cdkp.ManualApprovalStep('Step3')], + changeSet: [new cdkp.ManualApprovalStep('Manual Approval')], + post: [new cdkp.ManualApprovalStep('Post Approval')], + }], + }); + + // WHEN + const graph = new PipelineGraph(blueprint).graph; + + // THEN + expect(childrenAt(graph, 'Wave', 'Gamma', 'Stack1')).toEqual([ + 'Step1', + 'Step2', + 'Step3', + 'Prepare', + 'Manual Approval', + 'Deploy', + 'Post Approval', + ]); + }); }); describe('options for other engines', () => { @@ -153,6 +182,54 @@ describe('options for other engines', () => { // since "prepareStep" is false, it only has "Deploy". 
expect(childrenAt(graph.graph, 'Alpha', 'Stack')).toStrictEqual(['Deploy']); }); + + test('"prepareStep: false" will not impact "pre" stack steps', () => { + // GIVEN + const blueprint = new Blueprint(app, 'Bp', { + synth: new cdkp.ShellStep('Synth', { + commands: ['build'], + }), + }); + const appWithExposedStacks = new AppWithExposedStacks(app, 'Alpha'); + blueprint.addStage(appWithExposedStacks, { + stackSteps: [{ + stack: appWithExposedStacks.stacks[0], + pre: [new ManualApprovalStep('PreCheck')], + }], + }); + + // WHEN + const graph = new PipelineGraph(blueprint, { + prepareStep: false, + }); + + // THEN + expect(childrenAt(graph.graph, 'Alpha', 'Stack1')).toEqual([ + 'PreCheck', + 'Deploy', + ]); + }); + + test('specifying changeSet step with "prepareStep: false" will throw', () => { + // GIVEN + const blueprint = new Blueprint(app, 'Bp', { + synth: new cdkp.ShellStep('Synth', { + commands: ['build'], + }), + }); + const appWithExposedStacks = new AppWithExposedStacks(app, 'Alpha'); + blueprint.addStage(appWithExposedStacks, { + stackSteps: [{ + stack: appWithExposedStacks.stacks[0], + changeSet: [new ManualApprovalStep('ChangeSetApproval')], + }], + }); + + // THEN + expect(() => new PipelineGraph(blueprint, { + prepareStep: false, + })).toThrow('Your pipeline engine does not support changeSet steps'); + }); }); diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/test-app.ts b/packages/@aws-cdk/pipelines/test/testhelpers/test-app.ts index 1f554b75e2623..a431e56fadaef 100644 --- a/packages/@aws-cdk/pipelines/test/testhelpers/test-app.ts +++ b/packages/@aws-cdk/pipelines/test/testhelpers/test-app.ts @@ -43,6 +43,16 @@ export class TestApp extends App { } } +export class AppWithExposedStacks extends Stage { + public readonly stacks: Stack[]; + constructor(scope: Construct, id: string, props?: StageProps) { + super(scope, id, props); + this.stacks = new Array(); + this.stacks.push(new BucketStack(this, 'Stack1')); + this.stacks.push(new BucketStack(this, 'Stack2')); + this.stacks.push(new BucketStack(this, 'Stack3')); + } +} export class OneStackApp extends Stage { constructor(scope: Construct, id: string, props?: StageProps) { From 6e718067b6c4e1a2c905fedcc60a6863ba3add12 Mon Sep 17 00:00:00 2001 From: Jonathan Goldwasser Date: Wed, 8 Sep 2021 14:10:57 +0200 Subject: [PATCH 14/41] feat(backup): option to prevent recovery point deletions (#16282) Add a `blockRecoveryPointDeletion` prop that add statements to the vault access policy that prevents recovery point deletions. Converted test to use `assertions` while in there. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-backup/README.md | 57 ++++++++++++- packages/@aws-cdk/aws-backup/lib/vault.ts | 24 +++++- packages/@aws-cdk/aws-backup/package.json | 4 +- .../@aws-cdk/aws-backup/test/plan.test.ts | 10 +-- .../aws-backup/test/selection.test.ts | 14 ++-- .../@aws-cdk/aws-backup/test/vault.test.ts | 84 +++++++++++++++++-- 6 files changed, 167 insertions(+), 26 deletions(-) diff --git a/packages/@aws-cdk/aws-backup/README.md b/packages/@aws-cdk/aws-backup/README.md index ffe43e83db2bd..22b2398ba08ea 100644 --- a/packages/@aws-cdk/aws-backup/README.md +++ b/packages/@aws-cdk/aws-backup/README.md @@ -11,11 +11,17 @@ -AWS Backup is a fully managed backup service that makes it easy to centralize and automate the backup of data across AWS services in the cloud and on premises. 
Using AWS Backup, you can configure backup policies and monitor backup activity for your AWS resources in one place. +AWS Backup is a fully managed backup service that makes it easy to centralize and automate the +backup of data across AWS services in the cloud and on premises. Using AWS Backup, you can +configure backup policies and monitor backup activity for your AWS resources in one place. ## Backup plan and selection -In AWS Backup, a *backup plan* is a policy expression that defines when and how you want to back up your AWS resources, such as Amazon DynamoDB tables or Amazon Elastic File System (Amazon EFS) file systems. You can assign resources to backup plans, and AWS Backup automatically backs up and retains backups for those resources according to the backup plan. You can create multiple backup plans if you have workloads with different backup requirements. +In AWS Backup, a *backup plan* is a policy expression that defines when and how you want to back up + your AWS resources, such as Amazon DynamoDB tables or Amazon Elastic File System (Amazon EFS) file + systems. You can assign resources to backup plans, and AWS Backup automatically backs up and retains + backups for those resources according to the backup plan. You can create multiple backup plans if you + have workloads with different backup requirements. This module provides ready-made backup plans (similar to the console experience): @@ -78,7 +84,11 @@ plan.addRule(backup.BackupPlanRule.monthly1Year(otherVault)); // Use `otherVault ## Backup vault -In AWS Backup, a *backup vault* is a container that you organize your backups in. You can use backup vaults to set the AWS Key Management Service (AWS KMS) encryption key that is used to encrypt backups in the backup vault and to control access to the backups in the backup vault. If you require different encryption keys or access policies for different groups of backups, you can optionally create multiple backup vaults. +In AWS Backup, a *backup vault* is a container that you organize your backups in. You can use backup +vaults to set the AWS Key Management Service (AWS KMS) encryption key that is used to encrypt backups +in the backup vault and to control access to the backups in the backup vault. If you require different +encryption keys or access policies for different groups of backups, you can optionally create multiple +backup vaults. ```ts const myKey = kms.Key.fromKeyArn(this, 'MyKey', 'aaa'); @@ -93,10 +103,49 @@ const vault = new backup.BackupVault(this, 'Vault', { A vault has a default `RemovalPolicy` set to `RETAIN`. Note that removing a vault that contains recovery points will fail. +You can assign policies to backup vaults and the resources they contain. Assigning policies allows +you to do things like grant access to users to create backup plans and on-demand backups, but limit +their ability to delete recovery points after they're created. 
+
+Use the `accessPolicy` property to create a backup vault policy:
+
+```ts
+const vault = new backup.BackupVault(this, 'Vault', {
+  accessPolicy: new iam.PolicyDocument({
+    statements: [
+      new iam.PolicyStatement({
+        effect: iam.Effect.DENY,
+        principals: [new iam.AnyPrincipal()],
+        actions: ['backup:DeleteRecoveryPoint'],
+        resources: ['*'],
+        conditions: {
+          StringNotLike: {
+            'aws:userId': [
+              'user1',
+              'user2',
+            ],
+          },
+        },
+      }),
+    ],
+  }),
+});
+```
+
+Use the `blockRecoveryPointDeletion` property to add statements to the vault access policy that
+prevent recovery point deletions in your vault:
+
+```ts
+new backup.BackupVault(this, 'Vault', {
+  blockRecoveryPointDeletion: true,
+});
+```
+
+By default access is not restricted.
 
 ## Importing existing backup vault
 
-To import an existing backup vault into your CDK application, use the `BackupVault.fromBackupVaultArn` or `BackupVault.fromBackupVaultName`
+To import an existing backup vault into your CDK application, use the `BackupVault.fromBackupVaultArn` or `BackupVault.fromBackupVaultName`
 static method. Here is an example of giving an IAM Role permission to start a backup job:
 
 ```ts
diff --git a/packages/@aws-cdk/aws-backup/lib/vault.ts b/packages/@aws-cdk/aws-backup/lib/vault.ts
index 4ca43e5875729..ac2c67a19dff3 100644
--- a/packages/@aws-cdk/aws-backup/lib/vault.ts
+++ b/packages/@aws-cdk/aws-backup/lib/vault.ts
@@ -83,6 +83,14 @@ export interface BackupVaultProps {
    * @default RemovalPolicy.RETAIN
    */
   readonly removalPolicy?: RemovalPolicy;
+
+  /**
+   * Whether to add statements to the vault access policy that prevent anyone
+   * from deleting a recovery point.
+   *
+   * @default false
+   */
+  readonly blockRecoveryPointDeletion?: boolean;
 }
 
 /**
@@ -206,9 +214,23 @@ export class BackupVault extends BackupVaultBase {
       props.notificationTopic.grantPublish(new iam.ServicePrincipal('backup.amazonaws.com'));
     }
 
+    const accessPolicy = props.accessPolicy ?? 
new iam.PolicyDocument(); + if (props.blockRecoveryPointDeletion) { + accessPolicy.addStatements(new iam.PolicyStatement({ + effect: iam.Effect.DENY, + actions: [ + 'backup:DeleteRecoveryPoint', + 'backup:UpdateRecoveryPointLifecycle', + ], + principals: [new iam.AnyPrincipal()], + resources: ['*'], + }), + ); + } + const vault = new CfnBackupVault(this, 'Resource', { backupVaultName: props.backupVaultName || this.uniqueVaultName(), - accessPolicy: props.accessPolicy && props.accessPolicy.toJSON(), + accessPolicy: accessPolicy.toJSON(), encryptionKeyArn: props.encryptionKey && props.encryptionKey.keyArn, notifications, }); diff --git a/packages/@aws-cdk/aws-backup/package.json b/packages/@aws-cdk/aws-backup/package.json index 3c906382aa9b2..78f771c99a2b7 100644 --- a/packages/@aws-cdk/aws-backup/package.json +++ b/packages/@aws-cdk/aws-backup/package.json @@ -75,12 +75,12 @@ }, "license": "Apache-2.0", "devDependencies": { + "@aws-cdk/assertions": "0.0.0", "@types/jest": "^26.0.24", "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", "cfn2ts": "0.0.0", - "pkglint": "0.0.0", - "@aws-cdk/assert-internal": "0.0.0" + "pkglint": "0.0.0" }, "dependencies": { "@aws-cdk/aws-dynamodb": "0.0.0", diff --git a/packages/@aws-cdk/aws-backup/test/plan.test.ts b/packages/@aws-cdk/aws-backup/test/plan.test.ts index 91ac4baef7e02..401e961129838 100644 --- a/packages/@aws-cdk/aws-backup/test/plan.test.ts +++ b/packages/@aws-cdk/aws-backup/test/plan.test.ts @@ -1,4 +1,4 @@ -import '@aws-cdk/assert-internal/jest'; +import { Template } from '@aws-cdk/assertions'; import * as events from '@aws-cdk/aws-events'; import { App, Duration, Stack } from '@aws-cdk/core'; import { BackupPlan, BackupPlanRule, BackupVault } from '../lib'; @@ -32,7 +32,7 @@ test('create a plan and add rules', () => { plan.addRule(BackupPlanRule.monthly5Year(otherVault)); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupPlan', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupPlan', { BackupPlan: { BackupPlanName: 'Plan', BackupPlanRule: [ @@ -75,7 +75,7 @@ test('daily35DayRetention', () => { BackupPlan.daily35DayRetention(stack, 'D35'); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupPlan', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupPlan', { BackupPlan: { BackupPlanName: 'D35', BackupPlanRule: [ @@ -102,7 +102,7 @@ test('dailyWeeklyMonthly7YearRetention', () => { BackupPlan.dailyWeeklyMonthly7YearRetention(stack, 'DWM7'); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupPlan', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupPlan', { BackupPlan: { BackupPlanName: 'DWM7', BackupPlanRule: [ @@ -159,7 +159,7 @@ test('automatically creates a new vault', () => { plan.addRule(BackupPlanRule.daily()); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupPlan', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupPlan', { BackupPlan: { BackupPlanName: 'Plan', BackupPlanRule: [ diff --git a/packages/@aws-cdk/aws-backup/test/selection.test.ts b/packages/@aws-cdk/aws-backup/test/selection.test.ts index 3c4fd6d63ee56..1ff32b886a908 100644 --- a/packages/@aws-cdk/aws-backup/test/selection.test.ts +++ b/packages/@aws-cdk/aws-backup/test/selection.test.ts @@ -1,4 +1,4 @@ -import '@aws-cdk/assert-internal/jest'; +import { Template } from '@aws-cdk/assertions'; import * as dynamodb from '@aws-cdk/aws-dynamodb'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as efs from '@aws-cdk/aws-efs'; @@ -30,7 +30,7 @@ test('create a 
selection', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupSelection', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupSelection', { BackupPlanId: { 'Fn::GetAtt': [ 'PlanDAF4E53A', @@ -64,7 +64,7 @@ test('create a selection', () => { }, }); - expect(stack).toHaveResource('AWS::IAM::Role', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Role', { ManagedPolicyArns: [ { 'Fn::Join': [ @@ -93,7 +93,7 @@ test('allow restores', () => { }); // THEN - expect(stack).toHaveResource('AWS::IAM::Role', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Role', { ManagedPolicyArns: [ { 'Fn::Join': [ @@ -158,7 +158,7 @@ test('fromConstruct', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupSelection', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupSelection', { BackupSelection: { IamRoleArn: { 'Fn::GetAtt': [ @@ -259,7 +259,7 @@ test('fromEc2Instance', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupSelection', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupSelection', { BackupSelection: { IamRoleArn: { 'Fn::GetAtt': [ @@ -316,7 +316,7 @@ test('fromDynamoDbTable', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupSelection', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupSelection', { BackupSelection: { IamRoleArn: { 'Fn::GetAtt': [ diff --git a/packages/@aws-cdk/aws-backup/test/vault.test.ts b/packages/@aws-cdk/aws-backup/test/vault.test.ts index e1a67e0e0ed8b..30b145f55d3ac 100644 --- a/packages/@aws-cdk/aws-backup/test/vault.test.ts +++ b/packages/@aws-cdk/aws-backup/test/vault.test.ts @@ -1,4 +1,4 @@ -import '@aws-cdk/assert-internal/jest'; +import { Template } from '@aws-cdk/assertions'; import * as iam from '@aws-cdk/aws-iam'; import * as kms from '@aws-cdk/aws-kms'; import * as sns from '@aws-cdk/aws-sns'; @@ -15,7 +15,7 @@ test('create a vault', () => { new BackupVault(stack, 'Vault'); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupVault', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupVault', { BackupVaultName: 'Vault', }); }); @@ -46,7 +46,7 @@ test('with access policy', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupVault', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupVault', { AccessPolicy: { Version: '2012-10-17', Statement: [ @@ -68,6 +68,76 @@ test('with access policy', () => { }); }); +test('with blockRecoveryPointDeletion', () => { + // WHEN + new BackupVault(stack, 'Vault', { + blockRecoveryPointDeletion: true, + }); + + // THEN + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupVault', { + AccessPolicy: { + Version: '2012-10-17', + Statement: [ + { + Effect: 'Deny', + Principal: { + AWS: '*', + }, + Action: [ + 'backup:DeleteRecoveryPoint', + 'backup:UpdateRecoveryPointLifecycle', + ], + Resource: '*', + }, + ], + }, + }); +}); + +test('merges statements from accessPolicy and blockRecoveryPointDeletion', () => { + // WHEN + new BackupVault(stack, 'Vault', { + accessPolicy: new iam.PolicyDocument({ + statements: [ + new iam.PolicyStatement({ + effect: iam.Effect.DENY, + principals: [new iam.ArnPrincipal('arn:aws:iam::123456789012:role/MyRole')], + actions: ['backup:StartRestoreJob'], + }), + ], + }), + blockRecoveryPointDeletion: true, + }); + + // THEN + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupVault', { + AccessPolicy: { + Version: 
'2012-10-17', + Statement: [ + { + Action: 'backup:StartRestoreJob', + Effect: 'Deny', + Principal: { + AWS: 'arn:aws:iam::123456789012:role/MyRole', + }, + }, + { + Effect: 'Deny', + Principal: { + AWS: '*', + }, + Action: [ + 'backup:DeleteRecoveryPoint', + 'backup:UpdateRecoveryPointLifecycle', + ], + Resource: '*', + }, + ], + }, + }); +}); + test('with encryption key', () => { // GIVEN const encryptionKey = new kms.Key(stack, 'Key'); @@ -78,7 +148,7 @@ test('with encryption key', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupVault', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupVault', { EncryptionKeyArn: { 'Fn::GetAtt': [ 'Key961B73FD', @@ -102,7 +172,7 @@ test('with notifications', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupVault', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupVault', { Notifications: { BackupVaultEvents: [ 'BACKUP_JOB_COMPLETED', @@ -125,7 +195,7 @@ test('defaults to all notifications', () => { }); // THEN - expect(stack).toHaveResource('AWS::Backup::BackupVault', { + Template.fromStack(stack).hasResourceProperties('AWS::Backup::BackupVault', { Notifications: { BackupVaultEvents: Object.values(BackupVaultEvents), SNSTopicArn: { @@ -175,7 +245,7 @@ test('grant action', () => { vault.grant(role, 'backup:StartBackupJob'); // THEN - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { From 9b3d40ecc8af1529589bcb2e3912accfef08f3fa Mon Sep 17 00:00:00 2001 From: kaizen3031593 Date: Wed, 8 Sep 2021 11:26:41 -0400 Subject: [PATCH 15/41] change param name from outputName to logicalId --- .../assertions/lib/private/mappings.ts | 8 +++---- .../assertions/lib/private/outputs.ts | 12 +++++----- .../assertions/lib/private/section.ts | 8 +++---- packages/@aws-cdk/assertions/lib/template.ts | 24 +++++++++---------- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/packages/@aws-cdk/assertions/lib/private/mappings.ts b/packages/@aws-cdk/assertions/lib/private/mappings.ts index 0c5b3d459faeb..266e322bb1139 100644 --- a/packages/@aws-cdk/assertions/lib/private/mappings.ts +++ b/packages/@aws-cdk/assertions/lib/private/mappings.ts @@ -1,9 +1,9 @@ import { StackInspector } from '../vendored/assert'; import { filterLogicalId, formatFailure, matchSection } from './section'; -export function findMappings(inspector: StackInspector, outputName: string, props: any = {}): { [key: string]: any }[] { +export function findMappings(inspector: StackInspector, logicalId: string, props: any = {}): { [key: string]: any }[] { const section: { [key: string] : {} } = inspector.value.Mappings; - const result = matchSection(filterLogicalId(section, outputName), props); + const result = matchSection(filterLogicalId(section, logicalId), props); if (!result.match) { return []; @@ -12,9 +12,9 @@ export function findMappings(inspector: StackInspector, outputName: string, prop return result.matches; } -export function hasMapping(inspector: StackInspector, outputName: string, props: any): string | void { +export function hasMapping(inspector: StackInspector, logicalId: string, props: any): string | void { const section: { [key: string]: {} } = inspector.value.Mappings; - const result = matchSection(filterLogicalId(section, outputName), props); + const result = matchSection(filterLogicalId(section, logicalId), props); if (result.match) { return; diff --git 
a/packages/@aws-cdk/assertions/lib/private/outputs.ts b/packages/@aws-cdk/assertions/lib/private/outputs.ts
index 8d97353cf56d1..870e00555b254 100644
--- a/packages/@aws-cdk/assertions/lib/private/outputs.ts
+++ b/packages/@aws-cdk/assertions/lib/private/outputs.ts
@@ -1,9 +1,9 @@
 import { StackInspector } from '../vendored/assert';
 import { filterLogicalId, formatFailure, matchSection } from './section';
 
-export function findOutputs(inspector: StackInspector, outputName: string, props: any = {}): { [key: string]: any }[] {
+export function findOutputs(inspector: StackInspector, logicalId: string, props: any = {}): { [key: string]: any }[] {
   const section: { [key: string] : {} } = inspector.value.Outputs;
-  const result = matchSection(filterLogicalId(section, outputName), props);
+  const result = matchSection(filterLogicalId(section, logicalId), props);
 
   if (!result.match) {
     return [];
@@ -12,19 +12,19 @@ export function findOutputs(inspector: StackInspector, props
   return result.matches;
 }
 
-export function hasOutput(inspector: StackInspector, outputName: string, props: any): string | void {
+export function hasOutput(inspector: StackInspector, logicalId: string, props: any): string | void {
   const section: { [key: string]: {} } = inspector.value.Outputs;
-  const result = matchSection(filterLogicalId(section, outputName), props);
+  const result = matchSection(filterLogicalId(section, logicalId), props);
 
   if (result.match) {
     return;
  }
 
   if (result.closestResult === undefined) {
-    return `No outputs named ${outputName} found in the template.`;
+    return `No outputs named ${logicalId} found in the template.`;
  }
 
   return [
-    `Template has ${result.analyzedCount} outputs named ${outputName}, but none match as expected.`,
+    `Template has ${result.analyzedCount} outputs named ${logicalId}, but none match as expected.`,
     formatFailure(result.closestResult),
   ].join('\n');
 }
diff --git a/packages/@aws-cdk/assertions/lib/private/section.ts b/packages/@aws-cdk/assertions/lib/private/section.ts
index c7e1d93ecc0f7..59ad55241e581 100644
--- a/packages/@aws-cdk/assertions/lib/private/section.ts
+++ b/packages/@aws-cdk/assertions/lib/private/section.ts
@@ -57,11 +57,11 @@ function leftPad(x: string, indent: number = 2): string {
   return pad + x.split('\n').join(`\n${pad}`);
 }
 
-export function filterLogicalId(section: { [key: string]: {} }, outputName: string): { [key: string]: {} } {
-  // default signal for all outputs is '*'
-  if (outputName === '*') return section;
+export function filterLogicalId(section: { [key: string]: {} }, logicalId: string): { [key: string]: {} } {
+  // default signal for all logicalIds is '*'
+  if (logicalId === '*') return section;
   return Object.entries(section ?? {})
-    .filter(([k, _]) => k === outputName)
+    .filter(([k, _]) => k === logicalId)
     .reduce((agg, [k, v]) => { return { ...agg, [k]: v }; }, {});
 }
\ No newline at end of file
diff --git a/packages/@aws-cdk/assertions/lib/template.ts b/packages/@aws-cdk/assertions/lib/template.ts
index f67d7e64f3018..d642e74962080 100644
--- a/packages/@aws-cdk/assertions/lib/template.ts
+++ b/packages/@aws-cdk/assertions/lib/template.ts
@@ -109,11 +109,11 @@ export class Template {
    * Assert that an Output with the given properties exists in the CloudFormation template.
    * By default, performs partial matching on the resource, via the `Match.objectLike()`.
    * To configure different behaviour, use other matchers in the `Match` class.
-   * @param outputName the name of the output. Provide '*' to match all Output names in the template.
+   * @param logicalId the name of the output. Provide `'*'` to match all outputs in the template.
    * @param props the output as should be expected in the template.
    */
-  public hasOutput(outputName: string, props: any): void {
-    const matchError = hasOutput(this.inspector, outputName, props);
+  public hasOutput(logicalId: string, props: any): void {
+    const matchError = hasOutput(this.inspector, logicalId, props);
     if (matchError) {
       throw new Error(matchError);
     }
@@ -121,24 +121,24 @@ export class Template {
 
   /**
    * Get the set of matching Outputs that match the given properties in the CloudFormation template.
-   * @param outputName the name of the output. Provide '*' to match all Output names in the template.
+   * @param logicalId the name of the output. Provide `'*'` to match all outputs in the template.
    * @param props by default, matches all Outputs in the template.
    * When a literal object is provided, performs a partial match via `Match.objectLike()`.
    * Use the `Match` APIs to configure a different behaviour.
    */
-  public findOutputs(outputName: string, props: any = {}): { [key: string]: any }[] {
-    return findOutputs(this.inspector, outputName, props);
+  public findOutputs(logicalId: string, props: any = {}): { [key: string]: any }[] {
+    return findOutputs(this.inspector, logicalId, props);
   }
 
   /**
    * Assert that a Mapping with the given properties exists in the CloudFormation template.
    * By default, performs partial matching on the resource, via the `Match.objectLike()`.
    * To configure different behaviour, use other matchers in the `Match` class.
-   * @param outputName the name of the output. Provide '*' to match all Output names in the template.
+   * @param logicalId the name of the mapping. Provide `'*'` to match all mappings in the template.
    * @param props the mapping as should be expected in the template.
    */
-  public hasMapping(outputName: string, props: any): void {
-    const matchError = hasMapping(this.inspector, outputName, props);
+  public hasMapping(logicalId: string, props: any): void {
+    const matchError = hasMapping(this.inspector, logicalId, props);
     if (matchError) {
       throw new Error(matchError);
     }
@@ -146,13 +146,13 @@ export class Template {
 
   /**
    * Get the set of matching Mappings that match the given properties in the CloudFormation template.
-   * @param outputName the name of the output. Provide '*' to match all Output names in the template.
+   * @param logicalId the name of the mapping. Provide `'*'` to match all mappings in the template.
    * @param props by default, matches all Mappings in the template.
    * When a literal object is provided, performs a partial match via `Match.objectLike()`.
    * Use the `Match` APIs to configure a different behaviour.
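   *
   * An illustrative sketch (the 'RegionMap' logical id is a placeholder, not part
   * of this change):
   *
   * ```ts
   * const template = Template.fromStack(stack);
   * template.findMappings('RegionMap'); // only the mapping with logical id 'RegionMap'
   * template.findMappings('*');         // all mappings in the template
   * ```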
   */
-  public findMappings(outputName: string, props: any = {}): { [key: string]: any }[] {
-    return findMappings(this.inspector, outputName, props);
+  public findMappings(logicalId: string, props: any = {}): { [key: string]: any }[] {
+    return findMappings(this.inspector, logicalId, props);
   }
 
   /**

From 36d1f102b6c48bbd3882c3c07444d6d7a5ba07d3 Mon Sep 17 00:00:00 2001
From: kaizen3031593 
Date: Wed, 8 Sep 2021 11:43:16 -0400
Subject: [PATCH 16/41] update readme

---
 packages/@aws-cdk/assertions/README.md | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/packages/@aws-cdk/assertions/README.md b/packages/@aws-cdk/assertions/README.md
index d651ab72c1bc3..7b84a9ec8ad12 100644
--- a/packages/@aws-cdk/assertions/README.md
+++ b/packages/@aws-cdk/assertions/README.md
@@ -107,16 +107,27 @@ By default, the `hasResource()` and `hasResourceProperties()` APIs perform deep
 partial object matching. This behavior can be configured using matchers.
 See subsequent section on [special matchers](#special-matchers).
 
-## Other Sections
+## Output and Mapping Matching
 
-Similar to the `hasResource()` and `findResources()`, we have equivalent methods
-to check and find other sections of the CloudFormation resources.
+The module allows you to assert that an output or mapping has specific properties. The following code asserts that a resource contains an output with a `logicalId` of `Foo` and the specified properties -
 
-* Outputs - `hasOutput()` and `findOutputs()`
-* Mapping - `hasMapping()` and `findMappings()`
+```ts
+assert.hasOutput('Foo', {
+  Value: 'Bar',
+  Export: { Name: 'ExportBaz' },
+});
+```
+
+Alternatively, if you want to match the value to all outputs, you can use the `'*'` special case as the `logicalId`.
+
+```ts
+assert.hasOutput('*', {
+  Value: 'Bar',
+  Export: { Name: 'ExportBaz' },
+});
+```
 
-All of the defaults and behaviour documented for `hasResource()` and
-`findResources()` apply to these methods.
+`findOutputs()` will return a list of outputs that match the `logicalId` and `props`, and you can use the `'*'` special case as well. `hasMapping()` and `findMappings()` follow a similar pattern to output matching.
 
 ## Special Matchers

From a209bd3bb4a4d5e5f64524641205e3d591d37d13 Mon Sep 17 00:00:00 2001
From: kaizen3031593 
Date: Wed, 8 Sep 2021 11:44:52 -0400
Subject: [PATCH 17/41] better structure

---
 packages/@aws-cdk/assertions/README.md | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/packages/@aws-cdk/assertions/README.md b/packages/@aws-cdk/assertions/README.md
index 7b84a9ec8ad12..d47f3a4323683 100644
--- a/packages/@aws-cdk/assertions/README.md
+++ b/packages/@aws-cdk/assertions/README.md
@@ -109,7 +109,9 @@ See subsequent section on [special matchers](#special-matchers).
 
 ## Output and Mapping Matching
 
-The module allows you to assert that an output or mapping has specific properties. The following code asserts that a resource contains an output with a `logicalId` of `Foo` and the specified properties -
+The module allows you to assert that an output or mapping has specific properties.
+The following code asserts that a resource contains an output with a `logicalId` of
+`Foo` and the specified properties -
 
 ```ts
 assert.hasOutput('Foo', {
@@ -118,7 +120,8 @@ assert.hasOutput('Foo', {
 });
 ```
 
-Alternatively, if you want to match the value to all outputs, you can use the `'*'` special case as the `logicalId`.
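+
+For example, a short sketch of retrieving the matching outputs for further
+inspection via the related `findOutputs()` API (described below); `assert` is the
+`Template` instance from the examples above and the values are placeholders:
+
+```ts
+const fooOutputs = assert.findOutputs('Foo', { Value: 'Bar' });
+const allOutputs = assert.findOutputs('*');
+```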
+Alternatively, if you want to match the value to all outputs, you can use the `'*'`
+special case as the `logicalId`.
 
 ```ts
 assert.hasOutput('*', {
@@ -127,7 +130,9 @@ assert.hasOutput('*', {
 });
 ```
 
-`findOutputs()` will return a list of outputs that match the `logicalId` and `props`, and you can use the `'*'` special case as well. `hasMapping()` and `findMappings()` follow a similar pattern to output matching.
+`findOutputs()` will return a list of outputs that match the `logicalId` and `props`,
+and you can use the `'*'` special case as well. `hasMapping()` and `findMappings()`
+follow a similar pattern to output matching.
 
 ## Special Matchers

From 74776f393462f7e7d23cb1953ef786a823adc896 Mon Sep 17 00:00:00 2001
From: kaizen3031593 <36202692+kaizen3031593@users.noreply.github.com>
Date: Wed, 8 Sep 2021 11:48:03 -0400
Subject: [PATCH 18/41] fix(cli): 'deploy' and 'diff' silently does nothing
 when given unknown stack name (#16150)

**second attempt as last PR failed integration tests**

Currently, `cdk deploy` and `cdk diff` on stacks that do not exist return no output on the command line. This PR introduces a descriptive error message for those cases so it is easier to understand what happened.

The behavior of the error is that **if** you specify a stack or stacks, the CLI expects to match at least one known stack. If the CLI cannot find a matching stack, the error is thrown. However, if you specify multiple stacks (e.g. `cdk deploy ThisStackExists ThisStackDoesnt`) and one of those stacks does not match, the CLI will silently ignore the request to deploy `ThisStackDoesnt` without throwing an error.

Closes #15866, and applies the same fix to `diff` as well as `deploy`.

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*

---
 packages/aws-cdk/lib/cdk-toolkit.ts       | 17 ++++++++++++++---
 packages/aws-cdk/test/cdk-toolkit.test.ts |  8 ++++++++
 packages/aws-cdk/test/diff.test.ts        | 10 ++++++++++
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/packages/aws-cdk/lib/cdk-toolkit.ts b/packages/aws-cdk/lib/cdk-toolkit.ts
index 636da75d12975..6bc109dd37822 100644
--- a/packages/aws-cdk/lib/cdk-toolkit.ts
+++ b/packages/aws-cdk/lib/cdk-toolkit.ts
@@ -404,7 +404,8 @@ export class CdkToolkit {
       defaultBehavior: DefaultSelection.OnlySingle,
     });
 
-    await this.validateStacks(stacks);
+    this.validateStacksSelected(stacks, selector.patterns);
+    this.validateStacks(stacks);
 
     return stacks;
   }
@@ -422,7 +423,8 @@ export class CdkToolkit {
       ? allStacks.filter(art => art.validateOnSynth ?? false)
       : new StackCollection(assembly, []);
 
-    await this.validateStacks(selectedForDiff.concat(autoValidateStacks));
+    this.validateStacksSelected(selectedForDiff.concat(autoValidateStacks), stackNames);
+    this.validateStacks(selectedForDiff.concat(autoValidateStacks));
 
     return selectedForDiff;
   }
@@ -442,7 +444,7 @@ export class CdkToolkit {
   /**
    * Validate the stacks for errors and warnings according to the CLI's current settings
    */
-  private async validateStacks(stacks: StackCollection) {
+  private validateStacks(stacks: StackCollection) {
     stacks.processMetadataMessages({
       ignoreErrors: this.props.ignoreErrors,
       strict: this.props.strict,
@@ -450,6 +452,15 @@ export class CdkToolkit {
     });
   }
 
+  /**
+   * Validate that, if the user specified any stack names, at least one stack was selected
+   */
+  private validateStacksSelected(stacks: StackCollection, stackNames: string[]) {
+    if (stackNames.length !== 0 && stacks.stackCount === 0) {
+      throw new Error(`No stacks match the name(s) ${stackNames}`);
+    }
+  }
+
   /**
    * Select a single stack by its name
    */
diff --git a/packages/aws-cdk/test/cdk-toolkit.test.ts b/packages/aws-cdk/test/cdk-toolkit.test.ts
index 9c42c21261eaa..19276b15b7b7b 100644
--- a/packages/aws-cdk/test/cdk-toolkit.test.ts
+++ b/packages/aws-cdk/test/cdk-toolkit.test.ts
@@ -40,6 +40,14 @@ function defaultToolkitSetup() {
 }
 
 describe('deploy', () => {
+  test('fails when no valid stack names are given', async () => {
+    // GIVEN
+    const toolkit = defaultToolkitSetup();
+
+    // WHEN
+    await expect(() => toolkit.deploy({ selector: { patterns: ['Test-Stack-D'] } })).rejects.toThrow('No stacks match the name(s) Test-Stack-D');
+  });
+
   describe('with hotswap deployment', () => {
     test("passes through the 'hotswap' option to CloudFormationDeployments.deployStack()", async () => {
       // GIVEN
diff --git a/packages/aws-cdk/test/diff.test.ts b/packages/aws-cdk/test/diff.test.ts
index c1bcd11c78caa..829c24d637ca9 100644
--- a/packages/aws-cdk/test/diff.test.ts
+++ b/packages/aws-cdk/test/diff.test.ts
@@ -96,6 +96,16 @@ test('exits with 1 with diffs and fail set to true', async () => {
   expect(exitCode).toBe(1);
 });
 
+test('throws an error if no valid stack names given', async () => {
+  const buffer = new StringWritable();
+
+  // WHEN
+  await expect(() => toolkit.diff({
+    stackNames: ['X', 'Y', 'Z'],
+    stream: buffer,
+  })).rejects.toThrow('No stacks match the name(s) X,Y,Z');
+});
+
 test('exits with 1 with diff in first stack, but not in second stack and fail set to true', async () => {
   // GIVEN
   const buffer = new StringWritable();

From 5c670cdfc0b661ef8d25831fd15641eec090b153 Mon Sep 17 00:00:00 2001
From: kaizen3031593 
Date: Wed, 8 Sep 2021 12:47:26 -0400
Subject: [PATCH 19/41] readme CR suggestions

---
 packages/@aws-cdk/assertions/README.md | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/packages/@aws-cdk/assertions/README.md b/packages/@aws-cdk/assertions/README.md
index d47f3a4323683..706d467eb3e59 100644
--- a/packages/@aws-cdk/assertions/README.md
+++ b/packages/@aws-cdk/assertions/README.md
@@ -107,11 +107,11 @@ By default, the `hasResource()` and `hasResourceProperties()` APIs perform deep
 partial object matching. This behavior can be configured using matchers.
 See subsequent section on [special matchers](#special-matchers).
 
-## Output and Mapping Matching
+## Output and Mapping sections
 
-The module allows you to assert that an output or mapping has specific properties.
-The following code asserts that a resource contains an output with a `logicalId` of -`Foo` and the specified properties - +The module allows you to assert that the CloudFormation template contains an Output +that matches specific properties. The following code asserts that a template contains +an Output with a `logicalId` of `Foo` and the specified properties - ```ts assert.hasOutput('Foo', { @@ -120,8 +120,7 @@ assert.hasOutput('Foo', { }); ``` -Alternatively, if you want to match the value to all outputs, you can use the `'*'` -special case as the `logicalId`. +If you want to match against all Outputs in the template, use `*` as the `logicalId`. ```ts assert.hasOutput('*', { @@ -131,8 +130,9 @@ assert.hasOutput('*', { ``` `findOutputs()` will return a list of outputs that match the `logicalId` and `props`, -and you can use the `'*'` special case as well. `hasMapping()` and `findMappings()` -follow a similar pattern to output matching. +and you can use the `'*'` special case as well. + +The APIs `hasMapping()` and `findMappings()` provide similar functionalities. ## Special Matchers From 96ed6cddc3069fc10286063d68ed9dda5777967b Mon Sep 17 00:00:00 2001 From: AWS CDK Team Date: Wed, 8 Sep 2021 18:37:04 +0000 Subject: [PATCH 20/41] chore(release): 1.122.0 --- CHANGELOG.md | 30 ++++++++++++++++++++++++++++++ version.v1.json | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c4b63bd89989..9dc6a80553c8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,36 @@ All notable changes to this project will be documented in this file. See [standard-version](https://github.com/conventional-changelog/standard-version) for commit guidelines. +## [1.122.0](https://github.com/aws/aws-cdk/compare/v1.121.0...v1.122.0) (2021-09-08) + + +### Features + +* **aws-ec2:** Allow ApplyCloudformationInitOptions to set additional params ([#16121](https://github.com/aws/aws-cdk/issues/16121)) ([1d94646](https://github.com/aws/aws-cdk/commit/1d94646bd50cbbbc1ace3732a7b8ebb313ed3ddf)), closes [#16004](https://github.com/aws/aws-cdk/issues/16004) +* **backup:** option to prevent recovery point deletions ([#16282](https://github.com/aws/aws-cdk/issues/16282)) ([6e71806](https://github.com/aws/aws-cdk/commit/6e718067b6c4e1a2c905fedcc60a6863ba3add12)) +* **cli:** hotswap deployments ([#15748](https://github.com/aws/aws-cdk/issues/15748)) ([6e55c95](https://github.com/aws/aws-cdk/commit/6e55c952d683f87bb815deb29124b9a37824749a)) +* **config:** EC2_INSTANCE_PROFILE_ATTACHED managed rule ([#16011](https://github.com/aws/aws-cdk/issues/16011)) ([816a319](https://github.com/aws/aws-cdk/commit/816a31984b5c6e08c4c7dd740919e0c1f5d0e196)) +* **ec2:** rename SubnetTypes to improve clarity with EC2 conventions ([#16348](https://github.com/aws/aws-cdk/issues/16348)) ([2023004](https://github.com/aws/aws-cdk/commit/2023004cc941a0e7a908bf3c90ad9887c6679564)), closes [#15929](https://github.com/aws/aws-cdk/issues/15929) +* **ec2:** vpc endpoint for aws keyspaces ([#16306](https://github.com/aws/aws-cdk/issues/16306)) ([ad425d0](https://github.com/aws/aws-cdk/commit/ad425d004dd9154c367622733e2a2e36a38f1cef)) +* **ecs-service-extensions:** Subscribe Extension ([#16049](https://github.com/aws/aws-cdk/issues/16049)) ([66baca5](https://github.com/aws/aws-cdk/commit/66baca58adc294d5c5924cf8f8c5fa122c6d6dfc)) +* **elasticloadbalancingv2:** ALPN policy support for NLB listener ([#15956](https://github.com/aws/aws-cdk/issues/15956)) 
([5427578](https://github.com/aws/aws-cdk/commit/5427578515c0b65d172f95c27f32f1933fcf8d60)) +* **kms:** support fromLookup in KMS key to get key by alias name ([#15652](https://github.com/aws/aws-cdk/issues/15652)) ([34a57ed](https://github.com/aws/aws-cdk/commit/34a57eda01ab816cd77f260b10ca466a749586bf)), closes [#8822](https://github.com/aws/aws-cdk/issues/8822) +* **lambda:** python 3.9 runtime ([#16366](https://github.com/aws/aws-cdk/issues/16366)) ([a534829](https://github.com/aws/aws-cdk/commit/a534829b2458c5ed54d05fd5cca025cba2ddaaa7)) +* **pipelines:** stack-level steps ([#16215](https://github.com/aws/aws-cdk/issues/16215)) ([d499c85](https://github.com/aws/aws-cdk/commit/d499c85e4c09cc00b457ca7f2f4611a925ca8aeb)), closes [#16148](https://github.com/aws/aws-cdk/issues/16148) +* **stepfunctions-tasks:** await the eval so async ops can be passed to tasks.EvaluateExpression ([#16290](https://github.com/aws/aws-cdk/issues/16290)) ([174b066](https://github.com/aws/aws-cdk/commit/174b066634755c76d1b78d05ca9b403145dedc47)) + + +### Bug Fixes + +* **apigatewayv2:** some methods of the `defaultStage` are not available without casting it to `IHttpStage` ([#15607](https://github.com/aws/aws-cdk/issues/15607)) ([27a0113](https://github.com/aws/aws-cdk/commit/27a0113ac68a05360faa22fa8897609f2f90b764)) +* **assets:** run executable command of container assets in cloud assembly root directory ([#16094](https://github.com/aws/aws-cdk/issues/16094)) ([c2852c9](https://github.com/aws/aws-cdk/commit/c2852c9c524a639a312bf296f7f23b0e3b112f6b)), closes [#15721](https://github.com/aws/aws-cdk/issues/15721) +* **autoscaling:** EbsDeviceVolumeType.IO2 is not a valid CloudFormation value ([#16028](https://github.com/aws/aws-cdk/issues/16028)) ([492d33b](https://github.com/aws/aws-cdk/commit/492d33b27bc5b935e3da75f0bddd875bb6f9c15d)), closes [#16027](https://github.com/aws/aws-cdk/issues/16027) +* **cli:** 'deploy' and 'diff' silently does nothing when given unknown stack name ([#16150](https://github.com/aws/aws-cdk/issues/16150)) ([74776f3](https://github.com/aws/aws-cdk/commit/74776f393462f7e7d23cb1953ef786a823adc896)), closes [#15866](https://github.com/aws/aws-cdk/issues/15866) +* **cloudwatch:** cross account alarms does not support math expressions ([#16333](https://github.com/aws/aws-cdk/issues/16333)) ([1ffd897](https://github.com/aws/aws-cdk/commit/1ffd89714f8b1c1389d4e43383cc77d16d00ed9e)), closes [#16331](https://github.com/aws/aws-cdk/issues/16331) +* **core:** allow asset bundling when selinux is enabled ([#15742](https://github.com/aws/aws-cdk/issues/15742)) ([dbfebb4](https://github.com/aws/aws-cdk/commit/dbfebb47a8ae61b2bb0557b6ba79a7b073f9d0df)) +* **iam:** permissions boundary aspect doesn't always recognize roles ([#16154](https://github.com/aws/aws-cdk/issues/16154)) ([c8bfcf6](https://github.com/aws/aws-cdk/commit/c8bfcf650070a0138b148645f997f542431f70cf)) +* **stepfunctions-tasks:** Athena StartQueryExecution includes QueryExecutionContext even when object is empty ([#16141](https://github.com/aws/aws-cdk/issues/16141)) ([6e2a3e0](https://github.com/aws/aws-cdk/commit/6e2a3e0f855221df98f78f6465586d5524f5c7d5)), closes [#16133](https://github.com/aws/aws-cdk/issues/16133) [#16133](https://github.com/aws/aws-cdk/issues/16133) + ## [1.121.0](https://github.com/aws/aws-cdk/compare/v1.120.0...v1.121.0) (2021-09-01) diff --git a/version.v1.json b/version.v1.json index be934684cecce..d2fb6576ac697 100644 --- a/version.v1.json +++ b/version.v1.json @@ -1,3 +1,3 @@ { - "version": "1.121.0" + 
"version": "1.122.0" } \ No newline at end of file From e00abff8e8b1ef7a7a8b9a4f7e4f00f876fe0412 Mon Sep 17 00:00:00 2001 From: Mitchell Valine Date: Wed, 8 Sep 2021 12:02:32 -0700 Subject: [PATCH 21/41] Update CHANGELOG.md Co-authored-by: Christopher Rybicki --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9dc6a80553c8e..ca9ef129cbefa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ All notable changes to this project will be documented in this file. See [standa ### Features -* **aws-ec2:** Allow ApplyCloudformationInitOptions to set additional params ([#16121](https://github.com/aws/aws-cdk/issues/16121)) ([1d94646](https://github.com/aws/aws-cdk/commit/1d94646bd50cbbbc1ace3732a7b8ebb313ed3ddf)), closes [#16004](https://github.com/aws/aws-cdk/issues/16004) +* **ec2:** Allow ApplyCloudformationInitOptions to set additional params ([#16121](https://github.com/aws/aws-cdk/issues/16121)) ([1d94646](https://github.com/aws/aws-cdk/commit/1d94646bd50cbbbc1ace3732a7b8ebb313ed3ddf)), closes [#16004](https://github.com/aws/aws-cdk/issues/16004) * **backup:** option to prevent recovery point deletions ([#16282](https://github.com/aws/aws-cdk/issues/16282)) ([6e71806](https://github.com/aws/aws-cdk/commit/6e718067b6c4e1a2c905fedcc60a6863ba3add12)) * **cli:** hotswap deployments ([#15748](https://github.com/aws/aws-cdk/issues/15748)) ([6e55c95](https://github.com/aws/aws-cdk/commit/6e55c952d683f87bb815deb29124b9a37824749a)) * **config:** EC2_INSTANCE_PROFILE_ATTACHED managed rule ([#16011](https://github.com/aws/aws-cdk/issues/16011)) ([816a319](https://github.com/aws/aws-cdk/commit/816a31984b5c6e08c4c7dd740919e0c1f5d0e196)) From fc74110ff7eae544d9cfc11b2f6779169f17d145 Mon Sep 17 00:00:00 2001 From: Ahmed Kamel Date: Wed, 8 Sep 2021 23:55:24 +0100 Subject: [PATCH 22/41] feat(glue): Job construct (#12506) Closes #12443 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-glue/README.md | 82 +- packages/@aws-cdk/aws-glue/lib/code.ts | 112 +++ packages/@aws-cdk/aws-glue/lib/index.ts | 3 + .../@aws-cdk/aws-glue/lib/job-executable.ts | 393 ++++++++ packages/@aws-cdk/aws-glue/lib/job.ts | 803 +++++++++++++++++ packages/@aws-cdk/aws-glue/package.json | 13 +- packages/@aws-cdk/aws-glue/test/code.test.ts | 304 +++++++ .../aws-glue/test/integ.job.expected.json | 571 ++++++++++++ packages/@aws-cdk/aws-glue/test/integ.job.ts | 89 ++ .../aws-glue/test/job-executable.test.ts | 106 +++ .../aws-glue/test/job-script/hello_world.py | 1 + .../aws-glue/test/job-script/hello_world_2.py | 1 + packages/@aws-cdk/aws-glue/test/job.test.ts | 842 ++++++++++++++++++ 13 files changed, 3309 insertions(+), 11 deletions(-) create mode 100644 packages/@aws-cdk/aws-glue/lib/code.ts create mode 100644 packages/@aws-cdk/aws-glue/lib/job-executable.ts create mode 100644 packages/@aws-cdk/aws-glue/lib/job.ts create mode 100644 packages/@aws-cdk/aws-glue/test/code.test.ts create mode 100644 packages/@aws-cdk/aws-glue/test/integ.job.expected.json create mode 100644 packages/@aws-cdk/aws-glue/test/integ.job.ts create mode 100644 packages/@aws-cdk/aws-glue/test/job-executable.test.ts create mode 100644 packages/@aws-cdk/aws-glue/test/job-script/hello_world.py create mode 100644 packages/@aws-cdk/aws-glue/test/job-script/hello_world_2.py create mode 100644 packages/@aws-cdk/aws-glue/test/job.test.ts diff --git a/packages/@aws-cdk/aws-glue/README.md 
b/packages/@aws-cdk/aws-glue/README.md
index 20e08d7c14e31..f5e200f0465e7 100644
--- a/packages/@aws-cdk/aws-glue/README.md
+++ b/packages/@aws-cdk/aws-glue/README.md
@@ -23,6 +23,69 @@
 
 This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.
 
+## Job
+
+A `Job` encapsulates a script that connects to data sources, processes them, and then writes output to a data target.
+
+There are 3 types of jobs supported by AWS Glue: Spark ETL, Spark Streaming, and Python Shell jobs.
+
+The `glue.JobExecutable` allows you to specify the type of job, the language to use and the code assets required by the job.
+
+`glue.Code` allows you to refer to the different code assets required by the job, either from an existing S3 location or from a local file path.
+
+### Spark Jobs
+
+These jobs run in an Apache Spark environment managed by AWS Glue.
+
+#### ETL Jobs
+
+An ETL job processes data in batches using Apache Spark.
+
+```ts
+new glue.Job(stack, 'ScalaSparkEtlJob', {
+  executable: glue.JobExecutable.scalaEtl({
+    glueVersion: glue.GlueVersion.V2_0,
+    script: glue.Code.fromBucket(bucket, 'src/com/example/HelloWorld.scala'),
+    className: 'com.example.HelloWorld',
+    extraJars: [glue.Code.fromBucket(bucket, 'jars/HelloWorld.jar')],
+  }),
+  description: 'an example Scala ETL job',
+});
+```
+
+#### Streaming Jobs
+
+A Streaming job is similar to an ETL job, except that it performs ETL on data streams. It uses the Apache Spark Structured Streaming framework. Some Spark job features are not available to streaming ETL jobs.
+
+```ts
+new glue.Job(stack, 'PythonSparkStreamingJob', {
+  executable: glue.JobExecutable.pythonStreaming({
+    glueVersion: glue.GlueVersion.V2_0,
+    pythonVersion: glue.PythonVersion.THREE,
+    script: glue.Code.fromAsset(path.join(__dirname, 'job-script/hello_world.py')),
+  }),
+  description: 'an example Python Streaming job',
+});
+```
+
+### Python Shell Jobs
+
+A Python shell job runs Python scripts as a shell and supports a Python version that depends on the AWS Glue version you are using.
+This can be used to schedule and run tasks that don't require an Apache Spark environment.
+
+```ts
+new glue.Job(stack, 'PythonShellJob', {
+  executable: glue.JobExecutable.pythonShell({
+    glueVersion: glue.GlueVersion.V1_0,
+    pythonVersion: glue.PythonVersion.THREE,
+    script: glue.Code.fromBucket(bucket, 'script.py'),
+  }),
+  description: 'an example Python Shell job',
+});
+```
+
+See [documentation](https://docs.aws.amazon.com/glue/latest/dg/add-job.html) for more information on adding jobs in Glue.
+
 ## Connection
 
 A `Connection` allows Glue jobs, crawlers and development endpoints to access certain types of data stores. For example, to create a network connection to connect to a data source within a VPC:
@@ -41,16 +104,6 @@ If you need to use a connection type that doesn't exist as a static member on `C
 See [Adding a Connection to Your Data Store](https://docs.aws.amazon.com/glue/latest/dg/populate-add-connection.html) and [Connection Structure](https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-catalog-connections.html#aws-glue-api-catalog-connections-Connection) documentation for more information on the supported data stores and their configurations.
 
-## Database
-
-A `Database` is a logical grouping of `Tables` in the Glue Catalog.
- -```ts -new glue.Database(stack, 'MyDatabase', { - databaseName: 'my_database' -}); -``` - ## SecurityConfiguration A `SecurityConfiguration` is a set of security properties that can be used by AWS Glue to encrypt data at rest. @@ -84,6 +137,15 @@ new glue.SecurityConfiguration(stack, 'MySecurityConfiguration', { See [documentation](https://docs.aws.amazon.com/glue/latest/dg/encryption-security-configuration.html) for more info for Glue encrypting data written by Crawlers, Jobs, and Development Endpoints. +## Database + +A `Database` is a logical grouping of `Tables` in the Glue Catalog. + +```ts +new glue.Database(stack, 'MyDatabase', { + databaseName: 'my_database' +}); +``` ## Table diff --git a/packages/@aws-cdk/aws-glue/lib/code.ts b/packages/@aws-cdk/aws-glue/lib/code.ts new file mode 100644 index 0000000000000..9f2f03d9884be --- /dev/null +++ b/packages/@aws-cdk/aws-glue/lib/code.ts @@ -0,0 +1,112 @@ +import * as crypto from 'crypto'; +import * as fs from 'fs'; +import * as iam from '@aws-cdk/aws-iam'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as s3assets from '@aws-cdk/aws-s3-assets'; +import * as cdk from '@aws-cdk/core'; +import * as constructs from 'constructs'; + +/** + * Represents a Glue Job's Code assets (an asset can be a scripts, a jar, a python file or any other file). + */ +export abstract class Code { + + /** + * Job code as an S3 object. + * @param bucket The S3 bucket + * @param key The object key + */ + public static fromBucket(bucket: s3.IBucket, key: string): S3Code { + return new S3Code(bucket, key); + } + + /** + * Job code from a local disk path. + * + * @param path code file (not a directory). + */ + public static fromAsset(path: string, options?: s3assets.AssetOptions): AssetCode { + return new AssetCode(path, options); + } + + /** + * Called when the Job is initialized to allow this object to bind. + */ + public abstract bind(scope: constructs.Construct, grantable: iam.IGrantable): CodeConfig; +} + +/** + * Glue job Code from an S3 bucket. + */ +export class S3Code extends Code { + constructor(private readonly bucket: s3.IBucket, private readonly key: string) { + super(); + } + + public bind(_scope: constructs.Construct, grantable: iam.IGrantable): CodeConfig { + this.bucket.grantRead(grantable, this.key); + return { + s3Location: { + bucketName: this.bucket.bucketName, + objectKey: this.key, + }, + }; + } +} + +/** + * Job Code from a local file. + */ +export class AssetCode extends Code { + private asset?: s3assets.Asset; + + /** + * @param path The path to the Code file. + */ + constructor(private readonly path: string, private readonly options: s3assets.AssetOptions = { }) { + super(); + + if (fs.lstatSync(this.path).isDirectory()) { + throw new Error(`Code path ${this.path} is a directory. Only files are supported`); + } + } + + public bind(scope: constructs.Construct, grantable: iam.IGrantable): CodeConfig { + // If the same AssetCode is used multiple times, retain only the first instantiation. + if (!this.asset) { + this.asset = new s3assets.Asset(scope, `Code${this.hashcode(this.path)}`, { + path: this.path, + ...this.options, + }); + } else if (cdk.Stack.of(this.asset) !== cdk.Stack.of(scope)) { + throw new Error(`Asset is already associated with another stack '${cdk.Stack.of(this.asset).stackName}'. 
` + + 'Create a new Code instance for every stack.'); + } + this.asset.grantRead(grantable); + return { + s3Location: { + bucketName: this.asset.s3BucketName, + objectKey: this.asset.s3ObjectKey, + }, + }; + } + + /** + * Hash a string + */ + private hashcode(s: string): string { + const hash = crypto.createHash('md5'); + hash.update(s); + return hash.digest('hex'); + }; +} + +/** + * Result of binding `Code` into a `Job`. + */ +export interface CodeConfig { + /** + * The location of the code in S3. + */ + readonly s3Location: s3.Location; +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-glue/lib/index.ts b/packages/@aws-cdk/aws-glue/lib/index.ts index a3dfa85b3be71..d1da5e9385349 100644 --- a/packages/@aws-cdk/aws-glue/lib/index.ts +++ b/packages/@aws-cdk/aws-glue/lib/index.ts @@ -4,6 +4,9 @@ export * from './glue.generated'; export * from './connection'; export * from './data-format'; export * from './database'; +export * from './job'; +export * from './job-executable'; +export * from './code'; export * from './schema'; export * from './security-configuration'; export * from './table'; \ No newline at end of file diff --git a/packages/@aws-cdk/aws-glue/lib/job-executable.ts b/packages/@aws-cdk/aws-glue/lib/job-executable.ts new file mode 100644 index 0000000000000..8fd7c39da5508 --- /dev/null +++ b/packages/@aws-cdk/aws-glue/lib/job-executable.ts @@ -0,0 +1,393 @@ +import { Code } from './code'; + +/** + * AWS Glue version determines the versions of Apache Spark and Python that are available to the job. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/add-job.html. + * + * If you need to use a GlueVersion that doesn't exist as a static member, you + * can instantiate a `GlueVersion` object, e.g: `GlueVersion.of('1.5')`. + */ +export class GlueVersion { + /** + * Glue version using Spark 2.2.1 and Python 2.7 + */ + public static readonly V0_9 = new GlueVersion('0.9'); + + /** + * Glue version using Spark 2.4.3, Python 2.7 and Python 3.6 + */ + public static readonly V1_0 = new GlueVersion('1.0'); + + /** + * Glue version using Spark 2.4.3 and Python 3.7 + */ + public static readonly V2_0 = new GlueVersion('2.0'); + + /** + * Glue version using Spark 3.1.1 and Python 3.7 + */ + public static readonly V3_0 = new GlueVersion('3.0'); + + /** + * Custom Glue version + * @param version custom version + */ + public static of(version: string): GlueVersion { + return new GlueVersion(version); + } + + /** + * The name of this GlueVersion, as expected by Job resource. + */ + public readonly name: string; + + private constructor(name: string) { + this.name = name; + } +} + +/** + * Runtime language of the Glue job + */ +export enum JobLanguage { + /** + * Scala + */ + SCALA = 'scala', + + /** + * Python + */ + PYTHON = 'python', +} + +/** + * Python version + */ +export enum PythonVersion { + /** + * Python 2 (the exact version depends on GlueVersion and JobCommand used) + */ + TWO = '2', + + /** + * Python 3 (the exact version depends on GlueVersion and JobCommand used) + */ + THREE = '3', +} + +/** + * The job type. + * + * If you need to use a JobType that doesn't exist as a static member, you + * can instantiate a `JobType` object, e.g: `JobType.of('other name')`. + */ +export class JobType { + /** + * Command for running a Glue ETL job. + */ + public static readonly ETL = new JobType('glueetl'); + + /** + * Command for running a Glue streaming job. 
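+   *
+   * Jobs of this type are typically created through the
+   * `JobExecutable.scalaStreaming()` or `JobExecutable.pythonStreaming()`
+   * factories below, rather than by referencing this value directly.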
+ */ + public static readonly STREAMING = new JobType('gluestreaming'); + + /** + * Command for running a Glue python shell job. + */ + public static readonly PYTHON_SHELL = new JobType('pythonshell'); + + /** + * Custom type name + * @param name type name + */ + public static of(name: string): JobType { + return new JobType(name); + } + + /** + * The name of this JobType, as expected by Job resource. + */ + public readonly name: string; + + private constructor(name: string) { + this.name = name; + } +} + +interface PythonExecutableProps { + /** + * The Python version to use. + */ + readonly pythonVersion: PythonVersion; + + /** + * Additional Python files that AWS Glue adds to the Python path before executing your script. + * Only individual files are supported, directories are not supported. + * + * @default - no extra python files and argument is not set + * + * @see `--extra-py-files` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly extraPythonFiles?: Code[]; +} + +interface SharedJobExecutableProps { + /** + * Glue version. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/release-notes.html + */ + readonly glueVersion: GlueVersion; + + /** + * The script that executes a job. + */ + readonly script: Code; + + /** + * Additional files, such as configuration files that AWS Glue copies to the working directory of your script before executing it. + * Only individual files are supported, directories are not supported. + * + * @default [] - no extra files are copied to the working directory + * + * @see `--extra-files` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly extraFiles?: Code[]; +} + +interface SharedSparkJobExecutableProps extends SharedJobExecutableProps { + /** + * Additional Java .jar files that AWS Glue adds to the Java classpath before executing your script. + * Only individual files are supported, directories are not supported. + * + * @default [] - no extra jars are added to the classpath + * + * @see `--extra-jars` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly extraJars?: Code[]; + + /** + * Setting this value to true prioritizes the customer's extra JAR files in the classpath. + * + * @default false - priority is not given to user-provided jars + * + * @see `--user-jars-first` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly extraJarsFirst?: boolean; +} + +/** + * Props for creating a Scala Spark (ETL or Streaming) job executable + */ +export interface ScalaJobExecutableProps extends SharedSparkJobExecutableProps { + /** + * The fully qualified Scala class name that serves as the entry point for the job. + * + * @see `--class` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly className: string; +} + +/** + * Props for creating a Python Spark (ETL or Streaming) job executable + */ +export interface PythonSparkJobExecutableProps extends SharedSparkJobExecutableProps, PythonExecutableProps {} + +/** + * Props for creating a Python shell job executable + */ +export interface PythonShellExecutableProps extends SharedJobExecutableProps, PythonExecutableProps {} + +/** + * The executable properties related to the Glue job's GlueVersion, JobType and code + */ +export class JobExecutable { + + /** + * Create Scala executable props for Apache Spark ETL job. 
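+   *
+   * A minimal sketch, mirroring the README example from this change (the bucket,
+   * script key and class name are placeholders):
+   *
+   * ```ts
+   * glue.JobExecutable.scalaEtl({
+   *   glueVersion: glue.GlueVersion.V2_0,
+   *   script: glue.Code.fromBucket(bucket, 'src/com/example/HelloWorld.scala'),
+   *   className: 'com.example.HelloWorld',
+   * });
+   * ```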
+ * + * @param props Scala Apache Spark Job props + */ + public static scalaEtl(props: ScalaJobExecutableProps): JobExecutable { + return new JobExecutable({ + ...props, + type: JobType.ETL, + language: JobLanguage.SCALA, + }); + } + + /** + * Create Scala executable props for Apache Spark Streaming job. + * + * @param props Scala Apache Spark Job props + */ + public static scalaStreaming(props: ScalaJobExecutableProps): JobExecutable { + return new JobExecutable({ + ...props, + type: JobType.STREAMING, + language: JobLanguage.SCALA, + }); + } + + /** + * Create Python executable props for Apache Spark ETL job. + * + * @param props Python Apache Spark Job props + */ + public static pythonEtl(props: PythonSparkJobExecutableProps): JobExecutable { + return new JobExecutable({ + ...props, + type: JobType.ETL, + language: JobLanguage.PYTHON, + }); + } + + /** + * Create Python executable props for Apache Spark Streaming job. + * + * @param props Python Apache Spark Job props + */ + public static pythonStreaming(props: PythonSparkJobExecutableProps): JobExecutable { + return new JobExecutable({ + ...props, + type: JobType.STREAMING, + language: JobLanguage.PYTHON, + }); + } + + /** + * Create Python executable props for python shell jobs. + * + * @param props Python Shell Job props. + */ + public static pythonShell(props: PythonShellExecutableProps): JobExecutable { + return new JobExecutable({ + ...props, + type: JobType.PYTHON_SHELL, + language: JobLanguage.PYTHON, + }); + } + + /** + * Create a custom JobExecutable. + * + * @param config custom job executable configuration. + */ + public static of(config: JobExecutableConfig): JobExecutable { + return new JobExecutable(config); + } + + private config: JobExecutableConfig; + + private constructor(config: JobExecutableConfig) { + if (JobType.PYTHON_SHELL === config.type) { + if (config.language !== JobLanguage.PYTHON) { + throw new Error('Python shell requires the language to be set to Python'); + } + if ([GlueVersion.V0_9, GlueVersion.V2_0, GlueVersion.V3_0].includes(config.glueVersion)) { + throw new Error(`Specified GlueVersion ${config.glueVersion.name} does not support Python Shell`); + } + } + if (config.extraJarsFirst && [GlueVersion.V0_9, GlueVersion.V1_0].includes(config.glueVersion)) { + throw new Error(`Specified GlueVersion ${config.glueVersion.name} does not support extraJarsFirst`); + } + if (config.pythonVersion === PythonVersion.TWO && ![GlueVersion.V0_9, GlueVersion.V1_0].includes(config.glueVersion)) { + throw new Error(`Specified GlueVersion ${config.glueVersion.name} does not support PythonVersion ${config.pythonVersion}`); + } + if (JobLanguage.PYTHON !== config.language && config.extraPythonFiles) { + throw new Error('extraPythonFiles is not supported for languages other than JobLanguage.PYTHON'); + } + this.config = config; + } + + /** + * Called during Job initialization to get JobExecutableConfig. + */ + public bind(): JobExecutableConfig { + return this.config; + } +} + +/** + * Result of binding a `JobExecutable` into a `Job`. + */ +export interface JobExecutableConfig { + /** + * Glue version. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/release-notes.html + */ + readonly glueVersion: GlueVersion; + + /** + * The language of the job (Scala or Python). 
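+   * For example, `JobExecutable.pythonShell()` requires this to be
+   * `JobLanguage.PYTHON`; mismatched combinations are rejected when the
+   * `JobExecutable` is constructed.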
+   *
+   * @see `--job-language` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+   */
+  readonly language: JobLanguage;
+
+  /**
+   * Specify the type of the job, whether it's an Apache Spark ETL job, a streaming job, or a Python shell job.
+   */
+  readonly type: JobType;
+
+  /**
+   * The Python version to use.
+   *
+   * @default - no python version specified
+   */
+  readonly pythonVersion?: PythonVersion;
+
+  /**
+   * The script that is executed by a job.
+   */
+  readonly script: Code;
+
+  /**
+   * The Scala class that serves as the entry point for the job. This applies only if the job language is Scala.
+   *
+   * @default - no scala className specified
+   *
+   * @see `--class` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+   */
+  readonly className?: string;
+
+  /**
+   * Additional Java .jar files that AWS Glue adds to the Java classpath before executing your script.
+   *
+   * @default - no extra jars specified.
+   *
+   * @see `--extra-jars` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+   */
+  readonly extraJars?: Code[];
+
+  /**
+   * Additional Python files that AWS Glue adds to the Python path before executing your script.
+   *
+   * @default - no extra python files specified.
+   *
+   * @see `--extra-py-files` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+   */
+  readonly extraPythonFiles?: Code[];
+
+  /**
+   * Additional files, such as configuration files that AWS Glue copies to the working directory of your script before executing it.
+   *
+   * @default - no extra files specified.
+   *
+   * @see `--extra-files` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+   */
+  readonly extraFiles?: Code[];
+
+  /**
+   * Setting this value to true prioritizes the customer's extra JAR files in the classpath.
+   *
+   * @default - extra jars are not prioritized.
+   *
+   * @see `--user-jars-first` in https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+   */
+  readonly extraJarsFirst?: boolean;
+}
diff --git a/packages/@aws-cdk/aws-glue/lib/job.ts b/packages/@aws-cdk/aws-glue/lib/job.ts
new file mode 100644
index 0000000000000..0233783f94869
--- /dev/null
+++ b/packages/@aws-cdk/aws-glue/lib/job.ts
@@ -0,0 +1,803 @@
+import * as cloudwatch from '@aws-cdk/aws-cloudwatch';
+import * as events from '@aws-cdk/aws-events';
+import * as iam from '@aws-cdk/aws-iam';
+import * as logs from '@aws-cdk/aws-logs';
+import * as s3 from '@aws-cdk/aws-s3';
+import * as cdk from '@aws-cdk/core';
+import * as constructs from 'constructs';
+import { Code, JobExecutable, JobExecutableConfig, JobType } from '.';
+import { IConnection } from './connection';
+import { CfnJob } from './glue.generated';
+import { ISecurityConfiguration } from './security-configuration';
+
+/**
+ * The type of predefined worker that is allocated when a job runs.
+ *
+ * If you need to use a WorkerType that doesn't exist as a static member, you
+ * can instantiate a `WorkerType` object, e.g: `WorkerType.of('other type')`.
+ */
+export class WorkerType {
+  /**
+   * Each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
+   */
+  public static readonly STANDARD = new WorkerType('Standard');
+
+  /**
+   * Each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. Suitable for memory-intensive jobs.
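+   *
+   * A worker type is selected through the `workerType` and `workerCount`
+   * properties of `JobProps` (see below).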
+ */ + public static readonly G_1X = new WorkerType('G.1X'); + + /** + * Each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. Suitable for memory-intensive jobs. + */ + public static readonly G_2X = new WorkerType('G.2X'); + + /** + * Custom worker type + * @param workerType custom worker type + */ + public static of(workerType: string): WorkerType { + return new WorkerType(workerType); + } + + /** + * The name of this WorkerType, as expected by Job resource. + */ + public readonly name: string; + + private constructor(name: string) { + this.name = name; + } +} + +/** + * Job states emitted by Glue to CloudWatch Events. + * + * @see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types for more information. + */ +export enum JobState { + /** + * State indicating job run succeeded + */ + SUCCEEDED = 'SUCCEEDED', + + /** + * State indicating job run failed + */ + FAILED = 'FAILED', + + /** + * State indicating job run timed out + */ + TIMEOUT = 'TIMEOUT', + + /** + * State indicating job is starting + */ + STARTING = 'STARTING', + + /** + * State indicating job is running + */ + RUNNING = 'RUNNING', + + /** + * State indicating job is stopping + */ + STOPPING = 'STOPPING', + + /** + * State indicating job stopped + */ + STOPPED = 'STOPPED', +} + +/** + * The Glue CloudWatch metric type. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/monitoring-awsglue-with-cloudwatch-metrics.html + */ +export enum MetricType { + /** + * A value at a point in time. + */ + GAUGE = 'gauge', + + /** + * An aggregate number. + */ + COUNT = 'count', +} + +/** + * Interface representing a created or an imported {@link Job}. + */ +export interface IJob extends cdk.IResource, iam.IGrantable { + /** + * The name of the job. + * @attribute + */ + readonly jobName: string; + + /** + * The ARN of the job. + * @attribute + */ + readonly jobArn: string; + + /** + * Defines a CloudWatch event rule triggered when something happens with this job. + * + * @see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types + */ + onEvent(id: string, options?: events.OnEventOptions): events.Rule; + + /** + * Defines a CloudWatch event rule triggered when this job moves to the input jobState. + * + * @see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types + */ + onStateChange(id: string, jobState: JobState, options?: events.OnEventOptions): events.Rule; + + /** + * Defines a CloudWatch event rule triggered when this job moves to the SUCCEEDED state. + * + * @see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types + */ + onSuccess(id: string, options?: events.OnEventOptions): events.Rule; + + /** + * Defines a CloudWatch event rule triggered when this job moves to the FAILED state. + * + * @see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types + */ + onFailure(id: string, options?: events.OnEventOptions): events.Rule; + + /** + * Defines a CloudWatch event rule triggered when this job moves to the TIMEOUT state. + * + * @see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types + */ + onTimeout(id: string, options?: events.OnEventOptions): events.Rule; + + /** + * Create a CloudWatch metric. + * + * @param metricName name of the metric typically prefixed with `glue.driver.`, `glue..` or `glue.ALL.`. + * @param type the metric type. 
+ * @param props metric options. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/monitoring-awsglue-with-cloudwatch-metrics.html + */ + metric(metricName: string, type: MetricType, props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Create a CloudWatch Metric indicating job success. + */ + metricSuccess(props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Create a CloudWatch Metric indicating job failure. + */ + metricFailure(props?: cloudwatch.MetricOptions): cloudwatch.Metric; + + /** + * Create a CloudWatch Metric indicating job timeout. + */ + metricTimeout(props?: cloudwatch.MetricOptions): cloudwatch.Metric; +} + +abstract class JobBase extends cdk.Resource implements IJob { + + public abstract readonly jobArn: string; + public abstract readonly jobName: string; + public abstract readonly grantPrincipal: iam.IPrincipal; + + /** + * Create a CloudWatch Event Rule for this Glue Job when it's in a given state + * + * @param id construct id + * @param options event options. Note that some values are overridden if provided, these are + * - eventPattern.source = ['aws.glue'] + * - eventPattern.detailType = ['Glue Job State Change', 'Glue Job Run Status'] + * - eventPattern.detail.jobName = [this.jobName] + * + * @see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types + */ + public onEvent(id: string, options: events.OnEventOptions = {}): events.Rule { + const rule = new events.Rule(this, id, options); + rule.addTarget(options.target); + rule.addEventPattern({ + source: ['aws.glue'], + detailType: ['Glue Job State Change', 'Glue Job Run Status'], + detail: { + jobName: [this.jobName], + }, + }); + return rule; + } + + /** + * Create a CloudWatch Event Rule for the transition into the input jobState. + * + * @param id construct id. + * @param jobState the job state. + * @param options optional event options. + */ + public onStateChange(id: string, jobState: JobState, options: events.OnEventOptions = {}): events.Rule { + const rule = this.onEvent(id, { + description: `Rule triggered when Glue job ${this.jobName} is in ${jobState} state`, + ...options, + }); + rule.addEventPattern({ + detail: { + state: [jobState], + }, + }); + return rule; + } + + /** + * Create a CloudWatch Event Rule matching JobState.SUCCEEDED. + * + * @param id construct id. + * @param options optional event options. default is {}. + */ + public onSuccess(id: string, options: events.OnEventOptions = {}): events.Rule { + return this.onStateChange(id, JobState.SUCCEEDED, options); + } + + /** + * Return a CloudWatch Event Rule matching FAILED state. + * + * @param id construct id. + * @param options optional event options. default is {}. + */ + public onFailure(id: string, options: events.OnEventOptions = {}): events.Rule { + return this.onStateChange(id, JobState.FAILED, options); + } + + /** + * Return a CloudWatch Event Rule matching TIMEOUT state. + * + * @param id construct id. + * @param options optional event options. default is {}. + */ + public onTimeout(id: string, options: events.OnEventOptions = {}): events.Rule { + return this.onStateChange(id, JobState.TIMEOUT, options); + } + + /** + * Create a CloudWatch metric. + * + * @param metricName name of the metric typically prefixed with `glue.driver.`, `glue..` or `glue.ALL.`. + * @param type the metric type. + * @param props metric options. 
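+   *
+   * An illustrative sketch ('glue.driver.aggregate.bytesRead' is one of the
+   * Glue-provided metric names; treat it as a placeholder):
+   *
+   * ```ts
+   * declare const job: glue.IJob;
+   * job.metric('glue.driver.aggregate.bytesRead', glue.MetricType.COUNT);
+   * ```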
+   *
+   * @see https://docs.aws.amazon.com/glue/latest/dg/monitoring-awsglue-with-cloudwatch-metrics.html
+   */
+  public metric(metricName: string, type: MetricType, props?: cloudwatch.MetricOptions): cloudwatch.Metric {
+    return new cloudwatch.Metric({
+      metricName,
+      namespace: 'Glue',
+      dimensions: {
+        JobName: this.jobName,
+        JobRunId: 'ALL',
+        Type: type,
+      },
+      ...props,
+    }).attachTo(this);
+  }
+
+  /**
+   * Return a CloudWatch Metric indicating job success.
+   *
+   * This metric is based on the Rule returned by a no-args onSuccess() call.
+   */
+  public metricSuccess(props?: cloudwatch.MetricOptions): cloudwatch.Metric {
+    return metricRule(this.metricJobStateRule('SuccessMetricRule', JobState.SUCCEEDED), props);
+  }
+
+  /**
+   * Return a CloudWatch Metric indicating job failure.
+   *
+   * This metric is based on the Rule returned by a no-args onFailure() call.
+   */
+  public metricFailure(props?: cloudwatch.MetricOptions): cloudwatch.Metric {
+    return metricRule(this.metricJobStateRule('FailureMetricRule', JobState.FAILED), props);
+  }
+
+  /**
+   * Return a CloudWatch Metric indicating job timeout.
+   *
+   * This metric is based on the Rule returned by a no-args onTimeout() call.
+   */
+  public metricTimeout(props?: cloudwatch.MetricOptions): cloudwatch.Metric {
+    return metricRule(this.metricJobStateRule('TimeoutMetricRule', JobState.TIMEOUT), props);
+  }
+
+  /**
+   * Creates or retrieves a singleton event rule for the input job state, for use with the metric JobState methods.
+   *
+   * @param id construct id.
+   * @param jobState the job state.
+   * @private
+   */
+  private metricJobStateRule(id: string, jobState: JobState): events.Rule {
+    return this.node.tryFindChild(id) as events.Rule ?? this.onStateChange(id, jobState);
+  }
+}
+
+/**
+ * Properties for enabling the Spark UI monitoring feature for Spark-based Glue jobs.
+ *
+ * @see https://docs.aws.amazon.com/glue/latest/dg/monitor-spark-ui-jobs.html
+ * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+ */
+export interface SparkUIProps {
+  /**
+   * Enable Spark UI.
+   */
+  readonly enabled: boolean;
+
+  /**
+   * The bucket where the Glue job stores the logs.
+   *
+   * @default - a new bucket will be created.
+   */
+  readonly bucket?: s3.IBucket;
+
+  /**
+   * The path inside the bucket (objects prefix) where the Glue job stores the logs.
+   *
+   * @default '/' - the logs will be written at the root of the bucket
+   */
+  readonly prefix?: string;
+}
+
+/**
+ * The Spark UI logging location.
+ *
+ * @see https://docs.aws.amazon.com/glue/latest/dg/monitor-spark-ui-jobs.html
+ * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+ */
+export interface SparkUILoggingLocation {
+  /**
+   * The bucket where the Glue job stores the logs.
+   */
+  readonly bucket: s3.IBucket;
+
+  /**
+   * The path inside the bucket (objects prefix) where the Glue job stores the logs.
+   *
+   * @default '/' - the logs will be written at the root of the bucket
+   */
+  readonly prefix?: string;
+}
+
+/**
+ * Properties for enabling Continuous Logging for Glue Jobs.
+ *
+ * @see https://docs.aws.amazon.com/glue/latest/dg/monitor-continuous-logging-enable.html
+ * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html
+ */
+export interface ContinuousLoggingProps {
+  /**
+   * Enable continuous logging.
+   */
+  readonly enabled: boolean;
+
+  /**
+   * Specify a custom CloudWatch log group name.
+   *
+   * @default - a log group is created with name `/aws-glue/jobs/logs-v2/`.
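+   *
+   * If a log group is provided here, the job's IAM role is automatically
+   * granted permission to write to it.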
+ */ + readonly logGroup?: logs.ILogGroup; + + /** + * Specify a custom CloudWatch log stream prefix. + * + * @default - the job run ID. + */ + readonly logStreamPrefix?: string; + + /** + * Filter out non-useful Apache Spark driver/executor and Apache Hadoop YARN heartbeat log messages. + * + * @default true + */ + readonly quiet?: boolean; + + /** + * Apply the provided conversion pattern. + * + * This is a Log4j Conversion Pattern to customize driver and executor logs. + * + * @default `%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n` + */ + readonly conversionPattern?: string; +} + +/** + * Attributes for importing {@link Job}. + */ +export interface JobAttributes { + /** + * The name of the job. + */ + readonly jobName: string; + + /** + * The IAM role assumed by Glue to run this job. + * + * @default - undefined + */ + readonly role?: iam.IRole; +} + +/** + * Construction properties for {@link Job}. + */ +export interface JobProps { + /** + * The job's executable properties. + */ + readonly executable: JobExecutable; + + /** + * The name of the job. + * + * @default - a name is automatically generated + */ + readonly jobName?: string; + + /** + * The description of the job. + * + * @default - no value + */ + readonly description?: string; + + /** + * The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. + * Cannot be used for Glue version 2.0 and later - workerType and workerCount should be used instead. + * + * @default - 10 when job type is Apache Spark ETL or streaming, 0.0625 when job type is Python shell + */ + readonly maxCapacity?: number; + + /** + * The maximum number of times to retry this job after a job run fails. + * + * @default 0 + */ + readonly maxRetries?: number; + + /** + * The maximum number of concurrent runs allowed for the job. + * + * An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit. + * + * @default 1 + */ + readonly maxConcurrentRuns?: number; + + /** + * The number of minutes to wait after a job run starts, before sending a job run delay notification. + * + * @default - no delay notifications + */ + readonly notifyDelayAfter?: cdk.Duration; + + /** + * The maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. + * + * @default cdk.Duration.hours(48) + */ + readonly timeout?: cdk.Duration; + + /** + * The type of predefined worker that is allocated when a job runs. + * + * @default - differs based on specific Glue version + */ + readonly workerType?: WorkerType; + + /** + * The number of workers of a defined {@link WorkerType} that are allocated when a job runs. + * + * @default - differs based on specific Glue version/worker type + */ + readonly workerCount?: number; + + /** + * The {@link Connection}s used for this job. + * + * Connections are used to connect to other AWS Service or resources within a VPC. + * + * @default [] - no connections are added to the job + */ + readonly connections?: IConnection[]; + + /** + * The {@link SecurityConfiguration} to use for this job. + * + * @default - no security configuration. + */ + readonly securityConfiguration?: ISecurityConfiguration; + + /** + * The default arguments for this job, specified as name-value pairs. 
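+   * The arguments `--conf`, `--debug`, `--mode` and `--JOB_NAME` are reserved
+   * by Glue; supplying any of them here fails at construction time.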
+ * + * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html for a list of reserved parameters + * @default - no arguments + */ + readonly defaultArguments?: { [key: string]: string }; + + /** + * The tags to add to the resources on which the job runs + * + * @default {} - no tags + */ + readonly tags?: { [key: string]: string }; + + /** + * The IAM role assumed by Glue to run this job. + * + * If providing a custom role, it needs to trust the Glue service principal (glue.amazonaws.com) and be granted sufficient permissions. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/getting-started-access.html + * + * @default - a role is automatically generated + */ + readonly role?: iam.IRole; + + /** + * Enables the collection of metrics for job profiling. + * + * @default - no profiling metrics emitted. + * + * @see `--enable-metrics` at https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly enableProfilingMetrics? :boolean; + + /** + * Enables the Spark UI debugging and monitoring with the specified props. + * + * @default - Spark UI debugging and monitoring is disabled. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/monitor-spark-ui-jobs.html + * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly sparkUI?: SparkUIProps, + + /** + * Enables continuous logging with the specified props. + * + * @default - continuous logging is disabled. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/monitor-continuous-logging-enable.html + * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + readonly continuousLogging?: ContinuousLoggingProps, +} + +/** + * A Glue Job. + */ +export class Job extends JobBase { + /** + * Creates a Glue Job + * + * @param scope The scope creating construct (usually `this`). + * @param id The construct's id. + * @param attrs Import attributes + */ + public static fromJobAttributes(scope: constructs.Construct, id: string, attrs: JobAttributes): IJob { + class Import extends JobBase { + public readonly jobName = attrs.jobName; + public readonly jobArn = jobArn(scope, attrs.jobName); + public readonly grantPrincipal = attrs.role ?? new iam.UnknownPrincipal({ resource: this }); + } + + return new Import(scope, id); + } + + /** + * The ARN of the job. + */ + public readonly jobArn: string; + + /** + * The name of the job. + */ + public readonly jobName: string; + + /** + * The IAM role Glue assumes to run this job. + */ + public readonly role: iam.IRole; + + /** + * The principal this Glue Job is running as. + */ + public readonly grantPrincipal: iam.IPrincipal; + + /** + * The Spark UI logs location if Spark UI monitoring and debugging is enabled. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/monitor-spark-ui-jobs.html + * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + public readonly sparkUILoggingLocation?: SparkUILoggingLocation; + + constructor(scope: constructs.Construct, id: string, props: JobProps) { + super(scope, id, { + physicalName: props.jobName, + }); + + const executable = props.executable.bind(); + + this.role = props.role ?? 
new iam.Role(this, 'ServiceRole', { + assumedBy: new iam.ServicePrincipal('glue.amazonaws.com'), + managedPolicies: [iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AWSGlueServiceRole')], + }); + this.grantPrincipal = this.role; + + const sparkUI = props.sparkUI?.enabled ? this.setupSparkUI(executable, this.role, props.sparkUI) : undefined; + this.sparkUILoggingLocation = sparkUI?.location; + const continuousLoggingArgs = props.continuousLogging?.enabled ? this.setupContinuousLogging(this.role, props.continuousLogging) : {}; + const profilingMetricsArgs = props.enableProfilingMetrics ? { '--enable-metrics': '' } : {}; + + const defaultArguments = { + ...this.executableArguments(executable), + ...continuousLoggingArgs, + ...profilingMetricsArgs, + ...sparkUI?.args, + ...this.checkNoReservedArgs(props.defaultArguments), + }; + + const jobResource = new CfnJob(this, 'Resource', { + name: props.jobName, + description: props.description, + role: this.role.roleArn, + command: { + name: executable.type.name, + scriptLocation: this.codeS3ObjectUrl(executable.script), + pythonVersion: executable.pythonVersion, + }, + glueVersion: executable.glueVersion.name, + workerType: props.workerType?.name, + numberOfWorkers: props.workerCount, + maxCapacity: props.maxCapacity, + maxRetries: props.maxRetries, + executionProperty: props.maxConcurrentRuns ? { maxConcurrentRuns: props.maxConcurrentRuns } : undefined, + notificationProperty: props.notifyDelayAfter ? { notifyDelayAfter: props.notifyDelayAfter.toMinutes() } : undefined, + timeout: props.timeout?.toMinutes(), + connections: props.connections ? { connections: props.connections.map((connection) => connection.connectionName) } : undefined, + securityConfiguration: props.securityConfiguration?.securityConfigurationName, + tags: props.tags, + defaultArguments, + }); + + const resourceName = this.getResourceNameAttribute(jobResource.ref); + this.jobArn = jobArn(this, resourceName); + this.jobName = resourceName; + } + + /** + * Check no usage of reserved arguments. + * + * @see https://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-etl-glue-arguments.html + */ + private checkNoReservedArgs(defaultArguments?: { [key: string]: string }) { + if (defaultArguments) { + const reservedArgs = new Set(['--conf', '--debug', '--mode', '--JOB_NAME']); + Object.keys(defaultArguments).forEach((arg) => { + if (reservedArgs.has(arg)) { + throw new Error(`The ${arg} argument is reserved by Glue. 
Don't set it`); + } + }); + } + return defaultArguments; + } + + private executableArguments(config: JobExecutableConfig) { + const args: { [key: string]: string } = {}; + args['--job-language'] = config.language; + if (config.className) { + args['--class'] = config.className; + } + if (config.extraJars && config.extraJars?.length > 0) { + args['--extra-jars'] = config.extraJars.map(code => this.codeS3ObjectUrl(code)).join(','); + } + if (config.extraPythonFiles && config.extraPythonFiles.length > 0) { + args['--extra-py-files'] = config.extraPythonFiles.map(code => this.codeS3ObjectUrl(code)).join(','); + } + if (config.extraFiles && config.extraFiles.length > 0) { + args['--extra-files'] = config.extraFiles.map(code => this.codeS3ObjectUrl(code)).join(','); + } + if (config.extraJarsFirst) { + args['--user-jars-first'] = 'true'; + } + return args; + } + + private setupSparkUI(executable: JobExecutableConfig, role: iam.IRole, props: SparkUIProps) { + if (JobType.PYTHON_SHELL === executable.type) { + throw new Error('Spark UI is not available for JobType.PYTHON_SHELL jobs'); + } + + const bucket = props.bucket ?? new s3.Bucket(this, 'SparkUIBucket'); + bucket.grantReadWrite(role); + const args = { + '--enable-spark-ui': 'true', + '--spark-event-logs-path': bucket.s3UrlForObject(props.prefix), + }; + + return { + location: { + prefix: props.prefix, + bucket, + }, + args, + }; + } + + private setupContinuousLogging(role: iam.IRole, props: ContinuousLoggingProps) { + const args: {[key: string]: string} = { + '--enable-continuous-cloudwatch-log': 'true', + '--enable-continuous-log-filter': (props.quiet ?? true).toString(), + }; + + if (props.logGroup) { + args['--continuous-log-logGroup'] = props.logGroup.logGroupName; + props.logGroup.grantWrite(role); + } + + if (props.logStreamPrefix) { + args['--continuous-log-logStreamPrefix'] = props.logStreamPrefix; + } + if (props.conversionPattern) { + args['--continuous-log-conversionPattern'] = props.conversionPattern; + } + return args; + } + + private codeS3ObjectUrl(code: Code) { + const s3Location = code.bind(this, this.role).s3Location; + return `s3://${s3Location.bucketName}/${s3Location.objectKey}`; + } +} + +/** + * Create a CloudWatch Metric that's based on Glue Job events + * {@see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/EventTypes.html#glue-event-types} + * The metric has namespace = 'AWS/Events', metricName = 'TriggeredRules' and RuleName = rule.ruleName dimension. 
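+ * For example, the metric behind metricSuccess() counts how often the singleton
+ * SuccessMetricRule is triggered, which in turn tracks how often the job reaches
+ * the SUCCEEDED state.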
+ * + * @param rule for use in setting RuleName dimension value + * @param props metric properties + */ +function metricRule(rule: events.IRule, props?: cloudwatch.MetricOptions): cloudwatch.Metric { + return new cloudwatch.Metric({ + namespace: 'AWS/Events', + metricName: 'TriggeredRules', + dimensions: { RuleName: rule.ruleName }, + statistic: cloudwatch.Statistic.SUM, + ...props, + }).attachTo(rule); +} + + +/** + * Returns the job arn + * @param scope + * @param jobName + */ +function jobArn(scope: constructs.Construct, jobName: string) : string { + return cdk.Stack.of(scope).formatArn({ + service: 'glue', + resource: 'job', + resourceName: jobName, + }); +} diff --git a/packages/@aws-cdk/aws-glue/package.json b/packages/@aws-cdk/aws-glue/package.json index 5f11a3895db92..daaba9400606c 100644 --- a/packages/@aws-cdk/aws-glue/package.json +++ b/packages/@aws-cdk/aws-glue/package.json @@ -84,19 +84,29 @@ "pkglint": "0.0.0" }, "dependencies": { + "@aws-cdk/assets": "0.0.0", + "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-events": "0.0.0", "@aws-cdk/aws-ec2": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", "@aws-cdk/aws-s3": "0.0.0", + "@aws-cdk/aws-s3-assets": "0.0.0", "@aws-cdk/core": "0.0.0", "constructs": "^3.3.69" }, "homepage": "https://github.com/aws/aws-cdk", "peerDependencies": { + "@aws-cdk/assets": "0.0.0", + "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-events": "0.0.0", "@aws-cdk/aws-ec2": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", + "@aws-cdk/aws-logs": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", "@aws-cdk/aws-s3": "0.0.0", + "@aws-cdk/aws-s3-assets": "0.0.0", "@aws-cdk/core": "0.0.0", "constructs": "^3.3.69" }, @@ -143,7 +153,8 @@ "docs-public-apis:@aws-cdk/aws-glue.ClassificationString.XML", "docs-public-apis:@aws-cdk/aws-glue.ClassificationString.PARQUET", "docs-public-apis:@aws-cdk/aws-glue.ClassificationString.ORC", - "docs-public-apis:@aws-cdk/aws-glue.ClassificationString.value" + "docs-public-apis:@aws-cdk/aws-glue.ClassificationString.value", + "events-method-signature:@aws-cdk/aws-glue.Job.onStateChange" ] }, "awscdkio": { diff --git a/packages/@aws-cdk/aws-glue/test/code.test.ts b/packages/@aws-cdk/aws-glue/test/code.test.ts new file mode 100644 index 0000000000000..061f6d26c351f --- /dev/null +++ b/packages/@aws-cdk/aws-glue/test/code.test.ts @@ -0,0 +1,304 @@ +import * as path from 'path'; +import { Template } from '@aws-cdk/assertions'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import * as glue from '../lib'; + +describe('Code', () => { + let stack: cdk.Stack; + let script: glue.Code; + + beforeEach(() => { + stack = new cdk.Stack(); + }); + + describe('.fromBucket()', () => { + const key = 'script'; + let bucket: s3.IBucket; + + test('with valid bucket name and key and bound by job sets the right path and grants the job permissions to read from it', () => { + bucket = s3.Bucket.fromBucketName(stack, 'Bucket', 'bucketName'); + script = glue.Code.fromBucket(bucket, key); + new glue.Job(stack, 'Job1', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + }); + + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Command: { + ScriptLocation: 's3://bucketName/script', + }, + }); + + // Role policy should grant reading from the assets bucket + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: [ + 
's3:GetObject*', + 's3:GetBucket*', + 's3:List*', + ], + Effect: 'Allow', + Resource: [ + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':s3:::bucketName', + ], + ], + }, + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':s3:::bucketName/script', + ], + ], + }, + ], + }, + ], + }, + Roles: [ + { + Ref: 'Job1ServiceRole7AF34CCA', + }, + ], + }); + }); + }); + + describe('.fromAsset()', () => { + const filePath = path.join(__dirname, 'job-script/hello_world.py'); + const directoryPath = path.join(__dirname, 'job-script'); + + beforeEach(() => { + script = glue.Code.fromAsset(filePath); + }); + + test("with valid and existing file path and bound to job sets job's script location and permissions stack metadata", () => { + new glue.Job(stack, 'Job1', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + }); + + expect(stack.node.metadata.find(m => m.type === 'aws:cdk:asset')).toBeDefined(); + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Command: { + ScriptLocation: { + 'Fn::Join': [ + '', + [ + 's3://', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469', + }, + '/', + { + 'Fn::Select': [ + 0, + { + 'Fn::Split': [ + '||', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763', + }, + ], + }, + ], + }, + { + 'Fn::Select': [ + 1, + { + 'Fn::Split': [ + '||', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763', + }, + ], + }, + ], + }, + ], + ], + }, + }, + }); + // Role policy should grant reading from the assets bucket + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: [ + 's3:GetObject*', + 's3:GetBucket*', + 's3:List*', + ], + Effect: 'Allow', + Resource: [ + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':s3:::', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469', + }, + ], + ], + }, + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':s3:::', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469', + }, + '/*', + ], + ], + }, + ], + }, + ], + }, + Roles: [ + { + Ref: 'Job1ServiceRole7AF34CCA', + }, + ], + }); + }); + + test('with an unsupported directory path throws', () => { + expect(() => glue.Code.fromAsset(directoryPath)) + .toThrow(/Only files are supported/); + }); + + test('used in more than 1 job in the same stack should be reused', () => { + new glue.Job(stack, 'Job1', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + }); + new glue.Job(stack, 'Job2', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + }); + const ScriptLocation = { + 'Fn::Join': [ + '', + [ + 's3://', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469', + }, + '/', + { + 'Fn::Select': [ + 0, + { + 'Fn::Split': [ + '||', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763', + }, + ], + }, + ], + }, + { + 'Fn::Select': [ + 1, + { + 
'Fn::Split': [ + '||', + { + Ref: 'AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763', + }, + ], + }, + ], + }, + ], + ], + }; + + expect(stack.node.metadata.find(m => m.type === 'aws:cdk:asset')).toBeDefined(); + // Job1 and Job2 use reuse the asset + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Command: { + ScriptLocation, + }, + Role: { + 'Fn::GetAtt': [ + 'Job1ServiceRole7AF34CCA', + 'Arn', + ], + }, + }); + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Command: { + ScriptLocation, + }, + Role: { + 'Fn::GetAtt': [ + 'Job2ServiceRole5D2B98FE', + 'Arn', + ], + }, + }); + }); + + test('throws if trying to rebind in another stack', () => { + new glue.Job(stack, 'Job1', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + }); + const differentStack = new cdk.Stack(); + + expect(() => new glue.Job(differentStack, 'Job2', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script: script, + }), + })).toThrow(/associated with another stack/); + }); + }); +}); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-glue/test/integ.job.expected.json b/packages/@aws-cdk/aws-glue/test/integ.job.expected.json new file mode 100644 index 0000000000000..61f4f60434db1 --- /dev/null +++ b/packages/@aws-cdk/aws-glue/test/integ.job.expected.json @@ -0,0 +1,571 @@ +{ + "Resources": { + "EtlJobServiceRole837F781B": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "glue.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSGlueServiceRole" + ] + ] + } + ] + } + }, + "EtlJobServiceRoleDefaultPolicy8BFE343B": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*", + "s3:DeleteObject*", + "s3:PutObject", + "s3:Abort*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::GetAtt": [ + "EtlJobSparkUIBucketBF23744B", + "Arn" + ] + }, + { + "Fn::Join": [ + "", + [ + { + "Fn::GetAtt": [ + "EtlJobSparkUIBucketBF23744B", + "Arn" + ] + }, + "/*" + ] + ] + } + ] + }, + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":s3:::", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + } + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":s3:::", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "EtlJobServiceRoleDefaultPolicy8BFE343B", + "Roles": [ + { + "Ref": "EtlJobServiceRole837F781B" + } + ] + } + }, + "EtlJobSparkUIBucketBF23744B": { + "Type": "AWS::S3::Bucket", + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "EtlJob7FC88E45": { + "Type": "AWS::Glue::Job", + "Properties": { + "Command": { + "Name": "glueetl", + "PythonVersion": "3", + "ScriptLocation": { + "Fn::Join": [ + "", + 
[ + "s3://", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + }, + "/", + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "EtlJobServiceRole837F781B", + "Arn" + ] + }, + "DefaultArguments": { + "--job-language": "python", + "--enable-continuous-cloudwatch-log": "true", + "--enable-continuous-log-filter": "true", + "--continuous-log-logStreamPrefix": "EtlJob", + "--enable-spark-ui": "true", + "--spark-event-logs-path": { + "Fn::Join": [ + "", + [ + "s3://", + { + "Ref": "EtlJobSparkUIBucketBF23744B" + } + ] + ] + }, + "arg1": "value1", + "arg2": "value2" + }, + "ExecutionProperty": { + "MaxConcurrentRuns": 2 + }, + "GlueVersion": "2.0", + "MaxRetries": 2, + "Name": "EtlJob", + "NotificationProperty": { + "NotifyDelayAfter": 1 + }, + "NumberOfWorkers": 10, + "Tags": { + "key": "value" + }, + "Timeout": 5, + "WorkerType": "G.2X" + } + }, + "EtlJobSuccessMetricRuleA72A3EF6": { + "Type": "AWS::Events::Rule", + "Properties": { + "Description": { + "Fn::Join": [ + "", + [ + "Rule triggered when Glue job ", + { + "Ref": "EtlJob7FC88E45" + }, + " is in SUCCEEDED state" + ] + ] + }, + "EventPattern": { + "source": [ + "aws.glue" + ], + "detail-type": [ + "Glue Job State Change", + "Glue Job Run Status" + ], + "detail": { + "jobName": [ + { + "Ref": "EtlJob7FC88E45" + } + ], + "state": [ + "SUCCEEDED" + ] + } + }, + "State": "ENABLED" + } + }, + "StreamingJobServiceRole1B4B8BF9": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "glue.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSGlueServiceRole" + ] + ] + } + ] + } + }, + "StreamingJobServiceRoleDefaultPolicyA0CC4C68": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":s3:::", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + } + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":s3:::", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "StreamingJobServiceRoleDefaultPolicyA0CC4C68", + "Roles": [ + { + "Ref": "StreamingJobServiceRole1B4B8BF9" + } + ] + } + }, + "StreamingJob3783CC17": { + "Type": "AWS::Glue::Job", + "Properties": { + "Command": { + "Name": "gluestreaming", + "PythonVersion": "3", + "ScriptLocation": { + "Fn::Join": [ + "", + [ + "s3://", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + }, + "/", + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": 
"AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "StreamingJobServiceRole1B4B8BF9", + "Arn" + ] + }, + "DefaultArguments": { + "--job-language": "python", + "arg1": "value1", + "arg2": "value2" + }, + "GlueVersion": "2.0", + "Name": "StreamingJob", + "Tags": { + "key": "value" + } + } + }, + "ShellJobServiceRoleCF97BC4B": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "glue.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSGlueServiceRole" + ] + ] + } + ] + } + }, + "ShellJobServiceRoleDefaultPolicy7F22D315": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "s3:GetObject*", + "s3:GetBucket*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":s3:::", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + } + ] + ] + }, + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":s3:::", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + }, + "/*" + ] + ] + } + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "ShellJobServiceRoleDefaultPolicy7F22D315", + "Roles": [ + { + "Ref": "ShellJobServiceRoleCF97BC4B" + } + ] + } + }, + "ShellJob42E81F95": { + "Type": "AWS::Glue::Job", + "Properties": { + "Command": { + "Name": "pythonshell", + "PythonVersion": "3", + "ScriptLocation": { + "Fn::Join": [ + "", + [ + "s3://", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469" + }, + "/", + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "ShellJobServiceRoleCF97BC4B", + "Arn" + ] + }, + "DefaultArguments": { + "--job-language": "python", + "arg1": "value1", + "arg2": "value2" + }, + "GlueVersion": "1.0", + "Name": "ShellJob", + "Tags": { + "key": "value" + } + } + } + }, + "Parameters": { + "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3Bucket4E517469": { + "Type": "String", + "Description": "S3 bucket for asset \"432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855\"" + }, + "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855S3VersionKeyF7753763": { + "Type": "String", + "Description": "S3 key for asset version \"432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855\"" + }, + "AssetParameters432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855ArtifactHash0C610005": { + "Type": "String", + "Description": 
"Artifact hash for asset \"432033e3218068a915d2532fa9be7858a12b228a2ae6e5c10faccd9097b1e855\"" + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-glue/test/integ.job.ts b/packages/@aws-cdk/aws-glue/test/integ.job.ts new file mode 100644 index 0000000000000..fedbc0b8b8428 --- /dev/null +++ b/packages/@aws-cdk/aws-glue/test/integ.job.ts @@ -0,0 +1,89 @@ +import * as path from 'path'; +import * as cdk from '@aws-cdk/core'; +import * as glue from '../lib'; + +/** + * To verify the ability to run jobs created in this test + * + * Run the job using + * `aws glue start-job-run --region us-east-1 --job-name ` + * This will return a runId + * + * Get the status of the job run using + * `aws glue get-job-run --region us-east-1 --job-name --run-id ` + * + * For example, to test the ShellJob + * - Run: `aws glue start-job-run --region us-east-1 --job-name ShellJob` + * - Get Status: `aws glue get-job-run --region us-east-1 --job-name ShellJob --run-id ` + * - Check output: `aws logs get-log-events --region us-east-1 --log-group-name "/aws-glue/python-jobs/output" --log-stream-name ">` which should show "hello world" + */ +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-glue-job'); + +const script = glue.Code.fromAsset(path.join(__dirname, 'job-script/hello_world.py')); + +const etlJob = new glue.Job(stack, 'EtlJob', { + jobName: 'EtlJob', + executable: glue.JobExecutable.pythonEtl({ + glueVersion: glue.GlueVersion.V2_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + workerType: glue.WorkerType.G_2X, + workerCount: 10, + maxConcurrentRuns: 2, + maxRetries: 2, + timeout: cdk.Duration.minutes(5), + notifyDelayAfter: cdk.Duration.minutes(1), + defaultArguments: { + arg1: 'value1', + arg2: 'value2', + }, + sparkUI: { + enabled: true, + }, + continuousLogging: { + enabled: true, + quiet: true, + logStreamPrefix: 'EtlJob', + }, + tags: { + key: 'value', + }, +}); +etlJob.metricSuccess(); + +new glue.Job(stack, 'StreamingJob', { + jobName: 'StreamingJob', + executable: glue.JobExecutable.pythonStreaming({ + glueVersion: glue.GlueVersion.V2_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + defaultArguments: { + arg1: 'value1', + arg2: 'value2', + }, + tags: { + key: 'value', + }, +}); + +new glue.Job(stack, 'ShellJob', { + jobName: 'ShellJob', + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + defaultArguments: { + arg1: 'value1', + arg2: 'value2', + }, + tags: { + key: 'value', + }, +}); + +app.synth(); diff --git a/packages/@aws-cdk/aws-glue/test/job-executable.test.ts b/packages/@aws-cdk/aws-glue/test/job-executable.test.ts new file mode 100644 index 0000000000000..481bd16dc8944 --- /dev/null +++ b/packages/@aws-cdk/aws-glue/test/job-executable.test.ts @@ -0,0 +1,106 @@ +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import * as glue from '../lib'; + +describe('GlueVersion', () => { + test('.V0_9 should set the name correctly', () => expect(glue.GlueVersion.V0_9.name).toEqual('0.9')); + + test('.V1_0 should set the name correctly', () => expect(glue.GlueVersion.V1_0.name).toEqual('1.0')); + + test('.V2_0 should set the name correctly', () => expect(glue.GlueVersion.V2_0.name).toEqual('2.0')); + + test('.V3_0 should set the name correctly', () => expect(glue.GlueVersion.V3_0.name).toEqual('3.0')); + + test('of(customVersion) should set the name correctly', () => 
expect(glue.GlueVersion.of('CustomVersion').name).toEqual('CustomVersion')); +}); + +describe('JobType', () => { + test('.ETL should set the name correctly', () => expect(glue.JobType.ETL.name).toEqual('glueetl')); + + test('.STREAMING should set the name correctly', () => expect(glue.JobType.STREAMING.name).toEqual('gluestreaming')); + + test('.PYTHON_SHELL should set the name correctly', () => expect(glue.JobType.PYTHON_SHELL.name).toEqual('pythonshell')); + + test('of(customName) should set the name correctly', () => expect(glue.JobType.of('CustomName').name).toEqual('CustomName')); +}); + +describe('JobExecutable', () => { + let stack: cdk.Stack; + let bucket: s3.IBucket; + let script: glue.Code; + + beforeEach(() => { + stack = new cdk.Stack(); + bucket = s3.Bucket.fromBucketName(stack, 'Bucket', 'bucketName'); + script = glue.Code.fromBucket(bucket, 'script.py'); + }); + + describe('.of()', () => { + test('with valid config should succeed', () => { + expect(glue.JobExecutable.of({ + glueVersion: glue.GlueVersion.V1_0, + type: glue.JobType.PYTHON_SHELL, + language: glue.JobLanguage.PYTHON, + pythonVersion: glue.PythonVersion.THREE, + script, + })).toBeDefined(); + }); + + test('with JobType.PYTHON_SHELL and a language other than JobLanguage.PYTHON should throw', () => { + expect(() => glue.JobExecutable.of({ + glueVersion: glue.GlueVersion.V3_0, + type: glue.JobType.PYTHON_SHELL, + language: glue.JobLanguage.SCALA, + script, + })).toThrow(/Python shell requires the language to be set to Python/); + }); + + test('with a non JobLanguage.PYTHON and extraPythonFiles set should throw', () => { + expect(() => glue.JobExecutable.of({ + glueVersion: glue.GlueVersion.V3_0, + type: glue.JobType.ETL, + language: glue.JobLanguage.SCALA, + className: 'com.Test', + extraPythonFiles: [script], + script, + })).toThrow(/extraPythonFiles is not supported for languages other than JobLanguage.PYTHON/); + }); + + [glue.GlueVersion.V0_9, glue.GlueVersion.V2_0, glue.GlueVersion.V3_0].forEach((glueVersion) => { + test(`with JobType.PYTHON_SHELL and GlueVersion ${glueVersion} should throw`, () => { + expect(() => glue.JobExecutable.of({ + type: glue.JobType.PYTHON_SHELL, + language: glue.JobLanguage.PYTHON, + pythonVersion: glue.PythonVersion.TWO, + script, + glueVersion, + })).toThrow(`Specified GlueVersion ${glueVersion.name} does not support Python Shell`); + }); + }); + + [glue.GlueVersion.V0_9, glue.GlueVersion.V1_0].forEach((glueVersion) => { + test(`with extraJarsFirst set and GlueVersion ${glueVersion.name} should throw`, () => { + expect(() => glue.JobExecutable.of({ + type: glue.JobType.ETL, + language: glue.JobLanguage.PYTHON, + pythonVersion: glue.PythonVersion.TWO, + extraJarsFirst: true, + script, + glueVersion, + })).toThrow(`Specified GlueVersion ${glueVersion.name} does not support extraJarsFirst`); + }); + }); + + [glue.GlueVersion.V2_0, glue.GlueVersion.V3_0].forEach((glueVersion) => { + test(`with PythonVersion.TWO and GlueVersion ${glueVersion} should throw`, () => { + expect(() => glue.JobExecutable.of({ + type: glue.JobType.ETL, + language: glue.JobLanguage.PYTHON, + pythonVersion: glue.PythonVersion.TWO, + script, + glueVersion, + })).toThrow(`Specified GlueVersion ${glueVersion.name} does not support PythonVersion 2`); + }); + }); + }); +}); \ No newline at end of file diff --git a/packages/@aws-cdk/aws-glue/test/job-script/hello_world.py b/packages/@aws-cdk/aws-glue/test/job-script/hello_world.py new file mode 100644 index 0000000000000..e75154b7c390f --- /dev/null +++ 
b/packages/@aws-cdk/aws-glue/test/job-script/hello_world.py @@ -0,0 +1 @@ +print("hello world") \ No newline at end of file diff --git a/packages/@aws-cdk/aws-glue/test/job-script/hello_world_2.py b/packages/@aws-cdk/aws-glue/test/job-script/hello_world_2.py new file mode 100644 index 0000000000000..e75154b7c390f --- /dev/null +++ b/packages/@aws-cdk/aws-glue/test/job-script/hello_world_2.py @@ -0,0 +1 @@ +print("hello world") \ No newline at end of file diff --git a/packages/@aws-cdk/aws-glue/test/job.test.ts b/packages/@aws-cdk/aws-glue/test/job.test.ts new file mode 100644 index 0000000000000..625e4743570fd --- /dev/null +++ b/packages/@aws-cdk/aws-glue/test/job.test.ts @@ -0,0 +1,842 @@ +import { Template } from '@aws-cdk/assertions'; +import * as cloudwatch from '@aws-cdk/aws-cloudwatch'; +import * as events from '@aws-cdk/aws-events'; +import * as iam from '@aws-cdk/aws-iam'; +import * as logs from '@aws-cdk/aws-logs'; +import * as s3 from '@aws-cdk/aws-s3'; +import * as cdk from '@aws-cdk/core'; +import * as glue from '../lib'; + +describe('WorkerType', () => { + test('.STANDARD should set the name correctly', () => expect(glue.WorkerType.STANDARD.name).toEqual('Standard')); + + test('.G_1X should set the name correctly', () => expect(glue.WorkerType.G_1X.name).toEqual('G.1X')); + + test('.G_2X should set the name correctly', () => expect(glue.WorkerType.G_2X.name).toEqual('G.2X')); + + test('of(customType) should set name correctly', () => expect(glue.WorkerType.of('CustomType').name).toEqual('CustomType')); +}); + +describe('Job', () => { + const jobName = 'test-job'; + let stack: cdk.Stack; + + beforeEach(() => { + stack = new cdk.Stack(); + }); + + describe('.fromJobAttributes()', () => { + test('with required attrs only', () => { + const job = glue.Job.fromJobAttributes(stack, 'ImportedJob', { jobName }); + + expect(job.jobName).toEqual(jobName); + expect(job.jobArn).toEqual(stack.formatArn({ + service: 'glue', + resource: 'job', + resourceName: jobName, + })); + expect(job.grantPrincipal).toEqual(new iam.UnknownPrincipal({ resource: job })); + }); + + test('with all attrs', () => { + const role = iam.Role.fromRoleArn(stack, 'Role', 'arn:aws:iam::123456789012:role/TestRole'); + const job = glue.Job.fromJobAttributes(stack, 'ImportedJob', { jobName, role }); + + expect(job.jobName).toEqual(jobName); + expect(job.jobArn).toEqual(stack.formatArn({ + service: 'glue', + resource: 'job', + resourceName: jobName, + })); + expect(job.grantPrincipal).toEqual(role); + }); + }); + + + describe('new', () => { + const className = 'com.amazon.test.ClassName'; + const codeBucketName = 'bucketName'; + const codeBucketAccessStatement = { + Action: [ + 's3:GetObject*', + 's3:GetBucket*', + 's3:List*', + ], + Effect: 'Allow', + Resource: [ + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + `:s3:::${codeBucketName}`, + ], + ], + }, + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + `:s3:::${codeBucketName}/script`, + ], + ], + }, + ], + }; + let codeBucket: s3.IBucket; + let script: glue.Code; + let extraJars: glue.Code[]; + let extraFiles: glue.Code[]; + let extraPythonFiles: glue.Code[]; + let job: glue.Job; + let defaultProps: glue.JobProps; + + beforeEach(() => { + codeBucket = s3.Bucket.fromBucketName(stack, 'CodeBucket', codeBucketName); + script = glue.Code.fromBucket(codeBucket, 'script'); + extraJars = [glue.Code.fromBucket(codeBucket, 'file1.jar'), glue.Code.fromBucket(codeBucket, 'file2.jar')]; + extraPythonFiles = 
[glue.Code.fromBucket(codeBucket, 'file1.py'), glue.Code.fromBucket(codeBucket, 'file2.py')]; + extraFiles = [glue.Code.fromBucket(codeBucket, 'file1.txt'), glue.Code.fromBucket(codeBucket, 'file2.txt')]; + defaultProps = { + executable: glue.JobExecutable.scalaEtl({ + glueVersion: glue.GlueVersion.V2_0, + className, + script, + }), + }; + }); + + describe('with necessary props only', () => { + beforeEach(() => { + job = new glue.Job(stack, 'Job', defaultProps); + }); + + test('should create a role and use it with the job', () => { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Role', { + AssumeRolePolicyDocument: { + Statement: [ + { + Action: 'sts:AssumeRole', + Effect: 'Allow', + Principal: { + Service: 'glue.amazonaws.com', + }, + }, + ], + Version: '2012-10-17', + }, + ManagedPolicyArns: [ + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':iam::aws:policy/service-role/AWSGlueServiceRole', + ], + ], + }, + ], + }); + + // Role policy should grant reading from the assets bucket + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + codeBucketAccessStatement, + ], + }, + Roles: [ + { + Ref: 'JobServiceRole4F432993', + }, + ], + }); + + // check the job using the role + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Command: { + Name: 'glueetl', + ScriptLocation: 's3://bucketName/script', + }, + Role: { + 'Fn::GetAtt': [ + 'JobServiceRole4F432993', + 'Arn', + ], + }, + }); + }); + + test('should return correct jobName and jobArn from CloudFormation', () => { + expect(stack.resolve(job.jobName)).toEqual({ Ref: 'JobB9D00F9F' }); + expect(stack.resolve(job.jobArn)).toEqual({ + 'Fn::Join': ['', ['arn:', { Ref: 'AWS::Partition' }, ':glue:', { Ref: 'AWS::Region' }, ':', { Ref: 'AWS::AccountId' }, ':job/', { Ref: 'JobB9D00F9F' }]], + }); + }); + + test('with a custom role should use it and set it in CloudFormation', () => { + const role = iam.Role.fromRoleArn(stack, 'Role', 'arn:aws:iam::123456789012:role/TestRole'); + job = new glue.Job(stack, 'JobWithRole', { + ...defaultProps, + role, + }); + + expect(job.grantPrincipal).toEqual(role); + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Role: role.roleArn, + }); + }); + + test('with a custom jobName should set it in CloudFormation', () => { + job = new glue.Job(stack, 'JobWithName', { + ...defaultProps, + jobName, + }); + + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Name: jobName, + }); + }); + }); + + describe('enabling continuous logging with defaults', () => { + beforeEach(() => { + job = new glue.Job(stack, 'Job', { + ...defaultProps, + continuousLogging: { enabled: true }, + }); + }); + + test('should set minimal default arguments', () => { + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + DefaultArguments: { + '--enable-continuous-cloudwatch-log': 'true', + '--enable-continuous-log-filter': 'true', + }, + }); + }); + }); + + describe('enabling continuous logging with all props set', () => { + let logGroup; + + beforeEach(() => { + logGroup = logs.LogGroup.fromLogGroupName(stack, 'LogGroup', 'LogGroupName'); + job = new glue.Job(stack, 'Job', { + ...defaultProps, + continuousLogging: { + enabled: true, + quiet: false, + logStreamPrefix: 'LogStreamPrefix', + conversionPattern: '%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n', + logGroup, + }, + }); + }); + + test('should set all arguments', () => { + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', 
{ + DefaultArguments: { + '--enable-continuous-cloudwatch-log': 'true', + '--enable-continuous-log-filter': 'false', + '--continuous-log-logGroup': 'LogGroupName', + '--continuous-log-logStreamPrefix': 'LogStreamPrefix', + '--continuous-log-conversionPattern': '%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n', + }, + }); + }); + + test('should grant cloudwatch log write permissions', () => { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: [ + 'logs:CreateLogStream', + 'logs:PutLogEvents', + ], + Effect: 'Allow', + Resource: { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':logs:', + { + Ref: 'AWS::Region', + }, + ':', + { + Ref: 'AWS::AccountId', + }, + ':log-group:LogGroupName:*', + ], + ], + }, + }, + codeBucketAccessStatement, + ], + }, + Roles: [ + { + Ref: 'JobServiceRole4F432993', + }, + ], + }); + }); + }); + + describe('enabling spark ui', () => { + describe('with no bucket or path provided', () => { + beforeEach(() => { + job = new glue.Job(stack, 'Job', { + ...defaultProps, + sparkUI: { enabled: true }, + }); + }); + + test('should create spark ui bucket', () => { + Template.fromStack(stack).resourceCountIs('AWS::S3::Bucket', 1); + }); + + test('should grant the role read/write permissions to the spark ui bucket', () => { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: [ + 's3:GetObject*', + 's3:GetBucket*', + 's3:List*', + 's3:DeleteObject*', + 's3:PutObject*', + 's3:Abort*', + ], + Effect: 'Allow', + Resource: [ + { + 'Fn::GetAtt': [ + 'JobSparkUIBucket8E6A0139', + 'Arn', + ], + }, + { + 'Fn::Join': [ + '', + [ + { + 'Fn::GetAtt': [ + 'JobSparkUIBucket8E6A0139', + 'Arn', + ], + }, + '/*', + ], + ], + }, + ], + }, + codeBucketAccessStatement, + ], + Version: '2012-10-17', + }, + PolicyName: 'JobServiceRoleDefaultPolicy03F68F9D', + Roles: [ + { + Ref: 'JobServiceRole4F432993', + }, + ], + }); + }); + + test('should set spark arguments on the job', () => { + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + DefaultArguments: { + '--enable-spark-ui': 'true', + '--spark-event-logs-path': { + 'Fn::Join': [ + '', + [ + 's3://', + { + Ref: 'JobSparkUIBucket8E6A0139', + }, + ], + ], + }, + }, + }); + }); + }); + + describe('with bucket provided', () => { + const sparkUIBucketName = 'sparkBucketName'; + let sparkUIBucket: s3.IBucket; + + beforeEach(() => { + sparkUIBucket = s3.Bucket.fromBucketName(stack, 'SparkBucketId', sparkUIBucketName); + job = new glue.Job(stack, 'Job', { + ...defaultProps, + sparkUI: { + enabled: true, + bucket: sparkUIBucket, + }, + }); + }); + + test('should grant the role read/write permissions to the provided spark ui bucket', () => { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: [ + { + Action: [ + 's3:GetObject*', + 's3:GetBucket*', + 's3:List*', + 's3:DeleteObject*', + 's3:PutObject*', + 's3:Abort*', + ], + Effect: 'Allow', + Resource: [ + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':s3:::sparkBucketName', + ], + ], + }, + { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':s3:::sparkBucketName/*', + ], + ], + }, + ], + }, + codeBucketAccessStatement, + ], + }, + Roles: [ + { + Ref: 'JobServiceRole4F432993', + }, + ], + }); + }); + + test('should set spark arguments on the job', () => { + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + 
DefaultArguments: { + '--enable-spark-ui': 'true', + '--spark-event-logs-path': `s3://${sparkUIBucketName}`, + }, + }); + }); + }); + + describe('with bucket and path provided', () => { + const sparkUIBucketName = 'sparkBucketName'; + const prefix = 'some/path/'; + let sparkUIBucket: s3.IBucket; + + beforeEach(() => { + sparkUIBucket = s3.Bucket.fromBucketName(stack, 'BucketId', sparkUIBucketName); + job = new glue.Job(stack, 'Job', { + ...defaultProps, + sparkUI: { + enabled: true, + bucket: sparkUIBucket, + prefix, + }, + }); + }); + + test('should set spark arguments on the job', () => { + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + DefaultArguments: { + '--enable-spark-ui': 'true', + '--spark-event-logs-path': `s3://${sparkUIBucketName}/${prefix}`, + }, + }); + }); + }); + }); + + describe('with extended props', () => { + beforeEach(() => { + job = new glue.Job(stack, 'Job', { + ...defaultProps, + jobName, + description: 'test job', + workerType: glue.WorkerType.G_2X, + workerCount: 10, + maxConcurrentRuns: 2, + maxRetries: 2, + timeout: cdk.Duration.minutes(5), + notifyDelayAfter: cdk.Duration.minutes(1), + defaultArguments: { + arg1: 'value1', + arg2: 'value2', + }, + connections: [glue.Connection.fromConnectionName(stack, 'ImportedConnection', 'ConnectionName')], + securityConfiguration: glue.SecurityConfiguration.fromSecurityConfigurationName(stack, 'ImportedSecurityConfiguration', 'SecurityConfigurationName'), + enableProfilingMetrics: true, + tags: { + key: 'value', + }, + }); + }); + + test('should synthesize correctly', () => { + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + Command: { + Name: 'glueetl', + ScriptLocation: 's3://bucketName/script', + }, + Role: { + 'Fn::GetAtt': [ + 'JobServiceRole4F432993', + 'Arn', + ], + }, + DefaultArguments: { + '--job-language': 'scala', + '--class': 'com.amazon.test.ClassName', + '--enable-metrics': '', + 'arg1': 'value1', + 'arg2': 'value2', + }, + Description: 'test job', + ExecutionProperty: { + MaxConcurrentRuns: 2, + }, + GlueVersion: '2.0', + MaxRetries: 2, + Name: 'test-job', + NotificationProperty: { + NotifyDelayAfter: 1, + }, + NumberOfWorkers: 10, + Tags: { + key: 'value', + }, + Timeout: 5, + WorkerType: 'G.2X', + Connections: { + Connections: [ + 'ConnectionName', + ], + }, + SecurityConfiguration: 'SecurityConfigurationName', + }); + }); + }); + + test('with reserved args should throw', () => { + ['--conf', '--debug', '--mode', '--JOB_NAME'].forEach((arg, index) => { + const defaultArguments: {[key: string]: string} = {}; + defaultArguments[arg] = 'random value'; + + expect(() => new glue.Job(stack, `Job${index}`, { + executable: glue.JobExecutable.scalaEtl({ + glueVersion: glue.GlueVersion.V2_0, + className, + script, + }), + defaultArguments, + })).toThrow(/argument is reserved by Glue/); + }); + }); + + describe('shell job', () => { + test('with unsupported glue version should throw', () => { + expect(() => new glue.Job(stack, 'Job', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V0_9, + pythonVersion: glue.PythonVersion.TWO, + script, + }), + })).toThrow('Specified GlueVersion 0.9 does not support Python Shell'); + }); + + test('with unsupported Spark UI prop should throw', () => { + expect(() => new glue.Job(stack, 'Job', { + executable: glue.JobExecutable.pythonShell({ + glueVersion: glue.GlueVersion.V1_0, + pythonVersion: glue.PythonVersion.THREE, + script, + }), + sparkUI: { enabled: true }, + })).toThrow('Spark UI is not available 
for JobType.PYTHON_SHELL'); + }); + }); + + + test('etl job with all props should synthesize correctly', () => { + new glue.Job(stack, 'Job', { + executable: glue.JobExecutable.pythonEtl({ + glueVersion: glue.GlueVersion.V2_0, + pythonVersion: glue.PythonVersion.THREE, + extraJarsFirst: true, + script, + extraPythonFiles, + extraJars, + extraFiles, + }), + }); + + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + GlueVersion: '2.0', + Command: { + Name: 'glueetl', + ScriptLocation: 's3://bucketName/script', + PythonVersion: '3', + }, + Role: { + 'Fn::GetAtt': [ + 'JobServiceRole4F432993', + 'Arn', + ], + }, + DefaultArguments: { + '--job-language': 'python', + '--extra-jars': 's3://bucketName/file1.jar,s3://bucketName/file2.jar', + '--extra-py-files': 's3://bucketName/file1.py,s3://bucketName/file2.py', + '--extra-files': 's3://bucketName/file1.txt,s3://bucketName/file2.txt', + '--user-jars-first': 'true', + }, + }); + }); + + test('streaming job with all props should synthesize correctly', () => { + new glue.Job(stack, 'Job', { + executable: glue.JobExecutable.scalaStreaming({ + glueVersion: glue.GlueVersion.V2_0, + extraJarsFirst: true, + className, + script, + extraJars, + extraFiles, + }), + }); + + Template.fromStack(stack).hasResourceProperties('AWS::Glue::Job', { + GlueVersion: '2.0', + Command: { + Name: 'gluestreaming', + ScriptLocation: 's3://bucketName/script', + }, + Role: { + 'Fn::GetAtt': [ + 'JobServiceRole4F432993', + 'Arn', + ], + }, + DefaultArguments: { + '--job-language': 'scala', + '--class': 'com.amazon.test.ClassName', + '--extra-jars': 's3://bucketName/file1.jar,s3://bucketName/file2.jar', + '--extra-files': 's3://bucketName/file1.txt,s3://bucketName/file2.txt', + '--user-jars-first': 'true', + }, + }); + }); + + describe('event rules and rule-based metrics', () => { + beforeEach(() => { + job = new glue.Job(stack, 'Job', { + executable: glue.JobExecutable.scalaEtl({ + glueVersion: glue.GlueVersion.V2_0, + className, + script, + }), + }); + }); + + test('.onEvent() should create the expected event rule', () => { + job.onEvent('eventId', {}); + + Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { + EventPattern: { + 'source': [ + 'aws.glue', + ], + 'detail-type': [ + 'Glue Job State Change', + 'Glue Job Run Status', + ], + 'detail': { + jobName: [ + { + Ref: 'JobB9D00F9F', + }, + ], + }, + }, + State: 'ENABLED', + }); + }); + + [ + { name: 'onSuccess()', invoke: (testJob: glue.Job) => testJob.onSuccess('SuccessRule'), state: 'SUCCEEDED' }, + { name: 'onFailure()', invoke: (testJob: glue.Job) => testJob.onFailure('FailureRule'), state: 'FAILED' }, + { name: 'onTimeout()', invoke: (testJob: glue.Job) => testJob.onTimeout('TimeoutRule'), state: 'TIMEOUT' }, + ].forEach((testCase) => { + test(`${testCase.name} should create a rule with correct properties`, () => { + testCase.invoke(job); + + Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { + Description: { + 'Fn::Join': [ + '', + [ + 'Rule triggered when Glue job ', + { + Ref: 'JobB9D00F9F', + }, + ` is in ${testCase.state} state`, + ], + ], + }, + EventPattern: { + 'source': [ + 'aws.glue', + ], + 'detail-type': [ + 'Glue Job State Change', + 'Glue Job Run Status', + ], + 'detail': { + state: [ + testCase.state, + ], + jobName: [ + { + Ref: 'JobB9D00F9F', + }, + ], + }, + }, + State: 'ENABLED', + }); + }); + }); + + [ + { name: '.metricSuccess()', invoke: (testJob: glue.Job) => testJob.metricSuccess(), state: 'SUCCEEDED', ruleId: 'SuccessMetricRule' }, + { 
name: '.metricFailure()', invoke: (testJob: glue.Job) => testJob.metricFailure(), state: 'FAILED', ruleId: 'FailureMetricRule' },
+        { name: '.metricTimeout()', invoke: (testJob: glue.Job) => testJob.metricTimeout(), state: 'TIMEOUT', ruleId: 'TimeoutMetricRule' },
+      ].forEach((testCase) => {
+        test(`${testCase.name} should create the expected singleton event rule and corresponding metric`, () => {
+          const metric = testCase.invoke(job);
+          testCase.invoke(job);
+
+          expect(metric).toEqual(new cloudwatch.Metric({
+            dimensions: {
+              RuleName: (job.node.findChild(testCase.ruleId) as events.Rule).ruleName,
+            },
+            metricName: 'TriggeredRules',
+            namespace: 'AWS/Events',
+            statistic: 'Sum',
+          }));
+
+          Template.fromStack(stack).resourceCountIs('AWS::Events::Rule', 1);
+          Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', {
+            Description: {
+              'Fn::Join': [
+                '',
+                [
+                  'Rule triggered when Glue job ',
+                  {
+                    Ref: 'JobB9D00F9F',
+                  },
+                  ` is in ${testCase.state} state`,
+                ],
+              ],
+            },
+            EventPattern: {
+              'source': [
+                'aws.glue',
+              ],
+              'detail-type': [
+                'Glue Job State Change',
+                'Glue Job Run Status',
+              ],
+              'detail': {
+                state: [
+                  testCase.state,
+                ],
+                jobName: [
+                  {
+                    Ref: 'JobB9D00F9F',
+                  },
+                ],
+              },
+            },
+            State: 'ENABLED',
+          });
+        });
+      });
+    });
+
+    describe('.metric()', () => {
+
+      test('with MetricType.COUNT should create a count sum metric', () => {
+        const metricName = 'glue.driver.aggregate.bytesRead';
+        const props = { statistic: cloudwatch.Statistic.SUM };
+
+        expect(job.metric(metricName, glue.MetricType.COUNT, props)).toEqual(new cloudwatch.Metric({
+          metricName,
+          statistic: 'Sum',
+          namespace: 'Glue',
+          dimensions: {
+            JobName: job.jobName,
+            JobRunId: 'ALL',
+            Type: 'count',
+          },
+        }));
+      });
+
+      test('with MetricType.GAUGE should create a gauge average metric', () => {
+        const metricName = 'glue.driver.BlockManager.disk.diskSpaceUsed_MB';
+        const props = { statistic: cloudwatch.Statistic.AVERAGE };
+
+        expect(job.metric(metricName, glue.MetricType.GAUGE, props)).toEqual(new cloudwatch.Metric({
+          metricName,
+          statistic: 'Average',
+          namespace: 'Glue',
+          dimensions: {
+            JobName: job.jobName,
+            JobRunId: 'ALL',
+            Type: 'gauge',
+          },
+        }));
+      });
+    });
+  });
+});

From 1e247d89adbc09ff79b87753fcd78b238a6752e8 Mon Sep 17 00:00:00 2001
From: Niranjan Jayakar
Date: Thu, 9 Sep 2021 10:33:12 +0100
Subject: [PATCH 23/41] fix(apigatewayv2): ApiMapping does not depend on
 DomainName (#16201)

When an ApiMapping resource is deployed using the Domain defined in the
DomainName resource, the DomainName resource must be deployed before the
ApiMapping resource.

Since the current logic uses the CloudFormation Output of DomainName as a
fall back, preferring the user provided string first, this dependency is
not expressed in the resulting template.

Removing the preference for the user-provided string informs synthesis
that the dependency must be declared.
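To illustrate the effect (a minimal sketch with assumed names: `stack`, `api`
and `cert` stand in for any stack, HTTP API and ACM certificate):

```ts
// With this change, ApiMapping renders its DomainName property as
// { Ref: <logical ID of the underlying CfnDomainName> } rather than the
// literal 'example.com', so CloudFormation deploys the DomainName first.
const dn = new DomainName(stack, 'DN', {
  domainName: 'example.com',
  certificate: cert,
});
new ApiMapping(stack, 'Mapping', { api, domainName: dn });
```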
fixes #15464 ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-apigatewayv2/lib/common/domain-name.ts | 2 +- .../aws-apigatewayv2/test/common/api-mapping.test.ts | 2 -- .../aws-apigatewayv2/test/http/domain-name.test.ts | 8 ++++++-- .../@aws-cdk/aws-apigatewayv2/test/http/stage.test.ts | 6 +++++- 4 files changed, 12 insertions(+), 6 deletions(-) diff --git a/packages/@aws-cdk/aws-apigatewayv2/lib/common/domain-name.ts b/packages/@aws-cdk/aws-apigatewayv2/lib/common/domain-name.ts index dca1a60bd4548..6b1123512c678 100644 --- a/packages/@aws-cdk/aws-apigatewayv2/lib/common/domain-name.ts +++ b/packages/@aws-cdk/aws-apigatewayv2/lib/common/domain-name.ts @@ -98,7 +98,7 @@ export class DomainName extends Resource implements IDomainName { ], }; const resource = new CfnDomainName(this, 'Resource', domainNameProps); - this.name = props.domainName ?? resource.ref; + this.name = resource.ref; this.regionalDomainName = Token.asString(resource.getAtt('RegionalDomainName')); this.regionalHostedZoneId = Token.asString(resource.getAtt('RegionalHostedZoneId')); } diff --git a/packages/@aws-cdk/aws-apigatewayv2/test/common/api-mapping.test.ts b/packages/@aws-cdk/aws-apigatewayv2/test/common/api-mapping.test.ts index ff53d3dad11fc..855ad32dda137 100644 --- a/packages/@aws-cdk/aws-apigatewayv2/test/common/api-mapping.test.ts +++ b/packages/@aws-cdk/aws-apigatewayv2/test/common/api-mapping.test.ts @@ -26,7 +26,6 @@ describe('ApiMapping', () => { ApiId: { Ref: 'ApiF70053CD', }, - DomainName: 'example.com', Stage: '$default', }); }); @@ -58,7 +57,6 @@ describe('ApiMapping', () => { ApiId: { Ref: 'ApiF70053CD', }, - DomainName: 'example.com', Stage: 'beta', ApiMappingKey: 'beta', }); diff --git a/packages/@aws-cdk/aws-apigatewayv2/test/http/domain-name.test.ts b/packages/@aws-cdk/aws-apigatewayv2/test/http/domain-name.test.ts index 30c981a1da1d5..2d0d856c7ae15 100644 --- a/packages/@aws-cdk/aws-apigatewayv2/test/http/domain-name.test.ts +++ b/packages/@aws-cdk/aws-apigatewayv2/test/http/domain-name.test.ts @@ -102,7 +102,9 @@ describe('DomainName', () => { ApiId: { Ref: 'ApiF70053CD', }, - DomainName: 'example.com', + DomainName: { + Ref: 'DNFDC76583', + }, Stage: 'beta', ApiMappingKey: 'beta', }); @@ -139,7 +141,9 @@ describe('DomainName', () => { ApiId: { Ref: 'ApiF70053CD', }, - DomainName: 'example.com', + DomainName: { + Ref: 'DNFDC76583', + }, Stage: '$default', }); }); diff --git a/packages/@aws-cdk/aws-apigatewayv2/test/http/stage.test.ts b/packages/@aws-cdk/aws-apigatewayv2/test/http/stage.test.ts index e7a05924719d3..b617fe4613a51 100644 --- a/packages/@aws-cdk/aws-apigatewayv2/test/http/stage.test.ts +++ b/packages/@aws-cdk/aws-apigatewayv2/test/http/stage.test.ts @@ -140,7 +140,11 @@ describe('HttpStage with domain mapping', () => { }, }); - expect(stage.domainUrl).toBe(`https://${domainName}/`); + expect(stack.resolve(stage.domainUrl)).toEqual({ + 'Fn::Join': ['', [ + 'https://', { Ref: 'DNFDC76583' }, '/', + ]], + }); }); test('domainUrl throws error if domainMapping is not configured', () => { From f3fc1d58883df35e662d4f6390b1f439045ee52a Mon Sep 17 00:00:00 2001 From: Niranjan Jayakar Date: Thu, 9 Sep 2021 12:42:43 +0100 Subject: [PATCH 24/41] chore: add breaking changes entry to 1.122.0 release (#16430) This was missed by the auto bump script since the pull request - https://github.com/aws/aws-cdk/pull/16329 - was merged without squash. 
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca9ef129cbefa..2f039368437a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file. See [standa ## [1.122.0](https://github.com/aws/aws-cdk/compare/v1.121.0...v1.122.0) (2021-09-08) +### ⚠ BREAKING CHANGES TO EXPERIMENTAL FEATURES + +* **assertions:** `hasOutput(props: any)` becomes `hasOutput(logicalId: string, props: any)` +* **assertions:** `findOutputs(props: any = {})` becomes `findOutputs(logicalId: string, props: any = {})` +* **assertions:** `hasMapping(props: any)` becomes `hasMapping(logicalId: string, props: any)` +* **assertions:** `findMappings(props: any = {})` becomes `findMappings(logicalId: string, props: any = {})` ### Features @@ -24,6 +30,7 @@ All notable changes to this project will be documented in this file. See [standa ### Bug Fixes * **apigatewayv2:** some methods of the `defaultStage` are not available without casting it to `IHttpStage` ([#15607](https://github.com/aws/aws-cdk/issues/15607)) ([27a0113](https://github.com/aws/aws-cdk/commit/27a0113ac68a05360faa22fa8897609f2f90b764)) +* **assertions:** output and mapping assertions do not accept logical id ([#16329](https://github.com/aws/aws-cdk/issues/16329)), closes [#16242](https://github.com/aws/aws-cdk/issues/16242) * **assets:** run executable command of container assets in cloud assembly root directory ([#16094](https://github.com/aws/aws-cdk/issues/16094)) ([c2852c9](https://github.com/aws/aws-cdk/commit/c2852c9c524a639a312bf296f7f23b0e3b112f6b)), closes [#15721](https://github.com/aws/aws-cdk/issues/15721) * **autoscaling:** EbsDeviceVolumeType.IO2 is not a valid CloudFormation value ([#16028](https://github.com/aws/aws-cdk/issues/16028)) ([492d33b](https://github.com/aws/aws-cdk/commit/492d33b27bc5b935e3da75f0bddd875bb6f9c15d)), closes [#16027](https://github.com/aws/aws-cdk/issues/16027) * **cli:** 'deploy' and 'diff' silently does nothing when given unknown stack name ([#16150](https://github.com/aws/aws-cdk/issues/16150)) ([74776f3](https://github.com/aws/aws-cdk/commit/74776f393462f7e7d23cb1953ef786a823adc896)), closes [#15866](https://github.com/aws/aws-cdk/issues/15866) From 430f50a546e9c575f8cdbd259367e440d985e68f Mon Sep 17 00:00:00 2001 From: Rico Huijbers Date: Thu, 9 Sep 2021 14:59:21 +0200 Subject: [PATCH 25/41] feat(ec2/ecs): `cacheInContext` properties for machine images (#16021) Most `MachineImage` implementations look up AMIs from SSM Parameters, and by default they will all look up the Parameters on each deployment. This leads to instance replacement. Since we already know the SSM Parameter Name and CDK already has a cached SSM context lookup, it should be simple to get a stable AMI ID. This is not ideal because the AMI will grow outdated over time, but users should have the option to pick non-updating images in a convenient way. Fixes #12484. 
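For example, opting in to the stable lookup looks roughly like this (a minimal
sketch; `stack` and `vpc` are assumed to exist):

```ts
// cachedInContext pins the AMI ID in cdk.context.json via a context lookup,
// instead of resolving the SSM parameter again on every deployment.
const asg = new autoscaling.AutoScalingGroup(stack, 'ASG', {
  vpc,
  instanceType: new ec2.InstanceType('t3.micro'),
  machineImage: ec2.MachineImage.latestAmazonLinux({ cachedInContext: true }),
});
```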
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../@aws-cdk/aws-ec2/lib/machine-image.ts | 154 +++++++- .../aws-ec2/test/machine-image.test.ts | 19 + packages/@aws-cdk/aws-ecs/README.md | 17 + packages/@aws-cdk/aws-ecs/lib/amis.ts | 373 ++++++++++++++++++ packages/@aws-cdk/aws-ecs/lib/cluster.ts | 295 ++------------ packages/@aws-cdk/aws-ecs/lib/index.ts | 1 + .../@aws-cdk/aws-ecs/test/cluster.test.ts | 2 +- .../test/ec2/integ.bottlerocket.expected.json | 8 +- .../integ.firelens-s3-config.expected.json | 12 +- .../integ.graviton-bottlerocket.expected.json | 4 +- .../integ.firelens-cloudwatch.expected.json | 12 +- 11 files changed, 600 insertions(+), 297 deletions(-) create mode 100644 packages/@aws-cdk/aws-ecs/lib/amis.ts diff --git a/packages/@aws-cdk/aws-ec2/lib/machine-image.ts b/packages/@aws-cdk/aws-ec2/lib/machine-image.ts index df4a1eece07e0..c7340af1d47e5 100644 --- a/packages/@aws-cdk/aws-ec2/lib/machine-image.ts +++ b/packages/@aws-cdk/aws-ec2/lib/machine-image.ts @@ -80,11 +80,27 @@ export abstract class MachineImage { * @param parameterName The name of SSM parameter containing the AMi id * @param os The operating system type of the AMI * @param userData optional user data for the given image + * @deprecated Use `MachineImage.fromSsmParameter()` instead */ public static fromSSMParameter(parameterName: string, os: OperatingSystemType, userData?: UserData): IMachineImage { return new GenericSSMParameterImage(parameterName, os, userData); } + /** + * An image specified in SSM parameter store + * + * By default, the SSM parameter is refreshed at every deployment, + * causing your instances to be replaced whenever a new version of the AMI + * is released. + * + * Pass `{ cachedInContext: true }` to keep the AMI ID stable. If you do, you + * will have to remember to periodically invalidate the context to refresh + * to the newest AMI ID. + */ + public static fromSsmParameter(parameterName: string, options?: SsmParameterImageOptions): IMachineImage { + return new GenericSsmParameterImage(parameterName, options); + } + /** * Look up a shared Machine Image using DescribeImages * @@ -96,6 +112,8 @@ export abstract class MachineImage { * will be used on future runs. To refresh the AMI lookup, you will have to * evict the value from the cache using the `cdk context` command. See * https://docs.aws.amazon.com/cdk/latest/guide/context.html for more information. + * + * This function can not be used in environment-agnostic stacks. */ public static lookup(props: LookupMachineImageProps): IMachineImage { return new LookupMachineImage(props); @@ -131,10 +149,17 @@ export interface MachineImageConfig { * on the instance if you are using this image. * * The AMI ID is selected using the values published to the SSM parameter store. 
+ * + * @deprecated Use `MachineImage.fromSsmParameter()` instead */ export class GenericSSMParameterImage implements IMachineImage { + /** + * Name of the SSM parameter we're looking up + */ + public readonly parameterName: string; - constructor(private readonly parameterName: string, private readonly os: OperatingSystemType, private readonly userData?: UserData) { + constructor(parameterName: string, private readonly os: OperatingSystemType, private readonly userData?: UserData) { + this.parameterName = parameterName; } /** @@ -150,6 +175,75 @@ export class GenericSSMParameterImage implements IMachineImage { } } +/** + * Properties for GenericSsmParameterImage + */ +export interface SsmParameterImageOptions { + /** + * Operating system + * + * @default OperatingSystemType.LINUX + */ + readonly os?: OperatingSystemType; + + /** + * Custom UserData + * + * @default - UserData appropriate for the OS + */ + readonly userData?: UserData; + + /** + * Whether the AMI ID is cached to be stable between deployments + * + * By default, the newest image is used on each deployment. This will cause + * instances to be replaced whenever a new version is released, and may cause + * downtime if there aren't enough running instances in the AutoScalingGroup + * to reschedule the tasks on. + * + * If set to true, the AMI ID will be cached in `cdk.context.json` and the + * same value will be used on future runs. Your instances will not be replaced + * but your AMI version will grow old over time. To refresh the AMI lookup, + * you will have to evict the value from the cache using the `cdk context` + * command. See https://docs.aws.amazon.com/cdk/latest/guide/context.html for + * more information. + * + * Can not be set to `true` in environment-agnostic stacks. + * + * @default false + */ + readonly cachedInContext?: boolean; +} + +/** + * Select the image based on a given SSM parameter + * + * This Machine Image automatically updates to the latest version on every + * deployment. Be aware this will cause your instances to be replaced when a + * new version of the image becomes available. Do not store stateful information + * on the instance if you are using this image. + * + * The AMI ID is selected using the values published to the SSM parameter store. + */ +class GenericSsmParameterImage implements IMachineImage { + constructor(private readonly parameterName: string, private readonly props: SsmParameterImageOptions = {}) { + } + + /** + * Return the image to use in the given context + */ + public getImage(scope: Construct): MachineImageConfig { + const imageId = lookupImage(scope, this.props.cachedInContext, this.parameterName); + + const osType = this.props.os ?? OperatingSystemType.LINUX; + return { + imageId, + osType, + userData: this.props.userData ?? (osType === OperatingSystemType.WINDOWS ? UserData.forWindows() : UserData.forLinux()), + }; + } +} + /** * Configuration options for WindowsImage */ @@ -240,6 +334,27 @@ export interface AmazonLinuxImageProps { * @default X86_64 */ readonly cpuType?: AmazonLinuxCpuType; + + /** + * Whether the AMI ID is cached to be stable between deployments + * + * By default, the newest image is used on each deployment. This will cause + * instances to be replaced whenever a new version is released, and may cause + * downtime if there aren't enough running instances in the AutoScalingGroup + * to reschedule the tasks on. + * + * If set to true, the AMI ID will be cached in `cdk.context.json` and the + * same value will be used on future runs. 
Your instances will not be replaced + * but your AMI version will grow old over time. To refresh the AMI lookup, + * you will have to evict the value from the cache using the `cdk context` + * command. See https://docs.aws.amazon.com/cdk/latest/guide/context.html for + * more information. + * + * Can not be set to `true` in environment-agnostic stacks. + * + * @default false + */ + readonly cachedInContext?: boolean; } /** @@ -253,8 +368,10 @@ export interface AmazonLinuxImageProps { * The AMI ID is selected using the values published to the SSM parameter store. */ export class AmazonLinuxImage extends GenericSSMParameterImage { - - constructor(props: AmazonLinuxImageProps = {}) { + /** + * Return the SSM parameter name that will contain the Amazon Linux image with the given attributes + */ + public static ssmParameterName(props: AmazonLinuxImageProps = {}) { const generation = (props && props.generation) || AmazonLinuxGeneration.AMAZON_LINUX; const edition = (props && props.edition) || AmazonLinuxEdition.STANDARD; const virtualization = (props && props.virtualization) || AmazonLinuxVirt.HVM; @@ -269,8 +386,29 @@ export class AmazonLinuxImage extends GenericSSMParameterImage { storage, ].filter(x => x !== undefined); // Get rid of undefineds - const parameterName = '/aws/service/ami-amazon-linux-latest/' + parts.join('-'); - super(parameterName, OperatingSystemType.LINUX, props.userData); + return '/aws/service/ami-amazon-linux-latest/' + parts.join('-'); + } + + private readonly cachedInContext: boolean; + + constructor(private readonly props: AmazonLinuxImageProps = {}) { + super(AmazonLinuxImage.ssmParameterName(props), OperatingSystemType.LINUX, props.userData); + + this.cachedInContext = props.cachedInContext ?? false; + } + + /** + * Return the image to use in the given context + */ + public getImage(scope: Construct): MachineImageConfig { + const imageId = lookupImage(scope, this.cachedInContext, this.parameterName); + + const osType = OperatingSystemType.LINUX; + return { + imageId, + osType, + userData: this.props.userData ?? UserData.forLinux(), + }; } } @@ -536,3 +674,9 @@ export interface LookupMachineImageProps { */ readonly userData?: UserData; } + +function lookupImage(scope: Construct, cachedInContext: boolean | undefined, parameterName: string) { + return cachedInContext + ? 
ssm.StringParameter.valueFromLookup(scope, parameterName)
+    : ssm.StringParameter.valueForTypedStringParameter(scope, parameterName, ssm.ParameterType.AWS_EC2_IMAGE_ID);
+}
\ No newline at end of file
diff --git a/packages/@aws-cdk/aws-ec2/test/machine-image.test.ts b/packages/@aws-cdk/aws-ec2/test/machine-image.test.ts
index 6a6a6f3c7332f..a25acdfbb4cfa 100644
--- a/packages/@aws-cdk/aws-ec2/test/machine-image.test.ts
+++ b/packages/@aws-cdk/aws-ec2/test/machine-image.test.ts
@@ -158,6 +158,25 @@ test('LookupMachineImage creates correct type of UserData', () => {
   expect(isLinuxUserData(linuxDetails.userData)).toBeTruthy();
 });
 
+test('cached lookups of Amazon Linux', () => {
+  // WHEN
+  const ami = ec2.MachineImage.latestAmazonLinux({ cachedInContext: true }).getImage(stack).imageId;
+
+  // THEN
+  expect(ami).toEqual('dummy-value-for-/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2');
+  expect(app.synth().manifest.missing).toEqual([
+    {
+      key: 'ssm:account=1234:parameterName=/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2:region=testregion',
+      props: {
+        account: '1234',
+        region: 'testregion',
+        parameterName: '/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2',
+      },
+      provider: 'ssm',
+    },
+  ]);
+});
+
 function isWindowsUserData(ud: ec2.UserData) {
   return ud.render().indexOf('powershell') > -1;
 }
diff --git a/packages/@aws-cdk/aws-ecs/README.md b/packages/@aws-cdk/aws-ecs/README.md
index 5454d93e61095..8ffbde1d9a0e8 100644
--- a/packages/@aws-cdk/aws-ecs/README.md
+++ b/packages/@aws-cdk/aws-ecs/README.md
@@ -131,6 +131,23 @@ cluster.addAutoScalingGroup(autoScalingGroup);
 
 If you omit the property `vpc`, the construct will create a new VPC with two AZs.
 
+By default, all machine images will auto-update to the latest version
+on each deployment, causing a replacement of the instances in your AutoScalingGroup
+if the AMI has been updated since the last deployment.
+
+If task draining is enabled, ECS will transparently reschedule tasks onto the new
+instances before terminating your old instances. If you have disabled task draining,
+the tasks will be terminated along with the instance. To prevent that, you
+can pick a non-updating AMI by passing `cachedInContext: true`, but be sure
+to periodically update to the latest AMI manually by using the [CDK CLI
+context management commands](https://docs.aws.amazon.com/cdk/latest/guide/context.html):
+
+```ts
+const autoScalingGroup = new autoscaling.AutoScalingGroup(this, 'ASG', {
+  // ...
+  machineImage: EcsOptimizedImage.amazonLinux({ cachedInContext: true }),
+});
+```
 
 ### Bottlerocket
 
diff --git a/packages/@aws-cdk/aws-ecs/lib/amis.ts b/packages/@aws-cdk/aws-ecs/lib/amis.ts
new file mode 100644
index 0000000000000..f7e4139c10c3a
--- /dev/null
+++ b/packages/@aws-cdk/aws-ecs/lib/amis.ts
@@ -0,0 +1,373 @@
+import * as ec2 from '@aws-cdk/aws-ec2';
+import * as ssm from '@aws-cdk/aws-ssm';
+
+// v2 - keep this import as a separate section to reduce merge conflict when forward merging with the v2 branch.
+// eslint-disable-next-line
+import { Construct as CoreConstruct } from '@aws-cdk/core';
+
+/**
+ * The ECS-optimized AMI variant to use. For more information, see
+ * [Amazon ECS-optimized AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html).
+ */
+export enum AmiHardwareType {
+
+  /**
+   * Use the standard Amazon ECS-optimized AMI.
+   */
+  STANDARD = 'Standard',
+
+  /**
+   * Use the Amazon ECS GPU-optimized AMI.
+ */ + GPU = 'GPU', + + /** + * Use the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. + */ + ARM = 'ARM64', +} + + +/** + * ECS-optimized Windows version list + */ +export enum WindowsOptimizedVersion { + SERVER_2019 = '2019', + SERVER_2016 = '2016', +} + +/* + * TODO:v2.0.0 + * * remove `export` keyword + * * remove @deprecated + */ +/** + * The properties that define which ECS-optimized AMI is used. + * + * @deprecated see {@link EcsOptimizedImage} + */ +export interface EcsOptimizedAmiProps { + /** + * The Amazon Linux generation to use. + * + * @default AmazonLinuxGeneration.AmazonLinux2 + */ + readonly generation?: ec2.AmazonLinuxGeneration; + + /** + * The Windows Server version to use. + * + * @default none, uses Linux generation + */ + readonly windowsVersion?: WindowsOptimizedVersion; + + /** + * The ECS-optimized AMI variant to use. + * + * @default AmiHardwareType.Standard + */ + readonly hardwareType?: AmiHardwareType; + + /** + * Whether the AMI ID is cached to be stable between deployments + * + * By default, the newest image is used on each deployment. This will cause + * instances to be replaced whenever a new version is released, and may cause + * downtime if there aren't enough running instances in the AutoScalingGroup + * to reschedule the tasks on. + * + * If set to true, the AMI ID will be cached in `cdk.context.json` and the + * same value will be used on future runs. Your instances will not be replaced + * but your AMI version will grow old over time. To refresh the AMI lookup, + * you will have to evict the value from the cache using the `cdk context` + * command. See https://docs.aws.amazon.com/cdk/latest/guide/context.html for + * more information. + * + * Can not be set to `true` in environment-agnostic stacks. + * + * @default false + */ + readonly cachedInContext?: boolean; +} + +/* + * TODO:v2.0.0 remove EcsOptimizedAmi + */ +/** + * Construct a Linux or Windows machine image from the latest ECS Optimized AMI published in SSM + * + * @deprecated see {@link EcsOptimizedImage#amazonLinux}, {@link EcsOptimizedImage#amazonLinux} and {@link EcsOptimizedImage#windows} + */ +export class EcsOptimizedAmi implements ec2.IMachineImage { + private readonly generation?: ec2.AmazonLinuxGeneration; + private readonly windowsVersion?: WindowsOptimizedVersion; + private readonly hwType: AmiHardwareType; + + private readonly amiParameterName: string; + private readonly cachedInContext: boolean; + + /** + * Constructs a new instance of the EcsOptimizedAmi class. + */ + constructor(props?: EcsOptimizedAmiProps) { + this.hwType = (props && props.hardwareType) || AmiHardwareType.STANDARD; + if (props && props.generation) { // generation defined in the props object + if (props.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX && this.hwType !== AmiHardwareType.STANDARD) { + throw new Error('Amazon Linux does not support special hardware type. 
Use Amazon Linux 2 instead'); + } else if (props.windowsVersion) { + throw new Error('"windowsVersion" and Linux image "generation" cannot be both set'); + } else { + this.generation = props.generation; + } + } else if (props && props.windowsVersion) { + if (this.hwType !== AmiHardwareType.STANDARD) { + throw new Error('Windows Server does not support special hardware type'); + } else { + this.windowsVersion = props.windowsVersion; + } + } else { // generation not defined in props object + // always default to Amazon Linux v2 regardless of HW + this.generation = ec2.AmazonLinuxGeneration.AMAZON_LINUX_2; + } + + // set the SSM parameter name + this.amiParameterName = '/aws/service/ecs/optimized-ami/' + + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX ? 'amazon-linux/' : '') + + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX_2 ? 'amazon-linux-2/' : '') + + (this.windowsVersion ? `windows_server/${this.windowsVersion}/english/full/` : '') + + (this.hwType === AmiHardwareType.GPU ? 'gpu/' : '') + + (this.hwType === AmiHardwareType.ARM ? 'arm64/' : '') + + 'recommended/image_id'; + + this.cachedInContext = props?.cachedInContext ?? false; + } + + /** + * Return the correct image + */ + public getImage(scope: CoreConstruct): ec2.MachineImageConfig { + const ami = lookupImage(scope, this.cachedInContext, this.amiParameterName); + + const osType = this.windowsVersion ? ec2.OperatingSystemType.WINDOWS : ec2.OperatingSystemType.LINUX; + return { + imageId: ami, + osType, + userData: ec2.UserData.forOperatingSystem(osType), + }; + } +} + +/** + * Additional configuration properties for EcsOptimizedImage factory functions + */ +export interface EcsOptimizedImageOptions { + /** + * Whether the AMI ID is cached to be stable between deployments + * + * By default, the newest image is used on each deployment. This will cause + * instances to be replaced whenever a new version is released, and may cause + * downtime if there aren't enough running instances in the AutoScalingGroup + * to reschedule the tasks on. + * + * If set to true, the AMI ID will be cached in `cdk.context.json` and the + * same value will be used on future runs. Your instances will not be replaced + * but your AMI version will grow old over time. To refresh the AMI lookup, + * you will have to evict the value from the cache using the `cdk context` + * command. See https://docs.aws.amazon.com/cdk/latest/guide/context.html for + * more information. + * + * Can not be set to `true` in environment-agnostic stacks. 
+ * + * @default false + */ + readonly cachedInContext?: boolean; +} + +/** + * Construct a Linux or Windows machine image from the latest ECS Optimized AMI published in SSM + */ +export class EcsOptimizedImage implements ec2.IMachineImage { + /** + * Construct an Amazon Linux 2 image from the latest ECS Optimized AMI published in SSM + * + * @param hardwareType ECS-optimized AMI variant to use + */ + public static amazonLinux2(hardwareType = AmiHardwareType.STANDARD, options: EcsOptimizedImageOptions = {}): EcsOptimizedImage { + return new EcsOptimizedImage({ + generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2, + hardwareType, + cachedInContext: options.cachedInContext, + }); + } + + /** + * Construct an Amazon Linux AMI image from the latest ECS Optimized AMI published in SSM + */ + public static amazonLinux(options: EcsOptimizedImageOptions = {}): EcsOptimizedImage { + return new EcsOptimizedImage({ + generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX, + cachedInContext: options.cachedInContext, + }); + } + + /** + * Construct a Windows image from the latest ECS Optimized AMI published in SSM + * + * @param windowsVersion Windows Version to use + */ + public static windows(windowsVersion: WindowsOptimizedVersion, options: EcsOptimizedImageOptions = {}): EcsOptimizedImage { + return new EcsOptimizedImage({ + windowsVersion, + cachedInContext: options.cachedInContext, + }); + } + + private readonly generation?: ec2.AmazonLinuxGeneration; + private readonly windowsVersion?: WindowsOptimizedVersion; + private readonly hwType?: AmiHardwareType; + + private readonly amiParameterName: string; + private readonly cachedInContext: boolean; + + /** + * Constructs a new instance of the EcsOptimizedAmi class. + */ + private constructor(props: EcsOptimizedAmiProps) { + this.hwType = props && props.hardwareType; + + if (props.windowsVersion) { + this.windowsVersion = props.windowsVersion; + } else if (props.generation) { + this.generation = props.generation; + } else { + throw new Error('This error should never be thrown'); + } + + // set the SSM parameter name + this.amiParameterName = '/aws/service/ecs/optimized-ami/' + + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX ? 'amazon-linux/' : '') + + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX_2 ? 'amazon-linux-2/' : '') + + (this.windowsVersion ? `windows_server/${this.windowsVersion}/english/full/` : '') + + (this.hwType === AmiHardwareType.GPU ? 'gpu/' : '') + + (this.hwType === AmiHardwareType.ARM ? 'arm64/' : '') + + 'recommended/image_id'; + + this.cachedInContext = props?.cachedInContext ?? false; + } + + /** + * Return the correct image + */ + public getImage(scope: CoreConstruct): ec2.MachineImageConfig { + const ami = lookupImage(scope, this.cachedInContext, this.amiParameterName); + + const osType = this.windowsVersion ? ec2.OperatingSystemType.WINDOWS : ec2.OperatingSystemType.LINUX; + return { + imageId: ami, + osType, + userData: ec2.UserData.forOperatingSystem(osType), + }; + } +} + +/** + * Amazon ECS variant + */ +export enum BottlerocketEcsVariant { + /** + * aws-ecs-1 variant + */ + AWS_ECS_1 = 'aws-ecs-1' + +} + +/** + * Properties for BottleRocketImage + */ +export interface BottleRocketImageProps { + /** + * The Amazon ECS variant to use. 
+ * Only `aws-ecs-1` is currently available + * + * @default - BottlerocketEcsVariant.AWS_ECS_1 + */ + readonly variant?: BottlerocketEcsVariant; + + /** + * The CPU architecture + * + * @default - x86_64 + */ + readonly architecture?: ec2.InstanceArchitecture; + + /** + * Whether the AMI ID is cached to be stable between deployments + * + * By default, the newest image is used on each deployment. This will cause + * instances to be replaced whenever a new version is released, and may cause + * downtime if there aren't enough running instances in the AutoScalingGroup + * to reschedule the tasks on. + * + * If set to true, the AMI ID will be cached in `cdk.context.json` and the + * same value will be used on future runs. Your instances will not be replaced + * but your AMI version will grow old over time. To refresh the AMI lookup, + * you will have to evict the value from the cache using the `cdk context` + * command. See https://docs.aws.amazon.com/cdk/latest/guide/context.html for + * more information. + * + * Can not be set to `true` in environment-agnostic stacks. + * + * @default false + */ + readonly cachedInContext?: boolean; +} + +/** + * Construct an Bottlerocket image from the latest AMI published in SSM + */ +export class BottleRocketImage implements ec2.IMachineImage { + private readonly amiParameterName: string; + /** + * Amazon ECS variant for Bottlerocket AMI + */ + private readonly variant: string; + + /** + * Instance architecture + */ + private readonly architecture: ec2.InstanceArchitecture; + + private readonly cachedInContext: boolean; + + /** + * Constructs a new instance of the BottleRocketImage class. + */ + public constructor(props: BottleRocketImageProps = {}) { + this.variant = props.variant ?? BottlerocketEcsVariant.AWS_ECS_1; + this.architecture = props.architecture ?? ec2.InstanceArchitecture.X86_64; + + // set the SSM parameter name + this.amiParameterName = `/aws/service/bottlerocket/${this.variant}/${this.architecture}/latest/image_id`; + + this.cachedInContext = props.cachedInContext ?? false; + } + + /** + * Return the correct image + */ + public getImage(scope: CoreConstruct): ec2.MachineImageConfig { + const ami = lookupImage(scope, this.cachedInContext, this.amiParameterName); + + return { + imageId: ami, + osType: ec2.OperatingSystemType.LINUX, + userData: ec2.UserData.custom(''), + }; + } +} + +function lookupImage(scope: CoreConstruct, cachedInContext: boolean | undefined, parameterName: string) { + return cachedInContext + ? 
ssm.StringParameter.valueFromLookup(scope, parameterName) + : ssm.StringParameter.valueForTypedStringParameter(scope, parameterName, ssm.ParameterType.AWS_EC2_IMAGE_ID); +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-ecs/lib/cluster.ts b/packages/@aws-cdk/aws-ecs/lib/cluster.ts index 4ea5e24b7529c..49d99bf68925d 100644 --- a/packages/@aws-cdk/aws-ecs/lib/cluster.ts +++ b/packages/@aws-cdk/aws-ecs/lib/cluster.ts @@ -6,9 +6,9 @@ import * as kms from '@aws-cdk/aws-kms'; import * as logs from '@aws-cdk/aws-logs'; import * as s3 from '@aws-cdk/aws-s3'; import * as cloudmap from '@aws-cdk/aws-servicediscovery'; -import * as ssm from '@aws-cdk/aws-ssm'; import { Duration, Lazy, IResource, Resource, Stack, Aspects, IAspect, IConstruct } from '@aws-cdk/core'; import { Construct } from 'constructs'; +import { BottleRocketImage, EcsOptimizedAmi } from './amis'; import { InstanceDrainHook } from './drain-hook/instance-drain-hook'; import { ECSMetrics } from './ecs-canned-metrics.generated'; import { CfnCluster, CfnCapacityProvider, CfnClusterCapacityProviderAssociations } from './ecs.generated'; @@ -570,253 +570,6 @@ export class Cluster extends Resource implements ICluster { } } -/** - * ECS-optimized Windows version list - */ -export enum WindowsOptimizedVersion { - SERVER_2019 = '2019', - SERVER_2016 = '2016', -} - -/* - * TODO:v2.0.0 - * * remove `export` keyword - * * remove @deprecated - */ -/** - * The properties that define which ECS-optimized AMI is used. - * - * @deprecated see {@link EcsOptimizedImage} - */ -export interface EcsOptimizedAmiProps { - /** - * The Amazon Linux generation to use. - * - * @default AmazonLinuxGeneration.AmazonLinux2 - */ - readonly generation?: ec2.AmazonLinuxGeneration; - - /** - * The Windows Server version to use. - * - * @default none, uses Linux generation - */ - readonly windowsVersion?: WindowsOptimizedVersion; - - /** - * The ECS-optimized AMI variant to use. - * - * @default AmiHardwareType.Standard - */ - readonly hardwareType?: AmiHardwareType; -} - -/* - * TODO:v2.0.0 remove EcsOptimizedAmi - */ -/** - * Construct a Linux or Windows machine image from the latest ECS Optimized AMI published in SSM - * - * @deprecated see {@link EcsOptimizedImage#amazonLinux}, {@link EcsOptimizedImage#amazonLinux} and {@link EcsOptimizedImage#windows} - */ -export class EcsOptimizedAmi implements ec2.IMachineImage { - private readonly generation?: ec2.AmazonLinuxGeneration; - private readonly windowsVersion?: WindowsOptimizedVersion; - private readonly hwType: AmiHardwareType; - - private readonly amiParameterName: string; - - /** - * Constructs a new instance of the EcsOptimizedAmi class. - */ - constructor(props?: EcsOptimizedAmiProps) { - this.hwType = (props && props.hardwareType) || AmiHardwareType.STANDARD; - if (props && props.generation) { // generation defined in the props object - if (props.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX && this.hwType !== AmiHardwareType.STANDARD) { - throw new Error('Amazon Linux does not support special hardware type. 
Use Amazon Linux 2 instead'); - } else if (props.windowsVersion) { - throw new Error('"windowsVersion" and Linux image "generation" cannot be both set'); - } else { - this.generation = props.generation; - } - } else if (props && props.windowsVersion) { - if (this.hwType !== AmiHardwareType.STANDARD) { - throw new Error('Windows Server does not support special hardware type'); - } else { - this.windowsVersion = props.windowsVersion; - } - } else { // generation not defined in props object - // always default to Amazon Linux v2 regardless of HW - this.generation = ec2.AmazonLinuxGeneration.AMAZON_LINUX_2; - } - - // set the SSM parameter name - this.amiParameterName = '/aws/service/ecs/optimized-ami/' - + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX ? 'amazon-linux/' : '') - + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX_2 ? 'amazon-linux-2/' : '') - + (this.windowsVersion ? `windows_server/${this.windowsVersion}/english/full/` : '') - + (this.hwType === AmiHardwareType.GPU ? 'gpu/' : '') - + (this.hwType === AmiHardwareType.ARM ? 'arm64/' : '') - + 'recommended/image_id'; - } - - /** - * Return the correct image - */ - public getImage(scope: CoreConstruct): ec2.MachineImageConfig { - const ami = ssm.StringParameter.valueForTypedStringParameter(scope, this.amiParameterName, ssm.ParameterType.AWS_EC2_IMAGE_ID); - const osType = this.windowsVersion ? ec2.OperatingSystemType.WINDOWS : ec2.OperatingSystemType.LINUX; - return { - imageId: ami, - osType, - userData: ec2.UserData.forOperatingSystem(osType), - }; - } -} - -/** - * Construct a Linux or Windows machine image from the latest ECS Optimized AMI published in SSM - */ -export class EcsOptimizedImage implements ec2.IMachineImage { - /** - * Construct an Amazon Linux 2 image from the latest ECS Optimized AMI published in SSM - * - * @param hardwareType ECS-optimized AMI variant to use - */ - public static amazonLinux2(hardwareType = AmiHardwareType.STANDARD): EcsOptimizedImage { - return new EcsOptimizedImage({ generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX_2, hardwareType }); - } - - /** - * Construct an Amazon Linux AMI image from the latest ECS Optimized AMI published in SSM - */ - public static amazonLinux(): EcsOptimizedImage { - return new EcsOptimizedImage({ generation: ec2.AmazonLinuxGeneration.AMAZON_LINUX }); - } - - /** - * Construct a Windows image from the latest ECS Optimized AMI published in SSM - * - * @param windowsVersion Windows Version to use - */ - public static windows(windowsVersion: WindowsOptimizedVersion): EcsOptimizedImage { - return new EcsOptimizedImage({ windowsVersion }); - } - - private readonly generation?: ec2.AmazonLinuxGeneration; - private readonly windowsVersion?: WindowsOptimizedVersion; - private readonly hwType?: AmiHardwareType; - - private readonly amiParameterName: string; - - /** - * Constructs a new instance of the EcsOptimizedAmi class. - */ - private constructor(props: EcsOptimizedAmiProps) { - this.hwType = props && props.hardwareType; - - if (props.windowsVersion) { - this.windowsVersion = props.windowsVersion; - } else if (props.generation) { - this.generation = props.generation; - } else { - throw new Error('This error should never be thrown'); - } - - // set the SSM parameter name - this.amiParameterName = '/aws/service/ecs/optimized-ami/' - + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX ? 'amazon-linux/' : '') - + (this.generation === ec2.AmazonLinuxGeneration.AMAZON_LINUX_2 ? 'amazon-linux-2/' : '') - + (this.windowsVersion ? 
`windows_server/${this.windowsVersion}/english/full/` : '') - + (this.hwType === AmiHardwareType.GPU ? 'gpu/' : '') - + (this.hwType === AmiHardwareType.ARM ? 'arm64/' : '') - + 'recommended/image_id'; - } - - /** - * Return the correct image - */ - public getImage(scope: CoreConstruct): ec2.MachineImageConfig { - const ami = ssm.StringParameter.valueForTypedStringParameter(scope, this.amiParameterName, ssm.ParameterType.AWS_EC2_IMAGE_ID); - const osType = this.windowsVersion ? ec2.OperatingSystemType.WINDOWS : ec2.OperatingSystemType.LINUX; - return { - imageId: ami, - osType, - userData: ec2.UserData.forOperatingSystem(osType), - }; - } -} - -/** - * Amazon ECS variant - */ -export enum BottlerocketEcsVariant { - /** - * aws-ecs-1 variant - */ - AWS_ECS_1 = 'aws-ecs-1' - -} - -/** - * Properties for BottleRocketImage - */ -export interface BottleRocketImageProps { - /** - * The Amazon ECS variant to use. - * Only `aws-ecs-1` is currently available - * - * @default - BottlerocketEcsVariant.AWS_ECS_1 - */ - readonly variant?: BottlerocketEcsVariant; - - /** - * The CPU architecture - * - * @default - x86_64 - */ - readonly architecture?: ec2.InstanceArchitecture; -} - -/** - * Construct an Bottlerocket image from the latest AMI published in SSM - */ -export class BottleRocketImage implements ec2.IMachineImage { - private readonly amiParameterName: string; - /** - * Amazon ECS variant for Bottlerocket AMI - */ - private readonly variant: string; - - /** - * Instance architecture - */ - private readonly architecture: ec2.InstanceArchitecture; - - /** - * Constructs a new instance of the BottleRocketImage class. - */ - public constructor(props: BottleRocketImageProps = {}) { - this.variant = props.variant ?? BottlerocketEcsVariant.AWS_ECS_1; - this.architecture = props.architecture ?? ec2.InstanceArchitecture.X86_64; - - // set the SSM parameter name - this.amiParameterName = `/aws/service/bottlerocket/${this.variant}/${this.architecture}/latest/image_id`; - } - - /** - * Return the correct image - */ - public getImage(scope: CoreConstruct): ec2.MachineImageConfig { - const ami = ssm.StringParameter.valueForStringParameter(scope, this.amiParameterName); - return { - imageId: ami, - osType: ec2.OperatingSystemType.LINUX, - userData: ec2.UserData.custom(''), - }; - } -} - /** * A regional grouping of one or more container instances on which you can run tasks and services. */ @@ -1058,11 +811,29 @@ export interface AddCapacityOptions extends AddAutoScalingGroupCapacityOptions, readonly instanceType: ec2.InstanceType; /** - * The ECS-optimized AMI variant to use. For more information, see - * [Amazon ECS-optimized AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html). + * The ECS-optimized AMI variant to use + * + * The default is to use an ECS-optimized AMI of Amazon Linux 2 which is + * automatically updated to the latest version on every deployment. This will + * replace the instances in the AutoScalingGroup. Make sure you have not disabled + * task draining, to avoid downtime when the AMI updates. + * + * To use an image that does not update on every deployment, pass: + * + * ```ts + * { + * machineImage: EcsOptimizedImage.amazonLinux2(AmiHardwareType.STANDARD, { + * cachedInContext: true, + * }), + * } + * ``` + * + * For more information, see [Amazon ECS-optimized + * AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html). + * * You must define either `machineImage` or `machineImageType`, not both. 
* - * @default - Amazon Linux 2 + * @default - Automatically updated, ECS-optimized Amazon Linux 2 */ readonly machineImage?: ec2.IMachineImage; } @@ -1091,28 +862,6 @@ export interface CloudMapNamespaceOptions { readonly vpc?: ec2.IVpc; } -/** - * The ECS-optimized AMI variant to use. For more information, see - * [Amazon ECS-optimized AMIs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html). - */ -export enum AmiHardwareType { - - /** - * Use the standard Amazon ECS-optimized AMI. - */ - STANDARD = 'Standard', - - /** - * Use the Amazon ECS GPU-optimized AMI. - */ - GPU = 'GPU', - - /** - * Use the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. - */ - ARM = 'ARM64', -} - enum ContainerInsights { /** * Enable CloudWatch Container Insights for the cluster diff --git a/packages/@aws-cdk/aws-ecs/lib/index.ts b/packages/@aws-cdk/aws-ecs/lib/index.ts index 0c1cee2a56ff9..bd076ccfd05f7 100644 --- a/packages/@aws-cdk/aws-ecs/lib/index.ts +++ b/packages/@aws-cdk/aws-ecs/lib/index.ts @@ -4,6 +4,7 @@ export * from './base/task-definition'; export * from './container-definition'; export * from './container-image'; +export * from './amis'; export * from './cluster'; export * from './environment-file'; export * from './firelens-log-router'; diff --git a/packages/@aws-cdk/aws-ecs/test/cluster.test.ts b/packages/@aws-cdk/aws-ecs/test/cluster.test.ts index cfe65183d1c02..52de16b353137 100644 --- a/packages/@aws-cdk/aws-ecs/test/cluster.test.ts +++ b/packages/@aws-cdk/aws-ecs/test/cluster.test.ts @@ -1694,7 +1694,7 @@ describe('cluster', () => { const template = assembly.getStackByName(stack.stackName).template; expect(template.Parameters).toEqual({ SsmParameterValueawsservicebottlerocketawsecs1arm64latestimageidC96584B6F00A464EAD1953AFF4B05118Parameter: { - Type: 'AWS::SSM::Parameter::Value', + Type: 'AWS::SSM::Parameter::Value', Default: '/aws/service/bottlerocket/aws-ecs-1/arm64/latest/image_id', }, }); diff --git a/packages/@aws-cdk/aws-ecs/test/ec2/integ.bottlerocket.expected.json b/packages/@aws-cdk/aws-ecs/test/ec2/integ.bottlerocket.expected.json index 616a80172092e..45ea355b976e9 100644 --- a/packages/@aws-cdk/aws-ecs/test/ec2/integ.bottlerocket.expected.json +++ b/packages/@aws-cdk/aws-ecs/test/ec2/integ.bottlerocket.expected.json @@ -95,15 +95,15 @@ "VpcPublicSubnet1NATGateway4D7517AA": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + }, "AllocationId": { "Fn::GetAtt": [ "VpcPublicSubnet1EIPD7E02669", "AllocationId" ] }, - "SubnetId": { - "Ref": "VpcPublicSubnet1Subnet5C2D37C4" - }, "Tags": [ { "Key": "Name", @@ -835,7 +835,7 @@ }, "Parameters": { "SsmParameterValueawsservicebottlerocketawsecs1x8664latestimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { - "Type": "AWS::SSM::Parameter::Value", + "Type": "AWS::SSM::Parameter::Value", "Default": "/aws/service/bottlerocket/aws-ecs-1/x86_64/latest/image_id" } } diff --git a/packages/@aws-cdk/aws-ecs/test/ec2/integ.firelens-s3-config.expected.json b/packages/@aws-cdk/aws-ecs/test/ec2/integ.firelens-s3-config.expected.json index e863d75fa16fb..b082b18fd7281 100644 --- a/packages/@aws-cdk/aws-ecs/test/ec2/integ.firelens-s3-config.expected.json +++ b/packages/@aws-cdk/aws-ecs/test/ec2/integ.firelens-s3-config.expected.json @@ -95,15 +95,15 @@ "VpcPublicSubnet1NATGateway4D7517AA": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + }, "AllocationId": { "Fn::GetAtt": [ "VpcPublicSubnet1EIPD7E02669", 
"AllocationId" ] }, - "SubnetId": { - "Ref": "VpcPublicSubnet1Subnet5C2D37C4" - }, "Tags": [ { "Key": "Name", @@ -192,15 +192,15 @@ "VpcPublicSubnet2NATGateway9182C01D": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + }, "AllocationId": { "Fn::GetAtt": [ "VpcPublicSubnet2EIP3C605A87", "AllocationId" ] }, - "SubnetId": { - "Ref": "VpcPublicSubnet2Subnet691E08A3" - }, "Tags": [ { "Key": "Name", diff --git a/packages/@aws-cdk/aws-ecs/test/ec2/integ.graviton-bottlerocket.expected.json b/packages/@aws-cdk/aws-ecs/test/ec2/integ.graviton-bottlerocket.expected.json index fb6c80ff66b00..f2c15441bbe28 100644 --- a/packages/@aws-cdk/aws-ecs/test/ec2/integ.graviton-bottlerocket.expected.json +++ b/packages/@aws-cdk/aws-ecs/test/ec2/integ.graviton-bottlerocket.expected.json @@ -867,8 +867,8 @@ }, "Parameters": { "SsmParameterValueawsservicebottlerocketawsecs1arm64latestimageidC96584B6F00A464EAD1953AFF4B05118Parameter": { - "Type": "AWS::SSM::Parameter::Value", + "Type": "AWS::SSM::Parameter::Value", "Default": "/aws/service/bottlerocket/aws-ecs-1/arm64/latest/image_id" } } -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/aws-ecs/test/fargate/integ.firelens-cloudwatch.expected.json b/packages/@aws-cdk/aws-ecs/test/fargate/integ.firelens-cloudwatch.expected.json index c6efbdee23f66..a10c635e498d6 100644 --- a/packages/@aws-cdk/aws-ecs/test/fargate/integ.firelens-cloudwatch.expected.json +++ b/packages/@aws-cdk/aws-ecs/test/fargate/integ.firelens-cloudwatch.expected.json @@ -95,15 +95,15 @@ "VpcPublicSubnet1NATGateway4D7517AA": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + }, "AllocationId": { "Fn::GetAtt": [ "VpcPublicSubnet1EIPD7E02669", "AllocationId" ] }, - "SubnetId": { - "Ref": "VpcPublicSubnet1Subnet5C2D37C4" - }, "Tags": [ { "Key": "Name", @@ -192,15 +192,15 @@ "VpcPublicSubnet2NATGateway9182C01D": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + }, "AllocationId": { "Fn::GetAtt": [ "VpcPublicSubnet2EIP3C605A87", "AllocationId" ] }, - "SubnetId": { - "Ref": "VpcPublicSubnet2Subnet691E08A3" - }, "Tags": [ { "Key": "Name", From cc74f92f275a338cb53caa7d6f124ab0dd960f0b Mon Sep 17 00:00:00 2001 From: Niranjan Jayakar Date: Thu, 9 Sep 2021 16:09:37 +0100 Subject: [PATCH 26/41] feat(assertions): capture matching value (#16426) The `assertions` module now has the ability to capture values during template matching. These captured values can then later be retrieved and used for further processing. This change also adds support for `anyValue()` matcher. This matcher will match any non-nullish targets during template matching. Migrated some tests in `pipelines` module to the `assertions` module, using the new capture and `anyValue()` features. 
----

*By submitting this pull request, I confirm that my contribution is made under
the terms of the Apache-2.0 license*
---
 packages/@aws-cdk/assertions/README.md        | 71 +++++++++++++-
 packages/@aws-cdk/assertions/lib/capture.ts   | 98 +++++++++++++++++++
 packages/@aws-cdk/assertions/lib/index.ts     |  1 +
 packages/@aws-cdk/assertions/lib/match.ts     | 36 +++++--
 packages/@aws-cdk/assertions/lib/matcher.ts   |  2 +
 .../@aws-cdk/assertions/lib/private/type.ts   |  5 +
 .../@aws-cdk/assertions/test/capture.test.ts  | 69 +++++++++++++
 .../@aws-cdk/assertions/test/match.test.ts    | 38 +++++++
 packages/@aws-cdk/pipelines/package.json      |  1 +
 .../blueprint/logicalid-stability.test.ts     |  1 -
 .../codepipeline/codepipeline-sources.test.ts | 75 +++++++-------
 .../test/compliance/basic-behavior.test.ts    | 75 +++++++-------
 .../test/compliance/environments.test.ts      | 94 +++++++++---------
 .../test/compliance/escape-hatching.test.ts   | 47 +++++----
 .../pipelines/test/docker-credentials.test.ts | 37 ++++---
 .../pipelines/test/testhelpers/index.ts       |  3 +-
 .../pipelines/test/testhelpers/matchers.ts    | 32 ++++++
 17 files changed, 512 insertions(+), 173 deletions(-)
 create mode 100644 packages/@aws-cdk/assertions/lib/capture.ts
 create mode 100644 packages/@aws-cdk/assertions/lib/private/type.ts
 create mode 100644 packages/@aws-cdk/assertions/test/capture.test.ts
 create mode 100644 packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts

diff --git a/packages/@aws-cdk/assertions/README.md b/packages/@aws-cdk/assertions/README.md
index 706d467eb3e59..1fc0bb28e0cd3 100644
--- a/packages/@aws-cdk/assertions/README.md
+++ b/packages/@aws-cdk/assertions/README.md
@@ -184,7 +184,9 @@ assert.hasResourceProperties('Foo::Bar', {
 The `Match.objectEquals()` API can be used to assert a target as a deep exact
 match.
 
-In addition, the `Match.absentProperty()` can be used to specify that a specific
+### Presence and Absence
+
+The `Match.absentProperty()` matcher can be used to specify that a specific
 property should not exist on the target. This can be used within `Match.objectLike()`
 or outside of any matchers.
 
@@ -218,6 +220,42 @@ assert.hasResourceProperties('Foo::Bar', {
 });
 ```
 
+The `Match.anyValue()` matcher can be used to specify that a specific value should be found
+at the location. This matcher will fail when the target location has null-ish values
+(i.e., `null` or `undefined`).
+
+This matcher can be combined with any of the other matchers.
+
+```ts
+// Given a template -
+// {
+//   "Resources": {
+//     "MyBar": {
+//       "Type": "Foo::Bar",
+//       "Properties": {
+//         "Fred": {
+//           "Wobble": ["Flob", "Flib"],
+//         }
+//       }
+//     }
+//   }
+// }
+
+// The following will NOT throw an assertion error
+assert.hasResourceProperties('Foo::Bar', {
+  Fred: {
+    Wobble: [Match.anyValue(), "Flip"],
+  },
+});
+
+// The following will throw an assertion error
+assert.hasResourceProperties('Foo::Bar', {
+  Fred: {
+    Wimble: Match.anyValue(),
+  },
+});
+```
+
 ### Array Matchers
 
 The `Match.arrayWith()` API can be used to assert that the target is equal to or a subset
@@ -283,6 +321,37 @@ assert.hasResourceProperties('Foo::Bar', Match.objectLike({
 }});
 ```
 
+## Capturing Values
+
+The matcher APIs documented above allow capturing values in the matching entry
+(Resource, Output, Mapping, etc.). The following code captures a string from a
+matching resource.
+ +```ts +// Given a template - +// { +// "Resources": { +// "MyBar": { +// "Type": "Foo::Bar", +// "Properties": { +// "Fred": ["Flob", "Cat"], +// "Waldo": ["Qix", "Qux"], +// } +// } +// } +// } + +const fredCapture = new Capture(); +const waldoCapture = new Capture(); +assert.hasResourceProperties('Foo::Bar', { + Fred: fredCapture, + Waldo: ["Qix", waldoCapture], +}); + +fredCapture.asArray(); // returns ["Flob", "Cat"] +waldoCapture.asString(); // returns "Qux" +``` + ## Strongly typed languages Some of the APIs documented above, such as `templateMatches()` and diff --git a/packages/@aws-cdk/assertions/lib/capture.ts b/packages/@aws-cdk/assertions/lib/capture.ts new file mode 100644 index 0000000000000..c639dec79583f --- /dev/null +++ b/packages/@aws-cdk/assertions/lib/capture.ts @@ -0,0 +1,98 @@ +import { Matcher, MatchResult } from './matcher'; +import { Type, getType } from './private/type'; + +/** + * Capture values while matching templates. + * Using an instance of this class within a Matcher will capture the matching value. + * The `as*()` APIs on the instance can be used to get the captured value. + */ +export class Capture extends Matcher { + public readonly name: string; + private value: any = null; + + constructor() { + super(); + this.name = 'Capture'; + } + + public test(actual: any): MatchResult { + this.value = actual; + + const result = new MatchResult(actual); + if (actual == null) { + result.push(this, [], `Can only capture non-nullish values. Found ${actual}`); + } + return result; + } + + /** + * Retrieve the captured value as a string. + * An error is generated if no value is captured or if the value is not a string. + */ + public asString(): string { + this.checkNotNull(); + if (getType(this.value) === 'string') { + return this.value; + } + this.reportIncorrectType('string'); + } + + /** + * Retrieve the captured value as a number. + * An error is generated if no value is captured or if the value is not a number. + */ + public asNumber(): number { + this.checkNotNull(); + if (getType(this.value) === 'number') { + return this.value; + } + this.reportIncorrectType('number'); + } + + /** + * Retrieve the captured value as a boolean. + * An error is generated if no value is captured or if the value is not a boolean. + */ + public asBoolean(): boolean { + this.checkNotNull(); + if (getType(this.value) === 'boolean') { + return this.value; + } + this.reportIncorrectType('boolean'); + } + + /** + * Retrieve the captured value as an array. + * An error is generated if no value is captured or if the value is not an array. + */ + public asArray(): any[] { + this.checkNotNull(); + if (getType(this.value) === 'array') { + return this.value; + } + this.reportIncorrectType('array'); + } + + /** + * Retrieve the captured value as a JSON object. + * An error is generated if no value is captured or if the value is not an object. + */ + public asObject(): { [key: string]: any } { + this.checkNotNull(); + if (getType(this.value) === 'object') { + return this.value; + } + this.reportIncorrectType('object'); + } + + private checkNotNull(): void { + if (this.value == null) { + throw new Error('No value captured'); + } + } + + private reportIncorrectType(expected: Type): never { + throw new Error(`Captured value is expected to be ${expected} but found ${getType(this.value)}. 
` + + `Value is ${JSON.stringify(this.value, undefined, 2)}`); + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/lib/index.ts b/packages/@aws-cdk/assertions/lib/index.ts index 963039f921bc1..492fad1227af3 100644 --- a/packages/@aws-cdk/assertions/lib/index.ts +++ b/packages/@aws-cdk/assertions/lib/index.ts @@ -1,3 +1,4 @@ +export * from './capture'; export * from './template'; export * from './match'; export * from './matcher'; \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/lib/match.ts b/packages/@aws-cdk/assertions/lib/match.ts index 802acdc603e70..5c7e3fad8e90c 100644 --- a/packages/@aws-cdk/assertions/lib/match.ts +++ b/packages/@aws-cdk/assertions/lib/match.ts @@ -1,4 +1,5 @@ import { Matcher, MatchResult } from './matcher'; +import { getType } from './private/type'; import { ABSENT } from './vendored/assert'; /** @@ -63,6 +64,13 @@ export abstract class Match { public static not(pattern: any): Matcher { return new NotMatch('not', pattern); } + + /** + * Matches any non-null value at the target. + */ + public static anyValue(): Matcher { + return new AnyMatch('anyValue'); + } } /** @@ -141,7 +149,7 @@ interface ArrayMatchOptions { * Match class that matches arrays. */ class ArrayMatch extends Matcher { - private readonly partial: boolean; + private readonly subsequence: boolean; constructor( public readonly name: string, @@ -149,14 +157,14 @@ class ArrayMatch extends Matcher { options: ArrayMatchOptions = {}) { super(); - this.partial = options.subsequence ?? true; + this.subsequence = options.subsequence ?? true; } public test(actual: any): MatchResult { if (!Array.isArray(actual)) { return new MatchResult(actual).push(this, [], `Expected type array but received ${getType(actual)}`); } - if (!this.partial && this.pattern.length !== actual.length) { + if (!this.subsequence && this.pattern.length !== actual.length) { return new MatchResult(actual).push(this, [], `Expected array of length ${this.pattern.length} but received ${actual.length}`); } @@ -166,10 +174,16 @@ class ArrayMatch extends Matcher { const result = new MatchResult(actual); while (patternIdx < this.pattern.length && actualIdx < actual.length) { const patternElement = this.pattern[patternIdx]; + const matcher = Matcher.isMatcher(patternElement) ? patternElement : new LiteralMatch(this.name, patternElement); + if (this.subsequence && matcher instanceof AnyMatch) { + // array subsequence matcher is not compatible with anyValue() matcher. They don't make sense to be used together. + throw new Error('The Matcher anyValue() cannot be nested within arrayWith()'); + } + const innerResult = matcher.test(actual[actualIdx]); - if (!this.partial || !innerResult.hasFailed()) { + if (!this.subsequence || !innerResult.hasFailed()) { result.compose(`[${actualIdx}]`, innerResult); patternIdx++; actualIdx++; @@ -271,6 +285,16 @@ class NotMatch extends Matcher { } } -function getType(obj: any): string { - return Array.isArray(obj) ? 
'array' : typeof obj; +class AnyMatch extends Matcher { + constructor(public readonly name: string) { + super(); + } + + public test(actual: any): MatchResult { + const result = new MatchResult(actual); + if (actual == null) { + result.push(this, [], 'Expected a value but found none'); + } + return result; + } } \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/lib/matcher.ts b/packages/@aws-cdk/assertions/lib/matcher.ts index a3263d6f00829..fdf8954a90b93 100644 --- a/packages/@aws-cdk/assertions/lib/matcher.ts +++ b/packages/@aws-cdk/assertions/lib/matcher.ts @@ -17,6 +17,8 @@ export abstract class Matcher { /** * Test whether a target matches the provided pattern. + * Every Matcher must implement this method. + * This method will be invoked by the assertions framework. Do not call this method directly. * @param actual the target to match * @return the list of match failures. An empty array denotes a successful match. */ diff --git a/packages/@aws-cdk/assertions/lib/private/type.ts b/packages/@aws-cdk/assertions/lib/private/type.ts new file mode 100644 index 0000000000000..ffd894c576939 --- /dev/null +++ b/packages/@aws-cdk/assertions/lib/private/type.ts @@ -0,0 +1,5 @@ +export type Type = 'string' | 'number' | 'bigint' | 'boolean' | 'symbol' | 'undefined' | 'object' | 'function' | 'array'; + +export function getType(obj: any): Type { + return Array.isArray(obj) ? 'array' : typeof obj; +} \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/test/capture.test.ts b/packages/@aws-cdk/assertions/test/capture.test.ts new file mode 100644 index 0000000000000..818592fa35baf --- /dev/null +++ b/packages/@aws-cdk/assertions/test/capture.test.ts @@ -0,0 +1,69 @@ +import { Capture, Match } from '../lib'; + +describe('Capture', () => { + test('uncaptured', () => { + const capture = new Capture(); + expect(() => capture.asString()).toThrow(/No value captured/); + }); + + test('nullish', () => { + const capture = new Capture(); + const matcher = Match.objectEquals({ foo: capture }); + + const result = matcher.test({ foo: null }); + expect(result.failCount).toEqual(1); + expect(result.toHumanStrings()[0]).toMatch(/Can only capture non-nullish values/); + }); + + test('asString()', () => { + const capture = new Capture(); + const matcher = Match.objectEquals({ foo: capture }); + + matcher.test({ foo: 'bar' }); + expect(capture.asString()).toEqual('bar'); + + matcher.test({ foo: 3 }); + expect(() => capture.asString()).toThrow(/expected to be string but found number/); + }); + + test('asNumber()', () => { + const capture = new Capture(); + const matcher = Match.objectEquals({ foo: capture }); + + matcher.test({ foo: 3 }); + expect(capture.asNumber()).toEqual(3); + + matcher.test({ foo: 'bar' }); + expect(() => capture.asNumber()).toThrow(/expected to be number but found string/); + }); + + test('asArray()', () => { + const capture = new Capture(); + const matcher = Match.objectEquals({ foo: capture }); + + matcher.test({ foo: ['bar'] }); + expect(capture.asArray()).toEqual(['bar']); + + matcher.test({ foo: 'bar' }); + expect(() => capture.asArray()).toThrow(/expected to be array but found string/); + }); + + test('asObject()', () => { + const capture = new Capture(); + const matcher = Match.objectEquals({ foo: capture }); + + matcher.test({ foo: { fred: 'waldo' } }); + expect(capture.asObject()).toEqual({ fred: 'waldo' }); + + matcher.test({ foo: 'bar' }); + expect(() => capture.asObject()).toThrow(/expected to be object but found string/); + }); + + test('nested 
within an array', () => { + const capture = new Capture(); + const matcher = Match.objectEquals({ foo: ['bar', capture] }); + + matcher.test({ foo: ['bar', 'baz'] }); + expect(capture.asString()).toEqual('baz'); + }); +}); \ No newline at end of file diff --git a/packages/@aws-cdk/assertions/test/match.test.ts b/packages/@aws-cdk/assertions/test/match.test.ts index b46eb0d53d204..268810857f9a8 100644 --- a/packages/@aws-cdk/assertions/test/match.test.ts +++ b/packages/@aws-cdk/assertions/test/match.test.ts @@ -128,6 +128,11 @@ describe('Matchers', () => { test('absent', () => { expect(() => Match.arrayWith([Match.absentProperty()]).test(['foo'])).toThrow(/absentProperty/); }); + + test('incompatible with anyValue', () => { + matcher = Match.arrayWith(['foo', Match.anyValue()]); + expect(() => matcher.test(['foo', 'bar'])).toThrow(/anyValue\(\) cannot be nested within arrayWith\(\)/); + }); }); describe('arrayEquals', () => { @@ -285,6 +290,39 @@ describe('Matchers', () => { }, [msg]); }); }); + + describe('anyValue()', () => { + let matcher: Matcher; + + test('simple', () => { + matcher = Match.anyValue(); + expectPass(matcher, 'foo'); + expectPass(matcher, 5); + expectPass(matcher, false); + expectPass(matcher, []); + expectPass(matcher, {}); + + expectFailure(matcher, null, ['Expected a value but found none']); + expectFailure(matcher, undefined, ['Expected a value but found none']); + }); + + test('nested in array', () => { + matcher = Match.arrayEquals(['foo', Match.anyValue(), 'bar']); + expectPass(matcher, ['foo', 'baz', 'bar']); + expectPass(matcher, ['foo', 3, 'bar']); + + expectFailure(matcher, ['foo', null, 'bar'], ['Expected a value but found none at [1]']); + }); + + test('nested in object', () => { + matcher = Match.objectLike({ foo: Match.anyValue() }); + expectPass(matcher, { foo: 'bar' }); + expectPass(matcher, { foo: [1, 2] }); + + expectFailure(matcher, { foo: null }, ['Expected a value but found none at /foo']); + expectFailure(matcher, {}, ['Missing key at /foo']); + }); + }); }); function expectPass(matcher: Matcher, target: any): void { diff --git a/packages/@aws-cdk/pipelines/package.json b/packages/@aws-cdk/pipelines/package.json index 5edf5a75d7f85..37605df5210d4 100644 --- a/packages/@aws-cdk/pipelines/package.json +++ b/packages/@aws-cdk/pipelines/package.json @@ -33,6 +33,7 @@ }, "devDependencies": { "@aws-cdk/assert-internal": "0.0.0", + "@aws-cdk/assertions": "0.0.0", "@aws-cdk/aws-apigateway": "0.0.0", "@aws-cdk/aws-ecr-assets": "0.0.0", "@aws-cdk/aws-sqs": "0.0.0", diff --git a/packages/@aws-cdk/pipelines/test/blueprint/logicalid-stability.test.ts b/packages/@aws-cdk/pipelines/test/blueprint/logicalid-stability.test.ts index 319d25203c92b..52d63d00d64b3 100644 --- a/packages/@aws-cdk/pipelines/test/blueprint/logicalid-stability.test.ts +++ b/packages/@aws-cdk/pipelines/test/blueprint/logicalid-stability.test.ts @@ -1,4 +1,3 @@ -import '@aws-cdk/assert-internal/jest'; import { Stack } from '@aws-cdk/core'; import { mkdict } from '../../lib/private/javascript'; import { PIPELINE_ENV, TestApp, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, MegaAssetsApp, stackTemplate } from '../testhelpers'; diff --git a/packages/@aws-cdk/pipelines/test/codepipeline/codepipeline-sources.test.ts b/packages/@aws-cdk/pipelines/test/codepipeline/codepipeline-sources.test.ts index 84aef10bb171c..46fb468c37623 100644 --- a/packages/@aws-cdk/pipelines/test/codepipeline/codepipeline-sources.test.ts +++ 
b/packages/@aws-cdk/pipelines/test/codepipeline/codepipeline-sources.test.ts @@ -1,5 +1,4 @@ -import { anything, arrayWith, Capture, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Capture, Match, Template } from '@aws-cdk/assertions'; import * as ccommit from '@aws-cdk/aws-codecommit'; import { CodeCommitTrigger, GitHubTrigger } from '@aws-cdk/aws-codepipeline-actions'; import { AnyPrincipal, Role } from '@aws-cdk/aws-iam'; @@ -28,18 +27,18 @@ test('CodeCommit source handles tokenized names correctly', () => { input: cdkp.CodePipelineSource.codeCommit(repo, 'main'), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - objectLike({ - Configuration: objectLike({ - RepositoryName: { 'Fn::GetAtt': [anything(), 'Name'] }, + Match.objectLike({ + Configuration: Match.objectLike({ + RepositoryName: { 'Fn::GetAtt': [Match.anyValue(), 'Name'] }, }), - Name: { 'Fn::GetAtt': [anything(), 'Name'] }, + Name: { 'Fn::GetAtt': [Match.anyValue(), 'Name'] }, }), ], - }), + }]), }); }); @@ -58,20 +57,20 @@ test('CodeCommit source honors all valid properties', () => { }), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - objectLike({ - Configuration: objectLike({ + Match.objectLike({ + Configuration: Match.objectLike({ BranchName: 'main', PollForSourceChanges: true, OutputArtifactFormat: 'CODEBUILD_CLONE_REF', }), - RoleArn: { 'Fn::GetAtt': [anything(), 'Arn'] }, + RoleArn: { 'Fn::GetAtt': [Match.anyValue(), 'Arn'] }, }), ], - }), + }]), }); }); @@ -81,19 +80,19 @@ test('S3 source handles tokenized names correctly', () => { input: cdkp.CodePipelineSource.s3(buckit, 'thefile.zip'), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - objectLike({ - Configuration: objectLike({ - S3Bucket: { Ref: anything() }, + Match.objectLike({ + Configuration: Match.objectLike({ + S3Bucket: { Ref: Match.anyValue() }, S3ObjectKey: 'thefile.zip', }), - Name: { Ref: anything() }, + Name: { Ref: Match.anyValue() }, }), ], - }), + }]), }); }); @@ -105,12 +104,12 @@ test('GitHub source honors all valid properties', () => { }), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - objectLike({ - Configuration: objectLike({ + Match.objectLike({ + Configuration: Match.objectLike({ Owner: 'owner', Repo: 'repo', Branch: 'main', @@ -120,7 +119,7 @@ test('GitHub source honors all valid properties', () => { Name: 'owner_repo', }), ], - }), + }]), }); }); @@ -145,17 +144,17 @@ test('Dashes in repo names are removed from artifact names', () => { input: cdkp.CodePipelineSource.gitHub('owner/my-repo', 'main'), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: 
Match.arrayWith([{ Name: 'Source', Actions: [ - objectLike({ + Match.objectLike({ OutputArtifacts: [ { Name: 'owner_my_repo_Source' }, ], }), ], - }), + }]), }); }); @@ -164,19 +163,19 @@ test('artifact names are never longer than 128 characters', () => { input: cdkp.CodePipelineSource.gitHub('owner/' + 'my-repo'.repeat(100), 'main'), }); - const artifactId = Capture.aString(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + const artifactId = new Capture(); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - objectLike({ + Match.objectLike({ OutputArtifacts: [ - { Name: artifactId.capture() }, + { Name: artifactId }, ], }), ], - }), + }]), }); - expect(artifactId.capturedValue.length).toBeLessThanOrEqual(128); + expect(artifactId.asString().length).toBeLessThanOrEqual(128); }); \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/compliance/basic-behavior.test.ts b/packages/@aws-cdk/pipelines/test/compliance/basic-behavior.test.ts index 1248831737bdf..85d7d5911dd0a 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/basic-behavior.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/basic-behavior.test.ts @@ -1,11 +1,11 @@ /* eslint-disable import/no-extraneous-dependencies */ import * as fs from 'fs'; import * as path from 'path'; -import { arrayWith, Capture, objectLike, stringLike } from '@aws-cdk/assert-internal'; +import { Capture, Match, Template } from '@aws-cdk/assertions'; import '@aws-cdk/assert-internal/jest'; import { Stack, Stage, StageProps, Tags } from '@aws-cdk/core'; import { Construct } from 'constructs'; -import { behavior, LegacyTestGitHubNpmPipeline, OneStackApp, BucketStack, PIPELINE_ENV, TestApp, ModernTestGitHubNpmPipeline } from '../testhelpers'; +import { behavior, LegacyTestGitHubNpmPipeline, OneStackApp, BucketStack, PIPELINE_ENV, TestApp, ModernTestGitHubNpmPipeline, stringLike } from '../testhelpers'; let app: TestApp; let pipelineStack: Stack; @@ -37,20 +37,20 @@ behavior('stack templates in nested assemblies are correctly addressed', (suite) }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'App', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: stringLike('*Prepare'), - InputArtifacts: [objectLike({})], - Configuration: objectLike({ + InputArtifacts: [Match.objectLike({})], + Configuration: Match.objectLike({ StackName: 'App-Stack', TemplatePath: stringLike('*::assembly-App/*.template.json'), }), }), - ), - }), + ]), + }]), }); } }); @@ -94,27 +94,27 @@ behavior('overridden stack names are respected', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith( + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([ { Name: 'App1', - Actions: arrayWith(objectLike({ + Actions: Match.arrayWith([Match.objectLike({ Name: stringLike('*Prepare'), - Configuration: objectLike({ + Configuration: Match.objectLike({ StackName: 'MyFancyStack', }), - })), + })]), }, { Name: 'App2', - Actions: arrayWith(objectLike({ + Actions: Match.arrayWith([Match.objectLike({ Name: 
stringLike('*Prepare'), - Configuration: objectLike({ + Configuration: Match.objectLike({ StackName: 'MyFancyStack', }), - })), + })]), }, - ), + ]), }); } }); @@ -154,17 +154,17 @@ behavior('changing CLI version leads to a different pipeline structure (restarti function THEN_codePipelineExpectation(stack2: Stack, stack3: Stack) { // THEN - const structure2 = Capture.anyType(); - const structure3 = Capture.anyType(); + const structure2 = new Capture(); + const structure3 = new Capture(); - expect(stack2).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: structure2.capture(), + Template.fromStack(stack2).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: structure2, }); - expect(stack3).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: structure3.capture(), + Template.fromStack(stack3).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: structure3, }); - expect(JSON.stringify(structure2.capturedValue)).not.toEqual(JSON.stringify(structure3.capturedValue)); + expect(JSON.stringify(structure2.asArray())).not.toEqual(JSON.stringify(structure3.asArray())); } }); @@ -190,24 +190,25 @@ behavior('tags get reflected in pipeline', (suite) => { function THEN_codePipelineExpectation() { // THEN - const templateConfig = Capture.aString(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + const templateConfig = new Capture(); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'App', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: stringLike('*Prepare'), - InputArtifacts: [objectLike({})], - Configuration: objectLike({ + InputArtifacts: [Match.objectLike({})], + Configuration: Match.objectLike({ StackName: 'App-Stack', - TemplateConfiguration: templateConfig.capture(stringLike('*::assembly-App/*.template.*json')), + TemplateConfiguration: templateConfig, }), }), - ), - }), + ]), + }]), }); - const [, relConfigFile] = templateConfig.capturedValue.split('::'); + expect(templateConfig.asString()).toMatch(/::assembly-App\/.*\.template\..*json/); + const [, relConfigFile] = templateConfig.asString().split('::'); const absConfigFile = path.join(app.outdir, relConfigFile); const configFile = JSON.parse(fs.readFileSync(absConfigFile, { encoding: 'utf-8' })); expect(configFile).toEqual(expect.objectContaining({ diff --git a/packages/@aws-cdk/pipelines/test/compliance/environments.test.ts b/packages/@aws-cdk/pipelines/test/compliance/environments.test.ts index d30e5a423fcb3..6ab303e7df43d 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/environments.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/environments.test.ts @@ -1,8 +1,7 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { arrayWith, objectLike, stringLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import { Stack } from '@aws-cdk/core'; -import { behavior, LegacyTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, TestApp, ModernTestGitHubNpmPipeline } from '../testhelpers'; +import { behavior, LegacyTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, TestApp, ModernTestGitHubNpmPipeline, stringLike } from '../testhelpers'; let app: TestApp; let pipelineStack: Stack; @@ -51,38 +50,38 @@ behavior('action has right settings for same-env deployment', (suite) => { function THEN_codePipelineExpection(roleArn: (x: string) => any) 
{ // THEN: pipeline structure is correct
-    expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
-      Stages: arrayWith({
+    Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', {
+      Stages: Match.arrayWith([{
         Name: 'Same',
         Actions: [
-          objectLike({
+          Match.objectLike({
             Name: stringLike('*Prepare'),
             RoleArn: roleArn('deploy-role'),
-            Configuration: objectLike({
+            Configuration: Match.objectLike({
               StackName: 'Same-Stack',
               RoleArn: roleArn('cfn-exec-role'),
             }),
           }),
-          objectLike({
+          Match.objectLike({
             Name: stringLike('*Deploy'),
             RoleArn: roleArn('deploy-role'),
-            Configuration: objectLike({
+            Configuration: Match.objectLike({
               StackName: 'Same-Stack',
             }),
           }),
         ],
-      }),
+      }]),
     });
 
     // THEN: artifact bucket can be read by deploy role
-    expect(pipelineStack).toHaveResourceLike('AWS::S3::BucketPolicy', {
+    Template.fromStack(pipelineStack).hasResourceProperties('AWS::S3::BucketPolicy', {
       PolicyDocument: {
-        Statement: arrayWith(objectLike({
+        Statement: Match.arrayWith([Match.objectLike({
           Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
           Principal: {
             AWS: roleArn('deploy-role'),
           },
-        })),
+        })]),
       },
     });
   }
@@ -109,11 +108,11 @@ behavior('action has right settings for cross-account deployment', (suite) => {
 
   function THEN_codePipelineExpectation() {
     // THEN: Pipeline structure is correct
-    expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
-      Stages: arrayWith({
+    Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', {
+      Stages: Match.arrayWith([{
         Name: 'CrossAccount',
         Actions: [
-          objectLike({
+          Match.objectLike({
             Name: stringLike('*Prepare'),
             RoleArn: {
               'Fn::Join': ['', [
@@ -123,7 +122,7 @@ behavior('action has right settings for cross-account deployment', (suite) => {
                 { Ref: 'AWS::Region' },
               ]],
             },
-            Configuration: objectLike({
+            Configuration: Match.objectLike({
               StackName: 'CrossAccount-Stack',
               RoleArn: {
                 'Fn::Join': ['', [
@@ -135,7 +134,7 @@ behavior('action has right settings for cross-account deployment', (suite) => {
               },
             }),
           }),
-          objectLike({
+          Match.objectLike({
             Name: stringLike('*Deploy'),
             RoleArn: {
               'Fn::Join': ['', [
@@ -145,18 +144,18 @@ behavior('action has right settings for cross-account deployment', (suite) => {
                 { Ref: 'AWS::Region' },
               ]],
             },
-            Configuration: objectLike({
+            Configuration: Match.objectLike({
               StackName: 'CrossAccount-Stack',
             }),
           }),
         ],
-      }),
+      }]),
     });
 
     // THEN: Artifact bucket can be read by deploy role
-    expect(pipelineStack).toHaveResourceLike('AWS::S3::BucketPolicy', {
+    Template.fromStack(pipelineStack).hasResourceProperties('AWS::S3::BucketPolicy', {
       PolicyDocument: {
-        Statement: arrayWith(objectLike({
+        Statement: Match.arrayWith([Match.objectLike({
           Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
           Principal: {
             AWS: {
@@ -168,7 +167,7 @@ behavior('action has right settings for cross-account deployment', (suite) => {
               ]],
             },
           },
-        })),
+        })]),
       },
     });
   }
@@ -194,11 +193,11 @@ behavior('action has right settings for cross-region deployment', (suite) => {
 
   function THEN_codePipelineExpectation() {
     // THEN
-    expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', {
-      Stages: arrayWith({
+    Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', {
+      Stages: Match.arrayWith([{
         Name: 'CrossRegion',
         Actions: [
-          objectLike({
+          Match.objectLike({
             Name: stringLike('*Prepare'),
             RoleArn: {
               'Fn::Join': ['', [
@@ -212,7 +211,7 @@ behavior('action has right settings for cross-region deployment', (suite) => {
               ]],
             },
Region: 'elsewhere', - Configuration: objectLike({ + Configuration: Match.objectLike({ StackName: 'CrossRegion-Stack', RoleArn: { 'Fn::Join': ['', [ @@ -227,7 +226,7 @@ behavior('action has right settings for cross-region deployment', (suite) => { }, }), }), - objectLike({ + Match.objectLike({ Name: stringLike('*Deploy'), RoleArn: { 'Fn::Join': ['', [ @@ -241,12 +240,12 @@ behavior('action has right settings for cross-region deployment', (suite) => { ]], }, Region: 'elsewhere', - Configuration: objectLike({ + Configuration: Match.objectLike({ StackName: 'CrossRegion-Stack', }), }), ], - }), + }]), }); } }); @@ -282,11 +281,13 @@ behavior('action has right settings for cross-account/cross-region deployment', function THEN_codePipelineExpectations() { // THEN: pipeline structure must be correct - expect(app.stackArtifact(pipelineStack)).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + const stack = app.stackArtifact(pipelineStack); + expect(stack).toBeDefined(); + Template.fromStack(stack!).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'CrossBoth', Actions: [ - objectLike({ + Match.objectLike({ Name: stringLike('*Prepare'), RoleArn: { 'Fn::Join': ['', [ @@ -296,7 +297,7 @@ behavior('action has right settings for cross-account/cross-region deployment', ]], }, Region: 'elsewhere', - Configuration: objectLike({ + Configuration: Match.objectLike({ StackName: 'CrossBoth-Stack', RoleArn: { 'Fn::Join': ['', [ @@ -307,7 +308,7 @@ behavior('action has right settings for cross-account/cross-region deployment', }, }), }), - objectLike({ + Match.objectLike({ Name: stringLike('*Deploy'), RoleArn: { 'Fn::Join': ['', [ @@ -317,20 +318,21 @@ behavior('action has right settings for cross-account/cross-region deployment', ]], }, Region: 'elsewhere', - Configuration: objectLike({ + Configuration: Match.objectLike({ StackName: 'CrossBoth-Stack', }), }), ], - }), + }]), }); // THEN: artifact bucket can be read by deploy role - const supportStack = 'PipelineStack-support-elsewhere'; - expect(app.stackArtifact(supportStack)).toHaveResourceLike('AWS::S3::BucketPolicy', { + const supportStack = app.stackArtifact('PipelineStack-support-elsewhere'); + expect(supportStack).toBeDefined(); + Template.fromStack(supportStack!).hasResourceProperties('AWS::S3::BucketPolicy', { PolicyDocument: { - Statement: arrayWith(objectLike({ - Action: arrayWith('s3:GetObject*', 's3:GetBucket*', 's3:List*'), + Statement: Match.arrayWith([Match.objectLike({ + Action: Match.arrayWith(['s3:GetObject*', 's3:GetBucket*', 's3:List*']), Principal: { AWS: { 'Fn::Join': ['', [ @@ -340,15 +342,15 @@ behavior('action has right settings for cross-account/cross-region deployment', ]], }, }, - })), + })]), }, }); // And the key to go along with it - expect(app.stackArtifact(supportStack)).toHaveResourceLike('AWS::KMS::Key', { + Template.fromStack(supportStack!).hasResourceProperties('AWS::KMS::Key', { KeyPolicy: { - Statement: arrayWith(objectLike({ - Action: arrayWith('kms:Decrypt', 'kms:DescribeKey'), + Statement: Match.arrayWith([Match.objectLike({ + Action: Match.arrayWith(['kms:Decrypt', 'kms:DescribeKey']), Principal: { AWS: { 'Fn::Join': ['', [ @@ -358,7 +360,7 @@ behavior('action has right settings for cross-account/cross-region deployment', ]], }, }, - })), + })]), }, }); } diff --git a/packages/@aws-cdk/pipelines/test/compliance/escape-hatching.test.ts b/packages/@aws-cdk/pipelines/test/compliance/escape-hatching.test.ts index 3a8cb26d4d2fd..822a4f06f6164 100644 --- 
a/packages/@aws-cdk/pipelines/test/compliance/escape-hatching.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/escape-hatching.test.ts @@ -1,5 +1,4 @@ -import { arrayWith, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import * as cp from '@aws-cdk/aws-codepipeline'; import * as cpa from '@aws-cdk/aws-codepipeline-actions'; import { SecretValue, Stack } from '@aws-cdk/core'; @@ -69,11 +68,11 @@ describe('with empty existing CodePipeline', () => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), ], }); } @@ -118,11 +117,11 @@ describe('with custom Source stage in existing Pipeline', () => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'CustomSource' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: 'CustomSource' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), ], }); } @@ -167,11 +166,11 @@ describe('with Source and Build stages in existing Pipeline', () => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'CustomSource' }), - objectLike({ Name: 'CustomBuild' }), - objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: 'CustomSource' }), + Match.objectLike({ Name: 'CustomBuild' }), + Match.objectLike({ Name: 'UpdatePipeline' }), ], }); } @@ -209,14 +208,14 @@ behavior('can add another action to an existing stage', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - objectLike({ ActionTypeId: objectLike({ Provider: 'GitHub' }) }), - objectLike({ ActionTypeId: objectLike({ Provider: 'GitHub' }), Name: 'GitHub2' }), + Match.objectLike({ ActionTypeId: Match.objectLike({ Provider: 'GitHub' }) }), + Match.objectLike({ ActionTypeId: Match.objectLike({ Provider: 'GitHub' }), Name: 'GitHub2' }), ], - }), + }]), }); } }); @@ -264,12 +263,12 @@ behavior('assets stage inserted after existing pipeline actions', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'CustomSource' }), - objectLike({ Name: 'CustomBuild' }), - objectLike({ Name: 'Assets' }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'CustomSource' }), + Match.objectLike({ Name: 'CustomBuild' }), + Match.objectLike({ Name: 'Assets' }), + Match.objectLike({ Name: 
'App' }), ], }); } diff --git a/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts b/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts index 9e3559242e04c..a2b5fc2c577dd 100644 --- a/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts +++ b/packages/@aws-cdk/pipelines/test/docker-credentials.test.ts @@ -1,5 +1,4 @@ -import { arrayWith } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as ecr from '@aws-cdk/aws-ecr'; import * as iam from '@aws-cdk/aws-iam'; @@ -44,7 +43,7 @@ describe('ExternalDockerCredential', () => { }); }); - test('maximmum example includes all expected properties', () => { + test('maximum example includes all expected properties', () => { const roleArn = 'arn:aws:iam::0123456789012:role/MyRole'; const creds = cdkp.DockerCredential.customRegistry('example.com', secret, { secretUsernameField: 'login', @@ -71,7 +70,7 @@ describe('ExternalDockerCredential', () => { const user = new iam.User(stack, 'User'); creds.grantRead(user, cdkp.DockerCredentialUsage.ASSET_PUBLISHING); - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -93,7 +92,7 @@ describe('ExternalDockerCredential', () => { const user = new iam.User(stack, 'User'); creds.grantRead(user, cdkp.DockerCredentialUsage.ASSET_PUBLISHING); - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], @@ -104,7 +103,7 @@ describe('ExternalDockerCredential', () => { }, Roles: ['MyRole'], }); - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Action: 'sts:AssumeRole', @@ -127,7 +126,7 @@ describe('ExternalDockerCredential', () => { const user = new iam.User(stack, 'User'); creds.grantRead(user, cdkp.DockerCredentialUsage.SELF_UPDATE); - expect(stack).not.toHaveResource('AWS::IAM::Policy'); + Template.fromStack(stack).resourceCountIs('AWS::IAM::Policy', 0); }); }); @@ -193,7 +192,7 @@ describe('EcrDockerCredential', () => { const user = new iam.User(stack, 'User'); creds.grantRead(user, cdkp.DockerCredentialUsage.ASSET_PUBLISHING); - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Action: [ @@ -222,7 +221,7 @@ describe('EcrDockerCredential', () => { const user = new iam.User(stack, 'User'); creds.grantRead(user, cdkp.DockerCredentialUsage.ASSET_PUBLISHING); - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Action: [ @@ -242,7 +241,7 @@ describe('EcrDockerCredential', () => { }, Roles: ['MyRole'], }); - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [{ Action: 'sts:AssumeRole', @@ -262,9 +261,9 @@ describe('EcrDockerCredential', () => { const user = new iam.User(stack, 'User'); creds.grantRead(user, cdkp.DockerCredentialUsage.ASSET_PUBLISHING); - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { 
PolicyDocument: {
-          Statement: arrayWith({
+          Statement: Match.arrayWith([{
             Action: [
               'ecr:BatchCheckLayerAvailability',
               'ecr:GetDownloadUrlForLayer',
               'ecr:BatchGetImage',
             ],
             Effect: 'Allow',
             Resource: 'arn:aws:ecr:eu-west-1:0123456789012:repository/Repo',
           },
+          {
+            Action: 'ecr:GetAuthorizationToken',
+            Effect: 'Allow',
+            Resource: '*',
+          },
           {
             Action: [
               'ecr:BatchCheckLayerAvailability',
               'ecr:GetDownloadUrlForLayer',
               'ecr:BatchGetImage',
             ],
             Effect: 'Allow',
             Resource: 'arn:aws:ecr:eu-west-1:0123456789012:repository/Repo2',
-          },
-          {
-            Action: 'ecr:GetAuthorizationToken',
-            Effect: 'Allow',
-            Resource: '*',
-          }),
+          }]),
           Version: '2012-10-17',
         },
         Users: [{ Ref: 'User00B015A1' }],
@@ -299,7 +298,7 @@ describe('EcrDockerCredential', () => {
     const user = new iam.User(stack, 'User');
     creds.grantRead(user, cdkp.DockerCredentialUsage.ASSET_PUBLISHING);
 
-    expect(stack).not.toHaveResource('AWS::IAM::Policy');
+    Template.fromStack(stack).resourceCountIs('AWS::IAM::Policy', 0);
   });
 });
diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/index.ts b/packages/@aws-cdk/pipelines/test/testhelpers/index.ts
index 21ca108240f27..87a02ce0b6a66 100644
--- a/packages/@aws-cdk/pipelines/test/testhelpers/index.ts
+++ b/packages/@aws-cdk/pipelines/test/testhelpers/index.ts
@@ -2,4 +2,5 @@ export * from './compliance';
 export * from './legacy-pipeline';
 export * from './modern-pipeline';
 export * from './test-app';
-export * from './testmatchers';
\ No newline at end of file
+export * from './testmatchers';
+export * from './matchers';
\ No newline at end of file
diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts b/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts
new file mode 100644
index 0000000000000..4ace0148c5eaa
--- /dev/null
+++ b/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts
@@ -0,0 +1,32 @@
+import { Matcher, MatchResult } from '@aws-cdk/assertions';
+
+export function stringLike(pattern: string) {
+  return new StringLike(pattern);
+}
+
+// Reimplementation of
+// https://github.com/aws/aws-cdk/blob/430f50a546e9c575f8cdbd259367e440d985e68f/packages/%40aws-cdk/assert-internal/lib/assertions/have-resource-matchers.ts#L244
+class StringLike extends Matcher {
+  public name = 'StringLike';
+
+  constructor(private readonly pattern: string) {
+    super();
+  }
+
+  public test(actual: any): MatchResult {
+    if (typeof(actual) !== 'string') {
+      throw new Error(`Expected string but found ${typeof(actual)}`);
+    }
+    const re = new RegExp(`^${this.pattern.split('*').map(escapeRegex).join('.*')}$`);
+
+    const result = new MatchResult(actual);
+    if (!re.test(actual)) {
+      result.push(this, [], `Looking for string with pattern "${this.pattern}" but found "${actual}"`);
+    }
+    return result;
+  }
+}
+
+function escapeRegex(s: string) {
+  return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
+}
\ No newline at end of file

From 36ff73809a37998e15176cb8815c118e7ea0c295 Mon Sep 17 00:00:00 2001
From: Rico Huijbers
Date: Thu, 9 Sep 2021 17:56:16 +0200
Subject: [PATCH 27/41] fix(core): asset hash of symlinked dir is wrong
 (#16429)

When the root directory of an asset is a symlink (such as can happen in CDK
Pipelines), the asset hash calculation incorrectly doesn't follow the symlink
and hashes the link itself instead. This leads to the asset hash never
changing, even though the files inside the directory do change.

Instead, we resolve the asset root dir, and make sure to hash the target
directory on disk.
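In essence, the fix is "resolve first, then hash". The following is a
simplified, hypothetical sketch of that idea only, not the code in this patch
(the real logic lives in `fingerprint.ts` below, and the `hashDirectory`
helper here is a stand-in that only hashes the files directly inside the
directory, without recursion):

```ts
import * as crypto from 'crypto';
import * as fs from 'fs';
import * as path from 'path';

// Hypothetical stand-in for the real fingerprinting logic: hashes the names
// and contents of the files directly inside a directory.
function hashDirectory(dir: string): string {
  const hash = crypto.createHash('sha256');
  for (const name of fs.readdirSync(dir).sort()) {
    const fullPath = path.join(dir, name);
    hash.update(name);
    if (fs.statSync(fullPath).isFile()) {
      hash.update(fs.readFileSync(fullPath));
    }
  }
  return hash.digest('hex');
}

function fingerprintRoot(fileOrDirectory: string): string {
  // Resolve the (possibly symlinked) root first, so the hash follows the
  // contents of the target directory instead of the link inode itself.
  const resolved = fs.realpathSync(fileOrDirectory);
  const rootDirectory = fs.statSync(resolved).isDirectory()
    ? resolved
    : path.dirname(resolved);
  return hashDirectory(rootDirectory);
}
```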
Handling of symlinks found *inside* the target dir remains unchanged. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/core/lib/fs/fingerprint.ts | 10 ++++-- packages/@aws-cdk/core/test/staging.test.ts | 35 +++++++++++++++----- 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/packages/@aws-cdk/core/lib/fs/fingerprint.ts b/packages/@aws-cdk/core/lib/fs/fingerprint.ts index 7ba7214109b2c..4abfbe93c61bc 100644 --- a/packages/@aws-cdk/core/lib/fs/fingerprint.ts +++ b/packages/@aws-cdk/core/lib/fs/fingerprint.ts @@ -31,7 +31,14 @@ export function fingerprint(fileOrDirectory: string, options: FingerprintOptions const follow = options.follow || SymlinkFollowMode.EXTERNAL; _hashField(hash, 'options.follow', follow); - const rootDirectory = fs.statSync(fileOrDirectory).isDirectory() + // Resolve symlinks in the initial path (for example, the root directory + // might be symlinked). It's important that we know the absolute path, so we + // can judge if further symlinks inside the target directory are within the + // target or not (if we don't resolve, we would test w.r.t. the wrong path). + fileOrDirectory = fs.realpathSync(fileOrDirectory); + + const isDir = fs.statSync(fileOrDirectory).isDirectory(); + const rootDirectory = isDir ? fileOrDirectory : path.dirname(fileOrDirectory); @@ -41,7 +48,6 @@ export function fingerprint(fileOrDirectory: string, options: FingerprintOptions } const ignoreStrategy = IgnoreStrategy.fromCopyOptions(options, fileOrDirectory); - const isDir = fs.statSync(fileOrDirectory).isDirectory(); _processFileOrDirectory(fileOrDirectory, isDir); return hash.digest('hex'); diff --git a/packages/@aws-cdk/core/test/staging.test.ts b/packages/@aws-cdk/core/test/staging.test.ts index edd8d124ba978..f9313f5095d2d 100644 --- a/packages/@aws-cdk/core/test/staging.test.ts +++ b/packages/@aws-cdk/core/test/staging.test.ts @@ -18,6 +18,7 @@ enum DockerStubCommand { } const FIXTURE_TEST1_DIR = path.join(__dirname, 'fs', 'fixtures', 'test1'); +const FIXTURE_TEST1_HASH = '2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'; const FIXTURE_TARBALL = path.join(__dirname, 'fs', 'fixtures.tar.gz'); const userInfo = os.userInfo(); @@ -42,18 +43,36 @@ describe('staging', () => { test('base case', () => { // GIVEN const stack = new Stack(); - const sourcePath = path.join(__dirname, 'fs', 'fixtures', 'test1'); + const sourcePath = FIXTURE_TEST1_DIR; // WHEN const staging = new AssetStaging(stack, 's1', { sourcePath }); - expect(staging.sourceHash).toEqual('2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(staging.sourceHash).toEqual(FIXTURE_TEST1_HASH); expect(staging.sourcePath).toEqual(sourcePath); - expect(path.basename(staging.stagedPath)).toEqual('asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); - expect(path.basename(staging.relativeStagedPath(stack))).toEqual('asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(path.basename(staging.stagedPath)).toEqual(`asset.${FIXTURE_TEST1_HASH}`); + expect(path.basename(staging.relativeStagedPath(stack))).toEqual(`asset.${FIXTURE_TEST1_HASH}`); expect(staging.packaging).toEqual(FileAssetPackaging.ZIP_DIRECTORY); expect(staging.isArchive).toEqual(true); + }); + test('base case if source directory is a symlink', () => { + // GIVEN + const stack = new Stack(); + const sourcePath = path.join(os.tmpdir(), 'asset-symlink'); + if (fs.existsSync(sourcePath)) { 
fs.unlinkSync(sourcePath); } + fs.symlinkSync(FIXTURE_TEST1_DIR, sourcePath); + + try { + const staging = new AssetStaging(stack, 's1', { sourcePath }); + + // Should be the same asset hash as in the previous test + expect(staging.assetHash).toEqual(FIXTURE_TEST1_HASH); + } finally { + if (fs.existsSync(sourcePath)) { + fs.unlinkSync(sourcePath); + } + } }); test('staging of an archive file correctly sets packaging and isArchive', () => { @@ -141,7 +160,7 @@ describe('staging', () => { // WHEN const staging = new AssetStaging(stack, 's1', { sourcePath }); - expect(staging.sourceHash).toEqual('2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(staging.sourceHash).toEqual(FIXTURE_TEST1_HASH); expect(staging.sourcePath).toEqual(sourcePath); expect(staging.stagedPath).toEqual(sourcePath); expect(staging.relativeStagedPath(stack)).toEqual(sourcePath); @@ -160,7 +179,7 @@ describe('staging', () => { // THEN const assembly = app.synth(); expect(fs.readdirSync(assembly.directory)).toEqual([ - 'asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00', + `asset.${FIXTURE_TEST1_HASH}`, 'asset.af10ac04b3b607b0f8659c8f0cee8c343025ee75baf0b146f10f0e5311d2c46b.gz', 'cdk.out', 'manifest.json', @@ -187,7 +206,7 @@ describe('staging', () => { expect(fs.readdirSync(assembly.directory)).toEqual([ 'assembly-Stage1', 'assembly-Stage2', - 'asset.2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00', + `asset.${FIXTURE_TEST1_HASH}`, 'cdk.out', 'manifest.json', 'tree.json', @@ -207,7 +226,7 @@ describe('staging', () => { // THEN expect(withoutExtra.sourceHash).not.toEqual(withExtra.sourceHash); - expect(withoutExtra.sourceHash).toEqual('2f37f937c51e2c191af66acf9b09f548926008ec68c575bd2ee54b6e997c0e00'); + expect(withoutExtra.sourceHash).toEqual(FIXTURE_TEST1_HASH); expect(withExtra.sourceHash).toEqual('c95c915a5722bb9019e2c725d11868e5a619b55f36172f76bcbcaa8bb2d10c5f'); }); From 38426c985d5e0713bbbf14fa639520eca6294124 Mon Sep 17 00:00:00 2001 From: vincent-turato <39069200+vincent-turato@users.noreply.github.com> Date: Thu, 9 Sep 2021 16:15:10 -0700 Subject: [PATCH 28/41] fix(cloudformation-diff): cdk diff not picking up differences if old/new value is in format n.n.n (#16050) "cdk diff" in the current version doesn't pick up differences if the old/new value has a number-like format but actually isn't a number (e.g. 0.31.1) Example: two version strings like "0.31.1-prod" and "0.31.2-prod" are both parsed into "0.31" (and hence incorrectly considered equal). Closes #15935. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../cloudformation-diff/lib/diff/util.ts | 5 + .../test/diff-template.test.ts | 91 +++++++++++++++++++ 2 files changed, 96 insertions(+) diff --git a/packages/@aws-cdk/cloudformation-diff/lib/diff/util.ts b/packages/@aws-cdk/cloudformation-diff/lib/diff/util.ts index f4c452499ee09..59c8606be0a35 100644 --- a/packages/@aws-cdk/cloudformation-diff/lib/diff/util.ts +++ b/packages/@aws-cdk/cloudformation-diff/lib/diff/util.ts @@ -141,11 +141,16 @@ export function unionOf(lv: string[] | Set, rv: string[] | Set): */ function safeParseFloat(str: string): number { const ret = parseFloat(str); + const nonNumericRegex = /\d*\.\d+\./; if (ret === 0) { // if the str is exactly '0', that's OK; // but parseFloat() also returns 0 for things like '0.0'; // in this case, return NaN, so we'll fall back to string comparison return str === '0' ? 
ret : NaN; + } else if (nonNumericRegex.test(str)) { + // if the str contains non-numeric characters, + // return NaN, so we'll fall back to string comparison + return NaN; } else { return ret; } diff --git a/packages/@aws-cdk/cloudformation-diff/test/diff-template.test.ts b/packages/@aws-cdk/cloudformation-diff/test/diff-template.test.ts index bde05ef09056e..9241d0e8e28eb 100644 --- a/packages/@aws-cdk/cloudformation-diff/test/diff-template.test.ts +++ b/packages/@aws-cdk/cloudformation-diff/test/diff-template.test.ts @@ -581,3 +581,94 @@ test('when a property changes including equivalent DependsOn', () => { differences = diffTemplate(newTemplate, currentTemplate); expect(differences.resources.differenceCount).toBe(1); }); + +test('when a property with a number-like format changes', () => { + const bucketName = 'ShineyBucketName'; + const tagChanges = { + '0.31.1-prod': '0.31.2-prod', + '8.0.5.5.4-identifier': '8.0.5.5.5-identifier', + '1.1.1.1': '1.1.2.2', + '1.2.3': '1.2.4', + '2.2.2.2': '2.2.3.2', + '3.3.3.3': '3.4.3.3', + }; + const oldTags = Object.keys(tagChanges); + const newTags = Object.values(tagChanges); + const currentTemplate = { + Resources: { + QueueResource: { + Type: 'AWS::SQS::Queue', + }, + BucketResource: { + Type: 'AWS::S3::Bucket', + Properties: { + BucketName: bucketName, + Tags: oldTags, + }, + }, + }, + }; + const newTemplate = { + Resources: { + QueueResource: { + Type: 'AWS::SQS::Queue', + }, + BucketResource: { + Type: 'AWS::S3::Bucket', + Properties: { + BucketName: bucketName, + Tags: newTags, + }, + }, + }, + }; + + const differences = diffTemplate(currentTemplate, newTemplate); + expect(differences.differenceCount).toBe(1); + expect(differences.resources.differenceCount).toBe(1); + const difference = differences.resources.changes.BucketResource; + expect(difference).not.toBeUndefined(); + expect(difference?.oldResourceType).toEqual('AWS::S3::Bucket'); + expect(difference?.propertyUpdates).toEqual({ + Tags: { oldValue: oldTags, newValue: newTags, changeImpact: ResourceImpact.WILL_UPDATE, isDifferent: true }, + }); +}); + +test('when a property with a number-like format doesn\'t change', () => { + const bucketName = 'ShineyBucketName'; + const tags = ['0.31.1-prod', '8.0.5.5.4-identifier', '1.1.1.1', '1.2.3']; + const currentTemplate = { + Resources: { + QueueResource: { + Type: 'AWS::SQS::Queue', + }, + BucketResource: { + Type: 'AWS::S3::Bucket', + Properties: { + BucketName: bucketName, + Tags: tags, + }, + }, + }, + }; + const newTemplate = { + Resources: { + QueueResource: { + Type: 'AWS::SQS::Queue', + }, + BucketResource: { + Type: 'AWS::S3::Bucket', + Properties: { + BucketName: bucketName, + Tags: tags, + }, + }, + }, + }; + + const differences = diffTemplate(currentTemplate, newTemplate); + expect(differences.differenceCount).toBe(0); + expect(differences.resources.differenceCount).toBe(0); + const difference = differences.resources.changes.BucketResource; + expect(difference).toBeUndefined(); +}); \ No newline at end of file From 18aff6b4c0a5e17c64685ac384b243c16cd910f1 Mon Sep 17 00:00:00 2001 From: david-doyle-as24 <78368860+david-doyle-as24@users.noreply.github.com> Date: Fri, 10 Sep 2021 12:19:59 +0200 Subject: [PATCH 29/41] feat(sns): adding support for firehose subscription protocol (#15764) This PR adds support for the firehose subscription protocol by extending the protocol enum and by requiring the field "subscriptionRoleArn" if the protocol is set to firehose. 
This is so that users can take advantage of the new SNS-to-Firehose integration introduced in February 2021 (see [here](https://aws.amazon.com/blogs/compute/introducing-message-archiving-and-analytics-for-amazon-sns/) for the announcement). Here also is a link to the [sample cloudformation](https://docs.aws.amazon.com/sns/latest/dg/firehose-example-cfn.html), documenting the SNS-to-Firehose integration. *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-sns/README.md | 14 +++++++++ packages/@aws-cdk/aws-sns/lib/subscription.ts | 30 +++++++++++++++++-- packages/@aws-cdk/aws-sns/package.json | 6 ++-- .../aws-sns/test/subscription.test.ts | 29 +++++++++++++++--- 4 files changed, 69 insertions(+), 10 deletions(-) diff --git a/packages/@aws-cdk/aws-sns/README.md b/packages/@aws-cdk/aws-sns/README.md index ab7120411df39..23ee15e4af508 100644 --- a/packages/@aws-cdk/aws-sns/README.md +++ b/packages/@aws-cdk/aws-sns/README.md @@ -95,6 +95,20 @@ topic.addSubscription(new subs.LambdaSubscription(fn, { })); ``` +### Example of Firehose Subscription + +```typescript + import { Subscription, SubscriptionProtocol, Topic } from '@aws-cdk/aws-sns'; + import { DeliveryStream } from '@aws-cdk/aws-kinesisfirehose'; + const topic = new Topic(stack, 'Topic'); + const stream = new DeliveryStream(stack, 'DeliveryStream', ...) + new Subscription(stack, 'Subscription', { + endpoint: stream.deliveryStreamArn, + protocol: SubscriptionProtocol.FIREHOSE, + subscriptionRoleArn: "SAMPLE_ARN", //role with permissions to send messages to a firehose delivery stream + }) +``` + ## DLQ setup for SNS Subscription CDK can attach provided Queue as DLQ for your SNS subscription. diff --git a/packages/@aws-cdk/aws-sns/lib/subscription.ts b/packages/@aws-cdk/aws-sns/lib/subscription.ts index 88817b6db6fdd..54e26cd24eb60 100644 --- a/packages/@aws-cdk/aws-sns/lib/subscription.ts +++ b/packages/@aws-cdk/aws-sns/lib/subscription.ts @@ -52,6 +52,13 @@ export interface SubscriptionOptions { * @default - No dead letter queue enabled. */ readonly deadLetterQueue?: IQueue; + + /** + * Arn of role allowing access to firehose delivery stream. + * Required for a firehose subscription protocol. 
+ * @default - No subscription role is provided + */ + readonly subscriptionRoleArn?: string; } /** * Properties for creating a new subscription @@ -81,8 +88,15 @@ export class Subscription extends Resource { constructor(scope: Construct, id: string, props: SubscriptionProps) { super(scope, id); - if (props.rawMessageDelivery && ['http', 'https', 'sqs'].indexOf(props.protocol) < 0) { - throw new Error('Raw message delivery can only be enabled for HTTP/S and SQS subscriptions.'); + if (props.rawMessageDelivery && + [ + SubscriptionProtocol.HTTP, + SubscriptionProtocol.HTTPS, + SubscriptionProtocol.SQS, + SubscriptionProtocol.FIREHOSE, + ] + .indexOf(props.protocol) < 0) { + throw new Error('Raw message delivery can only be enabled for HTTP, HTTPS, SQS, and Firehose subscriptions.'); } if (props.filterPolicy) { @@ -103,6 +117,10 @@ export class Subscription extends Resource { } } + if (props.protocol === SubscriptionProtocol.FIREHOSE && !props.subscriptionRoleArn) { + throw new Error('Subscription role arn is required field for subscriptions with a firehose protocol.'); + } + this.deadLetterQueue = this.buildDeadLetterQueue(props); new CfnSubscription(this, 'Resource', { @@ -113,6 +131,7 @@ export class Subscription extends Resource { filterPolicy: this.filterPolicy, region: props.region, redrivePolicy: this.buildDeadLetterConfig(this.deadLetterQueue), + subscriptionRoleArn: props.subscriptionRoleArn, }); } @@ -189,5 +208,10 @@ export enum SubscriptionProtocol { /** * Notifications trigger a Lambda function. */ - LAMBDA = 'lambda' + LAMBDA = 'lambda', + + /** + * Notifications put records into a firehose delivery stream. + */ + FIREHOSE = 'firehose' } diff --git a/packages/@aws-cdk/aws-sns/package.json b/packages/@aws-cdk/aws-sns/package.json index c7734d512cdfd..26d7441d5ee6d 100644 --- a/packages/@aws-cdk/aws-sns/package.json +++ b/packages/@aws-cdk/aws-sns/package.json @@ -76,8 +76,8 @@ }, "license": "Apache-2.0", "devDependencies": { - "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/assertions": "0.0.0", + "@aws-cdk/aws-s3": "0.0.0", "@types/jest": "^26.0.24", "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", @@ -86,8 +86,8 @@ "pkglint": "0.0.0" }, "dependencies": { - "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-events": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", @@ -97,8 +97,8 @@ }, "homepage": "https://github.com/aws/aws-cdk", "peerDependencies": { - "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-events": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", diff --git a/packages/@aws-cdk/aws-sns/test/subscription.test.ts b/packages/@aws-cdk/aws-sns/test/subscription.test.ts index 0c19eddc0c7d5..a495769648d4b 100644 --- a/packages/@aws-cdk/aws-sns/test/subscription.test.ts +++ b/packages/@aws-cdk/aws-sns/test/subscription.test.ts @@ -2,6 +2,7 @@ import { Template } from '@aws-cdk/assertions'; import { Queue } from '@aws-cdk/aws-sqs'; import * as cdk from '@aws-cdk/core'; import * as sns from '../lib'; +import { SubscriptionProtocol } from '../lib'; describe('Subscription', () => { test('create a subscription', () => { @@ -176,19 +177,26 @@ describe('Subscription', () => { }); - test('throws with raw delivery for protocol other than http, https or sqs', () => { + + test.each( + [ + SubscriptionProtocol.LAMBDA, + SubscriptionProtocol.EMAIL, + 
SubscriptionProtocol.EMAIL_JSON,
+      SubscriptionProtocol.SMS,
+      SubscriptionProtocol.APPLICATION,
+    ])
+  ('throws with raw delivery for %s protocol', (protocol: SubscriptionProtocol) => {
     // GIVEN
     const stack = new cdk.Stack();
     const topic = new sns.Topic(stack, 'Topic');
 
-    // THEN
     expect(() => new sns.Subscription(stack, 'Subscription', {
       endpoint: 'endpoint',
-      protocol: sns.SubscriptionProtocol.LAMBDA,
+      protocol: protocol,
       topic,
       rawMessageDelivery: true,
     })).toThrow(/Raw message delivery/);
-
   });
 
   test('throws with more than 5 attributes in a filter policy', () => {
@@ -232,4 +240,17 @@
     })).toThrow(/\(120\) must not exceed 100/);
 
   });
+
+  test('throws an error when subscription role arn is not entered with firehose subscription protocol', () => {
+    // GIVEN
+    const stack = new cdk.Stack();
+    const topic = new sns.Topic(stack, 'Topic');
+
+    //THEN
+    expect(() => new sns.Subscription(stack, 'Subscription', {
+      endpoint: 'endpoint',
+      protocol: sns.SubscriptionProtocol.FIREHOSE,
+      topic,
+    })).toThrow(/Subscription role arn is required field for subscriptions with a firehose protocol./);
+  });
 });

From deaac4a16e957bd046f24a6c26d735fc4cf980bd Mon Sep 17 00:00:00 2001
From: Julian Michel
Date: Fri, 10 Sep 2021 13:45:42 +0200
Subject: [PATCH 30/41] feat(neptune): add engine version 1.0.5.0 (#16394)

Add AWS Neptune engine version 1.0.5.0.

https://docs.aws.amazon.com/neptune/latest/userguide/engine-releases-1.0.5.0.html

Closes #16388.

Deployment successfully tested:

```ts
new neptune.DatabaseCluster(this, 'Database', {
  vpc,
  instanceType: neptune.InstanceType.T3_MEDIUM,
  engineVersion: neptune.EngineVersion.V1_0_5_0,
});
```

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
---
 packages/@aws-cdk/aws-neptune/lib/cluster.ts | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/packages/@aws-cdk/aws-neptune/lib/cluster.ts b/packages/@aws-cdk/aws-neptune/lib/cluster.ts
index 4420659499a59..af8061ad6c21e 100644
--- a/packages/@aws-cdk/aws-neptune/lib/cluster.ts
+++ b/packages/@aws-cdk/aws-neptune/lib/cluster.ts
@@ -46,6 +46,10 @@ export class EngineVersion {
    * Neptune engine version 1.0.4.1
    */
   public static readonly V1_0_4_1 = new EngineVersion('1.0.4.1');
+  /**
+   * Neptune engine version 1.0.5.0
+   */
+  public static readonly V1_0_5_0 = new EngineVersion('1.0.5.0');
 
   /**
    * Constructor for specifying a custom engine version

From 3e9f04dbbd7aadb8ab4394fefd6281f1d6d30fe0 Mon Sep 17 00:00:00 2001
From: ddl-denis-parnovskiy <77747622+ddl-denis-parnovskiy@users.noreply.github.com>
Date: Fri, 10 Sep 2021 07:13:09 -0700
Subject: [PATCH 31/41] fix(logs): log retention fails with
 OperationAbortedException (#16083)

Fixes: aws#15709

When creating a lambda with log retention, CDK actually creates 2 lambda functions. The second lambda function alters log retention of the log group of the first lambda and the retention of its own log group.

Because log group creation is asynchronous, the log retention lambda tries to pre-create both log groups to guarantee it has an object to work on. If a normal lambda execution also creates the related log group at the same time, an "OperationAbortedException:... Please retry" error is returned.

The existing code handles this situation for the log retention lambda but not for the first lambda.

This fix adds the retry pattern to the general log group creation code.

The existing code also had a bug: if OperationAbortedException is hit, the error is hidden but the retention policy is skipped and not actually applied. This fix addresses this bug as well.

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
---
 .../lib/log-retention-provider/index.ts       | 64 +++++++-----
 .../test/test.log-retention-provider.ts       | 98 ++++++++++++++++++-
 2 files changed, 138 insertions(+), 24 deletions(-)

diff --git a/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts b/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts
index d3f508f668ecc..da537c149f013 100644
--- a/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts
+++ b/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts
@@ -18,14 +18,39 @@ interface SdkRetryOptions {
  * @param options CloudWatch API SDK options.
  */
 async function createLogGroupSafe(logGroupName: string, region?: string, options?: SdkRetryOptions) {
-  try { // Try to create the log group
-    const cloudwatchlogs = new AWS.CloudWatchLogs({ apiVersion: '2014-03-28', region, ...options });
-    await cloudwatchlogs.createLogGroup({ logGroupName }).promise();
-  } catch (e) {
-    if (e.code !== 'ResourceAlreadyExistsException') {
-      throw e;
+  // If we set the log retention for a lambda, then due to the async nature of
+  // Lambda logging there could be a race condition when the same log group is
+  // already being created by the lambda execution. This can sometimes result in
+  // an error "OperationAbortedException: A conflicting operation is currently
+  // in progress...Please try again."
+  // To avoid an error, we do as requested and try again.
+  let retryCount = options?.maxRetries == undefined ? 10 : options.maxRetries;
+  const delay = options?.retryOptions?.base == undefined ? 10 : options.retryOptions.base;
+  do {
+    try {
+      const cloudwatchlogs = new AWS.CloudWatchLogs({ apiVersion: '2014-03-28', region, ...options });
+      await cloudwatchlogs.createLogGroup({ logGroupName }).promise();
+      return;
+    } catch (error) {
+      if (error.code === 'ResourceAlreadyExistsException') {
+        // The log group is already created by the lambda execution
+        return;
+      }
+      if (error.code === 'OperationAbortedException') {
+        if (retryCount > 0) {
+          retryCount--;
+          await new Promise(resolve => setTimeout(resolve, delay));
+          continue;
+        } else {
+          // The log group is still being created by another execution but we are out of retries
+          throw new Error('Out of attempts to create a logGroup');
+        }
+      }
+      // Any other error
+      console.error(error);
+      throw error;
     }
-  }
+  } while (true); // exit happens on retry count check
 }
 
 /**
@@ -64,21 +89,16 @@ export async function handler(event: AWSLambda.CloudFormationCustomResourceEvent
     await setRetentionPolicy(logGroupName, logGroupRegion, retryOptions, parseInt(event.ResourceProperties.RetentionInDays, 10));
 
     if (event.RequestType === 'Create') {
-      // Set a retention policy of 1 day on the logs of this function. The log
-      // group for this function should already exist at this stage because we
-      // already logged the event but due to the async nature of Lambda logging
-      // there could be a race condition. So we also try to create the log group
-      // of this function first. If multiple LogRetention constructs are present
-      // in the stack, they will try to act on this function's log group at the
-      // same time. This can sometime result in an OperationAbortedException. To
-      // avoid this and because this operation is not critical we catch all errors.
Also existing code had a bug: if OperationAbortedException is hit, the error is hidden but the retention policy is skipped and not actually applied. This fix addresses this bug as well. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- .../lib/log-retention-provider/index.ts | 64 +++++++----- .../test/test.log-retention-provider.ts | 98 ++++++++++++++++++- 2 files changed, 138 insertions(+), 24 deletions(-) diff --git a/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts b/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts index d3f508f668ecc..da537c149f013 100644 --- a/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts +++ b/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts @@ -18,14 +18,39 @@ interface SdkRetryOptions { * @param options CloudWatch API SDK options. */ async function createLogGroupSafe(logGroupName: string, region?: string, options?: SdkRetryOptions) { - try { // Try to create the log group - const cloudwatchlogs = new AWS.CloudWatchLogs({ apiVersion: '2014-03-28', region, ...options }); - await cloudwatchlogs.createLogGroup({ logGroupName }).promise(); - } catch (e) { - if (e.code !== 'ResourceAlreadyExistsException') { - throw e; + // If we set the log retention for a lambda, then due to the async nature of + // Lambda logging there could be a race condition when the same log group is + // already being created by the lambda execution. This can sometime result in + // an error "OperationAbortedException: A conflicting operation is currently + // in progress...Please try again." + // To avoid an error, we do as requested and try again. + let retryCount = options?.maxRetries == undefined ? 10 : options.maxRetries; + const delay = options?.retryOptions?.base == undefined ? 10 : options.retryOptions.base; + do { + try { + const cloudwatchlogs = new AWS.CloudWatchLogs({ apiVersion: '2014-03-28', region, ...options }); + await cloudwatchlogs.createLogGroup({ logGroupName }).promise(); + return; + } catch (error) { + if (error.code === 'ResourceAlreadyExistsException') { + // The log group is already created by the lambda execution + return; + } + if (error.code === 'OperationAbortedException') { + if (retryCount > 0) { + retryCount--; + await new Promise(resolve => setTimeout(resolve, delay)); + continue; + } else { + // The log group is still being created by another execution but we are out of retries + throw new Error('Out of attempts to create a logGroup'); + } + } + // Any other error + console.error(error); + throw error; } - } + } while (true); // exit happens on retry count check } /** @@ -64,21 +89,16 @@ export async function handler(event: AWSLambda.CloudFormationCustomResourceEvent await setRetentionPolicy(logGroupName, logGroupRegion, retryOptions, parseInt(event.ResourceProperties.RetentionInDays, 10)); if (event.RequestType === 'Create') { - // Set a retention policy of 1 day on the logs of this function. The log - // group for this function should already exist at this stage because we - // already logged the event but due to the async nature of Lambda logging - // there could be a race condition. So we also try to create the log group - // of this function first. If multiple LogRetention constructs are present - // in the stack, they will try to act on this function's log group at the - // same time. This can sometime result in an OperationAbortedException. To - // avoid this and because this operation is not critical we catch all errors. 
-      try {
-        const region = process.env.AWS_REGION;
-        await createLogGroupSafe(`/aws/lambda/${context.functionName}`, region, retryOptions);
-        await setRetentionPolicy(`/aws/lambda/${context.functionName}`, region, retryOptions, 1);
-      } catch (e) {
-        console.log(e);
-      }
+      // Set a retention policy of 1 day on the logs of this very function.
+      // Due to the async nature of the log group creation, the log group for this function might
+      // not have been created yet at this point. Therefore we attempt to create it.
+      // In case it is being created, createLogGroupSafe will handle the conflict.
+      const region = process.env.AWS_REGION;
+      await createLogGroupSafe(`/aws/lambda/${context.functionName}`, region, retryOptions);
+      // If createLogGroupSafe fails, the log group could not be created even after multiple attempts.
+      // In that case we have nothing to set the retention policy on, and the thrown exception
+      // will skip the next line.
+      await setRetentionPolicy(`/aws/lambda/${context.functionName}`, region, retryOptions, 1);
     }
   }
 }
diff --git a/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts b/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts
index a08ff060dc2a4..ba67371ca9d60 100644
--- a/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts
+++ b/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts
@@ -27,6 +27,14 @@ function createRequest(type: string) {
     .reply(200);
 }
 
+class MyError extends Error {
+  code: string;
+  constructor(message: string, code: string) {
+    super(message);
+    this.code = code;
+  }
+}
+
 export = {
   'tearDown'(callback: any) {
     AWS.restore();
@@ -231,10 +239,60 @@ export = {
     test.done();
   },
 
-  async 'does not fail when operations on provider log group fail'(test: Test) {
+  async 'does not fail if operations on provider log group fail twice'(test: Test) {
+    let attempt = 2;
     const createLogGroupFake = (params: AWSSDK.CloudWatchLogs.CreateLogGroupRequest) => {
       if (params.logGroupName === '/aws/lambda/provider') {
-        return Promise.reject(new Error('OperationAbortedException'));
+        if (attempt > 0) {
+          attempt--;
+          return Promise.reject(new MyError(
+            'A conflicting operation is currently in progress against this resource. Please try again.',
+            'OperationAbortedException'));
+        } else {
+          return Promise.resolve({});
+        }
+      }
+      return Promise.resolve({});
+    };
+
+    const putRetentionPolicyFake = sinon.fake.resolves({});
+    const deleteRetentionPolicyFake = sinon.fake.resolves({});
+
+    AWS.mock('CloudWatchLogs', 'createLogGroup', createLogGroupFake);
+    AWS.mock('CloudWatchLogs', 'putRetentionPolicy', putRetentionPolicyFake);
+    AWS.mock('CloudWatchLogs', 'deleteRetentionPolicy', deleteRetentionPolicyFake);
+
+    const event = {
+      ...eventCommon,
+      RequestType: 'Create',
+      ResourceProperties: {
+        ServiceToken: 'token',
+        RetentionInDays: '30',
+        LogGroupName: 'group',
+      },
+    };
+
+    const request = createRequest('SUCCESS');
+
+    await provider.handler(event as AWSLambda.CloudFormationCustomResourceCreateEvent, context);
+
+    test.equal(request.isDone(), true);
+
+    test.done();
+  },
+
+  async 'does not fail if operations on CDK lambda log group fail twice'(test: Test) {
+    let attempt = 2;
+    const createLogGroupFake = (params: AWSSDK.CloudWatchLogs.CreateLogGroupRequest) => {
+      if (params.logGroupName === 'group') {
+        if (attempt > 0) {
+          attempt--;
+          return Promise.reject(new MyError(
+            'A conflicting operation is currently in progress against this resource. 
Please try again.',
+            'OperationAbortedException'));
+        } else {
+          return Promise.resolve({});
+        }
       }
       return Promise.resolve({});
     };
@@ -265,6 +323,42 @@ export = {
     test.done();
   },
 
+  async 'does fail if operations on CDK lambda log group fail indefinitely'(test: Test) {
+    const createLogGroupFake = (params: AWSSDK.CloudWatchLogs.CreateLogGroupRequest) => {
+      if (params.logGroupName === 'group') {
+        return Promise.reject(new MyError(
+          'A conflicting operation is currently in progress against this resource. Please try again.',
+          'OperationAbortedException'));
+      }
+      return Promise.resolve({});
+    };
+
+    const putRetentionPolicyFake = sinon.fake.resolves({});
+    const deleteRetentionPolicyFake = sinon.fake.resolves({});
+
+    AWS.mock('CloudWatchLogs', 'createLogGroup', createLogGroupFake);
+    AWS.mock('CloudWatchLogs', 'putRetentionPolicy', putRetentionPolicyFake);
+    AWS.mock('CloudWatchLogs', 'deleteRetentionPolicy', deleteRetentionPolicyFake);
+
+    const event = {
+      ...eventCommon,
+      RequestType: 'Create',
+      ResourceProperties: {
+        ServiceToken: 'token',
+        RetentionInDays: '30',
+        LogGroupName: 'group',
+      },
+    };
+
+    const request = createRequest('FAILED');
+
+    await provider.handler(event as AWSLambda.CloudFormationCustomResourceCreateEvent, context);
+
+    test.equal(request.isDone(), true);
+
+    test.done();
+  },
+
   async 'response data contains the log group name'(test: Test) {
     AWS.mock('CloudWatchLogs', 'createLogGroup', sinon.fake.resolves({}));
     AWS.mock('CloudWatchLogs', 'putRetentionPolicy', sinon.fake.resolves({}));

From 7229ad22498dc96ff8c3f05f3bfd789945d950b0 Mon Sep 17 00:00:00 2001
From: Rico Huijbers
Date: Sun, 12 Sep 2021 12:33:10 +0200
Subject: [PATCH 32/41] docs(pipelines): CLI not compatible with the library used by your application (#16424)

Another non-obvious consequence of the self-mutating nature of CDK pipelines that needs to be thoroughly documented.

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*

---
 packages/@aws-cdk/pipelines/README.md         | 23 ++++++++++++++++++-
 .../lib/codepipeline/codepipeline.ts          | 15 ++++++++++--
 2 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/packages/@aws-cdk/pipelines/README.md b/packages/@aws-cdk/pipelines/README.md
index 55cd50bfb12bf..1e0df7a4b2c2a 100644
--- a/packages/@aws-cdk/pipelines/README.md
+++ b/packages/@aws-cdk/pipelines/README.md
@@ -481,7 +481,7 @@ pipeline.addStage(prod, {
 });
 ```
 
-You can also specify steps to be executed at the stack level. To achieve this, you can specify the stack and step via the `stackSteps` property: 
+You can also specify steps to be executed at the stack level. To achieve this, you can specify the stack and step via the `stackSteps` property:
 
 ```ts
 pipeline.addStage(prod, {
@@ -1274,6 +1274,27 @@ encryption key policy for the artifacts bucket may have a statement that looks l
 
 Any resource or policy that references the qualifier (`hnb659fds` by default) will need to be updated.
 
+### This CDK CLI is not compatible with the CDK library used by your application
+
+The CDK CLI version used in your pipeline is too old to read the Cloud Assembly
+produced by your CDK app.
+
+Most likely this happens in the `SelfMutate` action: you are passing the `cliVersion`
+parameter to control the version of the CDK CLI, and you just updated the CDK
+framework version that your application uses. You either forgot to change the
+`cliVersion` parameter, or changed the `cliVersion` in the same commit in which
+you changed the framework version. 
Because a change to the pipeline settings needs
+a successful run of the `SelfMutate` step to be applied, the next iteration of the
+`SelfMutate` step still executes with the *old* CLI version, and that old CLI version
+is not able to read the cloud assembly produced by the new framework version.
+
+Solution: change the `cliVersion` first, commit, push and deploy, and only then
+change the framework version.
+
+We recommend you avoid specifying the `cliVersion` parameter at all. By default
+the pipeline will use the latest CLI version, which will support all cloud assembly
+versions.
+
 ## Known Issues
 
 There are some usability issues that are caused by underlying technology, and
diff --git a/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts b/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts
index 7b1d0d87d9c22..37ca819d82bc5 100644
--- a/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts
+++ b/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts
@@ -63,7 +63,18 @@ export interface CodePipelineProps {
    * If you want to lock the CDK CLI version used in the pipeline, by steps
    * that are automatically generated for you, specify the version here.
    *
-   * You should not typically need to specify this value.
+   * We recommend you do not specify this value, as not specifying it always
+   * uses the latest CLI version which is backwards compatible with old versions.
+   *
+   * If you do specify it, be aware that this version should always be equal to or higher than the
+   * version of the CDK framework used by the CDK app, when the CDK commands are
+   * run during your pipeline execution. When you change this version, the *next
+   * time* the `SelfMutate` step runs it will still be using the CLI of the
+   * *previous* version that was in this property: it will only start using the
+   * new version after `SelfMutate` completes successfully. That means that if
+   * you want to update both framework and CLI version, you should update the
+   * CLI version first, commit, push and deploy, and only then update the
+   * framework version.
    *
    * @default - Latest version
    */
@@ -871,4 +882,4 @@ function chunkTranches(n: number, xss: A[][]): A[][][] {
 
 function isCodePipelineActionFactory(x: any): x is ICodePipelineActionFactory {
   return !!(x as ICodePipelineActionFactory).produceAction;
-}
\ No newline at end of file
+}

From a7320786f57d891bb74bfbc17a2169b65620006e Mon Sep 17 00:00:00 2001
From: Elad Ben-Israel
Date: Sun, 12 Sep 2021 14:30:03 +0300
Subject: [PATCH 33/41] docs(assets): tarball path should be absolute (#15750)

Improve docs to indicate that the path to a tarball should be absolute and not relative.

Fixes #15721

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*

---
 packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts | 6 +++++-
 packages/@aws-cdk/aws-ecs/lib/container-image.ts      | 3 ++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts b/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts
index bb7c40617b4ea..983b56e838876 100644
--- a/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts
+++ b/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts
@@ -16,7 +16,11 @@ import { Construct as CoreConstruct } from '@aws-cdk/core';
  */
 export interface TarballImageAssetProps {
   /**
-   * Path to the tarball.
+   * Absolute path to the tarball.
+   *
+   * It is recommended to use the script running directory (e.g. 
`__dirname`
+   * in Node.js projects or the dirname of `__file__` in Python) if your tarball
+   * is located as a resource inside your project.
    */
   readonly tarballFile: string;
 }
diff --git a/packages/@aws-cdk/aws-ecs/lib/container-image.ts b/packages/@aws-cdk/aws-ecs/lib/container-image.ts
index 05b098fdafedd..f3c53bb527ba0 100644
--- a/packages/@aws-cdk/aws-ecs/lib/container-image.ts
+++ b/packages/@aws-cdk/aws-ecs/lib/container-image.ts
@@ -59,7 +59,8 @@ export abstract class ContainerImage {
    * Use this method if the container image has already been created by another process (e.g. jib)
    * and you want to add it as a container image asset.
    *
-   * @param tarballFile Path to the tarball (relative to the directory).
+   * @param tarballFile Absolute path to the tarball. You can use language-specific idioms (such as `__dirname` in Node.js)
+   * to create an absolute path based on the current script running directory.
    */
   public static fromTarball(tarballFile: string): ContainerImage {
     return {

From 066bcb1e5d53192bd465190c8a4f81c5838987f4 Mon Sep 17 00:00:00 2001
From: Dylan Seidt
Date: Sun, 12 Sep 2021 14:28:41 -0500
Subject: [PATCH 34/41] feat(batch): fargate support for jobs (#15848)

Added Fargate support for Batch jobs.

Note: this is not entirely my work - most of it was done by @kokachev. It is an updated version of Fargate support for batch jobs based on the feedback left in #13591.

- Doc fixes
- Integration test addition
- Network configuration for Fargate
- Support `ResourceRequirements` for Fargate jobs
- Other minor fixes revealed by integration test

closes: #13590, #13591

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*

---
 packages/@aws-cdk/aws-batch/README.md         |  17 +-
 .../aws-batch/lib/compute-environment.ts      | 170 ++--
 .../@aws-cdk/aws-batch/lib/job-definition.ts  | 109 ++-
 .../test/compute-environment.test.ts          | 160 +++-
 .../aws-batch/test/integ.batch.expected.json  | 728 ++++++++++++++----
 .../@aws-cdk/aws-batch/test/integ.batch.ts    |  40 +
 .../aws-batch/test/job-definition.test.ts     | 128 ++-
 .../integ.job-definition-events.expected.json |   9 +-
 .../batch/integ.run-batch-job.expected.json   |   9 +-
 .../test/batch/integ.submit-job.expected.json |   9 +-
 10 files changed, 1168 insertions(+), 211 deletions(-)

diff --git a/packages/@aws-cdk/aws-batch/README.md b/packages/@aws-cdk/aws-batch/README.md
index 48d5b7edf65d8..f2900da8cda0f 100644
--- a/packages/@aws-cdk/aws-batch/README.md
+++ b/packages/@aws-cdk/aws-batch/README.md
@@ -37,7 +37,7 @@ For more information on **AWS Batch** visit the [AWS Docs for Batch](https://doc
 
 ## Compute Environment
 
-At the core of AWS Batch is the compute environment. All batch jobs are processed within a compute environment, which uses resource like OnDemand or Spot EC2 instances.
+At the core of AWS Batch is the compute environment. All batch jobs are processed within a compute environment, which uses resources like OnDemand/Spot EC2 instances or Fargate.
 
 In **MANAGED** mode, AWS will handle the provisioning of compute resources to accommodate the demand. Otherwise, in **UNMANAGED** mode, you will need to manage the provisioning of those resources.
 
@@ -74,6 +74,21 @@ const spotEnvironment = new batch.ComputeEnvironment(stack, 'MySpotEnvironment',
 });
 ```
 
+### Fargate Compute Environment
+
+It is possible to have AWS Batch submit jobs to be run on Fargate compute resources. 
Below is an example of how this can be done:
+
+```ts
+const vpc = new ec2.Vpc(this, 'VPC');
+
+const fargateSpotEnvironment = new batch.ComputeEnvironment(stack, 'MyFargateEnvironment', {
+  computeResources: {
+    type: batch.ComputeResourceType.FARGATE_SPOT,
+    vpc,
+  },
+});
+```
+
 ### Understanding Progressive Allocation Strategies
 
 AWS Batch uses an [allocation strategy](https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html) to determine what compute resource will efficiently handle incoming job requests. By default, **BEST_FIT** will pick an available compute instance based on vCPU requirements. If none exist, the job will wait until resources become available. However, with this strategy, you may have jobs waiting in the queue unnecessarily despite having more powerful instances available. Below is an example of how that situation might look like:
diff --git a/packages/@aws-cdk/aws-batch/lib/compute-environment.ts b/packages/@aws-cdk/aws-batch/lib/compute-environment.ts
index 18a2d1a446325..408e16c7bb98a 100644
--- a/packages/@aws-cdk/aws-batch/lib/compute-environment.ts
+++ b/packages/@aws-cdk/aws-batch/lib/compute-environment.ts
@@ -6,7 +6,7 @@ import { CfnComputeEnvironment } from './batch.generated';
 
 /**
  * Property to specify if the compute environment
- * uses On-Demand or SpotFleet compute resources.
+ * uses On-Demand, SpotFleet, Fargate, or Fargate Spot compute resources.
  */
 export enum ComputeResourceType {
   /**
@@ -18,6 +18,20 @@ export enum ComputeResourceType {
    * Resources will be EC2 SpotFleet resources.
    */
   SPOT = 'SPOT',
+
+  /**
+   * Resources will be Fargate resources.
+   */
+  FARGATE = 'FARGATE',
+
+  /**
+   * Resources will be Fargate Spot resources.
+   *
+   * Fargate Spot uses spare capacity in the AWS cloud to run your fault-tolerant,
+   * time-flexible jobs at up to a 70% discount. If AWS needs the resources back,
+   * jobs running on Fargate Spot will be interrupted with two minutes of notification.
+   */
+  FARGATE_SPOT = 'FARGATE_SPOT',
 }
 
 /**
@@ -135,7 +149,7 @@ export interface ComputeResources {
   readonly vpcSubnets?: ec2.SubnetSelection;
 
   /**
-   * The type of compute environment: ON_DEMAND or SPOT.
+   * The type of compute environment: ON_DEMAND, SPOT, FARGATE, or FARGATE_SPOT.
    *
    * @default ON_DEMAND
    */
@@ -340,7 +354,10 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment
       physicalName: props.computeEnvironmentName,
     });
 
-    this.validateProps(props);
+    const isFargate = ComputeResourceType.FARGATE === props.computeResources?.type
+      || ComputeResourceType.FARGATE_SPOT === props.computeResources?.type;
+
+    this.validateProps(props, isFargate);
 
     const spotFleetRole = this.getSpotFleetRole(props);
     let computeResources: CfnComputeEnvironment.ComputeResourcesProperty | undefined;
@@ -348,36 +365,38 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment
     // Only allow compute resources to be set when using MANAGED type
     if (props.computeResources && this.isManaged(props)) {
       computeResources = {
-        allocationStrategy: props.computeResources.allocationStrategy
-          || (
-            props.computeResources.type === ComputeResourceType.SPOT
-              ? AllocationStrategy.SPOT_CAPACITY_OPTIMIZED
-              : AllocationStrategy.BEST_FIT
-          ),
         bidPercentage: props.computeResources.bidPercentage,
         desiredvCpus: props.computeResources.desiredvCpus,
         ec2KeyPair: props.computeResources.ec2KeyPair,
         imageId: props.computeResources.image && props.computeResources.image.getImage(this).imageId,
-        instanceRole: props.computeResources.instanceRole
-          ? 
props.computeResources.instanceRole - : new iam.CfnInstanceProfile(this, 'Instance-Profile', { - roles: [new iam.Role(this, 'Ecs-Instance-Role', { - assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'), - managedPolicies: [ - iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonEC2ContainerServiceforEC2Role'), - ], - }).roleName], - }).attrArn, - instanceTypes: this.buildInstanceTypes(props.computeResources.instanceTypes), launchTemplate: props.computeResources.launchTemplate, maxvCpus: props.computeResources.maxvCpus || 256, - minvCpus: props.computeResources.minvCpus || 0, placementGroup: props.computeResources.placementGroup, securityGroupIds: this.buildSecurityGroupIds(props.computeResources.vpc, props.computeResources.securityGroups), spotIamFleetRole: spotFleetRole?.roleArn, subnets: props.computeResources.vpc.selectSubnets(props.computeResources.vpcSubnets).subnetIds, tags: props.computeResources.computeResourcesTags, type: props.computeResources.type || ComputeResourceType.ON_DEMAND, + ...(!isFargate ? { + allocationStrategy: props.computeResources.allocationStrategy + || ( + props.computeResources.type === ComputeResourceType.SPOT + ? AllocationStrategy.SPOT_CAPACITY_OPTIMIZED + : AllocationStrategy.BEST_FIT + ), + instanceRole: props.computeResources.instanceRole + ? props.computeResources.instanceRole + : new iam.CfnInstanceProfile(this, 'Instance-Profile', { + roles: [new iam.Role(this, 'Ecs-Instance-Role', { + assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'), + managedPolicies: [ + iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonEC2ContainerServiceforEC2Role'), + ], + }).roleName], + }).attrArn, + instanceTypes: this.buildInstanceTypes(props.computeResources.instanceTypes), + minvCpus: props.computeResources.minvCpus || 0, + } : {}), }; } @@ -414,7 +433,7 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment /** * Validates the properties provided for a new batch compute environment. 
*/
-  private validateProps(props: ComputeEnvironmentProps) {
+  private validateProps(props: ComputeEnvironmentProps, isFargate: boolean) {
     if (props === undefined) {
       return;
     }
@@ -427,41 +446,100 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment
       throw new Error('computeResources is missing but required on a managed compute environment');
     }
 
-    // Setting a bid percentage is only allowed on SPOT resources +
-    // Cannot use SPOT_CAPACITY_OPTIMIZED when using ON_DEMAND
     if (props.computeResources) {
-      if (props.computeResources.type === ComputeResourceType.ON_DEMAND) {
-        // VALIDATE FOR ON_DEMAND
+      if (isFargate) {
+        // VALIDATE FOR FARGATE
 
-        // Bid percentage is not allowed
+        // Bid percentage cannot be set for Fargate environments
         if (props.computeResources.bidPercentage !== undefined) {
-          throw new Error('Setting the bid percentage is only allowed for SPOT type resources on a batch compute environment');
+          throw new Error('Bid percentage must not be set for Fargate compute environments');
         }
 
-        // SPOT_CAPACITY_OPTIMIZED allocation is not allowed
-        if (props.computeResources.allocationStrategy && props.computeResources.allocationStrategy === AllocationStrategy.SPOT_CAPACITY_OPTIMIZED) {
-          throw new Error('The SPOT_CAPACITY_OPTIMIZED allocation strategy is only allowed if the environment is a SPOT type compute environment');
+        // Allocation strategy cannot be set for Fargate environments
+        if (props.computeResources.allocationStrategy !== undefined) {
+          throw new Error('Allocation strategy must not be set for Fargate compute environments');
         }
-      } else {
-        // VALIDATE FOR SPOT
 
-        // Bid percentage must be from 0 - 100
-        if (props.computeResources.bidPercentage !== undefined &&
-          (props.computeResources.bidPercentage < 0 || props.computeResources.bidPercentage > 100)) {
-          throw new Error('Bid percentage can only be a value between 0 and 100');
+        // Desired vCPUs cannot be set for Fargate environments
+        if (props.computeResources.desiredvCpus !== undefined) {
+          throw new Error('Desired vCPUs must not be set for Fargate compute environments');
         }
-      }
 
-      if (props.computeResources.minvCpus) {
-        // minvCpus cannot be less than 0
-        if (props.computeResources.minvCpus < 0) {
-          throw new Error('Minimum vCpus for a batch compute environment cannot be less than 0');
+        // Image ID cannot be set for Fargate environments
+        if (props.computeResources.image !== undefined) {
+          throw new Error('Image must not be set for Fargate compute environments');
         }
 
-        // minvCpus cannot exceed max vCpus
-        if (props.computeResources.maxvCpus &&
-          props.computeResources.minvCpus > props.computeResources.maxvCpus) {
-          throw new Error('Minimum vCpus cannot be greater than the maximum vCpus');
+        // Instance types cannot be set for Fargate environments
+        if (props.computeResources.instanceTypes !== undefined) {
+          throw new Error('Instance types must not be set for Fargate compute environments');
+        }
+
+        // EC2 key pair cannot be set for Fargate environments
+        if (props.computeResources.ec2KeyPair !== undefined) {
+          throw new Error('EC2 key pair must not be set for Fargate compute environments');
+        }
+
+        // Instance role cannot be set for Fargate environments
+        if (props.computeResources.instanceRole !== undefined) {
+          throw new Error('Instance role must not be set for Fargate compute environments');
+        }
+
+        // Launch template cannot be set for Fargate environments
+        if (props.computeResources.launchTemplate !== undefined) {
+          throw new Error('Launch template must not be set for Fargate compute 
environments');
+        }
+
+        // Min vCPUs cannot be set for Fargate environments
+        if (props.computeResources.minvCpus !== undefined) {
+          throw new Error('Min vCPUs must not be set for Fargate compute environments');
+        }
+
+        // Placement group cannot be set for Fargate environments
+        if (props.computeResources.placementGroup !== undefined) {
+          throw new Error('Placement group must not be set for Fargate compute environments');
+        }
+
+        // Spot fleet role cannot be set for Fargate environments
+        if (props.computeResources.spotFleetRole !== undefined) {
+          throw new Error('Spot fleet role must not be set for Fargate compute environments');
+        }
+      } else {
+        // VALIDATE FOR ON_DEMAND AND SPOT
+        if (props.computeResources.minvCpus) {
+          // minvCpus cannot be less than 0
+          if (props.computeResources.minvCpus < 0) {
+            throw new Error('Minimum vCpus for a batch compute environment cannot be less than 0');
+          }
+
+          // minvCpus cannot exceed max vCpus
+          if (props.computeResources.maxvCpus &&
+            props.computeResources.minvCpus > props.computeResources.maxvCpus) {
+            throw new Error('Minimum vCpus cannot be greater than the maximum vCpus');
+          }
+        }
+        // Setting a bid percentage is only allowed on SPOT resources +
+        // Cannot use SPOT_CAPACITY_OPTIMIZED when using ON_DEMAND
+        if (props.computeResources.type === ComputeResourceType.ON_DEMAND) {
+          // VALIDATE FOR ON_DEMAND
+
+          // Bid percentage is not allowed
+          if (props.computeResources.bidPercentage !== undefined) {
+            throw new Error('Setting the bid percentage is only allowed for SPOT type resources on a batch compute environment');
+          }
+
+          // SPOT_CAPACITY_OPTIMIZED allocation is not allowed
+          if (props.computeResources.allocationStrategy && props.computeResources.allocationStrategy === AllocationStrategy.SPOT_CAPACITY_OPTIMIZED) {
+            throw new Error('The SPOT_CAPACITY_OPTIMIZED allocation strategy is only allowed if the environment is a SPOT type compute environment');
+          }
+        } else if (props.computeResources.type === ComputeResourceType.SPOT) {
+          // VALIDATE FOR SPOT
+
+          // Bid percentage must be from 0 - 100
+          if (props.computeResources.bidPercentage !== undefined &&
+            (props.computeResources.bidPercentage < 0 || props.computeResources.bidPercentage > 100)) {
+            throw new Error('Bid percentage can only be a value between 0 and 100');
+          }
+        }
       }
     }
   }
diff --git a/packages/@aws-cdk/aws-batch/lib/job-definition.ts b/packages/@aws-cdk/aws-batch/lib/job-definition.ts
index 88107b0266615..dab8515acb6d1 100644
--- a/packages/@aws-cdk/aws-batch/lib/job-definition.ts
+++ b/packages/@aws-cdk/aws-batch/lib/job-definition.ts
@@ -52,6 +52,21 @@ export enum LogDriver {
   SYSLOG = 'syslog'
 }
 
+/**
+ * Platform capabilities
+ */
+export enum PlatformCapabilities {
+  /**
+   * Specifies EC2 environment.
+   */
+  EC2 = 'EC2',
+
+  /**
+   * Specifies Fargate environment.
+   */
+  FARGATE = 'FARGATE'
+}
+
 /**
  * Log configuration options to send to a custom log driver for the container.
  */
@@ -135,9 +150,9 @@ export interface JobDefinitionContainer {
 
   /**
    * The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed
-   * the memory specified here, the container is killed. You must specify at least 4 MiB of memory for a job.
+   * the memory specified here, the container is killed. You must specify at least 4 MiB of memory for EC2 and 512 MiB for Fargate. 
* - * @default 4 + * @default - 4 for EC2, 512 for Fargate */ readonly memoryLimitMiB?: number; @@ -185,9 +200,9 @@ export interface JobDefinitionContainer { /** * The number of vCPUs reserved for the container. Each vCPU is equivalent to - * 1,024 CPU shares. You must specify at least one vCPU. + * 1,024 CPU shares. You must specify at least one vCPU for EC2 and 0.25 for Fargate. * - * @default 1 + * @default - 1 for EC2, 0.25 for Fargate */ readonly vcpus?: number; @@ -197,6 +212,28 @@ export interface JobDefinitionContainer { * @default - No data volumes will be used. */ readonly volumes?: ecs.Volume[]; + + /** + * Fargate platform version + * + * @default - LATEST platform version will be used + */ + readonly platformVersion?: ecs.FargatePlatformVersion + + /** + * The IAM role that AWS Batch can assume. + * Required when using Fargate. + * + * @default - None + */ + readonly executionRole?: iam.IRole; + + /** + * Whether or not to assign a public IP to the job + * + * @default - false + */ + readonly assignPublicIp?: boolean } /** @@ -252,6 +289,13 @@ export interface JobDefinitionProps { * @default - undefined */ readonly timeout?: Duration; + + /** + * The platform capabilities required by the job definition. + * + * @default - EC2 + */ + readonly platformCapabilities?: PlatformCapabilities[]; } /** @@ -382,16 +426,20 @@ export class JobDefinition extends Resource implements IJobDefinition { physicalName: props.jobDefinitionName, }); + this.validateProps(props); + this.imageConfig = new JobDefinitionImageConfig(this, props.container); + const isFargate = !!props.platformCapabilities?.includes(PlatformCapabilities.FARGATE); + const jobDef = new CfnJobDefinition(this, 'Resource', { jobDefinitionName: props.jobDefinitionName, - containerProperties: this.buildJobContainer(props.container), + containerProperties: this.buildJobContainer(props.container, isFargate), type: 'container', nodeProperties: props.nodeProps ? { mainNode: props.nodeProps.mainNode, - nodeRangeProperties: this.buildNodeRangeProps(props.nodeProps), + nodeRangeProperties: this.buildNodeRangeProps(props.nodeProps, isFargate), numNodes: props.nodeProps.count, } : undefined, @@ -402,6 +450,7 @@ export class JobDefinition extends Resource implements IJobDefinition { timeout: { attemptDurationSeconds: props.timeout ? props.timeout.toSeconds() : undefined, }, + platformCapabilities: props.platformCapabilities ?? [PlatformCapabilities.EC2], }); this.jobDefinitionArn = this.getResourceArnAttribute(jobDef.ref, { @@ -412,7 +461,7 @@ export class JobDefinition extends Resource implements IJobDefinition { this.jobDefinitionName = this.getResourceNameAttribute(jobDef.ref); } - private deserializeEnvVariables(env?: { [name: string]: string}): CfnJobDefinition.EnvironmentProperty[] | undefined { + private deserializeEnvVariables(env?: { [name: string]: string }): CfnJobDefinition.EnvironmentProperty[] | undefined { const vars = new Array(); if (env === undefined) { @@ -426,7 +475,31 @@ export class JobDefinition extends Resource implements IJobDefinition { return vars; } - private buildJobContainer(container?: JobDefinitionContainer): CfnJobDefinition.ContainerPropertiesProperty | undefined { + /** + * Validates the properties provided for a new job definition. 
+ */ + private validateProps(props: JobDefinitionProps) { + if (props === undefined) { + return; + } + + if (props.platformCapabilities !== undefined && props.platformCapabilities.includes(PlatformCapabilities.FARGATE) + && props.container.executionRole === undefined) { + throw new Error('Fargate job must have executionRole set'); + } + + if (props.platformCapabilities !== undefined && props.platformCapabilities.includes(PlatformCapabilities.FARGATE) + && props.container.gpuCount !== undefined) { + throw new Error('Fargate job must not have gpuCount set'); + } + + if ((props.platformCapabilities === undefined || props.platformCapabilities.includes(PlatformCapabilities.EC2)) + && props.container.assignPublicIp !== undefined) { + throw new Error('EC2 job must not have assignPublicIp set'); + } + } + + private buildJobContainer(container: JobDefinitionContainer, isFargate: boolean): CfnJobDefinition.ContainerPropertiesProperty | undefined { if (container === undefined) { return undefined; } @@ -437,6 +510,7 @@ export class JobDefinition extends Resource implements IJobDefinition { image: this.imageConfig.imageName, instanceType: container.instanceType && container.instanceType.toString(), jobRoleArn: container.jobRole && container.jobRole.roleArn, + executionRoleArn: container.executionRole && container.executionRole.roleArn, linuxParameters: container.linuxParams ? { devices: container.linuxParams.renderLinuxParameters().devices } : undefined, @@ -447,26 +521,31 @@ export class JobDefinition extends Resource implements IJobDefinition { ? this.buildLogConfigurationSecretOptions(container.logConfiguration.secretOptions) : undefined, } : undefined, - memory: container.memoryLimitMiB || 4, mountPoints: container.mountPoints, privileged: container.privileged || false, - resourceRequirements: container.gpuCount - ? [{ type: 'GPU', value: String(container.gpuCount) }] - : undefined, + networkConfiguration: container.assignPublicIp ? { + assignPublicIp: container.assignPublicIp ? 'ENABLED' : 'DISABLED', + } : undefined, readonlyRootFilesystem: container.readOnly || false, ulimits: container.ulimits, user: container.user, - vcpus: container.vcpus || 1, volumes: container.volumes, + fargatePlatformConfiguration: container.platformVersion ? { + platformVersion: container.platformVersion, + } : undefined, + resourceRequirements: [ + { type: 'VCPU', value: String(container.vcpus || (isFargate ? 0.25 : 1)) }, + { type: 'MEMORY', value: String(container.memoryLimitMiB || (isFargate ? 512 : 4)) }, + ].concat(container.gpuCount ? 
[{ type: 'GPU', value: String(container.gpuCount) }] : []), }; } - private buildNodeRangeProps(multiNodeProps: IMultiNodeProps): CfnJobDefinition.NodeRangePropertyProperty[] { + private buildNodeRangeProps(multiNodeProps: IMultiNodeProps, isFargate: boolean): CfnJobDefinition.NodeRangePropertyProperty[] { const rangeProps = new Array(); for (const prop of multiNodeProps.rangeProps) { rangeProps.push({ - container: this.buildJobContainer(prop.container), + container: this.buildJobContainer(prop.container, isFargate), targetNodes: `${prop.fromNodeIndex || 0}:${prop.toNodeIndex || multiNodeProps.count}`, }); } diff --git a/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts b/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts index c7ead4cd47de8..4cd446eec3774 100644 --- a/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts +++ b/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts @@ -6,7 +6,7 @@ import * as iam from '@aws-cdk/aws-iam'; import * as cdk from '@aws-cdk/core'; import * as batch from '../lib'; -describe('Batch Compute Evironment', () => { +describe('Batch Compute Environment', () => { let expectedManagedDefaultComputeProps: any; let defaultServiceRole: any; @@ -81,6 +81,164 @@ describe('Batch Compute Evironment', () => { }); }); }); + describe('using fargate resources', () => { + test('should deny setting bid percentage', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + bidPercentage: -1, + }, + }); + }); + }); + test('should deny setting allocation strategy', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + allocationStrategy: batch.AllocationStrategy.BEST_FIT, + }, + }); + }); + }); + test('should deny setting desired vCPUs', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + desiredvCpus: 1, + }, + }); + }); + }); + test('should deny setting min vCPUs', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + minvCpus: 1, + }, + }); + }); + }); + test('should deny setting image', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + image: ec2.MachineImage.latestAmazonLinux(), + }, + }); + }); + }); + test('should deny setting instance types', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + instanceTypes: [], + }, + }); + }); + }); + test('should deny setting EC2 key pair', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + ec2KeyPair: 'test', + }, + }); + }); + }); + test('should deny setting instance role', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + 
computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + instanceRole: 'test', + }, + }); + }); + }); + test('should deny setting launch template', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + launchTemplate: { + launchTemplateName: 'test-template', + }, + }, + }); + }); + }); + test('should deny setting placement group', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + placementGroup: 'test', + }, + }); + }); + }); + test('should deny setting spot fleet role', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + spotFleetRole: iam.Role.fromRoleArn(stack, 'test-role-arn', 'test-role'), + }, + }); + }); + }); + }); describe('using spot resources', () => { test('should provide a spot fleet role if one is not given and allocationStrategy is BEST_FIT', () => { diff --git a/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json b/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json index 09d021e49bd9d..7624200d45321 100644 --- a/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json +++ b/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json @@ -95,15 +95,15 @@ "vpcPublicSubnet1NATGateway9C16659E": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + }, "AllocationId": { "Fn::GetAtt": [ "vpcPublicSubnet1EIPDA49DCBE", "AllocationId" ] }, - "SubnetId": { - "Ref": "vpcPublicSubnet1Subnet2E65531E" - }, "Tags": [ { "Key": "Name", @@ -192,15 +192,15 @@ "vpcPublicSubnet2NATGateway9B8AE11A": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + }, "AllocationId": { "Fn::GetAtt": [ "vpcPublicSubnet2EIP9B3743B1", "AllocationId" ] }, - "SubnetId": { - "Ref": "vpcPublicSubnet2Subnet009B674F" - }, "Tags": [ { "Key": "Name", @@ -289,15 +289,15 @@ "vpcPublicSubnet3NATGateway82F6CA9E": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "vpcPublicSubnet3Subnet11B92D7C" + }, "AllocationId": { "Fn::GetAtt": [ "vpcPublicSubnet3EIP2C3B9D91", "AllocationId" ] }, - "SubnetId": { - "Ref": "vpcPublicSubnet3Subnet11B92D7C" - }, "Tags": [ { "Key": "Name", @@ -566,55 +566,30 @@ "batchunmanagedcomputeenvED550298": { "Type": "AWS::Batch::ComputeEnvironment", "Properties": { + "Type": "UNMANAGED", "ServiceRole": { "Fn::GetAtt": [ "batchunmanagedcomputeenvResourceServiceInstanceRoleCA40AF77", "Arn" ] }, - "Type": "UNMANAGED", "State": "ENABLED" } }, - "batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799": { - "Type": "AWS::IAM::Role", + "batchdemandcomputeenvlaunchtemplateResourceSecurityGroup23599B84": { + "Type": "AWS::EC2::SecurityGroup", "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": { - "Fn::Join": [ - "", - [ - "ec2.", - { - "Ref": "AWS::URLSuffix" - } - ] - ] - } - } - } - ], - "Version": "2012-10-17" - }, - "ManagedPolicyArns": [ + "GroupDescription": "batch-stack/batch-demand-compute-env-launch-template/Resource-Security-Group", + "SecurityGroupEgress": [ { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": 
"AWS::Partition" - }, - ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" - ] - ] + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" } - ] + ], + "VpcId": { + "Ref": "vpcA2121C38" + } }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -652,12 +627,43 @@ "vpcVPCGW7984C166" ] }, - "batchdemandcomputeenvlaunchtemplateInstanceProfile2DEC3A97": { - "Type": "AWS::IAM::InstanceProfile", + "batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799": { + "Type": "AWS::IAM::Role", "Properties": { - "Roles": [ + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ { - "Ref": "batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799" + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" + ] + ] } ] }, @@ -697,20 +703,14 @@ "vpcVPCGW7984C166" ] }, - "batchdemandcomputeenvlaunchtemplateResourceSecurityGroup23599B84": { - "Type": "AWS::EC2::SecurityGroup", + "batchdemandcomputeenvlaunchtemplateInstanceProfile2DEC3A97": { + "Type": "AWS::IAM::InstanceProfile", "Properties": { - "GroupDescription": "batch-stack/batch-demand-compute-env-launch-template/Resource-Security-Group", - "SecurityGroupEgress": [ + "Roles": [ { - "CidrIp": "0.0.0.0/0", - "Description": "Allow all outbound traffic by default", - "IpProtocol": "-1" + "Ref": "batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799" } - ], - "VpcId": { - "Ref": "vpcA2121C38" - } + ] }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -817,12 +817,6 @@ "batchdemandcomputeenvlaunchtemplateF8A5B233": { "Type": "AWS::Batch::ComputeEnvironment", "Properties": { - "ServiceRole": { - "Fn::GetAtt": [ - "batchdemandcomputeenvlaunchtemplateResourceServiceInstanceRole76AD99CC", - "Arn" - ] - }, "Type": "MANAGED", "ComputeResources": { "AllocationStrategy": "BEST_FIT", @@ -864,6 +858,12 @@ }, "Type": "EC2" }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchdemandcomputeenvlaunchtemplateResourceServiceInstanceRole76AD99CC", + "Arn" + ] + }, "State": "ENABLED" }, "DependsOn": [ @@ -902,45 +902,20 @@ "vpcVPCGW7984C166" ] }, - "batchspotcomputeenvEcsInstanceRoleE976826B": { - "Type": "AWS::IAM::Role", + "batchspotcomputeenvResourceSecurityGroup07B09BF9": { + "Type": "AWS::EC2::SecurityGroup", "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": { - "Fn::Join": [ - "", - [ - "ec2.", - { - "Ref": "AWS::URLSuffix" - } - ] - ] - } - } - } - ], - "Version": "2012-10-17" - }, - "ManagedPolicyArns": [ + "GroupDescription": "batch-stack/batch-spot-compute-env/Resource-Security-Group", + "SecurityGroupEgress": [ { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" - ] - ] + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" } - ] + ], + "VpcId": { + "Ref": "vpcA2121C38" + } }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -978,12 +953,43 @@ "vpcVPCGW7984C166" ] }, - "batchspotcomputeenvInstanceProfileFA613AC2": { - "Type": "AWS::IAM::InstanceProfile", + "batchspotcomputeenvEcsInstanceRoleE976826B": { + "Type": "AWS::IAM::Role", "Properties": { - "Roles": [ + "AssumeRolePolicyDocument": { + 
"Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ { - "Ref": "batchspotcomputeenvEcsInstanceRoleE976826B" + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" + ] + ] } ] }, @@ -1023,20 +1029,14 @@ "vpcVPCGW7984C166" ] }, - "batchspotcomputeenvResourceSecurityGroup07B09BF9": { - "Type": "AWS::EC2::SecurityGroup", + "batchspotcomputeenvInstanceProfileFA613AC2": { + "Type": "AWS::IAM::InstanceProfile", "Properties": { - "GroupDescription": "batch-stack/batch-spot-compute-env/Resource-Security-Group", - "SecurityGroupEgress": [ + "Roles": [ { - "CidrIp": "0.0.0.0/0", - "Description": "Allow all outbound traffic by default", - "IpProtocol": "-1" + "Ref": "batchspotcomputeenvEcsInstanceRoleE976826B" } - ], - "VpcId": { - "Ref": "vpcA2121C38" - } + ] }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -1143,12 +1143,6 @@ "batchspotcomputeenv2CE4DFD9": { "Type": "AWS::Batch::ComputeEnvironment", "Properties": { - "ServiceRole": { - "Fn::GetAtt": [ - "batchspotcomputeenvResourceServiceInstanceRole8B0DF5A7", - "Arn" - ] - }, "Type": "MANAGED", "ComputeResources": { "AllocationStrategy": "SPOT_CAPACITY_OPTIMIZED", @@ -1201,6 +1195,12 @@ ], "Type": "SPOT" }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchspotcomputeenvResourceServiceInstanceRole8B0DF5A7", + "Arn" + ] + }, "State": "ENABLED" }, "DependsOn": [ @@ -1266,13 +1266,410 @@ "State": "ENABLED" } }, - "batchjobrepo4C508C51": { - "Type": "AWS::ECR::Repository", - "UpdateReplacePolicy": "Retain", - "DeletionPolicy": "Retain" - }, - "batchjobdeffromecrE0E30DAD": { - "Type": "AWS::Batch::JobDefinition", + "batchfargatecomputeenvResourceSecurityGroupE2963776": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "batch-stack/batch-fargate-compute-env/Resource-Security-Group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "vpcA2121C38" + } + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + 
"vpcVPCGW7984C166" + ] + }, + "batchfargatecomputeenvResourceServiceInstanceRole94D7AA5F": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "batch.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSBatchServiceRole" + ] + ] + } + ] + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatecomputeenvE9C3FCA4": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "MaxvCpus": 256, + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "batchfargatecomputeenvResourceSecurityGroupE2963776", + "GroupId" + ] + } + ], + "Subnets": [ + { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + }, + { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + }, + { + "Ref": "vpcPrivateSubnet3Subnet985AC459" + } + ], + "Type": "FARGATE" + }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchfargatecomputeenvResourceServiceInstanceRole94D7AA5F", + "Arn" + ] + }, + "State": "ENABLED" + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + 
"vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatespotcomputeenvResourceSecurityGroup923D2390": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "batch-stack/batch-fargate-spot-compute-env/Resource-Security-Group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "vpcA2121C38" + } + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatespotcomputeenvResourceServiceInstanceRole6462BFB0": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "batch.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSBatchServiceRole" + ] + ] + } + ] + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + 
"vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatespotcomputeenv374749B0": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "MaxvCpus": 256, + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "batchfargatespotcomputeenvResourceSecurityGroup923D2390", + "GroupId" + ] + } + ], + "Subnets": [ + { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + }, + { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + }, + { + "Ref": "vpcPrivateSubnet3Subnet985AC459" + } + ], + "Type": "FARGATE_SPOT" + }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchfargatespotcomputeenvResourceServiceInstanceRole6462BFB0", + "Arn" + ] + }, + "State": "ENABLED" + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchjobfargatequeue5A12983E": { + "Type": "AWS::Batch::JobQueue", + "Properties": { + "ComputeEnvironmentOrder": [ + { + "ComputeEnvironment": { + "Ref": "batchfargatecomputeenvE9C3FCA4" + }, + "Order": 1 + }, + { + "ComputeEnvironment": { + "Ref": "batchfargatespotcomputeenv374749B0" + }, + "Order": 2 + } + ], + "Priority": 1, + "State": "ENABLED" + } + }, + "batchjobrepo4C508C51": { + "Type": "AWS::ECR::Repository", + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "batchjobdeffromecrE0E30DAD": { + "Type": "AWS::Batch::JobDefinition", "Properties": { "Type": "container", "ContainerProperties": { @@ -1325,11 +1722,16 @@ ] ] }, - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, @@ -1342,11 +1744,67 @@ "Type": "container", "ContainerProperties": { "Image": "docker/whalesay", - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], + "RetryStrategy": { + 
"Attempts": 1 + }, + "Timeout": {} + } + }, + "executionroleD9A39BE6": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "batch.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "batchjobdeffargate7FE30059": { + "Type": "AWS::Batch::JobDefinition", + "Properties": { + "Type": "container", + "ContainerProperties": { + "ExecutionRoleArn": { + "Fn::GetAtt": [ + "executionroleD9A39BE6", + "Arn" + ] + }, + "Image": "docker/whalesay", + "Privileged": false, + "ReadonlyRootFilesystem": false, + "ResourceRequirements": [ + { + "Type": "VCPU", + "Value": "0.25" + }, + { + "Type": "MEMORY", + "Value": "512" + } + ] + }, + "PlatformCapabilities": [ + "FARGATE" + ], "RetryStrategy": { "Attempts": 1 }, diff --git a/packages/@aws-cdk/aws-batch/test/integ.batch.ts b/packages/@aws-cdk/aws-batch/test/integ.batch.ts index 4e19da37ca897..4430cda4a7bf3 100644 --- a/packages/@aws-cdk/aws-batch/test/integ.batch.ts +++ b/packages/@aws-cdk/aws-batch/test/integ.batch.ts @@ -1,6 +1,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as ecr from '@aws-cdk/aws-ecr'; import * as ecs from '@aws-cdk/aws-ecs'; +import * as iam from '@aws-cdk/aws-iam'; import * as cdk from '@aws-cdk/core'; import * as batch from '../lib/'; @@ -64,6 +65,33 @@ new batch.JobQueue(stack, 'batch-job-queue', { ], }); +// Split out into two job queues because each queue +// supports a max of 3 compute environments +new batch.JobQueue(stack, 'batch-job-fargate-queue', { + computeEnvironments: [ + { + computeEnvironment: new batch.ComputeEnvironment(stack, 'batch-fargate-compute-env', { + managed: true, + computeResources: { + type: batch.ComputeResourceType.FARGATE, + vpc, + }, + }), + order: 1, + }, + { + computeEnvironment: new batch.ComputeEnvironment(stack, 'batch-fargate-spot-compute-env', { + managed: true, + computeResources: { + type: batch.ComputeResourceType.FARGATE_SPOT, + vpc, + }, + }), + order: 2, + }, + ], +}); + const repo = new ecr.Repository(stack, 'batch-job-repo'); new batch.JobDefinition(stack, 'batch-job-def-from-ecr', { @@ -77,3 +105,15 @@ new batch.JobDefinition(stack, 'batch-job-def-from-', { image: ecs.ContainerImage.fromRegistry('docker/whalesay'), }, }); + +const executionRole = new iam.Role(stack, 'execution-role', { + assumedBy: new iam.ServicePrincipal('batch.amazonaws.com'), +}); + +new batch.JobDefinition(stack, 'batch-job-def-fargate', { + platformCapabilities: [batch.PlatformCapabilities.FARGATE], + container: { + image: ecs.ContainerImage.fromRegistry('docker/whalesay'), + executionRole, + }, +}); diff --git a/packages/@aws-cdk/aws-batch/test/job-definition.test.ts b/packages/@aws-cdk/aws-batch/test/job-definition.test.ts index ed9bffb7a90bc..13926b6b80788 100644 --- a/packages/@aws-cdk/aws-batch/test/job-definition.test.ts +++ b/packages/@aws-cdk/aws-batch/test/job-definition.test.ts @@ -1,3 +1,4 @@ +import { throws } from 'assert'; import { Template } from '@aws-cdk/assertions'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as ecr from '@aws-cdk/aws-ecr'; @@ -7,6 +8,7 @@ import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import * as ssm from '@aws-cdk/aws-ssm'; import * as cdk from '@aws-cdk/core'; import * as batch from '../lib'; +import { PlatformCapabilities } from '../lib'; describe('Batch Job Definition', () => { let stack: cdk.Stack; @@ -61,6 +63,7 @@ describe('Batch Job Definition', () => { }, retryAttempts: 2, timeout: 
cdk.Duration.seconds(30), + platformCapabilities: [batch.PlatformCapabilities.EC2], }; }); @@ -87,14 +90,83 @@ describe('Batch Job Definition', () => { 'awslogs-region': 'us-east-1', }, }, - Memory: jobDefProps.container.memoryLimitMiB, MountPoints: [], Privileged: jobDefProps.container.privileged, ReadonlyRootFilesystem: jobDefProps.container.readOnly, - ResourceRequirements: [{ Type: 'GPU', Value: String(jobDefProps.container.gpuCount) }], + ResourceRequirements: [ + { Type: 'VCPU', Value: String(jobDefProps.container.vcpus) }, + { Type: 'MEMORY', Value: String(jobDefProps.container.memoryLimitMiB) }, + { Type: 'GPU', Value: String(jobDefProps.container.gpuCount) }, + ], + Ulimits: [], + User: jobDefProps.container.user, + Volumes: [], + } : undefined, + NodeProperties: jobDefProps.nodeProps ? { + MainNode: jobDefProps.nodeProps.mainNode, + NodeRangeProperties: [], + NumNodes: jobDefProps.nodeProps.count, + } : undefined, + Parameters: { + foo: 'bar', + }, + RetryStrategy: { + Attempts: jobDefProps.retryAttempts, + }, + Timeout: { + AttemptDurationSeconds: jobDefProps.timeout ? jobDefProps.timeout.toSeconds() : -1, + }, + Type: 'container', + PlatformCapabilities: ['EC2'], + }); + }); + + test('renders the correct cloudformation properties for a Fargate job definition', () => { + // WHEN + const executionRole = new iam.Role(stack, 'execution-role', { + assumedBy: new iam.ServicePrincipal('ecs-tasks.amazonaws.com'), + }); + + new batch.JobDefinition(stack, 'job-def', { + ...jobDefProps, + container: { ...jobDefProps.container, executionRole, gpuCount: undefined }, + platformCapabilities: [PlatformCapabilities.FARGATE], + }); + + // THEN + Template.fromStack(stack).hasResourceProperties('AWS::Batch::JobDefinition', { + JobDefinitionName: jobDefProps.jobDefinitionName, + ContainerProperties: jobDefProps.container ? { + Command: jobDefProps.container.command, + Environment: [ + { + Name: 'foo', + Value: 'bar', + }, + ], + ExecutionRoleArn: { + 'Fn::GetAtt': [ + 'executionroleD9A39BE6', + 'Arn', + ], + }, + InstanceType: jobDefProps.container.instanceType ? jobDefProps.container.instanceType.toString() : '', + LinuxParameters: {}, + LogConfiguration: { + LogDriver: 'awslogs', + Options: { + 'awslogs-region': 'us-east-1', + }, + }, + MountPoints: [], + Privileged: jobDefProps.container.privileged, + ReadonlyRootFilesystem: jobDefProps.container.readOnly, + ResourceRequirements: [ + { Type: 'VCPU', Value: String(jobDefProps.container.vcpus) }, + { Type: 'MEMORY', Value: String(jobDefProps.container.memoryLimitMiB) }, + ], Ulimits: [], User: jobDefProps.container.user, - Vcpus: jobDefProps.container.vcpus, Volumes: [], } : undefined, NodeProperties: jobDefProps.nodeProps ? { @@ -112,8 +184,10 @@ describe('Batch Job Definition', () => { AttemptDurationSeconds: jobDefProps.timeout ? 
jobDefProps.timeout.toSeconds() : -1, }, Type: 'container', + PlatformCapabilities: ['FARGATE'], }); }); + test('can use an ecr image', () => { // WHEN const repo = new ecr.Repository(stack, 'image-repo'); @@ -176,10 +250,12 @@ describe('Batch Job Definition', () => { ], ], }, - Memory: 4, Privileged: false, ReadonlyRootFilesystem: false, - Vcpus: 1, + ResourceRequirements: [ + { Type: 'VCPU', Value: '1' }, + { Type: 'MEMORY', Value: '4' }, + ], }, }); }); @@ -196,10 +272,12 @@ describe('Batch Job Definition', () => { Template.fromStack(stack).hasResourceProperties('AWS::Batch::JobDefinition', { ContainerProperties: { Image: 'docker/whalesay', - Memory: 4, Privileged: false, ReadonlyRootFilesystem: false, - Vcpus: 1, + ResourceRequirements: [ + { Type: 'VCPU', Value: '1' }, + { Type: 'MEMORY', Value: '4' }, + ], }, }); }); @@ -286,4 +364,40 @@ describe('Batch Job Definition', () => { }, }); }); + describe('using fargate job definition', () => { + test('can configure platform configuration properly', () => { + // GIVEN + const executionRole = new iam.Role(stack, 'execution-role', { + assumedBy: new iam.ServicePrincipal('batch.amazonaws.com'), + }); + // WHEN + new batch.JobDefinition(stack, 'job-def', { + platformCapabilities: [batch.PlatformCapabilities.FARGATE], + container: { + image: ecs.EcrImage.fromRegistry('docker/whalesay'), + platformVersion: ecs.FargatePlatformVersion.LATEST, + executionRole: executionRole, + }, + }); + // THEN + Template.fromStack(stack).hasResourceProperties('AWS::Batch::JobDefinition', { + ContainerProperties: { + FargatePlatformConfiguration: { + PlatformVersion: 'LATEST', + }, + }, + }); + }); + test('must require executionRole', () => { + throws(() => { + // WHEN + new batch.JobDefinition(stack, 'job-def', { + platformCapabilities: [batch.PlatformCapabilities.FARGATE], + container: { + image: ecs.EcrImage.fromRegistry('docker/whalesay'), + }, + }); + }); + }); + }); }); diff --git a/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json b/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json index 77a8854041e1f..f4dfe0408f63e 100644 --- a/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json +++ b/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json @@ -65,11 +65,16 @@ "Type": "container", "ContainerProperties": { "Image": "test-repo", - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, diff --git a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json index 97eea60b24dcc..f37bcd6e520f6 100644 --- a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json +++ b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json @@ -873,11 +873,16 @@ ] ] }, - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, diff --git a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json 
b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json index 2026e45ae3c4e..a3851fd3fb5b7 100644 --- a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json +++ b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json @@ -873,11 +873,16 @@ ] ] }, - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, From 367a24a6d5131047f849f6f4028a624b90f3e9d1 Mon Sep 17 00:00:00 2001 From: Rico Huijbers Date: Mon, 13 Sep 2021 11:25:02 +0200 Subject: [PATCH 35/41] chore(deps): bump version of `conventional-commits-parser` (#16470) ...so that we transitively depend on a newer version of `trim-off-newlines`, which has a CVE filed against it. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- tools/cdk-release/package.json | 2 +- yarn.lock | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/cdk-release/package.json b/tools/cdk-release/package.json index 1f69eefdebddc..8823387b5c7e4 100644 --- a/tools/cdk-release/package.json +++ b/tools/cdk-release/package.json @@ -40,7 +40,7 @@ "conventional-changelog": "^3.1.24", "conventional-changelog-config-spec": "^2.1.0", "conventional-changelog-preset-loader": "^2.3.4", - "conventional-commits-parser": "^3.2.1", + "conventional-commits-parser": "^3.2.2", "conventional-changelog-writer": "^4.1.0", "fs-extra": "^9.1.0", "git-raw-commits": "^2.0.10", diff --git a/yarn.lock b/yarn.lock index e9d7fe62947d7..38c8a3761acd7 100644 --- a/yarn.lock +++ b/yarn.lock @@ -3216,6 +3216,18 @@ conventional-commits-parser@^3.2.0, conventional-commits-parser@^3.2.1: through2 "^4.0.0" trim-off-newlines "^1.0.0" +conventional-commits-parser@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/conventional-commits-parser/-/conventional-commits-parser-3.2.2.tgz#190fb9900c6e02be0c0bca9b03d57e24982639fd" + integrity sha512-Jr9KAKgqAkwXMRHjxDwO/zOCDKod1XdAESHAGuJX38iZ7ZzVti/tvVoysO0suMsdAObp9NQ2rHSsSbnAqZ5f5g== + dependencies: + JSONStream "^1.0.4" + is-text-path "^1.0.1" + lodash "^4.17.15" + meow "^8.0.0" + split2 "^3.0.0" + through2 "^4.0.0" + conventional-recommended-bump@6.1.0, conventional-recommended-bump@^6.1.0: version "6.1.0" resolved "https://registry.yarnpkg.com/conventional-recommended-bump/-/conventional-recommended-bump-6.1.0.tgz#cfa623285d1de554012f2ffde70d9c8a22231f55" From c68d0592ec829cbba51a1ac7ebf110c76c87ec5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Sep 2021 10:16:29 +0000 Subject: [PATCH 36/41] chore(deps): bump axios from 0.21.1 to 0.21.4 (#16421) Bumps [axios](https://github.com/axios/axios) from 0.21.1 to 0.21.4. - [Release notes](https://github.com/axios/axios/releases) - [Changelog](https://github.com/axios/axios/blob/master/CHANGELOG.md) - [Commits](https://github.com/axios/axios/compare/v0.21.1...v0.21.4) --- updated-dependencies: - dependency-name: axios dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- yarn.lock | 114 +++++++----------------------------------------------- 1 file changed, 13 insertions(+), 101 deletions(-) diff --git a/yarn.lock b/yarn.lock index 38c8a3761acd7..739470e52b176 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2063,11 +2063,6 @@ anymatch@^3.0.3: normalize-path "^3.0.0" picomatch "^2.0.4" -app-root-path@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/app-root-path/-/app-root-path-2.2.1.tgz#d0df4a682ee408273583d43f6f79e9892624bc9a" - integrity sha512-91IFKeKk7FjfmezPKkwtaRvSpnUc4gDwPAjA1YZ9Gn0q0PPeW+vbeUsZuyDwjI7+QTHhcLen2v25fi/AmhvbJA== - append-transform@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/append-transform/-/append-transform-1.0.0.tgz#046a52ae582a228bd72f58acfbe2967c678759ab" @@ -2289,21 +2284,6 @@ aws-sdk-mock@^5.2.1: sinon "^11.1.1" traverse "^0.6.6" -aws-sdk@^2.596.0: - version "2.970.0" - resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.970.0.tgz#dc258b61b4727dcb5130c494376b598eb19f827b" - integrity sha512-9+ktvE5xgpHr3RsFOcq1SrhXLvU+jUji44jbecFZb5C2lzoEEB29aeN39OLJMW0ZuOrR+3TNum8c3f8YVx6A7w== - dependencies: - buffer "4.9.2" - events "1.1.1" - ieee754 "1.1.13" - jmespath "0.15.0" - querystring "0.2.0" - sax "1.2.1" - url "0.10.3" - uuid "3.3.2" - xml2js "0.4.19" - aws-sdk@^2.848.0, aws-sdk@^2.928.0: version "2.950.0" resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.950.0.tgz#cffb65590c50de9479c87ed04df57d355d1d8a22" @@ -2345,11 +2325,11 @@ aws4@^1.8.0: integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== axios@^0.21.1: - version "0.21.1" - resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.1.tgz#22563481962f4d6bde9a76d516ef0e5d3c09b2b8" - integrity sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA== + version "0.21.4" + resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575" + integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg== dependencies: - follow-redirects "^1.10.0" + follow-redirects "^1.14.0" babel-jest@^26.6.3: version "26.6.3" @@ -3680,16 +3660,6 @@ dot-prop@^6.0.1: dependencies: is-obj "^2.0.0" -dotenv-json@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/dotenv-json/-/dotenv-json-1.0.0.tgz#fc7f672aafea04bed33818733b9f94662332815c" - integrity sha512-jAssr+6r4nKhKRudQ0HOzMskOFFi9+ubXWwmrSGJFgTvpjyPXCXsCsYbjif6mXp7uxA7xY3/LGaiTQukZzSbOQ== - -dotenv@^8.0.0: - version "8.6.0" - resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.6.0.tgz#061af664d19f7f4d8fc6e4ff9b584ce237adcb8b" - integrity sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g== - dotgitignore@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/dotgitignore/-/dotgitignore-2.1.0.tgz#a4b15a4e4ef3cf383598aaf1dfa4a04bcc089b7b" @@ -3895,11 +3865,6 @@ escodegen@^2.0.0: optionalDependencies: source-map "~0.6.1" -eslint-config-standard@^14.1.1: - version "14.1.1" - resolved "https://registry.yarnpkg.com/eslint-config-standard/-/eslint-config-standard-14.1.1.tgz#830a8e44e7aef7de67464979ad06b406026c56ea" - integrity sha512-Z9B+VR+JIXRxz21udPTL9HpFMyoMUEeX1G251EQ6e05WD9aPVtVBn09XUmZ259wCMlCDmYDSZG62Hhm+ZTJcUg== - eslint-import-resolver-node@^0.3.4: version "0.3.4" resolved 
"https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz#85ffa81942c25012d8231096ddf679c03042c717" @@ -3927,14 +3892,6 @@ eslint-module-utils@^2.6.1: debug "^3.2.7" pkg-dir "^2.0.0" -eslint-plugin-es@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz#75a7cdfdccddc0589934aeeb384175f221c57893" - integrity sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ== - dependencies: - eslint-utils "^2.0.0" - regexpp "^3.0.0" - eslint-plugin-import@^2.23.4: version "2.23.4" resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.23.4.tgz#8dceb1ed6b73e46e50ec9a5bb2411b645e7d3d97" @@ -3963,33 +3920,11 @@ eslint-plugin-jest@^24.3.7: dependencies: "@typescript-eslint/experimental-utils" "^4.0.1" -eslint-plugin-node@^11.1.0: - version "11.1.0" - resolved "https://registry.yarnpkg.com/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz#c95544416ee4ada26740a30474eefc5402dc671d" - integrity sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g== - dependencies: - eslint-plugin-es "^3.0.0" - eslint-utils "^2.0.0" - ignore "^5.1.1" - minimatch "^3.0.4" - resolve "^1.10.1" - semver "^6.1.0" - -eslint-plugin-promise@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-promise/-/eslint-plugin-promise-4.3.1.tgz#61485df2a359e03149fdafc0a68b0e030ad2ac45" - integrity sha512-bY2sGqyptzFBDLh/GMbAxfdJC+b0f23ME63FOE4+Jao0oZ3E1LEwFtWJX/1pGMJLiTtrSSern2CRM/g+dfc0eQ== - eslint-plugin-rulesdir@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/eslint-plugin-rulesdir/-/eslint-plugin-rulesdir-0.2.0.tgz#0d729e3f11bcb1a18d9b724a29a6d1a082ac2d62" integrity sha512-PPQPCsPkzF3upl1862swPA1bmDAAHKHmJJ4JTHJ11JCVCU4sycB0K5LLA/Rwr6r4VbnpScvUvHV4hqfdjvFmhQ== -eslint-plugin-standard@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/eslint-plugin-standard/-/eslint-plugin-standard-4.1.0.tgz#0c3bf3a67e853f8bbbc580fb4945fbf16f41b7c5" - integrity sha512-ZL7+QRixjTR6/528YNGyDotyffm5OQst/sGxKDwGb9Uqs4In5Egi4+jbobhqJoyoCM6/7v/1A5fhQ7ScMtDjaQ== - eslint-scope@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" @@ -3998,7 +3933,7 @@ eslint-scope@^5.1.1: esrecurse "^4.3.0" estraverse "^4.1.1" -eslint-utils@^2.0.0, eslint-utils@^2.1.0: +eslint-utils@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-2.1.0.tgz#d2de5e03424e707dc10c74068ddedae708741b27" integrity sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg== @@ -4453,10 +4388,10 @@ flatted@^3.1.0: resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.2.1.tgz#bbef080d95fca6709362c73044a1634f7c6e7d05" integrity sha512-OMQjaErSFHmHqZe+PSidH5n8j3O0F2DdnVh8JB4j4eUQ2k6KvB0qGfrKIhapvez5JerBbmWkaLYUYWISaESoXg== -follow-redirects@^1.10.0, follow-redirects@^1.11.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.1.tgz#d9114ded0a1cfdd334e164e6662ad02bfd91ff43" - integrity sha512-HWqDgT7ZEkqRzBvc2s64vSZ/hfOceEol3ac/7tKwzuvEyWx3/4UegXh5oBOIotkGsObyk3xznnSRVADBgWSQVg== +follow-redirects@^1.11.0, follow-redirects@^1.14.0: + version "1.14.3" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.3.tgz#6ada78118d8d24caee595595accdc0ac6abd022e" + integrity 
sha512-3MkHxknWMUtb23apkgz/83fDoe+y+qr0TdgacGIA7bew+QLBo3vdgEN2xEsuXNivpFy4CyDhBBZnNZOtalmenw== for-in@^1.0.2: version "1.0.2" @@ -5056,7 +4991,7 @@ ignore@^4.0.6: resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== -ignore@^5.1.1, ignore@^5.1.4, ignore@^5.1.8, ignore@~5.1.8: +ignore@^5.1.4, ignore@^5.1.8, ignore@~5.1.8: version "5.1.8" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== @@ -6323,24 +6258,6 @@ kleur@^3.0.3: resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -lambda-leak@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/lambda-leak/-/lambda-leak-2.0.0.tgz#771985d3628487f6e885afae2b54510dcfb2cd7e" - integrity sha1-dxmF02KEh/boha+uK1RRDc+yzX4= - -lambda-tester@^3.6.0: - version "3.6.0" - resolved "https://registry.yarnpkg.com/lambda-tester/-/lambda-tester-3.6.0.tgz#ceb7d4f4f0da768487a05cff37dcd088508b5247" - integrity sha512-F2ZTGWCLyIR95o/jWK46V/WnOCFAEUG/m/V7/CLhPJ7PCM+pror1rZ6ujP3TkItSGxUfpJi0kqwidw+M/nEqWw== - dependencies: - app-root-path "^2.2.1" - dotenv "^8.0.0" - dotenv-json "^1.0.0" - lambda-leak "^2.0.0" - semver "^6.1.1" - uuid "^3.3.2" - vandium-utils "^1.1.1" - lazystream@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/lazystream/-/lazystream-1.0.0.tgz#f6995fe0f820392f61396be89462407bb77168e4" @@ -8424,7 +8341,7 @@ regexp.prototype.flags@^1.3.0: call-bind "^1.0.2" define-properties "^1.1.3" -regexpp@^3.0.0, regexpp@^3.1.0: +regexpp@^3.1.0: version "3.2.0" resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.2.0.tgz#0425a2768d8f23bad70ca4b90461fa2f1213e1b2" integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg== @@ -8514,7 +8431,7 @@ resolve-url@^0.2.1: resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= -resolve@^1.10.0, resolve@^1.10.1, resolve@^1.11.1, resolve@^1.13.1, resolve@^1.17.0, resolve@^1.18.1, resolve@^1.20.0: +resolve@^1.10.0, resolve@^1.11.1, resolve@^1.13.1, resolve@^1.17.0, resolve@^1.18.1, resolve@^1.20.0: version "1.20.0" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== @@ -8661,7 +8578,7 @@ semver@7.x, semver@^7.1.1, semver@^7.1.3, semver@^7.2.1, semver@^7.3.2, semver@^ dependencies: lru-cache "^6.0.0" -semver@^6.0.0, semver@^6.1.0, semver@^6.1.1, semver@^6.3.0: +semver@^6.0.0, semver@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== @@ -9936,11 +9853,6 @@ validate-npm-package-name@^3.0.0: dependencies: builtins "^1.0.3" -vandium-utils@^1.1.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/vandium-utils/-/vandium-utils-1.2.0.tgz#44735de4b7641a05de59ebe945f174e582db4f59" - integrity sha1-RHNd5LdkGgXeWevpRfF05YLbT1k= - 
verror@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" From 1df0e1b12bd6a57c629263f7e35f6bf2a681316b Mon Sep 17 00:00:00 2001 From: Rico Huijbers Date: Mon, 13 Sep 2021 13:05:12 +0200 Subject: [PATCH 37/41] chore(deps): bump `proxy-agent` to `5.0.0` (#16469) Addresses another vulnerability in `pac-resolver`. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/aws-cdk/package.json | 2 +- yarn.lock | 44 ++++++++++++++++++++--------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/packages/aws-cdk/package.json b/packages/aws-cdk/package.json index f179a9bd58bb6..96bae8765c526 100644 --- a/packages/aws-cdk/package.json +++ b/packages/aws-cdk/package.json @@ -85,7 +85,7 @@ "json-diff": "^0.5.4", "minimatch": ">=3.0", "promptly": "^3.2.0", - "proxy-agent": "^4.0.1", + "proxy-agent": "^5.0.0", "semver": "^7.3.5", "source-map-support": "^0.5.19", "table": "^6.7.1", diff --git a/yarn.lock b/yarn.lock index 739470e52b176..5d08236c73a03 100644 --- a/yarn.lock +++ b/yarn.lock @@ -3529,14 +3529,15 @@ define-property@^2.0.2: is-descriptor "^1.0.2" isobject "^3.0.1" -degenerator@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/degenerator/-/degenerator-2.2.0.tgz#49e98c11fa0293c5b26edfbb52f15729afcdb254" - integrity sha512-aiQcQowF01RxFI4ZLFMpzyotbQonhNpBao6dkI8JPk5a+hmSjR5ErHp2CQySmQe8os3VBqLCIh87nDBgZXvsmg== +degenerator@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/degenerator/-/degenerator-3.0.1.tgz#7ef78ec0c8577a544477308ddf1d2d6e88d51f5b" + integrity sha512-LFsIFEeLPlKvAKXu7j3ssIG6RT0TbI7/GhsqrI0DnHASEQjXQ0LUSYcjJteGgRGmZbl1TnMSxpNQIAiJ7Du5TQ== dependencies: ast-types "^0.13.2" escodegen "^1.8.1" esprima "^4.0.0" + vm2 "^3.9.3" delay@5.0.0: version "5.0.0" @@ -7688,10 +7689,10 @@ p-waterfall@^2.1.1: dependencies: p-reduce "^2.0.0" -pac-proxy-agent@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/pac-proxy-agent/-/pac-proxy-agent-4.1.0.tgz#66883eeabadc915fc5e95457324cb0f0ac78defb" - integrity sha512-ejNgYm2HTXSIYX9eFlkvqFp8hyJ374uDf0Zq5YUAifiSh1D6fo+iBivQZirGvVv8dCYUsLhmLBRhlAYvBKI5+Q== +pac-proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/pac-proxy-agent/-/pac-proxy-agent-5.0.0.tgz#b718f76475a6a5415c2efbe256c1c971c84f635e" + integrity sha512-CcFG3ZtnxO8McDigozwE3AqAw15zDvGH+OjXO4kzf7IkEKkQ4gxQ+3sdF50WmhQ4P/bVusXcqNE2S3XrNURwzQ== dependencies: "@tootallnate/once" "1" agent-base "6" @@ -7699,16 +7700,16 @@ pac-proxy-agent@^4.1.0: get-uri "3" http-proxy-agent "^4.0.1" https-proxy-agent "5" - pac-resolver "^4.1.0" + pac-resolver "^5.0.0" raw-body "^2.2.0" socks-proxy-agent "5" -pac-resolver@^4.1.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/pac-resolver/-/pac-resolver-4.2.0.tgz#b82bcb9992d48166920bc83c7542abb454bd9bdd" - integrity sha512-rPACZdUyuxT5Io/gFKUeeZFfE5T7ve7cAkE5TUZRRfuKP0u5Hocwe48X7ZEm6mYB+bTB0Qf+xlVlA/RM/i6RCQ== +pac-resolver@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/pac-resolver/-/pac-resolver-5.0.0.tgz#1d717a127b3d7a9407a16d6e1b012b13b9ba8dc0" + integrity sha512-H+/A6KitiHNNW+bxBKREk2MCGSxljfqRX76NjummWEYIat7ldVXRU3dhRIE3iXZ0nvGBk6smv3nntxKkzRL8NA== dependencies: - degenerator "^2.2.0" + degenerator "^3.0.1" ip "^1.1.5" netmask "^2.0.1" @@ -8047,17 +8048,17 @@ protocols@^1.1.0, protocols@^1.4.0: resolved 
"https://registry.yarnpkg.com/protocols/-/protocols-1.4.8.tgz#48eea2d8f58d9644a4a32caae5d5db290a075ce8" integrity sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg== -proxy-agent@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/proxy-agent/-/proxy-agent-4.0.1.tgz#326c3250776c7044cd19655ccbfadf2e065a045c" - integrity sha512-ODnQnW2jc/FUVwHHuaZEfN5otg/fMbvMxz9nMSUQfJ9JU7q2SZvSULSsjLloVgJOiv9yhc8GlNMKc4GkFmcVEA== +proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/proxy-agent/-/proxy-agent-5.0.0.tgz#d31405c10d6e8431fde96cba7a0c027ce01d633b" + integrity sha512-gkH7BkvLVkSfX9Dk27W6TyNOWWZWRilRfk1XxGNWOYJ2TuedAv1yFpCaU9QSBmBe716XOTNpYNOzhysyw8xn7g== dependencies: agent-base "^6.0.0" debug "4" http-proxy-agent "^4.0.0" https-proxy-agent "^5.0.0" lru-cache "^5.1.1" - pac-proxy-agent "^4.1.0" + pac-proxy-agent "^5.0.0" proxy-from-env "^1.0.0" socks-proxy-agent "^5.0.0" @@ -9862,6 +9863,11 @@ verror@1.10.0: core-util-is "1.0.2" extsprintf "^1.2.0" +vm2@^3.9.3: + version "3.9.3" + resolved "https://registry.yarnpkg.com/vm2/-/vm2-3.9.3.tgz#29917f6cc081cc43a3f580c26c5b553fd3c91f40" + integrity sha512-smLS+18RjXYMl9joyJxMNI9l4w7biW8ilSDaVRvFBDwOH8P0BK1ognFQTpg0wyQ6wIKLTblHJvROW692L/E53Q== + w3c-hr-time@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd" From 5835c2d1db515463b12f77efffc2b26ba9e00ec3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Sep 2021 11:56:39 +0000 Subject: [PATCH 38/41] chore(deps): Bump tar from 4.4.16 to 4.4.19 (#16304) Bumps [tar](https://github.com/npm/node-tar) from 4.4.16 to 4.4.19. - [Release notes](https://github.com/npm/node-tar/releases) - [Changelog](https://github.com/npm/node-tar/blob/main/CHANGELOG.md) - [Commits](https://github.com/npm/node-tar/compare/v4.4.16...v4.4.19) --- updated-dependencies: - dependency-name: tar dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn.lock b/yarn.lock index 5d08236c73a03..e9744b94014b6 100644 --- a/yarn.lock +++ b/yarn.lock @@ -9262,9 +9262,9 @@ tar-stream@^2.2.0: readable-stream "^3.1.1" tar@^4.4.12: - version "4.4.16" - resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.16.tgz#4a48b3c025e77d9d0c788f038a09b91c594d326d" - integrity sha512-gOVUT/KWPkGFZQmCRDVFNUWBl7niIo/PRR7lzrIqtZpit+st54lGROuVjc6zEQM9FhH+dJfQIl+9F0k8GNXg5g== + version "4.4.19" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3" + integrity sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA== dependencies: chownr "^1.1.4" fs-minipass "^1.2.7" From fed30fc815bac1006003524ac6232778f3c3babe Mon Sep 17 00:00:00 2001 From: Niranjan Jayakar Date: Mon, 13 Sep 2021 14:23:19 +0100 Subject: [PATCH 39/41] feat(assertions): match into serialized json (#16456) Introduce `Match.serializedJson()` that can parse JSON serialized as a string, and continue matching into the parsed JSON. Migrate the rest of the tests in the `pipelines` module. 
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/assertions/README.md | 41 +++ packages/@aws-cdk/assertions/lib/match.ts | 41 +++ .../@aws-cdk/assertions/test/match.test.ts | 51 +++- packages/@aws-cdk/pipelines/package.json | 1 - .../pipelines/test/compliance/assets.test.ts | 229 ++++++++-------- .../compliance/docker-credentials.test.ts | 65 +++-- .../test/compliance/security-check.test.ts | 127 +++++---- .../test/compliance/self-mutation.test.ts | 57 ++-- .../test/compliance/stack-ordering.test.ts | 91 ++++--- .../pipelines/test/compliance/synths.test.ts | 151 ++++++----- .../test/compliance/validations.test.ts | 247 +++++++++--------- .../pipelines/test/testhelpers/index.ts | 1 - .../pipelines/test/testhelpers/matchers.ts | 63 ++++- .../test/testhelpers/testmatchers.ts | 42 --- 14 files changed, 675 insertions(+), 532 deletions(-) delete mode 100644 packages/@aws-cdk/pipelines/test/testhelpers/testmatchers.ts diff --git a/packages/@aws-cdk/assertions/README.md b/packages/@aws-cdk/assertions/README.md index 1fc0bb28e0cd3..d2763d0ba24ef 100644 --- a/packages/@aws-cdk/assertions/README.md +++ b/packages/@aws-cdk/assertions/README.md @@ -321,6 +321,47 @@ assert.hasResourceProperties('Foo::Bar', Match.objectLike({ }}); ``` +### Serialized JSON + +Often, we find that some CloudFormation Resource types declare properties as a string, +but actually expect JSON serialized as a string. +For example, the [`BuildSpec` property of `AWS::CodeBuild::Project`][Pipeline BuildSpec], +the [`Definition` property of `AWS::StepFunctions::StateMachine`][StateMachine Definition], +to name a couple. + +The `Match.serializedJson()` matcher allows deep matching within a stringified JSON. + +```ts +// Given a template - +// { +// "Resources": { +// "MyBar": { +// "Type": "Foo::Bar", +// "Properties": { +// "Baz": "{ \"Fred\": [\"Waldo\", \"Willow\"] }" +// } +// } +// } +// } + +// The following will NOT throw an assertion error +assert.hasResourceProperties('Foo::Bar', { + Baz: Match.serializedJson({ + Fred: Match.arrayWith(["Waldo"]), + }), +}); + +// The following will throw an assertion error +assert.hasResourceProperties('Foo::Bar', { + Baz: Match.serializedJson({ + Fred: ["Waldo", "Johnny"], + }), +}); +``` + +[Pipeline BuildSpec]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-source.html#cfn-codebuild-project-source-buildspec +[StateMachine Definition]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-definition + ## Capturing Values This matcher APIs documented above allow capturing values in the matching entry diff --git a/packages/@aws-cdk/assertions/lib/match.ts b/packages/@aws-cdk/assertions/lib/match.ts index 5c7e3fad8e90c..4fea0ed0f713e 100644 --- a/packages/@aws-cdk/assertions/lib/match.ts +++ b/packages/@aws-cdk/assertions/lib/match.ts @@ -65,6 +65,14 @@ export abstract class Match { return new NotMatch('not', pattern); } + /** + * Matches any string-encoded JSON and applies the specified pattern after parsing it. + * @param pattern the pattern to match after parsing the encoded JSON. + */ + public static serializedJson(pattern: any): Matcher { + return new SerializedJson('serializedJson', pattern); + } + /** * Matches any non-null value at the target. 
*/ @@ -265,6 +273,39 @@ class ObjectMatch extends Matcher { } } +class SerializedJson extends Matcher { + constructor( + public readonly name: string, + private readonly pattern: any, + ) { + super(); + }; + + public test(actual: any): MatchResult { + const result = new MatchResult(actual); + if (getType(actual) !== 'string') { + result.push(this, [], `Expected JSON as a string but found ${getType(actual)}`); + return result; + } + let parsed; + try { + parsed = JSON.parse(actual); + } catch (err) { + if (err instanceof SyntaxError) { + result.push(this, [], `Invalid JSON string: ${actual}`); + return result; + } else { + throw err; + } + } + + const matcher = Matcher.isMatcher(this.pattern) ? this.pattern : new LiteralMatch(this.name, this.pattern); + const innerResult = matcher.test(parsed); + result.compose(`(${this.name})`, innerResult); + return result; + } +} + class NotMatch extends Matcher { constructor( public readonly name: string, diff --git a/packages/@aws-cdk/assertions/test/match.test.ts b/packages/@aws-cdk/assertions/test/match.test.ts index 268810857f9a8..b0c92a2da2c8f 100644 --- a/packages/@aws-cdk/assertions/test/match.test.ts +++ b/packages/@aws-cdk/assertions/test/match.test.ts @@ -323,18 +323,63 @@ describe('Matchers', () => { expectFailure(matcher, {}, ['Missing key at /foo']); }); }); + + describe('serializedJson()', () => { + let matcher: Matcher; + + test('all types', () => { + matcher = Match.serializedJson({ Foo: 'Bar', Baz: 3, Boo: true, Fred: [1, 2] }); + expectPass(matcher, '{ "Foo": "Bar", "Baz": 3, "Boo": true, "Fred": [1, 2] }'); + }); + + test('simple match', () => { + matcher = Match.serializedJson({ Foo: 'Bar' }); + expectPass(matcher, '{ "Foo": "Bar" }'); + + expectFailure(matcher, '{ "Foo": "Baz" }', ['Expected Bar but received Baz at (serializedJson)/Foo']); + expectFailure(matcher, '{ "Foo": 4 }', ['Expected type string but received number at (serializedJson)/Foo']); + expectFailure(matcher, '{ "Bar": "Baz" }', [ + 'Unexpected key at (serializedJson)/Bar', + 'Missing key at (serializedJson)/Foo', + ]); + }); + + test('nested matcher', () => { + matcher = Match.serializedJson(Match.objectLike({ + Foo: Match.arrayWith(['Bar']), + })); + + expectPass(matcher, '{ "Foo": ["Bar"] }'); + expectPass(matcher, '{ "Foo": ["Bar", "Baz"] }'); + expectPass(matcher, '{ "Foo": ["Bar", "Baz"], "Fred": "Waldo" }'); + + expectFailure(matcher, '{ "Foo": ["Baz"] }', ['Missing element [Bar] at pattern index 0 at (serializedJson)/Foo']); + expectFailure(matcher, '{ "Bar": ["Baz"] }', ['Missing key at (serializedJson)/Foo']); + }); + + test('invalid json string', () => { + matcher = Match.serializedJson({ Foo: 'Bar' }); + + expectFailure(matcher, '{ "Foo"', [/invalid JSON string/i]); + }); + }); }); function expectPass(matcher: Matcher, target: any): void { - expect(matcher.test(target).hasFailed()).toEqual(false); + const result = matcher.test(target); + if (result.hasFailed()) { + fail(result.toHumanStrings()); // eslint-disable-line jest/no-jasmine-globals + } } function expectFailure(matcher: Matcher, target: any, expected: (string | RegExp)[] = []): void { const result = matcher.test(target); expect(result.failCount).toBeGreaterThan(0); const actual = result.toHumanStrings(); - if (expected.length > 0) { - expect(actual.length).toEqual(expected.length); + if (expected.length > 0 && actual.length !== expected.length) { + // only do this if the lengths are different, so as to display a nice failure message. 
+ // otherwise need to use `toMatch()` to support RegExp + expect(actual).toEqual(expected); } for (let i = 0; i < expected.length; i++) { const e = expected[i]; diff --git a/packages/@aws-cdk/pipelines/package.json b/packages/@aws-cdk/pipelines/package.json index 37605df5210d4..7daeb0589feb0 100644 --- a/packages/@aws-cdk/pipelines/package.json +++ b/packages/@aws-cdk/pipelines/package.json @@ -32,7 +32,6 @@ "organization": true }, "devDependencies": { - "@aws-cdk/assert-internal": "0.0.0", "@aws-cdk/assertions": "0.0.0", "@aws-cdk/aws-apigateway": "0.0.0", "@aws-cdk/aws-ecr-assets": "0.0.0", diff --git a/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts b/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts index c1b72cf7ab316..68b10d259683f 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts @@ -1,11 +1,10 @@ import * as fs from 'fs'; import * as path from 'path'; -import { arrayWith, Capture, deepObjectLike, encodedJson, notMatching, objectLike, ResourcePart, stringLike, SynthUtils } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Capture, Match, Template } from '@aws-cdk/assertions'; import * as cb from '@aws-cdk/aws-codebuild'; import * as ec2 from '@aws-cdk/aws-ec2'; -import { Stack } from '@aws-cdk/core'; -import { behavior, PIPELINE_ENV, TestApp, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, FileAssetApp, MegaAssetsApp, TwoFileAssetsApp, DockerAssetApp, PlainStackApp } from '../testhelpers'; +import { Stack, Stage } from '@aws-cdk/core'; +import { behavior, PIPELINE_ENV, TestApp, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, FileAssetApp, MegaAssetsApp, TwoFileAssetsApp, DockerAssetApp, PlainStackApp, stringLike } from '../testhelpers'; const FILE_ASSET_SOURCE_HASH = '8289faf53c7da377bb2b90615999171adef5e1d8f6b88810e5fef75e6ca09ba5'; const FILE_ASSET_SOURCE_HASH2 = 'ac76997971c3f6ddf37120660003f1ced72b4fc58c498dfd99c78fa77e721e0e'; @@ -42,10 +41,10 @@ describe('basic pipeline', () => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: notMatching(arrayWith(objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.not(Match.arrayWith([Match.objectLike({ Name: 'Assets', - }))), + })])), }); } }); @@ -67,13 +66,13 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: 'Assets' }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: 'Assets' }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -96,13 +95,13 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: 'Assets' }), - objectLike({ 
Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: 'Assets' }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -126,14 +125,14 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: stringLike('Assets*') }), - objectLike({ Name: stringLike('Assets*2') }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: stringLike('Assets*') }), + Match.objectLike({ Name: stringLike('Assets*2') }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -155,15 +154,15 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: stringLike('Assets*') }), // 'Assets' vs 'Assets.1' - objectLike({ Name: stringLike('Assets*2') }), - objectLike({ Name: stringLike('Assets*3') }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: stringLike('Assets*') }), // 'Assets' vs 'Assets.1' + Match.objectLike({ Name: stringLike('Assets*2') }), + Match.objectLike({ Name: stringLike('Assets*3') }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -186,15 +185,15 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`), + commands: Match.arrayWith([`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`]), }, }, })), @@ -220,14 +219,14 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ - objectLike({ RunOrder: 1 }), - objectLike({ RunOrder: 1 }), + Match.objectLike({ RunOrder: 1 }), + Match.objectLike({ RunOrder: 1 }), ], - }), + }]), }); } }); @@ -242,16 +241,16 @@ describe('basic pipeline', () => { pipeline.addStage('SomeStage').addStackArtifactDeployment(asm.getStackByName('FileAssetApp-Stack')); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - 
Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ - objectLike({ + Match.objectLike({ Name: 'FileAsset1', RunOrder: 1, }), ], - }), + }]), }); }); @@ -277,17 +276,17 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith(stringLike('cdk-assets *')), + commands: Match.arrayWith([stringLike('cdk-assets *')]), }, }, })), }, - Environment: objectLike({ + Environment: Match.objectLike({ PrivilegedMode: false, Image: 'aws/codebuild/standard:5.0', }), @@ -311,17 +310,17 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith(stringLike('cdk-assets *')), + commands: Match.arrayWith([stringLike('cdk-assets *')]), }, }, })), }, - Environment: objectLike({ + Environment: Match.objectLike({ Image: 'aws/codebuild/standard:5.0', PrivilegedMode: true, }), @@ -349,12 +348,12 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm install -g cdk-assets@1.2.3'], @@ -386,7 +385,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Role', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Role', { AssumeRolePolicyDocument: { Statement: [{ Action: 'sts:AssumeRole', @@ -402,7 +401,7 @@ describe('basic pipeline', () => { }], }, }); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(FILE_PUBLISHING_ROLE, 'CdkAssetsFileRole6BE17A07')); } }); @@ -439,7 +438,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy([FILE_PUBLISHING_ROLE, 'arn:${AWS::Partition}:iam::0123456789012:role/cdk-hnb659fds-file-publishing-role-0123456789012-eu-west-1'], 'CdkAssetsFileRole6BE17A07')); } @@ -468,7 +467,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(FILE_PUBLISHING_ROLE, 'CdkAssetsFileRole6BE17A07')); } }); @@ -492,7 +491,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Role', { + 
Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Role', { AssumeRolePolicyDocument: { Statement: [{ Action: 'sts:AssumeRole', @@ -508,7 +507,7 @@ describe('basic pipeline', () => { }], }, }); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(IMAGE_PUBLISHING_ROLE, 'CdkAssetsDockerRole484B6DD3')); } }); @@ -534,9 +533,9 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(FILE_PUBLISHING_ROLE, 'CdkAssetsFileRole6BE17A07')); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(IMAGE_PUBLISHING_ROLE, 'CdkAssetsDockerRole484B6DD3')); } }); @@ -576,12 +575,12 @@ behavior('can supply pre-install scripts to asset upload', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm config set registry https://registry.com', 'npm install -g cdk-assets'], @@ -620,8 +619,8 @@ describe('pipeline with VPC', () => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - VpcConfig: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + VpcConfig: Match.objectLike({ SecurityGroupIds: [ { 'Fn::GetAtt': ['CdkAssetsDockerAsset1SecurityGroup078F5C66', 'GroupId'] }, ], @@ -655,16 +654,16 @@ describe('pipeline with VPC', () => { function THEN_codePipelineExpectation() { // Assets Project - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { Roles: [ { Ref: 'CdkAssetsDockerRole484B6DD3' }, ], PolicyDocument: { - Statement: arrayWith({ - Action: arrayWith('ec2:DescribeSecurityGroups'), + Statement: Match.arrayWith([{ + Action: Match.arrayWith(['ec2:DescribeSecurityGroups']), Effect: 'Allow', Resource: '*', - }), + }]), }, }); } @@ -690,10 +689,10 @@ describe('pipeline with VPC', () => { function THEN_codePipelineExpectation() { // Assets Project - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ - { + Match.objectLike({ Resource: '*', Action: [ 'ec2:CreateNetworkInterface', @@ -704,19 +703,19 @@ describe('pipeline with VPC', () => { 'ec2:DescribeDhcpOptions', 'ec2:DescribeVpcs', ], - }, + }), ], }, Roles: [{ Ref: 'CdkAssetsDockerRole484B6DD3' }], }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResource('AWS::CodeBuild::Project', { Properties: { ServiceRole: { 'Fn::GetAtt': ['CdkAssetsDockerRole484B6DD3', 'Arn'] }, }, DependsOn: [ 'CdkAssetsDockerAsset1PolicyDocument8DA96A22', ], - }, ResourcePart.CompleteDefinition); + }); } }); }); @@ -743,28 +742,29 @@ describe('pipeline with single asset publisher', () => { function 
THEN_codePipelineExpectation() { // THEN - const buildSpecName = Capture.aString(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + const buildSpecName = new Capture(); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ // Only one file asset action - objectLike({ RunOrder: 1, Name: 'FileAsset' }), + Match.objectLike({ RunOrder: 1, Name: 'FileAsset' }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: buildSpecName.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName, }, }); - const assembly = SynthUtils.synthesize(pipelineStack, { skipValidation: true }).assembly; + const assembly = synthesize(pipelineStack); - const actualFileName = buildSpecName.capturedValue; + const actualFileName = buildSpecName.asString(); + expect(actualFileName).toMatch(/^buildspec-.*\.yaml$/); const buildSpec = JSON.parse(fs.readFileSync(path.join(assembly.directory, actualFileName), { encoding: 'utf-8' })); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH2}:current_account-current_region"`); @@ -804,20 +804,20 @@ describe('pipeline with single asset publisher', () => { function THEN_codePipelineExpectation(pipelineStack2: Stack) { // THEN - const buildSpecName1 = Capture.aString(); - const buildSpecName2 = Capture.aString(); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + const buildSpecName1 = new Capture(); + const buildSpecName2 = new Capture(); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: buildSpecName1.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName1, }, }); - expect(pipelineStack2).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack2).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: buildSpecName2.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName2, }, }); - expect(buildSpecName1.capturedValue).not.toEqual(buildSpecName2.capturedValue); + expect(buildSpecName1.asString()).not.toEqual(buildSpecName2.asString()); } }); }); @@ -870,27 +870,27 @@ describe('pipeline with custom asset publisher BuildSpec', () => { function THEN_codePipelineExpectation() { - const buildSpecName = Capture.aString(); + const buildSpecName = new Capture(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ // Only one file asset action - objectLike({ RunOrder: 1, Name: 'FileAsset' }), + Match.objectLike({ RunOrder: 1, Name: 'FileAsset' }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', 
}, Source: { - BuildSpec: buildSpecName.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName, }, }); - const assembly = SynthUtils.synthesize(pipelineStack, { skipValidation: true }).assembly; - const buildSpec = JSON.parse(fs.readFileSync(path.join(assembly.directory, buildSpecName.capturedValue)).toString()); + const assembly = synthesize(pipelineStack); + const buildSpec = JSON.parse(fs.readFileSync(path.join(assembly.directory, buildSpecName.asString())).toString()); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH2}:current_account-current_region"`); expect(buildSpec.phases.pre_install.commands).toContain('preinstall'); @@ -978,9 +978,9 @@ behavior('necessary secrets manager permissions get added to asset roles', suite }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + Statement: Match.arrayWith([{ Action: 'secretsmanager:GetSecretValue', Effect: 'Allow', Resource: { @@ -993,7 +993,7 @@ behavior('necessary secrets manager permissions get added to asset roles', suite ], ], }, - }), + }]), }, Roles: [ { Ref: 'PipelineAssetsFileRole59943A77' }, @@ -1021,10 +1021,10 @@ behavior('adding environment variable to assets job adds SecretsManager permissi }); pipeline.addStage(new FileAssetApp(pipelineStack, 'MyApp')); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith( - objectLike({ + Statement: Match.arrayWith([ + Match.objectLike({ Action: 'secretsmanager:GetSecretValue', Effect: 'Allow', Resource: { @@ -1035,8 +1035,17 @@ behavior('adding environment variable to assets job adds SecretsManager permissi ]], }, }), - ), + ]), }, }); }); -}); \ No newline at end of file +}); + +function synthesize(stack: Stack) { + const root = stack.node.root; + if (!Stage.isStage(root)) { + throw new Error('unexpected: all stacks must be part of a Stage'); + } + + return root.synth({ skipValidation: true }); +} \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts b/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts index 5ada88b49b937..e1356304fe811 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts @@ -1,12 +1,11 @@ -import { arrayWith, deepObjectLike, encodedJson, stringLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import * as cb from '@aws-cdk/aws-codebuild'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import { Stack } from '@aws-cdk/core'; import { Construct } from 'constructs'; import * as cdkp from '../../lib'; import { CodeBuildStep } from '../../lib'; -import { behavior, PIPELINE_ENV, TestApp, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, DockerAssetApp } from '../testhelpers'; +import { behavior, PIPELINE_ENV, TestApp, 
LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, DockerAssetApp, stringLike } from '../testhelpers'; const secretSynthArn = 'arn:aws:secretsmanager:eu-west-1:0123456789012:secret:synth-012345'; const secretUpdateArn = 'arn:aws:secretsmanager:eu-west-1:0123456789012:secret:update-012345'; @@ -51,32 +50,32 @@ behavior('synth action receives install commands and access to relevant credenti domainCredentials: { 'synth.example.com': { secretsManagerSecretId: secretSynthArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir $HOME/.cdk', `echo '${expectedCredsConfig}' > $HOME/.cdk/cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the Synth project build: { - commands: arrayWith(stringLike('*cdk*synth*')), + commands: Match.arrayWith([stringLike('*cdk*synth*')]), }, }, })), }, }); - expect(pipelineStack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + Statement: Match.arrayWith([{ Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], Effect: 'Allow', Resource: secretSynthArn, - }), + }]), Version: '2012-10-17', }, Roles: [{ Ref: stringLike('Cdk*BuildProjectRole*') }], @@ -121,20 +120,20 @@ behavior('synth action receives Windows install commands if a Windows image is d domainCredentials: { 'synth.example.com': { secretsManagerSecretId: secretSynthArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/windows-base:2.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir %USERPROFILE%\\.cdk', `echo '${expectedCredsConfig}' > %USERPROFILE%\\.cdk\\cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the Synth project build: { - commands: arrayWith(stringLike('*cdk*synth*')), + commands: Match.arrayWith([stringLike('*cdk*synth*')]), }, }, })), @@ -164,34 +163,34 @@ behavior('self-update receives install commands and access to relevant credentia domainCredentials: { 'selfupdate.example.com': { secretsManagerSecretId: secretUpdateArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [expectedPhase]: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir $HOME/.cdk', `echo '${expectedCredsConfig}' > $HOME/.cdk/cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the SelfMutate project build: { - commands: arrayWith( + commands: Match.arrayWith([ stringLike('cdk * deploy PipelineStack*'), - ), + ]), }, }, })), }, }); - expect(pipelineStack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + 
Statement: Match.arrayWith([{ Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], Effect: 'Allow', Resource: secretUpdateArn, - }), + }]), Version: '2012-10-17', }, Roles: [{ Ref: stringLike('*SelfMutat*Role*') }], @@ -220,32 +219,32 @@ behavior('asset publishing receives install commands and access to relevant cred domainCredentials: { 'publish.example.com': { secretsManagerSecretId: secretPublishArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [expectedPhase]: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir $HOME/.cdk', `echo '${expectedCredsConfig}' > $HOME/.cdk/cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the Publishing project build: { - commands: arrayWith(stringLike('cdk-assets*')), + commands: Match.arrayWith([stringLike('cdk-assets*')]), }, }, })), }, }); - expect(pipelineStack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + Statement: Match.arrayWith([{ Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], Effect: 'Allow', Resource: secretPublishArn, - }), + }]), Version: '2012-10-17', }, Roles: [{ Ref: 'CdkAssetsDockerRole484B6DD3' }], diff --git a/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts b/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts index 7367930e6618a..d2ea77f45ff7d 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts @@ -1,9 +1,8 @@ -import { anything, arrayWith, encodedJson, objectLike, stringLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import { Topic } from '@aws-cdk/aws-sns'; import { Stack } from '@aws-cdk/core'; import * as cdkp from '../../lib'; -import { LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, TestApp } from '../testhelpers'; +import { LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, TestApp, stringLike } from '../testhelpers'; import { behavior } from '../testhelpers/compliance'; let app: TestApp; @@ -41,8 +40,8 @@ behavior('security check option generates lambda/codebuild at pipeline scope', ( }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toCountResources('AWS::Lambda::Function', 1); - expect(pipelineStack).toHaveResourceLike('AWS::Lambda::Function', { + Template.fromStack(pipelineStack).resourceCountIs('AWS::Lambda::Function', 1); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::Lambda::Function', { Role: { 'Fn::GetAtt': [ stringLike('CdkPipeline*SecurityCheckCDKPipelinesAutoApproveServiceRole*'), @@ -51,7 +50,7 @@ behavior('security check option generates lambda/codebuild at pipeline scope', ( }, }); // 1 for github build, 1 for synth stage, and 1 for the application security check - expect(pipelineStack).toCountResources('AWS::CodeBuild::Project', 3); + Template.fromStack(pipelineStack).resourceCountIs('AWS::CodeBuild::Project', 3); } }); @@ -78,24 +77,24 @@ behavior('security check option passes correct environment variables to 
check pr }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith( + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([ { Name: 'App', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: stringLike('*Check'), - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: 'STAGE_PATH', type: 'PLAINTEXT', value: 'PipelineSecurityStack/App' }, { name: 'STAGE_NAME', type: 'PLAINTEXT', value: 'App' }, - { name: 'ACTION_NAME', type: 'PLAINTEXT', value: anything() }, + { name: 'ACTION_NAME', type: 'PLAINTEXT', value: Match.anyValue() }, ]), }), }), - ), + ]), }, - ), + ]), }); } }); @@ -124,7 +123,7 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid function THEN_codePipelineExpectation() { // CodePipeline must be tagged as SECURITY_CHECK=ALLOW_APPROVE - expect(pipelineStack).toHaveResource('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Tags: [ { Key: 'SECURITY_CHECK', @@ -133,7 +132,7 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid ], }); // Lambda Function only has access to pipelines tagged SECURITY_CHECK=ALLOW_APPROVE - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -148,9 +147,9 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid }, }); // CodeBuild must have access to the stacks and invoking the lambda function - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith( + Statement: Match.arrayWith([ { Action: 'sts:AssumeRole', Condition: { @@ -173,7 +172,7 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid ], }, }, - ), + ]), }, }); } @@ -193,32 +192,32 @@ behavior('confirmBroadeningPermissions option at addApplicationStage runs securi suite.doesNotApply.modern(); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ { - Actions: [{ Name: 'GitHub', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'GitHub', RunOrder: 1 })], Name: 'Source', }, { - Actions: [{ Name: 'Synth', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'Synth', RunOrder: 1 })], Name: 'Build', }, { - Actions: [{ Name: 'SelfMutate', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'SelfMutate', RunOrder: 1 })], Name: 'UpdatePipeline', }, { Actions: [ - { Name: 'StageSecurityCheckStackSecurityCheck', RunOrder: 1 }, - { Name: 'StageSecurityCheckStackManualApproval', RunOrder: 2 }, - { Name: 'AnotherStackSecurityCheck', RunOrder: 5 }, - { Name: 'AnotherStackManualApproval', RunOrder: 6 }, - { Name: 'Stack.Prepare', RunOrder: 3 }, - { Name: 'Stack.Deploy', RunOrder: 4 }, - { Name: 'AnotherStack-Stack.Prepare', RunOrder: 7 }, - { Name: 'AnotherStack-Stack.Deploy', RunOrder: 8 }, - { Name: 'SkipCheckStack-Stack.Prepare', RunOrder: 9 }, - { Name: 'SkipCheckStack-Stack.Deploy', RunOrder: 10 }, + Match.objectLike({ 
Name: 'StageSecurityCheckStackSecurityCheck', RunOrder: 1 }), + Match.objectLike({ Name: 'StageSecurityCheckStackManualApproval', RunOrder: 2 }), + Match.objectLike({ Name: 'AnotherStackSecurityCheck', RunOrder: 5 }), + Match.objectLike({ Name: 'AnotherStackManualApproval', RunOrder: 6 }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), + Match.objectLike({ Name: 'AnotherStack-Stack.Prepare', RunOrder: 7 }), + Match.objectLike({ Name: 'AnotherStack-Stack.Deploy', RunOrder: 8 }), + Match.objectLike({ Name: 'SkipCheckStack-Stack.Prepare', RunOrder: 9 }), + Match.objectLike({ Name: 'SkipCheckStack-Stack.Deploy', RunOrder: 10 }), ], Name: 'StageSecurityCheckStack', }, @@ -240,28 +239,28 @@ behavior('confirmBroadeningPermissions option at addApplication runs security ch suite.doesNotApply.modern(); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ { - Actions: [{ Name: 'GitHub', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'GitHub', RunOrder: 1 })], Name: 'Source', }, { - Actions: [{ Name: 'Synth', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'Synth', RunOrder: 1 })], Name: 'Build', }, { - Actions: [{ Name: 'SelfMutate', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'SelfMutate', RunOrder: 1 })], Name: 'UpdatePipeline', }, { Actions: [ - { Name: 'EnableCheckStackSecurityCheck', RunOrder: 3 }, - { Name: 'EnableCheckStackManualApproval', RunOrder: 4 }, - { Name: 'Stack.Prepare', RunOrder: 1 }, - { Name: 'Stack.Deploy', RunOrder: 2 }, - { Name: 'EnableCheckStack-Stack.Prepare', RunOrder: 5 }, - { Name: 'EnableCheckStack-Stack.Deploy', RunOrder: 6 }, + Match.objectLike({ Name: 'EnableCheckStackSecurityCheck', RunOrder: 3 }), + Match.objectLike({ Name: 'EnableCheckStackManualApproval', RunOrder: 4 }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 1 }), + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 2 }), + Match.objectLike({ Name: 'EnableCheckStack-Stack.Prepare', RunOrder: 5 }), + Match.objectLike({ Name: 'EnableCheckStack-Stack.Deploy', RunOrder: 6 }), ], Name: 'NoSecurityCheckStack', }, @@ -299,13 +298,13 @@ behavior('confirmBroadeningPermissions and notification topic options generates }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toCountResources('AWS::SNS::Topic', 1); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith( + Template.fromStack(pipelineStack).resourceCountIs('AWS::SNS::Topic', 1); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([ { Name: 'MyStack', Actions: [ - objectLike({ + Match.objectLike({ Configuration: { ProjectName: { Ref: stringLike('*SecurityCheck*') }, EnvironmentVariables: { @@ -320,7 +319,7 @@ behavior('confirmBroadeningPermissions and notification topic options generates Namespace: stringLike('*'), RunOrder: 1, }), - objectLike({ + Match.objectLike({ Configuration: { CustomData: stringLike('#{*.MESSAGE}'), ExternalEntityLink: stringLike('#{*.LINK}'), @@ -328,11 +327,11 @@ behavior('confirmBroadeningPermissions and notification topic options generates Name: stringLike('*Approv*'), RunOrder: 2, }), - objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), - objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), 
+ Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), ], }, - ), + ]), }); } }); @@ -365,10 +364,10 @@ behavior('Stages declared outside the pipeline create their own ApplicationSecur suite.doesNotApply.modern(); function THEN_codePipelineExpectation() { - expect(pipelineStack).toCountResources('AWS::Lambda::Function', 1); + Template.fromStack(pipelineStack).resourceCountIs('AWS::Lambda::Function', 1); // 1 for github build, 1 for synth stage, and 1 for the application security check - expect(pipelineStack).toCountResources('AWS::CodeBuild::Project', 3); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).resourceCountIs('AWS::CodeBuild::Project', 3); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Tags: [ { Key: 'SECURITY_CHECK', @@ -376,28 +375,28 @@ behavior('Stages declared outside the pipeline create their own ApplicationSecur }, ], Stages: [ - { Name: 'Source' }, - { Name: 'Build' }, - { Name: 'UpdatePipeline' }, + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), { Actions: [ - { + Match.objectLike({ Configuration: { ProjectName: { Ref: 'UnattachedStageStageApplicationSecurityCheckCDKSecurityCheckADCE795B' }, }, Name: 'UnattachedStageSecurityCheck', RunOrder: 1, - }, - { + }), + Match.objectLike({ Configuration: { CustomData: '#{UnattachedStageSecurityCheck.MESSAGE}', ExternalEntityLink: '#{UnattachedStageSecurityCheck.LINK}', }, Name: 'UnattachedStageManualApproval', RunOrder: 2, - }, - { Name: 'Stack.Prepare', RunOrder: 3 }, - { Name: 'Stack.Deploy', RunOrder: 4 }, + }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), ], Name: 'UnattachedStage', }, diff --git a/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts b/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts index 8196c84a0920b..f672898107c30 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts @@ -1,10 +1,9 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { anything, arrayWith, deepObjectLike, encodedJson, notMatching, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import * as cb from '@aws-cdk/aws-codebuild'; import * as cp from '@aws-cdk/aws-codepipeline'; import { Stack, Stage } from '@aws-cdk/core'; -import { behavior, LegacyTestGitHubNpmPipeline, PIPELINE_ENV, stackTemplate, TestApp, ModernTestGitHubNpmPipeline } from '../testhelpers'; +import { behavior, LegacyTestGitHubNpmPipeline, PIPELINE_ENV, TestApp, ModernTestGitHubNpmPipeline } from '../testhelpers'; let app: TestApp; let pipelineStack: Stack; @@ -31,32 +30,32 @@ behavior('CodePipeline has self-mutation stage', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'UpdatePipeline', Actions: [ - objectLike({ + Match.objectLike({ Name: 'SelfMutate', - Configuration: objectLike({ - ProjectName: { Ref: anything() }, + Configuration: Match.objectLike({ + ProjectName: { Ref: Match.anyValue() }, }), }), ], - }), + }]), }); - 
expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm install -g aws-cdk'], }, build: { - commands: arrayWith('cdk -a . deploy PipelineStack --require-approval=never --verbose'), + commands: Match.arrayWith(['cdk -a . deploy PipelineStack --require-approval=never --verbose']), }, }, })), @@ -84,15 +83,15 @@ behavior('selfmutation stage correctly identifies nested assembly of pipeline st }); function THEN_codePipelineExpectation(nestedPipelineStack: Stack) { - expect(stackTemplate(nestedPipelineStack)).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(nestedPipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith('cdk -a assembly-PipelineStage deploy PipelineStage/PipelineStack --require-approval=never --verbose'), + commands: Match.arrayWith(['cdk -a assembly-PipelineStage deploy PipelineStage/PipelineStack --require-approval=never --verbose']), }, }, })), @@ -123,11 +122,11 @@ behavior('selfmutation feature can be turned off', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: notMatching(arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.not(Match.arrayWith([{ Name: 'UpdatePipeline', - Actions: anything(), - })), + Actions: Match.anyValue(), + }])), }); } }); @@ -154,10 +153,10 @@ behavior('can control fix/CLI version used in pipeline selfupdate', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Name: 'vpipe-selfupdate', Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm install -g aws-cdk@1.2.3'], @@ -177,7 +176,7 @@ behavior('Pipeline stack itself can use assets (has implications for selfupdate) }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { PrivilegedMode: true, }, @@ -191,7 +190,7 @@ behavior('Pipeline stack itself can use assets (has implications for selfupdate) }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { PrivilegedMode: true, }, @@ -212,9 +211,9 @@ behavior('self-update project role uses tagged bootstrap-role permissions', (sui }); function THEN_codePipelineExpectations() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith( + Statement: Match.arrayWith([ { Action: 'sts:AssumeRole', Effect: 'Allow', @@ -235,7 +234,7 @@ behavior('self-update project role uses tagged bootstrap-role permissions', (sui Effect: 'Allow', Resource: '*', }, - ), 
+ ]), }, }); } @@ -280,19 +279,19 @@ behavior('self-mutation stage can be customized with BuildSpec', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', PrivilegedMode: false, }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm config set registry example.com', 'npm install -g aws-cdk'], }, build: { - commands: arrayWith('cdk -a . deploy PipelineStack --require-approval=never --verbose'), + commands: Match.arrayWith(['cdk -a . deploy PipelineStack --require-approval=never --verbose']), }, }, cache: { diff --git a/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts b/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts index cb21139b16364..9b056b8af2ece 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts @@ -1,7 +1,6 @@ -import { arrayWith, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import { App, Stack } from '@aws-cdk/core'; -import { behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortedByRunOrder, TestApp, ThreeStackApp, TwoStackApp } from '../testhelpers'; +import { behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortByRunOrder, TestApp, ThreeStackApp, TwoStackApp } from '../testhelpers'; let app: App; let pipelineStack: Stack; @@ -28,16 +27,16 @@ behavior('interdependent stacks are in the right order', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'Stack2.Deploy' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), ]), - }), + }]), }); } }); @@ -59,20 +58,20 @@ behavior('multiple independent stacks go in parallel', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ + Actions: sortByRunOrder([ // 1 and 2 in parallel - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Deploy' }), + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + Match.objectLike({ Name: 'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), // Then 3 - objectLike({ Name: 'Stack3.Prepare' }), - objectLike({ Name: 'Stack3.Deploy' }), + Match.objectLike({ Name: 'Stack3.Prepare' }), + 
Match.objectLike({ Name: 'Stack3.Deploy' }), ]), - }), + }]), }); } }); @@ -86,18 +85,18 @@ behavior('user can request manual change set approvals', (suite) => { }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'ManualApproval' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'ManualApproval2' }), - objectLike({ Name: 'Stack2.Deploy' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 'ManualApproval' }), + Match.objectLike({ Name: 'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + Match.objectLike({ Name: 'ManualApproval2' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), ]), - }), + }]), }); }); @@ -114,28 +113,28 @@ behavior('user can request extra runorder space between prepare and deploy', (su }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare', RunOrder: 1, }), - objectLike({ + Match.objectLike({ Name: 'Stack1.Deploy', RunOrder: 3, }), - objectLike({ + Match.objectLike({ Name: 'Stack2.Prepare', RunOrder: 4, }), - objectLike({ + Match.objectLike({ Name: 'Stack2.Deploy', RunOrder: 6, }), ]), - }), + }]), }); }); @@ -153,24 +152,24 @@ behavior('user can request both manual change set approval and extraRunOrderSpac }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 1, }), - objectLike({ + Match.objectLike({ Name: 'ManualApproval', RunOrder: 2, }), - objectLike({ + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4, }), ]), - }), + }]), }); }); diff --git a/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts b/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts index 58bae441ee156..f8e39a536309f 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts @@ -1,5 +1,4 @@ -import { arrayWith, deepObjectLike, encodedJson, objectLike, Capture, anything } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Capture, Match, Template } from '@aws-cdk/assertions'; import * as cbuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as ec2 from '@aws-cdk/aws-ec2'; @@ -64,12 +63,12 @@ behavior('synth takes arrays of commands', (suite) => { function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: 
Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { commands: [ @@ -112,12 +111,12 @@ behavior('synth sets artifact base-directory to cdk.out', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ artifacts: { 'base-directory': 'cdk.out', }, @@ -154,15 +153,15 @@ behavior('synth supports setting subdirectory', (suite) => { function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { - commands: arrayWith('cd subdir'), + commands: Match.arrayWith(['cd subdir']), }, }, artifacts: { @@ -201,7 +200,7 @@ behavior('npm synth sets, or allows setting, UNSAFE_PERM=true', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { EnvironmentVariables: [ { @@ -225,12 +224,12 @@ behavior('synth assumes a JavaScript project by default (no build, yes synth)', }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { commands: ['npm ci'], @@ -278,24 +277,24 @@ behavior('Magic CodePipeline variables passed to synth envvars must be rendered function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Build', Actions: [ - objectLike({ + Match.objectLike({ Name: 'Synth', - Configuration: objectLike({ - EnvironmentVariables: encodedJson(arrayWith( + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson(Match.arrayWith([ { name: 'VERSION', type: 'PLAINTEXT', value: '#{codepipeline.PipelineExecutionId}', }, - )), + ])), }), }), ], - }), + }]), }); } }); @@ -354,24 +353,24 @@ behavior('CodeBuild: environment variables specified in multiple places are corr function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - Environment: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + Environment: Match.objectLike({ PrivilegedMode: true, - EnvironmentVariables: arrayWith( + EnvironmentVariables: Match.arrayWith([ { - Name: 'SOME_ENV_VAR', + Name: 'INNER_VAR', Type: 'PLAINTEXT', - Value: 'SomeValue', + Value: 'InnerValue', }, { - Name: 'INNER_VAR', + Name: 'SOME_ENV_VAR', Type: 'PLAINTEXT', - Value: 'InnerValue', + Value: 'SomeValue', }, - ), + ]), }), Source: { - BuildSpec: 
encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { commands: ['install1', 'install2'], @@ -413,12 +412,12 @@ behavior('install command can be overridden/specified', (suite) => { function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { commands: ['/bin/true'], @@ -445,12 +444,12 @@ behavior('synth can have its test commands set', (suite) => { }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(objectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { commands: ['/bin/true'], @@ -506,12 +505,12 @@ behavior('Synth can output additional artifacts', (suite) => { function THEN_codePipelineExpectation(asmArtifact: string, testArtifact: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ artifacts: { 'secondary-artifacts': { [asmArtifact]: { @@ -585,7 +584,7 @@ behavior('Synth can be made to run in a VPC', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { VpcConfig: { SecurityGroupIds: [ { 'Fn::GetAtt': ['CdkPipelineBuildSynthCdkBuildProjectSecurityGroupEA44D7C2', 'GroupId'] }, @@ -599,16 +598,16 @@ behavior('Synth can be made to run in a VPC', (suite) => { }, }); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { Roles: [ { Ref: 'CdkPipelineBuildSynthCdkBuildProjectRole5E173C62' }, ], PolicyDocument: { - Statement: arrayWith({ - Action: arrayWith('ec2:DescribeSecurityGroups'), + Statement: Match.arrayWith([{ + Action: Match.arrayWith(['ec2:DescribeSecurityGroups']), Effect: 'Allow', Resource: '*', - }), + }]), }, }); } @@ -721,28 +720,28 @@ behavior('Pipeline action contains a hash that changes as the buildspec changes' } function captureProjectConfigHash(_pipelineStack: Stack) { - const theHash = Capture.aString(); - expect(_pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + const theHash = new Capture(); + Template.fromStack(_pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Build', Actions: [ - objectLike({ + Match.objectLike({ Name: 'Synth', - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: '_PROJECT_CONFIG_HASH', type: 'PLAINTEXT', - value: theHash.capture(), + value: theHash, }, ]), }), }), ], - }), + }]), }); - return theHash.capturedValue; + return theHash.asString(); } }); @@ -784,12 +783,12 @@ 
behavior('Synth CodeBuild project role can be granted permissions', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'], Resource: ['arn:aws:s3:::ThisParticularBucket', 'arn:aws:s3:::ThisParticularBucket/*'], - })), + })]), }, }); } @@ -878,15 +877,15 @@ behavior('CodeBuild: Can specify additional policy statements', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: [ 'codeartifact:*', 'sts:GetServiceBearerToken', ], Resource: 'arn:my:arn', - })), + })]), }, }); } @@ -913,38 +912,38 @@ behavior('Multiple input sources in side-by-side directories', (suite) => { }), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith( + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([ { Name: 'Source', Actions: [ - objectLike({ Configuration: objectLike({ Repo: 'bar' }) }), - objectLike({ Configuration: objectLike({ Repo: 'build' }) }), - objectLike({ Configuration: objectLike({ Repo: 'test' }) }), + Match.objectLike({ Configuration: Match.objectLike({ Repo: 'bar' }) }), + Match.objectLike({ Configuration: Match.objectLike({ Repo: 'build' }) }), + Match.objectLike({ Configuration: Match.objectLike({ Repo: 'test' }) }), ], }, { Name: 'Build', Actions: [ - objectLike({ Name: 'Prebuild', RunOrder: 1 }), - objectLike({ + Match.objectLike({ Name: 'Prebuild', RunOrder: 1 }), + Match.objectLike({ Name: 'Synth', RunOrder: 2, InputArtifacts: [ // 3 input artifacts - anything(), - anything(), - anything(), + Match.anyValue(), + Match.anyValue(), + Match.anyValue(), ], }), ], }, - ), + ]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: [ @@ -975,12 +974,12 @@ behavior('Can easily switch on privileged mode for synth', (suite) => { commands: ['LookAtMe'], }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - Environment: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + Environment: Match.objectLike({ PrivilegedMode: true, }), Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -1079,19 +1078,19 @@ behavior('can provide custom BuildSpec that is merged with generated one', (suit function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - Environment: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + Environment: Match.objectLike({ PrivilegedMode: true, - EnvironmentVariables: arrayWith( + EnvironmentVariables: Match.arrayWith([ { Name: 'INNER_VAR', Type: 'PLAINTEXT', Value: 'InnerValue', }, - ), + ]), }), 
Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ env: { variables: { FOO: 'bar', @@ -1099,7 +1098,7 @@ behavior('can provide custom BuildSpec that is merged with generated one', (suit }, phases: { pre_build: { - commands: arrayWith('installCustom'), + commands: Match.arrayWith(['installCustom']), }, build: { commands: ['synth'], diff --git a/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts b/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts index 447e22da59124..7a6a562a8707a 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts @@ -1,6 +1,5 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { anything, arrayWith, Capture, deepObjectLike, encodedJson, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Capture, Match, Template } from '@aws-cdk/assertions'; import * as codebuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as ec2 from '@aws-cdk/aws-ec2'; @@ -9,7 +8,7 @@ import * as s3 from '@aws-cdk/aws-s3'; import { Stack } from '@aws-cdk/core'; import * as cdkp from '../../lib'; import { CodePipelineSource, ShellStep } from '../../lib'; -import { AppWithOutput, behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortedByRunOrder, StageWithStackOutput, stringNoLongerThan, TestApp, TwoStackApp } from '../testhelpers'; +import { AppWithOutput, behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortByRunOrder, StageWithStackOutput, stringNoLongerThan, TestApp, TwoStackApp } from '../testhelpers'; let app: TestApp; let pipelineStack: Stack; @@ -37,17 +36,17 @@ behavior('can add manual approval after app', (suite) => { }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'Stack2.Deploy' }), - objectLike({ Name: 'Approve' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), + Match.objectLike({ Name: 'Approve' }), ]), - }), + }]), }); }); }); @@ -69,19 +68,19 @@ behavior('can add steps to wave', (suite) => { wave.addStage(new OneStackApp(pipelineStack, 'Stage3')); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyWave', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stage1.Stack.Prepare' }), - objectLike({ Name: 'Stage2.Stack.Prepare' }), - objectLike({ Name: 'Stage3.Stack.Prepare' }), - objectLike({ Name: 'Stage1.Stack.Deploy' }), - objectLike({ Name: 'Stage2.Stack.Deploy' }), - objectLike({ Name: 'Stage3.Stack.Deploy' }), - objectLike({ Name: 'Approve' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stage1.Stack.Prepare' }), + Match.objectLike({ Name: 'Stage2.Stack.Prepare' }), + Match.objectLike({ 
Name: 'Stage3.Stack.Prepare' }), + Match.objectLike({ Name: 'Stage1.Stack.Deploy' }), + Match.objectLike({ Name: 'Stage2.Stack.Deploy' }), + Match.objectLike({ Name: 'Stage3.Stack.Deploy' }), + Match.objectLike({ Name: 'Approve' }), ]), - }), + }]), }); }); }); @@ -104,37 +103,37 @@ behavior('script validation steps can use stack outputs as environment variables })); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: arrayWith( - deepObjectLike({ - Name: 'Stack.Deploy', - OutputArtifacts: [{ Name: anything() }], - Configuration: { - OutputFileName: 'outputs.json', - }, - }), - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ ActionTypeId: { Provider: 'CodeBuild', }, Configuration: { - ProjectName: anything(), + ProjectName: Match.anyValue(), }, - InputArtifacts: [{ Name: anything() }], + InputArtifacts: [{ Name: Match.anyValue() }], Name: 'TestOutput', }), - ), - }), + Match.objectLike({ + Name: 'Stack.Deploy', + OutputArtifacts: [{ Name: Match.anyValue() }], + Configuration: { + OutputFileName: 'outputs.json', + }, + }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -164,24 +163,24 @@ behavior('script validation steps can use stack outputs as environment variables }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Alpha', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'Stack.Deploy', Namespace: 'AlphaStack6B3389FA', }), - objectLike({ + Match.objectLike({ Name: 'Approve', - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: 'THE_OUTPUT', value: '#{AlphaStack6B3389FA.MyOutput}', type: 'PLAINTEXT' }, ]), }), }), - ), - }), + ]), + }]), }); }); }); @@ -200,29 +199,29 @@ behavior('stackOutput generates names limited to 100 characters', (suite) => { })); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'APreposterouslyLongAndComplicatedNameMadeUpJustToMakeItExceedTheLimitDefinedByCodeBuild', - Actions: arrayWith( - deepObjectLike({ - Name: 'Stack.Deploy', - OutputArtifacts: [{ Name: stringNoLongerThan(100) }], - Configuration: { - OutputFileName: 'outputs.json', - }, - }), - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ ActionTypeId: { Provider: 'CodeBuild', }, Configuration: { - ProjectName: anything(), + ProjectName: Match.anyValue(), }, InputArtifacts: [{ Name: stringNoLongerThan(100) }], Name: 'TestOutput', }), - ), - }), + Match.objectLike({ + Name: 'Stack.Deploy', + OutputArtifacts: [{ Name: stringNoLongerThan(100) }], + Configuration: { + OutputFileName: 'outputs.json', + }, + }), + ]), + }]), }); }); @@ -240,16 +239,16 @@ 
behavior('stackOutput generates names limited to 100 characters', (suite) => { ], }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'APreposterouslyLongAndComplicatedNameMadeUpJustToMakeItExceedTheLimitDefinedByCodeBuild', - Actions: arrayWith( - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'Stack.Deploy', Namespace: stringNoLongerThan(100), }), - ), - }), + ]), + }]), }); }); }); @@ -283,35 +282,35 @@ behavior('validation step can run from scripts in source', (suite) => { }); function THEN_codePipelineExpectation() { - const sourceArtifact = Capture.aString(); + const sourceArtifact = new Capture(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - deepObjectLike({ - OutputArtifacts: [{ Name: sourceArtifact.capture() }], + Match.objectLike({ + OutputArtifacts: [{ Name: sourceArtifact }], }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'UseSources', - InputArtifacts: [{ Name: sourceArtifact.capturedValue }], + InputArtifacts: [{ Name: sourceArtifact.asString() }], }), - ), - }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -361,40 +360,40 @@ behavior('can use additional output artifacts from build', (suite) => { }); function THEN_codePipelineExpectation() { - const integArtifact = Capture.aString(); + const integArtifact = new Capture(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Build', Actions: [ - deepObjectLike({ + Match.objectLike({ Name: 'Synth', OutputArtifacts: [ - { Name: anything() }, // It's not the first output - { Name: integArtifact.capture() }, + { Name: Match.anyValue() }, // It's not the first output + { Name: integArtifact }, ], }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'UseBuildArtifact', - InputArtifacts: [{ Name: integArtifact.capturedValue }], + InputArtifacts: [{ Name: integArtifact.asString() }], }), - ), - }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: 
encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -450,12 +449,12 @@ behavior('can add policy statements to shell script action', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: 's3:Banana', Resource: '*', - })), + })]), }, }); } @@ -502,12 +501,12 @@ behavior('can grant permissions to shell script action', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'], Resource: ['arn:aws:s3:::ThisParticularBucket', 'arn:aws:s3:::ThisParticularBucket/*'], - })), + })]), }, }); } @@ -562,7 +561,7 @@ behavior('can run shell script actions in a VPC', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, @@ -583,7 +582,7 @@ behavior('can run shell script actions in a VPC', (suite) => { }, }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -636,17 +635,17 @@ behavior('can run shell script actions with a specific SecurityGroup', (suite) = }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'sgAction', }), - ), - }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { VpcConfig: { SecurityGroupIds: [ { @@ -714,7 +713,7 @@ behavior('can run scripts with specified BuildEnvironment', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:2.0', }, @@ -755,14 +754,14 @@ behavior('can run scripts with magic environment variables', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'imageAction', - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: 'VERSION', type: 'PLAINTEXT', @@ -771,8 +770,8 @@ behavior('can run scripts with magic environment variables', (suite) => { ]), }), }), - 
), - }), + ]), + }]), }); } }); diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/index.ts b/packages/@aws-cdk/pipelines/test/testhelpers/index.ts index 87a02ce0b6a66..fbc50d3b1a003 100644 --- a/packages/@aws-cdk/pipelines/test/testhelpers/index.ts +++ b/packages/@aws-cdk/pipelines/test/testhelpers/index.ts @@ -2,5 +2,4 @@ export * from './compliance'; export * from './legacy-pipeline'; export * from './modern-pipeline'; export * from './test-app'; -export * from './testmatchers'; export * from './matchers'; \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts b/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts index 4ace0148c5eaa..97a02fc1dc10d 100644 --- a/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts +++ b/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts @@ -1,9 +1,20 @@ -import { Matcher, MatchResult } from '@aws-cdk/assertions'; +import { Match, Matcher, MatchResult } from '@aws-cdk/assertions'; export function stringLike(pattern: string) { return new StringLike(pattern); } +export function sortByRunOrder(pattern: any[]): Matcher { + return new Sorter('SortByRunOrder', pattern, (a: any, b: any) => { + if (a.RunOrder !== b.RunOrder) { return a.RunOrder - b.RunOrder; } + return (a.Name as string).localeCompare(b.Name); + }); +} + +export function stringNoLongerThan(max: number): Matcher { + return new StringLengthMatcher(max); +} + // Reimplementation of // https://github.com/aws/aws-cdk/blob/430f50a546e9c575f8cdbd259367e440d985e68f/packages/%40aws-cdk/assert-internal/lib/assertions/have-resource-matchers.ts#L244 class StringLike extends Matcher { @@ -24,9 +35,55 @@ class StringLike extends Matcher { result.push(this, [], `Looking for string with pattern "${this.pattern}" but found "${actual}"`); } return result; + + function escapeRegex(s: string) { + return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string + } + } +} + +class Sorter extends Matcher { + constructor( + public readonly name: string, + private readonly pattern: any[], + private readonly compareFn: (a: any, b: any) => number, + ) { + super(); + } + + public test(actual: any): MatchResult { + const result = new MatchResult(actual); + if (!Array.isArray(actual)) { + result.push(this, [], `Expected an Array, but got '${typeof actual}'`); + return result; + } + + const copy = actual.slice(); + copy.sort(this.compareFn); + + const matcher = Matcher.isMatcher(this.pattern) ? this.pattern : Match.exact(this.pattern); + return matcher.test(copy); } } -function escapeRegex(s: string) { - return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +class StringLengthMatcher extends Matcher { + public name: string = 'StringLength' + + constructor(private readonly length: number) { + super(); + } + + public test(actual: any): MatchResult { + const result = new MatchResult(actual); + + if (typeof actual !== 'string') { + result.push(this, [], `Expected a string, but got '${typeof actual}'`); + } + + if (actual.length > this.length) { + result.push(this, [], `String is ${actual.length} characters long. 
Expected at most ${this.length} characters`); + } + + return result; + } } \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/testmatchers.ts b/packages/@aws-cdk/pipelines/test/testhelpers/testmatchers.ts deleted file mode 100644 index 8faa855b71abf..0000000000000 --- a/packages/@aws-cdk/pipelines/test/testhelpers/testmatchers.ts +++ /dev/null @@ -1,42 +0,0 @@ -/* eslint-disable import/no-extraneous-dependencies */ -import { annotateMatcher, InspectionFailure, matcherFrom, PropertyMatcher } from '@aws-cdk/assert-internal'; - -/** - * Sort an array (of Actions) by their RunOrder field before applying a matcher. - * - * Makes the matcher independent of the order in which the Actions get synthed - * to the template. Elements with the same RunOrder will be sorted by name. - */ -export function sortedByRunOrder(matcher: any): PropertyMatcher { - return annotateMatcher({ $sortedByRunOrder: matcher }, (value: any, failure: InspectionFailure) => { - if (!Array.isArray(value)) { - failure.failureReason = `Expected an Array, but got '${typeof value}'`; - return false; - } - - value = value.slice(); - - value.sort((a: any, b: any) => { - if (a.RunOrder !== b.RunOrder) { return a.RunOrder - b.RunOrder; } - return (a.Name as string).localeCompare(b.Name); - }); - - return matcherFrom(matcher)(value, failure); - }); -} - -export function stringNoLongerThan(length: number): PropertyMatcher { - return annotateMatcher({ $stringIsNoLongerThan: length }, (value: any, failure: InspectionFailure) => { - if (typeof value !== 'string') { - failure.failureReason = `Expected a string, but got '${typeof value}'`; - return false; - } - - if (value.length > length) { - failure.failureReason = `String is ${value.length} characters long. Expected at most ${length} characters`; - return false; - } - - return true; - }); -} \ No newline at end of file From d5dd2d0b48fa1d56bc482f01ad182497f4363675 Mon Sep 17 00:00:00 2001 From: Julian Michel Date: Mon, 13 Sep 2021 17:59:22 +0200 Subject: [PATCH 40/41] chore(rds): add MariaDB ver 10.5.12, 10.4.21, 10.3.31, 10.2.40 and Aurora Postgres ver 13.3 (#16466) Add new RDS versions: **MariaDbEngineVersion 10.5.12, 10.4.21, 10.3.31, 10.2.40** Announcement: https://aws.amazon.com/about-aws/whats-new/2021/09/amazon-rds-mariadb-new-minor-versions/ **AuroraPostgresEngineVersion 13.3** Announcement: https://aws.amazon.com/about-aws/whats-new/2021/08/amazon-aurora-postgresql-13/ According to AWS CLI and AWS Console, the exact EngineVersion is 13.3. s3Export and s3Import are supported, see command `aws rds describe-db-engine-versions --region us-east-1 --engine aurora-postgresql --engine-version 13`. ---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-rds/lib/cluster-engine.ts | 2 ++ packages/@aws-cdk/aws-rds/lib/instance-engine.ts | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts b/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts index 973f682e932c7..6bbb94261cec8 100644 --- a/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts @@ -465,6 +465,8 @@ export class AuroraPostgresEngineVersion { public static readonly VER_12_4 = AuroraPostgresEngineVersion.of('12.4', '12', { s3Import: true, s3Export: true }); /** Version "12.6". 
*/ public static readonly VER_12_6 = AuroraPostgresEngineVersion.of('12.6', '12', { s3Import: true, s3Export: true }); + /** Version "13.3". */ + public static readonly VER_13_3 = AuroraPostgresEngineVersion.of('13.3', '13', { s3Import: true, s3Export: true }); /** * Create a new AuroraPostgresEngineVersion with an arbitrary version. diff --git a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts index c070b0988e314..3ed27351c2196 100644 --- a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts @@ -249,6 +249,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_2_37 = MariaDbEngineVersion.of('10.2.37', '10.2'); /** Version "10.2.39". */ public static readonly VER_10_2_39 = MariaDbEngineVersion.of('10.2.39', '10.2'); + /** Version "10.2.40". */ + public static readonly VER_10_2_40 = MariaDbEngineVersion.of('10.2.40', '10.2'); /** Version "10.3" (only a major version, without a specific minor version). */ public static readonly VER_10_3 = MariaDbEngineVersion.of('10.3', '10.3'); @@ -262,6 +264,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_3_23 = MariaDbEngineVersion.of('10.3.23', '10.3'); /** Version "10.3.28". */ public static readonly VER_10_3_28 = MariaDbEngineVersion.of('10.3.28', '10.3'); + /** Version "10.3.31". */ + public static readonly VER_10_3_31 = MariaDbEngineVersion.of('10.3.31', '10.3'); /** Version "10.4" (only a major version, without a specific minor version). */ public static readonly VER_10_4 = MariaDbEngineVersion.of('10.4', '10.4'); @@ -271,6 +275,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_4_13 = MariaDbEngineVersion.of('10.4.13', '10.4'); /** Version "10.4.18". */ public static readonly VER_10_4_18 = MariaDbEngineVersion.of('10.4.18', '10.4'); + /** Version "10.4.21". */ + public static readonly VER_10_4_21 = MariaDbEngineVersion.of('10.4.21', '10.4'); /** Version "10.5" (only a major version, without a specific minor version). */ public static readonly VER_10_5 = MariaDbEngineVersion.of('10.5', '10.5'); @@ -278,6 +284,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_5_8 = MariaDbEngineVersion.of('10.5.8', '10.5'); /** Version "10.5.9". */ public static readonly VER_10_5_9 = MariaDbEngineVersion.of('10.5.9', '10.5'); + /** Version "10.5.12". */ + public static readonly VER_10_5_12 = MariaDbEngineVersion.of('10.5.12', '10.5'); /** * Create a new MariaDbEngineVersion with an arbitrary version. From a9d51185a144cd4962c85227ae5b904510399fa4 Mon Sep 17 00:00:00 2001 From: Ben Chaimberg Date: Mon, 13 Sep 2021 14:40:49 -0400 Subject: [PATCH 41/41] feat(redshift): manage database users and tables via cdk (#15931) This feature allows users to manage Redshift database resources, such as users, tables, and grants, within their CDK application. Because these resources do not have CloudFormation handlers, this feature leverages custom resources and the Amazon Redshift Data API for creation and modification. The generic construct for this type of resource is `DatabaseQuery`. This construct provides the base functionality required for interacting with Redshift database resources, including configuring administrator credentials, creating a custom resource handler, and granting necessary IAM permissions. The custom resource handler code contains utility functions for executing query statements against the Redshift database. 
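For orientation, that utility boils down to: submit the SQL through the Redshift Data API using the cluster's admin secret, then poll the statement until it reaches a terminal status. A condensed sketch follows (the full implementation is in `database-query-provider/util.ts` in the diff below; the real code recurses with a fixed wait rather than looping):

```ts
import * as RedshiftData from 'aws-sdk/clients/redshiftdata';

const redshiftData = new RedshiftData();

// Condensed sketch of `executeStatement`/`waitForStatementComplete` from this patch.
async function executeStatement(sql: string, clusterName: string, databaseName: string, adminUserArn: string): Promise<void> {
  // Submit the statement, authenticating with the cluster's admin secret.
  const executed = await redshiftData.executeStatement({
    ClusterIdentifier: clusterName,
    Database: databaseName,
    SecretArn: adminUserArn,
    Sql: sql,
  }).promise();
  if (!executed.Id) {
    throw new Error('Statement execution did not return a statement ID');
  }
  // Poll until the Data API reports FINISHED, FAILED, or ABORTED.
  for (;;) {
    await new Promise<void>(resolve => setTimeout(resolve, 100));
    const { Status, Error: statementError } = await redshiftData.describeStatement({ Id: executed.Id }).promise();
    if (Status === 'FINISHED') { return; }
    if (Status === 'FAILED' || Status === 'ABORTED') {
      throw new Error(`Statement status was ${Status}: ${statementError}`);
    }
  }
}
```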
Specific resources that use the `DatabaseQuery` construct, such as `User` and `Table`, are responsible for providing the following to `DatabaseQuery`: generic database configuration properties and specific configuration properties that will get passed to the custom resource handler (e.g., `username` for `User`). Specific resources are also responsible for writing the lifecycle-management code within the handler. In general, this consists of configuration extraction (e.g., pulling `username` from the `AWSLambda.CloudFormationCustomResourceEvent` passed to the handler) and one method for each lifecycle event (create, update, delete) that queries the database using calls to the generic utility function.

Users have a fairly simple lifecycle: they can be created and deleted, and they are updated when the secret containing their password is updated (secret rotation has not been implemented yet). Because of #9815, the custom resource provider queries Secrets Manager in order to access the password.

Tables have a more complicated lifecycle, because we want to allow columns to be added to the table without resource replacement, and to ensure that dropped columns do not lose data. For these reasons, we generate a unique per-deployment name whenever the end user asks for the table name to be generated. We also create a new table (using a newly generated name) if a column is to be dropped, and let CFN lifecycle rules dictate whether the old table should be removed or kept.

User privileges on tables are implemented via the `UserTablePrivileges` construct. This construct is located in the `private` directory to ensure that it is not exported for direct public use. This means that user privileges must be managed through the `Table.grant` method or the `User.addTablePrivileges` method. Thus, each `User` will have at most one `UserTablePrivileges` construct to manage its privileges. This is to avoid a situation where privileges could be erroneously removed when the same privilege is managed from two different CDK applications. For more details, see the README, under "Granting Privileges".
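As a concrete illustration (mirroring the README examples added in this change, which run inside a `Stack` with an existing cluster), the resulting API looks like:

```ts
declare const cluster: Cluster; // an existing redshift.Cluster, as in the README fixtures

const user = new User(this, 'User', { cluster, databaseName: 'databaseName' });
const table = new Table(this, 'Table', {
  tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }],
  cluster,
  databaseName: 'databaseName',
});

// `grant` delegates to `user.addTablePrivileges(table, ...)`, which records the
// privileges in the user's single `UserTablePrivileges` construct.
table.grant(user, TableAction.INSERT, TableAction.SELECT);
```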
---- *By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license* --- packages/@aws-cdk/aws-redshift/README.md | 201 ++- .../aws-redshift/lib/database-options.ts | 26 + packages/@aws-cdk/aws-redshift/lib/index.ts | 3 + .../database-query-provider/handler-name.ts | 5 + .../private/database-query-provider/index.ts | 20 + .../database-query-provider/privileges.ts | 70 + .../private/database-query-provider/table.ts | 75 + .../private/database-query-provider/user.ts | 82 + .../private/database-query-provider/util.ts | 40 + .../lib/private/database-query.ts | 105 ++ .../aws-redshift/lib/private/handler-props.ts | 31 + .../aws-redshift/lib/private/privileges.ts | 101 ++ packages/@aws-cdk/aws-redshift/lib/table.ts | 222 +++ packages/@aws-cdk/aws-redshift/lib/user.ts | 186 +++ packages/@aws-cdk/aws-redshift/package.json | 15 +- .../aws-redshift/rosetta/cluster.ts-fixture | 20 + .../aws-redshift/rosetta/default.ts-fixture | 11 + .../database-query-provider/index.test.ts | 50 + .../privileges.test.ts | 163 ++ .../database-query-provider/table.test.ts | 202 +++ .../test/database-query-provider/user.test.ts | 163 ++ .../aws-redshift/test/database-query.test.ts | 200 +++ .../test/integ.database.expected.json | 1377 +++++++++++++++++ .../aws-redshift/test/integ.database.ts | 44 + .../aws-redshift/test/privileges.test.ts | 113 ++ .../@aws-cdk/aws-redshift/test/table.test.ts | 138 ++ .../@aws-cdk/aws-redshift/test/user.test.ts | 215 +++ 27 files changed, 3861 insertions(+), 17 deletions(-) create mode 100644 packages/@aws-cdk/aws-redshift/lib/database-options.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/handler-name.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/index.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/privileges.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/table.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/user.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/util.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/database-query.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/handler-props.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/private/privileges.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/table.ts create mode 100644 packages/@aws-cdk/aws-redshift/lib/user.ts create mode 100644 packages/@aws-cdk/aws-redshift/rosetta/cluster.ts-fixture create mode 100644 packages/@aws-cdk/aws-redshift/rosetta/default.ts-fixture create mode 100644 packages/@aws-cdk/aws-redshift/test/database-query-provider/index.test.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/database-query-provider/privileges.test.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/database-query-provider/table.test.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/database-query-provider/user.test.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/database-query.test.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/integ.database.expected.json create mode 100644 packages/@aws-cdk/aws-redshift/test/integ.database.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/privileges.test.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/table.test.ts create mode 100644 packages/@aws-cdk/aws-redshift/test/user.test.ts diff --git 
a/packages/@aws-cdk/aws-redshift/README.md b/packages/@aws-cdk/aws-redshift/README.md index 576068b02f818..8ff734a6be255 100644 --- a/packages/@aws-cdk/aws-redshift/README.md +++ b/packages/@aws-cdk/aws-redshift/README.md @@ -26,15 +26,16 @@ To set up a Redshift cluster, define a `Cluster`. It will be launched in a VPC. You can specify a VPC, otherwise one will be created. The nodes are always launched in private subnets and are encrypted by default. -``` typescript -import redshift = require('@aws-cdk/aws-redshift'); -... -const cluster = new redshift.Cluster(this, 'Redshift', { - masterUser: { - masterUsername: 'admin', - }, - vpc - }); +```ts +import * as ec2 from '@aws-cdk/aws-ec2'; + +const vpc = new ec2.Vpc(this, 'Vpc'); +const cluster = new Cluster(this, 'Redshift', { + masterUser: { + masterUsername: 'admin', + }, + vpc +}); ``` By default, the master password will be generated and stored in AWS Secrets Manager. @@ -49,13 +50,13 @@ Depending on your use case, you can make the cluster publicly accessible with th To control who can access the cluster, use the `.connections` attribute. Redshift Clusters have a default port, so you don't need to specify the port: -```ts -cluster.connections.allowFromAnyIpv4('Open to the world'); +```ts fixture=cluster +cluster.connections.allowDefaultPortFromAnyIpv4('Open to the world'); ``` The endpoint to access your database cluster will be available as the `.clusterEndpoint` attribute: -```ts +```ts fixture=cluster cluster.clusterEndpoint.socketAddress; // "HOSTNAME:PORT" ``` @@ -63,16 +64,184 @@ cluster.clusterEndpoint.socketAddress; // "HOSTNAME:PORT" When the master password is generated and stored in AWS Secrets Manager, it can be rotated automatically: -```ts +```ts fixture=cluster cluster.addRotationSingleUser(); // Will rotate automatically after 30 days ``` The multi user rotation scheme is also available: -```ts +```ts fixture=cluster +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; + cluster.addRotationMultiUser('MyUser', { - secret: myImportedSecret + secret: secretsmanager.Secret.fromSecretNameV2(this, 'Imported Secret', 'my-secret'), +}); +``` + +## Database Resources + +This module allows for the creation of non-CloudFormation database resources such as users +and tables. This allows you to manage identities, permissions, and stateful resources +within your Redshift cluster from your CDK application. + +Because these resources are not available in CloudFormation, this library leverages +[custom +resources](https://docs.aws.amazon.com/cdk/api/latest/docs/custom-resources-readme.html) +to manage them. In addition to the IAM permissions required to make Redshift service +calls, the execution role for the custom resource handler requires database credentials to +create resources within the cluster. + +These database credentials can be supplied explicitly through the `adminUser` properties +of the various database resource constructs. Alternatively, the credentials can be +automatically pulled from the Redshift cluster's default administrator +credentials. 
However, this option is only available if the password for the credentials
+was generated by the CDK application (i.e., no value was provided for [the `masterPassword`
+property](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-redshift.Login.html#masterpasswordspan-classapi-icon-api-icon-experimental-titlethis-api-element-is-experimental-it-may-change-without-noticespan)
+of
+[`Cluster.masterUser`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-redshift.Cluster.html#masteruserspan-classapi-icon-api-icon-experimental-titlethis-api-element-is-experimental-it-may-change-without-noticespan)).
+
+### Creating Users
+
+Create a user within a Redshift cluster database by instantiating a `User` construct. This
+will generate a username and password, store the credentials in an [AWS Secrets Manager
+`Secret`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-secretsmanager.Secret.html),
+and make a query to the Redshift cluster to create a new database user with the
+credentials.
+
+```ts fixture=cluster
+new User(this, 'User', {
+  cluster: cluster,
+  databaseName: 'databaseName',
+});
+```
+
+By default, the user credentials are encrypted with your AWS account's default Secrets
+Manager encryption key. You can specify the encryption key used for this purpose by
+supplying a key in the `encryptionKey` property.
+
+```ts fixture=cluster
+import * as kms from '@aws-cdk/aws-kms';
+
+const encryptionKey = new kms.Key(this, 'Key');
+new User(this, 'User', {
+  encryptionKey: encryptionKey,
+  cluster: cluster,
+  databaseName: 'databaseName',
+});
+```
+
+By default, a username is automatically generated from the user construct ID and its path
+in the construct tree. You can specify a particular username by providing a value for the
+`username` property. Usernames must be valid identifiers; see [Names and
+identifiers](https://docs.aws.amazon.com/redshift/latest/dg/r_names.html) in the *Amazon
+Redshift Database Developer Guide*.
+
+```ts fixture=cluster
+new User(this, 'User', {
+  username: 'myuser',
+  cluster: cluster,
+  databaseName: 'databaseName',
+});
+```
+
+The user password is generated by AWS Secrets Manager using the default configuration
+found in
+[`secretsmanager.SecretStringGenerator`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-secretsmanager.SecretStringGenerator.html),
+except with password length `30` and some SQL-incompliant characters excluded. The
+plaintext for the password will never be present in the CDK application; instead, a
+[CloudFormation Dynamic
+Reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html)
+will be used wherever the password value is required.
+
+### Creating Tables
+
+Create a table within a Redshift cluster database by instantiating a `Table`
+construct. This will make a query to the Redshift cluster to create a new database table
+with the supplied schema.
+
+```ts fixture=cluster
+new Table(this, 'Table', {
+  tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }],
+  cluster: cluster,
+  databaseName: 'databaseName',
+});
+```
+
+### Granting Privileges
+
+You can give a user privileges to perform certain actions on a table by using the
+`Table.grant()` method.
+ +```ts fixture=cluster +const user = new User(this, 'User', { + cluster: cluster, + databaseName: 'databaseName', +}); +const table = new Table(this, 'Table', { + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster: cluster, + databaseName: 'databaseName', +}); + +table.grant(user, TableAction.DROP, TableAction.SELECT); +``` + +Take care when managing privileges via the CDK, as attempting to manage a user's +privileges on the same table in multiple CDK applications could lead to accidentally +overriding these permissions. Consider the following two CDK applications which both refer +to the same user and table. In application 1, the resources are created and the user is +given `INSERT` permissions on the table: + +```ts fixture=cluster +const databaseName = 'databaseName'; +const username = 'myuser' +const tableName = 'mytable' + +const user = new User(this, 'User', { + username: username, + cluster: cluster, + databaseName: databaseName, +}); +const table = new Table(this, 'Table', { + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster: cluster, + databaseName: databaseName, +}); +table.grant(user, TableAction.INSERT); +``` + +In application 2, the resources are imported and the user is given `INSERT` permissions on +the table: + +```ts fixture=cluster +const databaseName = 'databaseName'; +const username = 'myuser' +const tableName = 'mytable' + +const user = User.fromUserAttributes(this, 'User', { + username: username, + password: SecretValue.plainText('NOT_FOR_PRODUCTION'), + cluster: cluster, + databaseName: databaseName, +}); +const table = Table.fromTableAttributes(this, 'Table', { + tableName: tableName, + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster: cluster, + databaseName: 'databaseName', }); +table.grant(user, TableAction.INSERT); ``` -This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project. +Both applications attempt to grant the user the appropriate privilege on the table by +submitting a `GRANT USER` SQL query to the Redshift cluster. Note that the latter of these +two calls will have no effect since the user has already been granted the privilege. + +Now, if application 1 were to remove the call to `grant`, a `REVOKE USER` SQL query is +submitted to the Redshift cluster. In general, application 1 does not know that +application 2 has also granted this permission and thus cannot decide not to issue the +revocation. This leads to the undesirable state where application 2 still contains the +call to `grant` but the user does not have the specified permission. + +Note that this does not occur when duplicate privileges are granted within the same +application, as such privileges are de-duplicated before any SQL query is submitted. diff --git a/packages/@aws-cdk/aws-redshift/lib/database-options.ts b/packages/@aws-cdk/aws-redshift/lib/database-options.ts new file mode 100644 index 0000000000000..b7eb21e57e24c --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/database-options.ts @@ -0,0 +1,26 @@ +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; +import { ICluster } from './cluster'; + +/** + * Properties for accessing a Redshift database + */ +export interface DatabaseOptions { + /** + * The cluster containing the database. + */ + readonly cluster: ICluster; + + /** + * The name of the database. 
+   */
+  readonly databaseName: string;
+
+  /**
+   * The secret containing credentials to a Redshift user with administrator privileges.
+   *
+   * Secret JSON schema: `{ username: string; password: string }`.
+   *
+   * @default - the admin secret is taken from the cluster
+   */
+  readonly adminUser?: secretsmanager.ISecret;
+}
diff --git a/packages/@aws-cdk/aws-redshift/lib/index.ts b/packages/@aws-cdk/aws-redshift/lib/index.ts
index 8a8fc89428ce3..ec552d2da8c3c 100644
--- a/packages/@aws-cdk/aws-redshift/lib/index.ts
+++ b/packages/@aws-cdk/aws-redshift/lib/index.ts
@@ -1,8 +1,11 @@
 export * from './cluster';
 export * from './parameter-group';
+export * from './database-options';
 export * from './database-secret';
 export * from './endpoint';
 export * from './subnet-group';
+export * from './table';
+export * from './user';
 
 // AWS::Redshift CloudFormation Resources:
 export * from './redshift.generated';
diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/handler-name.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/handler-name.ts
new file mode 100644
index 0000000000000..b758fb5819063
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/handler-name.ts
@@ -0,0 +1,5 @@
+export enum HandlerName {
+  User = 'user',
+  Table = 'table',
+  UserTablePrivileges = 'user-table-privileges',
+}
diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/index.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/index.ts
new file mode 100644
index 0000000000000..60eb2a009173c
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/index.ts
@@ -0,0 +1,20 @@
+/* eslint-disable-next-line import/no-unresolved */
+import * as AWSLambda from 'aws-lambda';
+import { HandlerName } from './handler-name';
+import { handler as managePrivileges } from './privileges';
+import { handler as manageTable } from './table';
+import { handler as manageUser } from './user';
+
+const HANDLERS: { [key in HandlerName]: ((props: any, event: AWSLambda.CloudFormationCustomResourceEvent) => Promise<any>) } = {
+  [HandlerName.Table]: manageTable,
+  [HandlerName.User]: manageUser,
+  [HandlerName.UserTablePrivileges]: managePrivileges,
+};
+
+export async function handler(event: AWSLambda.CloudFormationCustomResourceEvent) {
+  const subHandler = HANDLERS[event.ResourceProperties.handler as HandlerName];
+  if (!subHandler) {
+    throw new Error(`Requested handler ${event.ResourceProperties.handler} is not in supported set: ${JSON.stringify(Object.keys(HANDLERS))}`);
+  }
+  return subHandler(event.ResourceProperties, event);
+}
diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/privileges.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/privileges.ts
new file mode 100644
index 0000000000000..9f2064d0e5e5a
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/privileges.ts
@@ -0,0 +1,70 @@
+/* eslint-disable-next-line import/no-unresolved */
+import * as AWSLambda from 'aws-lambda';
+import { TablePrivilege, UserTablePrivilegesHandlerProps } from '../handler-props';
+import { ClusterProps, executeStatement, makePhysicalId } from './util';
+
+export async function handler(props: UserTablePrivilegesHandlerProps & ClusterProps, event: AWSLambda.CloudFormationCustomResourceEvent) {
+  const username = props.username;
+  const tablePrivileges = props.tablePrivileges;
+  const clusterProps = props;
+
+  if (event.RequestType
=== 'Create') { + await grantPrivileges(username, tablePrivileges, clusterProps); + return { PhysicalResourceId: makePhysicalId(username, clusterProps, event.RequestId) }; + } else if (event.RequestType === 'Delete') { + await revokePrivileges(username, tablePrivileges, clusterProps); + return; + } else if (event.RequestType === 'Update') { + const { replace } = await updatePrivileges( + username, + tablePrivileges, + clusterProps, + event.OldResourceProperties as UserTablePrivilegesHandlerProps & ClusterProps, + ); + const physicalId = replace ? makePhysicalId(username, clusterProps, event.RequestId) : event.PhysicalResourceId; + return { PhysicalResourceId: physicalId }; + } else { + /* eslint-disable-next-line dot-notation */ + throw new Error(`Unrecognized event type: ${event['RequestType']}`); + } +} + +async function revokePrivileges(username: string, tablePrivileges: TablePrivilege[], clusterProps: ClusterProps) { + await Promise.all(tablePrivileges.map(({ tableName, actions }) => { + return executeStatement(`REVOKE ${actions.join(', ')} ON ${tableName} FROM ${username}`, clusterProps); + })); +} + +async function grantPrivileges(username: string, tablePrivileges: TablePrivilege[], clusterProps: ClusterProps) { + await Promise.all(tablePrivileges.map(({ tableName, actions }) => { + return executeStatement(`GRANT ${actions.join(', ')} ON ${tableName} TO ${username}`, clusterProps); + })); +} + +async function updatePrivileges( + username: string, + tablePrivileges: TablePrivilege[], + clusterProps: ClusterProps, + oldResourceProperties: UserTablePrivilegesHandlerProps & ClusterProps, +): Promise<{ replace: boolean }> { + const oldClusterProps = oldResourceProperties; + if (clusterProps.clusterName !== oldClusterProps.clusterName || clusterProps.databaseName !== oldClusterProps.databaseName) { + await grantPrivileges(username, tablePrivileges, clusterProps); + return { replace: true }; + } + + const oldUsername = oldResourceProperties.username; + if (oldUsername !== username) { + await grantPrivileges(username, tablePrivileges, clusterProps); + return { replace: true }; + } + + const oldTablePrivileges = oldResourceProperties.tablePrivileges; + if (oldTablePrivileges !== tablePrivileges) { + await revokePrivileges(username, oldTablePrivileges, clusterProps); + await grantPrivileges(username, tablePrivileges, clusterProps); + return { replace: false }; + } + + return { replace: false }; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/table.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/table.ts new file mode 100644 index 0000000000000..a2e2a4dc4bee9 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/table.ts @@ -0,0 +1,75 @@ +/* eslint-disable-next-line import/no-unresolved */ +import * as AWSLambda from 'aws-lambda'; +import { Column } from '../../table'; +import { TableHandlerProps } from '../handler-props'; +import { ClusterProps, executeStatement } from './util'; + +export async function handler(props: TableHandlerProps & ClusterProps, event: AWSLambda.CloudFormationCustomResourceEvent) { + const tableNamePrefix = props.tableName.prefix; + const tableNameSuffix = props.tableName.generateSuffix ? 
`${event.RequestId.substring(0, 8)}` : '';
+  const tableColumns = props.tableColumns;
+  const clusterProps = props;
+
+  if (event.RequestType === 'Create') {
+    const tableName = await createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps);
+    return { PhysicalResourceId: tableName };
+  } else if (event.RequestType === 'Delete') {
+    await dropTable(event.PhysicalResourceId, clusterProps);
+    return;
+  } else if (event.RequestType === 'Update') {
+    const tableName = await updateTable(
+      event.PhysicalResourceId,
+      tableNamePrefix,
+      tableNameSuffix,
+      tableColumns,
+      clusterProps,
+      event.OldResourceProperties as TableHandlerProps & ClusterProps,
+    );
+    return { PhysicalResourceId: tableName };
+  } else {
+    /* eslint-disable-next-line dot-notation */
+    throw new Error(`Unrecognized event type: ${event['RequestType']}`);
+  }
+}
+
+async function createTable(tableNamePrefix: string, tableNameSuffix: string, tableColumns: Column[], clusterProps: ClusterProps): Promise<string> {
+  const tableName = tableNamePrefix + tableNameSuffix;
+  const tableColumnsString = tableColumns.map(column => `${column.name} ${column.dataType}`).join();
+  await executeStatement(`CREATE TABLE ${tableName} (${tableColumnsString})`, clusterProps);
+  return tableName;
+}
+
+async function dropTable(tableName: string, clusterProps: ClusterProps) {
+  await executeStatement(`DROP TABLE ${tableName}`, clusterProps);
+}
+
+async function updateTable(
+  tableName: string,
+  tableNamePrefix: string,
+  tableNameSuffix: string,
+  tableColumns: Column[],
+  clusterProps: ClusterProps,
+  oldResourceProperties: TableHandlerProps & ClusterProps,
+): Promise<string> {
+  const oldClusterProps = oldResourceProperties;
+  if (clusterProps.clusterName !== oldClusterProps.clusterName || clusterProps.databaseName !== oldClusterProps.databaseName) {
+    return createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps);
+  }
+
+  const oldTableNamePrefix = oldResourceProperties.tableName.prefix;
+  if (tableNamePrefix !== oldTableNamePrefix) {
+    return createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps);
+  }
+
+  const oldTableColumns = oldResourceProperties.tableColumns;
+  if (!oldTableColumns.every(oldColumn => tableColumns.some(column => column.name === oldColumn.name && column.dataType === oldColumn.dataType))) {
+    return createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps);
+  }
+
+  const additions = tableColumns.filter(column => {
+    return !oldTableColumns.some(oldColumn => column.name === oldColumn.name && column.dataType === oldColumn.dataType);
+  }).map(column => `ADD ${column.name} ${column.dataType}`);
+  await Promise.all(additions.map(addition => executeStatement(`ALTER TABLE ${tableName} ${addition}`, clusterProps)));
+
+  return tableName;
+}
diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/user.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/user.ts
new file mode 100644
index 0000000000000..707af78714e43
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/user.ts
@@ -0,0 +1,82 @@
+/* eslint-disable-next-line import/no-unresolved */
+import * as AWSLambda from 'aws-lambda';
+/* eslint-disable-next-line import/no-extraneous-dependencies */
+import * as SecretsManager from 'aws-sdk/clients/secretsmanager';
+import { UserHandlerProps } from '../handler-props';
+import { ClusterProps, executeStatement, makePhysicalId } from './util';
+
+const secretsManager = new SecretsManager();
+
+export async function handler(props: UserHandlerProps & ClusterProps, event: AWSLambda.CloudFormationCustomResourceEvent) {
+  const username = props.username;
+  const passwordSecretArn = props.passwordSecretArn;
+  const clusterProps = props;
+
+  if (event.RequestType === 'Create') {
+    await createUser(username, passwordSecretArn, clusterProps);
+    return { PhysicalResourceId: makePhysicalId(username, clusterProps, event.RequestId), Data: { username: username } };
+  } else if (event.RequestType === 'Delete') {
+    await dropUser(username, clusterProps);
+    return;
+  } else if (event.RequestType === 'Update') {
+    const { replace } = await updateUser(username, passwordSecretArn, clusterProps, event.OldResourceProperties as UserHandlerProps & ClusterProps);
+    const physicalId = replace ? makePhysicalId(username, clusterProps, event.RequestId) : event.PhysicalResourceId;
+    return { PhysicalResourceId: physicalId, Data: { username: username } };
+  } else {
+    /* eslint-disable-next-line dot-notation */
+    throw new Error(`Unrecognized event type: ${event['RequestType']}`);
+  }
+}
+
+async function dropUser(username: string, clusterProps: ClusterProps) {
+  await executeStatement(`DROP USER ${username}`, clusterProps);
+}
+
+async function createUser(username: string, passwordSecretArn: string, clusterProps: ClusterProps) {
+  const password = await getPasswordFromSecret(passwordSecretArn);
+
+  await executeStatement(`CREATE USER ${username} PASSWORD '${password}'`, clusterProps);
+}
+
+async function updateUser(
+  username: string,
+  passwordSecretArn: string,
+  clusterProps: ClusterProps,
+  oldResourceProperties: UserHandlerProps & ClusterProps,
+): Promise<{ replace: boolean }> {
+  const oldClusterProps = oldResourceProperties;
+  if (clusterProps.clusterName !== oldClusterProps.clusterName || clusterProps.databaseName !== oldClusterProps.databaseName) {
+    await createUser(username, passwordSecretArn, clusterProps);
+    return { replace: true };
+  }
+
+  const oldUsername = oldResourceProperties.username;
+  const oldPasswordSecretArn = oldResourceProperties.passwordSecretArn;
+  const oldPassword = await getPasswordFromSecret(oldPasswordSecretArn);
+  const password = await getPasswordFromSecret(passwordSecretArn);
+
+  if (username !== oldUsername) {
+    await createUser(username, passwordSecretArn, clusterProps);
+    return { replace: true };
+  }
+
+  if (password !== oldPassword) {
+    await executeStatement(`ALTER USER ${username} PASSWORD '${password}'`, clusterProps);
+    return { replace: false };
+  }
+
+  return { replace: false };
+}
+
+async function getPasswordFromSecret(passwordSecretArn: string): Promise<string> {
+  const secretValue = await secretsManager.getSecretValue({
+    SecretId: passwordSecretArn,
+  }).promise();
+  const secretString = secretValue.SecretString;
+  if (!secretString) {
+    throw new Error(`Secret string for ${passwordSecretArn} was empty`);
+  }
+  const { password } = JSON.parse(secretString);
+
+  return password;
+}
diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/util.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/util.ts
new file mode 100644
index 0000000000000..d834cd474f986
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/util.ts
@@ -0,0 +1,40 @@
+/* eslint-disable-next-line import/no-extraneous-dependencies */
+import * as RedshiftData from 'aws-sdk/clients/redshiftdata';
+import { DatabaseQueryHandlerProps } from '../handler-props';
+
+const redshiftData = new RedshiftData();
+
+export type ClusterProps = Omit<DatabaseQueryHandlerProps, 'handler'>;
+
+export async function executeStatement(statement: string, clusterProps: ClusterProps): Promise<void> {
+  const executeStatementProps = {
+    ClusterIdentifier: clusterProps.clusterName,
+    Database: clusterProps.databaseName,
+    SecretArn: clusterProps.adminUserArn,
+    Sql: statement,
+  };
+  const executedStatement = await redshiftData.executeStatement(executeStatementProps).promise();
+  if (!executedStatement.Id) {
+    throw new Error('Service error: Statement execution did not return a statement ID');
+  }
+  await waitForStatementComplete(executedStatement.Id);
+}
+
+const waitTimeout = 100;
+async function waitForStatementComplete(statementId: string): Promise<void> {
+  await new Promise((resolve: (value: void) => void) => {
+    setTimeout(() => resolve(), waitTimeout);
+  });
+  const statement = await redshiftData.describeStatement({ Id: statementId }).promise();
+  if (statement.Status !== 'FINISHED' && statement.Status !== 'FAILED' && statement.Status !== 'ABORTED') {
+    return waitForStatementComplete(statementId);
+  } else if (statement.Status === 'FINISHED') {
+    return;
+  } else {
+    throw new Error(`Statement status was ${statement.Status}: ${statement.Error}`);
+  }
+}
+
+export function makePhysicalId(resourceName: string, clusterProps: ClusterProps, requestId: string): string {
+  return `${clusterProps.clusterName}:${clusterProps.databaseName}:${resourceName}:${requestId}`;
+}
diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query.ts
new file mode 100644
index 0000000000000..2f724334b637a
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query.ts
@@ -0,0 +1,105 @@
+import * as path from 'path';
+import * as iam from '@aws-cdk/aws-iam';
+import * as lambda from '@aws-cdk/aws-lambda';
+import * as secretsmanager from '@aws-cdk/aws-secretsmanager';
+import * as cdk from '@aws-cdk/core';
+import * as customresources from '@aws-cdk/custom-resources';
+import { Construct } from 'constructs';
+import { Cluster } from '../cluster';
+import { DatabaseOptions } from '../database-options';
+import { DatabaseQueryHandlerProps } from './handler-props';
+
+// keep this import separate from other imports to reduce chance for merge conflicts with v2-main
+// eslint-disable-next-line no-duplicate-imports, import/order
+import { Construct as CoreConstruct } from '@aws-cdk/core';
+
+export interface DatabaseQueryProps<HandlerProps> extends DatabaseOptions {
+  readonly handler: string;
+  readonly properties: HandlerProps;
+  /**
+   * The policy to apply when this resource is removed from the application.
+   *
+   * @default cdk.RemovalPolicy.Destroy
+   */
+  readonly removalPolicy?: cdk.RemovalPolicy;
+}
+
+export class DatabaseQuery<HandlerProps> extends CoreConstruct implements iam.IGrantable {
+  readonly grantPrincipal: iam.IPrincipal;
+  readonly ref: string;
+
+  private readonly resource: cdk.CustomResource;
+
+  constructor(scope: Construct, id: string, props: DatabaseQueryProps<HandlerProps>) {
+    super(scope, id);
+
+    const adminUser = this.getAdminUser(props);
+    const handler = new lambda.SingletonFunction(this, 'Handler', {
+      code: lambda.Code.fromAsset(path.join(__dirname, 'database-query-provider')),
+      runtime: lambda.Runtime.NODEJS_14_X,
+      handler: 'index.handler',
+      timeout: cdk.Duration.minutes(1),
+      uuid: '3de5bea7-27da-4796-8662-5efb56431b5f',
+      lambdaPurpose: 'Query Redshift Database',
+    });
+    handler.addToRolePolicy(new iam.PolicyStatement({
+      actions: ['redshift-data:DescribeStatement', 'redshift-data:ExecuteStatement'],
+      resources: ['*'],
+    }));
+    adminUser.grantRead(handler);
+
+    const provider = new customresources.Provider(this, 'Provider', {
+      onEventHandler: handler,
+    });
+
+    const queryHandlerProps: DatabaseQueryHandlerProps & HandlerProps = {
+      handler: props.handler,
+      clusterName: props.cluster.clusterName,
+      adminUserArn: adminUser.secretArn,
+      databaseName: props.databaseName,
+      ...props.properties,
+    };
+    this.resource = new cdk.CustomResource(this, 'Resource', {
+      resourceType: 'Custom::RedshiftDatabaseQuery',
+      serviceToken: provider.serviceToken,
+      removalPolicy: props.removalPolicy,
+      properties: queryHandlerProps,
+    });
+
+    this.grantPrincipal = handler.grantPrincipal;
+    this.ref = this.resource.ref;
+  }
+
+  public applyRemovalPolicy(policy: cdk.RemovalPolicy): void {
+    this.resource.applyRemovalPolicy(policy);
+  }
+
+  public getAtt(attributeName: string): cdk.Reference {
+    return this.resource.getAtt(attributeName);
+  }
+
+  public getAttString(attributeName: string): string {
+    return this.resource.getAttString(attributeName);
+  }
+
+  private getAdminUser(props: DatabaseOptions): secretsmanager.ISecret {
+    const cluster = props.cluster;
+    let adminUser = props.adminUser;
+    if (!adminUser) {
+      if (cluster instanceof Cluster) {
+        if (cluster.secret) {
+          adminUser = cluster.secret;
+        } else {
+          throw new Error(
+            'Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster did not generate admin user credentials (they were provided explicitly)',
+          );
+        }
+      } else {
+        throw new Error(
+          'Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster was imported',
+        );
+      }
+    }
+    return adminUser;
+  }
+}
diff --git a/packages/@aws-cdk/aws-redshift/lib/private/handler-props.ts b/packages/@aws-cdk/aws-redshift/lib/private/handler-props.ts
new file mode 100644
index 0000000000000..b00cc667a2ced
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/lib/private/handler-props.ts
@@ -0,0 +1,31 @@
+import { Column } from '../table';
+
+export interface DatabaseQueryHandlerProps {
+  readonly handler: string;
+  readonly clusterName: string;
+  readonly adminUserArn: string;
+  readonly databaseName: string;
+}
+
+export interface UserHandlerProps {
+  readonly username: string;
+  readonly passwordSecretArn: string;
+}
+
+export interface TableHandlerProps {
+  readonly tableName: {
+    readonly prefix: string;
+    readonly generateSuffix: boolean;
+  };
+  readonly tableColumns: Column[];
+}
+
+export interface TablePrivilege {
+  readonly tableName: string;
+  readonly actions: string[];
+}
+
+export
interface UserTablePrivilegesHandlerProps { + readonly username: string; + readonly tablePrivileges: TablePrivilege[]; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/privileges.ts b/packages/@aws-cdk/aws-redshift/lib/private/privileges.ts new file mode 100644 index 0000000000000..e8d9ed13d13dc --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/privileges.ts @@ -0,0 +1,101 @@ +import * as cdk from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import { DatabaseOptions } from '../database-options'; +import { ITable, TableAction } from '../table'; +import { IUser } from '../user'; +import { DatabaseQuery } from './database-query'; +import { HandlerName } from './database-query-provider/handler-name'; +import { TablePrivilege as SerializedTablePrivilege, UserTablePrivilegesHandlerProps } from './handler-props'; + +// keep this import separate from other imports to reduce chance for merge conflicts with v2-main +// eslint-disable-next-line no-duplicate-imports, import/order +import { Construct as CoreConstruct } from '@aws-cdk/core'; + +/** + * The Redshift table and action that make up a privilege that can be granted to a Redshift user. + */ +export interface TablePrivilege { + /** + * The table on which privileges will be granted. + */ + readonly table: ITable; + + /** + * The actions that will be granted. + */ + readonly actions: TableAction[]; +} + +/** + * Properties for specifying privileges granted to a Redshift user on Redshift tables. + */ +export interface UserTablePrivilegesProps extends DatabaseOptions { + /** + * The user to which privileges will be granted. + */ + readonly user: IUser; + + /** + * The privileges to be granted. + * + * @default [] - use `addPrivileges` to grant privileges after construction + */ + readonly privileges?: TablePrivilege[]; +} + +/** + * Privileges granted to a Redshift user on Redshift tables. + * + * This construct is located in the `private` directory to ensure that it is not exported for direct public use. This + * means that user privileges must be managed through the `Table.grant` method or the `User.addTablePrivileges` + * method. Thus, each `User` will have at most one `UserTablePrivileges` construct to manage its privileges. For details + * on why this is a Good Thing, see the README, under "Granting Privileges". + */ +export class UserTablePrivileges extends CoreConstruct { + private privileges: TablePrivilege[]; + + constructor(scope: Construct, id: string, props: UserTablePrivilegesProps) { + super(scope, id); + + this.privileges = props.privileges ?? 
[]; + + new DatabaseQuery(this, 'Resource', { + ...props, + handler: HandlerName.UserTablePrivileges, + properties: { + username: props.user.username, + tablePrivileges: cdk.Lazy.any({ + produce: () => { + const reducedPrivileges = this.privileges.reduce((privileges, { table, actions }) => { + const tableName = table.tableName; + if (!(tableName in privileges)) { + privileges[tableName] = []; + } + actions = actions.concat(privileges[tableName]); + if (actions.includes(TableAction.ALL)) { + actions = [TableAction.ALL]; + } + if (actions.includes(TableAction.UPDATE) || actions.includes(TableAction.DELETE)) { + actions.push(TableAction.SELECT); + } + privileges[tableName] = Array.from(new Set(actions)); + return privileges; + }, {} as { [key: string]: TableAction[] }); + const serializedPrivileges: SerializedTablePrivilege[] = Object.entries(reducedPrivileges).map(([tableName, actions]) => ({ + tableName: tableName, + actions: actions.map(action => TableAction[action]), + })); + return serializedPrivileges; + }, + }) as any, + }, + }); + } + + /** + * Grant this user additional privileges. + */ + addPrivileges(table: ITable, ...actions: TableAction[]): void { + this.privileges.push({ table, actions }); + } +} diff --git a/packages/@aws-cdk/aws-redshift/lib/table.ts b/packages/@aws-cdk/aws-redshift/lib/table.ts new file mode 100644 index 0000000000000..337abdedd00a1 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/table.ts @@ -0,0 +1,222 @@ +import * as cdk from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import { ICluster } from './cluster'; +import { DatabaseOptions } from './database-options'; +import { DatabaseQuery } from './private/database-query'; +import { HandlerName } from './private/database-query-provider/handler-name'; +import { TableHandlerProps } from './private/handler-props'; +import { IUser } from './user'; + +// keep this import separate from other imports to reduce chance for merge conflicts with v2-main +// eslint-disable-next-line no-duplicate-imports, import/order +import { Construct as CoreConstruct } from '@aws-cdk/core'; + +/** + * An action that a Redshift user can be granted privilege to perform on a table. + */ +export enum TableAction { + /** + * Grants privilege to select data from a table or view using a SELECT statement. + */ + SELECT, + + /** + * Grants privilege to load data into a table using an INSERT statement or a COPY statement. + */ + INSERT, + + /** + * Grants privilege to update a table column using an UPDATE statement. + */ + UPDATE, + + /** + * Grants privilege to delete a data row from a table. + */ + DELETE, + + /** + * Grants privilege to drop a table. + */ + DROP, + + /** + * Grants privilege to create a foreign key constraint. + * + * You need to grant this privilege on both the referenced table and the referencing table; otherwise, the user can't create the constraint. + */ + REFERENCES, + + /** + * Grants all available privileges at once to the specified user or user group. + */ + ALL +} + +/** + * A column in a Redshift table. + */ +export interface Column { + /** + * The name of the column. + */ + readonly name: string; + + /** + * The data type of the column. + */ + readonly dataType: string; +} + +/** + * Properties for configuring a Redshift table. + */ +export interface TableProps extends DatabaseOptions { + /** + * The name of the table. + * + * @default - a name is generated + */ + readonly tableName?: string; + + /** + * The columns of the table. 
+ */ + readonly tableColumns: Column[]; + + /** + * The policy to apply when this resource is removed from the application. + * + * @default cdk.RemovalPolicy.Retain + */ + readonly removalPolicy?: cdk.RemovalPolicy; +} + +/** + * Represents a table in a Redshift database. + */ +export interface ITable extends cdk.IConstruct { + /** + * Name of the table. + */ + readonly tableName: string; + + /** + * The columns of the table. + */ + readonly tableColumns: Column[]; + + /** + * The cluster where the table is located. + */ + readonly cluster: ICluster; + + /** + * The name of the database where the table is located. + */ + readonly databaseName: string; + + /** + * Grant a user privilege to access this table. + */ + grant(user: IUser, ...actions: TableAction[]): void; +} + +/** + * A full specification of a Redshift table that can be used to import it fluently into the CDK application. + */ +export interface TableAttributes { + /** + * Name of the table. + */ + readonly tableName: string; + + /** + * The columns of the table. + */ + readonly tableColumns: Column[]; + + /** + * The cluster where the table is located. + */ + readonly cluster: ICluster; + + /** + * The name of the database where the table is located. + */ + readonly databaseName: string; +} + +abstract class TableBase extends CoreConstruct implements ITable { + abstract readonly tableName: string; + abstract readonly tableColumns: Column[]; + abstract readonly cluster: ICluster; + abstract readonly databaseName: string; + grant(user: IUser, ...actions: TableAction[]) { + user.addTablePrivileges(this, ...actions); + } +} + +/** + * A table in a Redshift cluster. + */ +export class Table extends TableBase { + /** + * Specify a Redshift table using a table name and schema that already exists. + */ + static fromTableAttributes(scope: Construct, id: string, attrs: TableAttributes): ITable { + return new class extends TableBase { + readonly tableName = attrs.tableName; + readonly tableColumns = attrs.tableColumns; + readonly cluster = attrs.cluster; + readonly databaseName = attrs.databaseName; + }(scope, id); + } + + readonly tableName: string; + readonly tableColumns: Column[]; + readonly cluster: ICluster; + readonly databaseName: string; + + private resource: DatabaseQuery; + + constructor(scope: Construct, id: string, props: TableProps) { + super(scope, id); + + this.tableColumns = props.tableColumns; + this.cluster = props.cluster; + this.databaseName = props.databaseName; + + this.resource = new DatabaseQuery(this, 'Resource', { + removalPolicy: cdk.RemovalPolicy.RETAIN, + ...props, + handler: HandlerName.Table, + properties: { + tableName: { + prefix: props.tableName ?? cdk.Names.uniqueId(this), + generateSuffix: !props.tableName, + }, + tableColumns: this.tableColumns, + }, + }); + + this.tableName = this.resource.ref; + } + + /** + * Apply the given removal policy to this resource + * + * The Removal Policy controls what happens to this resource when it stops + * being managed by CloudFormation, either because you've removed it from the + * CDK application or because you've made a change that requires the resource + * to be replaced. + * + * The resource can be destroyed (`RemovalPolicy.DESTROY`), or left in your AWS + * account for data recovery and cleanup later (`RemovalPolicy.RETAIN`). + * + * This resource is retained by default. 
+ */ + public applyRemovalPolicy(policy: cdk.RemovalPolicy): void { + this.resource.applyRemovalPolicy(policy); + } +} diff --git a/packages/@aws-cdk/aws-redshift/lib/user.ts b/packages/@aws-cdk/aws-redshift/lib/user.ts new file mode 100644 index 0000000000000..3b5c8d0829ef8 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/user.ts @@ -0,0 +1,186 @@ +import * as kms from '@aws-cdk/aws-kms'; +import * as cdk from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import { ICluster } from './cluster'; +import { DatabaseOptions } from './database-options'; +import { DatabaseSecret } from './database-secret'; +import { DatabaseQuery } from './private/database-query'; +import { HandlerName } from './private/database-query-provider/handler-name'; +import { UserHandlerProps } from './private/handler-props'; +import { UserTablePrivileges } from './private/privileges'; +import { ITable, TableAction } from './table'; + +// keep this import separate from other imports to reduce chance for merge conflicts with v2-main +// eslint-disable-next-line no-duplicate-imports, import/order +import { Construct as CoreConstruct } from '@aws-cdk/core'; + +/** + * Properties for configuring a Redshift user. + */ +export interface UserProps extends DatabaseOptions { + /** + * The name of the user. + * + * For valid values, see: https://docs.aws.amazon.com/redshift/latest/dg/r_names.html + * + * @default - a name is generated + */ + readonly username?: string; + + /** + * KMS key to encrypt the generated secret. + * + * @default - the default AWS managed key is used + */ + readonly encryptionKey?: kms.IKey; + + /** + * The policy to apply when this resource is removed from the application. + * + * @default cdk.RemovalPolicy.Destroy + */ + readonly removalPolicy?: cdk.RemovalPolicy; +} + +/** + * Represents a user in a Redshift database. + */ +export interface IUser extends cdk.IConstruct { + /** + * The name of the user. + */ + readonly username: string; + + /** + * The password of the user. + */ + readonly password: cdk.SecretValue; + + /** + * The cluster where the table is located. + */ + readonly cluster: ICluster; + + /** + * The name of the database where the table is located. + */ + readonly databaseName: string; + + /** + * Grant this user privilege to access a table. + */ + addTablePrivileges(table: ITable, ...actions: TableAction[]): void; +} + +/** + * A full specification of a Redshift user that can be used to import it fluently into the CDK application. + */ +export interface UserAttributes extends DatabaseOptions { + /** + * The name of the user. + */ + readonly username: string; + + /** + * The password of the user. + * + * Do not put passwords in CDK code directly. + */ + readonly password: cdk.SecretValue; +} + +abstract class UserBase extends CoreConstruct implements IUser { + abstract readonly username: string; + abstract readonly password: cdk.SecretValue; + abstract readonly cluster: ICluster; + abstract readonly databaseName: string; + + /** + * The tables that user will have access to + */ + private privileges?: UserTablePrivileges; + + protected abstract readonly databaseProps: DatabaseOptions; + + addTablePrivileges(table: ITable, ...actions: TableAction[]): void { + if (!this.privileges) { + this.privileges = new UserTablePrivileges(this, 'TablePrivileges', { + ...this.databaseProps, + user: this, + }); + } + + this.privileges.addPrivileges(table, ...actions); + } +} + +/** + * A user in a Redshift cluster. 
+ */ +export class User extends UserBase { + /** + * Specify a Redshift user using credentials that already exist. + */ + static fromUserAttributes(scope: Construct, id: string, attrs: UserAttributes): IUser { + return new class extends UserBase { + readonly username = attrs.username; + readonly password = attrs.password; + readonly cluster = attrs.cluster; + readonly databaseName = attrs.databaseName; + protected readonly databaseProps = attrs; + }(scope, id); + } + + readonly username: string; + readonly password: cdk.SecretValue; + readonly cluster: ICluster; + readonly databaseName: string; + protected databaseProps: DatabaseOptions; + + private resource: DatabaseQuery; + + constructor(scope: Construct, id: string, props: UserProps) { + super(scope, id); + + this.databaseProps = props; + this.cluster = props.cluster; + this.databaseName = props.databaseName; + + const username = props.username ?? cdk.Names.uniqueId(this).toLowerCase(); + const secret = new DatabaseSecret(this, 'Secret', { + username, + encryptionKey: props.encryptionKey, + }); + const attachedSecret = secret.attach(props.cluster); + this.password = attachedSecret.secretValueFromJson('password'); + + this.resource = new DatabaseQuery(this, 'Resource', { + ...this.databaseProps, + handler: HandlerName.User, + properties: { + username, + passwordSecretArn: attachedSecret.secretArn, + }, + }); + attachedSecret.grantRead(this.resource); + + this.username = this.resource.getAttString('username'); + } + + /** + * Apply the given removal policy to this resource + * + * The Removal Policy controls what happens to this resource when it stops + * being managed by CloudFormation, either because you've removed it from the + * CDK application or because you've made a change that requires the resource + * to be replaced. + * + * The resource can be destroyed (`RemovalPolicy.DESTROY`), or left in your AWS + * account for data recovery and cleanup later (`RemovalPolicy.RETAIN`). + * + * This resource is destroyed by default. 
+ */ + public applyRemovalPolicy(policy: cdk.RemovalPolicy): void { + this.resource.applyRemovalPolicy(policy); + } +} diff --git a/packages/@aws-cdk/aws-redshift/package.json b/packages/@aws-cdk/aws-redshift/package.json index 3bf492f83ee7b..71042529a3e69 100644 --- a/packages/@aws-cdk/aws-redshift/package.json +++ b/packages/@aws-cdk/aws-redshift/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", @@ -75,7 +82,9 @@ "devDependencies": { "@aws-cdk/assertions": "0.0.0", "@types/jest": "^26.0.24", + "aws-sdk": "^2.848.0", "cdk-build-tools": "0.0.0", + "cdk-integ-tools": "0.0.0", "cfn2ts": "0.0.0", "jest": "^26.6.3", "pkglint": "0.0.0" @@ -84,9 +93,11 @@ "@aws-cdk/aws-ec2": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/aws-secretsmanager": "0.0.0", "@aws-cdk/core": "0.0.0", + "@aws-cdk/custom-resources": "0.0.0", "constructs": "^3.3.69" }, "homepage": "https://github.com/aws/aws-cdk", @@ -94,9 +105,11 @@ "@aws-cdk/aws-ec2": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/aws-secretsmanager": "0.0.0", "@aws-cdk/core": "0.0.0", + "@aws-cdk/custom-resources": "0.0.0", "constructs": "^3.3.69" }, "engines": { diff --git a/packages/@aws-cdk/aws-redshift/rosetta/cluster.ts-fixture b/packages/@aws-cdk/aws-redshift/rosetta/cluster.ts-fixture new file mode 100644 index 0000000000000..82d98ca3e381e --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/rosetta/cluster.ts-fixture @@ -0,0 +1,20 @@ +// Fixture with cluster already created +import { Construct, SecretValue, Stack } from '@aws-cdk/core'; +import { Vpc } from '@aws-cdk/aws-ec2'; +import { Cluster, Table, TableAction, User } from '@aws-cdk/aws-redshift'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + const vpc = new Vpc(this, 'Vpc'); + const cluster = new Cluster(this, 'Cluster', { + vpc, + masterUser: { + masterUsername: 'admin', + }, + }); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-redshift/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-redshift/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..928b036cf2611 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/rosetta/default.ts-fixture @@ -0,0 +1,11 @@ +// Fixture with packages imported, but nothing else +import { Construct, Stack } from '@aws-cdk/core'; +import { Cluster } from '@aws-cdk/aws-redshift'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-redshift/test/database-query-provider/index.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/index.test.ts new file mode 100644 index 0000000000000..18091a6627167 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/index.test.ts @@ -0,0 +1,50 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const resourceProperties = { + handler: 'table', + ServiceToken: '', +}; +const requestId = 'requestId'; +const baseEvent: AWSLambda.CloudFormationCustomResourceEvent = { + ResourceProperties: resourceProperties, + RequestType: 'Create', + ServiceToken: '', + ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + 
ResourceType: '', +}; + +const mockSubHandler = jest.fn(); +jest.mock('../../lib/private/database-query-provider/table', () => ({ + __esModule: true, + handler: mockSubHandler, +})); +import { handler } from '../../lib/private/database-query-provider/index'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +test('calls sub handler', async () => { + const event = baseEvent; + + await handler(event); + + expect(mockSubHandler).toHaveBeenCalled(); +}); + +test('throws with unregistered subhandler', async () => { + const event = { + ...baseEvent, + ResourceProperties: { + ...resourceProperties, + handler: 'unregistered', + }, + }; + + await expect(handler(event)).rejects.toThrow(/Requested handler unregistered is not in supported set/); + expect(mockSubHandler).not.toHaveBeenCalled(); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/database-query-provider/privileges.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/privileges.test.ts new file mode 100644 index 0000000000000..daa3835b89f24 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/privileges.test.ts @@ -0,0 +1,163 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const username = 'username'; +const tableName = 'tableName'; +const tablePrivileges = [{ tableName, actions: ['INSERT', 'SELECT'] }]; +const clusterName = 'clusterName'; +const adminUserArn = 'adminUserArn'; +const databaseName = 'databaseName'; +const physicalResourceId = 'PhysicalResourceId'; +const resourceProperties = { + username, + tablePrivileges, + clusterName, + adminUserArn, + databaseName, + ServiceToken: '', +}; +const requestId = 'requestId'; +const genericEvent: AWSLambda.CloudFormationCustomResourceEventCommon = { + ResourceProperties: resourceProperties, + ServiceToken: '', + ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + ResourceType: '', +}; + +const mockExecuteStatement = jest.fn(() => ({ promise: jest.fn(() => ({ Id: 'statementId' })) })); +jest.mock('aws-sdk/clients/redshiftdata', () => class { + executeStatement = mockExecuteStatement; + describeStatement = () => ({ promise: jest.fn(() => ({ Status: 'FINISHED' })) }); +}); +import { handler as managePrivileges } from '../../lib/private/database-query-provider/privileges'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +describe('create', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceCreateEvent = { + RequestType: 'Create', + ...genericEvent, + }; + + test('serializes properties in statement and creates physical resource ID', async () => { + const event = baseEvent; + + await expect(managePrivileges(resourceProperties, event)).resolves.toEqual({ + PhysicalResourceId: 'clusterName:databaseName:username:requestId', + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `GRANT INSERT, SELECT ON ${tableName} TO ${username}`, + })); + }); +}); + +describe('delete', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceDeleteEvent = { + RequestType: 'Delete', + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('executes statement', async () => { + const event = baseEvent; + + await managePrivileges(resourceProperties, event); + + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `REVOKE INSERT, SELECT ON ${tableName} FROM ${username}`, + })); + }); +}); + +describe('update', () => { + const event: 
AWSLambda.CloudFormationCustomResourceUpdateEvent = { + RequestType: 'Update', + OldResourceProperties: resourceProperties, + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('replaces if cluster name changes', async () => { + const newClusterName = 'newClusterName'; + const newResourceProperties = { + ...resourceProperties, + clusterName: newClusterName, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + ClusterIdentifier: newClusterName, + Sql: expect.stringMatching(/GRANT/), + })); + }); + + test('does not replace if admin user ARN changes', async () => { + const newAdminUserArn = 'newAdminUserArn'; + const newResourceProperties = { + ...resourceProperties, + adminUserArn: newAdminUserArn, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).not.toHaveBeenCalled(); + }); + + test('replaces if database name changes', async () => { + const newDatabaseName = 'newDatabaseName'; + const newResourceProperties = { + ...resourceProperties, + databaseName: newDatabaseName, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Database: newDatabaseName, + Sql: expect.stringMatching(/GRANT/), + })); + }); + + test('replaces if user name changes', async () => { + const newUsername = 'newUsername'; + const newResourceProperties = { + ...resourceProperties, + username: newUsername, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: expect.stringMatching(new RegExp(`GRANT .* TO ${newUsername}`)), + })); + }); + + test('does not replace when privileges change', async () => { + const newTableName = 'newTableName'; + const newTablePrivileges = [{ tableName: newTableName, actions: ['DROP'] }]; + const newResourceProperties = { + ...resourceProperties, + tablePrivileges: newTablePrivileges, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `REVOKE INSERT, SELECT ON ${tableName} FROM ${username}`, + })); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `GRANT DROP ON ${newTableName} TO ${username}`, + })); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/database-query-provider/table.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/table.test.ts new file mode 100644 index 0000000000000..956efca1ab81f --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/table.test.ts @@ -0,0 +1,202 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const tableNamePrefix = 'tableNamePrefix'; +const tableColumns = [{ name: 'col1', dataType: 'varchar(1)' }]; +const clusterName = 'clusterName'; +const adminUserArn = 'adminUserArn'; +const databaseName = 'databaseName'; +const physicalResourceId = 'PhysicalResourceId'; +const resourceProperties = 
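+// Editorial note, not part of the original patch: `requestIdTruncated` below is
+// the first 8 characters of `requestId`, mirroring how these tests expect the
+// table handler to derive a physical resource ID from the name prefix plus a
+// truncated request ID when `generateSuffix` is set. A hedged sketch of that
+// derivation (the helper name `getTableName` is an assumption):
+//
+//   function getTableName(tableName: { prefix: string; generateSuffix: boolean }, requestId: string): string {
+//     // append a short request-id suffix only when suffix generation was requested
+//     return tableName.generateSuffix ? `${tableName.prefix}${requestId.substring(0, 8)}` : tableName.prefix;
+//   }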
{ + tableName: { + prefix: tableNamePrefix, + generateSuffix: true, + }, + tableColumns, + clusterName, + adminUserArn, + databaseName, + ServiceToken: '', +}; +const requestId = 'requestId'; +const requestIdTruncated = 'requestI'; +const genericEvent: AWSLambda.CloudFormationCustomResourceEventCommon = { + ResourceProperties: resourceProperties, + ServiceToken: '', + ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + ResourceType: '', +}; + +const mockExecuteStatement = jest.fn(() => ({ promise: jest.fn(() => ({ Id: 'statementId' })) })); +jest.mock('aws-sdk/clients/redshiftdata', () => class { + executeStatement = mockExecuteStatement; + describeStatement = () => ({ promise: jest.fn(() => ({ Status: 'FINISHED' })) }); +}); +import { handler as manageTable } from '../../lib/private/database-query-provider/table'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +describe('create', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceCreateEvent = { + RequestType: 'Create', + ...genericEvent, + }; + + test('serializes properties in statement and creates physical resource ID', async () => { + const event = baseEvent; + + await expect(manageTable(resourceProperties, event)).resolves.toEqual({ + PhysicalResourceId: `${tableNamePrefix}${requestIdTruncated}`, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE TABLE ${tableNamePrefix}${requestIdTruncated} (col1 varchar(1))`, + })); + }); + + test('does not modify table name if no suffix generation requested', async () => { + const event = baseEvent; + const newResourceProperties = { + ...resourceProperties, + tableName: { + ...resourceProperties.tableName, + generateSuffix: false, + }, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.toEqual({ + PhysicalResourceId: tableNamePrefix, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE TABLE ${tableNamePrefix} (col1 varchar(1))`, + })); + }); +}); + +describe('delete', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceDeleteEvent = { + RequestType: 'Delete', + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('executes statement', async () => { + const event = baseEvent; + + await manageTable(resourceProperties, event); + + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `DROP TABLE ${physicalResourceId}`, + })); + }); +}); + +describe('update', () => { + const event: AWSLambda.CloudFormationCustomResourceUpdateEvent = { + RequestType: 'Update', + OldResourceProperties: resourceProperties, + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('replaces if cluster name changes', async () => { + const newClusterName = 'newClusterName'; + const newResourceProperties = { + ...resourceProperties, + clusterName: newClusterName, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + ClusterIdentifier: newClusterName, + Sql: expect.stringMatching(new RegExp(`CREATE TABLE ${tableNamePrefix}${requestIdTruncated}`)), + })); + }); + + test('does not replace if admin user ARN changes', async () => { + const newAdminUserArn = 'newAdminUserArn'; + const newResourceProperties = { + ...resourceProperties, + adminUserArn: newAdminUserArn, + }; + + await expect(manageTable(newResourceProperties, 
event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).not.toHaveBeenCalled(); + }); + + test('replaces if database name changes', async () => { + const newDatabaseName = 'newDatabaseName'; + const newResourceProperties = { + ...resourceProperties, + databaseName: newDatabaseName, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Database: newDatabaseName, + Sql: expect.stringMatching(new RegExp(`CREATE TABLE ${tableNamePrefix}${requestIdTruncated}`)), + })); + }); + + test('replaces if table name changes', async () => { + const newTableNamePrefix = 'newTableNamePrefix'; + const newResourceProperties = { + ...resourceProperties, + tableName: { + ...resourceProperties.tableName, + prefix: newTableNamePrefix, + }, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: expect.stringMatching(new RegExp(`CREATE TABLE ${newTableNamePrefix}${requestIdTruncated}`)), + })); + }); + + test('replaces if table columns change', async () => { + const newTableColumnName = 'col2'; + const newTableColumnDataType = 'varchar(1)'; + const newTableColumns = [{ name: newTableColumnName, dataType: newTableColumnDataType }]; + const newResourceProperties = { + ...resourceProperties, + tableColumns: newTableColumns, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE TABLE ${tableNamePrefix}${requestIdTruncated} (${newTableColumnName} ${newTableColumnDataType})`, + })); + }); + + test('does not replace if table columns added', async () => { + const newTableColumnName = 'col2'; + const newTableColumnDataType = 'varchar(1)'; + const newTableColumns = [{ name: 'col1', dataType: 'varchar(1)' }, { name: newTableColumnName, dataType: newTableColumnDataType }]; + const newResourceProperties = { + ...resourceProperties, + tableColumns: newTableColumns, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `ALTER TABLE ${physicalResourceId} ADD ${newTableColumnName} ${newTableColumnDataType}`, + })); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/database-query-provider/user.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/user.test.ts new file mode 100644 index 0000000000000..87c3bdd0043de --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/user.test.ts @@ -0,0 +1,163 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const password = 'password'; +const username = 'username'; +const passwordSecretArn = 'passwordSecretArn'; +const clusterName = 'clusterName'; +const adminUserArn = 'adminUserArn'; +const databaseName = 'databaseName'; +const physicalResourceId = 'PhysicalResourceId'; +const resourceProperties = { + username, + passwordSecretArn, + clusterName, + adminUserArn, + databaseName, + ServiceToken: '', +}; +const requestId = 'requestId'; +const genericEvent: 
AWSLambda.CloudFormationCustomResourceEventCommon = { + ResourceProperties: resourceProperties, + ServiceToken: '', + ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + ResourceType: '', +}; + +const mockExecuteStatement = jest.fn(() => ({ promise: jest.fn(() => ({ Id: 'statementId' })) })); +jest.mock('aws-sdk/clients/redshiftdata', () => class { + executeStatement = mockExecuteStatement; + describeStatement = () => ({ promise: jest.fn(() => ({ Status: 'FINISHED' })) }); +}); +const mockGetSecretValue = jest.fn(() => ({ promise: jest.fn(() => ({ SecretString: JSON.stringify({ password }) })) })); +jest.mock('aws-sdk/clients/secretsmanager', () => class { + getSecretValue = mockGetSecretValue; +}); +import { handler as manageUser } from '../../lib/private/database-query-provider/user'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +describe('create', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceCreateEvent = { + RequestType: 'Create', + ...genericEvent, + }; + + test('serializes properties in statement and creates physical resource ID', async () => { + const event = baseEvent; + + await expect(manageUser(resourceProperties, event)).resolves.toEqual({ + PhysicalResourceId: 'clusterName:databaseName:username:requestId', + Data: { + username: username, + }, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE USER username PASSWORD '${password}'`, + })); + }); +}); + +describe('delete', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceDeleteEvent = { + RequestType: 'Delete', + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('executes statement', async () => { + const event = baseEvent; + + await manageUser(resourceProperties, event); + + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: 'DROP USER username', + })); + }); +}); + +describe('update', () => { + const event: AWSLambda.CloudFormationCustomResourceUpdateEvent = { + RequestType: 'Update', + OldResourceProperties: resourceProperties, + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('replaces if cluster name changes', async () => { + const newClusterName = 'newClusterName'; + const newResourceProperties = { + ...resourceProperties, + clusterName: newClusterName, + }; + + await expect(manageUser(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + ClusterIdentifier: newClusterName, + Sql: expect.stringMatching(/CREATE USER/), + })); + }); + + test('does not replace if admin user ARN changes', async () => { + const newAdminUserArn = 'newAdminUserArn'; + const newResourceProperties = { + ...resourceProperties, + adminUserArn: newAdminUserArn, + }; + + await expect(manageUser(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).not.toHaveBeenCalled(); + }); + + test('replaces if database name changes', async () => { + const newDatabaseName = 'newDatabaseName'; + const newResourceProperties = { + ...resourceProperties, + databaseName: newDatabaseName, + }; + + await expect(manageUser(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Database: newDatabaseName, + Sql: 
expect.stringMatching(/CREATE USER/),
+    }));
+  });
+
+  test('replaces if user name changes', async () => {
+    const newUsername = 'newUsername';
+    const newResourceProperties = {
+      ...resourceProperties,
+      username: newUsername,
+    };
+
+    await expect(manageUser(newResourceProperties, event)).resolves.not.toMatchObject({
+      PhysicalResourceId: physicalResourceId,
+    });
+    expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({
+      Sql: expect.stringMatching(new RegExp(`CREATE USER ${newUsername}`)),
+    }));
+  });
+
+  test('does not replace if password changes', async () => {
+    const newPassword = 'newPassword';
+    mockGetSecretValue.mockImplementationOnce(() => ({ promise: jest.fn(() => ({ SecretString: JSON.stringify({ password: newPassword }) })) }));
+
+    await expect(manageUser(resourceProperties, event)).resolves.toMatchObject({
+      PhysicalResourceId: physicalResourceId,
+    });
+    expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({
+      Sql: expect.stringMatching(new RegExp(`ALTER USER ${username} PASSWORD '${newPassword}'`)),
+    }));
+  });
+});
diff --git a/packages/@aws-cdk/aws-redshift/test/database-query.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query.test.ts
new file mode 100644
index 0000000000000..1b3bfe76d2e3e
--- /dev/null
+++ b/packages/@aws-cdk/aws-redshift/test/database-query.test.ts
@@ -0,0 +1,200 @@
+import { Match, Template } from '@aws-cdk/assertions';
+import * as ec2 from '@aws-cdk/aws-ec2';
+import * as secretsmanager from '@aws-cdk/aws-secretsmanager';
+import * as cdk from '@aws-cdk/core';
+import * as redshift from '../lib';
+import { DatabaseQuery, DatabaseQueryProps } from '../lib/private/database-query';
+
+describe('database query', () => {
+  let stack: cdk.Stack;
+  let vpc: ec2.Vpc;
+  let cluster: redshift.ICluster;
+  let minimalProps: DatabaseQueryProps;
+
+  beforeEach(() => {
+    stack = new cdk.Stack();
+    vpc = new ec2.Vpc(stack, 'VPC');
+    cluster = new redshift.Cluster(stack, 'Cluster', {
+      vpc: vpc,
+      masterUser: {
+        masterUsername: 'admin',
+      },
+    });
+    minimalProps = {
+      cluster: cluster,
+      databaseName: 'databaseName',
+      handler: 'handler',
+      properties: {},
+    };
+  });
+
+  describe('admin user', () => {
+    it('takes from cluster by default', () => {
+      new DatabaseQuery(stack, 'Query', {
+        ...minimalProps,
+      });
+
+      Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', {
+        adminUserArn: { Ref: 'ClusterSecretAttachment769E6258' },
+      });
+    });
+
+    it('grants read permission to handler', () => {
+      new DatabaseQuery(stack, 'Query', {
+        ...minimalProps,
+      });
+
+      Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', {
+        PolicyDocument: {
+          Statement: Match.arrayWith([{
+            Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'],
+            Effect: 'Allow',
+            Resource: { Ref: 'ClusterSecretAttachment769E6258' },
+          }]),
+        },
+        Roles: [{ Ref: 'QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717' }],
+      });
+    });
+
+    it('uses admin user if provided', () => {
+      cluster = new redshift.Cluster(stack, 'Cluster With Provided Admin Secret', {
+        vpc,
+        vpcSubnets: {
+          subnetType: ec2.SubnetType.PUBLIC,
+        },
+        masterUser: {
+          masterUsername: 'admin',
+          masterPassword: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'),
+        },
+        publiclyAccessible: true,
+      });
+
+      new DatabaseQuery(stack, 'Query', {
+        ...minimalProps,
+        adminUser: secretsmanager.Secret.fromSecretNameV2(stack, 'Imported Admin User', 'imported-admin-secret'),
+        cluster,
+      });
+
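+      // Editorial note, not part of the original patch: `Secret.fromSecretNameV2`
+      // synthesizes a partial ARN that ends at the secret name, without the random
+      // suffix Secrets Manager appends to full ARNs, which is why the expected
+      // `Fn::Join` below terminates at ':secret:imported-admin-secret'. Roughly:
+      //
+      //   const secret = secretsmanager.Secret.fromSecretNameV2(stack, 'S', 'imported-admin-secret');
+      //   // secret.secretArn resolves to arn:<partition>:secretsmanager:<region>:<account>:secret:imported-admin-secret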
Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', {
+        adminUserArn: {
+          'Fn::Join': [
+            '',
+            [
+              'arn:',
+              {
+                Ref: 'AWS::Partition',
+              },
+              ':secretsmanager:',
+              {
+                Ref: 'AWS::Region',
+              },
+              ':',
+              {
+                Ref: 'AWS::AccountId',
+              },
+              ':secret:imported-admin-secret',
+            ],
+          ],
+        },
+      });
+    });
+
+    it('throws error if admin user not provided and cluster was provided an admin password', () => {
+      cluster = new redshift.Cluster(stack, 'Cluster With Provided Admin Secret', {
+        vpc,
+        vpcSubnets: {
+          subnetType: ec2.SubnetType.PUBLIC,
+        },
+        masterUser: {
+          masterUsername: 'admin',
+          masterPassword: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'),
+        },
+        publiclyAccessible: true,
+      });
+
+      expect(() => new DatabaseQuery(stack, 'Query', {
+        ...minimalProps,
+        cluster,
+      })).toThrowError('Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster did not generate admin user credentials (they were provided explicitly)');
+    });
+
+    it('throws error if admin user not provided and cluster was imported', () => {
+      cluster = redshift.Cluster.fromClusterAttributes(stack, 'Imported Cluster', {
+        clusterName: 'imported-cluster',
+        clusterEndpointAddress: 'imported-cluster.abcdefghijk.xx-west-1.redshift.amazonaws.com',
+        clusterEndpointPort: 5439,
+      });
+
+      expect(() => new DatabaseQuery(stack, 'Query', {
+        ...minimalProps,
+        cluster,
+      })).toThrowError('Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster was imported');
+    });
+  });
+
+  it('provides database params to Lambda handler', () => {
+    new DatabaseQuery(stack, 'Query', {
+      ...minimalProps,
+    });
+
+    Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', {
+      clusterName: {
+        Ref: 'ClusterEB0386A7',
+      },
+      adminUserArn: {
+        Ref: 'ClusterSecretAttachment769E6258',
+      },
+      databaseName: 'databaseName',
+      handler: 'handler',
+    });
+  });
+
+  it('grants statement permissions to handler', () => {
+    new DatabaseQuery(stack, 'Query', {
+      ...minimalProps,
+    });
+
+    Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', {
+      PolicyDocument: {
+        Statement: Match.arrayWith([{
+          Action: ['redshift-data:DescribeStatement', 'redshift-data:ExecuteStatement'],
+          Effect: 'Allow',
+          Resource: '*',
+        }]),
+      },
+      Roles: [{ Ref: 'QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717' }],
+    });
+  });
+
+  it('passes removal policy through', () => {
+    new DatabaseQuery(stack, 'Query', {
+      ...minimalProps,
+      removalPolicy: cdk.RemovalPolicy.DESTROY,
+    });
+
+    Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', {
+      DeletionPolicy: 'Delete',
+    });
+  });
+
+  it('passes applyRemovalPolicy through', () => {
+    const query = new DatabaseQuery(stack, 'Query', {
+      ...minimalProps,
+    });
+
+    query.applyRemovalPolicy(cdk.RemovalPolicy.DESTROY);
+
+    Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', {
+      DeletionPolicy: 'Delete',
+    });
+  });
+
+  it('passes getAtt through', () => {
+    const query = new DatabaseQuery(stack, 'Query', {
+      ...minimalProps,
+    });
+
+    expect(stack.resolve(query.getAtt('attribute'))).toStrictEqual({ 'Fn::GetAtt': ['Query435140A1', 'attribute'] });
+    expect(stack.resolve(query.getAttString('attribute'))).toStrictEqual({ 'Fn::GetAtt': ['Query435140A1', 'attribute'] });
+  });
+});
diff --git a/packages/@aws-cdk/aws-redshift/test/integ.database.expected.json
b/packages/@aws-cdk/aws-redshift/test/integ.database.expected.json new file mode 100644 index 0000000000000..b346d3e7abfb3 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/integ.database.expected.json @@ -0,0 +1,1377 @@ +{ + "Resources": { + "Vpc8378EB38": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "EnableDnsHostnames": true, + "EnableDnsSupport": true, + "InstanceTenancy": "default", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1Subnet5C2D37C4": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1RouteTable6C95E38E": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1RouteTableAssociation97140677": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet1RouteTable6C95E38E" + }, + "SubnetId": { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1DefaultRoute3DA9E72A": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet1RouteTable6C95E38E" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "DependsOn": [ + "VpcVPCGWBF912B6E" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1EIPD7E02669": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1NATGateway4D7517AA": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + }, + "AllocationId": { + "Fn::GetAtt": [ + "VpcPublicSubnet1EIPD7E02669", + "AllocationId" + ] + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2Subnet691E08A3": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.32.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2RouteTable94F7E489": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + 
"Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2RouteTableAssociationDD5762D8": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet2RouteTable94F7E489" + }, + "SubnetId": { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2DefaultRoute97F91067": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet2RouteTable94F7E489" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "DependsOn": [ + "VpcVPCGWBF912B6E" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2EIP3C605A87": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2NATGateway9182C01D": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + }, + "AllocationId": { + "Fn::GetAtt": [ + "VpcPublicSubnet2EIP3C605A87", + "AllocationId" + ] + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3SubnetBE12F0B6": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.64.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1c", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3RouteTable93458DBB": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3RouteTableAssociation1F1EDF02": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet3RouteTable93458DBB" + }, + "SubnetId": { + "Ref": "VpcPublicSubnet3SubnetBE12F0B6" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3DefaultRoute4697774F": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet3RouteTable93458DBB" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "DependsOn": [ + "VpcVPCGWBF912B6E" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3EIP3A666A23": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3NATGateway7640CD1D": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet3SubnetBE12F0B6" + }, + "AllocationId": { + 
"Fn::GetAtt": [ + "VpcPublicSubnet3EIP3A666A23", + "AllocationId" + ] + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1Subnet536B997A": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.96.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1RouteTableB2C5B500": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1RouteTableAssociation70C59FA6": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet1RouteTableB2C5B500" + }, + "SubnetId": { + "Ref": "VpcPrivateSubnet1Subnet536B997A" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1DefaultRouteBE02A9ED": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet1RouteTableB2C5B500" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VpcPublicSubnet1NATGateway4D7517AA" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2Subnet3788AAA1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.128.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2RouteTableA678073B": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2RouteTableAssociationA89CAD56": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet2RouteTableA678073B" + }, + "SubnetId": { + "Ref": "VpcPrivateSubnet2Subnet3788AAA1" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2DefaultRoute060D2087": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet2RouteTableA678073B" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VpcPublicSubnet2NATGateway9182C01D" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3SubnetF258B56E": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.160.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1c", + "MapPublicIpOnLaunch": false, + 
"Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3RouteTableD98824C7": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3RouteTableAssociation16BDDC43": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet3RouteTableD98824C7" + }, + "SubnetId": { + "Ref": "VpcPrivateSubnet3SubnetF258B56E" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3DefaultRoute94B74F0D": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet3RouteTableD98824C7" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VpcPublicSubnet3NATGateway7640CD1D" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcIGWD7BA715C": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcVPCGWBF912B6E": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "InternetGatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSubnetsDCFA5CB7": { + "Type": "AWS::Redshift::ClusterSubnetGroup", + "Properties": { + "Description": "Subnets for Cluster Redshift cluster", + "SubnetIds": [ + { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + }, + { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + }, + { + "Ref": "VpcPublicSubnet3SubnetBE12F0B6" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSecurityGroup0921994B": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Redshift security group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "Vpc8378EB38" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSecret6368BD0F": { + "Type": "AWS::SecretsManager::Secret", + "Properties": { + "GenerateSecretString": { + "ExcludeCharacters": "\"@/\\ '", + "GenerateStringKey": "password", + "PasswordLength": 30, + "SecretStringTemplate": "{\"username\":\"admin\"}" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSecretAttachment769E6258": { + "Type": "AWS::SecretsManager::SecretTargetAttachment", + "Properties": { + "SecretId": { + "Ref": "ClusterSecret6368BD0F" + }, + "TargetId": { + "Ref": "ClusterEB0386A7" + }, + "TargetType": "AWS::Redshift::Cluster" + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterEB0386A7": { + "Type": "AWS::Redshift::Cluster", + "Properties": { + "ClusterType": "multi-node", + "DBName": "my_db", + "MasterUsername": { + "Fn::Join": [ + "", + [ + "{{resolve:secretsmanager:", + { + "Ref": "ClusterSecret6368BD0F" + }, + ":SecretString:username::}}" + ] 
+ ] + }, + "MasterUserPassword": { + "Fn::Join": [ + "", + [ + "{{resolve:secretsmanager:", + { + "Ref": "ClusterSecret6368BD0F" + }, + ":SecretString:password::}}" + ] + ] + }, + "NodeType": "dc2.large", + "AllowVersionUpgrade": true, + "AutomatedSnapshotRetentionPeriod": 1, + "ClusterSubnetGroupName": { + "Ref": "ClusterSubnetsDCFA5CB7" + }, + "Encrypted": true, + "NumberOfNodes": 2, + "PubliclyAccessible": true, + "VpcSecurityGroupIds": [ + { + "Fn::GetAtt": [ + "ClusterSecurityGroup0921994B", + "GroupId" + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserSecretE2C04A69": { + "Type": "AWS::SecretsManager::Secret", + "Properties": { + "GenerateSecretString": { + "ExcludeCharacters": "\"@/\\ '", + "GenerateStringKey": "password", + "PasswordLength": 30, + "SecretStringTemplate": "{\"username\":\"awscdkredshiftclusterdatabaseuserc17d5ebd\"}" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserSecretAttachment02022609": { + "Type": "AWS::SecretsManager::SecretTargetAttachment", + "Properties": { + "SecretId": { + "Ref": "UserSecretE2C04A69" + }, + "TargetId": { + "Ref": "ClusterEB0386A7" + }, + "TargetType": "AWS::Redshift::Cluster" + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserProviderframeworkonEventServiceRole8FBA2FBD": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserProviderframeworkonEventServiceRoleDefaultPolicy9A9E044F": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "UserProviderframeworkonEventServiceRoleDefaultPolicy9A9E044F", + "Roles": [ + { + "Ref": "UserProviderframeworkonEventServiceRole8FBA2FBD" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserProviderframeworkonEvent4EC32885": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "UserProviderframeworkonEventServiceRole8FBA2FBD", + "Arn" + ] + }, + "Description": "AWS CDK resource provider framework - onEvent (aws-cdk-redshift-cluster-database/User/Resource/Provider)", + "Environment": { + "Variables": { + "USER_ON_EVENT_FUNCTION_ARN": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + }, + 
"Handler": "framework.onEvent", + "Runtime": "nodejs14.x", + "Timeout": 900 + }, + "DependsOn": [ + "UserProviderframeworkonEventServiceRoleDefaultPolicy9A9E044F", + "UserProviderframeworkonEventServiceRole8FBA2FBD" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserFDDCDD17": { + "Type": "Custom::RedshiftDatabaseQuery", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "UserProviderframeworkonEvent4EC32885", + "Arn" + ] + }, + "handler": "user", + "clusterName": { + "Ref": "ClusterEB0386A7" + }, + "adminUserArn": { + "Ref": "ClusterSecretAttachment769E6258" + }, + "databaseName": "my_db", + "username": "awscdkredshiftclusterdatabaseuserc17d5ebd", + "passwordSecretArn": { + "Ref": "UserSecretAttachment02022609" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivilegesProviderframeworkonEventServiceRoleDefaultPolicy3B6EF50C": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "UserTablePrivilegesProviderframeworkonEventServiceRoleDefaultPolicy3B6EF50C", + "Roles": [ + { + "Ref": "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivilegesProviderframeworkonEvent3F5C1851": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A", + "Arn" + ] + }, + "Description": "AWS CDK resource provider framework - onEvent (aws-cdk-redshift-cluster-database/User/TablePrivileges/Resource/Provider)", + "Environment": { + "Variables": { + "USER_ON_EVENT_FUNCTION_ARN": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + }, + "Handler": "framework.onEvent", + "Runtime": "nodejs14.x", + "Timeout": 900 + }, + "DependsOn": [ + "UserTablePrivilegesProviderframeworkonEventServiceRoleDefaultPolicy3B6EF50C", + "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivileges3829D614": { + 
"Type": "Custom::RedshiftDatabaseQuery", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "UserTablePrivilegesProviderframeworkonEvent3F5C1851", + "Arn" + ] + }, + "handler": "user-table-privileges", + "clusterName": { + "Ref": "ClusterEB0386A7" + }, + "adminUserArn": { + "Ref": "ClusterSecretAttachment769E6258" + }, + "databaseName": "my_db", + "username": { + "Fn::GetAtt": [ + "UserFDDCDD17", + "username" + ] + }, + "tablePrivileges": [ + { + "tableName": { + "Ref": "Table7ABB320E" + }, + "actions": [ + "INSERT", + "DELETE", + "SELECT" + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRoleDefaultPolicyDDD1388D": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "redshift-data:DescribeStatement", + "redshift-data:ExecuteStatement" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ], + "Effect": "Allow", + "Resource": { + "Ref": "ClusterSecretAttachment769E6258" + } + }, + { + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ], + "Effect": "Allow", + "Resource": { + "Ref": "UserSecretAttachment02022609" + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRoleDefaultPolicyDDD1388D", + "Roles": [ + { + "Ref": "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3Bucket148631C8" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3VersionKey1A4E04E7" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3VersionKey1A4E04E7" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717", + "Arn" + ] + }, + "Handler": "index.handler", + "Runtime": "nodejs14.x", + "Timeout": 60 + }, + "DependsOn": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRoleDefaultPolicyDDD1388D", + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "TableProviderframeworkonEventServiceRoleC3128F67": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": 
"sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "TableProviderframeworkonEventServiceRoleDefaultPolicyAD08715D": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "TableProviderframeworkonEventServiceRoleDefaultPolicyAD08715D", + "Roles": [ + { + "Ref": "TableProviderframeworkonEventServiceRoleC3128F67" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "TableProviderframeworkonEvent97F3951A": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "TableProviderframeworkonEventServiceRoleC3128F67", + "Arn" + ] + }, + "Description": "AWS CDK resource provider framework - onEvent (aws-cdk-redshift-cluster-database/Table/Resource/Provider)", + "Environment": { + "Variables": { + "USER_ON_EVENT_FUNCTION_ARN": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + }, + "Handler": "framework.onEvent", + "Runtime": "nodejs14.x", + "Timeout": 900 + }, + "DependsOn": [ + "TableProviderframeworkonEventServiceRoleDefaultPolicyAD08715D", + "TableProviderframeworkonEventServiceRoleC3128F67" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "Table7ABB320E": { + "Type": "Custom::RedshiftDatabaseQuery", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "TableProviderframeworkonEvent97F3951A", + "Arn" + ] + }, + "handler": "table", + "clusterName": { + "Ref": "ClusterEB0386A7" + }, + "adminUserArn": { + "Ref": "ClusterSecretAttachment769E6258" + }, + "databaseName": "my_db", + "tableName": { + "prefix": "awscdkredshiftclusterdatabaseTable24923533", + "generateSuffix": true + }, + "tableColumns": [ + { + "name": "col1", + "dataType": "varchar(4)" + }, + { + "name": "col2", + "dataType": "float" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + } + }, + "Parameters": { + "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3Bucket148631C8": { + "Type": "String", + "Description": "S3 bucket for asset \"483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49\"" + }, + "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3VersionKey1A4E04E7": { + "Type": "String", + "Description": "S3 key for asset version \"483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49\"" + }, + 
"AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49ArtifactHashEB952795": { + "Type": "String", + "Description": "Artifact hash for asset \"483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49\"" + }, + "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1": { + "Type": "String", + "Description": "S3 bucket for asset \"daeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1\"" + }, + "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F": { + "Type": "String", + "Description": "S3 key for asset version \"daeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1\"" + }, + "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1ArtifactHashA521A16F": { + "Type": "String", + "Description": "Artifact hash for asset \"daeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1\"" + } + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-redshift/test/integ.database.ts b/packages/@aws-cdk/aws-redshift/test/integ.database.ts new file mode 100644 index 0000000000000..3a3b955a2b5aa --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/integ.database.ts @@ -0,0 +1,44 @@ +#!/usr/bin/env node +/// !cdk-integ pragma:ignore-assets +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as cdk from '@aws-cdk/core'; +import * as constructs from 'constructs'; +import * as redshift from '../lib'; + +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-cdk-redshift-cluster-database'); +cdk.Aspects.of(stack).add({ + visit(node: constructs.IConstruct) { + if (cdk.CfnResource.isCfnResource(node)) { + node.applyRemovalPolicy(cdk.RemovalPolicy.DESTROY); + } + }, +}); + +const vpc = new ec2.Vpc(stack, 'Vpc'); +const databaseName = 'my_db'; +const cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + }, + defaultDatabaseName: databaseName, + publiclyAccessible: true, +}); + +const databaseOptions = { + cluster: cluster, + databaseName: databaseName, +}; +const user = new redshift.User(stack, 'User', databaseOptions); +const table = new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], +}); +table.grant(user, redshift.TableAction.INSERT, redshift.TableAction.DELETE); + +app.synth(); diff --git a/packages/@aws-cdk/aws-redshift/test/privileges.test.ts b/packages/@aws-cdk/aws-redshift/test/privileges.test.ts new file mode 100644 index 0000000000000..91419b2eaa709 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/privileges.test.ts @@ -0,0 +1,113 @@ +import { Template } from '@aws-cdk/assertions'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as cdk from '@aws-cdk/core'; +import * as redshift from '../lib'; + +describe('table privileges', () => { + let stack: cdk.Stack; + let vpc: ec2.Vpc; + let cluster: redshift.ICluster; + const databaseName = 'databaseName'; + let databaseOptions: redshift.DatabaseOptions; + const tableColumns = [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }]; + let table: redshift.ITable; + let table2: redshift.ITable; + + beforeEach(() => { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + 
masterUsername: 'admin', + }, + publiclyAccessible: true, + }); + databaseOptions = { + cluster, + databaseName, + }; + table = redshift.Table.fromTableAttributes(stack, 'Table', { + tableName: 'tableName', + tableColumns, + cluster, + databaseName, + }); + table2 = redshift.Table.fromTableAttributes(stack, 'Table 2', { + tableName: 'tableName2', + tableColumns, + cluster, + databaseName, + }); + }); + + it('adding table privilege creates custom resource', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.INSERT); + user.addTablePrivileges(table2, redshift.TableAction.SELECT, redshift.TableAction.DROP); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['INSERT'] }, { tableName: 'tableName2', actions: ['SELECT', 'DROP'] }], + }); + }); + + it('table privileges are deduplicated', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.INSERT, redshift.TableAction.INSERT, redshift.TableAction.DELETE); + user.addTablePrivileges(table, redshift.TableAction.SELECT, redshift.TableAction.DELETE); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['SELECT', 'DELETE', 'INSERT'] }], + }); + }); + + it('table privileges are removed when ALL specified', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.ALL, redshift.TableAction.INSERT); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['ALL'] }], + }); + }); + + it('SELECT table privilege is added when UPDATE or DELETE is specified', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.UPDATE); + user.addTablePrivileges(table2, redshift.TableAction.DELETE); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['UPDATE', 'SELECT'] }, { tableName: 'tableName2', actions: ['DELETE', 'SELECT'] }], + }); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/table.test.ts b/packages/@aws-cdk/aws-redshift/test/table.test.ts new file mode 100644 index 0000000000000..97f66b57042f5 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/table.test.ts @@ -0,0 +1,138 @@ +import { Template } from '@aws-cdk/assertions'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as cdk from '@aws-cdk/core'; +import * as redshift from '../lib'; + +describe('cluster table', () => { + const tableName = 'tableName'; + const tableColumns = [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }]; + + let stack: cdk.Stack; + let vpc: ec2.Vpc; + let cluster: redshift.ICluster; + let databaseOptions: redshift.DatabaseOptions; + + beforeEach(() => { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + 
subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + }, + publiclyAccessible: true, + }); + databaseOptions = { + cluster: cluster, + databaseName: 'databaseName', + }; + }); + + it('creates using custom resource', () => { + new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + tableName: { + prefix: 'Table', + generateSuffix: true, + }, + tableColumns, + }); + }); + + it('tableName property is pulled from custom resource', () => { + const table = new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + expect(stack.resolve(table.tableName)).toStrictEqual({ + Ref: 'Table7ABB320E', + }); + }); + + it('uses table name when provided', () => { + new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableName, + tableColumns, + }); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + tableName: { + prefix: tableName, + generateSuffix: false, + }, + }); + }); + + it('can import from name and columns', () => { + const table = redshift.Table.fromTableAttributes(stack, 'Table', { + tableName, + tableColumns, + cluster, + databaseName: 'databaseName', + }); + + expect(table.tableName).toBe(tableName); + expect(table.tableColumns).toBe(tableColumns); + expect(table.cluster).toBe(cluster); + expect(table.databaseName).toBe('databaseName'); + }); + + it('grant adds privileges to user', () => { + const user = redshift.User.fromUserAttributes(stack, 'User', { + ...databaseOptions, + username: 'username', + password: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'), + }); + const table = redshift.Table.fromTableAttributes(stack, 'Table', { + tableName, + tableColumns, + cluster, + databaseName: 'databaseName', + }); + + table.grant(user, redshift.TableAction.INSERT); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + handler: 'user-table-privileges', + }); + }); + + it('retains table on deletion by default', () => { + new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + handler: 'table', + }, + DeletionPolicy: 'Retain', + }); + }); + + it('destroys table on deletion if requested', () => { + const table = new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + table.applyRemovalPolicy(cdk.RemovalPolicy.DESTROY); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + handler: 'table', + }, + DeletionPolicy: 'Delete', + }); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/user.test.ts b/packages/@aws-cdk/aws-redshift/test/user.test.ts new file mode 100644 index 0000000000000..24b9bc748cc8f --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/user.test.ts @@ -0,0 +1,215 @@ +import { Match, Template } from '@aws-cdk/assertions'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as kms from '@aws-cdk/aws-kms'; +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; +import * as cdk from '@aws-cdk/core'; +import * as redshift from '../lib'; + +describe('cluster user', () => { + let stack: cdk.Stack; + let vpc: ec2.Vpc; + let cluster: redshift.ICluster; + const databaseName = 'databaseName'; + let databaseOptions: redshift.DatabaseOptions; + + beforeEach(() => { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + cluster = new 
redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + }, + publiclyAccessible: true, + }); + databaseOptions = { + cluster, + databaseName, + }; + }); + + it('creates using custom resource', () => { + new redshift.User(stack, 'User', databaseOptions); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + passwordSecretArn: { Ref: 'UserSecretAttachment02022609' }, + }); + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([{ + Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], + Effect: 'Allow', + Resource: { Ref: 'UserSecretAttachment02022609' }, + }]), + }, + Roles: [{ Ref: 'QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717' }], + }); + }); + + it('creates database secret', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::Secret', { + GenerateSecretString: { + SecretStringTemplate: `{"username":"${cdk.Names.uniqueId(user).toLowerCase()}"}`, + }, + }); + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::SecretTargetAttachment', { + SecretId: { Ref: 'UserSecretE2C04A69' }, + }); + }); + + it('username property is pulled from custom resource', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + expect(stack.resolve(user.username)).toStrictEqual({ + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }); + }); + + it('password property is pulled from attached secret', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + expect(stack.resolve(user.password)).toStrictEqual({ + 'Fn::Join': [ + '', + [ + '{{resolve:secretsmanager:', + { + Ref: 'UserSecretAttachment02022609', + }, + ':SecretString:password::}}', + ], + ], + }); + }); + + it('uses username when provided', () => { + const username = 'username'; + + new redshift.User(stack, 'User', { + ...databaseOptions, + username, + }); + + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::Secret', { + GenerateSecretString: { + SecretStringTemplate: `{"username":"${username}"}`, + }, + }); + }); + + it('can import from username and password', () => { + const userSecret = secretsmanager.Secret.fromSecretNameV2(stack, 'User Secret', 'redshift-user-secret'); + + const user = redshift.User.fromUserAttributes(stack, 'User', { + ...databaseOptions, + username: userSecret.secretValueFromJson('username').toString(), + password: userSecret.secretValueFromJson('password'), + }); + + expect(stack.resolve(user.username)).toStrictEqual({ + 'Fn::Join': [ + '', + [ + '{{resolve:secretsmanager:arn:', + { + Ref: 'AWS::Partition', + }, + ':secretsmanager:', + { + Ref: 'AWS::Region', + }, + ':', + { + Ref: 'AWS::AccountId', + }, + ':secret:redshift-user-secret:SecretString:username::}}', + ], + ], + }); + expect(stack.resolve(user.password)).toStrictEqual({ + 'Fn::Join': [ + '', + [ + '{{resolve:secretsmanager:arn:', + { + Ref: 'AWS::Partition', + }, + ':secretsmanager:', + { + Ref: 'AWS::Region', + }, + ':', + { + Ref: 'AWS::AccountId', + }, + ':secret:redshift-user-secret:SecretString:password::}}', + ], + ], + }); + }); + + it('destroys user on deletion by default', () => { + new redshift.User(stack, 'User', databaseOptions); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + 
passwordSecretArn: { Ref: 'UserSecretAttachment02022609' }, + }, + DeletionPolicy: 'Delete', + }); + }); + + it('retains user on deletion if requested', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.applyRemovalPolicy(cdk.RemovalPolicy.RETAIN); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + passwordSecretArn: { Ref: 'UserSecretAttachment02022609' }, + }, + DeletionPolicy: 'Retain', + }); + }); + + it('uses encryption key if one is provided', () => { + const encryptionKey = new kms.Key(stack, 'Key'); + + new redshift.User(stack, 'User', { + ...databaseOptions, + encryptionKey, + }); + + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::Secret', { + KmsKeyId: stack.resolve(encryptionKey.keyArn), + }); + }); + + it('addTablePrivileges grants access to table', () => { + const user = redshift.User.fromUserAttributes(stack, 'User', { + ...databaseOptions, + username: 'username', + password: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'), + }); + const table = redshift.Table.fromTableAttributes(stack, 'Table', { + tableName: 'tableName', + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster, + databaseName: 'databaseName', + }); + + user.addTablePrivileges(table, redshift.TableAction.INSERT); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + handler: 'user-table-privileges', + }); + }); +});
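
Aside on the privileges tests above: they pin down three normalization rules for addTablePrivileges. Repeated actions are deduplicated, ALL subsumes every other action, and UPDATE or DELETE implies SELECT (both have to read the rows they modify). The following is a minimal TypeScript sketch of those rules only, under stated assumptions: the helper name normalizeActions is hypothetical, not part of the module's API, and the sketch normalizes a single action list rather than merging repeated addTablePrivileges calls for the same table, which the deduplication test also exercises.

import { TableAction } from '@aws-cdk/aws-redshift';

// Hypothetical sketch of the normalization behavior the privileges tests
// assert; not the module's actual internal implementation.
function normalizeActions(actions: TableAction[]): TableAction[] {
  // Deduplicate repeated actions.
  const unique = new Set(actions);
  // ALL subsumes every other privilege, so nothing else needs to be kept.
  if (unique.has(TableAction.ALL)) {
    return [TableAction.ALL];
  }
  // UPDATE and DELETE must locate the rows they act on, so SELECT is
  // granted alongside them.
  if (unique.has(TableAction.UPDATE) || unique.has(TableAction.DELETE)) {
    unique.add(TableAction.SELECT);
  }
  return Array.from(unique);
}

Under these assumptions, normalizeActions([TableAction.UPDATE]) yields [TableAction.UPDATE, TableAction.SELECT], matching the 'SELECT table privilege is added when UPDATE or DELETE is specified' test, and normalizeActions([TableAction.ALL, TableAction.INSERT]) collapses to [TableAction.ALL], matching the 'table privileges are removed when ALL specified' test.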