diff --git a/packages/@aws-cdk/assertions/README.md b/packages/@aws-cdk/assertions/README.md index 1fc0bb28e0cd3..d2763d0ba24ef 100644 --- a/packages/@aws-cdk/assertions/README.md +++ b/packages/@aws-cdk/assertions/README.md @@ -321,6 +321,47 @@ assert.hasResourceProperties('Foo::Bar', Match.objectLike({ }}); ``` +### Serialized JSON + +Often, we find that some CloudFormation Resource types declare properties as a string, +but actually expect JSON serialized as a string. +For example, the [`BuildSpec` property of `AWS::CodeBuild::Project`][Pipeline BuildSpec], +the [`Definition` property of `AWS::StepFunctions::StateMachine`][StateMachine Definition], +to name a couple. + +The `Match.serializedJson()` matcher allows deep matching within a stringified JSON. + +```ts +// Given a template - +// { +// "Resources": { +// "MyBar": { +// "Type": "Foo::Bar", +// "Properties": { +// "Baz": "{ \"Fred\": [\"Waldo\", \"Willow\"] }" +// } +// } +// } +// } + +// The following will NOT throw an assertion error +assert.hasResourceProperties('Foo::Bar', { + Baz: Match.serializedJson({ + Fred: Match.arrayWith(["Waldo"]), + }), +}); + +// The following will throw an assertion error +assert.hasResourceProperties('Foo::Bar', { + Baz: Match.serializedJson({ + Fred: ["Waldo", "Johnny"], + }), +}); +``` + +[Pipeline BuildSpec]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-source.html#cfn-codebuild-project-source-buildspec +[StateMachine Definition]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-definition + ## Capturing Values This matcher APIs documented above allow capturing values in the matching entry diff --git a/packages/@aws-cdk/assertions/lib/match.ts b/packages/@aws-cdk/assertions/lib/match.ts index 5c7e3fad8e90c..4fea0ed0f713e 100644 --- a/packages/@aws-cdk/assertions/lib/match.ts +++ b/packages/@aws-cdk/assertions/lib/match.ts @@ -65,6 
+65,14 @@ export abstract class Match { return new NotMatch('not', pattern); } + /** + * Matches any string-encoded JSON and applies the specified pattern after parsing it. + * @param pattern the pattern to match after parsing the encoded JSON. + */ + public static serializedJson(pattern: any): Matcher { + return new SerializedJson('serializedJson', pattern); + } + /** * Matches any non-null value at the target. */ @@ -265,6 +273,39 @@ class ObjectMatch extends Matcher { } } +class SerializedJson extends Matcher { + constructor( + public readonly name: string, + private readonly pattern: any, + ) { + super(); + }; + + public test(actual: any): MatchResult { + const result = new MatchResult(actual); + if (getType(actual) !== 'string') { + result.push(this, [], `Expected JSON as a string but found ${getType(actual)}`); + return result; + } + let parsed; + try { + parsed = JSON.parse(actual); + } catch (err) { + if (err instanceof SyntaxError) { + result.push(this, [], `Invalid JSON string: ${actual}`); + return result; + } else { + throw err; + } + } + + const matcher = Matcher.isMatcher(this.pattern) ? 
this.pattern : new LiteralMatch(this.name, this.pattern); + const innerResult = matcher.test(parsed); + result.compose(`(${this.name})`, innerResult); + return result; + } +} + class NotMatch extends Matcher { constructor( public readonly name: string, diff --git a/packages/@aws-cdk/assertions/test/match.test.ts b/packages/@aws-cdk/assertions/test/match.test.ts index 268810857f9a8..b0c92a2da2c8f 100644 --- a/packages/@aws-cdk/assertions/test/match.test.ts +++ b/packages/@aws-cdk/assertions/test/match.test.ts @@ -323,18 +323,63 @@ describe('Matchers', () => { expectFailure(matcher, {}, ['Missing key at /foo']); }); }); + + describe('serializedJson()', () => { + let matcher: Matcher; + + test('all types', () => { + matcher = Match.serializedJson({ Foo: 'Bar', Baz: 3, Boo: true, Fred: [1, 2] }); + expectPass(matcher, '{ "Foo": "Bar", "Baz": 3, "Boo": true, "Fred": [1, 2] }'); + }); + + test('simple match', () => { + matcher = Match.serializedJson({ Foo: 'Bar' }); + expectPass(matcher, '{ "Foo": "Bar" }'); + + expectFailure(matcher, '{ "Foo": "Baz" }', ['Expected Bar but received Baz at (serializedJson)/Foo']); + expectFailure(matcher, '{ "Foo": 4 }', ['Expected type string but received number at (serializedJson)/Foo']); + expectFailure(matcher, '{ "Bar": "Baz" }', [ + 'Unexpected key at (serializedJson)/Bar', + 'Missing key at (serializedJson)/Foo', + ]); + }); + + test('nested matcher', () => { + matcher = Match.serializedJson(Match.objectLike({ + Foo: Match.arrayWith(['Bar']), + })); + + expectPass(matcher, '{ "Foo": ["Bar"] }'); + expectPass(matcher, '{ "Foo": ["Bar", "Baz"] }'); + expectPass(matcher, '{ "Foo": ["Bar", "Baz"], "Fred": "Waldo" }'); + + expectFailure(matcher, '{ "Foo": ["Baz"] }', ['Missing element [Bar] at pattern index 0 at (serializedJson)/Foo']); + expectFailure(matcher, '{ "Bar": ["Baz"] }', ['Missing key at (serializedJson)/Foo']); + }); + + test('invalid json string', () => { + matcher = Match.serializedJson({ Foo: 'Bar' }); + + 
expectFailure(matcher, '{ "Foo"', [/invalid JSON string/i]); + }); + }); }); function expectPass(matcher: Matcher, target: any): void { - expect(matcher.test(target).hasFailed()).toEqual(false); + const result = matcher.test(target); + if (result.hasFailed()) { + fail(result.toHumanStrings()); // eslint-disable-line jest/no-jasmine-globals + } } function expectFailure(matcher: Matcher, target: any, expected: (string | RegExp)[] = []): void { const result = matcher.test(target); expect(result.failCount).toBeGreaterThan(0); const actual = result.toHumanStrings(); - if (expected.length > 0) { - expect(actual.length).toEqual(expected.length); + if (expected.length > 0 && actual.length !== expected.length) { + // only do this if the lengths are different, so as to display a nice failure message. + // otherwise need to use `toMatch()` to support RegExp + expect(actual).toEqual(expected); } for (let i = 0; i < expected.length; i++) { const e = expected[i]; diff --git a/packages/@aws-cdk/aws-batch/README.md b/packages/@aws-cdk/aws-batch/README.md index 48d5b7edf65d8..f2900da8cda0f 100644 --- a/packages/@aws-cdk/aws-batch/README.md +++ b/packages/@aws-cdk/aws-batch/README.md @@ -37,7 +37,7 @@ For more information on **AWS Batch** visit the [AWS Docs for Batch](https://doc ## Compute Environment -At the core of AWS Batch is the compute environment. All batch jobs are processed within a compute environment, which uses resource like OnDemand or Spot EC2 instances. +At the core of AWS Batch is the compute environment. All batch jobs are processed within a compute environment, which uses resource like OnDemand/Spot EC2 instances or Fargate. In **MANAGED** mode, AWS will handle the provisioning of compute resources to accommodate the demand. Otherwise, in **UNMANAGED** mode, you will need to manage the provisioning of those resources. 
@@ -74,6 +74,21 @@ const spotEnvironment = new batch.ComputeEnvironment(stack, 'MySpotEnvironment', }); ``` +### Fargate Compute Environment + +It is possible to have AWS Batch submit jobs to be run on Fargate compute resources. Below is an example of how this can be done: + +```ts +const vpc = new ec2.Vpc(this, 'VPC'); + +const fargateSpotEnvironment = new batch.ComputeEnvironment(stack, 'MyFargateEnvironment', { + computeResources: { + type: batch.ComputeResourceType.FARGATE_SPOT, + vpc, + }, +}); +``` + ### Understanding Progressive Allocation Strategies AWS Batch uses an [allocation strategy](https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html) to determine what compute resource will efficiently handle incoming job requests. By default, **BEST_FIT** will pick an available compute instance based on vCPU requirements. If none exist, the job will wait until resources become available. However, with this strategy, you may have jobs waiting in the queue unnecessarily despite having more powerful instances available. Below is an example of how that situation might look like: diff --git a/packages/@aws-cdk/aws-batch/lib/compute-environment.ts b/packages/@aws-cdk/aws-batch/lib/compute-environment.ts index 18a2d1a446325..408e16c7bb98a 100644 --- a/packages/@aws-cdk/aws-batch/lib/compute-environment.ts +++ b/packages/@aws-cdk/aws-batch/lib/compute-environment.ts @@ -6,7 +6,7 @@ import { CfnComputeEnvironment } from './batch.generated'; /** * Property to specify if the compute environment - * uses On-Demand or SpotFleet compute resources. + * uses On-Demand, SpotFleet, Fargate, or Fargate Spot compute resources. */ export enum ComputeResourceType { /** @@ -18,6 +18,20 @@ export enum ComputeResourceType { * Resources will be EC2 SpotFleet resources. */ SPOT = 'SPOT', + + /** + * Resources will be Fargate resources. + */ + FARGATE = 'FARGATE', + + /** + * Resources will be Fargate Spot resources. 
+ * + * Fargate Spot uses spare capacity in the AWS cloud to run your fault-tolerant, + * time-flexible jobs at up to a 70% discount. If AWS needs the resources back, + * jobs running on Fargate Spot will be interrupted with two minutes of notification. + */ + FARGATE_SPOT = 'FARGATE_SPOT', } /** @@ -135,7 +149,7 @@ export interface ComputeResources { readonly vpcSubnets?: ec2.SubnetSelection; /** - * The type of compute environment: ON_DEMAND or SPOT. + * The type of compute environment: ON_DEMAND, SPOT, FARGATE, or FARGATE_SPOT. * * @default ON_DEMAND */ @@ -340,7 +354,10 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment physicalName: props.computeEnvironmentName, }); - this.validateProps(props); + const isFargate = ComputeResourceType.FARGATE === props.computeResources?.type + || ComputeResourceType.FARGATE_SPOT === props.computeResources?.type;; + + this.validateProps(props, isFargate); const spotFleetRole = this.getSpotFleetRole(props); let computeResources: CfnComputeEnvironment.ComputeResourcesProperty | undefined; @@ -348,36 +365,38 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment // Only allow compute resources to be set when using MANAGED type if (props.computeResources && this.isManaged(props)) { computeResources = { - allocationStrategy: props.computeResources.allocationStrategy - || ( - props.computeResources.type === ComputeResourceType.SPOT - ? AllocationStrategy.SPOT_CAPACITY_OPTIMIZED - : AllocationStrategy.BEST_FIT - ), bidPercentage: props.computeResources.bidPercentage, desiredvCpus: props.computeResources.desiredvCpus, ec2KeyPair: props.computeResources.ec2KeyPair, imageId: props.computeResources.image && props.computeResources.image.getImage(this).imageId, - instanceRole: props.computeResources.instanceRole - ? 
props.computeResources.instanceRole - : new iam.CfnInstanceProfile(this, 'Instance-Profile', { - roles: [new iam.Role(this, 'Ecs-Instance-Role', { - assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'), - managedPolicies: [ - iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonEC2ContainerServiceforEC2Role'), - ], - }).roleName], - }).attrArn, - instanceTypes: this.buildInstanceTypes(props.computeResources.instanceTypes), launchTemplate: props.computeResources.launchTemplate, maxvCpus: props.computeResources.maxvCpus || 256, - minvCpus: props.computeResources.minvCpus || 0, placementGroup: props.computeResources.placementGroup, securityGroupIds: this.buildSecurityGroupIds(props.computeResources.vpc, props.computeResources.securityGroups), spotIamFleetRole: spotFleetRole?.roleArn, subnets: props.computeResources.vpc.selectSubnets(props.computeResources.vpcSubnets).subnetIds, tags: props.computeResources.computeResourcesTags, type: props.computeResources.type || ComputeResourceType.ON_DEMAND, + ...(!isFargate ? { + allocationStrategy: props.computeResources.allocationStrategy + || ( + props.computeResources.type === ComputeResourceType.SPOT + ? AllocationStrategy.SPOT_CAPACITY_OPTIMIZED + : AllocationStrategy.BEST_FIT + ), + instanceRole: props.computeResources.instanceRole + ? props.computeResources.instanceRole + : new iam.CfnInstanceProfile(this, 'Instance-Profile', { + roles: [new iam.Role(this, 'Ecs-Instance-Role', { + assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'), + managedPolicies: [ + iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonEC2ContainerServiceforEC2Role'), + ], + }).roleName], + }).attrArn, + instanceTypes: this.buildInstanceTypes(props.computeResources.instanceTypes), + minvCpus: props.computeResources.minvCpus || 0, + } : {}), }; } @@ -414,7 +433,7 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment /** * Validates the properties provided for a new batch compute environment. 
*/ - private validateProps(props: ComputeEnvironmentProps) { + private validateProps(props: ComputeEnvironmentProps, isFargate: boolean) { if (props === undefined) { return; } @@ -427,41 +446,100 @@ export class ComputeEnvironment extends Resource implements IComputeEnvironment throw new Error('computeResources is missing but required on a managed compute environment'); } - // Setting a bid percentage is only allowed on SPOT resources + - // Cannot use SPOT_CAPACITY_OPTIMIZED when using ON_DEMAND if (props.computeResources) { - if (props.computeResources.type === ComputeResourceType.ON_DEMAND) { - // VALIDATE FOR ON_DEMAND + if (isFargate) { + // VALIDATE FOR FARGATE - // Bid percentage is not allowed + // Bid percentage cannot be set for Fargate evnvironments if (props.computeResources.bidPercentage !== undefined) { - throw new Error('Setting the bid percentage is only allowed for SPOT type resources on a batch compute environment'); + throw new Error('Bid percentage must not be set for Fargate compute environments'); } - // SPOT_CAPACITY_OPTIMIZED allocation is not allowed - if (props.computeResources.allocationStrategy && props.computeResources.allocationStrategy === AllocationStrategy.SPOT_CAPACITY_OPTIMIZED) { - throw new Error('The SPOT_CAPACITY_OPTIMIZED allocation strategy is only allowed if the environment is a SPOT type compute environment'); + // Allocation strategy cannot be set for Fargate evnvironments + if (props.computeResources.allocationStrategy !== undefined) { + throw new Error('Allocation strategy must not be set for Fargate compute environments'); } - } else { - // VALIDATE FOR SPOT - // Bid percentage must be from 0 - 100 - if (props.computeResources.bidPercentage !== undefined && - (props.computeResources.bidPercentage < 0 || props.computeResources.bidPercentage > 100)) { - throw new Error('Bid percentage can only be a value between 0 and 100'); + // Desired vCPUs cannot be set for Fargate evnvironments + if 
(props.computeResources.desiredvCpus !== undefined) { + throw new Error('Desired vCPUs must not be set for Fargate compute environments'); } - } - if (props.computeResources.minvCpus) { - // minvCpus cannot be less than 0 - if (props.computeResources.minvCpus < 0) { - throw new Error('Minimum vCpus for a batch compute environment cannot be less than 0'); + // Image ID cannot be set for Fargate evnvironments + if (props.computeResources.image !== undefined) { + throw new Error('Image must not be set for Fargate compute environments'); } - // minvCpus cannot exceed max vCpus - if (props.computeResources.maxvCpus && - props.computeResources.minvCpus > props.computeResources.maxvCpus) { - throw new Error('Minimum vCpus cannot be greater than the maximum vCpus'); + // Instance types cannot be set for Fargate evnvironments + if (props.computeResources.instanceTypes !== undefined) { + throw new Error('Instance types must not be set for Fargate compute environments'); + } + + // EC2 key pair cannot be set for Fargate evnvironments + if (props.computeResources.ec2KeyPair !== undefined) { + throw new Error('EC2 key pair must not be set for Fargate compute environments'); + } + + // Instance role cannot be set for Fargate evnvironments + if (props.computeResources.instanceRole !== undefined) { + throw new Error('Instance role must not be set for Fargate compute environments'); + } + + // Launch template cannot be set for Fargate evnvironments + if (props.computeResources.launchTemplate !== undefined) { + throw new Error('Launch template must not be set for Fargate compute environments'); + } + + // Min vCPUs cannot be set for Fargate evnvironments + if (props.computeResources.minvCpus !== undefined) { + throw new Error('Min vCPUs must not be set for Fargate compute environments'); + } + + // Placement group cannot be set for Fargate evnvironments + if (props.computeResources.placementGroup !== undefined) { + throw new Error('Placement group must not be set for Fargate compute 
environments'); + } + + // Spot fleet role cannot be set for Fargate evnvironments + if (props.computeResources.spotFleetRole !== undefined) { + throw new Error('Spot fleet role must not be set for Fargate compute environments'); + } + } else { + // VALIDATE FOR ON_DEMAND AND SPOT + if (props.computeResources.minvCpus) { + // minvCpus cannot be less than 0 + if (props.computeResources.minvCpus < 0) { + throw new Error('Minimum vCpus for a batch compute environment cannot be less than 0'); + } + + // minvCpus cannot exceed max vCpus + if (props.computeResources.maxvCpus && + props.computeResources.minvCpus > props.computeResources.maxvCpus) { + throw new Error('Minimum vCpus cannot be greater than the maximum vCpus'); + } + } + // Setting a bid percentage is only allowed on SPOT resources + + // Cannot use SPOT_CAPACITY_OPTIMIZED when using ON_DEMAND + if (props.computeResources.type === ComputeResourceType.ON_DEMAND) { + // VALIDATE FOR ON_DEMAND + + // Bid percentage is not allowed + if (props.computeResources.bidPercentage !== undefined) { + throw new Error('Setting the bid percentage is only allowed for SPOT type resources on a batch compute environment'); + } + + // SPOT_CAPACITY_OPTIMIZED allocation is not allowed + if (props.computeResources.allocationStrategy && props.computeResources.allocationStrategy === AllocationStrategy.SPOT_CAPACITY_OPTIMIZED) { + throw new Error('The SPOT_CAPACITY_OPTIMIZED allocation strategy is only allowed if the environment is a SPOT type compute environment'); + } + } else if (props.computeResources.type === ComputeResourceType.SPOT) { + // VALIDATE FOR SPOT + + // Bid percentage must be from 0 - 100 + if (props.computeResources.bidPercentage !== undefined && + (props.computeResources.bidPercentage < 0 || props.computeResources.bidPercentage > 100)) { + throw new Error('Bid percentage can only be a value between 0 and 100'); + } } } } diff --git a/packages/@aws-cdk/aws-batch/lib/job-definition.ts 
b/packages/@aws-cdk/aws-batch/lib/job-definition.ts index 88107b0266615..dab8515acb6d1 100644 --- a/packages/@aws-cdk/aws-batch/lib/job-definition.ts +++ b/packages/@aws-cdk/aws-batch/lib/job-definition.ts @@ -52,6 +52,21 @@ export enum LogDriver { SYSLOG = 'syslog' } +/** + * Platform capabilities + */ +export enum PlatformCapabilities { + /** + * Specifies EC2 environment. + */ + EC2 = 'EC2', + + /** + * Specifies Fargate environment. + */ + FARGATE = 'FARGATE' +} + /** * Log configuration options to send to a custom log driver for the container. */ @@ -135,9 +150,9 @@ export interface JobDefinitionContainer { /** * The hard limit (in MiB) of memory to present to the container. If your container attempts to exceed - * the memory specified here, the container is killed. You must specify at least 4 MiB of memory for a job. + * the memory specified here, the container is killed. You must specify at least 4 MiB of memory for EC2 and 512 MiB for Fargate. * - * @default 4 + * @default - 4 for EC2, 512 for Fargate */ readonly memoryLimitMiB?: number; @@ -185,9 +200,9 @@ export interface JobDefinitionContainer { /** * The number of vCPUs reserved for the container. Each vCPU is equivalent to - * 1,024 CPU shares. You must specify at least one vCPU. + * 1,024 CPU shares. You must specify at least one vCPU for EC2 and 0.25 for Fargate. * - * @default 1 + * @default - 1 for EC2, 0.25 for Fargate */ readonly vcpus?: number; @@ -197,6 +212,28 @@ export interface JobDefinitionContainer { * @default - No data volumes will be used. */ readonly volumes?: ecs.Volume[]; + + /** + * Fargate platform version + * + * @default - LATEST platform version will be used + */ + readonly platformVersion?: ecs.FargatePlatformVersion + + /** + * The IAM role that AWS Batch can assume. + * Required when using Fargate. 
+ * + * @default - None + */ + readonly executionRole?: iam.IRole; + + /** + * Whether or not to assign a public IP to the job + * + * @default - false + */ + readonly assignPublicIp?: boolean } /** @@ -252,6 +289,13 @@ export interface JobDefinitionProps { * @default - undefined */ readonly timeout?: Duration; + + /** + * The platform capabilities required by the job definition. + * + * @default - EC2 + */ + readonly platformCapabilities?: PlatformCapabilities[]; } /** @@ -382,16 +426,20 @@ export class JobDefinition extends Resource implements IJobDefinition { physicalName: props.jobDefinitionName, }); + this.validateProps(props); + this.imageConfig = new JobDefinitionImageConfig(this, props.container); + const isFargate = !!props.platformCapabilities?.includes(PlatformCapabilities.FARGATE); + const jobDef = new CfnJobDefinition(this, 'Resource', { jobDefinitionName: props.jobDefinitionName, - containerProperties: this.buildJobContainer(props.container), + containerProperties: this.buildJobContainer(props.container, isFargate), type: 'container', nodeProperties: props.nodeProps ? { mainNode: props.nodeProps.mainNode, - nodeRangeProperties: this.buildNodeRangeProps(props.nodeProps), + nodeRangeProperties: this.buildNodeRangeProps(props.nodeProps, isFargate), numNodes: props.nodeProps.count, } : undefined, @@ -402,6 +450,7 @@ export class JobDefinition extends Resource implements IJobDefinition { timeout: { attemptDurationSeconds: props.timeout ? props.timeout.toSeconds() : undefined, }, + platformCapabilities: props.platformCapabilities ?? 
[PlatformCapabilities.EC2], }); this.jobDefinitionArn = this.getResourceArnAttribute(jobDef.ref, { @@ -412,7 +461,7 @@ export class JobDefinition extends Resource implements IJobDefinition { this.jobDefinitionName = this.getResourceNameAttribute(jobDef.ref); } - private deserializeEnvVariables(env?: { [name: string]: string}): CfnJobDefinition.EnvironmentProperty[] | undefined { + private deserializeEnvVariables(env?: { [name: string]: string }): CfnJobDefinition.EnvironmentProperty[] | undefined { const vars = new Array(); if (env === undefined) { @@ -426,7 +475,31 @@ export class JobDefinition extends Resource implements IJobDefinition { return vars; } - private buildJobContainer(container?: JobDefinitionContainer): CfnJobDefinition.ContainerPropertiesProperty | undefined { + /** + * Validates the properties provided for a new job definition. + */ + private validateProps(props: JobDefinitionProps) { + if (props === undefined) { + return; + } + + if (props.platformCapabilities !== undefined && props.platformCapabilities.includes(PlatformCapabilities.FARGATE) + && props.container.executionRole === undefined) { + throw new Error('Fargate job must have executionRole set'); + } + + if (props.platformCapabilities !== undefined && props.platformCapabilities.includes(PlatformCapabilities.FARGATE) + && props.container.gpuCount !== undefined) { + throw new Error('Fargate job must not have gpuCount set'); + } + + if ((props.platformCapabilities === undefined || props.platformCapabilities.includes(PlatformCapabilities.EC2)) + && props.container.assignPublicIp !== undefined) { + throw new Error('EC2 job must not have assignPublicIp set'); + } + } + + private buildJobContainer(container: JobDefinitionContainer, isFargate: boolean): CfnJobDefinition.ContainerPropertiesProperty | undefined { if (container === undefined) { return undefined; } @@ -437,6 +510,7 @@ export class JobDefinition extends Resource implements IJobDefinition { image: this.imageConfig.imageName, 
instanceType: container.instanceType && container.instanceType.toString(), jobRoleArn: container.jobRole && container.jobRole.roleArn, + executionRoleArn: container.executionRole && container.executionRole.roleArn, linuxParameters: container.linuxParams ? { devices: container.linuxParams.renderLinuxParameters().devices } : undefined, @@ -447,26 +521,31 @@ export class JobDefinition extends Resource implements IJobDefinition { ? this.buildLogConfigurationSecretOptions(container.logConfiguration.secretOptions) : undefined, } : undefined, - memory: container.memoryLimitMiB || 4, mountPoints: container.mountPoints, privileged: container.privileged || false, - resourceRequirements: container.gpuCount - ? [{ type: 'GPU', value: String(container.gpuCount) }] - : undefined, + networkConfiguration: container.assignPublicIp ? { + assignPublicIp: container.assignPublicIp ? 'ENABLED' : 'DISABLED', + } : undefined, readonlyRootFilesystem: container.readOnly || false, ulimits: container.ulimits, user: container.user, - vcpus: container.vcpus || 1, volumes: container.volumes, + fargatePlatformConfiguration: container.platformVersion ? { + platformVersion: container.platformVersion, + } : undefined, + resourceRequirements: [ + { type: 'VCPU', value: String(container.vcpus || (isFargate ? 0.25 : 1)) }, + { type: 'MEMORY', value: String(container.memoryLimitMiB || (isFargate ? 512 : 4)) }, + ].concat(container.gpuCount ? 
[{ type: 'GPU', value: String(container.gpuCount) }] : []), }; } - private buildNodeRangeProps(multiNodeProps: IMultiNodeProps): CfnJobDefinition.NodeRangePropertyProperty[] { + private buildNodeRangeProps(multiNodeProps: IMultiNodeProps, isFargate: boolean): CfnJobDefinition.NodeRangePropertyProperty[] { const rangeProps = new Array(); for (const prop of multiNodeProps.rangeProps) { rangeProps.push({ - container: this.buildJobContainer(prop.container), + container: this.buildJobContainer(prop.container, isFargate), targetNodes: `${prop.fromNodeIndex || 0}:${prop.toNodeIndex || multiNodeProps.count}`, }); } diff --git a/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts b/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts index c7ead4cd47de8..4cd446eec3774 100644 --- a/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts +++ b/packages/@aws-cdk/aws-batch/test/compute-environment.test.ts @@ -6,7 +6,7 @@ import * as iam from '@aws-cdk/aws-iam'; import * as cdk from '@aws-cdk/core'; import * as batch from '../lib'; -describe('Batch Compute Evironment', () => { +describe('Batch Compute Environment', () => { let expectedManagedDefaultComputeProps: any; let defaultServiceRole: any; @@ -81,6 +81,164 @@ describe('Batch Compute Evironment', () => { }); }); }); + describe('using fargate resources', () => { + test('should deny setting bid percentage', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + bidPercentage: -1, + }, + }); + }); + }); + test('should deny setting allocation strategy', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + allocationStrategy: batch.AllocationStrategy.BEST_FIT, + }, + }); + }); + }); + test('should deny setting desired vCPUs', () 
=> { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + desiredvCpus: 1, + }, + }); + }); + }); + test('should deny setting min vCPUs', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + minvCpus: 1, + }, + }); + }); + }); + test('should deny setting image', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + image: ec2.MachineImage.latestAmazonLinux(), + }, + }); + }); + }); + test('should deny setting instance types', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + instanceTypes: [], + }, + }); + }); + }); + test('should deny setting EC2 key pair', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + ec2KeyPair: 'test', + }, + }); + }); + }); + test('should deny setting instance role', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + instanceRole: 'test', + }, + }); + }); + }); + test('should deny setting launch template', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + launchTemplate: { + launchTemplateName: 'test-template', + }, + }, + }); + }); + }); + test('should 
deny setting placement group', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + placementGroup: 'test', + }, + }); + }); + }); + test('should deny setting spot fleet role', () => { + // THEN + throws(() => { + // WHEN + new batch.ComputeEnvironment(stack, 'test-compute-env', { + managed: true, + computeResources: { + vpc, + type: batch.ComputeResourceType.FARGATE, + spotFleetRole: iam.Role.fromRoleArn(stack, 'test-role-arn', 'test-role'), + }, + }); + }); + }); + }); describe('using spot resources', () => { test('should provide a spot fleet role if one is not given and allocationStrategy is BEST_FIT', () => { diff --git a/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json b/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json index 09d021e49bd9d..7624200d45321 100644 --- a/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json +++ b/packages/@aws-cdk/aws-batch/test/integ.batch.expected.json @@ -95,15 +95,15 @@ "vpcPublicSubnet1NATGateway9C16659E": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "vpcPublicSubnet1Subnet2E65531E" + }, "AllocationId": { "Fn::GetAtt": [ "vpcPublicSubnet1EIPDA49DCBE", "AllocationId" ] }, - "SubnetId": { - "Ref": "vpcPublicSubnet1Subnet2E65531E" - }, "Tags": [ { "Key": "Name", @@ -192,15 +192,15 @@ "vpcPublicSubnet2NATGateway9B8AE11A": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "vpcPublicSubnet2Subnet009B674F" + }, "AllocationId": { "Fn::GetAtt": [ "vpcPublicSubnet2EIP9B3743B1", "AllocationId" ] }, - "SubnetId": { - "Ref": "vpcPublicSubnet2Subnet009B674F" - }, "Tags": [ { "Key": "Name", @@ -289,15 +289,15 @@ "vpcPublicSubnet3NATGateway82F6CA9E": { "Type": "AWS::EC2::NatGateway", "Properties": { + "SubnetId": { + "Ref": "vpcPublicSubnet3Subnet11B92D7C" + }, "AllocationId": { "Fn::GetAtt": [ 
"vpcPublicSubnet3EIP2C3B9D91", "AllocationId" ] }, - "SubnetId": { - "Ref": "vpcPublicSubnet3Subnet11B92D7C" - }, "Tags": [ { "Key": "Name", @@ -566,55 +566,30 @@ "batchunmanagedcomputeenvED550298": { "Type": "AWS::Batch::ComputeEnvironment", "Properties": { + "Type": "UNMANAGED", "ServiceRole": { "Fn::GetAtt": [ "batchunmanagedcomputeenvResourceServiceInstanceRoleCA40AF77", "Arn" ] }, - "Type": "UNMANAGED", "State": "ENABLED" } }, - "batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799": { - "Type": "AWS::IAM::Role", + "batchdemandcomputeenvlaunchtemplateResourceSecurityGroup23599B84": { + "Type": "AWS::EC2::SecurityGroup", "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": { - "Fn::Join": [ - "", - [ - "ec2.", - { - "Ref": "AWS::URLSuffix" - } - ] - ] - } - } - } - ], - "Version": "2012-10-17" - }, - "ManagedPolicyArns": [ + "GroupDescription": "batch-stack/batch-demand-compute-env-launch-template/Resource-Security-Group", + "SecurityGroupEgress": [ { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" - ] - ] + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" } - ] + ], + "VpcId": { + "Ref": "vpcA2121C38" + } }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -652,12 +627,43 @@ "vpcVPCGW7984C166" ] }, - "batchdemandcomputeenvlaunchtemplateInstanceProfile2DEC3A97": { - "Type": "AWS::IAM::InstanceProfile", + "batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799": { + "Type": "AWS::IAM::Role", "Properties": { - "Roles": [ + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ { - "Ref": 
"batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799" + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" + ] + ] } ] }, @@ -697,20 +703,14 @@ "vpcVPCGW7984C166" ] }, - "batchdemandcomputeenvlaunchtemplateResourceSecurityGroup23599B84": { - "Type": "AWS::EC2::SecurityGroup", + "batchdemandcomputeenvlaunchtemplateInstanceProfile2DEC3A97": { + "Type": "AWS::IAM::InstanceProfile", "Properties": { - "GroupDescription": "batch-stack/batch-demand-compute-env-launch-template/Resource-Security-Group", - "SecurityGroupEgress": [ + "Roles": [ { - "CidrIp": "0.0.0.0/0", - "Description": "Allow all outbound traffic by default", - "IpProtocol": "-1" + "Ref": "batchdemandcomputeenvlaunchtemplateEcsInstanceRole24D4E799" } - ], - "VpcId": { - "Ref": "vpcA2121C38" - } + ] }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -817,12 +817,6 @@ "batchdemandcomputeenvlaunchtemplateF8A5B233": { "Type": "AWS::Batch::ComputeEnvironment", "Properties": { - "ServiceRole": { - "Fn::GetAtt": [ - "batchdemandcomputeenvlaunchtemplateResourceServiceInstanceRole76AD99CC", - "Arn" - ] - }, "Type": "MANAGED", "ComputeResources": { "AllocationStrategy": "BEST_FIT", @@ -864,6 +858,12 @@ }, "Type": "EC2" }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchdemandcomputeenvlaunchtemplateResourceServiceInstanceRole76AD99CC", + "Arn" + ] + }, "State": "ENABLED" }, "DependsOn": [ @@ -902,45 +902,20 @@ "vpcVPCGW7984C166" ] }, - "batchspotcomputeenvEcsInstanceRoleE976826B": { - "Type": "AWS::IAM::Role", + "batchspotcomputeenvResourceSecurityGroup07B09BF9": { + "Type": "AWS::EC2::SecurityGroup", "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": "sts:AssumeRole", - "Effect": "Allow", - "Principal": { - "Service": { - "Fn::Join": [ - "", - [ - "ec2.", - { - "Ref": "AWS::URLSuffix" - } - ] - ] - } - } - } - ], - "Version": "2012-10-17" - }, - "ManagedPolicyArns": [ + "GroupDescription": 
"batch-stack/batch-spot-compute-env/Resource-Security-Group", + "SecurityGroupEgress": [ { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" - ] - ] + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" } - ] + ], + "VpcId": { + "Ref": "vpcA2121C38" + } }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -978,12 +953,43 @@ "vpcVPCGW7984C166" ] }, - "batchspotcomputeenvInstanceProfileFA613AC2": { - "Type": "AWS::IAM::InstanceProfile", + "batchspotcomputeenvEcsInstanceRoleE976826B": { + "Type": "AWS::IAM::Role", "Properties": { - "Roles": [ + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": { + "Fn::Join": [ + "", + [ + "ec2.", + { + "Ref": "AWS::URLSuffix" + } + ] + ] + } + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ { - "Ref": "batchspotcomputeenvEcsInstanceRoleE976826B" + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role" + ] + ] } ] }, @@ -1023,20 +1029,14 @@ "vpcVPCGW7984C166" ] }, - "batchspotcomputeenvResourceSecurityGroup07B09BF9": { - "Type": "AWS::EC2::SecurityGroup", + "batchspotcomputeenvInstanceProfileFA613AC2": { + "Type": "AWS::IAM::InstanceProfile", "Properties": { - "GroupDescription": "batch-stack/batch-spot-compute-env/Resource-Security-Group", - "SecurityGroupEgress": [ + "Roles": [ { - "CidrIp": "0.0.0.0/0", - "Description": "Allow all outbound traffic by default", - "IpProtocol": "-1" + "Ref": "batchspotcomputeenvEcsInstanceRoleE976826B" } - ], - "VpcId": { - "Ref": "vpcA2121C38" - } + ] }, "DependsOn": [ "vpcIGWE57CBDCA", @@ -1143,12 +1143,6 @@ "batchspotcomputeenv2CE4DFD9": { "Type": "AWS::Batch::ComputeEnvironment", "Properties": { - "ServiceRole": { - "Fn::GetAtt": [ - "batchspotcomputeenvResourceServiceInstanceRole8B0DF5A7", 
- "Arn" - ] - }, "Type": "MANAGED", "ComputeResources": { "AllocationStrategy": "SPOT_CAPACITY_OPTIMIZED", @@ -1201,6 +1195,12 @@ ], "Type": "SPOT" }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchspotcomputeenvResourceServiceInstanceRole8B0DF5A7", + "Arn" + ] + }, "State": "ENABLED" }, "DependsOn": [ @@ -1266,13 +1266,410 @@ "State": "ENABLED" } }, - "batchjobrepo4C508C51": { - "Type": "AWS::ECR::Repository", - "UpdateReplacePolicy": "Retain", - "DeletionPolicy": "Retain" - }, - "batchjobdeffromecrE0E30DAD": { - "Type": "AWS::Batch::JobDefinition", + "batchfargatecomputeenvResourceSecurityGroupE2963776": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "batch-stack/batch-fargate-compute-env/Resource-Security-Group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "vpcA2121C38" + } + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + 
"vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatecomputeenvResourceServiceInstanceRole94D7AA5F": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "batch.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSBatchServiceRole" + ] + ] + } + ] + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + 
"vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatecomputeenvE9C3FCA4": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "MaxvCpus": 256, + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "batchfargatecomputeenvResourceSecurityGroupE2963776", + "GroupId" + ] + } + ], + "Subnets": [ + { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + }, + { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + }, + { + "Ref": "vpcPrivateSubnet3Subnet985AC459" + } + ], + "Type": "FARGATE" + }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchfargatecomputeenvResourceServiceInstanceRole94D7AA5F", + "Arn" + ] + }, + "State": "ENABLED" + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + 
"vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatespotcomputeenvResourceSecurityGroup923D2390": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "batch-stack/batch-fargate-spot-compute-env/Resource-Security-Group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "vpcA2121C38" + } + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatespotcomputeenvResourceServiceInstanceRole6462BFB0": { + "Type": "AWS::IAM::Role", + "Properties": { + 
"AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "batch.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSBatchServiceRole" + ] + ] + } + ] + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchfargatespotcomputeenv374749B0": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "MaxvCpus": 256, + "SecurityGroupIds": [ + { + "Fn::GetAtt": [ + "batchfargatespotcomputeenvResourceSecurityGroup923D2390", + "GroupId" + ] + } + 
], + "Subnets": [ + { + "Ref": "vpcPrivateSubnet1Subnet934893E8" + }, + { + "Ref": "vpcPrivateSubnet2Subnet7031C2BA" + }, + { + "Ref": "vpcPrivateSubnet3Subnet985AC459" + } + ], + "Type": "FARGATE_SPOT" + }, + "ServiceRole": { + "Fn::GetAtt": [ + "batchfargatespotcomputeenvResourceServiceInstanceRole6462BFB0", + "Arn" + ] + }, + "State": "ENABLED" + }, + "DependsOn": [ + "vpcIGWE57CBDCA", + "vpcPrivateSubnet1DefaultRoute1AA8E2E5", + "vpcPrivateSubnet1RouteTableB41A48CC", + "vpcPrivateSubnet1RouteTableAssociation67945127", + "vpcPrivateSubnet1Subnet934893E8", + "vpcPrivateSubnet2DefaultRouteB0E07F99", + "vpcPrivateSubnet2RouteTable7280F23E", + "vpcPrivateSubnet2RouteTableAssociation007E94D3", + "vpcPrivateSubnet2Subnet7031C2BA", + "vpcPrivateSubnet3DefaultRoute30C45F47", + "vpcPrivateSubnet3RouteTable24DA79A0", + "vpcPrivateSubnet3RouteTableAssociationC58B3C2C", + "vpcPrivateSubnet3Subnet985AC459", + "vpcPublicSubnet1DefaultRoute10708846", + "vpcPublicSubnet1EIPDA49DCBE", + "vpcPublicSubnet1NATGateway9C16659E", + "vpcPublicSubnet1RouteTable48A2DF9B", + "vpcPublicSubnet1RouteTableAssociation5D3F4579", + "vpcPublicSubnet1Subnet2E65531E", + "vpcPublicSubnet2DefaultRouteA1EC0F60", + "vpcPublicSubnet2EIP9B3743B1", + "vpcPublicSubnet2NATGateway9B8AE11A", + "vpcPublicSubnet2RouteTableEB40D4CB", + "vpcPublicSubnet2RouteTableAssociation21F81B59", + "vpcPublicSubnet2Subnet009B674F", + "vpcPublicSubnet3DefaultRoute3F356A11", + "vpcPublicSubnet3EIP2C3B9D91", + "vpcPublicSubnet3NATGateway82F6CA9E", + "vpcPublicSubnet3RouteTableA3C00665", + "vpcPublicSubnet3RouteTableAssociationD102D1C4", + "vpcPublicSubnet3Subnet11B92D7C", + "vpcA2121C38", + "vpcVPCGW7984C166" + ] + }, + "batchjobfargatequeue5A12983E": { + "Type": "AWS::Batch::JobQueue", + "Properties": { + "ComputeEnvironmentOrder": [ + { + "ComputeEnvironment": { + "Ref": "batchfargatecomputeenvE9C3FCA4" + }, + "Order": 1 + }, + { + "ComputeEnvironment": { + "Ref": "batchfargatespotcomputeenv374749B0" + }, + "Order": 2 + } + 
], + "Priority": 1, + "State": "ENABLED" + } + }, + "batchjobrepo4C508C51": { + "Type": "AWS::ECR::Repository", + "UpdateReplacePolicy": "Retain", + "DeletionPolicy": "Retain" + }, + "batchjobdeffromecrE0E30DAD": { + "Type": "AWS::Batch::JobDefinition", "Properties": { "Type": "container", "ContainerProperties": { @@ -1325,11 +1722,16 @@ ] ] }, - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, @@ -1342,11 +1744,67 @@ "Type": "container", "ContainerProperties": { "Image": "docker/whalesay", - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], + "RetryStrategy": { + "Attempts": 1 + }, + "Timeout": {} + } + }, + "executionroleD9A39BE6": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "batch.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + } + } + }, + "batchjobdeffargate7FE30059": { + "Type": "AWS::Batch::JobDefinition", + "Properties": { + "Type": "container", + "ContainerProperties": { + "ExecutionRoleArn": { + "Fn::GetAtt": [ + "executionroleD9A39BE6", + "Arn" + ] + }, + "Image": "docker/whalesay", + "Privileged": false, + "ReadonlyRootFilesystem": false, + "ResourceRequirements": [ + { + "Type": "VCPU", + "Value": "0.25" + }, + { + "Type": "MEMORY", + "Value": "512" + } + ] + }, + "PlatformCapabilities": [ + "FARGATE" + ], "RetryStrategy": { "Attempts": 1 }, diff --git a/packages/@aws-cdk/aws-batch/test/integ.batch.ts b/packages/@aws-cdk/aws-batch/test/integ.batch.ts index 4e19da37ca897..4430cda4a7bf3 100644 --- 
a/packages/@aws-cdk/aws-batch/test/integ.batch.ts +++ b/packages/@aws-cdk/aws-batch/test/integ.batch.ts @@ -1,6 +1,7 @@ import * as ec2 from '@aws-cdk/aws-ec2'; import * as ecr from '@aws-cdk/aws-ecr'; import * as ecs from '@aws-cdk/aws-ecs'; +import * as iam from '@aws-cdk/aws-iam'; import * as cdk from '@aws-cdk/core'; import * as batch from '../lib/'; @@ -64,6 +65,33 @@ new batch.JobQueue(stack, 'batch-job-queue', { ], }); +// Split out into two job queues because each queue +// supports a max of 3 compute environments +new batch.JobQueue(stack, 'batch-job-fargate-queue', { + computeEnvironments: [ + { + computeEnvironment: new batch.ComputeEnvironment(stack, 'batch-fargate-compute-env', { + managed: true, + computeResources: { + type: batch.ComputeResourceType.FARGATE, + vpc, + }, + }), + order: 1, + }, + { + computeEnvironment: new batch.ComputeEnvironment(stack, 'batch-fargate-spot-compute-env', { + managed: true, + computeResources: { + type: batch.ComputeResourceType.FARGATE_SPOT, + vpc, + }, + }), + order: 2, + }, + ], +}); + const repo = new ecr.Repository(stack, 'batch-job-repo'); new batch.JobDefinition(stack, 'batch-job-def-from-ecr', { @@ -77,3 +105,15 @@ new batch.JobDefinition(stack, 'batch-job-def-from-', { image: ecs.ContainerImage.fromRegistry('docker/whalesay'), }, }); + +const executionRole = new iam.Role(stack, 'execution-role', { + assumedBy: new iam.ServicePrincipal('batch.amazonaws.com'), +}); + +new batch.JobDefinition(stack, 'batch-job-def-fargate', { + platformCapabilities: [batch.PlatformCapabilities.FARGATE], + container: { + image: ecs.ContainerImage.fromRegistry('docker/whalesay'), + executionRole, + }, +}); diff --git a/packages/@aws-cdk/aws-batch/test/job-definition.test.ts b/packages/@aws-cdk/aws-batch/test/job-definition.test.ts index ed9bffb7a90bc..13926b6b80788 100644 --- a/packages/@aws-cdk/aws-batch/test/job-definition.test.ts +++ b/packages/@aws-cdk/aws-batch/test/job-definition.test.ts @@ -1,3 +1,4 @@ +import { throws } 
from 'assert'; import { Template } from '@aws-cdk/assertions'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as ecr from '@aws-cdk/aws-ecr'; @@ -7,6 +8,7 @@ import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import * as ssm from '@aws-cdk/aws-ssm'; import * as cdk from '@aws-cdk/core'; import * as batch from '../lib'; +import { PlatformCapabilities } from '../lib'; describe('Batch Job Definition', () => { let stack: cdk.Stack; @@ -61,6 +63,7 @@ describe('Batch Job Definition', () => { }, retryAttempts: 2, timeout: cdk.Duration.seconds(30), + platformCapabilities: [batch.PlatformCapabilities.EC2], }; }); @@ -87,14 +90,83 @@ describe('Batch Job Definition', () => { 'awslogs-region': 'us-east-1', }, }, - Memory: jobDefProps.container.memoryLimitMiB, MountPoints: [], Privileged: jobDefProps.container.privileged, ReadonlyRootFilesystem: jobDefProps.container.readOnly, - ResourceRequirements: [{ Type: 'GPU', Value: String(jobDefProps.container.gpuCount) }], + ResourceRequirements: [ + { Type: 'VCPU', Value: String(jobDefProps.container.vcpus) }, + { Type: 'MEMORY', Value: String(jobDefProps.container.memoryLimitMiB) }, + { Type: 'GPU', Value: String(jobDefProps.container.gpuCount) }, + ], + Ulimits: [], + User: jobDefProps.container.user, + Volumes: [], + } : undefined, + NodeProperties: jobDefProps.nodeProps ? { + MainNode: jobDefProps.nodeProps.mainNode, + NodeRangeProperties: [], + NumNodes: jobDefProps.nodeProps.count, + } : undefined, + Parameters: { + foo: 'bar', + }, + RetryStrategy: { + Attempts: jobDefProps.retryAttempts, + }, + Timeout: { + AttemptDurationSeconds: jobDefProps.timeout ? 
jobDefProps.timeout.toSeconds() : -1, + }, + Type: 'container', + PlatformCapabilities: ['EC2'], + }); + }); + + test('renders the correct cloudformation properties for a Fargate job definition', () => { + // WHEN + const executionRole = new iam.Role(stack, 'execution-role', { + assumedBy: new iam.ServicePrincipal('ecs-tasks.amazonaws.com'), + }); + + new batch.JobDefinition(stack, 'job-def', { + ...jobDefProps, + container: { ...jobDefProps.container, executionRole, gpuCount: undefined }, + platformCapabilities: [PlatformCapabilities.FARGATE], + }); + + // THEN + Template.fromStack(stack).hasResourceProperties('AWS::Batch::JobDefinition', { + JobDefinitionName: jobDefProps.jobDefinitionName, + ContainerProperties: jobDefProps.container ? { + Command: jobDefProps.container.command, + Environment: [ + { + Name: 'foo', + Value: 'bar', + }, + ], + ExecutionRoleArn: { + 'Fn::GetAtt': [ + 'executionroleD9A39BE6', + 'Arn', + ], + }, + InstanceType: jobDefProps.container.instanceType ? jobDefProps.container.instanceType.toString() : '', + LinuxParameters: {}, + LogConfiguration: { + LogDriver: 'awslogs', + Options: { + 'awslogs-region': 'us-east-1', + }, + }, + MountPoints: [], + Privileged: jobDefProps.container.privileged, + ReadonlyRootFilesystem: jobDefProps.container.readOnly, + ResourceRequirements: [ + { Type: 'VCPU', Value: String(jobDefProps.container.vcpus) }, + { Type: 'MEMORY', Value: String(jobDefProps.container.memoryLimitMiB) }, + ], Ulimits: [], User: jobDefProps.container.user, - Vcpus: jobDefProps.container.vcpus, Volumes: [], } : undefined, NodeProperties: jobDefProps.nodeProps ? { @@ -112,8 +184,10 @@ describe('Batch Job Definition', () => { AttemptDurationSeconds: jobDefProps.timeout ? 
jobDefProps.timeout.toSeconds() : -1, }, Type: 'container', + PlatformCapabilities: ['FARGATE'], }); }); + test('can use an ecr image', () => { // WHEN const repo = new ecr.Repository(stack, 'image-repo'); @@ -176,10 +250,12 @@ describe('Batch Job Definition', () => { ], ], }, - Memory: 4, Privileged: false, ReadonlyRootFilesystem: false, - Vcpus: 1, + ResourceRequirements: [ + { Type: 'VCPU', Value: '1' }, + { Type: 'MEMORY', Value: '4' }, + ], }, }); }); @@ -196,10 +272,12 @@ describe('Batch Job Definition', () => { Template.fromStack(stack).hasResourceProperties('AWS::Batch::JobDefinition', { ContainerProperties: { Image: 'docker/whalesay', - Memory: 4, Privileged: false, ReadonlyRootFilesystem: false, - Vcpus: 1, + ResourceRequirements: [ + { Type: 'VCPU', Value: '1' }, + { Type: 'MEMORY', Value: '4' }, + ], }, }); }); @@ -286,4 +364,40 @@ describe('Batch Job Definition', () => { }, }); }); + describe('using fargate job definition', () => { + test('can configure platform configuration properly', () => { + // GIVEN + const executionRole = new iam.Role(stack, 'execution-role', { + assumedBy: new iam.ServicePrincipal('batch.amazonaws.com'), + }); + // WHEN + new batch.JobDefinition(stack, 'job-def', { + platformCapabilities: [batch.PlatformCapabilities.FARGATE], + container: { + image: ecs.EcrImage.fromRegistry('docker/whalesay'), + platformVersion: ecs.FargatePlatformVersion.LATEST, + executionRole: executionRole, + }, + }); + // THEN + Template.fromStack(stack).hasResourceProperties('AWS::Batch::JobDefinition', { + ContainerProperties: { + FargatePlatformConfiguration: { + PlatformVersion: 'LATEST', + }, + }, + }); + }); + test('must require executionRole', () => { + throws(() => { + // WHEN + new batch.JobDefinition(stack, 'job-def', { + platformCapabilities: [batch.PlatformCapabilities.FARGATE], + container: { + image: ecs.EcrImage.fromRegistry('docker/whalesay'), + }, + }); + }); + }); + }); }); diff --git a/packages/@aws-cdk/aws-config/lib/rule.ts 
b/packages/@aws-cdk/aws-config/lib/rule.ts index 1551d23e6d0c9..05ecebe7d93e5 100644 --- a/packages/@aws-cdk/aws-config/lib/rule.ts +++ b/packages/@aws-cdk/aws-config/lib/rule.ts @@ -1355,7 +1355,7 @@ export class ResourceType { /** Amazon EC2 customer gateway */ public static readonly EC2_CUSTOMER_GATEWAY = new ResourceType('AWS::EC2::CustomerGateway'); /** Amazon EC2 internet gateway */ - public static readonly EC2_INTERNET_GATEWAY = new ResourceType('AWS::EC2::CustomerGateway'); + public static readonly EC2_INTERNET_GATEWAY = new ResourceType('AWS::EC2::InternetGateway'); /** Amazon EC2 network ACL */ public static readonly EC2_NETWORK_ACL = new ResourceType('AWS::EC2::NetworkAcl'); /** Amazon EC2 route table */ diff --git a/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts b/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts index bb7c40617b4ea..983b56e838876 100644 --- a/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts +++ b/packages/@aws-cdk/aws-ecr-assets/lib/tarball-asset.ts @@ -16,7 +16,11 @@ import { Construct as CoreConstruct } from '@aws-cdk/core'; */ export interface TarballImageAssetProps { /** - * Path to the tarball. + * Absolute path to the tarball. + * + * It is recommended to to use the script running directory (e.g. `__dirname` + * in Node.js projects or dirname of `__file__` in Python) if your tarball + * is located as a resource inside your project. */ readonly tarballFile: string; } diff --git a/packages/@aws-cdk/aws-ecs/lib/container-image.ts b/packages/@aws-cdk/aws-ecs/lib/container-image.ts index 05b098fdafedd..f3c53bb527ba0 100644 --- a/packages/@aws-cdk/aws-ecs/lib/container-image.ts +++ b/packages/@aws-cdk/aws-ecs/lib/container-image.ts @@ -59,7 +59,8 @@ export abstract class ContainerImage { * Use this method if the container image has already been created by another process (e.g. jib) * and you want to add it as a container image asset. * - * @param tarballFile Path to the tarball (relative to the directory). 
+ * @param tarballFile Absolute path to the tarball. You can use language-specific idioms (such as `__dirname` in Node.js) + * to create an absolute path based on the current script running directory. */ public static fromTarball(tarballFile: string): ContainerImage { return { diff --git a/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json b/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json index 77a8854041e1f..f4dfe0408f63e 100644 --- a/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json +++ b/packages/@aws-cdk/aws-events-targets/test/batch/integ.job-definition-events.expected.json @@ -65,11 +65,16 @@ "Type": "container", "ContainerProperties": { "Image": "test-repo", - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, diff --git a/packages/@aws-cdk/aws-lambda/README.md b/packages/@aws-cdk/aws-lambda/README.md index d257bbd635df5..c6098ff19fe11 100644 --- a/packages/@aws-cdk/aws-lambda/README.md +++ b/packages/@aws-cdk/aws-lambda/README.md @@ -78,6 +78,9 @@ new DockerImageFunction(this, 'ECRFunction', { }); ``` +The props for these docker image resources allow overriding the image's `CMD`, `ENTRYPOINT`, and `WORKDIR` +configurations. See their docs for more information. + ## Execution Role Lambda functions assume an IAM role during execution. In CDK by default, Lambda diff --git a/packages/@aws-cdk/aws-lambda/lib/code.ts b/packages/@aws-cdk/aws-lambda/lib/code.ts index b78859bf3515c..293c91f1485d9 100644 --- a/packages/@aws-cdk/aws-lambda/lib/code.ts +++ b/packages/@aws-cdk/aws-lambda/lib/code.ts @@ -202,6 +202,14 @@ export interface CodeImageConfig { * @default - use the ENTRYPOINT in the docker image or Dockerfile. 
*/ readonly entrypoint?: string[]; + + /** + * Specify or override the WORKDIR on the specified Docker image or Dockerfile. + * A WORKDIR allows you to configure the working directory the container will use. + * @see https://docs.docker.com/engine/reference/builder/#workdir + * @default - use the WORKDIR in the docker image or Dockerfile. + */ + readonly workingDirectory?: string; } /** @@ -435,6 +443,14 @@ export interface EcrImageCodeProps { */ readonly entrypoint?: string[]; + /** + * Specify or override the WORKDIR on the specified Docker image or Dockerfile. + * A WORKDIR allows you to configure the working directory the container will use. + * @see https://docs.docker.com/engine/reference/builder/#workdir + * @default - use the WORKDIR in the docker image or Dockerfile. + */ + readonly workingDirectory?: string; + /** * The image tag to use when pulling the image from ECR. * @default 'latest' @@ -460,6 +476,7 @@ export class EcrImageCode extends Code { imageUri: this.repository.repositoryUriForTag(this.props?.tag ?? 'latest'), cmd: this.props.cmd, entrypoint: this.props.entrypoint, + workingDirectory: this.props.workingDirectory, }, }; } @@ -485,6 +502,14 @@ export interface AssetImageCodeProps extends ecr_assets.DockerImageAssetOptions * @default - use the ENTRYPOINT in the docker image or Dockerfile. */ readonly entrypoint?: string[]; + + /** + * Specify or override the WORKDIR on the specified Docker image or Dockerfile. + * A WORKDIR allows you to configure the working directory the container will use. + * @see https://docs.docker.com/engine/reference/builder/#workdir + * @default - use the WORKDIR in the docker image or Dockerfile. 
+ */ + readonly workingDirectory?: string; } /** @@ -510,6 +535,7 @@ export class AssetImageCode extends Code { imageUri: asset.imageUri, entrypoint: this.props.entrypoint, cmd: this.props.cmd, + workingDirectory: this.props.workingDirectory, }, }; } diff --git a/packages/@aws-cdk/aws-lambda/lib/function.ts b/packages/@aws-cdk/aws-lambda/lib/function.ts index 2d2fadc8d808b..9cd67a478f003 100644 --- a/packages/@aws-cdk/aws-lambda/lib/function.ts +++ b/packages/@aws-cdk/aws-lambda/lib/function.ts @@ -675,6 +675,7 @@ export class Function extends FunctionBase { imageConfig: undefinedIfNoKeys({ command: code.image?.cmd, entryPoint: code.image?.entrypoint, + workingDirectory: code.image?.workingDirectory, }), kmsKeyArn: props.environmentEncryption?.keyArn, fileSystemConfigs, diff --git a/packages/@aws-cdk/aws-lambda/lib/runtime.ts b/packages/@aws-cdk/aws-lambda/lib/runtime.ts index 74d41bbb7e9e4..9c0e305d4441e 100644 --- a/packages/@aws-cdk/aws-lambda/lib/runtime.ts +++ b/packages/@aws-cdk/aws-lambda/lib/runtime.ts @@ -152,23 +152,17 @@ export class Runtime { /** * The .NET Core 2.1 runtime (dotnetcore2.1) */ - public static readonly DOTNET_CORE_2_1 = new Runtime('dotnetcore2.1', RuntimeFamily.DOTNET_CORE, { - bundlingDockerImage: 'lambci/lambda:build-dotnetcore2.1', - }); + public static readonly DOTNET_CORE_2_1 = new Runtime('dotnetcore2.1', RuntimeFamily.DOTNET_CORE); /** * The .NET Core 3.1 runtime (dotnetcore3.1) */ - public static readonly DOTNET_CORE_3_1 = new Runtime('dotnetcore3.1', RuntimeFamily.DOTNET_CORE, { - bundlingDockerImage: 'lambci/lambda:build-dotnetcore3.1', - }); + public static readonly DOTNET_CORE_3_1 = new Runtime('dotnetcore3.1', RuntimeFamily.DOTNET_CORE); /** * The Go 1.x runtime (go1.x) */ - public static readonly GO_1_X = new Runtime('go1.x', RuntimeFamily.GO, { - bundlingDockerImage: 'lambci/lambda:build-go1.x', - }); + public static readonly GO_1_X = new Runtime('go1.x', RuntimeFamily.GO); /** * The Ruby 2.5 runtime (ruby2.5) diff --git 
a/packages/@aws-cdk/aws-lambda/test/code.test.ts b/packages/@aws-cdk/aws-lambda/test/code.test.ts index ec9732baa9ea7..49e87cf220ebe 100644 --- a/packages/@aws-cdk/aws-lambda/test/code.test.ts +++ b/packages/@aws-cdk/aws-lambda/test/code.test.ts @@ -225,6 +225,7 @@ describe('code', () => { cmd: ['cmd', 'param1'], entrypoint: ['entrypoint', 'param2'], tag: 'mytag', + workingDirectory: '/some/path', }), handler: lambda.Handler.FROM_IMAGE, runtime: lambda.Runtime.FROM_IMAGE, @@ -238,6 +239,7 @@ describe('code', () => { ImageConfig: { Command: ['cmd', 'param1'], EntryPoint: ['entrypoint', 'param2'], + WorkingDirectory: '/some/path', }, }); }); @@ -315,6 +317,7 @@ describe('code', () => { code: lambda.Code.fromAssetImage(path.join(__dirname, 'docker-lambda-handler'), { cmd: ['cmd', 'param1'], entrypoint: ['entrypoint', 'param2'], + workingDirectory: '/some/path', }), handler: lambda.Handler.FROM_IMAGE, runtime: lambda.Runtime.FROM_IMAGE, @@ -325,6 +328,7 @@ describe('code', () => { ImageConfig: { Command: ['cmd', 'param1'], EntryPoint: ['entrypoint', 'param2'], + WorkingDirectory: '/some/path', }, }); }); diff --git a/packages/@aws-cdk/aws-lambda/test/function.test.ts b/packages/@aws-cdk/aws-lambda/test/function.test.ts index f86a3b6fb1ebb..b4f79aff6cc9e 100644 --- a/packages/@aws-cdk/aws-lambda/test/function.test.ts +++ b/packages/@aws-cdk/aws-lambda/test/function.test.ts @@ -2112,6 +2112,7 @@ describe('function', () => { imageUri: 'ecr image uri', cmd: ['cmd', 'param1'], entrypoint: ['entrypoint', 'param2'], + workingDirectory: '/some/path', }, }), handler: lambda.Handler.FROM_IMAGE, @@ -2122,6 +2123,7 @@ describe('function', () => { ImageConfig: { Command: ['cmd', 'param1'], EntryPoint: ['entrypoint', 'param2'], + WorkingDirectory: '/some/path', }, }); }); diff --git a/packages/@aws-cdk/aws-lambda/test/runtime.test.ts b/packages/@aws-cdk/aws-lambda/test/runtime.test.ts index 1b16b12c697b9..17203a11f9d7e 100644 --- a/packages/@aws-cdk/aws-lambda/test/runtime.test.ts 
+++ b/packages/@aws-cdk/aws-lambda/test/runtime.test.ts @@ -55,10 +55,4 @@ describe('runtime', () => { // THEN expect(runtime.bundlingDockerImage.image).toEqual('my-docker-image'); }); - - test('dotnetcore and go have overridden images', () => { - expect(lambda.Runtime.DOTNET_CORE_3_1.bundlingDockerImage.image).toEqual('lambci/lambda:build-dotnetcore3.1'); - expect(lambda.Runtime.DOTNET_CORE_2_1.bundlingDockerImage.image).toEqual('lambci/lambda:build-dotnetcore2.1'); - expect(lambda.Runtime.GO_1_X.bundlingDockerImage.image).toEqual('lambci/lambda:build-go1.x'); - }); }); diff --git a/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts b/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts index d3f508f668ecc..da537c149f013 100644 --- a/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts +++ b/packages/@aws-cdk/aws-logs/lib/log-retention-provider/index.ts @@ -18,14 +18,39 @@ interface SdkRetryOptions { * @param options CloudWatch API SDK options. */ async function createLogGroupSafe(logGroupName: string, region?: string, options?: SdkRetryOptions) { - try { // Try to create the log group - const cloudwatchlogs = new AWS.CloudWatchLogs({ apiVersion: '2014-03-28', region, ...options }); - await cloudwatchlogs.createLogGroup({ logGroupName }).promise(); - } catch (e) { - if (e.code !== 'ResourceAlreadyExistsException') { - throw e; + // If we set the log retention for a lambda, then due to the async nature of + // Lambda logging there could be a race condition when the same log group is + // already being created by the lambda execution. This can sometime result in + // an error "OperationAbortedException: A conflicting operation is currently + // in progress...Please try again." + // To avoid an error, we do as requested and try again. + let retryCount = options?.maxRetries == undefined ? 10 : options.maxRetries; + const delay = options?.retryOptions?.base == undefined ? 
10 : options.retryOptions.base; + do { + try { + const cloudwatchlogs = new AWS.CloudWatchLogs({ apiVersion: '2014-03-28', region, ...options }); + await cloudwatchlogs.createLogGroup({ logGroupName }).promise(); + return; + } catch (error) { + if (error.code === 'ResourceAlreadyExistsException') { + // The log group is already created by the lambda execution + return; + } + if (error.code === 'OperationAbortedException') { + if (retryCount > 0) { + retryCount--; + await new Promise(resolve => setTimeout(resolve, delay)); + continue; + } else { + // The log group is still being created by another execution but we are out of retries + throw new Error('Out of attempts to create a logGroup'); + } + } + // Any other error + console.error(error); + throw error; } - } + } while (true); // exit happens on retry count check } /** @@ -64,21 +89,16 @@ export async function handler(event: AWSLambda.CloudFormationCustomResourceEvent await setRetentionPolicy(logGroupName, logGroupRegion, retryOptions, parseInt(event.ResourceProperties.RetentionInDays, 10)); if (event.RequestType === 'Create') { - // Set a retention policy of 1 day on the logs of this function. The log - // group for this function should already exist at this stage because we - // already logged the event but due to the async nature of Lambda logging - // there could be a race condition. So we also try to create the log group - // of this function first. If multiple LogRetention constructs are present - // in the stack, they will try to act on this function's log group at the - // same time. This can sometime result in an OperationAbortedException. To - // avoid this and because this operation is not critical we catch all errors. 
- try { - const region = process.env.AWS_REGION; - await createLogGroupSafe(`/aws/lambda/${context.functionName}`, region, retryOptions); - await setRetentionPolicy(`/aws/lambda/${context.functionName}`, region, retryOptions, 1); - } catch (e) { - console.log(e); - } + // Set a retention policy of 1 day on the logs of this very function. + // Due to the async nature of the log group creation, the log group for this function might + // still be not created yet at this point. Therefore we attempt to create it. + // In case it is being created, createLogGroupSafe will handle the conflict. + const region = process.env.AWS_REGION; + await createLogGroupSafe(`/aws/lambda/${context.functionName}`, region, retryOptions); + // If createLogGroupSafe fails, the log group is not created even after multiple attempts + // In this case we have nothing to set the retention policy on but an exception will skip + // the next line. + await setRetentionPolicy(`/aws/lambda/${context.functionName}`, region, retryOptions, 1); + } } diff --git a/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts b/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts index a08ff060dc2a4..ba67371ca9d60 100644 --- a/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts +++ b/packages/@aws-cdk/aws-logs/test/test.log-retention-provider.ts @@ -27,6 +27,14 @@ function createRequest(type: string) { .reply(200); } +class MyError extends Error { + code: string; + constructor(message: string, code: string) { + super(message); + this.code = code; + } +} + export = { 'tearDown'(callback: any) { AWS.restore(); @@ -231,10 +239,60 @@ export = { test.done(); }, - async 'does not fail when operations on provider log group fail'(test: Test) { + async 'does not fail if operations on provider log group fails'(test: Test) { + let attempt = 2; const createLogGroupFake = (params: AWSSDK.CloudWatchLogs.CreateLogGroupRequest) => { if (params.logGroupName === '/aws/lambda/provider') { - return
Promise.reject(new Error('OperationAbortedException')); + if (attempt > 0) { + attempt--; + return Promise.reject(new MyError( + 'A conflicting operation is currently in progress against this resource. Please try again.', + 'OperationAbortedException')); + } else { + return Promise.resolve({}); + } + } + return Promise.resolve({}); + }; + + const putRetentionPolicyFake = sinon.fake.resolves({}); + const deleteRetentionPolicyFake = sinon.fake.resolves({}); + + AWS.mock('CloudWatchLogs', 'createLogGroup', createLogGroupFake); + AWS.mock('CloudWatchLogs', 'putRetentionPolicy', putRetentionPolicyFake); + AWS.mock('CloudWatchLogs', 'deleteRetentionPolicy', deleteRetentionPolicyFake); + + const event = { + ...eventCommon, + RequestType: 'Create', + ResourceProperties: { + ServiceToken: 'token', + RetentionInDays: '30', + LogGroupName: 'group', + }, + }; + + const request = createRequest('SUCCESS'); + + await provider.handler(event as AWSLambda.CloudFormationCustomResourceCreateEvent, context); + + test.equal(request.isDone(), true); + + test.done(); + }, + + async 'does not fail if operations on CDK lambda log group fails twice'(test: Test) { + let attempt = 2; + const createLogGroupFake = (params: AWSSDK.CloudWatchLogs.CreateLogGroupRequest) => { + if (params.logGroupName === 'group') { + if (attempt > 0) { + attempt--; + return Promise.reject(new MyError( + 'A conflicting operation is currently in progress against this resource. Please try again.', + 'OperationAbortedException')); + } else { + return Promise.resolve({}); + } } return Promise.resolve({}); }; @@ -265,6 +323,42 @@ export = { test.done(); }, + async 'does fail if operations on CDK lambda log group fails indefinitely'(test: Test) { + const createLogGroupFake = (params: AWSSDK.CloudWatchLogs.CreateLogGroupRequest) => { + if (params.logGroupName === 'group') { + return Promise.reject(new MyError( + 'A conflicting operation is currently in progress against this resource. 
Please try again.', + 'OperationAbortedException')); + } + return Promise.resolve({}); + }; + + const putRetentionPolicyFake = sinon.fake.resolves({}); + const deleteRetentionPolicyFake = sinon.fake.resolves({}); + + AWS.mock('CloudWatchLogs', 'createLogGroup', createLogGroupFake); + AWS.mock('CloudWatchLogs', 'putRetentionPolicy', putRetentionPolicyFake); + AWS.mock('CloudWatchLogs', 'deleteRetentionPolicy', deleteRetentionPolicyFake); + + const event = { + ...eventCommon, + RequestType: 'Create', + ResourceProperties: { + ServiceToken: 'token', + RetentionInDays: '30', + LogGroupName: 'group', + }, + }; + + const request = createRequest('FAILED'); + + await provider.handler(event as AWSLambda.CloudFormationCustomResourceCreateEvent, context); + + test.equal(request.isDone(), true); + + test.done(); + }, + async 'response data contains the log group name'(test: Test) { AWS.mock('CloudWatchLogs', 'createLogGroup', sinon.fake.resolves({})); AWS.mock('CloudWatchLogs', 'putRetentionPolicy', sinon.fake.resolves({})); diff --git a/packages/@aws-cdk/aws-neptune/lib/cluster.ts b/packages/@aws-cdk/aws-neptune/lib/cluster.ts index 4420659499a59..af8061ad6c21e 100644 --- a/packages/@aws-cdk/aws-neptune/lib/cluster.ts +++ b/packages/@aws-cdk/aws-neptune/lib/cluster.ts @@ -46,6 +46,10 @@ export class EngineVersion { * Neptune engine version 1.0.4.1 */ public static readonly V1_0_4_1 = new EngineVersion('1.0.4.1'); + /** + * Neptune engine version 1.0.5.0 + */ + public static readonly V1_0_5_0 = new EngineVersion('1.0.5.0'); /** * Constructor for specifying a custom engine version diff --git a/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts b/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts index 973f682e932c7..6bbb94261cec8 100644 --- a/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/cluster-engine.ts @@ -465,6 +465,8 @@ export class AuroraPostgresEngineVersion { public static readonly VER_12_4 = AuroraPostgresEngineVersion.of('12.4', '12', { 
s3Import: true, s3Export: true }); /** Version "12.6". */ public static readonly VER_12_6 = AuroraPostgresEngineVersion.of('12.6', '12', { s3Import: true, s3Export: true }); + /** Version "13.3". */ + public static readonly VER_13_3 = AuroraPostgresEngineVersion.of('13.3', '13', { s3Import: true, s3Export: true }); /** * Create a new AuroraPostgresEngineVersion with an arbitrary version. diff --git a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts index c070b0988e314..3ed27351c2196 100644 --- a/packages/@aws-cdk/aws-rds/lib/instance-engine.ts +++ b/packages/@aws-cdk/aws-rds/lib/instance-engine.ts @@ -249,6 +249,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_2_37 = MariaDbEngineVersion.of('10.2.37', '10.2'); /** Version "10.2.39". */ public static readonly VER_10_2_39 = MariaDbEngineVersion.of('10.2.39', '10.2'); + /** Version "10.2.40". */ + public static readonly VER_10_2_40 = MariaDbEngineVersion.of('10.2.40', '10.2'); /** Version "10.3" (only a major version, without a specific minor version). */ public static readonly VER_10_3 = MariaDbEngineVersion.of('10.3', '10.3'); @@ -262,6 +264,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_3_23 = MariaDbEngineVersion.of('10.3.23', '10.3'); /** Version "10.3.28". */ public static readonly VER_10_3_28 = MariaDbEngineVersion.of('10.3.28', '10.3'); + /** Version "10.3.31". */ + public static readonly VER_10_3_31 = MariaDbEngineVersion.of('10.3.31', '10.3'); /** Version "10.4" (only a major version, without a specific minor version). */ public static readonly VER_10_4 = MariaDbEngineVersion.of('10.4', '10.4'); @@ -271,6 +275,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_4_13 = MariaDbEngineVersion.of('10.4.13', '10.4'); /** Version "10.4.18". */ public static readonly VER_10_4_18 = MariaDbEngineVersion.of('10.4.18', '10.4'); + /** Version "10.4.21". 
*/ + public static readonly VER_10_4_21 = MariaDbEngineVersion.of('10.4.21', '10.4'); /** Version "10.5" (only a major version, without a specific minor version). */ public static readonly VER_10_5 = MariaDbEngineVersion.of('10.5', '10.5'); @@ -278,6 +284,8 @@ export class MariaDbEngineVersion { public static readonly VER_10_5_8 = MariaDbEngineVersion.of('10.5.8', '10.5'); /** Version "10.5.9". */ public static readonly VER_10_5_9 = MariaDbEngineVersion.of('10.5.9', '10.5'); + /** Version "10.5.12". */ + public static readonly VER_10_5_12 = MariaDbEngineVersion.of('10.5.12', '10.5'); /** * Create a new MariaDbEngineVersion with an arbitrary version. diff --git a/packages/@aws-cdk/aws-redshift/README.md b/packages/@aws-cdk/aws-redshift/README.md index 576068b02f818..8ff734a6be255 100644 --- a/packages/@aws-cdk/aws-redshift/README.md +++ b/packages/@aws-cdk/aws-redshift/README.md @@ -26,15 +26,16 @@ To set up a Redshift cluster, define a `Cluster`. It will be launched in a VPC. You can specify a VPC, otherwise one will be created. The nodes are always launched in private subnets and are encrypted by default. -``` typescript -import redshift = require('@aws-cdk/aws-redshift'); -... -const cluster = new redshift.Cluster(this, 'Redshift', { - masterUser: { - masterUsername: 'admin', - }, - vpc - }); +```ts +import * as ec2 from '@aws-cdk/aws-ec2'; + +const vpc = new ec2.Vpc(this, 'Vpc'); +const cluster = new Cluster(this, 'Redshift', { + masterUser: { + masterUsername: 'admin', + }, + vpc +}); ``` By default, the master password will be generated and stored in AWS Secrets Manager. @@ -49,13 +50,13 @@ Depending on your use case, you can make the cluster publicly accessible with th To control who can access the cluster, use the `.connections` attribute. 
Redshift Clusters have a default port, so you don't need to specify the port: -```ts -cluster.connections.allowFromAnyIpv4('Open to the world'); +```ts fixture=cluster +cluster.connections.allowDefaultPortFromAnyIpv4('Open to the world'); ``` The endpoint to access your database cluster will be available as the `.clusterEndpoint` attribute: -```ts +```ts fixture=cluster cluster.clusterEndpoint.socketAddress; // "HOSTNAME:PORT" ``` @@ -63,16 +64,184 @@ cluster.clusterEndpoint.socketAddress; // "HOSTNAME:PORT" When the master password is generated and stored in AWS Secrets Manager, it can be rotated automatically: -```ts +```ts fixture=cluster cluster.addRotationSingleUser(); // Will rotate automatically after 30 days ``` The multi user rotation scheme is also available: -```ts +```ts fixture=cluster +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; + cluster.addRotationMultiUser('MyUser', { - secret: myImportedSecret + secret: secretsmanager.Secret.fromSecretNameV2(this, 'Imported Secret', 'my-secret'), +}); +``` + +## Database Resources + +This module allows for the creation of non-CloudFormation database resources such as users +and tables. This allows you to manage identities, permissions, and stateful resources +within your Redshift cluster from your CDK application. + +Because these resources are not available in CloudFormation, this library leverages +[custom +resources](https://docs.aws.amazon.com/cdk/api/latest/docs/custom-resources-readme.html) +to manage them. In addition to the IAM permissions required to make Redshift service +calls, the execution role for the custom resource handler requires database credentials to +create resources within the cluster. + +These database credentials can be supplied explicitly through the `adminUser` properties +of the various database resource constructs. Alternatively, the credentials can be +automatically pulled from the Redshift cluster's default administrator +credentials. 
However, this option is only available if the password for the credentials +was generated by the CDK application (i.e., no value was provided for [the `masterPassword` +property](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-redshift.Login.html#masterpasswordspan-classapi-icon-api-icon-experimental-titlethis-api-element-is-experimental-it-may-change-without-noticespan) +of +[`Cluster.masterUser`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-redshift.Cluster.html#masteruserspan-classapi-icon-api-icon-experimental-titlethis-api-element-is-experimental-it-may-change-without-noticespan)). + +### Creating Users + +Create a user within a Redshift cluster database by instantiating a `User` construct. This +will generate a username and password, store the credentials in an [AWS Secrets Manager +`Secret`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-secretsmanager.Secret.html), +and make a query to the Redshift cluster to create a new database user with the +credentials. + +```ts fixture=cluster +new User(this, 'User', { + cluster: cluster, + databaseName: 'databaseName', +}); +``` + +By default, the user credentials are encrypted with your AWS account's default Secrets +Manager encryption key. You can specify the encryption key used for this purpose by +supplying a key in the `encryptionKey` property. + +```ts fixture=cluster +import * as kms from '@aws-cdk/aws-kms'; + +const encryptionKey = new kms.Key(this, 'Key'); +new User(this, 'User', { + encryptionKey: encryptionKey, + cluster: cluster, + databaseName: 'databaseName', +}); +``` + +By default, a username is automatically generated from the user construct ID and its path +in the construct tree. You can specify a particular username by providing a value for the +`username` property. Usernames must be valid identifiers; see: [Names and +identifiers](https://docs.aws.amazon.com/redshift/latest/dg/r_names.html) in the *Amazon +Redshift Database Developer Guide*.
+ +```ts fixture=cluster +new User(this, 'User', { + username: 'myuser', + cluster: cluster, + databaseName: 'databaseName', +}); +``` + +The user password is generated by AWS Secrets Manager using the default configuration +found in +[`secretsmanager.SecretStringGenerator`](https://docs.aws.amazon.com/cdk/api/latest/docs/@aws-cdk_aws-secretsmanager.SecretStringGenerator.html), +except with password length `30` and some SQL-incompliant characters excluded. The +plaintext for the password will never be present in the CDK application; instead, a +[CloudFormation Dynamic +Reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html) +will be used wherever the password value is required. + +### Creating Tables + +Create a table within a Redshift cluster database by instantiating a `Table` +construct. This will make a query to the Redshift cluster to create a new database table +with the supplied schema. + +```ts fixture=cluster +new Table(this, 'Table', { + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster: cluster, + databaseName: 'databaseName', +}); +``` + +### Granting Privileges + +You can give a user privileges to perform certain actions on a table by using the +`Table.grant()` method. + +```ts fixture=cluster +const user = new User(this, 'User', { + cluster: cluster, + databaseName: 'databaseName', +}); +const table = new Table(this, 'Table', { + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster: cluster, + databaseName: 'databaseName', +}); + +table.grant(user, TableAction.DROP, TableAction.SELECT); +``` + +Take care when managing privileges via the CDK, as attempting to manage a user's +privileges on the same table in multiple CDK applications could lead to accidentally +overriding these permissions. Consider the following two CDK applications which both refer +to the same user and table. 
In application 1, the resources are created and the user is +given `INSERT` permissions on the table: + +```ts fixture=cluster +const databaseName = 'databaseName'; +const username = 'myuser' +const tableName = 'mytable' + +const user = new User(this, 'User', { + username: username, + cluster: cluster, + databaseName: databaseName, +}); +const table = new Table(this, 'Table', { + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster: cluster, + databaseName: databaseName, +}); +table.grant(user, TableAction.INSERT); +``` + +In application 2, the resources are imported and the user is given `INSERT` permissions on +the table: + +```ts fixture=cluster +const databaseName = 'databaseName'; +const username = 'myuser' +const tableName = 'mytable' + +const user = User.fromUserAttributes(this, 'User', { + username: username, + password: SecretValue.plainText('NOT_FOR_PRODUCTION'), + cluster: cluster, + databaseName: databaseName, +}); +const table = Table.fromTableAttributes(this, 'Table', { + tableName: tableName, + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster: cluster, + databaseName: 'databaseName', }); +table.grant(user, TableAction.INSERT); ``` -This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project. +Both applications attempt to grant the user the appropriate privilege on the table by +submitting a `GRANT USER` SQL query to the Redshift cluster. Note that the latter of these +two calls will have no effect since the user has already been granted the privilege. + +Now, if application 1 were to remove the call to `grant`, a `REVOKE USER` SQL query is +submitted to the Redshift cluster. In general, application 1 does not know that +application 2 has also granted this permission and thus cannot decide not to issue the +revocation. 
This leads to the undesirable state where application 2 still contains the +call to `grant` but the user does not have the specified permission. + +Note that this does not occur when duplicate privileges are granted within the same +application, as such privileges are de-duplicated before any SQL query is submitted. diff --git a/packages/@aws-cdk/aws-redshift/lib/database-options.ts b/packages/@aws-cdk/aws-redshift/lib/database-options.ts new file mode 100644 index 0000000000000..b7eb21e57e24c --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/database-options.ts @@ -0,0 +1,26 @@ +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; +import { ICluster } from './cluster'; + +/** + * Properties for accessing a Redshift database + */ +export interface DatabaseOptions { + /** + * The cluster containing the database. + */ + readonly cluster: ICluster; + + /** + * The name of the database. + */ + readonly databaseName: string; + + /** + * The secret containing credentials to a Redshift user with administrator privileges. + * + * Secret JSON schema: `{ username: string; password: string }`. 
+ * + * @default - the admin secret is taken from the cluster + */ + readonly adminUser?: secretsmanager.ISecret; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/index.ts b/packages/@aws-cdk/aws-redshift/lib/index.ts index 8a8fc89428ce3..ec552d2da8c3c 100644 --- a/packages/@aws-cdk/aws-redshift/lib/index.ts +++ b/packages/@aws-cdk/aws-redshift/lib/index.ts @@ -1,8 +1,11 @@ export * from './cluster'; export * from './parameter-group'; +export * from './database-options'; export * from './database-secret'; export * from './endpoint'; export * from './subnet-group'; +export * from './table'; +export * from './user'; // AWS::Redshift CloudFormation Resources: export * from './redshift.generated'; diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/handler-name.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/handler-name.ts new file mode 100644 index 0000000000000..b758fb5819063 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/handler-name.ts @@ -0,0 +1,5 @@ +export enum HandlerName { + User = 'user', + Table = 'table', + UserTablePrivileges = 'user-table-privileges', +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/index.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/index.ts new file mode 100644 index 0000000000000..60eb2a009173c --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/index.ts @@ -0,0 +1,20 @@ +/* eslint-disable-next-line import/no-unresolved */ +import * as AWSLambda from 'aws-lambda'; +import { HandlerName } from './handler-name'; +import { handler as managePrivileges } from './privileges'; +import { handler as manageTable } from './table'; +import { handler as manageUser } from './user'; + +const HANDLERS: { [key in HandlerName]: ((props: any, event: AWSLambda.CloudFormationCustomResourceEvent) => Promise<any>) } = { + [HandlerName.Table]: manageTable, + [HandlerName.User]:
manageUser, + [HandlerName.UserTablePrivileges]: managePrivileges, +}; + +export async function handler(event: AWSLambda.CloudFormationCustomResourceEvent) { + const subHandler = HANDLERS[event.ResourceProperties.handler as HandlerName]; + if (!subHandler) { + throw new Error(`Requested handler ${event.ResourceProperties.handler} is not in supported set: ${JSON.stringify(Object.keys(HANDLERS))}`); + } + return subHandler(event.ResourceProperties, event); +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/privileges.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/privileges.ts new file mode 100644 index 0000000000000..9f2064d0e5e5a --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/privileges.ts @@ -0,0 +1,70 @@ +/* eslint-disable-next-line import/no-unresolved */ +import * as AWSLambda from 'aws-lambda'; +import { TablePrivilege, UserTablePrivilegesHandlerProps } from '../handler-props'; +import { ClusterProps, executeStatement, makePhysicalId } from './util'; + +export async function handler(props: UserTablePrivilegesHandlerProps & ClusterProps, event: AWSLambda.CloudFormationCustomResourceEvent) { + const username = props.username; + const tablePrivileges = props.tablePrivileges; + const clusterProps = props; + + if (event.RequestType === 'Create') { + await grantPrivileges(username, tablePrivileges, clusterProps); + return { PhysicalResourceId: makePhysicalId(username, clusterProps, event.RequestId) }; + } else if (event.RequestType === 'Delete') { + await revokePrivileges(username, tablePrivileges, clusterProps); + return; + } else if (event.RequestType === 'Update') { + const { replace } = await updatePrivileges( + username, + tablePrivileges, + clusterProps, + event.OldResourceProperties as UserTablePrivilegesHandlerProps & ClusterProps, + ); + const physicalId = replace ? 
makePhysicalId(username, clusterProps, event.RequestId) : event.PhysicalResourceId; + return { PhysicalResourceId: physicalId }; + } else { + /* eslint-disable-next-line dot-notation */ + throw new Error(`Unrecognized event type: ${event['RequestType']}`); + } +} + +async function revokePrivileges(username: string, tablePrivileges: TablePrivilege[], clusterProps: ClusterProps) { + await Promise.all(tablePrivileges.map(({ tableName, actions }) => { + return executeStatement(`REVOKE ${actions.join(', ')} ON ${tableName} FROM ${username}`, clusterProps); + })); +} + +async function grantPrivileges(username: string, tablePrivileges: TablePrivilege[], clusterProps: ClusterProps) { + await Promise.all(tablePrivileges.map(({ tableName, actions }) => { + return executeStatement(`GRANT ${actions.join(', ')} ON ${tableName} TO ${username}`, clusterProps); + })); +} + +async function updatePrivileges( + username: string, + tablePrivileges: TablePrivilege[], + clusterProps: ClusterProps, + oldResourceProperties: UserTablePrivilegesHandlerProps & ClusterProps, +): Promise<{ replace: boolean }> { + const oldClusterProps = oldResourceProperties; + if (clusterProps.clusterName !== oldClusterProps.clusterName || clusterProps.databaseName !== oldClusterProps.databaseName) { + await grantPrivileges(username, tablePrivileges, clusterProps); + return { replace: true }; + } + + const oldUsername = oldResourceProperties.username; + if (oldUsername !== username) { + await grantPrivileges(username, tablePrivileges, clusterProps); + return { replace: true }; + } + + const oldTablePrivileges = oldResourceProperties.tablePrivileges; + if (oldTablePrivileges !== tablePrivileges) { + await revokePrivileges(username, oldTablePrivileges, clusterProps); + await grantPrivileges(username, tablePrivileges, clusterProps); + return { replace: false }; + } + + return { replace: false }; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/table.ts 
b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/table.ts new file mode 100644 index 0000000000000..a2e2a4dc4bee9 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/table.ts @@ -0,0 +1,75 @@ +/* eslint-disable-next-line import/no-unresolved */ +import * as AWSLambda from 'aws-lambda'; +import { Column } from '../../table'; +import { TableHandlerProps } from '../handler-props'; +import { ClusterProps, executeStatement } from './util'; + +export async function handler(props: TableHandlerProps & ClusterProps, event: AWSLambda.CloudFormationCustomResourceEvent) { + const tableNamePrefix = props.tableName.prefix; + const tableNameSuffix = props.tableName.generateSuffix ? `${event.RequestId.substring(0, 8)}` : ''; + const tableColumns = props.tableColumns; + const clusterProps = props; + + if (event.RequestType === 'Create') { + const tableName = await createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps); + return { PhysicalResourceId: tableName }; + } else if (event.RequestType === 'Delete') { + await dropTable(event.PhysicalResourceId, clusterProps); + return; + } else if (event.RequestType === 'Update') { + const tableName = await updateTable( + event.PhysicalResourceId, + tableNamePrefix, + tableNameSuffix, + tableColumns, + clusterProps, + event.OldResourceProperties as TableHandlerProps & ClusterProps, + ); + return { PhysicalResourceId: tableName }; + } else { + /* eslint-disable-next-line dot-notation */ + throw new Error(`Unrecognized event type: ${event['RequestType']}`); + } +} + +async function createTable(tableNamePrefix: string, tableNameSuffix: string, tableColumns: Column[], clusterProps: ClusterProps): Promise<string> { + const tableName = tableNamePrefix + tableNameSuffix; + const tableColumnsString = tableColumns.map(column => `${column.name} ${column.dataType}`).join(); + await executeStatement(`CREATE TABLE ${tableName} (${tableColumnsString})`, clusterProps); + return tableName; +}
+ +async function dropTable(tableName: string, clusterProps: ClusterProps) { + await executeStatement(`DROP TABLE ${tableName}`, clusterProps); +} + +async function updateTable( + tableName: string, + tableNamePrefix: string, + tableNameSuffix: string, + tableColumns: Column[], + clusterProps: ClusterProps, + oldResourceProperties: TableHandlerProps & ClusterProps, +): Promise { + const oldClusterProps = oldResourceProperties; + if (clusterProps.clusterName !== oldClusterProps.clusterName || clusterProps.databaseName !== oldClusterProps.databaseName) { + return createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps); + } + + const oldTableNamePrefix = oldResourceProperties.tableName.prefix; + if (tableNamePrefix !== oldTableNamePrefix) { + return createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps); + } + + const oldTableColumns = oldResourceProperties.tableColumns; + if (!oldTableColumns.every(oldColumn => tableColumns.some(column => column.name === oldColumn.name && column.dataType === oldColumn.dataType))) { + return createTable(tableNamePrefix, tableNameSuffix, tableColumns, clusterProps); + } + + const additions = tableColumns.filter(column => { + return !oldTableColumns.some(oldColumn => column.name === oldColumn.name && column.dataType === oldColumn.dataType); + }).map(column => `ADD ${column.name} ${column.dataType}`); + await Promise.all(additions.map(addition => executeStatement(`ALTER TABLE ${tableName} ${addition}`, clusterProps))); + + return tableName; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/user.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/user.ts new file mode 100644 index 0000000000000..707af78714e43 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/user.ts @@ -0,0 +1,82 @@ +/* eslint-disable-next-line import/no-unresolved */ +import * as AWSLambda from 'aws-lambda'; +/* eslint-disable-next-line 
import/no-extraneous-dependencies */ +import * as SecretsManager from 'aws-sdk/clients/secretsmanager'; +import { UserHandlerProps } from '../handler-props'; +import { ClusterProps, executeStatement, makePhysicalId } from './util'; + +const secretsManager = new SecretsManager(); + +export async function handler(props: UserHandlerProps & ClusterProps, event: AWSLambda.CloudFormationCustomResourceEvent) { + const username = props.username; + const passwordSecretArn = props.passwordSecretArn; + const clusterProps = props; + + if (event.RequestType === 'Create') { + await createUser(username, passwordSecretArn, clusterProps); + return { PhysicalResourceId: makePhysicalId(username, clusterProps, event.RequestId), Data: { username: username } }; + } else if (event.RequestType === 'Delete') { + await dropUser(username, clusterProps); + return; + } else if (event.RequestType === 'Update') { + const { replace } = await updateUser(username, passwordSecretArn, clusterProps, event.OldResourceProperties as UserHandlerProps & ClusterProps); + const physicalId = replace ? 
makePhysicalId(username, clusterProps, event.RequestId) : event.PhysicalResourceId; + return { PhysicalResourceId: physicalId, Data: { username: username } }; + } else { + /* eslint-disable-next-line dot-notation */ + throw new Error(`Unrecognized event type: ${event['RequestType']}`); + } +} + +async function dropUser(username: string, clusterProps: ClusterProps) { + await executeStatement(`DROP USER ${username}`, clusterProps); +} + +async function createUser(username: string, passwordSecretArn: string, clusterProps: ClusterProps) { + const password = await getPasswordFromSecret(passwordSecretArn); + + await executeStatement(`CREATE USER ${username} PASSWORD '${password}'`, clusterProps); +} + +async function updateUser( + username: string, + passwordSecretArn: string, + clusterProps: ClusterProps, + oldResourceProperties: UserHandlerProps & ClusterProps, +): Promise<{ replace: boolean }> { + const oldClusterProps = oldResourceProperties; + if (clusterProps.clusterName !== oldClusterProps.clusterName || clusterProps.databaseName !== oldClusterProps.databaseName) { + await createUser(username, passwordSecretArn, clusterProps); + return { replace: true }; + } + + const oldUsername = oldResourceProperties.username; + const oldPasswordSecretArn = oldResourceProperties.passwordSecretArn; + const oldPassword = await getPasswordFromSecret(oldPasswordSecretArn); + const password = await getPasswordFromSecret(passwordSecretArn); + + if (username !== oldUsername) { + await createUser(username, passwordSecretArn, clusterProps); + return { replace: true }; + } + + if (password !== oldPassword) { + await executeStatement(`ALTER USER ${username} PASSWORD '${password}'`, clusterProps); + return { replace: false }; + } + + return { replace: false }; +} + +async function getPasswordFromSecret(passwordSecretArn: string): Promise { + const secretValue = await secretsManager.getSecretValue({ + SecretId: passwordSecretArn, + }).promise(); + const secretString = 
secretValue.SecretString; + if (!secretString) { + throw new Error(`Secret string for ${passwordSecretArn} was empty`); + } + const { password } = JSON.parse(secretString); + + return password; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/util.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/util.ts new file mode 100644 index 0000000000000..d834cd474f986 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query-provider/util.ts @@ -0,0 +1,40 @@ +/* eslint-disable-next-line import/no-extraneous-dependencies */ +import * as RedshiftData from 'aws-sdk/clients/redshiftdata'; +import { DatabaseQueryHandlerProps } from '../handler-props'; + +const redshiftData = new RedshiftData(); + +export type ClusterProps = Omit; + +export async function executeStatement(statement: string, clusterProps: ClusterProps): Promise { + const executeStatementProps = { + ClusterIdentifier: clusterProps.clusterName, + Database: clusterProps.databaseName, + SecretArn: clusterProps.adminUserArn, + Sql: statement, + }; + const executedStatement = await redshiftData.executeStatement(executeStatementProps).promise(); + if (!executedStatement.Id) { + throw new Error('Service error: Statement execution did not return a statement ID'); + } + await waitForStatementComplete(executedStatement.Id); +} + +const waitTimeout = 100; +async function waitForStatementComplete(statementId: string): Promise { + await new Promise((resolve: (value: void) => void) => { + setTimeout(() => resolve(), waitTimeout); + }); + const statement = await redshiftData.describeStatement({ Id: statementId }).promise(); + if (statement.Status !== 'FINISHED' && statement.Status !== 'FAILED' && statement.Status !== 'ABORTED') { + return waitForStatementComplete(statementId); + } else if (statement.Status === 'FINISHED') { + return; + } else { + throw new Error(`Statement status was ${statement.Status}: ${statement.Error}`); + } +} + +export function 
makePhysicalId(resourceName: string, clusterProps: ClusterProps, requestId: string): string { + return `${clusterProps.clusterName}:${clusterProps.databaseName}:${resourceName}:${requestId}`; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/database-query.ts b/packages/@aws-cdk/aws-redshift/lib/private/database-query.ts new file mode 100644 index 0000000000000..2f724334b637a --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/database-query.ts @@ -0,0 +1,105 @@ +import * as path from 'path'; +import * as iam from '@aws-cdk/aws-iam'; +import * as lambda from '@aws-cdk/aws-lambda'; +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; +import * as cdk from '@aws-cdk/core'; +import * as customresources from '@aws-cdk/custom-resources'; +import { Construct } from 'constructs'; +import { Cluster } from '../cluster'; +import { DatabaseOptions } from '../database-options'; +import { DatabaseQueryHandlerProps } from './handler-props'; + +// keep this import separate from other imports to reduce chance for merge conflicts with v2-main +// eslint-disable-next-line no-duplicate-imports, import/order +import { Construct as CoreConstruct } from '@aws-cdk/core'; + +export interface DatabaseQueryProps extends DatabaseOptions { + readonly handler: string; + readonly properties: HandlerProps; + /** + * The policy to apply when this resource is removed from the application. 
+ * + * @default cdk.RemovalPolicy.Destroy + */ + readonly removalPolicy?: cdk.RemovalPolicy; +} + +export class DatabaseQuery extends CoreConstruct implements iam.IGrantable { + readonly grantPrincipal: iam.IPrincipal; + readonly ref: string; + + private readonly resource: cdk.CustomResource; + + constructor(scope: Construct, id: string, props: DatabaseQueryProps) { + super(scope, id); + + const adminUser = this.getAdminUser(props); + const handler = new lambda.SingletonFunction(this, 'Handler', { + code: lambda.Code.fromAsset(path.join(__dirname, 'database-query-provider')), + runtime: lambda.Runtime.NODEJS_14_X, + handler: 'index.handler', + timeout: cdk.Duration.minutes(1), + uuid: '3de5bea7-27da-4796-8662-5efb56431b5f', + lambdaPurpose: 'Query Redshift Database', + }); + handler.addToRolePolicy(new iam.PolicyStatement({ + actions: ['redshift-data:DescribeStatement', 'redshift-data:ExecuteStatement'], + resources: ['*'], + })); + adminUser.grantRead(handler); + + const provider = new customresources.Provider(this, 'Provider', { + onEventHandler: handler, + }); + + const queryHandlerProps: DatabaseQueryHandlerProps & HandlerProps = { + handler: props.handler, + clusterName: props.cluster.clusterName, + adminUserArn: adminUser.secretArn, + databaseName: props.databaseName, + ...props.properties, + }; + this.resource = new cdk.CustomResource(this, 'Resource', { + resourceType: 'Custom::RedshiftDatabaseQuery', + serviceToken: provider.serviceToken, + removalPolicy: props.removalPolicy, + properties: queryHandlerProps, + }); + + this.grantPrincipal = handler.grantPrincipal; + this.ref = this.resource.ref; + } + + public applyRemovalPolicy(policy: cdk.RemovalPolicy): void { + this.resource.applyRemovalPolicy(policy); + } + + public getAtt(attributeName: string): cdk.Reference { + return this.resource.getAtt(attributeName); + } + + public getAttString(attributeName: string): string { + return this.resource.getAttString(attributeName); + } + + private 
getAdminUser(props: DatabaseOptions): secretsmanager.ISecret { + const cluster = props.cluster; + let adminUser = props.adminUser; + if (!adminUser) { + if (cluster instanceof Cluster) { + if (cluster.secret) { + adminUser = cluster.secret; + } else { + throw new Error( + 'Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster did not generate admin user credentials (they were provided explicitly)', + ); + } + } else { + throw new Error( + 'Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster was imported', + ); + } + } + return adminUser; + } +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/handler-props.ts b/packages/@aws-cdk/aws-redshift/lib/private/handler-props.ts new file mode 100644 index 0000000000000..b00cc667a2ced --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/handler-props.ts @@ -0,0 +1,31 @@ +import { Column } from '../table'; + +export interface DatabaseQueryHandlerProps { + readonly handler: string; + readonly clusterName: string; + readonly adminUserArn: string; + readonly databaseName: string; +} + +export interface UserHandlerProps { + readonly username: string; + readonly passwordSecretArn: string; +} + +export interface TableHandlerProps { + readonly tableName: { + readonly prefix: string; + readonly generateSuffix: boolean; + }; + readonly tableColumns: Column[]; +} + +export interface TablePrivilege { + readonly tableName: string; + readonly actions: string[]; +} + +export interface UserTablePrivilegesHandlerProps { + readonly username: string; + readonly tablePrivileges: TablePrivilege[]; +} diff --git a/packages/@aws-cdk/aws-redshift/lib/private/privileges.ts b/packages/@aws-cdk/aws-redshift/lib/private/privileges.ts new file mode 100644 index 0000000000000..e8d9ed13d13dc --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/private/privileges.ts @@ -0,0 +1,101 @@ +import * as cdk from 
'@aws-cdk/core'; +import { Construct } from 'constructs'; +import { DatabaseOptions } from '../database-options'; +import { ITable, TableAction } from '../table'; +import { IUser } from '../user'; +import { DatabaseQuery } from './database-query'; +import { HandlerName } from './database-query-provider/handler-name'; +import { TablePrivilege as SerializedTablePrivilege, UserTablePrivilegesHandlerProps } from './handler-props'; + +// keep this import separate from other imports to reduce chance for merge conflicts with v2-main +// eslint-disable-next-line no-duplicate-imports, import/order +import { Construct as CoreConstruct } from '@aws-cdk/core'; + +/** + * The Redshift table and action that make up a privilege that can be granted to a Redshift user. + */ +export interface TablePrivilege { + /** + * The table on which privileges will be granted. + */ + readonly table: ITable; + + /** + * The actions that will be granted. + */ + readonly actions: TableAction[]; +} + +/** + * Properties for specifying privileges granted to a Redshift user on Redshift tables. + */ +export interface UserTablePrivilegesProps extends DatabaseOptions { + /** + * The user to which privileges will be granted. + */ + readonly user: IUser; + + /** + * The privileges to be granted. + * + * @default [] - use `addPrivileges` to grant privileges after construction + */ + readonly privileges?: TablePrivilege[]; +} + +/** + * Privileges granted to a Redshift user on Redshift tables. + * + * This construct is located in the `private` directory to ensure that it is not exported for direct public use. This + * means that user privileges must be managed through the `Table.grant` method or the `User.addTablePrivileges` + * method. Thus, each `User` will have at most one `UserTablePrivileges` construct to manage its privileges. For details + * on why this is a Good Thing, see the README, under "Granting Privileges". 
+ */ +export class UserTablePrivileges extends CoreConstruct { + private privileges: TablePrivilege[]; + + constructor(scope: Construct, id: string, props: UserTablePrivilegesProps) { + super(scope, id); + + this.privileges = props.privileges ?? []; + + new DatabaseQuery(this, 'Resource', { + ...props, + handler: HandlerName.UserTablePrivileges, + properties: { + username: props.user.username, + tablePrivileges: cdk.Lazy.any({ + produce: () => { + const reducedPrivileges = this.privileges.reduce((privileges, { table, actions }) => { + const tableName = table.tableName; + if (!(tableName in privileges)) { + privileges[tableName] = []; + } + actions = actions.concat(privileges[tableName]); + if (actions.includes(TableAction.ALL)) { + actions = [TableAction.ALL]; + } + if (actions.includes(TableAction.UPDATE) || actions.includes(TableAction.DELETE)) { + actions.push(TableAction.SELECT); + } + privileges[tableName] = Array.from(new Set(actions)); + return privileges; + }, {} as { [key: string]: TableAction[] }); + const serializedPrivileges: SerializedTablePrivilege[] = Object.entries(reducedPrivileges).map(([tableName, actions]) => ({ + tableName: tableName, + actions: actions.map(action => TableAction[action]), + })); + return serializedPrivileges; + }, + }) as any, + }, + }); + } + + /** + * Grant this user additional privileges. 
+ */ + addPrivileges(table: ITable, ...actions: TableAction[]): void { + this.privileges.push({ table, actions }); + } +} diff --git a/packages/@aws-cdk/aws-redshift/lib/table.ts b/packages/@aws-cdk/aws-redshift/lib/table.ts new file mode 100644 index 0000000000000..337abdedd00a1 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/table.ts @@ -0,0 +1,222 @@ +import * as cdk from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import { ICluster } from './cluster'; +import { DatabaseOptions } from './database-options'; +import { DatabaseQuery } from './private/database-query'; +import { HandlerName } from './private/database-query-provider/handler-name'; +import { TableHandlerProps } from './private/handler-props'; +import { IUser } from './user'; + +// keep this import separate from other imports to reduce chance for merge conflicts with v2-main +// eslint-disable-next-line no-duplicate-imports, import/order +import { Construct as CoreConstruct } from '@aws-cdk/core'; + +/** + * An action that a Redshift user can be granted privilege to perform on a table. + */ +export enum TableAction { + /** + * Grants privilege to select data from a table or view using a SELECT statement. + */ + SELECT, + + /** + * Grants privilege to load data into a table using an INSERT statement or a COPY statement. + */ + INSERT, + + /** + * Grants privilege to update a table column using an UPDATE statement. + */ + UPDATE, + + /** + * Grants privilege to delete a data row from a table. + */ + DELETE, + + /** + * Grants privilege to drop a table. + */ + DROP, + + /** + * Grants privilege to create a foreign key constraint. + * + * You need to grant this privilege on both the referenced table and the referencing table; otherwise, the user can't create the constraint. + */ + REFERENCES, + + /** + * Grants all available privileges at once to the specified user or user group. + */ + ALL +} + +/** + * A column in a Redshift table. 
+ */ +export interface Column { + /** + * The name of the column. + */ + readonly name: string; + + /** + * The data type of the column. + */ + readonly dataType: string; +} + +/** + * Properties for configuring a Redshift table. + */ +export interface TableProps extends DatabaseOptions { + /** + * The name of the table. + * + * @default - a name is generated + */ + readonly tableName?: string; + + /** + * The columns of the table. + */ + readonly tableColumns: Column[]; + + /** + * The policy to apply when this resource is removed from the application. + * + * @default cdk.RemovalPolicy.Retain + */ + readonly removalPolicy?: cdk.RemovalPolicy; +} + +/** + * Represents a table in a Redshift database. + */ +export interface ITable extends cdk.IConstruct { + /** + * Name of the table. + */ + readonly tableName: string; + + /** + * The columns of the table. + */ + readonly tableColumns: Column[]; + + /** + * The cluster where the table is located. + */ + readonly cluster: ICluster; + + /** + * The name of the database where the table is located. + */ + readonly databaseName: string; + + /** + * Grant a user privilege to access this table. + */ + grant(user: IUser, ...actions: TableAction[]): void; +} + +/** + * A full specification of a Redshift table that can be used to import it fluently into the CDK application. + */ +export interface TableAttributes { + /** + * Name of the table. + */ + readonly tableName: string; + + /** + * The columns of the table. + */ + readonly tableColumns: Column[]; + + /** + * The cluster where the table is located. + */ + readonly cluster: ICluster; + + /** + * The name of the database where the table is located. 
+ */ + readonly databaseName: string; +} + +abstract class TableBase extends CoreConstruct implements ITable { + abstract readonly tableName: string; + abstract readonly tableColumns: Column[]; + abstract readonly cluster: ICluster; + abstract readonly databaseName: string; + grant(user: IUser, ...actions: TableAction[]) { + user.addTablePrivileges(this, ...actions); + } +} + +/** + * A table in a Redshift cluster. + */ +export class Table extends TableBase { + /** + * Specify a Redshift table using a table name and schema that already exists. + */ + static fromTableAttributes(scope: Construct, id: string, attrs: TableAttributes): ITable { + return new class extends TableBase { + readonly tableName = attrs.tableName; + readonly tableColumns = attrs.tableColumns; + readonly cluster = attrs.cluster; + readonly databaseName = attrs.databaseName; + }(scope, id); + } + + readonly tableName: string; + readonly tableColumns: Column[]; + readonly cluster: ICluster; + readonly databaseName: string; + + private resource: DatabaseQuery; + + constructor(scope: Construct, id: string, props: TableProps) { + super(scope, id); + + this.tableColumns = props.tableColumns; + this.cluster = props.cluster; + this.databaseName = props.databaseName; + + this.resource = new DatabaseQuery(this, 'Resource', { + removalPolicy: cdk.RemovalPolicy.RETAIN, + ...props, + handler: HandlerName.Table, + properties: { + tableName: { + prefix: props.tableName ?? cdk.Names.uniqueId(this), + generateSuffix: !props.tableName, + }, + tableColumns: this.tableColumns, + }, + }); + + this.tableName = this.resource.ref; + } + + /** + * Apply the given removal policy to this resource + * + * The Removal Policy controls what happens to this resource when it stops + * being managed by CloudFormation, either because you've removed it from the + * CDK application or because you've made a change that requires the resource + * to be replaced. 
+ * + * The resource can be destroyed (`RemovalPolicy.DESTROY`), or left in your AWS + * account for data recovery and cleanup later (`RemovalPolicy.RETAIN`). + * + * This resource is retained by default. + */ + public applyRemovalPolicy(policy: cdk.RemovalPolicy): void { + this.resource.applyRemovalPolicy(policy); + } +} diff --git a/packages/@aws-cdk/aws-redshift/lib/user.ts b/packages/@aws-cdk/aws-redshift/lib/user.ts new file mode 100644 index 0000000000000..3b5c8d0829ef8 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/lib/user.ts @@ -0,0 +1,186 @@ +import * as kms from '@aws-cdk/aws-kms'; +import * as cdk from '@aws-cdk/core'; +import { Construct } from 'constructs'; +import { ICluster } from './cluster'; +import { DatabaseOptions } from './database-options'; +import { DatabaseSecret } from './database-secret'; +import { DatabaseQuery } from './private/database-query'; +import { HandlerName } from './private/database-query-provider/handler-name'; +import { UserHandlerProps } from './private/handler-props'; +import { UserTablePrivileges } from './private/privileges'; +import { ITable, TableAction } from './table'; + +// keep this import separate from other imports to reduce chance for merge conflicts with v2-main +// eslint-disable-next-line no-duplicate-imports, import/order +import { Construct as CoreConstruct } from '@aws-cdk/core'; + +/** + * Properties for configuring a Redshift user. + */ +export interface UserProps extends DatabaseOptions { + /** + * The name of the user. + * + * For valid values, see: https://docs.aws.amazon.com/redshift/latest/dg/r_names.html + * + * @default - a name is generated + */ + readonly username?: string; + + /** + * KMS key to encrypt the generated secret. + * + * @default - the default AWS managed key is used + */ + readonly encryptionKey?: kms.IKey; + + /** + * The policy to apply when this resource is removed from the application. 
+ * + * @default cdk.RemovalPolicy.Destroy + */ + readonly removalPolicy?: cdk.RemovalPolicy; +} + +/** + * Represents a user in a Redshift database. + */ +export interface IUser extends cdk.IConstruct { + /** + * The name of the user. + */ + readonly username: string; + + /** + * The password of the user. + */ + readonly password: cdk.SecretValue; + + /** + * The cluster where the table is located. + */ + readonly cluster: ICluster; + + /** + * The name of the database where the table is located. + */ + readonly databaseName: string; + + /** + * Grant this user privilege to access a table. + */ + addTablePrivileges(table: ITable, ...actions: TableAction[]): void; +} + +/** + * A full specification of a Redshift user that can be used to import it fluently into the CDK application. + */ +export interface UserAttributes extends DatabaseOptions { + /** + * The name of the user. + */ + readonly username: string; + + /** + * The password of the user. + * + * Do not put passwords in CDK code directly. + */ + readonly password: cdk.SecretValue; +} + +abstract class UserBase extends CoreConstruct implements IUser { + abstract readonly username: string; + abstract readonly password: cdk.SecretValue; + abstract readonly cluster: ICluster; + abstract readonly databaseName: string; + + /** + * The tables that user will have access to + */ + private privileges?: UserTablePrivileges; + + protected abstract readonly databaseProps: DatabaseOptions; + + addTablePrivileges(table: ITable, ...actions: TableAction[]): void { + if (!this.privileges) { + this.privileges = new UserTablePrivileges(this, 'TablePrivileges', { + ...this.databaseProps, + user: this, + }); + } + + this.privileges.addPrivileges(table, ...actions); + } +} + +/** + * A user in a Redshift cluster. + */ +export class User extends UserBase { + /** + * Specify a Redshift user using credentials that already exist. 
+ */ + static fromUserAttributes(scope: Construct, id: string, attrs: UserAttributes): IUser { + return new class extends UserBase { + readonly username = attrs.username; + readonly password = attrs.password; + readonly cluster = attrs.cluster; + readonly databaseName = attrs.databaseName; + protected readonly databaseProps = attrs; + }(scope, id); + } + + readonly username: string; + readonly password: cdk.SecretValue; + readonly cluster: ICluster; + readonly databaseName: string; + protected databaseProps: DatabaseOptions; + + private resource: DatabaseQuery; + + constructor(scope: Construct, id: string, props: UserProps) { + super(scope, id); + + this.databaseProps = props; + this.cluster = props.cluster; + this.databaseName = props.databaseName; + + const username = props.username ?? cdk.Names.uniqueId(this).toLowerCase(); + const secret = new DatabaseSecret(this, 'Secret', { + username, + encryptionKey: props.encryptionKey, + }); + const attachedSecret = secret.attach(props.cluster); + this.password = attachedSecret.secretValueFromJson('password'); + + this.resource = new DatabaseQuery(this, 'Resource', { + ...this.databaseProps, + handler: HandlerName.User, + properties: { + username, + passwordSecretArn: attachedSecret.secretArn, + }, + }); + attachedSecret.grantRead(this.resource); + + this.username = this.resource.getAttString('username'); + } + + /** + * Apply the given removal policy to this resource + * + * The Removal Policy controls what happens to this resource when it stops + * being managed by CloudFormation, either because you've removed it from the + * CDK application or because you've made a change that requires the resource + * to be replaced. + * + * The resource can be destroyed (`RemovalPolicy.DESTROY`), or left in your AWS + * account for data recovery and cleanup later (`RemovalPolicy.RETAIN`). + * + * This resource is destroyed by default. 
+ */ + public applyRemovalPolicy(policy: cdk.RemovalPolicy): void { + this.resource.applyRemovalPolicy(policy); + } +} diff --git a/packages/@aws-cdk/aws-redshift/package.json b/packages/@aws-cdk/aws-redshift/package.json index 3bf492f83ee7b..71042529a3e69 100644 --- a/packages/@aws-cdk/aws-redshift/package.json +++ b/packages/@aws-cdk/aws-redshift/package.json @@ -28,7 +28,14 @@ ] } }, - "projectReferences": true + "projectReferences": true, + "metadata": { + "jsii": { + "rosetta": { + "strict": true + } + } + } }, "repository": { "type": "git", @@ -75,7 +82,9 @@ "devDependencies": { "@aws-cdk/assertions": "0.0.0", "@types/jest": "^26.0.24", + "aws-sdk": "^2.848.0", "cdk-build-tools": "0.0.0", + "cdk-integ-tools": "0.0.0", "cfn2ts": "0.0.0", "jest": "^26.6.3", "pkglint": "0.0.0" @@ -84,9 +93,11 @@ "@aws-cdk/aws-ec2": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/aws-secretsmanager": "0.0.0", "@aws-cdk/core": "0.0.0", + "@aws-cdk/custom-resources": "0.0.0", "constructs": "^3.3.69" }, "homepage": "https://github.com/aws/aws-cdk", @@ -94,9 +105,11 @@ "@aws-cdk/aws-ec2": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", + "@aws-cdk/aws-lambda": "0.0.0", "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/aws-secretsmanager": "0.0.0", "@aws-cdk/core": "0.0.0", + "@aws-cdk/custom-resources": "0.0.0", "constructs": "^3.3.69" }, "engines": { diff --git a/packages/@aws-cdk/aws-redshift/rosetta/cluster.ts-fixture b/packages/@aws-cdk/aws-redshift/rosetta/cluster.ts-fixture new file mode 100644 index 0000000000000..82d98ca3e381e --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/rosetta/cluster.ts-fixture @@ -0,0 +1,20 @@ +// Fixture with cluster already created +import { Construct, SecretValue, Stack } from '@aws-cdk/core'; +import { Vpc } from '@aws-cdk/aws-ec2'; +import { Cluster, Table, TableAction, User } from '@aws-cdk/aws-redshift'; + +class Fixture extends Stack { + 
constructor(scope: Construct, id: string) { + super(scope, id); + + const vpc = new Vpc(this, 'Vpc'); + const cluster = new Cluster(this, 'Cluster', { + vpc, + masterUser: { + masterUsername: 'admin', + }, + }); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-redshift/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-redshift/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..928b036cf2611 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/rosetta/default.ts-fixture @@ -0,0 +1,11 @@ +// Fixture with packages imported, but nothing else +import { Construct, Stack } from '@aws-cdk/core'; +import { Cluster } from '@aws-cdk/aws-redshift'; + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +} diff --git a/packages/@aws-cdk/aws-redshift/test/database-query-provider/index.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/index.test.ts new file mode 100644 index 0000000000000..18091a6627167 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/index.test.ts @@ -0,0 +1,50 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const resourceProperties = { + handler: 'table', + ServiceToken: '', +}; +const requestId = 'requestId'; +const baseEvent: AWSLambda.CloudFormationCustomResourceEvent = { + ResourceProperties: resourceProperties, + RequestType: 'Create', + ServiceToken: '', + ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + ResourceType: '', +}; + +const mockSubHandler = jest.fn(); +jest.mock('../../lib/private/database-query-provider/table', () => ({ + __esModule: true, + handler: mockSubHandler, +})); +import { handler } from '../../lib/private/database-query-provider/index'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +test('calls sub handler', async () => { + const event = baseEvent; + + await handler(event); + + 
expect(mockSubHandler).toHaveBeenCalled(); +}); + +test('throws with unregistered subhandler', async () => { + const event = { + ...baseEvent, + ResourceProperties: { + ...resourceProperties, + handler: 'unregistered', + }, + }; + + await expect(handler(event)).rejects.toThrow(/Requested handler unregistered is not in supported set/); + expect(mockSubHandler).not.toHaveBeenCalled(); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/database-query-provider/privileges.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/privileges.test.ts new file mode 100644 index 0000000000000..daa3835b89f24 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/privileges.test.ts @@ -0,0 +1,163 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const username = 'username'; +const tableName = 'tableName'; +const tablePrivileges = [{ tableName, actions: ['INSERT', 'SELECT'] }]; +const clusterName = 'clusterName'; +const adminUserArn = 'adminUserArn'; +const databaseName = 'databaseName'; +const physicalResourceId = 'PhysicalResourceId'; +const resourceProperties = { + username, + tablePrivileges, + clusterName, + adminUserArn, + databaseName, + ServiceToken: '', +}; +const requestId = 'requestId'; +const genericEvent: AWSLambda.CloudFormationCustomResourceEventCommon = { + ResourceProperties: resourceProperties, + ServiceToken: '', + ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + ResourceType: '', +}; + +const mockExecuteStatement = jest.fn(() => ({ promise: jest.fn(() => ({ Id: 'statementId' })) })); +jest.mock('aws-sdk/clients/redshiftdata', () => class { + executeStatement = mockExecuteStatement; + describeStatement = () => ({ promise: jest.fn(() => ({ Status: 'FINISHED' })) }); +}); +import { handler as managePrivileges } from '../../lib/private/database-query-provider/privileges'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + 
+describe('create', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceCreateEvent = { + RequestType: 'Create', + ...genericEvent, + }; + + test('serializes properties in statement and creates physical resource ID', async () => { + const event = baseEvent; + + await expect(managePrivileges(resourceProperties, event)).resolves.toEqual({ + PhysicalResourceId: 'clusterName:databaseName:username:requestId', + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `GRANT INSERT, SELECT ON ${tableName} TO ${username}`, + })); + }); +}); + +describe('delete', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceDeleteEvent = { + RequestType: 'Delete', + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('executes statement', async () => { + const event = baseEvent; + + await managePrivileges(resourceProperties, event); + + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `REVOKE INSERT, SELECT ON ${tableName} FROM ${username}`, + })); + }); +}); + +describe('update', () => { + const event: AWSLambda.CloudFormationCustomResourceUpdateEvent = { + RequestType: 'Update', + OldResourceProperties: resourceProperties, + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('replaces if cluster name changes', async () => { + const newClusterName = 'newClusterName'; + const newResourceProperties = { + ...resourceProperties, + clusterName: newClusterName, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + ClusterIdentifier: newClusterName, + Sql: expect.stringMatching(/GRANT/), + })); + }); + + test('does not replace if admin user ARN changes', async () => { + const newAdminUserArn = 'newAdminUserArn'; + const newResourceProperties = { + ...resourceProperties, + adminUserArn: 
newAdminUserArn, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).not.toHaveBeenCalled(); + }); + + test('replaces if database name changes', async () => { + const newDatabaseName = 'newDatabaseName'; + const newResourceProperties = { + ...resourceProperties, + databaseName: newDatabaseName, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Database: newDatabaseName, + Sql: expect.stringMatching(/GRANT/), + })); + }); + + test('replaces if user name changes', async () => { + const newUsername = 'newUsername'; + const newResourceProperties = { + ...resourceProperties, + username: newUsername, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: expect.stringMatching(new RegExp(`GRANT .* TO ${newUsername}`)), + })); + }); + + test('does not replace when privileges change', async () => { + const newTableName = 'newTableName'; + const newTablePrivileges = [{ tableName: newTableName, actions: ['DROP'] }]; + const newResourceProperties = { + ...resourceProperties, + tablePrivileges: newTablePrivileges, + }; + + await expect(managePrivileges(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `REVOKE INSERT, SELECT ON ${tableName} FROM ${username}`, + })); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `GRANT DROP ON ${newTableName} TO ${username}`, + })); + }); +}); diff --git 
a/packages/@aws-cdk/aws-redshift/test/database-query-provider/table.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/table.test.ts new file mode 100644 index 0000000000000..956efca1ab81f --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/table.test.ts @@ -0,0 +1,202 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const tableNamePrefix = 'tableNamePrefix'; +const tableColumns = [{ name: 'col1', dataType: 'varchar(1)' }]; +const clusterName = 'clusterName'; +const adminUserArn = 'adminUserArn'; +const databaseName = 'databaseName'; +const physicalResourceId = 'PhysicalResourceId'; +const resourceProperties = { + tableName: { + prefix: tableNamePrefix, + generateSuffix: true, + }, + tableColumns, + clusterName, + adminUserArn, + databaseName, + ServiceToken: '', +}; +const requestId = 'requestId'; +const requestIdTruncated = 'requestI'; +const genericEvent: AWSLambda.CloudFormationCustomResourceEventCommon = { + ResourceProperties: resourceProperties, + ServiceToken: '', + ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + ResourceType: '', +}; + +const mockExecuteStatement = jest.fn(() => ({ promise: jest.fn(() => ({ Id: 'statementId' })) })); +jest.mock('aws-sdk/clients/redshiftdata', () => class { + executeStatement = mockExecuteStatement; + describeStatement = () => ({ promise: jest.fn(() => ({ Status: 'FINISHED' })) }); +}); +import { handler as manageTable } from '../../lib/private/database-query-provider/table'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +describe('create', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceCreateEvent = { + RequestType: 'Create', + ...genericEvent, + }; + + test('serializes properties in statement and creates physical resource ID', async () => { + const event = baseEvent; + + await expect(manageTable(resourceProperties, event)).resolves.toEqual({ + 
PhysicalResourceId: `${tableNamePrefix}${requestIdTruncated}`, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE TABLE ${tableNamePrefix}${requestIdTruncated} (col1 varchar(1))`, + })); + }); + + test('does not modify table name if no suffix generation requested', async () => { + const event = baseEvent; + const newResourceProperties = { + ...resourceProperties, + tableName: { + ...resourceProperties.tableName, + generateSuffix: false, + }, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.toEqual({ + PhysicalResourceId: tableNamePrefix, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE TABLE ${tableNamePrefix} (col1 varchar(1))`, + })); + }); +}); + +describe('delete', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceDeleteEvent = { + RequestType: 'Delete', + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('executes statement', async () => { + const event = baseEvent; + + await manageTable(resourceProperties, event); + + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `DROP TABLE ${physicalResourceId}`, + })); + }); +}); + +describe('update', () => { + const event: AWSLambda.CloudFormationCustomResourceUpdateEvent = { + RequestType: 'Update', + OldResourceProperties: resourceProperties, + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('replaces if cluster name changes', async () => { + const newClusterName = 'newClusterName'; + const newResourceProperties = { + ...resourceProperties, + clusterName: newClusterName, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + ClusterIdentifier: newClusterName, + Sql: expect.stringMatching(new RegExp(`CREATE TABLE 
${tableNamePrefix}${requestIdTruncated}`)), + })); + }); + + test('does not replace if admin user ARN changes', async () => { + const newAdminUserArn = 'newAdminUserArn'; + const newResourceProperties = { + ...resourceProperties, + adminUserArn: newAdminUserArn, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).not.toHaveBeenCalled(); + }); + + test('replaces if database name changes', async () => { + const newDatabaseName = 'newDatabaseName'; + const newResourceProperties = { + ...resourceProperties, + databaseName: newDatabaseName, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Database: newDatabaseName, + Sql: expect.stringMatching(new RegExp(`CREATE TABLE ${tableNamePrefix}${requestIdTruncated}`)), + })); + }); + + test('replaces if table name changes', async () => { + const newTableNamePrefix = 'newTableNamePrefix'; + const newResourceProperties = { + ...resourceProperties, + tableName: { + ...resourceProperties.tableName, + prefix: newTableNamePrefix, + }, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: expect.stringMatching(new RegExp(`CREATE TABLE ${newTableNamePrefix}${requestIdTruncated}`)), + })); + }); + + test('replaces if table columns change', async () => { + const newTableColumnName = 'col2'; + const newTableColumnDataType = 'varchar(1)'; + const newTableColumns = [{ name: newTableColumnName, dataType: newTableColumnDataType }]; + const newResourceProperties = { + ...resourceProperties, + tableColumns: newTableColumns, + }; + + await expect(manageTable(newResourceProperties, 
event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE TABLE ${tableNamePrefix}${requestIdTruncated} (${newTableColumnName} ${newTableColumnDataType})`, + })); + }); + + test('does not replace if table columns added', async () => { + const newTableColumnName = 'col2'; + const newTableColumnDataType = 'varchar(1)'; + const newTableColumns = [{ name: 'col1', dataType: 'varchar(1)' }, { name: newTableColumnName, dataType: newTableColumnDataType }]; + const newResourceProperties = { + ...resourceProperties, + tableColumns: newTableColumns, + }; + + await expect(manageTable(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `ALTER TABLE ${physicalResourceId} ADD ${newTableColumnName} ${newTableColumnDataType}`, + })); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/database-query-provider/user.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query-provider/user.test.ts new file mode 100644 index 0000000000000..87c3bdd0043de --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query-provider/user.test.ts @@ -0,0 +1,163 @@ +/* eslint-disable-next-line import/no-unresolved */ +import type * as AWSLambda from 'aws-lambda'; + +const password = 'password'; +const username = 'username'; +const passwordSecretArn = 'passwordSecretArn'; +const clusterName = 'clusterName'; +const adminUserArn = 'adminUserArn'; +const databaseName = 'databaseName'; +const physicalResourceId = 'PhysicalResourceId'; +const resourceProperties = { + username, + passwordSecretArn, + clusterName, + adminUserArn, + databaseName, + ServiceToken: '', +}; +const requestId = 'requestId'; +const genericEvent: AWSLambda.CloudFormationCustomResourceEventCommon = { + ResourceProperties: resourceProperties, + ServiceToken: '', + 
ResponseURL: '', + StackId: '', + RequestId: requestId, + LogicalResourceId: '', + ResourceType: '', +}; + +const mockExecuteStatement = jest.fn(() => ({ promise: jest.fn(() => ({ Id: 'statementId' })) })); +jest.mock('aws-sdk/clients/redshiftdata', () => class { + executeStatement = mockExecuteStatement; + describeStatement = () => ({ promise: jest.fn(() => ({ Status: 'FINISHED' })) }); +}); +const mockGetSecretValue = jest.fn(() => ({ promise: jest.fn(() => ({ SecretString: JSON.stringify({ password }) })) })); +jest.mock('aws-sdk/clients/secretsmanager', () => class { + getSecretValue = mockGetSecretValue; +}); +import { handler as manageUser } from '../../lib/private/database-query-provider/user'; + +beforeEach(() => { + jest.clearAllMocks(); +}); + +describe('create', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceCreateEvent = { + RequestType: 'Create', + ...genericEvent, + }; + + test('serializes properties in statement and creates physical resource ID', async () => { + const event = baseEvent; + + await expect(manageUser(resourceProperties, event)).resolves.toEqual({ + PhysicalResourceId: 'clusterName:databaseName:username:requestId', + Data: { + username: username, + }, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: `CREATE USER username PASSWORD '${password}'`, + })); + }); +}); + +describe('delete', () => { + const baseEvent: AWSLambda.CloudFormationCustomResourceDeleteEvent = { + RequestType: 'Delete', + PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('executes statement', async () => { + const event = baseEvent; + + await manageUser(resourceProperties, event); + + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: 'DROP USER username', + })); + }); +}); + +describe('update', () => { + const event: AWSLambda.CloudFormationCustomResourceUpdateEvent = { + RequestType: 'Update', + OldResourceProperties: resourceProperties, + 
PhysicalResourceId: physicalResourceId, + ...genericEvent, + }; + + test('replaces if cluster name changes', async () => { + const newClusterName = 'newClusterName'; + const newResourceProperties = { + ...resourceProperties, + clusterName: newClusterName, + }; + + await expect(manageUser(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + ClusterIdentifier: newClusterName, + Sql: expect.stringMatching(/CREATE USER/), + })); + }); + + test('does not replace if admin user ARN changes', async () => { + const newAdminUserArn = 'newAdminUserArn'; + const newResourceProperties = { + ...resourceProperties, + adminUserArn: newAdminUserArn, + }; + + await expect(manageUser(newResourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).not.toHaveBeenCalled(); + }); + + test('replaces if database name changes', async () => { + const newDatabaseName = 'newDatabaseName'; + const newResourceProperties = { + ...resourceProperties, + databaseName: newDatabaseName, + }; + + await expect(manageUser(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Database: newDatabaseName, + Sql: expect.stringMatching(/CREATE USER/), + })); + }); + + test('replaces if user name changes', async () => { + const newUsername = 'newUsername'; + const newResourceProperties = { + ...resourceProperties, + username: newUsername, + }; + + await expect(manageUser(newResourceProperties, event)).resolves.not.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: expect.stringMatching(new RegExp(`CREATE USER ${newUsername}`)), + })); + }); + + test('does not replace if password 
changes', async () => { + const newPassword = 'newPassword'; + mockGetSecretValue.mockImplementationOnce(() => ({ promise: jest.fn(() => ({ SecretString: JSON.stringify({ password: newPassword }) })) })); + + await expect(manageUser(resourceProperties, event)).resolves.toMatchObject({ + PhysicalResourceId: physicalResourceId, + }); + expect(mockExecuteStatement).toHaveBeenCalledWith(expect.objectContaining({ + Sql: expect.stringMatching(new RegExp(`ALTER USER ${username} PASSWORD '${password}'`)), + })); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/database-query.test.ts b/packages/@aws-cdk/aws-redshift/test/database-query.test.ts new file mode 100644 index 0000000000000..1b3bfe76d2e3e --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/database-query.test.ts @@ -0,0 +1,200 @@ +import { Match, Template } from '@aws-cdk/assertions'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; +import * as cdk from '@aws-cdk/core'; +import * as redshift from '../lib'; +import { DatabaseQuery, DatabaseQueryProps } from '../lib/private/database-query'; + +describe('database query', () => { + let stack: cdk.Stack; + let vpc: ec2.Vpc; + let cluster: redshift.ICluster; + let minimalProps: DatabaseQueryProps; + + beforeEach(() => { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + masterUser: { + masterUsername: 'admin', + }, + }); + minimalProps = { + cluster: cluster, + databaseName: 'databaseName', + handler: 'handler', + properties: {}, + }; + }); + + describe('admin user', () => { + it('takes from cluster by default', () => { + new DatabaseQuery(stack, 'Query', { + ...minimalProps, + }); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + adminUserArn: { Ref: 'ClusterSecretAttachment769E6258' }, + }); + }); + + it('grants read permission to handler', () => { + new DatabaseQuery(stack, 
'Query', { + ...minimalProps, + }); + + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([{ + Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], + Effect: 'Allow', + Resource: { Ref: 'ClusterSecretAttachment769E6258' }, + }]), + }, + Roles: [{ Ref: 'QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717' }], + }); + }); + + it('uses admin user if provided', () => { + cluster = new redshift.Cluster(stack, 'Cluster With Provided Admin Secret', { + vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + masterPassword: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'), + }, + publiclyAccessible: true, + }); + + new DatabaseQuery(stack, 'Query', { + ...minimalProps, + adminUser: secretsmanager.Secret.fromSecretNameV2(stack, 'Imported Admin User', 'imported-admin-secret'), + cluster, + }); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + adminUserArn: { + 'Fn::Join': [ + '', + [ + 'arn:', + { + Ref: 'AWS::Partition', + }, + ':secretsmanager:', + { + Ref: 'AWS::Region', + }, + ':', + { + Ref: 'AWS::AccountId', + }, + ':secret:imported-admin-secret', + ], + ], + }, + }); + }); + + it('throws error if admin user not provided and cluster was provided an admin password', () => { + cluster = new redshift.Cluster(stack, 'Cluster With Provided Admin Secret', { + vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + masterPassword: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'), + }, + publiclyAccessible: true, + }); + + expect(() => new DatabaseQuery(stack, 'Query', { + ...minimalProps, + cluster, + })).toThrowError('Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster did not generate admin user credentials (they were provided explicitly)'); + 
}); + + it('throws error if admin user not provided and cluster was imported', () => { + cluster = redshift.Cluster.fromClusterAttributes(stack, 'Imported Cluster', { + clusterName: 'imported-cluster', + clusterEndpointAddress: 'imported-cluster.abcdefghijk.xx-west-1.redshift.amazonaws.com', + clusterEndpointPort: 5439, + }); + + expect(() => new DatabaseQuery(stack, 'Query', { + ...minimalProps, + cluster, + })).toThrowError('Administrative access to the Redshift cluster is required but an admin user secret was not provided and the cluster was imported'); + }); + }); + + it('provides database params to Lambda handler', () => { + new DatabaseQuery(stack, 'Query', { + ...minimalProps, + }); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + clusterName: { + Ref: 'ClusterEB0386A7', + }, + adminUserArn: { + Ref: 'ClusterSecretAttachment769E6258', + }, + databaseName: 'databaseName', + handler: 'handler', + }); + }); + + it('grants statement permissions to handler', () => { + new DatabaseQuery(stack, 'Query', { + ...minimalProps, + }); + + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([{ + Action: ['redshift-data:DescribeStatement', 'redshift-data:ExecuteStatement'], + Effect: 'Allow', + Resource: '*', + }]), + }, + Roles: [{ Ref: 'QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717' }], + }); + }); + + it('passes removal policy through', () => { + new DatabaseQuery(stack, 'Query', { + ...minimalProps, + removalPolicy: cdk.RemovalPolicy.DESTROY, + }); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + DeletionPolicy: 'Delete', + }); + }); + + it('passes applyRemovalPolicy through', () => { + const query = new DatabaseQuery(stack, 'Query', { + ...minimalProps, + }); + + query.applyRemovalPolicy(cdk.RemovalPolicy.DESTROY); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + 
DeletionPolicy: 'Delete', + }); + }); + + it('passes getAtt through', async () => { + const query = new DatabaseQuery(stack, 'Query', { + ...minimalProps, + }); + + expect(stack.resolve(query.getAtt('attribute'))).toStrictEqual({ 'Fn::GetAtt': ['Query435140A1', 'attribute'] }); + expect(stack.resolve(query.getAttString('attribute'))).toStrictEqual({ 'Fn::GetAtt': ['Query435140A1', 'attribute'] }); +}); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/integ.database.expected.json b/packages/@aws-cdk/aws-redshift/test/integ.database.expected.json new file mode 100644 index 0000000000000..b346d3e7abfb3 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/integ.database.expected.json @@ -0,0 +1,1377 @@ +{ + "Resources": { + "Vpc8378EB38": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "EnableDnsHostnames": true, + "EnableDnsSupport": true, + "InstanceTenancy": "default", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1Subnet5C2D37C4": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1RouteTable6C95E38E": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1RouteTableAssociation97140677": { + "Type":
"AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet1RouteTable6C95E38E" + }, + "SubnetId": { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1DefaultRoute3DA9E72A": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet1RouteTable6C95E38E" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "DependsOn": [ + "VpcVPCGWBF912B6E" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1EIPD7E02669": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet1NATGateway4D7517AA": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + }, + "AllocationId": { + "Fn::GetAtt": [ + "VpcPublicSubnet1EIPD7E02669", + "AllocationId" + ] + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2Subnet691E08A3": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.32.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2RouteTable94F7E489": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": 
"Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2RouteTableAssociationDD5762D8": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet2RouteTable94F7E489" + }, + "SubnetId": { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2DefaultRoute97F91067": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet2RouteTable94F7E489" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "DependsOn": [ + "VpcVPCGWBF912B6E" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2EIP3C605A87": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet2NATGateway9182C01D": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + }, + "AllocationId": { + "Fn::GetAtt": [ + "VpcPublicSubnet2EIP3C605A87", + "AllocationId" + ] + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3SubnetBE12F0B6": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.64.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1c", + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Public" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Public" + }, + { + "Key": "Name", + 
"Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3RouteTable93458DBB": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3RouteTableAssociation1F1EDF02": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet3RouteTable93458DBB" + }, + "SubnetId": { + "Ref": "VpcPublicSubnet3SubnetBE12F0B6" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3DefaultRoute4697774F": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPublicSubnet3RouteTable93458DBB" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "DependsOn": [ + "VpcVPCGWBF912B6E" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3EIP3A666A23": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc", + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPublicSubnet3NATGateway7640CD1D": { + "Type": "AWS::EC2::NatGateway", + "Properties": { + "SubnetId": { + "Ref": "VpcPublicSubnet3SubnetBE12F0B6" + }, + "AllocationId": { + "Fn::GetAtt": [ + "VpcPublicSubnet3EIP3A666A23", + "AllocationId" + ] + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PublicSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1Subnet536B997A": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.96.0/19", + "VpcId": { + 
"Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1a", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1RouteTableB2C5B500": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet1" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1RouteTableAssociation70C59FA6": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet1RouteTableB2C5B500" + }, + "SubnetId": { + "Ref": "VpcPrivateSubnet1Subnet536B997A" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet1DefaultRouteBE02A9ED": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet1RouteTableB2C5B500" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VpcPublicSubnet1NATGateway4D7517AA" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2Subnet3788AAA1": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.128.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1b", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2RouteTableA678073B": { + "Type": 
"AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet2" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2RouteTableAssociationA89CAD56": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet2RouteTableA678073B" + }, + "SubnetId": { + "Ref": "VpcPrivateSubnet2Subnet3788AAA1" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet2DefaultRoute060D2087": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet2RouteTableA678073B" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VpcPublicSubnet2NATGateway9182C01D" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3SubnetF258B56E": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.160.0/19", + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "AvailabilityZone": "test-region-1c", + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "aws-cdk:subnet-name", + "Value": "Private" + }, + { + "Key": "aws-cdk:subnet-type", + "Value": "Private" + }, + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3RouteTableD98824C7": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc/PrivateSubnet3" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3RouteTableAssociation16BDDC43": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet3RouteTableD98824C7" + }, + 
"SubnetId": { + "Ref": "VpcPrivateSubnet3SubnetF258B56E" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcPrivateSubnet3DefaultRoute94B74F0D": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": { + "Ref": "VpcPrivateSubnet3RouteTableD98824C7" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "VpcPublicSubnet3NATGateway7640CD1D" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcIGWD7BA715C": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": "aws-cdk-redshift-cluster-database/Vpc" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "VpcVPCGWBF912B6E": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "Vpc8378EB38" + }, + "InternetGatewayId": { + "Ref": "VpcIGWD7BA715C" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSubnetsDCFA5CB7": { + "Type": "AWS::Redshift::ClusterSubnetGroup", + "Properties": { + "Description": "Subnets for Cluster Redshift cluster", + "SubnetIds": [ + { + "Ref": "VpcPublicSubnet1Subnet5C2D37C4" + }, + { + "Ref": "VpcPublicSubnet2Subnet691E08A3" + }, + { + "Ref": "VpcPublicSubnet3SubnetBE12F0B6" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSecurityGroup0921994B": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "Redshift security group", + "SecurityGroupEgress": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "Allow all outbound traffic by default", + "IpProtocol": "-1" + } + ], + "VpcId": { + "Ref": "Vpc8378EB38" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSecret6368BD0F": { + "Type": "AWS::SecretsManager::Secret", + "Properties": { + "GenerateSecretString": { + "ExcludeCharacters": "\"@/\\ '", + "GenerateStringKey": "password", + "PasswordLength": 30, + 
"SecretStringTemplate": "{\"username\":\"admin\"}" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterSecretAttachment769E6258": { + "Type": "AWS::SecretsManager::SecretTargetAttachment", + "Properties": { + "SecretId": { + "Ref": "ClusterSecret6368BD0F" + }, + "TargetId": { + "Ref": "ClusterEB0386A7" + }, + "TargetType": "AWS::Redshift::Cluster" + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "ClusterEB0386A7": { + "Type": "AWS::Redshift::Cluster", + "Properties": { + "ClusterType": "multi-node", + "DBName": "my_db", + "MasterUsername": { + "Fn::Join": [ + "", + [ + "{{resolve:secretsmanager:", + { + "Ref": "ClusterSecret6368BD0F" + }, + ":SecretString:username::}}" + ] + ] + }, + "MasterUserPassword": { + "Fn::Join": [ + "", + [ + "{{resolve:secretsmanager:", + { + "Ref": "ClusterSecret6368BD0F" + }, + ":SecretString:password::}}" + ] + ] + }, + "NodeType": "dc2.large", + "AllowVersionUpgrade": true, + "AutomatedSnapshotRetentionPeriod": 1, + "ClusterSubnetGroupName": { + "Ref": "ClusterSubnetsDCFA5CB7" + }, + "Encrypted": true, + "NumberOfNodes": 2, + "PubliclyAccessible": true, + "VpcSecurityGroupIds": [ + { + "Fn::GetAtt": [ + "ClusterSecurityGroup0921994B", + "GroupId" + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserSecretE2C04A69": { + "Type": "AWS::SecretsManager::Secret", + "Properties": { + "GenerateSecretString": { + "ExcludeCharacters": "\"@/\\ '", + "GenerateStringKey": "password", + "PasswordLength": 30, + "SecretStringTemplate": "{\"username\":\"awscdkredshiftclusterdatabaseuserc17d5ebd\"}" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserSecretAttachment02022609": { + "Type": "AWS::SecretsManager::SecretTargetAttachment", + "Properties": { + "SecretId": { + "Ref": "UserSecretE2C04A69" + }, + "TargetId": { + "Ref": "ClusterEB0386A7" + }, + "TargetType": "AWS::Redshift::Cluster" + }, + "UpdateReplacePolicy": 
"Delete", + "DeletionPolicy": "Delete" + }, + "UserProviderframeworkonEventServiceRole8FBA2FBD": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserProviderframeworkonEventServiceRoleDefaultPolicy9A9E044F": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "UserProviderframeworkonEventServiceRoleDefaultPolicy9A9E044F", + "Roles": [ + { + "Ref": "UserProviderframeworkonEventServiceRole8FBA2FBD" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserProviderframeworkonEvent4EC32885": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "UserProviderframeworkonEventServiceRole8FBA2FBD", + "Arn" + ] + }, + 
"Description": "AWS CDK resource provider framework - onEvent (aws-cdk-redshift-cluster-database/User/Resource/Provider)", + "Environment": { + "Variables": { + "USER_ON_EVENT_FUNCTION_ARN": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + }, + "Handler": "framework.onEvent", + "Runtime": "nodejs14.x", + "Timeout": 900 + }, + "DependsOn": [ + "UserProviderframeworkonEventServiceRoleDefaultPolicy9A9E044F", + "UserProviderframeworkonEventServiceRole8FBA2FBD" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserFDDCDD17": { + "Type": "Custom::RedshiftDatabaseQuery", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + "UserProviderframeworkonEvent4EC32885", + "Arn" + ] + }, + "handler": "user", + "clusterName": { + "Ref": "ClusterEB0386A7" + }, + "adminUserArn": { + "Ref": "ClusterSecretAttachment769E6258" + }, + "databaseName": "my_db", + "username": "awscdkredshiftclusterdatabaseuserc17d5ebd", + "passwordSecretArn": { + "Ref": "UserSecretAttachment02022609" + } + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivilegesProviderframeworkonEventServiceRoleDefaultPolicy3B6EF50C": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + 
"QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "UserTablePrivilegesProviderframeworkonEventServiceRoleDefaultPolicy3B6EF50C", + "Roles": [ + { + "Ref": "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivilegesProviderframeworkonEvent3F5C1851": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A", + "Arn" + ] + }, + "Description": "AWS CDK resource provider framework - onEvent (aws-cdk-redshift-cluster-database/User/TablePrivileges/Resource/Provider)", + "Environment": { + "Variables": { + "USER_ON_EVENT_FUNCTION_ARN": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + }, + "Handler": "framework.onEvent", + "Runtime": "nodejs14.x", + "Timeout": 900 + }, + "DependsOn": [ + "UserTablePrivilegesProviderframeworkonEventServiceRoleDefaultPolicy3B6EF50C", + "UserTablePrivilegesProviderframeworkonEventServiceRole56BAEC9A" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "UserTablePrivileges3829D614": { + "Type": "Custom::RedshiftDatabaseQuery", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + 
"UserTablePrivilegesProviderframeworkonEvent3F5C1851", + "Arn" + ] + }, + "handler": "user-table-privileges", + "clusterName": { + "Ref": "ClusterEB0386A7" + }, + "adminUserArn": { + "Ref": "ClusterSecretAttachment769E6258" + }, + "databaseName": "my_db", + "username": { + "Fn::GetAtt": [ + "UserFDDCDD17", + "username" + ] + }, + "tablePrivileges": [ + { + "tableName": { + "Ref": "Table7ABB320E" + }, + "actions": [ + "INSERT", + "DELETE", + "SELECT" + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRoleDefaultPolicyDDD1388D": { + "Type": "AWS::IAM::Policy", + "Properties": { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "redshift-data:DescribeStatement", + "redshift-data:ExecuteStatement" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ], + "Effect": "Allow", + "Resource": { + "Ref": "ClusterSecretAttachment769E6258" + } + }, + { + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret" + ], + "Effect": "Allow", + "Resource": { + "Ref": "UserSecretAttachment02022609" + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRoleDefaultPolicyDDD1388D", + "Roles": [ + { + "Ref": 
"QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3Bucket148631C8" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3VersionKey1A4E04E7" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3VersionKey1A4E04E7" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717", + "Arn" + ] + }, + "Handler": "index.handler", + "Runtime": "nodejs14.x", + "Timeout": 60 + }, + "DependsOn": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRoleDefaultPolicyDDD1388D", + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "TableProviderframeworkonEventServiceRoleC3128F67": { + "Type": "AWS::IAM::Role", + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + { + "Fn::Join": [ + "", + [ + "arn:", + { + "Ref": "AWS::Partition" + }, + ":iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ] + ] + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "TableProviderframeworkonEventServiceRoleDefaultPolicyAD08715D": { + "Type": "AWS::IAM::Policy", + "Properties": { + 
"PolicyDocument": { + "Statement": [ + { + "Action": "lambda:InvokeFunction", + "Effect": "Allow", + "Resource": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "TableProviderframeworkonEventServiceRoleDefaultPolicyAD08715D", + "Roles": [ + { + "Ref": "TableProviderframeworkonEventServiceRoleC3128F67" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "TableProviderframeworkonEvent97F3951A": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + "S3Bucket": { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1" + }, + "S3Key": { + "Fn::Join": [ + "", + [ + { + "Fn::Select": [ + 0, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + }, + { + "Fn::Select": [ + 1, + { + "Fn::Split": [ + "||", + { + "Ref": "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F" + } + ] + } + ] + } + ] + ] + } + }, + "Role": { + "Fn::GetAtt": [ + "TableProviderframeworkonEventServiceRoleC3128F67", + "Arn" + ] + }, + "Description": "AWS CDK resource provider framework - onEvent (aws-cdk-redshift-cluster-database/Table/Resource/Provider)", + "Environment": { + "Variables": { + "USER_ON_EVENT_FUNCTION_ARN": { + "Fn::GetAtt": [ + "QueryRedshiftDatabase3de5bea727da479686625efb56431b5f3DF81997", + "Arn" + ] + } + } + }, + "Handler": "framework.onEvent", + "Runtime": "nodejs14.x", + "Timeout": 900 + }, + "DependsOn": [ + "TableProviderframeworkonEventServiceRoleDefaultPolicyAD08715D", + "TableProviderframeworkonEventServiceRoleC3128F67" + ], + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + }, + "Table7ABB320E": { + "Type": "Custom::RedshiftDatabaseQuery", + "Properties": { + "ServiceToken": { + "Fn::GetAtt": [ + 
"TableProviderframeworkonEvent97F3951A", + "Arn" + ] + }, + "handler": "table", + "clusterName": { + "Ref": "ClusterEB0386A7" + }, + "adminUserArn": { + "Ref": "ClusterSecretAttachment769E6258" + }, + "databaseName": "my_db", + "tableName": { + "prefix": "awscdkredshiftclusterdatabaseTable24923533", + "generateSuffix": true + }, + "tableColumns": [ + { + "name": "col1", + "dataType": "varchar(4)" + }, + { + "name": "col2", + "dataType": "float" + } + ] + }, + "UpdateReplacePolicy": "Delete", + "DeletionPolicy": "Delete" + } + }, + "Parameters": { + "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3Bucket148631C8": { + "Type": "String", + "Description": "S3 bucket for asset \"483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49\"" + }, + "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49S3VersionKey1A4E04E7": { + "Type": "String", + "Description": "S3 key for asset version \"483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49\"" + }, + "AssetParameters483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49ArtifactHashEB952795": { + "Type": "String", + "Description": "Artifact hash for asset \"483841e46ab98aa099d0371a7800e2ace3ddbbb12cb8efb3162ca172ebdafd49\"" + }, + "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3BucketDC4B98B1": { + "Type": "String", + "Description": "S3 bucket for asset \"daeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1\"" + }, + "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1S3VersionKeyA495226F": { + "Type": "String", + "Description": "S3 key for asset version \"daeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1\"" + }, + "AssetParametersdaeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1ArtifactHashA521A16F": { + "Type": "String", + "Description": "Artifact hash for asset \"daeb79e3cee39c9b902dc0d5c780223e227ed573ea60976252947adab5fb2be1\"" + 
} + } +} \ No newline at end of file diff --git a/packages/@aws-cdk/aws-redshift/test/integ.database.ts b/packages/@aws-cdk/aws-redshift/test/integ.database.ts new file mode 100644 index 0000000000000..3a3b955a2b5aa --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/integ.database.ts @@ -0,0 +1,44 @@ +#!/usr/bin/env node +/// !cdk-integ pragma:ignore-assets +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as cdk from '@aws-cdk/core'; +import * as constructs from 'constructs'; +import * as redshift from '../lib'; + +const app = new cdk.App(); + +const stack = new cdk.Stack(app, 'aws-cdk-redshift-cluster-database'); +cdk.Aspects.of(stack).add({ + visit(node: constructs.IConstruct) { + if (cdk.CfnResource.isCfnResource(node)) { + node.applyRemovalPolicy(cdk.RemovalPolicy.DESTROY); + } + }, +}); + +const vpc = new ec2.Vpc(stack, 'Vpc'); +const databaseName = 'my_db'; +const cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + }, + defaultDatabaseName: databaseName, + publiclyAccessible: true, +}); + +const databaseOptions = { + cluster: cluster, + databaseName: databaseName, +}; +const user = new redshift.User(stack, 'User', databaseOptions); +const table = new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], +}); +table.grant(user, redshift.TableAction.INSERT, redshift.TableAction.DELETE); + +app.synth(); diff --git a/packages/@aws-cdk/aws-redshift/test/privileges.test.ts b/packages/@aws-cdk/aws-redshift/test/privileges.test.ts new file mode 100644 index 0000000000000..91419b2eaa709 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/privileges.test.ts @@ -0,0 +1,113 @@ +import { Template } from '@aws-cdk/assertions'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as cdk from '@aws-cdk/core'; +import * as redshift from '../lib'; + 
+describe('table privileges', () => { + let stack: cdk.Stack; + let vpc: ec2.Vpc; + let cluster: redshift.ICluster; + const databaseName = 'databaseName'; + let databaseOptions: redshift.DatabaseOptions; + const tableColumns = [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }]; + let table: redshift.ITable; + let table2: redshift.ITable; + + beforeEach(() => { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + }, + publiclyAccessible: true, + }); + databaseOptions = { + cluster, + databaseName, + }; + table = redshift.Table.fromTableAttributes(stack, 'Table', { + tableName: 'tableName', + tableColumns, + cluster, + databaseName, + }); + table2 = redshift.Table.fromTableAttributes(stack, 'Table 2', { + tableName: 'tableName2', + tableColumns, + cluster, + databaseName, + }); + }); + + it('adding table privilege creates custom resource', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.INSERT); + user.addTablePrivileges(table2, redshift.TableAction.SELECT, redshift.TableAction.DROP); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['INSERT'] }, { tableName: 'tableName2', actions: ['SELECT', 'DROP'] }], + }); + }); + + it('table privileges are deduplicated', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.INSERT, redshift.TableAction.INSERT, redshift.TableAction.DELETE); + user.addTablePrivileges(table, redshift.TableAction.SELECT, redshift.TableAction.DELETE); + + 
Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['SELECT', 'DELETE', 'INSERT'] }], + }); + }); + + it('table privileges are removed when ALL specified', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.ALL, redshift.TableAction.INSERT); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['ALL'] }], + }); + }); + + it('SELECT table privilege is added when UPDATE or DELETE is specified', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.addTablePrivileges(table, redshift.TableAction.UPDATE); + user.addTablePrivileges(table2, redshift.TableAction.DELETE); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + username: { + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }, + tablePrivileges: [{ tableName: 'tableName', actions: ['UPDATE', 'SELECT'] }, { tableName: 'tableName2', actions: ['DELETE', 'SELECT'] }], + }); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/table.test.ts b/packages/@aws-cdk/aws-redshift/test/table.test.ts new file mode 100644 index 0000000000000..97f66b57042f5 --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/table.test.ts @@ -0,0 +1,138 @@ +import { Template } from '@aws-cdk/assertions'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as cdk from '@aws-cdk/core'; +import * as redshift from '../lib'; + +describe('cluster table', () => { + const tableName = 'tableName'; + const tableColumns = [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }]; + + let stack: cdk.Stack; + let vpc: ec2.Vpc; + let cluster: redshift.ICluster; 
+ let databaseOptions: redshift.DatabaseOptions; + + beforeEach(() => { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + }, + publiclyAccessible: true, + }); + databaseOptions = { + cluster: cluster, + databaseName: 'databaseName', + }; + }); + + it('creates using custom resource', () => { + new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + tableName: { + prefix: 'Table', + generateSuffix: true, + }, + tableColumns, + }); + }); + + it('tableName property is pulled from custom resource', () => { + const table = new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + expect(stack.resolve(table.tableName)).toStrictEqual({ + Ref: 'Table7ABB320E', + }); + }); + + it('uses table name when provided', () => { + new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableName, + tableColumns, + }); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + tableName: { + prefix: tableName, + generateSuffix: false, + }, + }); + }); + + it('can import from name and columns', () => { + const table = redshift.Table.fromTableAttributes(stack, 'Table', { + tableName, + tableColumns, + cluster, + databaseName: 'databaseName', + }); + + expect(table.tableName).toBe(tableName); + expect(table.tableColumns).toBe(tableColumns); + expect(table.cluster).toBe(cluster); + expect(table.databaseName).toBe('databaseName'); + }); + + it('grant adds privileges to user', () => { + const user = redshift.User.fromUserAttributes(stack, 'User', { + ...databaseOptions, + username: 'username', + password: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'), + }); + const table = redshift.Table.fromTableAttributes(stack, 'Table', { + 
tableName, + tableColumns, + cluster, + databaseName: 'databaseName', + }); + + table.grant(user, redshift.TableAction.INSERT); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + handler: 'user-table-privileges', + }); + }); + + it('retains table on deletion by default', () => { + new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + handler: 'table', + }, + DeletionPolicy: 'Retain', + }); + }); + + it('destroys table on deletion if requested', () => { + const table = new redshift.Table(stack, 'Table', { + ...databaseOptions, + tableColumns, + }); + + table.applyRemovalPolicy(cdk.RemovalPolicy.DESTROY); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + handler: 'table', + }, + DeletionPolicy: 'Delete', + }); + }); +}); diff --git a/packages/@aws-cdk/aws-redshift/test/user.test.ts b/packages/@aws-cdk/aws-redshift/test/user.test.ts new file mode 100644 index 0000000000000..24b9bc748cc8f --- /dev/null +++ b/packages/@aws-cdk/aws-redshift/test/user.test.ts @@ -0,0 +1,215 @@ +import { Match, Template } from '@aws-cdk/assertions'; +import * as ec2 from '@aws-cdk/aws-ec2'; +import * as kms from '@aws-cdk/aws-kms'; +import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; +import * as cdk from '@aws-cdk/core'; +import * as redshift from '../lib'; + +describe('cluster user', () => { + let stack: cdk.Stack; + let vpc: ec2.Vpc; + let cluster: redshift.ICluster; + const databaseName = 'databaseName'; + let databaseOptions: redshift.DatabaseOptions; + + beforeEach(() => { + stack = new cdk.Stack(); + vpc = new ec2.Vpc(stack, 'VPC'); + cluster = new redshift.Cluster(stack, 'Cluster', { + vpc: vpc, + vpcSubnets: { + subnetType: ec2.SubnetType.PUBLIC, + }, + masterUser: { + masterUsername: 'admin', + }, + publiclyAccessible: true, + }); + databaseOptions = { + cluster, + 
databaseName, + }; + }); + + it('creates using custom resource', () => { + new redshift.User(stack, 'User', databaseOptions); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + passwordSecretArn: { Ref: 'UserSecretAttachment02022609' }, + }); + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { + PolicyDocument: { + Statement: Match.arrayWith([{ + Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], + Effect: 'Allow', + Resource: { Ref: 'UserSecretAttachment02022609' }, + }]), + }, + Roles: [{ Ref: 'QueryRedshiftDatabase3de5bea727da479686625efb56431b5fServiceRole0A90D717' }], + }); + }); + + it('creates database secret', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::Secret', { + GenerateSecretString: { + SecretStringTemplate: `{"username":"${cdk.Names.uniqueId(user).toLowerCase()}"}`, + }, + }); + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::SecretTargetAttachment', { + SecretId: { Ref: 'UserSecretE2C04A69' }, + }); + }); + + it('username property is pulled from custom resource', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + expect(stack.resolve(user.username)).toStrictEqual({ + 'Fn::GetAtt': [ + 'UserFDDCDD17', + 'username', + ], + }); + }); + + it('password property is pulled from attached secret', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + expect(stack.resolve(user.password)).toStrictEqual({ + 'Fn::Join': [ + '', + [ + '{{resolve:secretsmanager:', + { + Ref: 'UserSecretAttachment02022609', + }, + ':SecretString:password::}}', + ], + ], + }); + }); + + it('uses username when provided', () => { + const username = 'username'; + + new redshift.User(stack, 'User', { + ...databaseOptions, + username, + }); + + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::Secret', { + 
GenerateSecretString: { + SecretStringTemplate: `{"username":"${username}"}`, + }, + }); + }); + + it('can import from username and password', () => { + const userSecret = secretsmanager.Secret.fromSecretNameV2(stack, 'User Secret', 'redshift-user-secret'); + + const user = redshift.User.fromUserAttributes(stack, 'User', { + ...databaseOptions, + username: userSecret.secretValueFromJson('username').toString(), + password: userSecret.secretValueFromJson('password'), + }); + + expect(stack.resolve(user.username)).toStrictEqual({ + 'Fn::Join': [ + '', + [ + '{{resolve:secretsmanager:arn:', + { + Ref: 'AWS::Partition', + }, + ':secretsmanager:', + { + Ref: 'AWS::Region', + }, + ':', + { + Ref: 'AWS::AccountId', + }, + ':secret:redshift-user-secret:SecretString:username::}}', + ], + ], + }); + expect(stack.resolve(user.password)).toStrictEqual({ + 'Fn::Join': [ + '', + [ + '{{resolve:secretsmanager:arn:', + { + Ref: 'AWS::Partition', + }, + ':secretsmanager:', + { + Ref: 'AWS::Region', + }, + ':', + { + Ref: 'AWS::AccountId', + }, + ':secret:redshift-user-secret:SecretString:password::}}', + ], + ], + }); + }); + + it('destroys user on deletion by default', () => { + new redshift.User(stack, 'User', databaseOptions); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + passwordSecretArn: { Ref: 'UserSecretAttachment02022609' }, + }, + DeletionPolicy: 'Delete', + }); + }); + + it('retains user on deletion if requested', () => { + const user = new redshift.User(stack, 'User', databaseOptions); + + user.applyRemovalPolicy(cdk.RemovalPolicy.RETAIN); + + Template.fromStack(stack).hasResource('Custom::RedshiftDatabaseQuery', { + Properties: { + passwordSecretArn: { Ref: 'UserSecretAttachment02022609' }, + }, + DeletionPolicy: 'Retain', + }); + }); + + it('uses encryption key if one is provided', () => { + const encryptionKey = new kms.Key(stack, 'Key'); + + new redshift.User(stack, 'User', { + ...databaseOptions, + encryptionKey, + 
}); + + Template.fromStack(stack).hasResourceProperties('AWS::SecretsManager::Secret', { + KmsKeyId: stack.resolve(encryptionKey.keyArn), + }); + }); + + it('addTablePrivileges grants access to table', () => { + const user = redshift.User.fromUserAttributes(stack, 'User', { + ...databaseOptions, + username: 'username', + password: cdk.SecretValue.plainText('INSECURE_NOT_FOR_PRODUCTION'), + }); + const table = redshift.Table.fromTableAttributes(stack, 'Table', { + tableName: 'tableName', + tableColumns: [{ name: 'col1', dataType: 'varchar(4)' }, { name: 'col2', dataType: 'float' }], + cluster, + databaseName: 'databaseName', + }); + + user.addTablePrivileges(table, redshift.TableAction.INSERT); + + Template.fromStack(stack).hasResourceProperties('Custom::RedshiftDatabaseQuery', { + handler: 'user-table-privileges', + }); + }); +}); diff --git a/packages/@aws-cdk/aws-sns/README.md b/packages/@aws-cdk/aws-sns/README.md index ab7120411df39..23ee15e4af508 100644 --- a/packages/@aws-cdk/aws-sns/README.md +++ b/packages/@aws-cdk/aws-sns/README.md @@ -95,6 +95,20 @@ topic.addSubscription(new subs.LambdaSubscription(fn, { })); ``` +### Example of Firehose Subscription + +```typescript + import { Subscription, SubscriptionProtocol, Topic } from '@aws-cdk/aws-sns'; + import { DeliveryStream } from '@aws-cdk/aws-kinesisfirehose'; + const topic = new Topic(stack, 'Topic'); + const stream = new DeliveryStream(stack, 'DeliveryStream', ...) + new Subscription(stack, 'Subscription', { + endpoint: stream.deliveryStreamArn, + protocol: SubscriptionProtocol.FIREHOSE, + subscriptionRoleArn: "SAMPLE_ARN", //role with permissions to send messages to a firehose delivery stream + }) +``` + ## DLQ setup for SNS Subscription CDK can attach provided Queue as DLQ for your SNS subscription. 
diff --git a/packages/@aws-cdk/aws-sns/lib/subscription.ts b/packages/@aws-cdk/aws-sns/lib/subscription.ts index 88817b6db6fdd..54e26cd24eb60 100644 --- a/packages/@aws-cdk/aws-sns/lib/subscription.ts +++ b/packages/@aws-cdk/aws-sns/lib/subscription.ts @@ -52,6 +52,13 @@ export interface SubscriptionOptions { * @default - No dead letter queue enabled. */ readonly deadLetterQueue?: IQueue; + + /** + * Arn of role allowing access to firehose delivery stream. + * Required for a firehose subscription protocol. + * @default - No subscription role is provided + */ + readonly subscriptionRoleArn?: string; } /** * Properties for creating a new subscription @@ -81,8 +88,15 @@ export class Subscription extends Resource { constructor(scope: Construct, id: string, props: SubscriptionProps) { super(scope, id); - if (props.rawMessageDelivery && ['http', 'https', 'sqs'].indexOf(props.protocol) < 0) { - throw new Error('Raw message delivery can only be enabled for HTTP/S and SQS subscriptions.'); + if (props.rawMessageDelivery && + [ + SubscriptionProtocol.HTTP, + SubscriptionProtocol.HTTPS, + SubscriptionProtocol.SQS, + SubscriptionProtocol.FIREHOSE, + ] + .indexOf(props.protocol) < 0) { + throw new Error('Raw message delivery can only be enabled for HTTP, HTTPS, SQS, and Firehose subscriptions.'); } if (props.filterPolicy) { @@ -103,6 +117,10 @@ export class Subscription extends Resource { } } + if (props.protocol === SubscriptionProtocol.FIREHOSE && !props.subscriptionRoleArn) { + throw new Error('Subscription role arn is required field for subscriptions with a firehose protocol.'); + } + this.deadLetterQueue = this.buildDeadLetterQueue(props); new CfnSubscription(this, 'Resource', { @@ -113,6 +131,7 @@ export class Subscription extends Resource { filterPolicy: this.filterPolicy, region: props.region, redrivePolicy: this.buildDeadLetterConfig(this.deadLetterQueue), + subscriptionRoleArn: props.subscriptionRoleArn, }); } @@ -189,5 +208,10 @@ export enum SubscriptionProtocol { 
/** * Notifications trigger a Lambda function. */ - LAMBDA = 'lambda' + LAMBDA = 'lambda', + + /** + * Notifications put records into a firehose delivery stream. + */ + FIREHOSE = 'firehose' } diff --git a/packages/@aws-cdk/aws-sns/package.json b/packages/@aws-cdk/aws-sns/package.json index c7734d512cdfd..26d7441d5ee6d 100644 --- a/packages/@aws-cdk/aws-sns/package.json +++ b/packages/@aws-cdk/aws-sns/package.json @@ -76,8 +76,8 @@ }, "license": "Apache-2.0", "devDependencies": { - "@aws-cdk/aws-s3": "0.0.0", "@aws-cdk/assertions": "0.0.0", + "@aws-cdk/aws-s3": "0.0.0", "@types/jest": "^26.0.24", "cdk-build-tools": "0.0.0", "cdk-integ-tools": "0.0.0", @@ -86,8 +86,8 @@ "pkglint": "0.0.0" }, "dependencies": { - "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-events": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", @@ -97,8 +97,8 @@ }, "homepage": "https://github.com/aws/aws-cdk", "peerDependencies": { - "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-cloudwatch": "0.0.0", + "@aws-cdk/aws-codestarnotifications": "0.0.0", "@aws-cdk/aws-events": "0.0.0", "@aws-cdk/aws-iam": "0.0.0", "@aws-cdk/aws-kms": "0.0.0", diff --git a/packages/@aws-cdk/aws-sns/test/subscription.test.ts b/packages/@aws-cdk/aws-sns/test/subscription.test.ts index 0c19eddc0c7d5..a495769648d4b 100644 --- a/packages/@aws-cdk/aws-sns/test/subscription.test.ts +++ b/packages/@aws-cdk/aws-sns/test/subscription.test.ts @@ -2,6 +2,7 @@ import { Template } from '@aws-cdk/assertions'; import { Queue } from '@aws-cdk/aws-sqs'; import * as cdk from '@aws-cdk/core'; import * as sns from '../lib'; +import { SubscriptionProtocol } from '../lib'; describe('Subscription', () => { test('create a subscription', () => { @@ -176,19 +177,26 @@ describe('Subscription', () => { }); - test('throws with raw delivery for protocol other than http, https or sqs', () => { + + test.each( + [ + 
SubscriptionProtocol.LAMBDA, + SubscriptionProtocol.EMAIL, + SubscriptionProtocol.EMAIL_JSON, + SubscriptionProtocol.SMS, + SubscriptionProtocol.APPLICATION, + ]) + ('throws with raw delivery for %s protocol', (protocol: SubscriptionProtocol) => { // GIVEN const stack = new cdk.Stack(); const topic = new sns.Topic(stack, 'Topic'); - // THEN expect(() => new sns.Subscription(stack, 'Subscription', { endpoint: 'endpoint', - protocol: sns.SubscriptionProtocol.LAMBDA, + protocol: protocol, topic, rawMessageDelivery: true, })).toThrow(/Raw message delivery/); - }); test('throws with more than 5 attributes in a filter policy', () => { @@ -232,4 +240,17 @@ describe('Subscription', () => { })).toThrow(/\(120\) must not exceed 100/); }); + + test('throws an error when subscription role arn is not entered with firehose subscription protocol', () => { + // GIVEN + const stack = new cdk.Stack(); + const topic = new sns.Topic(stack, 'Topic'); + + //THEN + expect(() => new sns.Subscription(stack, 'Subscription', { + endpoint: 'endpoint', + protocol: sns.SubscriptionProtocol.FIREHOSE, + topic, + })).toThrow(/Subscription role arn is required field for subscriptions with a firehose protocol./); + }); }); diff --git a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json index 97eea60b24dcc..f37bcd6e520f6 100644 --- a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json +++ b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.run-batch-job.expected.json @@ -873,11 +873,16 @@ ] ] }, - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, diff --git 
a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json index 2026e45ae3c4e..a3851fd3fb5b7 100644 --- a/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json +++ b/packages/@aws-cdk/aws-stepfunctions-tasks/test/batch/integ.submit-job.expected.json @@ -873,11 +873,16 @@ ] ] }, - "Memory": 4, "Privileged": false, "ReadonlyRootFilesystem": false, - "Vcpus": 1 + "ResourceRequirements": [ + { "Type": "VCPU", "Value": "1" }, + { "Type": "MEMORY", "Value": "4" } + ] }, + "PlatformCapabilities": [ + "EC2" + ], "RetryStrategy": { "Attempts": 1 }, diff --git a/packages/@aws-cdk/pipelines/README.md b/packages/@aws-cdk/pipelines/README.md index 55cd50bfb12bf..1e0df7a4b2c2a 100644 --- a/packages/@aws-cdk/pipelines/README.md +++ b/packages/@aws-cdk/pipelines/README.md @@ -481,7 +481,7 @@ pipeline.addStage(prod, { }); ``` -You can also specify steps to be executed at the stack level. To achieve this, you can specify the stack and step via the `stackSteps` property: +You can also specify steps to be executed at the stack level. To achieve this, you can specify the stack and step via the `stackSteps` property: ```ts pipeline.addStage(prod, { @@ -1274,6 +1274,27 @@ encryption key policy for the artifacts bucket may have a statement that looks l Any resource or policy that references the qualifier (`hnb659fds` by default) will need to be updated. +### This CDK CLI is not compatible with the CDK library used by your application + +The CDK CLI version used in your pipeline is too old to read the Cloud Assembly +produced by your CDK app. + +Most likely this happens in the `SelfMutate` action, you are passing the `cliVersion` +parameter to control the version of the CDK CLI, and you just updated the CDK +framework version that your application uses. 
You either forgot to change the +`cliVersion` parameter, or changed the `cliVersion` in the same commit in which +you changed the framework version. Because a change to the pipeline settings needs +a successful run of the `SelfMutate` step to be applied, the next iteration of the +`SelfMutate` step still executes with the *old* CLI version, and that old CLI version +is not able to read the cloud assembly produced by the new framework version. + +Solution: change the `cliVersion` first, commit, push and deploy, and only then +change the framework version. + +We recommend you avoid specifying the `cliVersion` parameter at all. By default +the pipeline will use the latest CLI version, which will support all cloud assembly +versions. + ## Known Issues There are some usability issues that are caused by underlying technology, and diff --git a/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts b/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts index 7b1d0d87d9c22..37ca819d82bc5 100644 --- a/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts +++ b/packages/@aws-cdk/pipelines/lib/codepipeline/codepipeline.ts @@ -63,7 +63,18 @@ export interface CodePipelineProps { * If you want to lock the CDK CLI version used in the pipeline, by steps * that are automatically generated for you, specify the version here. * - * You should not typically need to specify this value. + * We recommend you do not specify this value, as not specifying it always + * uses the latest CLI version which is backwards compatible with old versions. + * + * If you do specify it, be aware that this version should always be equal to or higher than the + * version of the CDK framework used by the CDK app, when the CDK commands are + * run during your pipeline execution. 
When you change this version, the *next + time* the `SelfMutate` step runs it will still be using the CLI of the + *previous* version that was in this property: it will only start using the + new version after `SelfMutate` completes successfully. That means that if + you want to update both framework and CLI version, you should update the + CLI version first, commit, push and deploy, and only then update the + framework version. * * @default - Latest version */ @@ -871,4 +882,4 @@ function chunkTranches(n: number, xss: A[][]): A[][][] { function isCodePipelineActionFactory(x: any): x is ICodePipelineActionFactory { return !!(x as ICodePipelineActionFactory).produceAction; -} \ No newline at end of file +} diff --git a/packages/@aws-cdk/pipelines/package.json b/packages/@aws-cdk/pipelines/package.json index 37605df5210d4..7daeb0589feb0 100644 --- a/packages/@aws-cdk/pipelines/package.json +++ b/packages/@aws-cdk/pipelines/package.json @@ -32,7 +32,6 @@ "organization": true }, "devDependencies": { - "@aws-cdk/assert-internal": "0.0.0", "@aws-cdk/assertions": "0.0.0", "@aws-cdk/aws-apigateway": "0.0.0", "@aws-cdk/aws-ecr-assets": "0.0.0", diff --git a/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts b/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts index c1b72cf7ab316..68b10d259683f 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/assets.test.ts @@ -1,11 +1,10 @@ import * as fs from 'fs'; import * as path from 'path'; -import { arrayWith, Capture, deepObjectLike, encodedJson, notMatching, objectLike, ResourcePart, stringLike, SynthUtils } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Capture, Match, Template } from '@aws-cdk/assertions'; import * as cb from '@aws-cdk/aws-codebuild'; import * as ec2 from '@aws-cdk/aws-ec2'; -import { Stack } from '@aws-cdk/core'; -import { behavior, PIPELINE_ENV, TestApp, 
LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, FileAssetApp, MegaAssetsApp, TwoFileAssetsApp, DockerAssetApp, PlainStackApp } from '../testhelpers'; +import { Stack, Stage } from '@aws-cdk/core'; +import { behavior, PIPELINE_ENV, TestApp, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, FileAssetApp, MegaAssetsApp, TwoFileAssetsApp, DockerAssetApp, PlainStackApp, stringLike } from '../testhelpers'; const FILE_ASSET_SOURCE_HASH = '8289faf53c7da377bb2b90615999171adef5e1d8f6b88810e5fef75e6ca09ba5'; const FILE_ASSET_SOURCE_HASH2 = 'ac76997971c3f6ddf37120660003f1ced72b4fc58c498dfd99c78fa77e721e0e'; @@ -42,10 +41,10 @@ describe('basic pipeline', () => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: notMatching(arrayWith(objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.not(Match.arrayWith([Match.objectLike({ Name: 'Assets', - }))), + })])), }); } }); @@ -67,13 +66,13 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: 'Assets' }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: 'Assets' }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -96,13 +95,13 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ 
Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: 'Assets' }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: 'Assets' }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -126,14 +125,14 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: stringLike('Assets*') }), - objectLike({ Name: stringLike('Assets*2') }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: stringLike('Assets*') }), + Match.objectLike({ Name: stringLike('Assets*2') }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -155,15 +154,15 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ - objectLike({ Name: 'Source' }), - objectLike({ Name: 'Build' }), - objectLike({ Name: 'UpdatePipeline' }), - objectLike({ Name: stringLike('Assets*') }), // 'Assets' vs 'Assets.1' - objectLike({ Name: stringLike('Assets*2') }), - objectLike({ Name: stringLike('Assets*3') }), - objectLike({ Name: 'App' }), + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), + Match.objectLike({ Name: stringLike('Assets*') }), // 'Assets' vs 'Assets.1' + Match.objectLike({ Name: 
stringLike('Assets*2') }), + Match.objectLike({ Name: stringLike('Assets*3') }), + Match.objectLike({ Name: 'App' }), ], }); } @@ -186,15 +185,15 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`), + commands: Match.arrayWith([`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`]), }, }, })), @@ -220,14 +219,14 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ - objectLike({ RunOrder: 1 }), - objectLike({ RunOrder: 1 }), + Match.objectLike({ RunOrder: 1 }), + Match.objectLike({ RunOrder: 1 }), ], - }), + }]), }); } }); @@ -242,16 +241,16 @@ describe('basic pipeline', () => { pipeline.addStage('SomeStage').addStackArtifactDeployment(asm.getStackByName('FileAssetApp-Stack')); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ - objectLike({ + Match.objectLike({ Name: 'FileAsset1', RunOrder: 1, }), ], - }), + }]), }); }); @@ -277,17 +276,17 @@ 
describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith(stringLike('cdk-assets *')), + commands: Match.arrayWith([stringLike('cdk-assets *')]), }, }, })), }, - Environment: objectLike({ + Environment: Match.objectLike({ PrivilegedMode: false, Image: 'aws/codebuild/standard:5.0', }), @@ -311,17 +310,17 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith(stringLike('cdk-assets *')), + commands: Match.arrayWith([stringLike('cdk-assets *')]), }, }, })), }, - Environment: objectLike({ + Environment: Match.objectLike({ Image: 'aws/codebuild/standard:5.0', PrivilegedMode: true, }), @@ -349,12 +348,12 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm install -g cdk-assets@1.2.3'], @@ -386,7 +385,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Role', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Role', { 
AssumeRolePolicyDocument: { Statement: [{ Action: 'sts:AssumeRole', @@ -402,7 +401,7 @@ describe('basic pipeline', () => { }], }, }); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(FILE_PUBLISHING_ROLE, 'CdkAssetsFileRole6BE17A07')); } }); @@ -439,7 +438,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy([FILE_PUBLISHING_ROLE, 'arn:${AWS::Partition}:iam::0123456789012:role/cdk-hnb659fds-file-publishing-role-0123456789012-eu-west-1'], 'CdkAssetsFileRole6BE17A07')); } @@ -468,7 +467,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(FILE_PUBLISHING_ROLE, 'CdkAssetsFileRole6BE17A07')); } }); @@ -492,7 +491,7 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Role', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Role', { AssumeRolePolicyDocument: { Statement: [{ Action: 'sts:AssumeRole', @@ -508,7 +507,7 @@ describe('basic pipeline', () => { }], }, }); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(IMAGE_PUBLISHING_ROLE, 'CdkAssetsDockerRole484B6DD3')); } }); @@ -534,9 +533,9 @@ describe('basic pipeline', () => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', 
expectedAssetRolePolicy(FILE_PUBLISHING_ROLE, 'CdkAssetsFileRole6BE17A07')); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', expectedAssetRolePolicy(IMAGE_PUBLISHING_ROLE, 'CdkAssetsDockerRole484B6DD3')); } }); @@ -576,12 +575,12 @@ behavior('can supply pre-install scripts to asset upload', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm config set registry https://registry.com', 'npm install -g cdk-assets'], @@ -620,8 +619,8 @@ describe('pipeline with VPC', () => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - VpcConfig: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + VpcConfig: Match.objectLike({ SecurityGroupIds: [ { 'Fn::GetAtt': ['CdkAssetsDockerAsset1SecurityGroup078F5C66', 'GroupId'] }, ], @@ -655,16 +654,16 @@ describe('pipeline with VPC', () => { function THEN_codePipelineExpectation() { // Assets Project - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { Roles: [ { Ref: 'CdkAssetsDockerRole484B6DD3' }, ], PolicyDocument: { - Statement: arrayWith({ - Action: arrayWith('ec2:DescribeSecurityGroups'), + Statement: Match.arrayWith([{ + Action: Match.arrayWith(['ec2:DescribeSecurityGroups']), Effect: 'Allow', Resource: '*', - }), + }]), }, }); } @@ -690,10 +689,10 @@ describe('pipeline with VPC', () => { function THEN_codePipelineExpectation() { // Assets Project - 
expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ - { + Match.objectLike({ Resource: '*', Action: [ 'ec2:CreateNetworkInterface', @@ -704,19 +703,19 @@ describe('pipeline with VPC', () => { 'ec2:DescribeDhcpOptions', 'ec2:DescribeVpcs', ], - }, + }), ], }, Roles: [{ Ref: 'CdkAssetsDockerRole484B6DD3' }], }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResource('AWS::CodeBuild::Project', { Properties: { ServiceRole: { 'Fn::GetAtt': ['CdkAssetsDockerRole484B6DD3', 'Arn'] }, }, DependsOn: [ 'CdkAssetsDockerAsset1PolicyDocument8DA96A22', ], - }, ResourcePart.CompleteDefinition); + }); } }); }); @@ -743,28 +742,29 @@ describe('pipeline with single asset publisher', () => { function THEN_codePipelineExpectation() { // THEN - const buildSpecName = Capture.aString(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + const buildSpecName = new Capture(); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ // Only one file asset action - objectLike({ RunOrder: 1, Name: 'FileAsset' }), + Match.objectLike({ RunOrder: 1, Name: 'FileAsset' }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: buildSpecName.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName, }, }); - const assembly = SynthUtils.synthesize(pipelineStack, { skipValidation: true }).assembly; + const assembly = synthesize(pipelineStack); - const actualFileName = buildSpecName.capturedValue; + const actualFileName = buildSpecName.asString(); + 
expect(actualFileName).toMatch(/^buildspec-.*\.yaml$/); const buildSpec = JSON.parse(fs.readFileSync(path.join(assembly.directory, actualFileName), { encoding: 'utf-8' })); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH2}:current_account-current_region"`); @@ -804,20 +804,20 @@ describe('pipeline with single asset publisher', () => { function THEN_codePipelineExpectation(pipelineStack2: Stack) { // THEN - const buildSpecName1 = Capture.aString(); - const buildSpecName2 = Capture.aString(); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + const buildSpecName1 = new Capture(); + const buildSpecName2 = new Capture(); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: buildSpecName1.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName1, }, }); - expect(pipelineStack2).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack2).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: buildSpecName2.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName2, }, }); - expect(buildSpecName1.capturedValue).not.toEqual(buildSpecName2.capturedValue); + expect(buildSpecName1.asString()).not.toEqual(buildSpecName2.asString()); } }); }); @@ -870,27 +870,27 @@ describe('pipeline with custom asset publisher BuildSpec', () => { function THEN_codePipelineExpectation() { - const buildSpecName = Capture.aString(); + const buildSpecName = new Capture(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + 
Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Assets', Actions: [ // Only one file asset action - objectLike({ RunOrder: 1, Name: 'FileAsset' }), + Match.objectLike({ RunOrder: 1, Name: 'FileAsset' }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: buildSpecName.capture(stringLike('buildspec-*.yaml')), + BuildSpec: buildSpecName, }, }); - const assembly = SynthUtils.synthesize(pipelineStack, { skipValidation: true }).assembly; - const buildSpec = JSON.parse(fs.readFileSync(path.join(assembly.directory, buildSpecName.capturedValue)).toString()); + const assembly = synthesize(pipelineStack); + const buildSpec = JSON.parse(fs.readFileSync(path.join(assembly.directory, buildSpecName.asString())).toString()); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH}:current_account-current_region"`); expect(buildSpec.phases.build.commands).toContain(`cdk-assets --path "assembly-FileAssetApp/FileAssetAppStackEADD68C5.assets.json" --verbose publish "${FILE_ASSET_SOURCE_HASH2}:current_account-current_region"`); expect(buildSpec.phases.pre_install.commands).toContain('preinstall'); @@ -978,9 +978,9 @@ behavior('necessary secrets manager permissions get added to asset roles', suite }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + Statement: Match.arrayWith([{ Action: 'secretsmanager:GetSecretValue', Effect: 'Allow', Resource: { @@ -993,7 +993,7 @@ behavior('necessary secrets manager 
permissions get added to asset roles', suite ], ], }, - }), + }]), }, Roles: [ { Ref: 'PipelineAssetsFileRole59943A77' }, @@ -1021,10 +1021,10 @@ behavior('adding environment variable to assets job adds SecretsManager permissi }); pipeline.addStage(new FileAssetApp(pipelineStack, 'MyApp')); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith( - objectLike({ + Statement: Match.arrayWith([ + Match.objectLike({ Action: 'secretsmanager:GetSecretValue', Effect: 'Allow', Resource: { @@ -1035,8 +1035,17 @@ behavior('adding environment variable to assets job adds SecretsManager permissi ]], }, }), - ), + ]), }, }); }); -}); \ No newline at end of file +}); + +function synthesize(stack: Stack) { + const root = stack.node.root; + if (!Stage.isStage(root)) { + throw new Error('unexpected: all stacks must be part of a Stage'); + } + + return root.synth({ skipValidation: true }); +} \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts b/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts index 5ada88b49b937..e1356304fe811 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/docker-credentials.test.ts @@ -1,12 +1,11 @@ -import { arrayWith, deepObjectLike, encodedJson, stringLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import * as cb from '@aws-cdk/aws-codebuild'; import * as secretsmanager from '@aws-cdk/aws-secretsmanager'; import { Stack } from '@aws-cdk/core'; import { Construct } from 'constructs'; import * as cdkp from '../../lib'; import { CodeBuildStep } from '../../lib'; -import { behavior, PIPELINE_ENV, TestApp, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, DockerAssetApp } from 
'../testhelpers'; +import { behavior, PIPELINE_ENV, TestApp, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, DockerAssetApp, stringLike } from '../testhelpers'; const secretSynthArn = 'arn:aws:secretsmanager:eu-west-1:0123456789012:secret:synth-012345'; const secretUpdateArn = 'arn:aws:secretsmanager:eu-west-1:0123456789012:secret:update-012345'; @@ -51,32 +50,32 @@ behavior('synth action receives install commands and access to relevant credenti domainCredentials: { 'synth.example.com': { secretsManagerSecretId: secretSynthArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir $HOME/.cdk', `echo '${expectedCredsConfig}' > $HOME/.cdk/cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the Synth project build: { - commands: arrayWith(stringLike('*cdk*synth*')), + commands: Match.arrayWith([stringLike('*cdk*synth*')]), }, }, })), }, }); - expect(pipelineStack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + Statement: Match.arrayWith([{ Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], Effect: 'Allow', Resource: secretSynthArn, - }), + }]), Version: '2012-10-17', }, Roles: [{ Ref: stringLike('Cdk*BuildProjectRole*') }], @@ -121,20 +120,20 @@ behavior('synth action receives Windows install commands if a Windows image is d domainCredentials: { 'synth.example.com': { secretsManagerSecretId: secretSynthArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + 
Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/windows-base:2.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir %USERPROFILE%\\.cdk', `echo '${expectedCredsConfig}' > %USERPROFILE%\\.cdk\\cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the Synth project build: { - commands: arrayWith(stringLike('*cdk*synth*')), + commands: Match.arrayWith([stringLike('*cdk*synth*')]), }, }, })), @@ -164,34 +163,34 @@ behavior('self-update receives install commands and access to relevant credentia domainCredentials: { 'selfupdate.example.com': { secretsManagerSecretId: secretUpdateArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [expectedPhase]: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir $HOME/.cdk', `echo '${expectedCredsConfig}' > $HOME/.cdk/cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the SelfMutate project build: { - commands: arrayWith( + commands: Match.arrayWith([ stringLike('cdk * deploy PipelineStack*'), - ), + ]), }, }, })), }, }); - expect(pipelineStack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + Statement: Match.arrayWith([{ Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], Effect: 'Allow', Resource: secretUpdateArn, - }), + }]), Version: '2012-10-17', }, Roles: [{ Ref: stringLike('*SelfMutat*Role*') }], @@ -220,32 +219,32 @@ behavior('asset publishing receives install 
commands and access to relevant cred domainCredentials: { 'publish.example.com': { secretsManagerSecretId: secretPublishArn } }, }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0' }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [expectedPhase]: { - commands: arrayWith( + commands: Match.arrayWith([ 'mkdir $HOME/.cdk', `echo '${expectedCredsConfig}' > $HOME/.cdk/cdk-docker-creds.json`, - ), + ]), }, // Prove we're looking at the Publishing project build: { - commands: arrayWith(stringLike('cdk-assets*')), + commands: Match.arrayWith([stringLike('cdk-assets*')]), }, }, })), }, }); - expect(pipelineStack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith({ + Statement: Match.arrayWith([{ Action: ['secretsmanager:GetSecretValue', 'secretsmanager:DescribeSecret'], Effect: 'Allow', Resource: secretPublishArn, - }), + }]), Version: '2012-10-17', }, Roles: [{ Ref: 'CdkAssetsDockerRole484B6DD3' }], diff --git a/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts b/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts index 7367930e6618a..d2ea77f45ff7d 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/security-check.test.ts @@ -1,9 +1,8 @@ -import { anything, arrayWith, encodedJson, objectLike, stringLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import { Topic } from '@aws-cdk/aws-sns'; import { Stack } from '@aws-cdk/core'; import * as cdkp from '../../lib'; -import { LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, 
PIPELINE_ENV, TestApp } from '../testhelpers'; +import { LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, TestApp, stringLike } from '../testhelpers'; import { behavior } from '../testhelpers/compliance'; let app: TestApp; @@ -41,8 +40,8 @@ behavior('security check option generates lambda/codebuild at pipeline scope', ( }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toCountResources('AWS::Lambda::Function', 1); - expect(pipelineStack).toHaveResourceLike('AWS::Lambda::Function', { + Template.fromStack(pipelineStack).resourceCountIs('AWS::Lambda::Function', 1); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::Lambda::Function', { Role: { 'Fn::GetAtt': [ stringLike('CdkPipeline*SecurityCheckCDKPipelinesAutoApproveServiceRole*'), @@ -51,7 +50,7 @@ behavior('security check option generates lambda/codebuild at pipeline scope', ( }, }); // 1 for github build, 1 for synth stage, and 1 for the application security check - expect(pipelineStack).toCountResources('AWS::CodeBuild::Project', 3); + Template.fromStack(pipelineStack).resourceCountIs('AWS::CodeBuild::Project', 3); } }); @@ -78,24 +77,24 @@ behavior('security check option passes correct environment variables to check pr }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith( + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([ { Name: 'App', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: stringLike('*Check'), - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: 'STAGE_PATH', type: 'PLAINTEXT', value: 'PipelineSecurityStack/App' }, { name: 'STAGE_NAME', type: 'PLAINTEXT', value: 'App' }, - { name: 'ACTION_NAME', type: 'PLAINTEXT', value: 
anything() }, + { name: 'ACTION_NAME', type: 'PLAINTEXT', value: Match.anyValue() }, ]), }), }), - ), + ]), }, - ), + ]), }); } }); @@ -124,7 +123,7 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid function THEN_codePipelineExpectation() { // CodePipeline must be tagged as SECURITY_CHECK=ALLOW_APPROVE - expect(pipelineStack).toHaveResource('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Tags: [ { Key: 'SECURITY_CHECK', @@ -133,7 +132,7 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid ], }); // Lambda Function only has access to pipelines tagged SECURITY_CHECK=ALLOW_APPROVE - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -148,9 +147,9 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid }, }); // CodeBuild must have access to the stacks and invoking the lambda function - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith( + Statement: Match.arrayWith([ { Action: 'sts:AssumeRole', Condition: { @@ -173,7 +172,7 @@ behavior('pipeline created with auto approve tags and lambda/codebuild w/ valid ], }, }, - ), + ]), }, }); } @@ -193,32 +192,32 @@ behavior('confirmBroadeningPermissions option at addApplicationStage runs securi suite.doesNotApply.modern(); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ { - Actions: [{ Name: 'GitHub', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'GitHub', RunOrder: 1 })], Name: 'Source', }, { - Actions: [{ Name: 'Synth', RunOrder: 1 
}], + Actions: [Match.objectLike({ Name: 'Synth', RunOrder: 1 })], Name: 'Build', }, { - Actions: [{ Name: 'SelfMutate', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'SelfMutate', RunOrder: 1 })], Name: 'UpdatePipeline', }, { Actions: [ - { Name: 'StageSecurityCheckStackSecurityCheck', RunOrder: 1 }, - { Name: 'StageSecurityCheckStackManualApproval', RunOrder: 2 }, - { Name: 'AnotherStackSecurityCheck', RunOrder: 5 }, - { Name: 'AnotherStackManualApproval', RunOrder: 6 }, - { Name: 'Stack.Prepare', RunOrder: 3 }, - { Name: 'Stack.Deploy', RunOrder: 4 }, - { Name: 'AnotherStack-Stack.Prepare', RunOrder: 7 }, - { Name: 'AnotherStack-Stack.Deploy', RunOrder: 8 }, - { Name: 'SkipCheckStack-Stack.Prepare', RunOrder: 9 }, - { Name: 'SkipCheckStack-Stack.Deploy', RunOrder: 10 }, + Match.objectLike({ Name: 'StageSecurityCheckStackSecurityCheck', RunOrder: 1 }), + Match.objectLike({ Name: 'StageSecurityCheckStackManualApproval', RunOrder: 2 }), + Match.objectLike({ Name: 'AnotherStackSecurityCheck', RunOrder: 5 }), + Match.objectLike({ Name: 'AnotherStackManualApproval', RunOrder: 6 }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), + Match.objectLike({ Name: 'AnotherStack-Stack.Prepare', RunOrder: 7 }), + Match.objectLike({ Name: 'AnotherStack-Stack.Deploy', RunOrder: 8 }), + Match.objectLike({ Name: 'SkipCheckStack-Stack.Prepare', RunOrder: 9 }), + Match.objectLike({ Name: 'SkipCheckStack-Stack.Deploy', RunOrder: 10 }), ], Name: 'StageSecurityCheckStack', }, @@ -240,28 +239,28 @@ behavior('confirmBroadeningPermissions option at addApplication runs security ch suite.doesNotApply.modern(); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Stages: [ { - Actions: [{ Name: 'GitHub', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 
'GitHub', RunOrder: 1 })], Name: 'Source', }, { - Actions: [{ Name: 'Synth', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'Synth', RunOrder: 1 })], Name: 'Build', }, { - Actions: [{ Name: 'SelfMutate', RunOrder: 1 }], + Actions: [Match.objectLike({ Name: 'SelfMutate', RunOrder: 1 })], Name: 'UpdatePipeline', }, { Actions: [ - { Name: 'EnableCheckStackSecurityCheck', RunOrder: 3 }, - { Name: 'EnableCheckStackManualApproval', RunOrder: 4 }, - { Name: 'Stack.Prepare', RunOrder: 1 }, - { Name: 'Stack.Deploy', RunOrder: 2 }, - { Name: 'EnableCheckStack-Stack.Prepare', RunOrder: 5 }, - { Name: 'EnableCheckStack-Stack.Deploy', RunOrder: 6 }, + Match.objectLike({ Name: 'EnableCheckStackSecurityCheck', RunOrder: 3 }), + Match.objectLike({ Name: 'EnableCheckStackManualApproval', RunOrder: 4 }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 1 }), + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 2 }), + Match.objectLike({ Name: 'EnableCheckStack-Stack.Prepare', RunOrder: 5 }), + Match.objectLike({ Name: 'EnableCheckStack-Stack.Deploy', RunOrder: 6 }), ], Name: 'NoSecurityCheckStack', }, @@ -299,13 +298,13 @@ behavior('confirmBroadeningPermissions and notification topic options generates }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toCountResources('AWS::SNS::Topic', 1); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith( + Template.fromStack(pipelineStack).resourceCountIs('AWS::SNS::Topic', 1); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([ { Name: 'MyStack', Actions: [ - objectLike({ + Match.objectLike({ Configuration: { ProjectName: { Ref: stringLike('*SecurityCheck*') }, EnvironmentVariables: { @@ -320,7 +319,7 @@ behavior('confirmBroadeningPermissions and notification topic options generates Namespace: stringLike('*'), RunOrder: 1, }), - objectLike({ + Match.objectLike({ Configuration: { CustomData: 
stringLike('#{*.MESSAGE}'), ExternalEntityLink: stringLike('#{*.LINK}'), @@ -328,11 +327,11 @@ behavior('confirmBroadeningPermissions and notification topic options generates Name: stringLike('*Approv*'), RunOrder: 2, }), - objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), - objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), ], }, - ), + ]), }); } }); @@ -365,10 +364,10 @@ behavior('Stages declared outside the pipeline create their own ApplicationSecur suite.doesNotApply.modern(); function THEN_codePipelineExpectation() { - expect(pipelineStack).toCountResources('AWS::Lambda::Function', 1); + Template.fromStack(pipelineStack).resourceCountIs('AWS::Lambda::Function', 1); // 1 for github build, 1 for synth stage, and 1 for the application security check - expect(pipelineStack).toCountResources('AWS::CodeBuild::Project', 3); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { + Template.fromStack(pipelineStack).resourceCountIs('AWS::CodeBuild::Project', 3); + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { Tags: [ { Key: 'SECURITY_CHECK', @@ -376,28 +375,28 @@ behavior('Stages declared outside the pipeline create their own ApplicationSecur }, ], Stages: [ - { Name: 'Source' }, - { Name: 'Build' }, - { Name: 'UpdatePipeline' }, + Match.objectLike({ Name: 'Source' }), + Match.objectLike({ Name: 'Build' }), + Match.objectLike({ Name: 'UpdatePipeline' }), { Actions: [ - { + Match.objectLike({ Configuration: { ProjectName: { Ref: 'UnattachedStageStageApplicationSecurityCheckCDKSecurityCheckADCE795B' }, }, Name: 'UnattachedStageSecurityCheck', RunOrder: 1, - }, - { + }), + Match.objectLike({ Configuration: { CustomData: '#{UnattachedStageSecurityCheck.MESSAGE}', ExternalEntityLink: '#{UnattachedStageSecurityCheck.LINK}', }, Name: 'UnattachedStageManualApproval', RunOrder: 2, - }, - { Name: 
'Stack.Prepare', RunOrder: 3 }, - { Name: 'Stack.Deploy', RunOrder: 4 }, + }), + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 3 }), + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4 }), ], Name: 'UnattachedStage', }, diff --git a/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts b/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts index 8196c84a0920b..f672898107c30 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/self-mutation.test.ts @@ -1,10 +1,9 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { anything, arrayWith, deepObjectLike, encodedJson, notMatching, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import * as cb from '@aws-cdk/aws-codebuild'; import * as cp from '@aws-cdk/aws-codepipeline'; import { Stack, Stage } from '@aws-cdk/core'; -import { behavior, LegacyTestGitHubNpmPipeline, PIPELINE_ENV, stackTemplate, TestApp, ModernTestGitHubNpmPipeline } from '../testhelpers'; +import { behavior, LegacyTestGitHubNpmPipeline, PIPELINE_ENV, TestApp, ModernTestGitHubNpmPipeline } from '../testhelpers'; let app: TestApp; let pipelineStack: Stack; @@ -31,32 +30,32 @@ behavior('CodePipeline has self-mutation stage', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'UpdatePipeline', Actions: [ - objectLike({ + Match.objectLike({ Name: 'SelfMutate', - Configuration: objectLike({ - ProjectName: { Ref: anything() }, + Configuration: Match.objectLike({ + ProjectName: { Ref: Match.anyValue() }, }), }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + 
Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm install -g aws-cdk'], }, build: { - commands: arrayWith('cdk -a . deploy PipelineStack --require-approval=never --verbose'), + commands: Match.arrayWith(['cdk -a . deploy PipelineStack --require-approval=never --verbose']), }, }, })), @@ -84,15 +83,15 @@ behavior('selfmutation stage correctly identifies nested assembly of pipeline st }); function THEN_codePipelineExpectation(nestedPipelineStack: Stack) { - expect(stackTemplate(nestedPipelineStack)).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(nestedPipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { - commands: arrayWith('cdk -a assembly-PipelineStage deploy PipelineStage/PipelineStack --require-approval=never --verbose'), + commands: Match.arrayWith(['cdk -a assembly-PipelineStage deploy PipelineStage/PipelineStack --require-approval=never --verbose']), }, }, })), @@ -123,11 +122,11 @@ behavior('selfmutation feature can be turned off', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: notMatching(arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.not(Match.arrayWith([{ Name: 'UpdatePipeline', - Actions: anything(), - })), + Actions: Match.anyValue(), + }])), }); } }); @@ -154,10 +153,10 @@ behavior('can control fix/CLI version used in pipeline selfupdate', (suite) => { function THEN_codePipelineExpectation() { // THEN - 
expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Name: 'vpipe-selfupdate', Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm install -g aws-cdk@1.2.3'], @@ -177,7 +176,7 @@ behavior('Pipeline stack itself can use assets (has implications for selfupdate) }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { PrivilegedMode: true, }, @@ -191,7 +190,7 @@ behavior('Pipeline stack itself can use assets (has implications for selfupdate) }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { PrivilegedMode: true, }, @@ -212,9 +211,9 @@ behavior('self-update project role uses tagged bootstrap-role permissions', (sui }); function THEN_codePipelineExpectations() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith( + Statement: Match.arrayWith([ { Action: 'sts:AssumeRole', Effect: 'Allow', @@ -235,7 +234,7 @@ behavior('self-update project role uses tagged bootstrap-role permissions', (sui Effect: 'Allow', Resource: '*', }, - ), + ]), }, }); } @@ -280,19 +279,19 @@ behavior('self-mutation stage can be customized with BuildSpec', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', PrivilegedMode: false, }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: 
Match.serializedJson(Match.objectLike({ phases: { install: { commands: ['npm config set registry example.com', 'npm install -g aws-cdk'], }, build: { - commands: arrayWith('cdk -a . deploy PipelineStack --require-approval=never --verbose'), + commands: Match.arrayWith(['cdk -a . deploy PipelineStack --require-approval=never --verbose']), }, }, cache: { diff --git a/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts b/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts index cb21139b16364..9b056b8af2ece 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/stack-ordering.test.ts @@ -1,7 +1,6 @@ -import { arrayWith, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Match, Template } from '@aws-cdk/assertions'; import { App, Stack } from '@aws-cdk/core'; -import { behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortedByRunOrder, TestApp, ThreeStackApp, TwoStackApp } from '../testhelpers'; +import { behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortByRunOrder, TestApp, ThreeStackApp, TwoStackApp } from '../testhelpers'; let app: App; let pipelineStack: Stack; @@ -28,16 +27,16 @@ behavior('interdependent stacks are in the right order', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'Stack2.Deploy' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 
'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), ]), - }), + }]), }); } }); @@ -59,20 +58,20 @@ behavior('multiple independent stacks go in parallel', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ + Actions: sortByRunOrder([ // 1 and 2 in parallel - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Deploy' }), + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + Match.objectLike({ Name: 'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), // Then 3 - objectLike({ Name: 'Stack3.Prepare' }), - objectLike({ Name: 'Stack3.Deploy' }), + Match.objectLike({ Name: 'Stack3.Prepare' }), + Match.objectLike({ Name: 'Stack3.Deploy' }), ]), - }), + }]), }); } }); @@ -86,18 +85,18 @@ behavior('user can request manual change set approvals', (suite) => { }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'ManualApproval' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'ManualApproval2' }), - objectLike({ Name: 'Stack2.Deploy' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 'ManualApproval' }), + Match.objectLike({ Name: 'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + 
Match.objectLike({ Name: 'ManualApproval2' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), ]), - }), + }]), }); }); @@ -114,28 +113,28 @@ behavior('user can request extra runorder space between prepare and deploy', (su }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare', RunOrder: 1, }), - objectLike({ + Match.objectLike({ Name: 'Stack1.Deploy', RunOrder: 3, }), - objectLike({ + Match.objectLike({ Name: 'Stack2.Prepare', RunOrder: 4, }), - objectLike({ + Match.objectLike({ Name: 'Stack2.Deploy', RunOrder: 6, }), ]), - }), + }]), }); }); @@ -153,24 +152,24 @@ behavior('user can request both manual change set approval and extraRunOrderSpac }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack.Prepare', RunOrder: 1, }), - objectLike({ + Match.objectLike({ Name: 'ManualApproval', RunOrder: 2, }), - objectLike({ + Match.objectLike({ Name: 'Stack.Deploy', RunOrder: 4, }), ]), - }), + }]), }); }); diff --git a/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts b/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts index 58bae441ee156..f8e39a536309f 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts +++ b/packages/@aws-cdk/pipelines/test/compliance/synths.test.ts @@ -1,5 +1,4 @@ -import { arrayWith, deepObjectLike, encodedJson, objectLike, Capture, anything } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import 
{ Capture, Match, Template } from '@aws-cdk/assertions'; import * as cbuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as ec2 from '@aws-cdk/aws-ec2'; @@ -64,12 +63,12 @@ behavior('synth takes arrays of commands', (suite) => { function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { commands: [ @@ -112,12 +111,12 @@ behavior('synth sets artifact base-directory to cdk.out', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ artifacts: { 'base-directory': 'cdk.out', }, @@ -154,15 +153,15 @@ behavior('synth supports setting subdirectory', (suite) => { function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { - commands: arrayWith('cd subdir'), + commands: Match.arrayWith(['cd subdir']), }, }, artifacts: { @@ -201,7 +200,7 @@ behavior('npm synth sets, or allows setting, UNSAFE_PERM=true', (suite) => { function THEN_codePipelineExpectation() { // THEN - 
expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { EnvironmentVariables: [ { @@ -225,12 +224,12 @@ behavior('synth assumes a JavaScript project by default (no build, yes synth)', }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { commands: ['npm ci'], @@ -278,24 +277,24 @@ behavior('Magic CodePipeline variables passed to synth envvars must be rendered function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Build', Actions: [ - objectLike({ + Match.objectLike({ Name: 'Synth', - Configuration: objectLike({ - EnvironmentVariables: encodedJson(arrayWith( + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson(Match.arrayWith([ { name: 'VERSION', type: 'PLAINTEXT', value: '#{codepipeline.PipelineExecutionId}', }, - )), + ])), }), }), ], - }), + }]), }); } }); @@ -354,24 +353,24 @@ behavior('CodeBuild: environment variables specified in multiple places are corr function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - Environment: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + Environment: Match.objectLike({ PrivilegedMode: true, - EnvironmentVariables: arrayWith( + EnvironmentVariables: Match.arrayWith([ { - Name: 'SOME_ENV_VAR', + Name: 'INNER_VAR', Type: 'PLAINTEXT', - 
Value: 'SomeValue', + Value: 'InnerValue', }, { - Name: 'INNER_VAR', + Name: 'SOME_ENV_VAR', Type: 'PLAINTEXT', - Value: 'InnerValue', + Value: 'SomeValue', }, - ), + ]), }), Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { commands: ['install1', 'install2'], @@ -413,12 +412,12 @@ behavior('install command can be overridden/specified', (suite) => { function THEN_codePipelineExpectation(installPhase: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { [installPhase]: { commands: ['/bin/true'], @@ -445,12 +444,12 @@ behavior('synth can have its test commands set', (suite) => { }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(objectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { pre_build: { commands: ['/bin/true'], @@ -506,12 +505,12 @@ behavior('Synth can output additional artifacts', (suite) => { function THEN_codePipelineExpectation(asmArtifact: string, testArtifact: string) { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ artifacts: { 'secondary-artifacts': { [asmArtifact]: { @@ -585,7 +584,7 @@ behavior('Synth can be made to run in a VPC', (suite) => { function 
THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { VpcConfig: { SecurityGroupIds: [ { 'Fn::GetAtt': ['CdkPipelineBuildSynthCdkBuildProjectSecurityGroupEA44D7C2', 'GroupId'] }, @@ -599,16 +598,16 @@ behavior('Synth can be made to run in a VPC', (suite) => { }, }); - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { Roles: [ { Ref: 'CdkPipelineBuildSynthCdkBuildProjectRole5E173C62' }, ], PolicyDocument: { - Statement: arrayWith({ - Action: arrayWith('ec2:DescribeSecurityGroups'), + Statement: Match.arrayWith([{ + Action: Match.arrayWith(['ec2:DescribeSecurityGroups']), Effect: 'Allow', Resource: '*', - }), + }]), }, }); } @@ -721,28 +720,28 @@ behavior('Pipeline action contains a hash that changes as the buildspec changes' } function captureProjectConfigHash(_pipelineStack: Stack) { - const theHash = Capture.aString(); - expect(_pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + const theHash = new Capture(); + Template.fromStack(_pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Build', Actions: [ - objectLike({ + Match.objectLike({ Name: 'Synth', - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: '_PROJECT_CONFIG_HASH', type: 'PLAINTEXT', - value: theHash.capture(), + value: theHash, }, ]), }), }), ], - }), + }]), }); - return theHash.capturedValue; + return theHash.asString(); } }); @@ -784,12 +783,12 @@ behavior('Synth CodeBuild project role can be granted permissions', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + 
Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'], Resource: ['arn:aws:s3:::ThisParticularBucket', 'arn:aws:s3:::ThisParticularBucket/*'], - })), + })]), }, }); } @@ -878,15 +877,15 @@ behavior('CodeBuild: Can specify additional policy statements', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: [ 'codeartifact:*', 'sts:GetServiceBearerToken', ], Resource: 'arn:my:arn', - })), + })]), }, }); } @@ -913,38 +912,38 @@ behavior('Multiple input sources in side-by-side directories', (suite) => { }), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith( + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([ { Name: 'Source', Actions: [ - objectLike({ Configuration: objectLike({ Repo: 'bar' }) }), - objectLike({ Configuration: objectLike({ Repo: 'build' }) }), - objectLike({ Configuration: objectLike({ Repo: 'test' }) }), + Match.objectLike({ Configuration: Match.objectLike({ Repo: 'bar' }) }), + Match.objectLike({ Configuration: Match.objectLike({ Repo: 'build' }) }), + Match.objectLike({ Configuration: Match.objectLike({ Repo: 'test' }) }), ], }, { Name: 'Build', Actions: [ - objectLike({ Name: 'Prebuild', RunOrder: 1 }), - objectLike({ + Match.objectLike({ Name: 'Prebuild', RunOrder: 1 }), + Match.objectLike({ Name: 'Synth', RunOrder: 2, InputArtifacts: [ // 3 input artifacts - anything(), - anything(), - anything(), + Match.anyValue(), + Match.anyValue(), + Match.anyValue(), ], }), ], }, - ), + ]), }); - 
expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { install: { commands: [ @@ -975,12 +974,12 @@ behavior('Can easily switch on privileged mode for synth', (suite) => { commands: ['LookAtMe'], }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - Environment: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + Environment: Match.objectLike({ PrivilegedMode: true, }), Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -1079,19 +1078,19 @@ behavior('can provide custom BuildSpec that is merged with generated one', (suit function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { - Environment: objectLike({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { + Environment: Match.objectLike({ PrivilegedMode: true, - EnvironmentVariables: arrayWith( + EnvironmentVariables: Match.arrayWith([ { Name: 'INNER_VAR', Type: 'PLAINTEXT', Value: 'InnerValue', }, - ), + ]), }), Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ env: { variables: { FOO: 'bar', @@ -1099,7 +1098,7 @@ behavior('can provide custom BuildSpec that is merged with generated one', (suit }, phases: { pre_build: { - commands: arrayWith('installCustom'), + commands: Match.arrayWith(['installCustom']), }, build: { commands: ['synth'], diff --git a/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts b/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts index 447e22da59124..7a6a562a8707a 100644 --- a/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts +++ 
b/packages/@aws-cdk/pipelines/test/compliance/validations.test.ts @@ -1,6 +1,5 @@ /* eslint-disable import/no-extraneous-dependencies */ -import { anything, arrayWith, Capture, deepObjectLike, encodedJson, objectLike } from '@aws-cdk/assert-internal'; -import '@aws-cdk/assert-internal/jest'; +import { Capture, Match, Template } from '@aws-cdk/assertions'; import * as codebuild from '@aws-cdk/aws-codebuild'; import * as codepipeline from '@aws-cdk/aws-codepipeline'; import * as ec2 from '@aws-cdk/aws-ec2'; @@ -9,7 +8,7 @@ import * as s3 from '@aws-cdk/aws-s3'; import { Stack } from '@aws-cdk/core'; import * as cdkp from '../../lib'; import { CodePipelineSource, ShellStep } from '../../lib'; -import { AppWithOutput, behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortedByRunOrder, StageWithStackOutput, stringNoLongerThan, TestApp, TwoStackApp } from '../testhelpers'; +import { AppWithOutput, behavior, LegacyTestGitHubNpmPipeline, ModernTestGitHubNpmPipeline, OneStackApp, PIPELINE_ENV, sortByRunOrder, StageWithStackOutput, stringNoLongerThan, TestApp, TwoStackApp } from '../testhelpers'; let app: TestApp; let pipelineStack: Stack; @@ -37,17 +36,17 @@ behavior('can add manual approval after app', (suite) => { }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stack1.Prepare' }), - objectLike({ Name: 'Stack1.Deploy' }), - objectLike({ Name: 'Stack2.Prepare' }), - objectLike({ Name: 'Stack2.Deploy' }), - objectLike({ Name: 'Approve' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stack1.Prepare' }), + Match.objectLike({ Name: 'Stack1.Deploy' }), + Match.objectLike({ Name: 'Stack2.Prepare' }), + Match.objectLike({ Name: 'Stack2.Deploy' }), + Match.objectLike({ Name: 
'Approve' }), ]), - }), + }]), }); }); }); @@ -69,19 +68,19 @@ behavior('can add steps to wave', (suite) => { wave.addStage(new OneStackApp(pipelineStack, 'Stage3')); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyWave', - Actions: sortedByRunOrder([ - objectLike({ Name: 'Stage1.Stack.Prepare' }), - objectLike({ Name: 'Stage2.Stack.Prepare' }), - objectLike({ Name: 'Stage3.Stack.Prepare' }), - objectLike({ Name: 'Stage1.Stack.Deploy' }), - objectLike({ Name: 'Stage2.Stack.Deploy' }), - objectLike({ Name: 'Stage3.Stack.Deploy' }), - objectLike({ Name: 'Approve' }), + Actions: sortByRunOrder([ + Match.objectLike({ Name: 'Stage1.Stack.Prepare' }), + Match.objectLike({ Name: 'Stage2.Stack.Prepare' }), + Match.objectLike({ Name: 'Stage3.Stack.Prepare' }), + Match.objectLike({ Name: 'Stage1.Stack.Deploy' }), + Match.objectLike({ Name: 'Stage2.Stack.Deploy' }), + Match.objectLike({ Name: 'Stage3.Stack.Deploy' }), + Match.objectLike({ Name: 'Approve' }), ]), - }), + }]), }); }); }); @@ -104,37 +103,37 @@ behavior('script validation steps can use stack outputs as environment variables })); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'MyApp', - Actions: arrayWith( - deepObjectLike({ - Name: 'Stack.Deploy', - OutputArtifacts: [{ Name: anything() }], - Configuration: { - OutputFileName: 'outputs.json', - }, - }), - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ ActionTypeId: { Provider: 'CodeBuild', }, Configuration: { - ProjectName: anything(), + ProjectName: Match.anyValue(), }, - InputArtifacts: [{ Name: anything() }], + InputArtifacts: [{ Name: Match.anyValue() }], Name: 'TestOutput', 
}), - ), - }), + Match.objectLike({ + Name: 'Stack.Deploy', + OutputArtifacts: [{ Name: Match.anyValue() }], + Configuration: { + OutputFileName: 'outputs.json', + }, + }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -164,24 +163,24 @@ behavior('script validation steps can use stack outputs as environment variables }); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Alpha', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'Stack.Deploy', Namespace: 'AlphaStack6B3389FA', }), - objectLike({ + Match.objectLike({ Name: 'Approve', - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: 'THE_OUTPUT', value: '#{AlphaStack6B3389FA.MyOutput}', type: 'PLAINTEXT' }, ]), }), }), - ), - }), + ]), + }]), }); }); }); @@ -200,29 +199,29 @@ behavior('stackOutput generates names limited to 100 characters', (suite) => { })); // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'APreposterouslyLongAndComplicatedNameMadeUpJustToMakeItExceedTheLimitDefinedByCodeBuild', - Actions: arrayWith( - deepObjectLike({ - Name: 'Stack.Deploy', - OutputArtifacts: [{ Name: stringNoLongerThan(100) }], - Configuration: { - OutputFileName: 'outputs.json', - }, - }), - 
deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ ActionTypeId: { Provider: 'CodeBuild', }, Configuration: { - ProjectName: anything(), + ProjectName: Match.anyValue(), }, InputArtifacts: [{ Name: stringNoLongerThan(100) }], Name: 'TestOutput', }), - ), - }), + Match.objectLike({ + Name: 'Stack.Deploy', + OutputArtifacts: [{ Name: stringNoLongerThan(100) }], + Configuration: { + OutputFileName: 'outputs.json', + }, + }), + ]), + }]), }); }); @@ -240,16 +239,16 @@ behavior('stackOutput generates names limited to 100 characters', (suite) => { ], }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'APreposterouslyLongAndComplicatedNameMadeUpJustToMakeItExceedTheLimitDefinedByCodeBuild', - Actions: arrayWith( - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'Stack.Deploy', Namespace: stringNoLongerThan(100), }), - ), - }), + ]), + }]), }); }); }); @@ -283,35 +282,35 @@ behavior('validation step can run from scripts in source', (suite) => { }); function THEN_codePipelineExpectation() { - const sourceArtifact = Capture.aString(); + const sourceArtifact = new Capture(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Source', Actions: [ - deepObjectLike({ - OutputArtifacts: [{ Name: sourceArtifact.capture() }], + Match.objectLike({ + OutputArtifacts: [{ Name: sourceArtifact }], }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - deepObjectLike({ + Actions: 
Match.arrayWith([ + Match.objectLike({ Name: 'UseSources', - InputArtifacts: [{ Name: sourceArtifact.capturedValue }], + InputArtifacts: [{ Name: sourceArtifact.asString() }], }), - ), - }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -361,40 +360,40 @@ behavior('can use additional output artifacts from build', (suite) => { }); function THEN_codePipelineExpectation() { - const integArtifact = Capture.aString(); + const integArtifact = new Capture(); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Build', Actions: [ - deepObjectLike({ + Match.objectLike({ Name: 'Synth', OutputArtifacts: [ - { Name: anything() }, // It's not the first output - { Name: integArtifact.capture() }, + { Name: Match.anyValue() }, // It's not the first output + { Name: integArtifact }, ], }), ], - }), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'UseBuildArtifact', - InputArtifacts: [{ Name: integArtifact.capturedValue }], + InputArtifacts: [{ Name: integArtifact.asString() }], }), - ), - }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 
'aws/codebuild/standard:5.0', }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -450,12 +449,12 @@ behavior('can add policy statements to shell script action', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: 's3:Banana', Resource: '*', - })), + })]), }, }); } @@ -502,12 +501,12 @@ behavior('can grant permissions to shell script action', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::IAM::Policy', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { - Statement: arrayWith(deepObjectLike({ + Statement: Match.arrayWith([Match.objectLike({ Action: ['s3:GetObject*', 's3:GetBucket*', 's3:List*'], Resource: ['arn:aws:s3:::ThisParticularBucket', 'arn:aws:s3:::ThisParticularBucket/*'], - })), + })]), }, }); } @@ -562,7 +561,7 @@ behavior('can run shell script actions in a VPC', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:5.0', }, @@ -583,7 +582,7 @@ behavior('can run shell script actions in a VPC', (suite) => { }, }, Source: { - BuildSpec: encodedJson(deepObjectLike({ + BuildSpec: Match.serializedJson(Match.objectLike({ phases: { build: { commands: [ @@ -636,17 +635,17 @@ behavior('can run shell script actions with a specific SecurityGroup', (suite) = }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: 
arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - deepObjectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'sgAction', }), - ), - }), + ]), + }]), }); - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { VpcConfig: { SecurityGroupIds: [ { @@ -714,7 +713,7 @@ behavior('can run scripts with specified BuildEnvironment', (suite) => { }); function THEN_codePipelineExpectation() { - expect(pipelineStack).toHaveResourceLike('AWS::CodeBuild::Project', { + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodeBuild::Project', { Environment: { Image: 'aws/codebuild/standard:2.0', }, @@ -755,14 +754,14 @@ behavior('can run scripts with magic environment variables', (suite) => { function THEN_codePipelineExpectation() { // THEN - expect(pipelineStack).toHaveResourceLike('AWS::CodePipeline::Pipeline', { - Stages: arrayWith({ + Template.fromStack(pipelineStack).hasResourceProperties('AWS::CodePipeline::Pipeline', { + Stages: Match.arrayWith([{ Name: 'Test', - Actions: arrayWith( - objectLike({ + Actions: Match.arrayWith([ + Match.objectLike({ Name: 'imageAction', - Configuration: objectLike({ - EnvironmentVariables: encodedJson([ + Configuration: Match.objectLike({ + EnvironmentVariables: Match.serializedJson([ { name: 'VERSION', type: 'PLAINTEXT', @@ -771,8 +770,8 @@ behavior('can run scripts with magic environment variables', (suite) => { ]), }), }), - ), - }), + ]), + }]), }); } }); diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/index.ts b/packages/@aws-cdk/pipelines/test/testhelpers/index.ts index 87a02ce0b6a66..fbc50d3b1a003 100644 --- a/packages/@aws-cdk/pipelines/test/testhelpers/index.ts +++ b/packages/@aws-cdk/pipelines/test/testhelpers/index.ts @@ -2,5 +2,4 @@ export * from './compliance'; export * from 
'./legacy-pipeline'; export * from './modern-pipeline'; export * from './test-app'; -export * from './testmatchers'; export * from './matchers'; \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts b/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts index 4ace0148c5eaa..97a02fc1dc10d 100644 --- a/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts +++ b/packages/@aws-cdk/pipelines/test/testhelpers/matchers.ts @@ -1,9 +1,20 @@ -import { Matcher, MatchResult } from '@aws-cdk/assertions'; +import { Match, Matcher, MatchResult } from '@aws-cdk/assertions'; export function stringLike(pattern: string) { return new StringLike(pattern); } +export function sortByRunOrder(pattern: any[]): Matcher { + return new Sorter('SortByRunOrder', pattern, (a: any, b: any) => { + if (a.RunOrder !== b.RunOrder) { return a.RunOrder - b.RunOrder; } + return (a.Name as string).localeCompare(b.Name); + }); +} + +export function stringNoLongerThan(max: number): Matcher { + return new StringLengthMatcher(max); +} + // Reimplementation of // https://github.com/aws/aws-cdk/blob/430f50a546e9c575f8cdbd259367e440d985e68f/packages/%40aws-cdk/assert-internal/lib/assertions/have-resource-matchers.ts#L244 class StringLike extends Matcher { @@ -24,9 +35,55 @@ class StringLike extends Matcher { result.push(this, [], `Looking for string with pattern "${this.pattern}" but found "${actual}"`); } return result; + + function escapeRegex(s: string) { + return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string + } + } +} + +class Sorter extends Matcher { + constructor( + public readonly name: string, + private readonly pattern: any[], + private readonly compareFn: (a: any, b: any) => number, + ) { + super(); + } + + public test(actual: any): MatchResult { + const result = new MatchResult(actual); + if (!Array.isArray(actual)) { + result.push(this, [], `Expected an Array, but got '${typeof actual}'`); + return result; + } + + 
const copy = actual.slice(); + copy.sort(this.compareFn); + + const matcher = Matcher.isMatcher(this.pattern) ? this.pattern : Match.exact(this.pattern); + return matcher.test(copy); } } -function escapeRegex(s: string) { - return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +class StringLengthMatcher extends Matcher { + public name: string = 'StringLength' + + constructor(private readonly length: number) { + super(); + } + + public test(actual: any): MatchResult { + const result = new MatchResult(actual); + + if (typeof actual !== 'string') { + result.push(this, [], `Expected a string, but got '${typeof actual}'`); + } + + if (actual.length > this.length) { + result.push(this, [], `String is ${actual.length} characters long. Expected at most ${this.length} characters`); + } + + return result; + } } \ No newline at end of file diff --git a/packages/@aws-cdk/pipelines/test/testhelpers/testmatchers.ts b/packages/@aws-cdk/pipelines/test/testhelpers/testmatchers.ts deleted file mode 100644 index 8faa855b71abf..0000000000000 --- a/packages/@aws-cdk/pipelines/test/testhelpers/testmatchers.ts +++ /dev/null @@ -1,42 +0,0 @@ -/* eslint-disable import/no-extraneous-dependencies */ -import { annotateMatcher, InspectionFailure, matcherFrom, PropertyMatcher } from '@aws-cdk/assert-internal'; - -/** - * Sort an array (of Actions) by their RunOrder field before applying a matcher. - * - * Makes the matcher independent of the order in which the Actions get synthed - * to the template. Elements with the same RunOrder will be sorted by name. 
- */ -export function sortedByRunOrder(matcher: any): PropertyMatcher { - return annotateMatcher({ $sortedByRunOrder: matcher }, (value: any, failure: InspectionFailure) => { - if (!Array.isArray(value)) { - failure.failureReason = `Expected an Array, but got '${typeof value}'`; - return false; - } - - value = value.slice(); - - value.sort((a: any, b: any) => { - if (a.RunOrder !== b.RunOrder) { return a.RunOrder - b.RunOrder; } - return (a.Name as string).localeCompare(b.Name); - }); - - return matcherFrom(matcher)(value, failure); - }); -} - -export function stringNoLongerThan(length: number): PropertyMatcher { - return annotateMatcher({ $stringIsNoLongerThan: length }, (value: any, failure: InspectionFailure) => { - if (typeof value !== 'string') { - failure.failureReason = `Expected a string, but got '${typeof value}'`; - return false; - } - - if (value.length > length) { - failure.failureReason = `String is ${value.length} characters long. Expected at most ${length} characters`; - return false; - } - - return true; - }); -} \ No newline at end of file diff --git a/packages/aws-cdk/package.json b/packages/aws-cdk/package.json index f179a9bd58bb6..96bae8765c526 100644 --- a/packages/aws-cdk/package.json +++ b/packages/aws-cdk/package.json @@ -85,7 +85,7 @@ "json-diff": "^0.5.4", "minimatch": ">=3.0", "promptly": "^3.2.0", - "proxy-agent": "^4.0.1", + "proxy-agent": "^5.0.0", "semver": "^7.3.5", "source-map-support": "^0.5.19", "table": "^6.7.1", diff --git a/tools/cdk-release/package.json b/tools/cdk-release/package.json index 1f69eefdebddc..8823387b5c7e4 100644 --- a/tools/cdk-release/package.json +++ b/tools/cdk-release/package.json @@ -40,7 +40,7 @@ "conventional-changelog": "^3.1.24", "conventional-changelog-config-spec": "^2.1.0", "conventional-changelog-preset-loader": "^2.3.4", - "conventional-commits-parser": "^3.2.1", + "conventional-commits-parser": "^3.2.2", "conventional-changelog-writer": "^4.1.0", "fs-extra": "^9.1.0", "git-raw-commits": "^2.0.10", 
diff --git a/yarn.lock b/yarn.lock index e9d7fe62947d7..e9744b94014b6 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2063,11 +2063,6 @@ anymatch@^3.0.3: normalize-path "^3.0.0" picomatch "^2.0.4" -app-root-path@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/app-root-path/-/app-root-path-2.2.1.tgz#d0df4a682ee408273583d43f6f79e9892624bc9a" - integrity sha512-91IFKeKk7FjfmezPKkwtaRvSpnUc4gDwPAjA1YZ9Gn0q0PPeW+vbeUsZuyDwjI7+QTHhcLen2v25fi/AmhvbJA== - append-transform@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/append-transform/-/append-transform-1.0.0.tgz#046a52ae582a228bd72f58acfbe2967c678759ab" @@ -2289,21 +2284,6 @@ aws-sdk-mock@^5.2.1: sinon "^11.1.1" traverse "^0.6.6" -aws-sdk@^2.596.0: - version "2.970.0" - resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.970.0.tgz#dc258b61b4727dcb5130c494376b598eb19f827b" - integrity sha512-9+ktvE5xgpHr3RsFOcq1SrhXLvU+jUji44jbecFZb5C2lzoEEB29aeN39OLJMW0ZuOrR+3TNum8c3f8YVx6A7w== - dependencies: - buffer "4.9.2" - events "1.1.1" - ieee754 "1.1.13" - jmespath "0.15.0" - querystring "0.2.0" - sax "1.2.1" - url "0.10.3" - uuid "3.3.2" - xml2js "0.4.19" - aws-sdk@^2.848.0, aws-sdk@^2.928.0: version "2.950.0" resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.950.0.tgz#cffb65590c50de9479c87ed04df57d355d1d8a22" @@ -2345,11 +2325,11 @@ aws4@^1.8.0: integrity sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA== axios@^0.21.1: - version "0.21.1" - resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.1.tgz#22563481962f4d6bde9a76d516ef0e5d3c09b2b8" - integrity sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA== + version "0.21.4" + resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575" + integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg== dependencies: - follow-redirects "^1.10.0" + 
follow-redirects "^1.14.0" babel-jest@^26.6.3: version "26.6.3" @@ -3216,6 +3196,18 @@ conventional-commits-parser@^3.2.0, conventional-commits-parser@^3.2.1: through2 "^4.0.0" trim-off-newlines "^1.0.0" +conventional-commits-parser@^3.2.2: + version "3.2.2" + resolved "https://registry.yarnpkg.com/conventional-commits-parser/-/conventional-commits-parser-3.2.2.tgz#190fb9900c6e02be0c0bca9b03d57e24982639fd" + integrity sha512-Jr9KAKgqAkwXMRHjxDwO/zOCDKod1XdAESHAGuJX38iZ7ZzVti/tvVoysO0suMsdAObp9NQ2rHSsSbnAqZ5f5g== + dependencies: + JSONStream "^1.0.4" + is-text-path "^1.0.1" + lodash "^4.17.15" + meow "^8.0.0" + split2 "^3.0.0" + through2 "^4.0.0" + conventional-recommended-bump@6.1.0, conventional-recommended-bump@^6.1.0: version "6.1.0" resolved "https://registry.yarnpkg.com/conventional-recommended-bump/-/conventional-recommended-bump-6.1.0.tgz#cfa623285d1de554012f2ffde70d9c8a22231f55" @@ -3537,14 +3529,15 @@ define-property@^2.0.2: is-descriptor "^1.0.2" isobject "^3.0.1" -degenerator@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/degenerator/-/degenerator-2.2.0.tgz#49e98c11fa0293c5b26edfbb52f15729afcdb254" - integrity sha512-aiQcQowF01RxFI4ZLFMpzyotbQonhNpBao6dkI8JPk5a+hmSjR5ErHp2CQySmQe8os3VBqLCIh87nDBgZXvsmg== +degenerator@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/degenerator/-/degenerator-3.0.1.tgz#7ef78ec0c8577a544477308ddf1d2d6e88d51f5b" + integrity sha512-LFsIFEeLPlKvAKXu7j3ssIG6RT0TbI7/GhsqrI0DnHASEQjXQ0LUSYcjJteGgRGmZbl1TnMSxpNQIAiJ7Du5TQ== dependencies: ast-types "^0.13.2" escodegen "^1.8.1" esprima "^4.0.0" + vm2 "^3.9.3" delay@5.0.0: version "5.0.0" @@ -3668,16 +3661,6 @@ dot-prop@^6.0.1: dependencies: is-obj "^2.0.0" -dotenv-json@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/dotenv-json/-/dotenv-json-1.0.0.tgz#fc7f672aafea04bed33818733b9f94662332815c" - integrity sha512-jAssr+6r4nKhKRudQ0HOzMskOFFi9+ubXWwmrSGJFgTvpjyPXCXsCsYbjif6mXp7uxA7xY3/LGaiTQukZzSbOQ== - -dotenv@^8.0.0: - version 
"8.6.0" - resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-8.6.0.tgz#061af664d19f7f4d8fc6e4ff9b584ce237adcb8b" - integrity sha512-IrPdXQsk2BbzvCBGBOTmmSH5SodmqZNt4ERAZDmW4CT+tL8VtvinqywuANaFu4bOMWki16nqf0e4oC0QIaDr/g== - dotgitignore@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/dotgitignore/-/dotgitignore-2.1.0.tgz#a4b15a4e4ef3cf383598aaf1dfa4a04bcc089b7b" @@ -3883,11 +3866,6 @@ escodegen@^2.0.0: optionalDependencies: source-map "~0.6.1" -eslint-config-standard@^14.1.1: - version "14.1.1" - resolved "https://registry.yarnpkg.com/eslint-config-standard/-/eslint-config-standard-14.1.1.tgz#830a8e44e7aef7de67464979ad06b406026c56ea" - integrity sha512-Z9B+VR+JIXRxz21udPTL9HpFMyoMUEeX1G251EQ6e05WD9aPVtVBn09XUmZ259wCMlCDmYDSZG62Hhm+ZTJcUg== - eslint-import-resolver-node@^0.3.4: version "0.3.4" resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.4.tgz#85ffa81942c25012d8231096ddf679c03042c717" @@ -3915,14 +3893,6 @@ eslint-module-utils@^2.6.1: debug "^3.2.7" pkg-dir "^2.0.0" -eslint-plugin-es@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz#75a7cdfdccddc0589934aeeb384175f221c57893" - integrity sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ== - dependencies: - eslint-utils "^2.0.0" - regexpp "^3.0.0" - eslint-plugin-import@^2.23.4: version "2.23.4" resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.23.4.tgz#8dceb1ed6b73e46e50ec9a5bb2411b645e7d3d97" @@ -3951,33 +3921,11 @@ eslint-plugin-jest@^24.3.7: dependencies: "@typescript-eslint/experimental-utils" "^4.0.1" -eslint-plugin-node@^11.1.0: - version "11.1.0" - resolved "https://registry.yarnpkg.com/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz#c95544416ee4ada26740a30474eefc5402dc671d" - integrity sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g== - dependencies: 
- eslint-plugin-es "^3.0.0" - eslint-utils "^2.0.0" - ignore "^5.1.1" - minimatch "^3.0.4" - resolve "^1.10.1" - semver "^6.1.0" - -eslint-plugin-promise@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-promise/-/eslint-plugin-promise-4.3.1.tgz#61485df2a359e03149fdafc0a68b0e030ad2ac45" - integrity sha512-bY2sGqyptzFBDLh/GMbAxfdJC+b0f23ME63FOE4+Jao0oZ3E1LEwFtWJX/1pGMJLiTtrSSern2CRM/g+dfc0eQ== - eslint-plugin-rulesdir@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/eslint-plugin-rulesdir/-/eslint-plugin-rulesdir-0.2.0.tgz#0d729e3f11bcb1a18d9b724a29a6d1a082ac2d62" integrity sha512-PPQPCsPkzF3upl1862swPA1bmDAAHKHmJJ4JTHJ11JCVCU4sycB0K5LLA/Rwr6r4VbnpScvUvHV4hqfdjvFmhQ== -eslint-plugin-standard@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/eslint-plugin-standard/-/eslint-plugin-standard-4.1.0.tgz#0c3bf3a67e853f8bbbc580fb4945fbf16f41b7c5" - integrity sha512-ZL7+QRixjTR6/528YNGyDotyffm5OQst/sGxKDwGb9Uqs4In5Egi4+jbobhqJoyoCM6/7v/1A5fhQ7ScMtDjaQ== - eslint-scope@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" @@ -3986,7 +3934,7 @@ eslint-scope@^5.1.1: esrecurse "^4.3.0" estraverse "^4.1.1" -eslint-utils@^2.0.0, eslint-utils@^2.1.0: +eslint-utils@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-2.1.0.tgz#d2de5e03424e707dc10c74068ddedae708741b27" integrity sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg== @@ -4441,10 +4389,10 @@ flatted@^3.1.0: resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.2.1.tgz#bbef080d95fca6709362c73044a1634f7c6e7d05" integrity sha512-OMQjaErSFHmHqZe+PSidH5n8j3O0F2DdnVh8JB4j4eUQ2k6KvB0qGfrKIhapvez5JerBbmWkaLYUYWISaESoXg== -follow-redirects@^1.10.0, follow-redirects@^1.11.0: - version "1.14.1" - resolved 
"https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.1.tgz#d9114ded0a1cfdd334e164e6662ad02bfd91ff43" - integrity sha512-HWqDgT7ZEkqRzBvc2s64vSZ/hfOceEol3ac/7tKwzuvEyWx3/4UegXh5oBOIotkGsObyk3xznnSRVADBgWSQVg== +follow-redirects@^1.11.0, follow-redirects@^1.14.0: + version "1.14.3" + resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.14.3.tgz#6ada78118d8d24caee595595accdc0ac6abd022e" + integrity sha512-3MkHxknWMUtb23apkgz/83fDoe+y+qr0TdgacGIA7bew+QLBo3vdgEN2xEsuXNivpFy4CyDhBBZnNZOtalmenw== for-in@^1.0.2: version "1.0.2" @@ -5044,7 +4992,7 @@ ignore@^4.0.6: resolved "https://registry.yarnpkg.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== -ignore@^5.1.1, ignore@^5.1.4, ignore@^5.1.8, ignore@~5.1.8: +ignore@^5.1.4, ignore@^5.1.8, ignore@~5.1.8: version "5.1.8" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== @@ -6311,24 +6259,6 @@ kleur@^3.0.3: resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== -lambda-leak@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/lambda-leak/-/lambda-leak-2.0.0.tgz#771985d3628487f6e885afae2b54510dcfb2cd7e" - integrity sha1-dxmF02KEh/boha+uK1RRDc+yzX4= - -lambda-tester@^3.6.0: - version "3.6.0" - resolved "https://registry.yarnpkg.com/lambda-tester/-/lambda-tester-3.6.0.tgz#ceb7d4f4f0da768487a05cff37dcd088508b5247" - integrity sha512-F2ZTGWCLyIR95o/jWK46V/WnOCFAEUG/m/V7/CLhPJ7PCM+pror1rZ6ujP3TkItSGxUfpJi0kqwidw+M/nEqWw== - dependencies: - app-root-path "^2.2.1" - dotenv "^8.0.0" - dotenv-json "^1.0.0" - lambda-leak "^2.0.0" - semver 
"^6.1.1" - uuid "^3.3.2" - vandium-utils "^1.1.1" - lazystream@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/lazystream/-/lazystream-1.0.0.tgz#f6995fe0f820392f61396be89462407bb77168e4" @@ -7759,10 +7689,10 @@ p-waterfall@^2.1.1: dependencies: p-reduce "^2.0.0" -pac-proxy-agent@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/pac-proxy-agent/-/pac-proxy-agent-4.1.0.tgz#66883eeabadc915fc5e95457324cb0f0ac78defb" - integrity sha512-ejNgYm2HTXSIYX9eFlkvqFp8hyJ374uDf0Zq5YUAifiSh1D6fo+iBivQZirGvVv8dCYUsLhmLBRhlAYvBKI5+Q== +pac-proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/pac-proxy-agent/-/pac-proxy-agent-5.0.0.tgz#b718f76475a6a5415c2efbe256c1c971c84f635e" + integrity sha512-CcFG3ZtnxO8McDigozwE3AqAw15zDvGH+OjXO4kzf7IkEKkQ4gxQ+3sdF50WmhQ4P/bVusXcqNE2S3XrNURwzQ== dependencies: "@tootallnate/once" "1" agent-base "6" @@ -7770,16 +7700,16 @@ pac-proxy-agent@^4.1.0: get-uri "3" http-proxy-agent "^4.0.1" https-proxy-agent "5" - pac-resolver "^4.1.0" + pac-resolver "^5.0.0" raw-body "^2.2.0" socks-proxy-agent "5" -pac-resolver@^4.1.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/pac-resolver/-/pac-resolver-4.2.0.tgz#b82bcb9992d48166920bc83c7542abb454bd9bdd" - integrity sha512-rPACZdUyuxT5Io/gFKUeeZFfE5T7ve7cAkE5TUZRRfuKP0u5Hocwe48X7ZEm6mYB+bTB0Qf+xlVlA/RM/i6RCQ== +pac-resolver@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/pac-resolver/-/pac-resolver-5.0.0.tgz#1d717a127b3d7a9407a16d6e1b012b13b9ba8dc0" + integrity sha512-H+/A6KitiHNNW+bxBKREk2MCGSxljfqRX76NjummWEYIat7ldVXRU3dhRIE3iXZ0nvGBk6smv3nntxKkzRL8NA== dependencies: - degenerator "^2.2.0" + degenerator "^3.0.1" ip "^1.1.5" netmask "^2.0.1" @@ -8118,17 +8048,17 @@ protocols@^1.1.0, protocols@^1.4.0: resolved "https://registry.yarnpkg.com/protocols/-/protocols-1.4.8.tgz#48eea2d8f58d9644a4a32caae5d5db290a075ce8" integrity sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg== 
-proxy-agent@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/proxy-agent/-/proxy-agent-4.0.1.tgz#326c3250776c7044cd19655ccbfadf2e065a045c" - integrity sha512-ODnQnW2jc/FUVwHHuaZEfN5otg/fMbvMxz9nMSUQfJ9JU7q2SZvSULSsjLloVgJOiv9yhc8GlNMKc4GkFmcVEA== +proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/proxy-agent/-/proxy-agent-5.0.0.tgz#d31405c10d6e8431fde96cba7a0c027ce01d633b" + integrity sha512-gkH7BkvLVkSfX9Dk27W6TyNOWWZWRilRfk1XxGNWOYJ2TuedAv1yFpCaU9QSBmBe716XOTNpYNOzhysyw8xn7g== dependencies: agent-base "^6.0.0" debug "4" http-proxy-agent "^4.0.0" https-proxy-agent "^5.0.0" lru-cache "^5.1.1" - pac-proxy-agent "^4.1.0" + pac-proxy-agent "^5.0.0" proxy-from-env "^1.0.0" socks-proxy-agent "^5.0.0" @@ -8412,7 +8342,7 @@ regexp.prototype.flags@^1.3.0: call-bind "^1.0.2" define-properties "^1.1.3" -regexpp@^3.0.0, regexpp@^3.1.0: +regexpp@^3.1.0: version "3.2.0" resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.2.0.tgz#0425a2768d8f23bad70ca4b90461fa2f1213e1b2" integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg== @@ -8502,7 +8432,7 @@ resolve-url@^0.2.1: resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= -resolve@^1.10.0, resolve@^1.10.1, resolve@^1.11.1, resolve@^1.13.1, resolve@^1.17.0, resolve@^1.18.1, resolve@^1.20.0: +resolve@^1.10.0, resolve@^1.11.1, resolve@^1.13.1, resolve@^1.17.0, resolve@^1.18.1, resolve@^1.20.0: version "1.20.0" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== @@ -8649,7 +8579,7 @@ semver@7.x, semver@^7.1.1, semver@^7.1.3, semver@^7.2.1, semver@^7.3.2, semver@^ dependencies: lru-cache "^6.0.0" -semver@^6.0.0, semver@^6.1.0, semver@^6.1.1, semver@^6.3.0: 
+semver@^6.0.0, semver@^6.3.0: version "6.3.0" resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== @@ -9332,9 +9262,9 @@ tar-stream@^2.2.0: readable-stream "^3.1.1" tar@^4.4.12: - version "4.4.16" - resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.16.tgz#4a48b3c025e77d9d0c788f038a09b91c594d326d" - integrity sha512-gOVUT/KWPkGFZQmCRDVFNUWBl7niIo/PRR7lzrIqtZpit+st54lGROuVjc6zEQM9FhH+dJfQIl+9F0k8GNXg5g== + version "4.4.19" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.19.tgz#2e4d7263df26f2b914dee10c825ab132123742f3" + integrity sha512-a20gEsvHnWe0ygBY8JbxoM4w3SJdhc7ZAuxkLqh+nvNQN2IOt0B5lLgM490X5Hl8FF0dl0tOf2ewFYAlIFgzVA== dependencies: chownr "^1.1.4" fs-minipass "^1.2.7" @@ -9924,11 +9854,6 @@ validate-npm-package-name@^3.0.0: dependencies: builtins "^1.0.3" -vandium-utils@^1.1.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/vandium-utils/-/vandium-utils-1.2.0.tgz#44735de4b7641a05de59ebe945f174e582db4f59" - integrity sha1-RHNd5LdkGgXeWevpRfF05YLbT1k= - verror@1.10.0: version "1.10.0" resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400" @@ -9938,6 +9863,11 @@ verror@1.10.0: core-util-is "1.0.2" extsprintf "^1.2.0" +vm2@^3.9.3: + version "3.9.3" + resolved "https://registry.yarnpkg.com/vm2/-/vm2-3.9.3.tgz#29917f6cc081cc43a3f580c26c5b553fd3c91f40" + integrity sha512-smLS+18RjXYMl9joyJxMNI9l4w7biW8ilSDaVRvFBDwOH8P0BK1ognFQTpg0wyQ6wIKLTblHJvROW692L/E53Q== + w3c-hr-time@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd"