Skip to content

Commit

Permalink
Merge branch 'master' into msimpsonnz/feature-lambda-tumblingwindow
Browse files Browse the repository at this point in the history
  • Loading branch information
mergify[bot] authored Mar 26, 2021
2 parents 28f1297 + 88f2c5a commit e5a4e4d
Show file tree
Hide file tree
Showing 11 changed files with 91 additions and 80 deletions.
2 changes: 0 additions & 2 deletions packages/@aws-cdk/aws-lambda/lib/log-retention.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,6 @@ import { Construct } from 'constructs';

/**
 * Retry options for all AWS API calls.
 *
 * @deprecated use `LogRetentionRetryOptions` from '@aws-cdk/aws-logs' instead
 */
export interface LogRetentionRetryOptions extends logs.LogRetentionRetryOptions {
}
Expand Down
9 changes: 8 additions & 1 deletion packages/@aws-cdk/aws-lambda/lib/runtime.ts
Original file line number Diff line number Diff line change
Expand Up @@ -209,16 +209,23 @@ export class Runtime {
public readonly family?: RuntimeFamily;

/**
 * The bundling Docker image for this runtime.
 *
 * @deprecated use `bundlingImage`
 */
public readonly bundlingDockerImage: BundlingDockerImage;

/**
 * The bundling Docker image for this runtime.
 */
public readonly bundlingImage: DockerImage;

constructor(name: string, family?: RuntimeFamily, props: LambdaRuntimeProps = { }) {
this.name = name;
this.supportsInlineCode = !!props.supportsInlineCode;
this.family = family;
const imageName = props.bundlingDockerImage ?? `amazon/aws-sam-cli-build-image-${name}`;
this.bundlingDockerImage = DockerImage.fromRegistry(imageName);
this.bundlingImage = this.bundlingDockerImage;
this.supportsCodeGuruProfiling = props.supportsCodeGuruProfiling ?? false;

Runtime.ALL.push(this);
Expand Down
3 changes: 3 additions & 0 deletions packages/@aws-cdk/aws-rds/lib/cluster.ts
Original file line number Diff line number Diff line change
Expand Up @@ -651,6 +651,9 @@ interface InstanceConfig {
*/
function createInstances(cluster: DatabaseClusterNew, props: DatabaseClusterBaseProps, subnetGroup: ISubnetGroup): InstanceConfig {
const instanceCount = props.instances != null ? props.instances : 2;
if (Token.isUnresolved(instanceCount)) {
throw new Error('The number of instances an RDS Cluster consists of cannot be provided as a deploy-time only value!');
}
if (instanceCount < 1) {
throw new Error('At least one instance is required');
}
Expand Down
50 changes: 14 additions & 36 deletions packages/@aws-cdk/aws-rds/test/cluster.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,22 @@ describe('cluster', () => {
DeletionPolicy: 'Delete',
UpdateReplacePolicy: 'Delete',
}, ResourcePart.CompleteDefinition);
});

test('validates that the number of instances is not a deploy-time value', () => {
  // GIVEN: a stack containing a CloudFormation number parameter (a deploy-time token)
  const testEnv = testStack();
  const clusterVpc = new ec2.Vpc(testEnv, 'VPC');
  const instanceCountParam = new cdk.CfnParameter(testEnv, 'Param', { type: 'Number' });

  // WHEN the token is passed as the instance count, THEN construction must fail
  const createCluster = () =>
    new DatabaseCluster(testEnv, 'Database', {
      instances: instanceCountParam.valueAsNumber,
      engine: DatabaseClusterEngine.AURORA,
      instanceProps: { vpc: clusterVpc },
    });

  expect(createCluster).toThrow('The number of instances an RDS Cluster consists of cannot be provided as a deploy-time only value!');
});

test('can create a cluster with a single instance', () => {
Expand Down Expand Up @@ -81,8 +95,6 @@ describe('cluster', () => {
MasterUserPassword: 'tooshort',
VpcSecurityGroupIds: [{ 'Fn::GetAtt': ['DatabaseSecurityGroup5C91FDCB', 'GroupId'] }],
});


});

test('can create a cluster with imported vpc and security group', () => {
Expand Down Expand Up @@ -116,8 +128,6 @@ describe('cluster', () => {
MasterUserPassword: 'tooshort',
VpcSecurityGroupIds: ['SecurityGroupId12345'],
});


});

test('cluster with parameter group', () => {
Expand Down Expand Up @@ -150,8 +160,6 @@ describe('cluster', () => {
expect(stack).toHaveResource('AWS::RDS::DBCluster', {
DBClusterParameterGroupName: { Ref: 'ParamsA8366201' },
});


});

test("sets the retention policy of the SubnetGroup to 'Retain' if the Cluster is created with 'Retain'", () => {
Expand All @@ -172,8 +180,6 @@ describe('cluster', () => {
DeletionPolicy: 'Retain',
UpdateReplacePolicy: 'Retain',
}, ResourcePart.CompleteDefinition);


});

test('creates a secret when master credentials are not specified', () => {
Expand Down Expand Up @@ -230,8 +236,6 @@ describe('cluster', () => {
SecretStringTemplate: '{"username":"admin"}',
},
});


});

test('create an encrypted cluster with custom KMS key', () => {
Expand Down Expand Up @@ -261,8 +265,6 @@ describe('cluster', () => {
],
},
});


});

test('cluster with instance parameter group', () => {
Expand Down Expand Up @@ -294,8 +296,6 @@ describe('cluster', () => {
Ref: 'ParameterGroup5E32DECB',
},
});


});

describe('performance insights', () => {
Expand Down Expand Up @@ -323,8 +323,6 @@ describe('cluster', () => {
PerformanceInsightsRetentionPeriod: 731,
PerformanceInsightsKMSKeyId: { 'Fn::GetAtt': ['Key961B73FD', 'Arn'] },
});


});

test('setting performance insights fields enables performance insights', () => {
Expand All @@ -348,8 +346,6 @@ describe('cluster', () => {
EnablePerformanceInsights: true,
PerformanceInsightsRetentionPeriod: 731,
});


});

test('throws if performance insights fields are set but performance insights is disabled', () => {
Expand All @@ -370,8 +366,6 @@ describe('cluster', () => {
},
});
}).toThrow(/`enablePerformanceInsights` disabled, but `performanceInsightRetention` or `performanceInsightEncryptionKey` was set/);


});
});

Expand All @@ -392,8 +386,6 @@ describe('cluster', () => {
expect(stack).toHaveResource('AWS::RDS::DBInstance', {
AutoMinorVersionUpgrade: false,
});


});

test('cluster with allow upgrade of major version', () => {
Expand All @@ -413,8 +405,6 @@ describe('cluster', () => {
expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', {
AllowMajorVersionUpgrade: true,
});


});

test('cluster with disallow remove backups', () => {
Expand All @@ -434,8 +424,6 @@ describe('cluster', () => {
expect(stack).toHaveResourceLike('AWS::RDS::DBInstance', {
DeleteAutomatedBackups: false,
});


});

test('create a cluster using a specific version of MySQL', () => {
Expand All @@ -462,8 +450,6 @@ describe('cluster', () => {
Engine: 'aurora-mysql',
EngineVersion: '5.7.mysql_aurora.2.04.4',
});


});

test('create a cluster using a specific version of Postgresql', () => {
Expand Down Expand Up @@ -513,8 +499,6 @@ describe('cluster', () => {

// THEN
expect(stack.resolve(cluster.clusterEndpoint)).not.toEqual(stack.resolve(cluster.clusterReadEndpoint));


});

test('imported cluster with imported security group honors allowAllOutbound', () => {
Expand All @@ -540,8 +524,6 @@ describe('cluster', () => {
expect(stack).toHaveResource('AWS::EC2::SecurityGroupEgress', {
GroupId: 'sg-123456789',
});


});

test('can import a cluster with minimal attributes', () => {
Expand All @@ -567,8 +549,6 @@ describe('cluster', () => {
expect(() => cluster.clusterReadEndpoint).toThrow(/Cannot access `clusterReadEndpoint` of an imported cluster/);
expect(() => cluster.instanceIdentifiers).toThrow(/Cannot access `instanceIdentifiers` of an imported cluster/);
expect(() => cluster.instanceEndpoints).toThrow(/Cannot access `instanceEndpoints` of an imported cluster/);


});

test('imported cluster can access properties if attributes are provided', () => {
Expand All @@ -590,8 +570,6 @@ describe('cluster', () => {
expect(cluster.clusterReadEndpoint.socketAddress).toEqual('reader-address:3306');
expect(cluster.instanceIdentifiers).toEqual(['identifier']);
expect(cluster.instanceEndpoints.map(endpoint => endpoint.socketAddress)).toEqual(['instance-addr:3306']);


});

test('cluster supports metrics', () => {
Expand Down
58 changes: 29 additions & 29 deletions packages/@aws-cdk/core/lib/bundling.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ export interface BundlingOptions {
/**
* The Docker image where the command will run.
*/
readonly image: BundlingDockerImage;
readonly image: DockerImage;

/**
* The entrypoint to run in the Docker container.
Expand Down Expand Up @@ -158,33 +158,7 @@ export class BundlingDockerImage {
* @deprecated use DockerImage.fromBuild()
*/
public static fromAsset(path: string, options: DockerBuildOptions = {}) {
  // Deprecated shim: the real implementation lives in DockerImage.fromBuild().
  // The cast keeps the historically declared BundlingDockerImage return type
  // so existing callers continue to type-check.
  return DockerImage.fromBuild(path, options) as BundlingDockerImage;
}

/** @param image The Docker image */
Expand Down Expand Up @@ -276,7 +250,33 @@ export class DockerImage extends BundlingDockerImage {
* @param options Docker build options
*/
public static fromBuild(path: string, options: DockerBuildOptions = {}) {
  const buildArgs = options.buildArgs || {};

  // The -f argument is joined onto the build directory, so it must be relative.
  if (options.file && isAbsolute(options.file)) {
    throw new Error(`"file" must be relative to the docker build directory. Got ${options.file}`);
  }

  // Image tag derived from path and build options
  const input = JSON.stringify({ path, ...options });
  const tagHash = crypto.createHash('sha256').update(input).digest('hex');
  const tag = `cdk-${tagHash}`;

  const dockerArgs: string[] = [
    'build', '-t', tag,
    ...(options.file ? ['-f', join(path, options.file)] : []),
    ...flatten(Object.entries(buildArgs).map(([k, v]) => ['--build-arg', `${k}=${v}`])),
    path,
  ];

  dockerExec(dockerArgs);

  // Fingerprints the directory containing the Dockerfile we're building and
  // differentiates the fingerprint based on build arguments. We do this so
  // we can provide a stable image hash. Otherwise, the image ID will be
  // different every time the Docker layer cache is cleared, due primarily to
  // timestamps.
  const hash = FileSystem.fingerprint(path, { extraHash: JSON.stringify(options) });
  return new DockerImage(tag, hash);
}

/**
Expand Down
8 changes: 7 additions & 1 deletion packages/@aws-cdk/core/lib/construct-compat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -412,11 +412,17 @@ export class ConstructNode {
return this._actualNode.tryGetContext(key);
}

/**
 * DEPRECATED
 * @deprecated use `metadataEntry`
 */
public get metadata() { return this._actualNode.metadata as cxapi.MetadataEntry[]; }

/**
 * An immutable array of metadata objects associated with this construct.
 * This can be used, for example, to implement support for deprecation notices, source mapping, etc.
 */
public get metadataEntry() { return this._actualNode.metadata; }

/**
* Adds a metadata entry to this construct.
Expand Down
18 changes: 13 additions & 5 deletions packages/@aws-cdk/core/lib/stack.ts
Original file line number Diff line number Diff line change
Expand Up @@ -424,6 +424,17 @@ export class Stack extends CoreConstruct implements ITaggable {
return CloudFormationLang.toJSON(obj, space).toString();
}

/**
 * DEPRECATED
 * @deprecated use `reportMissingContextKey()`
 */
public reportMissingContext(report: cxapi.MissingContext) {
  // Validate against the schema's known providers before delegating, since the
  // deprecated cxapi type is wider than the cxschema type expected downstream.
  if (!Object.values(cxschema.ContextProvider).includes(report.provider as cxschema.ContextProvider)) {
    throw new Error(`Unknown context provider requested in: ${JSON.stringify(report)}`);
  }
  this.reportMissingContextKey(report as cxschema.MissingContext);
}

/**
 * Indicate that a context key was expected
 *
 * @param report The set of parameters needed to obtain the context
 */
public reportMissingContextKey(report: cxschema.MissingContext) {
  this._missingContext.push(report);
}

/**
Expand Down
4 changes: 2 additions & 2 deletions packages/@aws-cdk/core/test/bundling.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ nodeunitShim({
const fingerprintStub = sinon.stub(FileSystem, 'fingerprint');
fingerprintStub.callsFake(() => imageHash);

const image = BundlingDockerImage.fromAsset('docker-path', {
const image = DockerImage.fromBuild('docker-path', {
buildArgs: {
TEST_ARG: 'cdk-test',
},
Expand Down Expand Up @@ -139,7 +139,7 @@ nodeunitShim({
const fingerprintStub = sinon.stub(FileSystem, 'fingerprint');
fingerprintStub.callsFake(() => imageHash);

const image = BundlingDockerImage.fromAsset('docker-path');
const image = DockerImage.fromBuild('docker-path');

const tagHash = crypto.createHash('sha256').update(JSON.stringify({
path: 'docker-path',
Expand Down
3 changes: 2 additions & 1 deletion packages/aws-cdk-lib/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,8 @@
"cdk-build": {
"eslint": {
"disable": true
}
},
"stripDeprecated": true
},
"pkglint": {
"exclude": [
Expand Down
2 changes: 1 addition & 1 deletion tools/cdk-build-tools/lib/compile.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ import { Timers } from './timer';
*/
export async function compileCurrentPackage(options: CDKBuildOptions, timers: Timers, compilers: CompilerOverrides = {}): Promise<void> {
const env = options.env;
await shell(packageCompiler(compilers), { timers, env });
await shell(packageCompiler(compilers, options), { timers, env });

// Find files in bin/ that look like they should be executable, and make them so.
const scripts = currentPackageJson().bin || {};
Expand Down
Loading

0 comments on commit e5a4e4d

Please sign in to comment.