diff --git a/packages/@aws-cdk/aws-codecommit/package.json b/packages/@aws-cdk/aws-codecommit/package.json index f5b3925eac5a2..2374b358d9194 100644 --- a/packages/@aws-cdk/aws-codecommit/package.json +++ b/packages/@aws-cdk/aws-codecommit/package.json @@ -84,7 +84,7 @@ }, "license": "Apache-2.0", "devDependencies": { - "@aws-cdk/assert-internal": "0.0.0", + "@aws-cdk/assertions": "0.0.0", "@aws-cdk/aws-sns": "0.0.0", "@aws-cdk/cdk-build-tools": "0.0.0", "@aws-cdk/cdk-integ-tools": "0.0.0", diff --git a/packages/@aws-cdk/aws-codecommit/test/codecommit.test.ts b/packages/@aws-cdk/aws-codecommit/test/codecommit.test.ts index 3dca30532e1ad..623f66eb4675a 100644 --- a/packages/@aws-cdk/aws-codecommit/test/codecommit.test.ts +++ b/packages/@aws-cdk/aws-codecommit/test/codecommit.test.ts @@ -1,5 +1,5 @@ -import '@aws-cdk/assert-internal/jest'; import { join, resolve } from 'path'; +import { Template } from '@aws-cdk/assertions'; import { Role, ServicePrincipal } from '@aws-cdk/aws-iam'; import { Asset } from '@aws-cdk/aws-s3-assets'; import * as cxschema from '@aws-cdk/cloud-assembly-schema'; @@ -19,7 +19,7 @@ describe('codecommit', () => { new Repository(stack, 'MyRepository', props).notify(snsArn); - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyRepository4C4BD5FC: { Type: 'AWS::CodeCommit::Repository', @@ -258,7 +258,7 @@ describe('codecommit', () => { repository.grantPullPush(role); // THEN - expect(stack).toHaveResource('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { diff --git a/packages/@aws-cdk/aws-codecommit/test/notification-rule.test.ts b/packages/@aws-cdk/aws-codecommit/test/notification-rule.test.ts index 721ce01d4c490..ae65a5f4f2ef8 100644 --- a/packages/@aws-cdk/aws-codecommit/test/notification-rule.test.ts +++ b/packages/@aws-cdk/aws-codecommit/test/notification-rule.test.ts @@ -1,4 +1,4 @@ -import '@aws-cdk/assert-internal/jest'; +import { Template } from '@aws-cdk/assertions'; import * as sns from '@aws-cdk/aws-sns'; import * as cdk from '@aws-cdk/core'; import * as codecommit from '../lib'; @@ -16,7 +16,7 @@ describe('notification rule', () => { repository.notifyOnPullRequestMerged('NotifyOnPullRequestMerged', target); - expect(stack).toHaveResource('AWS::CodeStarNotifications::NotificationRule', { + Template.fromStack(stack).hasResourceProperties('AWS::CodeStarNotifications::NotificationRule', { Name: 'MyCodecommitRepositoryNotifyOnPullRequestCreatedBB14EA32', DetailType: 'FULL', EventTypeIds: [ @@ -38,7 +38,7 @@ describe('notification rule', () => { ], }); - expect(stack).toHaveResource('AWS::CodeStarNotifications::NotificationRule', { + Template.fromStack(stack).hasResourceProperties('AWS::CodeStarNotifications::NotificationRule', { Name: 'MyCodecommitRepositoryNotifyOnPullRequestMerged34A7EDF1', DetailType: 'FULL', EventTypeIds: [ diff --git a/packages/@aws-cdk/aws-ecr/package.json b/packages/@aws-cdk/aws-ecr/package.json index e8e9c3402eed3..e2b8bccc7c29f 100644 --- a/packages/@aws-cdk/aws-ecr/package.json +++ b/packages/@aws-cdk/aws-ecr/package.json @@ -83,7 +83,7 @@ }, "license": "Apache-2.0", "devDependencies": { - "@aws-cdk/assert-internal": "0.0.0", + "@aws-cdk/assertions": "0.0.0", "@aws-cdk/cdk-build-tools": "0.0.0", "@aws-cdk/cdk-integ-tools": "0.0.0", "@aws-cdk/cfn2ts": "0.0.0", diff --git a/packages/@aws-cdk/aws-ecr/test/auth-token.test.ts b/packages/@aws-cdk/aws-ecr/test/auth-token.test.ts index f9be93b1e15d0..421ba7c2f6645 100644 --- 
a/packages/@aws-cdk/aws-ecr/test/auth-token.test.ts +++ b/packages/@aws-cdk/aws-ecr/test/auth-token.test.ts @@ -1,4 +1,4 @@ -import { expect as expectCDK, haveResourceLike } from '@aws-cdk/assert-internal'; +import { Template } from '@aws-cdk/assertions'; import * as iam from '@aws-cdk/aws-iam'; import { Stack } from '@aws-cdk/core'; import { AuthorizationToken, PublicGalleryAuthorizationToken } from '../lib'; @@ -13,7 +13,7 @@ describe('auth-token', () => { AuthorizationToken.grantRead(user); // THEN - expectCDK(stack).to(haveResourceLike('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -23,7 +23,7 @@ describe('auth-token', () => { }, ], }, - })); + }); }); test('PublicGalleryAuthorizationToken.grantRead()', () => { @@ -35,7 +35,7 @@ describe('auth-token', () => { PublicGalleryAuthorizationToken.grantRead(user); // THEN - expectCDK(stack).to(haveResourceLike('AWS::IAM::Policy', { + Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { PolicyDocument: { Statement: [ { @@ -48,6 +48,6 @@ describe('auth-token', () => { }, ], }, - })); + }); }); }); diff --git a/packages/@aws-cdk/aws-ecr/test/repository.test.ts b/packages/@aws-cdk/aws-ecr/test/repository.test.ts index 37cc58a8485eb..470638d89355e 100644 --- a/packages/@aws-cdk/aws-ecr/test/repository.test.ts +++ b/packages/@aws-cdk/aws-ecr/test/repository.test.ts @@ -1,5 +1,5 @@ import { EOL } from 'os'; -import { expect as expectCDK, haveResource, haveResourceLike, ResourcePart } from '@aws-cdk/assert-internal'; +import { Template } from '@aws-cdk/assertions'; import * as iam from '@aws-cdk/aws-iam'; import * as cdk from '@aws-cdk/core'; import * as ecr from '../lib'; @@ -15,7 +15,7 @@ describe('repository', () => { new ecr.Repository(stack, 'Repo'); // THEN - expectCDK(stack).toMatch({ + Template.fromStack(stack).templateMatches({ Resources: { Repo02AC86CF: { Type: 'AWS::ECR::Repository', @@ -34,11 +34,11 @@ describe('repository', () => { new ecr.Repository(stack, 'Repo', { imageScanOnPush: true }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { ImageScanningConfiguration: { ScanOnPush: true, }, - })); + }); }); test('tag-based lifecycle policy', () => { @@ -50,12 +50,12 @@ describe('repository', () => { repo.addLifecycleRule({ tagPrefixList: ['abc'], maxImageCount: 1 }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { LifecyclePolicy: { // eslint-disable-next-line max-len LifecyclePolicyText: '{"rules":[{"rulePriority":1,"selection":{"tagStatus":"tagged","tagPrefixList":["abc"],"countType":"imageCountMoreThan","countNumber":1},"action":{"type":"expire"}}]}', }, - })); + }); }); test('image tag mutability can be set', () => { @@ -64,9 +64,9 @@ describe('repository', () => { new ecr.Repository(stack, 'Repo', { imageTagMutability: ecr.TagMutability.IMMUTABLE }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { ImageTagMutability: 'IMMUTABLE', - })); + }); }); test('add day-based lifecycle policy', () => { @@ -80,12 +80,12 @@ describe('repository', () => { }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { LifecyclePolicy: { // eslint-disable-next-line max-len 
LifecyclePolicyText: '{"rules":[{"rulePriority":1,"selection":{"tagStatus":"any","countType":"sinceImagePushed","countNumber":5,"countUnit":"days"},"action":{"type":"expire"}}]}', }, - })); + }); }); test('add count-based lifecycle policy', () => { @@ -99,12 +99,12 @@ describe('repository', () => { }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { LifecyclePolicy: { // eslint-disable-next-line max-len LifecyclePolicyText: '{"rules":[{"rulePriority":1,"selection":{"tagStatus":"any","countType":"imageCountMoreThan","countNumber":5},"action":{"type":"expire"}}]}', }, - })); + }); }); test('mixing numbered and unnumbered rules', () => { @@ -117,12 +117,12 @@ describe('repository', () => { repo.addLifecycleRule({ rulePriority: 10, tagStatus: ecr.TagStatus.TAGGED, tagPrefixList: ['b'], maxImageCount: 5 }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { LifecyclePolicy: { // eslint-disable-next-line max-len LifecyclePolicyText: '{"rules":[{"rulePriority":10,"selection":{"tagStatus":"tagged","tagPrefixList":["b"],"countType":"imageCountMoreThan","countNumber":5},"action":{"type":"expire"}},{"rulePriority":11,"selection":{"tagStatus":"tagged","tagPrefixList":["a"],"countType":"imageCountMoreThan","countNumber":5},"action":{"type":"expire"}}]}', }, - })); + }); }); test('tagstatus Any is automatically sorted to the back', () => { @@ -135,12 +135,12 @@ describe('repository', () => { repo.addLifecycleRule({ tagStatus: ecr.TagStatus.TAGGED, tagPrefixList: ['important'], maxImageCount: 999 }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { LifecyclePolicy: { // eslint-disable-next-line max-len LifecyclePolicyText: '{"rules":[{"rulePriority":1,"selection":{"tagStatus":"tagged","tagPrefixList":["important"],"countType":"imageCountMoreThan","countNumber":999},"action":{"type":"expire"}},{"rulePriority":2,"selection":{"tagStatus":"any","countType":"imageCountMoreThan","countNumber":5},"action":{"type":"expire"}}]}', }, - })); + }); }); test('lifecycle rules can be added upon initialization', () => { @@ -155,12 +155,12 @@ describe('repository', () => { }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { 'LifecyclePolicy': { // eslint-disable-next-line max-len 'LifecyclePolicyText': '{"rules":[{"rulePriority":1,"selection":{"tagStatus":"any","countType":"imageCountMoreThan","countNumber":3},"action":{"type":"expire"}}]}', }, - })); + }); }); test('calculate repository URI', () => { @@ -168,21 +168,24 @@ describe('repository', () => { const stack = new cdk.Stack(); const repo = new ecr.Repository(stack, 'Repo'); - // WHEN - const uri = repo.repositoryUri; + new cdk.CfnOutput(stack, 'RepoUri', { + value: repo.repositoryUri, + }); // THEN const arnSplit = { 'Fn::Split': [':', { 'Fn::GetAtt': ['Repo02AC86CF', 'Arn'] }] }; - expectCDK(stack.resolve(uri)).toMatch({ - 'Fn::Join': ['', [ - { 'Fn::Select': [4, arnSplit] }, - '.dkr.ecr.', - { 'Fn::Select': [3, arnSplit] }, - '.', - { Ref: 'AWS::URLSuffix' }, - '/', - { Ref: 'Repo02AC86CF' }, - ]], + Template.fromStack(stack).hasOutput('*', { + 'Value': { + 'Fn::Join': ['', [ + { 'Fn::Select': [4, arnSplit] }, + '.dkr.ecr.', + { 'Fn::Select': [3, arnSplit] }, + '.', + { Ref: 
'AWS::URLSuffix' }, + '/', + { Ref: 'Repo02AC86CF' }, + ]], + }, }); }); @@ -217,10 +220,20 @@ describe('repository', () => { repositoryArn: cdk.Fn.getAtt('Boom', 'Arn').toString(), repositoryName: cdk.Fn.getAtt('Boom', 'Name').toString(), }); + new cdk.CfnOutput(stack, 'RepoArn', { + value: repo.repositoryArn, + }); + new cdk.CfnOutput(stack, 'RepoName', { + value: repo.repositoryName, + }); // THEN - expectCDK(stack.resolve(repo.repositoryArn)).toMatch({ 'Fn::GetAtt': ['Boom', 'Arn'] }); - expectCDK(stack.resolve(repo.repositoryName)).toMatch({ 'Fn::GetAtt': ['Boom', 'Name'] }); + Template.fromStack(stack).hasOutput('*', { + Value: { 'Fn::GetAtt': ['Boom', 'Arn'] }, + }); + Template.fromStack(stack).hasOutput('*', { + Value: { 'Fn::GetAtt': ['Boom', 'Name'] }, + }); }); test('import only with a repository name (arn is deduced)', () => { @@ -229,20 +242,30 @@ describe('repository', () => { // WHEN const repo = ecr.Repository.fromRepositoryName(stack, 'just-name', 'my-repo'); + new cdk.CfnOutput(stack, 'RepoArn', { + value: repo.repositoryArn, + }); + new cdk.CfnOutput(stack, 'RepoName', { + value: repo.repositoryName, + }); // THEN - expectCDK(stack.resolve(repo.repositoryArn)).toMatch({ - 'Fn::Join': ['', [ - 'arn:', - { Ref: 'AWS::Partition' }, - ':ecr:', - { Ref: 'AWS::Region' }, - ':', - { Ref: 'AWS::AccountId' }, - ':repository/my-repo', - ]], - }); - expect(stack.resolve(repo.repositoryName)).toBe('my-repo'); + Template.fromStack(stack).hasOutput('*', { + Value: { + 'Fn::Join': ['', [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':ecr:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':repository/my-repo', + ]], + }, + }); + Template.fromStack(stack).hasOutput('*', { + Value: 'my-repo', + }); }); test('arnForLocalRepository can be used to render an ARN for a local repository', () => { @@ -255,20 +278,30 @@ describe('repository', () => { repositoryArn: ecr.Repository.arnForLocalRepository(repoName, stack), repositoryName: repoName, }); + new cdk.CfnOutput(stack, 'RepoArn', { + value: repo.repositoryArn, + }); + new cdk.CfnOutput(stack, 'RepoName', { + value: repo.repositoryName, + }); // THEN - expectCDK(stack.resolve(repo.repositoryName)).toMatch({ 'Fn::GetAtt': ['Boom', 'Name'] }); - expectCDK(stack.resolve(repo.repositoryArn)).toMatch({ - 'Fn::Join': ['', [ - 'arn:', - { Ref: 'AWS::Partition' }, - ':ecr:', - { Ref: 'AWS::Region' }, - ':', - { Ref: 'AWS::AccountId' }, - ':repository/', - { 'Fn::GetAtt': ['Boom', 'Name'] }, - ]], + Template.fromStack(stack).hasOutput('*', { + Value: { 'Fn::GetAtt': ['Boom', 'Name'] }, + }); + Template.fromStack(stack).hasOutput('*', { + Value: { + 'Fn::Join': ['', [ + 'arn:', + { Ref: 'AWS::Partition' }, + ':ecr:', + { Ref: 'AWS::Region' }, + ':', + { Ref: 'AWS::AccountId' }, + ':repository/', + { 'Fn::GetAtt': ['Boom', 'Name'] }, + ]], + }, }); }); @@ -284,7 +317,7 @@ describe('repository', () => { })); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { RepositoryPolicyText: { Statement: [ { @@ -295,7 +328,7 @@ describe('repository', () => { ], Version: '2012-10-17', }, - })); + }); }); test('fails if repository policy has no actions', () => { @@ -341,7 +374,7 @@ describe('repository', () => { }, }); - expectCDK(stack).to(haveResourceLike('AWS::Events::Rule', { + Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { 'EventPattern': { 'source': [ 'aws.ecr', @@ -360,7 +393,7 @@ describe('repository', () => { }, }, 'State': 
'ENABLED', - })); + }); }); test('onImageScanCompleted without imageTags creates the correct event', () => { @@ -373,7 +406,7 @@ describe('repository', () => { }, }); - expectCDK(stack).to(haveResourceLike('AWS::Events::Rule', { + Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { 'EventPattern': { 'source': [ 'aws.ecr', @@ -390,7 +423,7 @@ describe('repository', () => { }, }, 'State': 'ENABLED', - })); + }); }); test('onImageScanCompleted with one imageTag creates the correct event', () => { @@ -404,7 +437,7 @@ describe('repository', () => { }, }); - expectCDK(stack).to(haveResourceLike('AWS::Events::Rule', { + Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { 'EventPattern': { 'source': [ 'aws.ecr', @@ -424,7 +457,7 @@ describe('repository', () => { }, }, 'State': 'ENABLED', - })); + }); }); test('onImageScanCompleted with multiple imageTags creates the correct event', () => { @@ -438,7 +471,7 @@ describe('repository', () => { }, }); - expectCDK(stack).to(haveResourceLike('AWS::Events::Rule', { + Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { 'EventPattern': { 'source': [ 'aws.ecr', @@ -460,7 +493,7 @@ describe('repository', () => { }, }, 'State': 'ENABLED', - })); + }); }); test('removal policy is "Retain" by default', () => { @@ -471,10 +504,10 @@ describe('repository', () => { new ecr.Repository(stack, 'Repo'); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResource('AWS::ECR::Repository', { 'Type': 'AWS::ECR::Repository', 'DeletionPolicy': 'Retain', - }, ResourcePart.CompleteDefinition)); + }); }); test('"Delete" removal policy can be set explicitly', () => { @@ -487,10 +520,10 @@ describe('repository', () => { }); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResource('AWS::ECR::Repository', { 'Type': 'AWS::ECR::Repository', 'DeletionPolicy': 'Delete', - }, ResourcePart.CompleteDefinition)); + }); }); test('grant adds appropriate resource-*', () => { @@ -502,7 +535,7 @@ describe('repository', () => { repo.grantPull(new iam.AnyPrincipal()); // THEN - expectCDK(stack).to(haveResource('AWS::ECR::Repository', { + Template.fromStack(stack).hasResourceProperties('AWS::ECR::Repository', { 'RepositoryPolicyText': { 'Statement': [ { @@ -512,12 +545,12 @@ describe('repository', () => { 'ecr:BatchGetImage', ], 'Effect': 'Allow', - 'Principal': { AWS: '*' }, + 'Principal': { 'AWS': '*' }, }, ], 'Version': '2012-10-17', }, - })); + }); }); }); diff --git a/packages/@aws-cdk/aws-servicediscovery/package.json b/packages/@aws-cdk/aws-servicediscovery/package.json index 9a9d267d35904..3fcee22ba4a66 100644 --- a/packages/@aws-cdk/aws-servicediscovery/package.json +++ b/packages/@aws-cdk/aws-servicediscovery/package.json @@ -75,7 +75,7 @@ }, "license": "Apache-2.0", "devDependencies": { - "@aws-cdk/assert-internal": "0.0.0", + "@aws-cdk/assertions": "0.0.0", "@aws-cdk/cdk-build-tools": "0.0.0", "@aws-cdk/cdk-integ-tools": "0.0.0", "@aws-cdk/cfn2ts": "0.0.0", diff --git a/packages/@aws-cdk/aws-servicediscovery/test/instance.test.ts b/packages/@aws-cdk/aws-servicediscovery/test/instance.test.ts index 89ee3a81f4bfa..0fb472c1b1130 100644 --- a/packages/@aws-cdk/aws-servicediscovery/test/instance.test.ts +++ b/packages/@aws-cdk/aws-servicediscovery/test/instance.test.ts @@ -1,4 +1,4 @@ -import '@aws-cdk/assert-internal/jest'; +import { Template } from '@aws-cdk/assertions'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as elbv2 from 
'@aws-cdk/aws-elasticloadbalancingv2'; import * as cdk from '@aws-cdk/core'; @@ -24,7 +24,7 @@ describe('instance', () => { }); // THEN - expect(stack).toHaveResource('AWS::ServiceDiscovery::Instance', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::Instance', { InstanceAttributes: { AWS_INSTANCE_IPV4: '10.0.0.0', AWS_INSTANCE_IPV6: '0:0:0:0:0:ffff:a00:0', @@ -62,7 +62,7 @@ describe('instance', () => { }); // THEN - expect(stack).toHaveResource('AWS::ServiceDiscovery::Instance', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::Instance', { InstanceAttributes: { AWS_INSTANCE_IPV4: '54.239.25.192', AWS_INSTANCE_IPV6: '0:0:0:0:0:ffff:a00:0', @@ -102,7 +102,7 @@ describe('instance', () => { }); // THEN - expect(stack).toHaveResource('AWS::ServiceDiscovery::Instance', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::Instance', { InstanceAttributes: { AWS_INSTANCE_IPV4: '10.0.0.0', AWS_INSTANCE_IPV6: '0:0:0:0:0:ffff:a00:0', @@ -256,7 +256,7 @@ describe('instance', () => { service.registerLoadBalancer('Loadbalancer', alb, customAttributes); // THEN - expect(stack).toHaveResource('AWS::ServiceDiscovery::Instance', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::Instance', { InstanceAttributes: { AWS_ALIAS_DNS_NAME: { 'Fn::GetAtt': [ @@ -343,7 +343,7 @@ describe('instance', () => { }); // THEN - expect(stack).toHaveResource('AWS::ServiceDiscovery::Instance', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::Instance', { InstanceAttributes: { AWS_INSTANCE_CNAME: 'foo.com', dogs: 'good', @@ -397,7 +397,7 @@ describe('instance', () => { }); // THEN - expect(stack).toHaveResource('AWS::ServiceDiscovery::Instance', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::Instance', { InstanceAttributes: { dogs: 'good', }, @@ -494,7 +494,7 @@ describe('instance', () => { }); // THEN - expect(stack).toCountResources('AWS::ServiceDiscovery::Instance', 2); + Template.fromStack(stack).resourceCountIs('AWS::ServiceDiscovery::Instance', 2); }); diff --git a/packages/@aws-cdk/aws-servicediscovery/test/namespace.test.ts b/packages/@aws-cdk/aws-servicediscovery/test/namespace.test.ts index d0c70b057534b..30ccf20fc85f1 100644 --- a/packages/@aws-cdk/aws-servicediscovery/test/namespace.test.ts +++ b/packages/@aws-cdk/aws-servicediscovery/test/namespace.test.ts @@ -1,4 +1,4 @@ -import '@aws-cdk/assert-internal/jest'; +import { Template } from '@aws-cdk/assertions'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as cdk from '@aws-cdk/core'; import * as servicediscovery from '../lib'; @@ -11,7 +11,7 @@ describe('namespace', () => { name: 'foobar.com', }); - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyNamespaceD0BB8558: { Type: 'AWS::ServiceDiscovery::HttpNamespace', @@ -32,7 +32,7 @@ describe('namespace', () => { name: 'foobar.com', }); - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyNamespaceD0BB8558: { Type: 'AWS::ServiceDiscovery::PublicDnsNamespace', @@ -55,7 +55,7 @@ describe('namespace', () => { vpc, }); - expect(stack).toHaveResource('AWS::ServiceDiscovery::PrivateDnsNamespace', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::PrivateDnsNamespace', { Name: 'foobar.com', Vpc: { Ref: 'MyVpcF9F0CA6F', diff --git a/packages/@aws-cdk/aws-servicediscovery/test/service.test.ts b/packages/@aws-cdk/aws-servicediscovery/test/service.test.ts 
index 44c5ba3e97c6b..556bc6c2c7260 100644 --- a/packages/@aws-cdk/aws-servicediscovery/test/service.test.ts +++ b/packages/@aws-cdk/aws-servicediscovery/test/service.test.ts @@ -1,4 +1,4 @@ -import '@aws-cdk/assert-internal/jest'; +import { Template } from '@aws-cdk/assertions'; import * as ec2 from '@aws-cdk/aws-ec2'; import * as cdk from '@aws-cdk/core'; import * as servicediscovery from '../lib'; @@ -21,7 +21,7 @@ describe('service', () => { }); // THEN - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyNamespaceD0BB8558: { Type: 'AWS::ServiceDiscovery::HttpNamespace', @@ -69,7 +69,7 @@ describe('service', () => { }); // THEN - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyNamespaceD0BB8558: { Type: 'AWS::ServiceDiscovery::HttpNamespace', @@ -118,7 +118,7 @@ describe('service', () => { }); // THEN - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyNamespaceD0BB8558: { Type: 'AWS::ServiceDiscovery::PublicDnsNamespace', @@ -176,7 +176,7 @@ describe('service', () => { }); // THEN - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyNamespaceD0BB8558: { Type: 'AWS::ServiceDiscovery::PublicDnsNamespace', @@ -233,7 +233,7 @@ describe('service', () => { }); // THEN - expect(stack).toMatchTemplate({ + Template.fromStack(stack).templateMatches({ Resources: { MyNamespaceD0BB8558: { Type: 'AWS::ServiceDiscovery::PublicDnsNamespace', @@ -417,11 +417,11 @@ describe('service', () => { }); // THEN - expect(stack).toHaveResource('AWS::ServiceDiscovery::PrivateDnsNamespace', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::PrivateDnsNamespace', { Name: 'private', }); - expect(stack).toHaveResource('AWS::ServiceDiscovery::Service', { + Template.fromStack(stack).hasResourceProperties('AWS::ServiceDiscovery::Service', { Description: 'service description', DnsConfig: { DnsRecords: [ diff --git a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json index 5b6f0ce4d8f0c..9b9671adcf8ae 100644 --- a/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json +++ b/packages/@aws-cdk/cfnspec/spec-source/cfn-docs/cfn-docs.json @@ -8317,18 +8317,18 @@ }, "AWS::Cognito::UserPool.CustomEmailSender": { "attributes": {}, - "description": "", + "description": "A custom email sender AWS Lambda trigger.", "properties": { - "LambdaArn": "", - "LambdaVersion": "" + "LambdaArn": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users.", + "LambdaVersion": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information that Amazon Cognito passes to your custom email sender AWS Lambda function. The only supported value is `V1_0` ." } }, "AWS::Cognito::UserPool.CustomSMSSender": { "attributes": {}, - "description": "", + "description": "A custom SMS sender AWS Lambda trigger.", "properties": { - "LambdaArn": "", - "LambdaVersion": "" + "LambdaArn": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send SMS notifications to users.", + "LambdaVersion": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS sender Lambda function. The only supported value is `V1_0` ." 
} }, "AWS::Cognito::UserPool.DeviceConfiguration": { @@ -8364,11 +8364,11 @@ "description": "Specifies the configuration for AWS Lambda triggers.", "properties": { "CreateAuthChallenge": "Creates an authentication challenge.", - "CustomEmailSender": "", + "CustomEmailSender": "A custom email sender AWS Lambda trigger.", "CustomMessage": "A custom Message AWS Lambda trigger.", - "CustomSMSSender": "", + "CustomSMSSender": "A custom SMS sender AWS Lambda trigger.", "DefineAuthChallenge": "Defines the authentication challenge.", - "KMSKeyID": "", + "KMSKeyID": "The Amazon Resource Name of a AWS Key Management Service ( AWS KMS ) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to `CustomEmailSender` and `CustomSMSSender` .", "PostAuthentication": "A post-authentication AWS Lambda trigger.", "PostConfirmation": "A post-confirmation AWS Lambda trigger.", "PreAuthentication": "A pre-authentication AWS Lambda trigger.", @@ -8500,7 +8500,7 @@ "attributes": {}, "description": "The Amazon Pinpoint analytics configuration for collecting metrics for a user pool.\n\n> In Regions where Pinpoint isn't available, User Pools only supports sending events to Amazon Pinpoint projects in us-east-1. In Regions where Pinpoint is available, User Pools will support sending events to Amazon Pinpoint projects within that same Region.", "properties": { - "ApplicationArn": "", + "ApplicationArn": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project for integration with the chosen user pool client. Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares.", "ApplicationId": "The application ID for an Amazon Pinpoint application.", "ExternalId": "The external ID.", "RoleArn": "The ARN of an AWS Identity and Access Management role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics.", @@ -8768,7 +8768,7 @@ }, "AWS::Config::ConfigurationAggregator": { "attributes": { - "ConfigurationAggregatorArn": "", + "ConfigurationAggregatorArn": "The Amazon Resource Name (ARN) of the aggregator.", "Ref": "`Ref` returns the ConfigurationAggregatorName, such as `myConfigurationAggregator` ." }, "description": "The details about the configuration aggregator, including information about source accounts, regions, and metadata of the aggregator.", @@ -8999,8 +8999,8 @@ }, "AWS::Config::StoredQuery": { "attributes": { - "QueryArn": "", - "QueryId": "", + "QueryArn": "Amazon Resource Name (ARN) of the query. For example, arn:partition:service:region:account-id:resource-type/resource-name/resource-id.", + "QueryId": "The ID of the query.", "Ref": "" }, "description": "Provides the details of a stored query.", @@ -13883,7 +13883,7 @@ }, "AWS::ECS::TaskDefinition.EphemeralStorage": { "attributes": {}, - "description": "The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate . For more information, see [Fargate task storage](https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) in the *Amazon ECS User Guide for AWS Fargate* .\n\n> This parameter is only supported for tasks hosted on Fargate using the following platform versions:\n> \n> - Linux platform version `1.4.0` or later.\n> - Windows platform version `1.0.0` or later.", + "description": "The amount of ephemeral storage to allocate for the task. 
This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate . For more information, see [Fargate task storage](https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html) in the *Amazon ECS User Guide for AWS Fargate* .\n\n> This parameter is only supported for tasks hosted on Fargate using Linux platform version `1.4.0` or later. This parameter is not supported for Windows containers on Fargate.", "properties": { "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `21` GiB and the maximum supported value is `200` GiB." } @@ -17982,10 +17982,10 @@ }, "AWS::Glue::Database.PrincipalPrivileges": { "attributes": {}, - "description": "", + "description": "the permissions granted to a principal", "properties": { - "Permissions": "", - "Principal": "" + "Permissions": "The permissions that are granted to the principal.", + "Principal": "The principal who is granted permissions." } }, "AWS::Glue::DevEndpoint": { @@ -22798,7 +22798,7 @@ "ServiceNowConfiguration": "Provides configuration for data sources that connect to ServiceNow instances.", "SharePointConfiguration": "Provides information necessary to create a data source connector for a Microsoft SharePoint site.", "WebCrawlerConfiguration": "Provides the configuration information required for Amazon Kendra Web Crawler.", - "WorkDocsConfiguration": "" + "WorkDocsConfiguration": "Provides the configuration information to connect to WorkDocs as your data source." } }, "AWS::Kendra::DataSource.DataSourceToIndexFieldMapping": { @@ -22873,11 +22873,11 @@ }, "AWS::Kendra::DataSource.ProxyConfiguration": { "attributes": {}, - "description": "", + "description": "Provides the configuration information for a web proxy to connect to website hosts.", "properties": { - "Credentials": "", - "Host": "", - "Port": "" + "Credentials": "Your secret ARN, which you can create in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html)\n\nThe credentials are optional. You use a secret if web proxy credentials are required to connect to a website host. Amazon Kendra currently support basic authentication to connect to a web proxy server. The secret stores your credentials.", + "Host": "The name of the website host you want to connect to via a web proxy server.\n\nFor example, the host name of https://a.example.com/page1.html is \"a.example.com\".", + "Port": "The port number of the website host you want to connect to via a web proxy server.\n\nFor example, the port for https://a.example.com/page1.html is 443, the standard port for HTTPS." } }, "AWS::Kendra::DataSource.S3DataSourceConfiguration": { @@ -23054,14 +23054,14 @@ "description": "", "properties": { "AuthenticationConfiguration": "", - "CrawlDepth": "", - "MaxContentSizePerPageInMegaBytes": "", - "MaxLinksPerPage": "", - "MaxUrlsPerMinuteCrawlRate": "", - "ProxyConfiguration": "", - "UrlExclusionPatterns": "", - "UrlInclusionPatterns": "", - "Urls": "Specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl.\n\nYou can include website subdomains. You can list up to 100 seed URLs and up to three sitemap URLs.\n\nYou can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). 
If you receive an error when crawling a website, it could be that the website is blocked from crawling.\n\n*When selecting websites to index, you must adhere to the [Amazon Acceptable Use Policy](https://docs.aws.amazon.com/aup/) and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.*" + "CrawlDepth": "Specifies the number of levels in a website that you want to crawl.\n\nThe first level begins from the website seed or starting point URL. For example, if a website has 3 levels \u2013 index level (i.e. seed in this example), sections level, and subsections level \u2013 and you are only interested in crawling information up to the sections level (i.e. levels 0-1), you can set your depth to 1.\n\nThe default crawl depth is set to 2.", + "MaxContentSizePerPageInMegaBytes": "The maximum size (in MB) of a webpage or attachment to crawl.\n\nFiles larger than this size (in MB) are skipped/not crawled.\n\nThe default maximum size of a webpage or attachment is set to 50 MB.", + "MaxLinksPerPage": "The maximum number of URLs on a webpage to include when crawling a website. This number is per webpage.\n\nAs a website\u2019s webpages are crawled, any URLs the webpages link to are also crawled. URLs on a webpage are crawled in order of appearance.\n\nThe default maximum links per page is 100.", + "MaxUrlsPerMinuteCrawlRate": "The maximum number of URLs crawled per website host per minute.\n\nA minimum of one URL is required.\n\nThe default maximum number of URLs crawled per website host per minute is 300.", + "ProxyConfiguration": "Provides configuration information required to connect to your internal websites via a web proxy.\n\nYou must provide the website host name and port number. For example, the host name of https://a.example.com/page1.html is \"a.example.com\" and the port is 443, the standard port for HTTPS.\n\nWeb proxy credentials are optional and you can use them to connect to a web proxy server that requires basic authentication. To store web proxy credentials, you use a secret in [AWS Secrets Manager](https://docs.aws.amazon.com/secretsmanager/latest/userguide/intro.html) .", + "UrlExclusionPatterns": "The regular expression pattern to exclude certain URLs to crawl.\n\nIf there is a regular expression pattern to include certain URLs that conflicts with the exclude pattern, the exclude pattern takes precedence.", + "UrlInclusionPatterns": "The regular expression pattern to include certain URLs to crawl.\n\nIf there is a regular expression pattern to exclude certain URLs that conflicts with the include pattern, the exclude pattern takes precedence.", + "Urls": "Specifies the seed or starting point URLs of the websites or the sitemap URLs of the websites you want to crawl.\n\nYou can include website subdomains. You can list up to 100 seed URLs and up to three sitemap URLs.\n\nYou can only crawl websites that use the secure communication protocol, Hypertext Transfer Protocol Secure (HTTPS). If you receive an error when crawling a website, it could be that the website is blocked from crawling.\n\n*When selecting websites to index, you must adhere to the [Amazon Acceptable Use Policy](https://docs.aws.amazon.com/aup/) and all other Amazon terms. 
Remember that you must only use Amazon Kendra Web Crawler to index your own webpages, or webpages that you have authorization to index.*" } }, "AWS::Kendra::DataSource.WebCrawlerSeedUrlConfiguration": { @@ -23076,7 +23076,7 @@ "attributes": {}, "description": "Provides the configuration information of the sitemap URLs to crawl.\n\n*When selecting websites to index, you must adhere to the [Amazon Acceptable Use Policy](https://docs.aws.amazon.com/aup/) and all other Amazon terms. Remember that you must only use the Amazon Kendra web crawler to index your own webpages, or webpages that you have authorization to index.*", "properties": { - "SiteMaps": "" + "SiteMaps": "The list of sitemap URLs of the websites you want to crawl.\n\nThe list can include a maximum of three sitemap URLs." } }, "AWS::Kendra::DataSource.WebCrawlerUrls": { @@ -23089,14 +23089,14 @@ }, "AWS::Kendra::DataSource.WorkDocsConfiguration": { "attributes": {}, - "description": "", + "description": "Provides the configuration information to connect to Amazon WorkDocs as your data source.\n\nAmazon WorkDocs connector is available in Oregon, North Virginia, Sydney, Singapore and Ireland regions.", "properties": { - "CrawlComments": "", - "ExclusionPatterns": "", - "FieldMappings": "", - "InclusionPatterns": "", - "OrganizationId": "", - "UseChangeLog": "" + "CrawlComments": "`TRUE` to include comments on documents in your index. Including comments in your index means each comment is a document that can be searched on.\n\nThe default is set to `FALSE` .", + "ExclusionPatterns": "A list of regular expression patterns to exclude certain files in your Amazon WorkDocs site repository. Files that match the patterns are excluded from the index. Files that don\u2019t match the patterns are included in the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn\u2019t included in the index.", + "FieldMappings": "A list of `DataSourceToIndexFieldMapping` objects that map Amazon WorkDocs field names to custom index field names in Amazon Kendra. You must first create the custom index fields using the `UpdateIndex` operation before you map to Amazon WorkDocs fields. For more information, see [Mapping Data Source Fields](https://docs.aws.amazon.com/kendra/latest/dg/field-mapping.html) . The Amazon WorkDocs data source field names need to exist in your Amazon WorkDocs custom metadata.", + "InclusionPatterns": "A list of regular expression patterns to include certain files in your Amazon WorkDocs site repository. Files that match the patterns are included in the index. Files that don't match the patterns are excluded from the index. If a file matches both an inclusion pattern and an exclusion pattern, the exclusion pattern takes precedence and the file isn\u2019t included in the index.", + "OrganizationId": "The identifier of the directory corresponding to your Amazon WorkDocs site repository.\n\nYou can find the organization ID in the [AWS Directory Service](https://docs.aws.amazon.com/directoryservicev2/) by going to *Active Directory* , then *Directories* . Your Amazon WorkDocs site directory has an ID, which is the organization ID. 
You can also set up a new Amazon WorkDocs directory in the AWS Directory Service console and enable a Amazon WorkDocs site for the directory in the Amazon WorkDocs console.", + "UseChangeLog": "`TRUE` to use the change logs to update documents in your index instead of scanning all documents.\n\nIf you are syncing your Amazon WorkDocs data source with your index for the first time, all documents are scanned. After your first sync, you can use the change logs to update your documents in your index for future syncs.\n\nThe default is set to `FALSE` ." } }, "AWS::Kendra::Faq": { @@ -32158,7 +32158,7 @@ "AllowMajorVersionUpgrade": "A value that indicates whether major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.\n\nConstraints: Major version upgrades must be allowed when specifying a value for the `EngineVersion` parameter that is a different major version than the DB instance's current version.", "AssociatedRoles": "The AWS Identity and Access Management (IAM) roles associated with the DB instance.", "AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.", - "AvailabilityZone": "The Availability Zone (AZ) where the database will be created. For information on AWS Regions and Availability Zones, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) .\n\n*Amazon Aurora*\n\nNot applicable. Availability Zones are managed by the DB cluster.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region.\n\nExample: `us-east-1d`\n\nConstraint: The `AvailabilityZone` parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same AWS Region as the current endpoint.\n\n> If you're creating a DB instance in an RDS on VMware environment, specify the identifier of the custom Availability Zone to create the DB instance in.\n> \n> For more information about RDS on VMware, see the [RDS on VMware User Guide.](https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html)", + "AvailabilityZone": "The Availability Zone that the database instance will be created in.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's region.\n\nExample: `us-east-1d`\n\nConstraint: The AvailabilityZone parameter cannot be specified if the MultiAZ parameter is set to `true` . The specified Availability Zone must be in the same region as the current endpoint.", "BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n\n*Amazon Aurora*\n\nNot applicable. The retention period for automated backups is managed by the DB cluster.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 0 to 35\n- Can't be set to 0 if the DB instance is a source to read replicas", "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\n> Specifying or updating this property triggers a reboot. 
\n\nFor more information about CA certificate identifiers for RDS DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon RDS User Guide* .\n\nFor more information about CA certificate identifiers for Aurora DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon Aurora User Guide* .", "CharacterSetName": "For supported engines, indicates that the DB instance should be associated with the specified character set.\n\n*Amazon Aurora*\n\nNot applicable. The character set is managed by the DB cluster. For more information, see [AWS::RDS::DBCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html) .", @@ -32230,7 +32230,7 @@ }, "description": "The `AWS::RDS::DBParameterGroup` resource creates a custom parameter group for an RDS database family.\n\nThis type can be declared in a template and referenced in the `DBParameterGroupName` property of an `[AWS::RDS::DBInstance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html)` resource.\n\nFor information about configuring parameters for Amazon RDS DB instances, see [Working with DB parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor information about configuring parameters for Amazon Aurora DB instances, see [Working with DB parameter groups and DB cluster parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> Applying a parameter group to a DB instance may require the DB instance to reboot, resulting in a database outage for the duration of the reboot.", "properties": { - "Description": "Provides the customer-specified description for this DB parameter group.", + "Description": "Provides the customer-specified description for this DB Parameter Group.", "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", "Parameters": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. 
Subsequent arguments are optional.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "Tags": "Tags to assign to the DB parameter group." @@ -32334,7 +32334,7 @@ "properties": { "DBSecurityGroupIngress": "Ingress rules to be applied to the DB security group.", "EC2VpcId": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", - "GroupDescription": "Provides the description of the DB security group.", + "GroupDescription": "Provides the description of the DB Security Group.", "Tags": "Tags to assign to the DB security group." } }, @@ -32343,9 +32343,9 @@ "description": "The `Ingress` property type specifies an individual ingress rule within an `AWS::RDS::DBSecurityGroup` resource.", "properties": { "CIDRIP": "The IP range to authorize.", - "EC2SecurityGroupId": "Id of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupName": "Name of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupOwnerId": "AWS account number of the owner of the EC2 security group specified in the `EC2SecurityGroupName` parameter. The AWS access key ID isn't an acceptable value. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." + "EC2SecurityGroupId": "Id of the EC2 Security Group to authorize. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupName": "Name of the EC2 Security Group to authorize. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupOwnerId": "AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." 
} }, "AWS::RDS::DBSecurityGroupIngress": { @@ -32355,10 +32355,10 @@ "description": "The `AWS::RDS::DBSecurityGroupIngress` resource enables ingress to a DB security group using one of two forms of authorization. First, you can add EC2 or VPC security groups to the DB security group if the application using the database is running on EC2 or VPC instances. Second, IP ranges are available if the application accessing your database is running on the Internet.\n\nThis type supports updates. For more information about updating stacks, see [AWS CloudFormation Stacks Updates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html) .\n\nFor details about the settings for DB security group ingress, see [AuthorizeDBSecurityGroupIngress](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_AuthorizeDBSecurityGroupIngress.html) .", "properties": { "CIDRIP": "The IP range to authorize.", - "DBSecurityGroupName": "The name of the DB security group to add authorization to.", - "EC2SecurityGroupId": "Id of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupName": "Name of the EC2 security group to authorize. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", - "EC2SecurityGroupOwnerId": "AWS account number of the owner of the EC2 security group specified in the `EC2SecurityGroupName` parameter. The AWS access key ID isn't an acceptable value. For VPC DB security groups, `EC2SecurityGroupId` must be provided. Otherwise, `EC2SecurityGroupOwnerId` and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." + "DBSecurityGroupName": "The name of the DB Security Group to add authorization to.", + "EC2SecurityGroupId": "Id of the EC2 Security Group to authorize. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupName": "Name of the EC2 Security Group to authorize. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided.", + "EC2SecurityGroupOwnerId": "AWS Account Number of the owner of the EC2 Security Group specified in the EC2SecurityGroupName parameter. The AWS Access Key ID is not an acceptable value. For VPC DB Security Groups, `EC2SecurityGroupId` must be provided. Otherwise, EC2SecurityGroupOwnerId and either `EC2SecurityGroupName` or `EC2SecurityGroupId` must be provided." } }, "AWS::RDS::DBSubnetGroup": { @@ -32367,9 +32367,9 @@ }, "description": "The `AWS::RDS::DBSubnetGroup` resource creates a database subnet group. Subnet groups must contain at least two subnets in two different Availability Zones in the same region.\n\nFor more information, see [Working with DB subnet groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html#USER_VPC.Subnets) in the *Amazon RDS User Guide* .", "properties": { - "DBSubnetGroupDescription": "The description for the DB subnet group.", + "DBSubnetGroupDescription": "The description for the DB Subnet Group.", "DBSubnetGroupName": "The name for the DB subnet group. 
This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", - "SubnetIds": "The EC2 Subnet IDs for the DB subnet group.", + "SubnetIds": "The EC2 Subnet IDs for the DB Subnet Group.", "Tags": "Tags to assign to the DB subnet group." } }, @@ -32379,8 +32379,8 @@ }, "description": "The `AWS::RDS::EventSubscription` resource allows you to receive notifications for Amazon Relational Database Service events through the Amazon Simple Notification Service (Amazon SNS). For more information, see [Using Amazon RDS Event Notification](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) in the *Amazon RDS User Guide* .", "properties": { - "Enabled": "A value that indicates whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", - "EventCategories": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", + "Enabled": "A Boolean value; set to *true* to activate the subscription, set to *false* to create the subscription but not active it.", + "EventCategories": "A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType in the [Events](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) topic in the Amazon RDS User Guide or by using the *DescribeEventCategories* action.", "SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.", "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . 
If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`" @@ -36800,8 +36800,8 @@ "MasterSecretKmsKeyArn": "The ARN of the KMS key that Secrets Manager uses to encrypt the elevated secret if you use the [alternating users strategy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets_strategies.html#rotating-secrets-two-users) . If you don't specify this value and you use the alternating users strategy, then Secrets Manager uses the key `aws/secretsmanager` . If `aws/secretsmanager` doesn't yet exist, then Secrets Manager creates it for you automatically the first time it encrypts the secret value.", "RotationLambdaName": "The name of the Lambda rotation function.", "RotationType": "The type of rotation template to use. For more information, see [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) .\n\nYou can specify one of the following `RotationTypes` :\n\n- MySQLSingleUser\n- MySQLMultiUser\n- PostgreSQLSingleUser\n- PostgreSQLMultiUser\n- OracleSingleUser\n- OracleMultiUser\n- MariaDBSingleUser\n- MariaDBMultiUser\n- SQLServerSingleUser\n- SQLServerMultiUser\n- RedshiftSingleUser\n- RedshiftMultiUser\n- MongoDBSingleUser\n- MongoDBMultiUser", - "SuperuserSecretArn": "", - "SuperuserSecretKmsKeyArn": "", + "SuperuserSecretArn": "The ARN of the secret that contains elevated credentials. The Lambda rotation function uses this secret for the [Alternating users rotation strategy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets_strategies.html#rotating-secrets-two-users) .", + "SuperuserSecretKmsKeyArn": "The ARN of the KMS key that Secrets Manager uses to encrypt the elevated secret if you use the [alternating users strategy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets_strategies.html#rotating-secrets-two-users) . If you don't specify this value and you use the alternating users strategy, then Secrets Manager uses the key `aws/secretsmanager` . If `aws/secretsmanager` doesn't yet exist, then Secrets Manager creates it for you automatically the first time it encrypts the secret value.", "VpcSecurityGroupIds": "A comma-separated list of security group IDs applied to the target database.\n\nThe templates applies the same security groups as on the Lambda rotation function that is created as part of this stack.", "VpcSubnetIds": "A comma separated list of VPC subnet IDs of the target database network. The Lambda rotation function is in the same subnet group." }
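For reference, below is a minimal sketch (not part of the patch) of the assertion style this change migrates the test suites to: the `@aws-cdk/assertions` `Template` API replaces the old `expect(stack).toHaveResource(...)` jest matchers and the `expectCDK(stack).to(haveResource(...))` helpers removed above. The API calls mirror the ones used in the diff; the stack and construct names are illustrative only.

import { Template } from '@aws-cdk/assertions';
import * as ecr from '@aws-cdk/aws-ecr';
import * as cdk from '@aws-cdk/core';

// Build a throwaway stack to assert against, mirroring the ECR repository tests above.
const stack = new cdk.Stack();
new ecr.Repository(stack, 'Repo', { imageScanOnPush: true });

const template = Template.fromStack(stack);

// Old: expectCDK(stack).to(haveResource('AWS::ECR::Repository', { ... }))
// New: partial match on a resource's Properties section.
template.hasResourceProperties('AWS::ECR::Repository', {
  ImageScanningConfiguration: { ScanOnPush: true },
});

// Old: expect(stack).toCountResources('AWS::ECR::Repository', 1)
// New: assert how many resources of a given type exist.
template.resourceCountIs('AWS::ECR::Repository', 1);

// Old: expectCDK(stack).to(haveResource(..., ResourcePart.CompleteDefinition))
// New: hasResource matches the complete resource definition, so attributes
// such as DeletionPolicy can be asserted alongside the Type.
template.hasResource('AWS::ECR::Repository', {
  Type: 'AWS::ECR::Repository',
  DeletionPolicy: 'Retain',
});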