diff --git a/packages/@aws-cdk/aws-ecs/README.md b/packages/@aws-cdk/aws-ecs/README.md index 8ffbde1d9a0e8..6a27855e031f5 100644 --- a/packages/@aws-cdk/aws-ecs/README.md +++ b/packages/@aws-cdk/aws-ecs/README.md @@ -23,7 +23,7 @@ The following example creates an Amazon ECS cluster, adds capacity to it, and runs a service on it: ```ts -import * as ecs from '@aws-cdk/aws-ecs'; +declare const vpc: ec2.Vpc; // Create an ECS cluster const cluster = new ecs.Cluster(this, 'Cluster', { @@ -89,8 +89,10 @@ tasks on. You can run many tasks on a single cluster. The following code creates a cluster that can run AWS Fargate tasks: ```ts +declare const vpc: ec2.Vpc; + const cluster = new ecs.Cluster(this, 'Cluster', { - vpc: vpc + vpc, }); ``` @@ -105,8 +107,10 @@ with various instance types. The following example creates an Amazon ECS cluster and adds capacity to it: ```ts +declare const vpc: ec2.Vpc; + const cluster = new ecs.Cluster(this, 'Cluster', { - vpc: vpc + vpc, }); // Either add default capacity @@ -119,7 +123,7 @@ cluster.addCapacity('DefaultAutoScalingGroupCapacity', { const autoScalingGroup = new autoscaling.AutoScalingGroup(this, 'ASG', { vpc, instanceType: new ec2.InstanceType('t2.xlarge'), - machineImage: EcsOptimizedImage.amazonLinux(), + machineImage: ecs.EcsOptimizedImage.amazonLinux(), // Or use Amazon ECS-Optimized Amazon Linux 2 AMI // machineImage: EcsOptimizedImage.amazonLinux2(), desiredCapacity: 3, @@ -143,9 +147,11 @@ to periodically update to the latest AMI manually by using the [CDK CLI context management commands](https://docs.aws.amazon.com/cdk/latest/guide/context.html): ```ts +declare const vpc: ec2.Vpc; const autoScalingGroup = new autoscaling.AutoScalingGroup(this, 'ASG', { - // ... - machineImage: EcsOptimizedImage.amazonLinux({ cacheInContext: true }), + machineImage: ecs.EcsOptimizedImage.amazonLinux({ cachedInContext: true }), + vpc, + instanceType: new ec2.InstanceType('t2.micro'), }); ``` @@ -159,6 +165,8 @@ The following example will create a capacity with self-managed Amazon EC2 capaci The following example adds Bottlerocket capacity to the cluster: ```ts +declare const cluster: ecs.Cluster; + cluster.addCapacity('bottlerocket-asg', { minCapacity: 2, instanceType: new ec2.InstanceType('c5.large'), @@ -174,6 +182,8 @@ for use when launching your EC2 instances that are powered by Arm-based AWS Graviton Processors. ```ts +declare const cluster: ecs.Cluster; + cluster.addCapacity('graviton-cluster', { minCapacity: 2, instanceType: new ec2.InstanceType('c6g.large'), @@ -184,10 +194,12 @@ cluster.addCapacity('graviton-cluster', { Bottlerocket is also supported: ```ts +declare const cluster: ecs.Cluster; + cluster.addCapacity('graviton-cluster', { minCapacity: 2, instanceType: new ec2.InstanceType('c6g.large'), - machineImage: ecs.MachineImageType.BOTTLEROCKET, + machineImageType: ecs.MachineImageType.BOTTLEROCKET, }); ``` @@ -196,6 +208,8 @@ cluster.addCapacity('graviton-cluster', { To add spot instances into the cluster, you must specify the `spotPrice` in the `ecs.AddCapacityOptions` and optionally enable the `spotInstanceDraining` property. 
```ts +declare const cluster: ecs.Cluster; + // Add an AutoScalingGroup with spot instances to the existing cluster cluster.addCapacity('AsgSpot', { maxCapacity: 2, @@ -217,7 +231,8 @@ then you may do so by providing a KMS key for the `topicEncryptionKey` property ```ts // Given -const key = kms.Key(...); +declare const cluster: ecs.Cluster; +declare const key: kms.Key; // Then, use that key to encrypt the lifecycle-event SNS Topic. cluster.addCapacity('ASGEncryptedSNS', { instanceType: new ec2.InstanceType("t2.xlarge"), @@ -244,7 +259,7 @@ For a `FargateTaskDefinition`, specify the task size (`memoryLimitMiB` and `cpu` ```ts const fargateTaskDefinition = new ecs.FargateTaskDefinition(this, 'TaskDef', { memoryLimitMiB: 512, - cpu: 256 + cpu: 256, }); ``` @@ -255,13 +270,17 @@ On Fargate Platform Version 1.4.0 or later, you may specify up to 200GiB of const fargateTaskDefinition = new ecs.FargateTaskDefinition(this, 'TaskDef', { memoryLimitMiB: 512, cpu: 256, - ephemeralStorageGiB: 100 + ephemeralStorageGiB: 100, }); ``` To add containers to a task definition, call `addContainer()`: ```ts +const fargateTaskDefinition = new ecs.FargateTaskDefinition(this, 'TaskDef', { + memoryLimitMiB: 512, + cpu: 256, +}); const container = fargateTaskDefinition.addContainer("WebContainer", { // Use an image from DockerHub image: ecs.ContainerImage.fromRegistry("amazon/amazon-ecs-sample"), @@ -273,13 +292,13 @@ For a `Ec2TaskDefinition`: ```ts const ec2TaskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef', { - networkMode: NetworkMode.BRIDGE + networkMode: ecs.NetworkMode.BRIDGE, }); const container = ec2TaskDefinition.addContainer("WebContainer", { // Use an image from DockerHub image: ecs.ContainerImage.fromRegistry("amazon/amazon-ecs-sample"), - memoryLimitMiB: 1024 + memoryLimitMiB: 1024, // ... other options here ... }); ``` @@ -292,7 +311,7 @@ const externalTaskDefinition = new ecs.ExternalTaskDefinition(this, 'TaskDef'); const container = externalTaskDefinition.addContainer("WebContainer", { // Use an image from DockerHub image: ecs.ContainerImage.fromRegistry("amazon/amazon-ecs-sample"), - memoryLimitMiB: 1024 + memoryLimitMiB: 1024, // ... other options here ... }); ``` @@ -302,34 +321,42 @@ You can specify container properties when you add them to the task definition, o To add a port mapping when adding a container to the task definition, specify the `portMappings` option: ```ts +declare const taskDefinition: ecs.TaskDefinition; + taskDefinition.addContainer("WebContainer", { image: ecs.ContainerImage.fromRegistry("amazon/amazon-ecs-sample"), memoryLimitMiB: 1024, - portMappings: [{ containerPort: 3000 }] + portMappings: [{ containerPort: 3000 }], }); ``` To add port mappings directly to a container definition, call `addPortMappings()`: ```ts +declare const container: ecs.ContainerDefinition; + container.addPortMappings({ - containerPort: 3000 + containerPort: 3000, }); ``` To add data volumes to a task definition, call `addVolume()`: ```ts +const fargateTaskDefinition = new ecs.FargateTaskDefinition(this, 'TaskDef', { + memoryLimitMiB: 512, + cpu: 256, +}); const volume = { // Use an Elastic FileSystem name: "mydatavolume", - efsVolumeConfiguration: ecs.EfsVolumeConfiguration({ - fileSystemId: "EFS" + efsVolumeConfiguration: { + fileSystemId: "EFS", // ... other options here ... 
- }) + }, }; -const container = fargateTaskDefinition.addVolume("mydatavolume"); +fargateTaskDefinition.addVolume(volume); ``` > Note: ECS Anywhere doesn't support volume attachments in the task definition. @@ -345,7 +372,7 @@ The following example uses both: const taskDefinition = new ecs.TaskDefinition(this, 'TaskDef', { memoryMiB: '512', cpu: '256', - networkMode: NetworkMode.AWS_VPC, + networkMode: ecs.NetworkMode.AWS_VPC, compatibility: ecs.Compatibility.EC2_AND_FARGATE, }); ``` @@ -372,6 +399,12 @@ obtained from either DockerHub or from ECR repositories, built directly from a l To pass environment variables to the container, you can use the `environment`, `environmentFiles`, and `secrets` props. ```ts +declare const secret: secretsmanager.Secret; +declare const dbSecret: secretsmanager.Secret; +declare const parameter: ssm.StringParameter; +declare const taskDefinition: ecs.TaskDefinition; +declare const s3Bucket: s3.Bucket; + taskDefinition.addContainer('container', { image: ecs.ContainerImage.fromRegistry("amazon/amazon-ecs-sample"), memoryLimitMiB: 1024, @@ -386,7 +419,7 @@ taskDefinition.addContainer('container', { SECRET: ecs.Secret.fromSecretsManager(secret), DB_PASSWORD: ecs.Secret.fromSecretsManager(dbSecret, 'password'), // Reference a specific JSON field (requires platform version 1.4.0 or later for Fargate tasks) PARAMETER: ecs.Secret.fromSsmParameter(parameter), - } + }, }); ``` @@ -402,24 +435,26 @@ If a task fails, Amazon ECS automatically restarts the task. ```ts -const taskDefinition; +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; const service = new ecs.FargateService(this, 'Service', { cluster, taskDefinition, - desiredCount: 5 + desiredCount: 5, }); ``` An ECS Anywhere service definition looks like: ```ts -const taskDefinition; +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; const service = new ecs.ExternalService(this, 'Service', { cluster, taskDefinition, - desiredCount: 5 + desiredCount: 5, }); ``` @@ -434,7 +469,9 @@ deployment circuit breaker and optionally enable `rollback` for automatic rollba for more details. ```ts -const service = new ecs.FargateService(stack, 'Service', { +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; +const service = new ecs.FargateService(this, 'Service', { cluster, taskDefinition, circuitBreaker: { rollback: true }, @@ -448,22 +485,23 @@ const service = new ecs.FargateService(stack, 'Service', { `Services` are load balancing targets and can be added to a target group, which will be attached to an application or network load balancer: ```ts -import * as elbv2 from '@aws-cdk/aws-elasticloadbalancingv2'; - -const service = new ecs.FargateService(this, 'Service', { /* ...
*/ }); +declare const vpc: ec2.Vpc; +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; +const service = new ecs.FargateService(this, 'Service', { cluster, taskDefinition }); const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', { vpc, internetFacing: true }); const listener = lb.addListener('Listener', { port: 80 }); const targetGroup1 = listener.addTargets('ECS1', { port: 80, - targets: [service] + targets: [service], }); const targetGroup2 = listener.addTargets('ECS2', { port: 80, targets: [service.loadBalancerTarget({ containerName: 'MyContainer', containerPort: 8080 - })] + })], }); ``` @@ -474,9 +512,10 @@ Note that in the example above, the default `service` only allows you to registe Alternatively, you can also create all load balancer targets to be registered in this service, add them to target groups, and attach target groups to listeners accordingly. ```ts -import * as elbv2 from '@aws-cdk/aws-elasticloadbalancingv2'; - -const service = new ecs.FargateService(this, 'Service', { /* ... */ }); +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; +declare const vpc: ec2.Vpc; +const service = new ecs.FargateService(this, 'Service', { cluster, taskDefinition }); const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', { vpc, internetFacing: true }); const listener = lb.addListener('Listener', { port: 80 }); @@ -512,11 +551,12 @@ for the alternatives. `Services` can also be directly attached to a classic load balancer as targets: ```ts -import * as elb from '@aws-cdk/aws-elasticloadbalancing'; - -const service = new ecs.Ec2Service(this, 'Service', { /* ... */ }); +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; +declare const vpc: ec2.Vpc; +const service = new ecs.Ec2Service(this, 'Service', { cluster, taskDefinition }); -const lb = new elb.LoadBalancer(stack, 'LB', { vpc }); +const lb = new elb.LoadBalancer(this, 'LB', { vpc }); lb.addListener({ externalPort: 80 }); lb.addTarget(service); ``` @@ -524,15 +564,16 @@ lb.addTarget(service); Similarly, if you want to have more control over load balancer targeting: ```ts -import * as elb from '@aws-cdk/aws-elasticloadbalancing'; +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; +declare const vpc: ec2.Vpc; +const service = new ecs.Ec2Service(this, 'Service', { cluster, taskDefinition }); -const service = new ecs.Ec2Service(this, 'Service', { /* ... */ }); - -const lb = new elb.LoadBalancer(stack, 'LB', { vpc }); +const lb = new elb.LoadBalancer(this, 'LB', { vpc }); lb.addListener({ externalPort: 80 }); lb.addTarget(service.loadBalancerTarget({ containerName: 'MyContainer', - containerPort: 80 + containerPort: 80, })); ``` @@ -547,15 +588,17 @@ You can configure the task count of a service to match demand. Task auto-scaling configured by calling `autoScaleTaskCount()`: ```ts +declare const target: elbv2.ApplicationTargetGroup; +declare const service: ecs.BaseService; const scaling = service.autoScaleTaskCount({ maxCapacity: 10 }); scaling.scaleOnCpuUtilization('CpuScaling', { - targetUtilizationPercent: 50 + targetUtilizationPercent: 50, }); scaling.scaleOnRequestCount('RequestScaling', { requestsPerTarget: 10000, - targetGroup: target -}) + targetGroup: target, +}); ``` Task auto-scaling is powered by *Application Auto-Scaling*. 
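Beyond the two policies shown above, the `scaling` object returned by `autoScaleTaskCount()` also supports scheduled adjustments. A minimal sketch, assuming an `appscaling` alias for `@aws-cdk/aws-applicationautoscaling` (not among the fixture imports added in this PR) and illustrative schedule values:

```ts
declare const service: ecs.BaseService;
const scaling = service.autoScaleTaskCount({ maxCapacity: 10 });

// Cap the service at a smaller task range outside of business hours (UTC)
scaling.scaleOnSchedule('ScaleDownAtNight', {
  schedule: appscaling.Schedule.cron({ hour: '20', minute: '0' }),
  maxCapacity: 2,
});
```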
@@ -567,19 +610,18 @@ To start an Amazon ECS task on an Amazon EC2-backed Cluster, instantiate an `@aws-cdk/aws-events-targets.EcsTask` instead of an `Ec2Service`: ```ts -import * as targets from '@aws-cdk/aws-events-targets'; - +declare const cluster: ecs.Cluster; // Create a Task Definition for the container to start const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromAsset(path.resolve(__dirname, '..', 'eventhandler-image')), memoryLimitMiB: 256, - logging: new ecs.AwsLogDriver({ streamPrefix: 'EventDemo', mode: AwsLogDriverMode.NON_BLOCKING }) + logging: new ecs.AwsLogDriver({ streamPrefix: 'EventDemo', mode: ecs.AwsLogDriverMode.NON_BLOCKING }), }); // A Rule that describes the event trigger (in this case a scheduled run) const rule = new events.Rule(this, 'Rule', { - schedule: events.Schedule.expression('rate(1 min)') + schedule: events.Schedule.expression('rate(1 minute)'), }); // Pass an environment variable to the container 'TheContainer' in the task @@ -592,8 +634,8 @@ rule.addTarget(new targets.EcsTask({ environment: [{ name: 'I_WAS_TRIGGERED', value: 'From CloudWatch Events' - }] - }] + }], + }], })); ``` @@ -609,6 +651,7 @@ Currently Supported Log Drivers: - splunk - syslog - awsfirelens +- Generic ### awslogs Log Driver @@ -618,7 +661,7 @@ const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), memoryLimitMiB: 256, - logging: ecs.LogDrivers.awsLogs({ streamPrefix: 'EventDemo' }) + logging: ecs.LogDrivers.awsLogs({ streamPrefix: 'EventDemo' }), }); ``` @@ -630,7 +673,7 @@ const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), memoryLimitMiB: 256, - logging: ecs.LogDrivers.fluentd() + logging: ecs.LogDrivers.fluentd(), }); ``` @@ -642,7 +685,7 @@ const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), memoryLimitMiB: 256, - logging: ecs.LogDrivers.gelf({ address: 'my-gelf-address' }) + logging: ecs.LogDrivers.gelf({ address: 'my-gelf-address' }), }); ``` @@ -654,7 +697,7 @@ const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), memoryLimitMiB: 256, - logging: ecs.LogDrivers.journald() + logging: ecs.LogDrivers.journald(), }); ``` @@ -666,7 +709,7 @@ const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), memoryLimitMiB: 256, - logging: ecs.LogDrivers.jsonFile() + logging: ecs.LogDrivers.jsonFile(), }); ``` @@ -679,9 +722,9 @@ taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), memoryLimitMiB: 256, logging: ecs.LogDrivers.splunk({ - secretToken: cdk.SecretValue.secretsManager('my-splunk-token'), - url: 'my-splunk-url' - }) + token: SecretValue.secretsManager('my-splunk-token'), + url: 'my-splunk-url', + }), }); ``` @@ -693,7 +736,7 @@ const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), memoryLimitMiB: 256, - logging:
ecs.LogDrivers.syslog(), }); ``` @@ -710,14 +753,17 @@ taskDefinition.addContainer('TheContainer', { Name: 'firehose', region: 'us-west-2', delivery_stream: 'my-stream', - } - }) + }, + }), }); ``` To pass secrets to the log configuration, use the `secretOptions` property of the log configuration. The task execution role is automatically granted read permissions on the secrets/parameters. ```ts +declare const secret: secretsmanager.Secret; +declare const parameter: ssm.StringParameter; + const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('TheContainer', { image: ecs.ContainerImage.fromRegistry('example-image'), @@ -730,7 +776,7 @@ taskDefinition.addContainer('TheContainer', { apikey: ecs.Secret.fromSecretsManager(secret), host: ecs.Secret.fromSsmParameter(parameter), }, - }) + }), }); ``` @@ -747,9 +793,9 @@ taskDefinition.addContainer('TheContainer', { logging: new ecs.GenericLogDriver({ logDriver: 'fluentd', options: { - tag: 'example-tag' - } - }) + tag: 'example-tag', + }, + }), }); ``` @@ -759,7 +805,10 @@ To register your ECS service with a CloudMap Service Registry, you may add the `cloudMapOptions` property to your service: ```ts -const service = new ecs.Ec2Service(stack, 'Service', { +declare const taskDefinition: ecs.TaskDefinition; +declare const cluster: ecs.Cluster; + +const service = new ecs.Ec2Service(this, 'Service', { cluster, taskDefinition, cloudMapOptions: { @@ -774,8 +823,14 @@ By default, `SRV` DNS record types will target the default container and default port. However, you may target a different container and port on the same ECS task: ```ts +declare const taskDefinition: ecs.TaskDefinition; +declare const cluster: ecs.Cluster; + // Add a container to the task definition -const specificContainer = taskDefinition.addContainer(...); +const specificContainer = taskDefinition.addContainer('Container', { + image: ecs.ContainerImage.fromRegistry('/aws/aws-example-app'), + memoryLimitMiB: 2048, +}); // Add a port mapping specificContainer.addPortMappings({ @@ -783,7 +838,7 @@ specificContainer.addPortMappings({ protocol: ecs.Protocol.TCP, }); -new ecs.Ec2Service(stack, 'Service', { +new ecs.Ec2Service(this, 'Service', { cluster, taskDefinition, cloudMapOptions: { @@ -802,8 +857,8 @@ You may associate an ECS service with a specific CloudMap service. To do this, use the service's `associateCloudMapService` method: ```ts -const cloudMapService = new cloudmap.Service(...); -const ecsService = new ecs.FargateService(...); +declare const cloudMapService: cloudmap.Service; +declare const ecsService: ecs.FargateService; ecsService.associateCloudMapService({ service: cloudMapService, @@ -827,18 +882,20 @@ cluster. This will add both `FARGATE` and `FARGATE_SPOT` as available capacity providers on your cluster. 
```ts -const cluster = new ecs.Cluster(stack, 'FargateCPCluster', { +declare const vpc: ec2.Vpc; + +const cluster = new ecs.Cluster(this, 'FargateCPCluster', { vpc, enableFargateCapacityProviders: true, }); -const taskDefinition = new ecs.FargateTaskDefinition(stack, 'TaskDef'); +const taskDefinition = new ecs.FargateTaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('web', { image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'), }); -new ecs.FargateService(stack, 'FargateService', { +new ecs.FargateService(this, 'FargateService', { cluster, taskDefinition, capacityProviderStrategies: [ @@ -849,7 +906,7 @@ new ecs.FargateService(stack, 'FargateService', { { capacityProvider: 'FARGATE', weight: 1, - } + }, ], }); ``` @@ -869,11 +926,13 @@ running on them. If you want to disable this behavior, set both `enableManagedScaling` and `enableManagedTerminationProtection` to `false`. ```ts -const cluster = new ecs.Cluster(stack, 'Cluster', { +declare const vpc: ec2.Vpc; + +const cluster = new ecs.Cluster(this, 'Cluster', { vpc, }); -const autoScalingGroup = new autoscaling.AutoScalingGroup(stack, 'ASG', { +const autoScalingGroup = new autoscaling.AutoScalingGroup(this, 'ASG', { vpc, instanceType: new ec2.InstanceType('t2.micro'), machineImage: ecs.EcsOptimizedImage.amazonLinux2(), @@ -881,26 +940,26 @@ const autoScalingGroup = new autoscaling.AutoScalingGroup(stack, 'ASG', { maxCapacity: 100, }); -const capacityProvider = new ecs.AsgCapacityProvider(stack, 'AsgCapacityProvider', { +const capacityProvider = new ecs.AsgCapacityProvider(this, 'AsgCapacityProvider', { autoScalingGroup, }); cluster.addAsgCapacityProvider(capacityProvider); -const taskDefinition = new ecs.Ec2TaskDefinition(stack, 'TaskDef'); +const taskDefinition = new ecs.Ec2TaskDefinition(this, 'TaskDef'); taskDefinition.addContainer('web', { image: ecs.ContainerImage.fromRegistry('amazon/amazon-ecs-sample'), memoryReservationMiB: 256, }); -new ecs.Ec2Service(stack, 'EC2Service', { +new ecs.Ec2Service(this, 'EC2Service', { cluster, taskDefinition, capacityProviderStrategies: [ { capacityProvider: capacityProvider.capacityProviderName, weight: 1, - } + }, ], }); ``` @@ -919,7 +978,7 @@ const inferenceAccelerators = [{ deviceType: 'eia2.medium', }]; -const taskDefinition = new ecs.Ec2TaskDefinition(stack, 'Ec2TaskDef', { +const taskDefinition = new ecs.Ec2TaskDefinition(this, 'Ec2TaskDef', { inferenceAccelerators, }); ``` @@ -929,6 +988,7 @@ field and set it to a list of device names used for the inference accelerators. list should match a `DeviceName` for an `InferenceAccelerator` specified in the task definition. ```ts +declare const taskDefinition: ecs.TaskDefinition; const inferenceAcceleratorResources = ['device1']; taskDefinition.addContainer('cont', { @@ -948,7 +1008,7 @@ To enable the ECS Exec feature for your containers, set the boolean flag `enable your `Ec2Service` or `FargateService`. ```ts -const service = new ecs.Ec2Service(stack, 'Service', { +declare const cluster: ecs.Cluster; +declare const taskDefinition: ecs.TaskDefinition; + +const service = new ecs.Ec2Service(this, 'Service', { cluster, taskDefinition, enableExecuteCommand: true, }); @@ -967,19 +1030,20 @@ of the `executeCommandConfiguration`. To use this key for encrypting CloudWatch to these resources on creation.
```ts -const kmsKey = new kms.Key(stack, 'KmsKey'); +declare const vpc: ec2.Vpc; +const kmsKey = new kms.Key(this, 'KmsKey'); // Pass the KMS key in the `encryptionKey` field to associate the key to the log group -const logGroup = new logs.LogGroup(stack, 'LogGroup', { +const logGroup = new logs.LogGroup(this, 'LogGroup', { encryptionKey: kmsKey, }); // Pass the KMS key in the `encryptionKey` field to associate the key to the S3 bucket -const execBucket = new s3.Bucket(stack, 'EcsExecBucket', { +const execBucket = new s3.Bucket(this, 'EcsExecBucket', { encryptionKey: kmsKey, }); -const cluster = new ecs.Cluster(stack, 'Cluster', { +const cluster = new ecs.Cluster(this, 'Cluster', { vpc, executeCommandConfiguration: { kmsKey, diff --git a/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts b/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts index ce5097e9ebccf..6bac2c663b82d 100644 --- a/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts +++ b/packages/@aws-cdk/aws-ecs/lib/base/base-service.ts @@ -570,6 +570,8 @@ export abstract class BaseService extends Resource * * @example * + * declare const listener: elbv2.ApplicationListener; + * declare const service: ecs.BaseService; * listener.addTargets('ECS', { * port: 80, * targets: [service.loadBalancerTarget({ @@ -605,6 +607,8 @@ export abstract class BaseService extends Resource * * @example * + * declare const listener: elbv2.ApplicationListener; + * declare const service: ecs.BaseService; * service.registerLoadBalancerTargets( * { * containerName: 'web', diff --git a/packages/@aws-cdk/aws-ecs/lib/cluster.ts b/packages/@aws-cdk/aws-ecs/lib/cluster.ts index 49d99bf68925d..cc97f4aadfafe 100644 --- a/packages/@aws-cdk/aws-ecs/lib/cluster.ts +++ b/packages/@aws-cdk/aws-ecs/lib/cluster.ts @@ -821,11 +821,9 @@ export interface AddCapacityOptions extends AddAutoScalingGroupCapacityOptions, * To use an image that does not update on every deployment, pass: * * ```ts - * { - * machineImage: EcsOptimizedImage.amazonLinux2(AmiHardwareType.STANDARD, { - * cachedInContext: true, - * }), - * } + * const machineImage = ecs.EcsOptimizedImage.amazonLinux2(ecs.AmiHardwareType.STANDARD, { + * cachedInContext: true, + * }); * ``` * * For more information, see [Amazon ECS-optimized diff --git a/packages/@aws-cdk/aws-ecs/lib/index.ts b/packages/@aws-cdk/aws-ecs/lib/index.ts index bd076ccfd05f7..09d355dd18b38 100644 --- a/packages/@aws-cdk/aws-ecs/lib/index.ts +++ b/packages/@aws-cdk/aws-ecs/lib/index.ts @@ -36,6 +36,7 @@ export * from './log-drivers/json-file-log-driver'; export * from './log-drivers/splunk-log-driver'; export * from './log-drivers/syslog-log-driver'; export * from './log-drivers/log-driver'; +export * from './log-drivers/generic-log-driver'; export * from './log-drivers/log-drivers'; export * from './proxy-configuration/app-mesh-proxy-configuration'; diff --git a/packages/@aws-cdk/aws-ecs/lib/log-drivers/generic-log-driver.ts b/packages/@aws-cdk/aws-ecs/lib/log-drivers/generic-log-driver.ts index 9e356a8cd3c19..8181b7d689d44 100644 --- a/packages/@aws-cdk/aws-ecs/lib/log-drivers/generic-log-driver.ts +++ b/packages/@aws-cdk/aws-ecs/lib/log-drivers/generic-log-driver.ts @@ -1,5 +1,5 @@ import { ContainerDefinition, Secret } from '../container-definition'; -import { LogDriver, LogDriverConfig } from '../index'; +import { LogDriver, LogDriverConfig } from './log-driver'; import { removeEmpty, renderLogDriverSecretOptions } from './utils'; // v2 - keep this import as a separate section to reduce merge conflict when forward merging with the v2 branch. 
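For reference, the new `export * from './log-drivers/generic-log-driver'` line above is what makes `GenericLogDriver` reachable from the package root, which the README's generic log driver example relies on. A minimal usage sketch, mirroring the fluentd options already shown in that README section:

```ts
import { GenericLogDriver } from '@aws-cdk/aws-ecs';

// Configure any docker log driver by name, passing its options through verbatim
const logging = new GenericLogDriver({
  logDriver: 'fluentd',
  options: {
    tag: 'example-tag',
  },
});
```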
diff --git a/packages/@aws-cdk/aws-ecs/rosetta/default.ts-fixture b/packages/@aws-cdk/aws-ecs/rosetta/default.ts-fixture new file mode 100644 index 0000000000000..2cc599faf6c3f --- /dev/null +++ b/packages/@aws-cdk/aws-ecs/rosetta/default.ts-fixture @@ -0,0 +1,24 @@ +// Fixture with packages imported, but nothing else +import { Construct, SecretValue, Stack } from '@aws-cdk/core'; +import autoscaling = require('@aws-cdk/aws-autoscaling'); +import cloudmap = require('@aws-cdk/aws-servicediscovery'); +import ecs = require('@aws-cdk/aws-ecs'); +import ec2 = require('@aws-cdk/aws-ec2'); +import elb = require('@aws-cdk/aws-elasticloadbalancing'); +import elbv2 = require('@aws-cdk/aws-elasticloadbalancingv2'); +import events = require('@aws-cdk/aws-events'); +import kms = require('@aws-cdk/aws-kms'); +import logs = require('@aws-cdk/aws-logs'); +import s3 = require('@aws-cdk/aws-s3'); +import secretsmanager = require('@aws-cdk/aws-secretsmanager'); +import ssm = require('@aws-cdk/aws-ssm'); +import targets = require('@aws-cdk/aws-events-targets'); +import path = require('path'); + +class Fixture extends Stack { + constructor(scope: Construct, id: string) { + super(scope, id); + + /// here + } +}
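This fixture is what the README snippets compile against: the body of each ```ts block is spliced in at the `/// here` marker, so the `declare const …` lines added throughout the README type-check against the imports listed above. A hedged sketch of how the README's opening cluster example reads once expanded into the fixture, with the `declare`d `vpc` replaced by a concrete VPC purely for illustration:

```ts
import { Construct, Stack } from '@aws-cdk/core';
import ec2 = require('@aws-cdk/aws-ec2');
import ecs = require('@aws-cdk/aws-ecs');

class Fixture extends Stack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    // Snippet body, spliced in at the `/// here` marker
    const vpc = new ec2.Vpc(this, 'Vpc');
    const cluster = new ecs.Cluster(this, 'Cluster', { vpc });
  }
}
```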