-
Notifications
You must be signed in to change notification settings - Fork 4k
/
cluster.ts
1345 lines (1184 loc) · 49.7 KB
/
cluster.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import { Construct } from 'constructs';
import { IAuroraClusterInstance, IClusterInstance, InstanceType } from './aurora-cluster-instance';
import { IClusterEngine } from './cluster-engine';
import { DatabaseClusterAttributes, IDatabaseCluster } from './cluster-ref';
import { Endpoint } from './endpoint';
import { NetworkType } from './instance';
import { IParameterGroup, ParameterGroup } from './parameter-group';
import { applyDefaultRotationOptions, defaultDeletionProtection, renderCredentials, setupS3ImportExport, helperRemovalPolicy, renderUnless, renderSnapshotCredentials } from './private/util';
import { BackupProps, Credentials, InstanceProps, PerformanceInsightRetention, RotationSingleUserOptions, RotationMultiUserOptions, SnapshotCredentials } from './props';
import { DatabaseProxy, DatabaseProxyOptions, ProxyTarget } from './proxy';
import { CfnDBCluster, CfnDBClusterProps, CfnDBInstance } from './rds.generated';
import { ISubnetGroup, SubnetGroup } from './subnet-group';
import * as cloudwatch from '../../aws-cloudwatch';
import * as ec2 from '../../aws-ec2';
import { IRole, ManagedPolicy, Role, ServicePrincipal } from '../../aws-iam';
import * as kms from '../../aws-kms';
import * as logs from '../../aws-logs';
import * as s3 from '../../aws-s3';
import * as secretsmanager from '../../aws-secretsmanager';
import { Annotations, Duration, FeatureFlags, Lazy, RemovalPolicy, Resource, Token } from '../../core';
import * as cxapi from '../../cx-api';
/**
 * Common properties for a new database cluster or cluster from snapshot.
 */
interface DatabaseClusterBaseProps {
  /**
   * What kind of database to start
   */
  readonly engine: IClusterEngine;

  /**
   * How many replicas/instances to create
   *
   * Has to be at least 1.
   *
   * @default 2
   * @deprecated - use writer and readers instead
   */
  readonly instances?: number;

  /**
   * Settings for the individual instances that are launched
   *
   * @deprecated - use writer and readers instead
   */
  readonly instanceProps?: InstanceProps;

  /**
   * The instance to use for the cluster writer
   *
   * @default required if instanceProps is not provided
   */
  readonly writer?: IClusterInstance;

  /**
   * A list of instances to create as cluster reader instances
   *
   * @default - no readers are created. The cluster will have a single writer/reader
   */
  readonly readers?: IClusterInstance[];

  /**
   * The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster.
   * You can specify ACU values in half-step increments, such as 40, 40.5, 41, and so on.
   * The largest value that you can use is 128 (256GB).
   *
   * The maximum capacity must be higher than 0.5 ACUs.
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.setting-capacity.html#aurora-serverless-v2.max_capacity_considerations
   *
   * @default 2
   */
  readonly serverlessV2MaxCapacity?: number;

  /**
   * The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster.
   * You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on.
   * The smallest value that you can use is 0.5.
   *
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.setting-capacity.html#aurora-serverless-v2.max_capacity_considerations
   *
   * @default 0.5
   */
  readonly serverlessV2MinCapacity?: number;

  /**
   * What subnets to run the RDS instances in.
   *
   * Must be at least 2 subnets in two different AZs.
   */
  readonly vpc?: ec2.IVpc;

  /**
   * Where to place the instances within the VPC
   *
   * @default - the Vpc default strategy if not specified.
   */
  readonly vpcSubnets?: ec2.SubnetSelection;

  /**
   * Security group.
   *
   * @default a new security group is created.
   */
  readonly securityGroups?: ec2.ISecurityGroup[];

  /**
   * The ordering of updates for instances
   *
   * @default InstanceUpdateBehaviour.BULK
   */
  readonly instanceUpdateBehaviour?: InstanceUpdateBehaviour;

  /**
   * The number of seconds to set a cluster's target backtrack window to.
   * This feature is only supported by the Aurora MySQL database engine and
   * cannot be enabled on existing clusters.
   *
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Managing.Backtrack.html
   * @default 0 seconds (no backtrack)
   */
  readonly backtrackWindow?: Duration;

  /**
   * Backup settings
   *
   * @default - Backup retention period for automated backups is 1 day.
   * Backup preferred window is set to a 30-minute window selected at random from an
   * 8-hour block of time for each AWS Region, occurring on a random day of the week.
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow
   */
  readonly backup?: BackupProps;

  /**
   * What port to listen on
   *
   * @default - The default for the engine is used.
   */
  readonly port?: number;

  /**
   * An optional identifier for the cluster
   *
   * @default - A name is automatically generated.
   */
  readonly clusterIdentifier?: string;

  /**
   * Base identifier for instances
   *
   * Every replica is named by appending the replica number to this string, 1-based.
   *
   * @default - clusterIdentifier is used with the word "Instance" appended.
   * If clusterIdentifier is not provided, the identifier is automatically generated.
   */
  readonly instanceIdentifierBase?: string;

  /**
   * Name of a database which is automatically created inside the cluster
   *
   * @default - Database is not created in cluster.
   */
  readonly defaultDatabaseName?: string;

  /**
   * Indicates whether the DB cluster should have deletion protection enabled.
   *
   * @default - true if `removalPolicy` is RETAIN, `undefined` otherwise, which will not enable deletion protection.
   * To disable deletion protection after it has been enabled, you must explicitly set this value to `false`.
   */
  readonly deletionProtection?: boolean;

  /**
   * A preferred maintenance window day/time range. Should be specified as a range ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC).
   *
   * Example: 'Sun:23:45-Mon:00:15'
   *
   * @default - 30-minute window selected at random from an 8-hour block of time for
   * each AWS Region, occurring on a random day of the week.
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#Concepts.DBMaintenance
   */
  readonly preferredMaintenanceWindow?: string;

  /**
   * Additional parameters to pass to the database engine
   *
   * @default - No parameter group.
   */
  readonly parameterGroup?: IParameterGroup;

  /**
   * The parameters in the DBClusterParameterGroup to create automatically
   *
   * You can only specify parameterGroup or parameters but not both.
   * You need to use a versioned engine to auto-generate a DBClusterParameterGroup.
   *
   * @default - None
   */
  readonly parameters?: { [key: string]: string };

  /**
   * The removal policy to apply when the cluster and its instances are removed
   * from the stack or replaced during an update.
   *
   * @default - RemovalPolicy.SNAPSHOT (remove the cluster and instances, but retain a snapshot of the data)
   */
  readonly removalPolicy?: RemovalPolicy;

  /**
   * The list of log types that need to be enabled for exporting to
   * CloudWatch Logs.
   *
   * @default - no log exports
   */
  readonly cloudwatchLogsExports?: string[];

  /**
   * The number of days log events are kept in CloudWatch Logs. When updating
   * this property, unsetting it doesn't remove the log retention policy. To
   * remove the retention policy, set the value to `Infinity`.
   *
   * @default - logs never expire
   */
  readonly cloudwatchLogsRetention?: logs.RetentionDays;

  /**
   * The IAM role for the Lambda function associated with the custom resource
   * that sets the retention policy.
   *
   * @default - a new role is created.
   */
  readonly cloudwatchLogsRetentionRole?: IRole;

  /**
   * The interval, in seconds, between points when Amazon RDS collects enhanced
   * monitoring metrics for the DB instances.
   *
   * @default no enhanced monitoring
   */
  readonly monitoringInterval?: Duration;

  /**
   * Role that will be used to manage DB instances monitoring.
   *
   * @default - A role is automatically created for you
   */
  readonly monitoringRole?: IRole;

  /**
   * Role that will be associated with this DB cluster to enable S3 import.
   * This feature is only supported by the Aurora database engine.
   *
   * This property must not be used if `s3ImportBuckets` is used.
   *
   * For MySQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.LoadFromS3.html
   *
   * For PostgreSQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Migrating.html
   *
   * @default - New role is created if `s3ImportBuckets` is set, no role is defined otherwise
   */
  readonly s3ImportRole?: IRole;

  /**
   * S3 buckets that you want to load data from. This feature is only supported by the Aurora database engine.
   *
   * This property must not be used if `s3ImportRole` is used.
   *
   * For MySQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.LoadFromS3.html
   *
   * For PostgreSQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Migrating.html
   *
   * @default - None
   */
  readonly s3ImportBuckets?: s3.IBucket[];

  /**
   * Role that will be associated with this DB cluster to enable S3 export.
   * This feature is only supported by the Aurora database engine.
   *
   * This property must not be used if `s3ExportBuckets` is used.
   *
   * For MySQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.SaveIntoS3.html
   *
   * For PostgreSQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/postgresql-s3-export.html
   *
   * @default - New role is created if `s3ExportBuckets` is set, no role is defined otherwise
   */
  readonly s3ExportRole?: IRole;

  /**
   * S3 buckets that you want to load data into. This feature is only supported by the Aurora database engine.
   *
   * This property must not be used if `s3ExportRole` is used.
   *
   * For MySQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Integrating.SaveIntoS3.html
   *
   * For PostgreSQL:
   * @see https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/postgresql-s3-export.html
   *
   * @default - None
   */
  readonly s3ExportBuckets?: s3.IBucket[];

  /**
   * Existing subnet group for the cluster.
   *
   * @default - a new subnet group will be created.
   */
  readonly subnetGroup?: ISubnetGroup;

  /**
   * Whether to enable mapping of AWS Identity and Access Management (IAM) accounts
   * to database accounts.
   *
   * @default false
   */
  readonly iamAuthentication?: boolean;

  /**
   * Whether to enable storage encryption.
   *
   * @default - true if storageEncryptionKey is provided, false otherwise
   */
  readonly storageEncrypted?: boolean;

  /**
   * The KMS key for storage encryption.
   * If specified, `storageEncrypted` will be set to `true`.
   *
   * @default - if storageEncrypted is true then the default master key, no key otherwise
   */
  readonly storageEncryptionKey?: kms.IKey;

  /**
   * The storage type to be associated with the DB cluster.
   *
   * @default - DBClusterStorageType.AURORA_IOPT1
   */
  readonly storageType?: DBClusterStorageType;

  /**
   * Whether to copy tags to the snapshot when a snapshot is created.
   *
   * @default - true
   */
  readonly copyTagsToSnapshot?: boolean;

  /**
   * The network type of the DB instance.
   *
   * @default - IPV4
   */
  readonly networkType?: NetworkType;
}
/**
 * The storage type to be associated with the DB cluster.
 *
 * The enum value is passed through verbatim as the CloudFormation
 * `StorageType` property of the DB cluster.
 */
export enum DBClusterStorageType {
  /**
   * Storage type for Aurora DB standard clusters.
   */
  AURORA = 'aurora',

  /**
   * Storage type for Aurora DB I/O-Optimized clusters.
   */
  AURORA_IOPT1 = 'aurora-iopt1',
}
/**
 * The orchestration of updates of multiple instances
 *
 * Configured via `DatabaseClusterBaseProps.instanceUpdateBehaviour`.
 */
export enum InstanceUpdateBehaviour {
  /**
   * In a bulk update, all instances of the cluster are updated at the same time.
   * This results in a faster update procedure.
   * During the update, however, all instances might be unavailable at the same time and thus a downtime might occur.
   */
  BULK = 'BULK',

  /**
   * In a rolling update, one instance after another is updated.
   * This results in at most one instance being unavailable during the update.
   * If your cluster consists of more than 1 instance, the downtime periods are limited to the time a primary switch needs.
   */
  ROLLING = 'ROLLING'
}
/**
 * A new or imported clustered database.
 */
export abstract class DatabaseClusterBase extends Resource implements IDatabaseCluster {
  // only required because of JSII bug: https://github.com/aws/jsii/issues/2040
  public abstract readonly engine?: IClusterEngine;

  /**
   * Identifier of the cluster
   */
  public abstract readonly clusterIdentifier: string;

  /**
   * The immutable identifier for the cluster; for example: cluster-ABCD1234EFGH5678IJKL90MNOP.
   *
   * This AWS Region-unique identifier is used in things like IAM authentication policies.
   */
  public abstract readonly clusterResourceIdentifier: string;

  /**
   * Identifiers of the replicas
   */
  public abstract readonly instanceIdentifiers: string[];

  /**
   * The endpoint to use for read/write operations
   */
  public abstract readonly clusterEndpoint: Endpoint;

  /**
   * Endpoint to use for load-balanced read-only operations.
   */
  public abstract readonly clusterReadEndpoint: Endpoint;

  /**
   * Endpoints which address each individual replica.
   */
  public abstract readonly instanceEndpoints: Endpoint[];

  /**
   * Access to the network connections
   */
  public abstract readonly connections: ec2.Connections;

  /**
   * Add a new db proxy to this cluster.
   *
   * @param id construct id for the new proxy
   * @param options proxy configuration; because `options` is spread after the
   * default `proxyTarget`, any same-named key supplied by the caller takes
   * precedence
   */
  public addProxy(id: string, options: DatabaseProxyOptions): DatabaseProxy {
    return new DatabaseProxy(this, id, {
      proxyTarget: ProxyTarget.fromCluster(this),
      ...options,
    });
  }

  /**
   * Renders the secret attachment target specifications.
   *
   * Identifies this cluster (by its `clusterIdentifier`) as an RDS DB cluster
   * attachment target for Secrets Manager.
   */
  public asSecretAttachmentTarget(): secretsmanager.SecretAttachmentTargetProps {
    return {
      targetId: this.clusterIdentifier,
      targetType: secretsmanager.AttachmentTargetType.RDS_DB_CLUSTER,
    };
  }
}
/**
 * Abstract base for ``DatabaseCluster`` and ``DatabaseClusterFromSnapshot``
 */
abstract class DatabaseClusterNew extends DatabaseClusterBase {
  /**
   * The engine for this Cluster.
   * Never undefined.
   */
  public readonly engine?: IClusterEngine;

  protected readonly newCfnProps: CfnDBClusterProps;
  protected readonly securityGroups: ec2.ISecurityGroup[];
  protected readonly subnetGroup: ISubnetGroup;

  /**
   * Secret in SecretsManager to store the database cluster user credentials.
   */
  public abstract readonly secret?: secretsmanager.ISecret;

  /**
   * The VPC network to place the cluster in.
   */
  public readonly vpc: ec2.IVpc;

  /**
   * The cluster's subnets.
   */
  public readonly vpcSubnets?: ec2.SubnetSelection;

  /**
   * Application for single user rotation of the master password to this cluster.
   */
  public readonly singleUserRotationApplication: secretsmanager.SecretRotationApplication;

  /**
   * Application for multi user rotation to this cluster.
   */
  public readonly multiUserRotationApplication: secretsmanager.SecretRotationApplication;

  protected readonly serverlessV2MinCapacity: number;
  protected readonly serverlessV2MaxCapacity: number;

  // Set when a serverless v2 instance is added to the cluster; read lazily
  // when rendering `serverlessV2ScalingConfiguration` in `newCfnProps`.
  protected hasServerlessInstance?: boolean;

  constructor(scope: Construct, id: string, props: DatabaseClusterBaseProps) {
    super(scope, id);

    // `vpc`/`vpcSubnets` may come either from the deprecated `instanceProps`
    // or from the cluster-level props, but not from both.
    if ((props.vpc && props.instanceProps?.vpc)) {
      throw new Error('Provide either vpc or instanceProps.vpc, but not both');
    } else if (!props.vpc && !props.instanceProps?.vpc) {
      throw new Error('If instanceProps is not provided then `vpc` must be provided.');
    }
    if ((props.vpcSubnets && props.instanceProps?.vpcSubnets)) {
      throw new Error('Provide either vpcSubnets or instanceProps.vpcSubnets, but not both');
    }
    this.vpc = props.instanceProps?.vpc ?? props.vpc!;
    this.vpcSubnets = props.instanceProps?.vpcSubnets ?? props.vpcSubnets;

    this.singleUserRotationApplication = props.engine.singleUserRotationApplication;
    this.multiUserRotationApplication = props.engine.multiUserRotationApplication;

    this.serverlessV2MaxCapacity = props.serverlessV2MaxCapacity ?? 2;
    this.serverlessV2MinCapacity = props.serverlessV2MinCapacity ?? 0.5;
    this.validateServerlessScalingConfig();

    const { subnetIds } = this.vpc.selectSubnets(this.vpcSubnets);

    // Cannot test whether the subnets are in different AZs, but at least we can test the amount.
    if (subnetIds.length < 2) {
      Annotations.of(this).addError(`Cluster requires at least 2 subnets, got ${subnetIds.length}`);
    }

    this.subnetGroup = props.subnetGroup ?? new SubnetGroup(this, 'Subnets', {
      description: `Subnets for ${id} database`,
      vpc: this.vpc,
      vpcSubnets: this.vpcSubnets,
      removalPolicy: renderUnless(helperRemovalPolicy(props.removalPolicy), RemovalPolicy.DESTROY),
    });

    this.securityGroups = props.instanceProps?.securityGroups ?? props.securityGroups ?? [
      new ec2.SecurityGroup(this, 'SecurityGroup', {
        description: 'RDS security group',
        vpc: this.vpc,
      }),
    ];

    const combineRoles = props.engine.combineImportAndExportRoles ?? false;
    let { s3ImportRole, s3ExportRole } = setupS3ImportExport(this, props, combineRoles);

    if (props.parameterGroup && props.parameters) {
      throw new Error('You cannot specify both parameterGroup and parameters');
    }
    const parameterGroup = props.parameterGroup ?? (
      props.parameters
        ? new ParameterGroup(this, 'ParameterGroup', {
          engine: props.engine,
          parameters: props.parameters,
        })
        : undefined
    );

    // bind the engine to the Cluster
    const clusterEngineBindConfig = props.engine.bindToCluster(this, {
      s3ImportRole,
      s3ExportRole,
      parameterGroup,
    });

    const clusterAssociatedRoles: CfnDBCluster.DBClusterRoleProperty[] = [];
    if (s3ImportRole) {
      clusterAssociatedRoles.push({ roleArn: s3ImportRole.roleArn, featureName: clusterEngineBindConfig.features?.s3Import });
    }
    if (s3ExportRole &&
      // only add the second associated Role if it's different than the first
      // (duplicates in the associated Roles array are not allowed by the RDS service)
      (s3ExportRole !== s3ImportRole ||
      clusterEngineBindConfig.features?.s3Import !== clusterEngineBindConfig.features?.s3Export)) {
      clusterAssociatedRoles.push({ roleArn: s3ExportRole.roleArn, featureName: clusterEngineBindConfig.features?.s3Export });
    }

    const clusterParameterGroup = props.parameterGroup ?? clusterEngineBindConfig.parameterGroup;
    const clusterParameterGroupConfig = clusterParameterGroup?.bindToCluster({});
    this.engine = props.engine;

    // Lowercase the user-supplied identifier only when the feature flag is on
    // and the value is not an unresolved token.
    const clusterIdentifier = FeatureFlags.of(this).isEnabled(cxapi.RDS_LOWERCASE_DB_IDENTIFIER) && !Token.isUnresolved(props.clusterIdentifier)
      ? props.clusterIdentifier?.toLowerCase()
      : props.clusterIdentifier;

    this.newCfnProps = {
      // Basic
      engine: props.engine.engineType,
      engineVersion: props.engine.engineVersion?.fullVersion,
      dbClusterIdentifier: clusterIdentifier,
      dbSubnetGroupName: this.subnetGroup.subnetGroupName,
      vpcSecurityGroupIds: this.securityGroups.map(sg => sg.securityGroupId),
      port: props.port ?? clusterEngineBindConfig.port,
      dbClusterParameterGroupName: clusterParameterGroupConfig?.parameterGroupName,
      associatedRoles: clusterAssociatedRoles.length > 0 ? clusterAssociatedRoles : undefined,
      deletionProtection: defaultDeletionProtection(props.deletionProtection, props.removalPolicy),
      enableIamDatabaseAuthentication: props.iamAuthentication,
      networkType: props.networkType,
      // Rendered lazily: instances are bound after construction, so whether a
      // serverless v2 scaling configuration is needed isn't known yet.
      serverlessV2ScalingConfiguration: Lazy.any({
        produce: () => {
          if (this.hasServerlessInstance) {
            return {
              minCapacity: this.serverlessV2MinCapacity,
              maxCapacity: this.serverlessV2MaxCapacity,
            };
          }
          return undefined;
        },
      }),
      storageType: props.storageType?.toString(),
      // Admin
      backtrackWindow: props.backtrackWindow?.toSeconds(),
      backupRetentionPeriod: props.backup?.retention?.toDays(),
      preferredBackupWindow: props.backup?.preferredWindow,
      preferredMaintenanceWindow: props.preferredMaintenanceWindow,
      databaseName: props.defaultDatabaseName,
      enableCloudwatchLogsExports: props.cloudwatchLogsExports,
      // Encryption
      kmsKeyId: props.storageEncryptionKey?.keyArn,
      storageEncrypted: props.storageEncryptionKey ? true : props.storageEncrypted,
      // Tags
      copyTagsToSnapshot: props.copyTagsToSnapshot ?? true,
    };
  }

  /**
   * Create cluster instances
   *
   * Binds the writer first (the first instance bound becomes the writer) and
   * then all readers, collecting their identifiers and endpoints.
   *
   * @param cluster construct scope in which the enhanced-monitoring role is
   * created when `monitoringInterval` is set without `monitoringRole`
   * @param props cluster construction props (uses writer/readers/monitoring/removalPolicy)
   * @returns the endpoints and identifiers of all created instances
   * @internal
   */
  protected _createInstances(cluster: DatabaseClusterNew, props: DatabaseClusterProps): InstanceConfig {
    const instanceEndpoints: Endpoint[] = [];
    const instanceIdentifiers: string[] = [];
    const readers: IAuroraClusterInstance[] = [];

    // Create an enhanced-monitoring role only when monitoring is requested
    // and the caller didn't supply their own role.
    let monitoringRole = props.monitoringRole;
    if (!props.monitoringRole && props.monitoringInterval && props.monitoringInterval.toSeconds()) {
      monitoringRole = new Role(cluster, 'MonitoringRole', {
        assumedBy: new ServicePrincipal('monitoring.rds.amazonaws.com'),
        managedPolicies: [
          ManagedPolicy.fromAwsManagedPolicyName('service-role/AmazonRDSEnhancedMonitoringRole'),
        ],
      });
    }

    // need to create the writer first since writer is determined by what instance is first
    const writer = props.writer!.bind(this, this, {
      monitoringInterval: props.monitoringInterval,
      monitoringRole: monitoringRole,
      removalPolicy: props.removalPolicy ?? RemovalPolicy.SNAPSHOT,
      subnetGroup: this.subnetGroup,
      promotionTier: 0, // override the promotion tier so that writers are always 0
    });
    instanceIdentifiers.push(writer.instanceIdentifier);

    (props.readers ?? []).forEach(instance => {
      const clusterInstance = instance.bind(this, this, {
        monitoringInterval: props.monitoringInterval,
        monitoringRole: monitoringRole,
        removalPolicy: props.removalPolicy ?? RemovalPolicy.SNAPSHOT,
        subnetGroup: this.subnetGroup,
      });
      readers.push(clusterInstance);

      // readers in promotion tiers 0-1 are failover targets and must be able
      // to keep up with the writer
      if (clusterInstance.tier < 2) {
        this.validateReaderInstance(writer, clusterInstance);
      }
      instanceEndpoints.push(new Endpoint(clusterInstance.dbInstanceEndpointAddress, this.clusterEndpoint.port));
      instanceIdentifiers.push(clusterInstance.instanceIdentifier);
    });
    this.validateClusterInstances(writer, readers);

    return {
      instanceEndpoints,
      instanceIdentifiers,
    };
  }

  /**
   * Perform validations on the cluster instances
   */
  private validateClusterInstances(writer: IAuroraClusterInstance, readers: IAuroraClusterInstance[]): void {
    if (writer.type === InstanceType.SERVERLESS_V2) {
      this.hasServerlessInstance = true;
    }
    if (readers.length > 0) {
      // sort ascending by tier: the lowest tier number is the highest failover priority
      const sortedReaders = readers.sort((a, b) => a.tier - b.tier);
      const highestTierReaders: IAuroraClusterInstance[] = [];
      const highestTier = sortedReaders[0].tier;
      let hasProvisionedReader = false;
      let noFailoverTierInstances = true;
      let serverlessInHighestTier = false;
      let hasServerlessReader = false;
      const someProvisionedReadersDontMatchWriter: IAuroraClusterInstance[] = [];
      for (const reader of sortedReaders) {
        if (reader.type === InstanceType.SERVERLESS_V2) {
          hasServerlessReader = true;
          this.hasServerlessInstance = true;
        } else {
          hasProvisionedReader = true;
          if (reader.instanceSize !== writer.instanceSize) {
            someProvisionedReadersDontMatchWriter.push(reader);
          }
        }
        if (reader.tier === highestTier) {
          if (reader.type === InstanceType.SERVERLESS_V2) {
            serverlessInHighestTier = true;
          }
          highestTierReaders.push(reader);
        }
        if (reader.tier <= 1) {
          noFailoverTierInstances = false;
        }
      }
      const hasOnlyServerlessReaders = hasServerlessReader && !hasProvisionedReader;
      if (hasOnlyServerlessReaders) {
        if (noFailoverTierInstances) {
          Annotations.of(this).addWarningV2(
            '@aws-cdk/aws-rds:noFailoverServerlessReaders',
            `Cluster ${this.node.id} only has serverless readers and no reader is in promotion tier 0-1.`+
            'Serverless readers in promotion tiers >= 2 will NOT scale with the writer, which can lead to '+
            'availability issues if a failover event occurs. It is recommended that at least one reader '+
            'has `scaleWithWriter` set to true',
          );
        }
      } else {
        if (serverlessInHighestTier && highestTier > 1) {
          Annotations.of(this).addWarningV2(
            '@aws-cdk/aws-rds:serverlessInHighestTier2-15',
            `There are serverlessV2 readers in tier ${highestTier}. Since there are no instances in a higher tier, `+
            'any instance in this tier is a failover target. Since this tier is > 1 the serverless reader will not scale '+
            'with the writer which could lead to availability issues during failover.',
          );
        }
        if (someProvisionedReadersDontMatchWriter.length > 0 && writer.type === InstanceType.PROVISIONED) {
          Annotations.of(this).addWarningV2(
            '@aws-cdk/aws-rds:provisionedReadersDontMatchWriter',
            `There are provisioned readers in the highest promotion tier ${highestTier} that do not have the same `+
            'InstanceSize as the writer. Any of these instances could be chosen as the new writer in the event '+
            'of a failover.\n'+
            `Writer InstanceSize: ${writer.instanceSize}\n`+
            `Reader InstanceSizes: ${someProvisionedReadersDontMatchWriter.map(reader => reader.instanceSize).join(', ')}`,
          );
        }
      }
    }
  }

  /**
   * Perform validations on the reader instance
   */
  private validateReaderInstance(writer: IAuroraClusterInstance, reader: IAuroraClusterInstance): void {
    if (writer.type === InstanceType.PROVISIONED) {
      if (reader.type === InstanceType.SERVERLESS_V2) {
        if (!instanceSizeSupportedByServerlessV2(writer.instanceSize!, this.serverlessV2MaxCapacity)) {
          Annotations.of(this).addWarningV2('@aws-cdk/aws-rds:serverlessInstanceCantScaleWithWriter',
            'For high availability any serverless instances in promotion tiers 0-1 '+
            'should be able to scale to match the provisioned instance capacity.\n'+
            `Serverless instance ${reader.node.id} is in promotion tier ${reader.tier},\n`+
            `But can not scale to match the provisioned writer instance (${writer.instanceSize})`,
          );
        }
      }
    }
  }

  /**
   * As a cluster-level metric, it represents the average of the ServerlessDatabaseCapacity
   * values of all the Aurora Serverless v2 DB instances in the cluster.
   */
  public metricServerlessDatabaseCapacity(props?: cloudwatch.MetricOptions) {
    return this.metric('ServerlessDatabaseCapacity', { statistic: 'Average', ...props });
  }

  /**
   * This value is represented as a percentage. It's calculated as the value of the
   * ServerlessDatabaseCapacity metric divided by the maximum ACU value of the DB cluster.
   *
   * If this metric approaches a value of 100.0, the DB instance has scaled up as high as it can.
   * Consider increasing the maximum ACU setting for the cluster.
   */
  public metricACUUtilization(props?: cloudwatch.MetricOptions) {
    return this.metric('ACUUtilization', { statistic: 'Average', ...props });
  }

  /**
   * Validate the serverless v2 min/max capacity settings.
   *
   * @throws if either value is outside [0.5, 128], max < min, both are 0.5,
   * or either value is not a 0.5-step increment
   */
  private validateServerlessScalingConfig(): void {
    if (this.serverlessV2MaxCapacity > 128 || this.serverlessV2MaxCapacity < 0.5) {
      throw new Error('serverlessV2MaxCapacity must be >= 0.5 & <= 128');
    }
    if (this.serverlessV2MinCapacity > 128 || this.serverlessV2MinCapacity < 0.5) {
      throw new Error('serverlessV2MinCapacity must be >= 0.5 & <= 128');
    }
    if (this.serverlessV2MaxCapacity < this.serverlessV2MinCapacity) {
      throw new Error('serverlessV2MaxCapacity must be greater than serverlessV2MinCapacity');
    }
    if (this.serverlessV2MaxCapacity === 0.5 && this.serverlessV2MinCapacity === 0.5) {
      throw new Error('If serverlessV2MinCapacity === 0.5 then serverlessV2MaxCapacity must be >=1');
    }
    // capacities must be whole- or half-numbers, e.g. 1, 1.5, 2, ...
    const regexp = new RegExp(/^[0-9]+\.?5?$/);
    if (!regexp.test(this.serverlessV2MaxCapacity.toString()) || !regexp.test(this.serverlessV2MinCapacity.toString())) {
      // bug fix: previously both `min:` and `max:` interpolated serverlessV2MaxCapacity
      throw new Error('serverlessV2MinCapacity & serverlessV2MaxCapacity must be in 0.5 step increments, received '+
        `min: ${this.serverlessV2MinCapacity}, max: ${this.serverlessV2MaxCapacity}`);
    }
  }

  /**
   * Adds the single user rotation of the master password to this cluster.
   * See [Single user rotation strategy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets_strategies.html#rotating-secrets-one-user-one-password)
   */
  public addRotationSingleUser(options: RotationSingleUserOptions = {}): secretsmanager.SecretRotation {
    if (!this.secret) {
      throw new Error('Cannot add a single user rotation for a cluster without a secret.');
    }
    const id = 'RotationSingleUser';
    const existing = this.node.tryFindChild(id);
    if (existing) {
      throw new Error('A single user rotation was already added to this cluster.');
    }
    return new secretsmanager.SecretRotation(this, id, {
      ...applyDefaultRotationOptions(options, this.vpcSubnets),
      secret: this.secret,
      application: this.singleUserRotationApplication,
      vpc: this.vpc,
      target: this,
    });
  }

  /**
   * Adds the multi user rotation to this cluster.
   * See [Alternating users rotation strategy](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets_strategies.html#rotating-secrets-two-users)
   */
  public addRotationMultiUser(id: string, options: RotationMultiUserOptions): secretsmanager.SecretRotation {
    if (!this.secret) {
      throw new Error('Cannot add a multi user rotation for a cluster without a secret.');
    }
    return new secretsmanager.SecretRotation(this, id, {
      ...applyDefaultRotationOptions(options, this.vpcSubnets),
      secret: options.secret,
      masterSecret: this.secret,
      application: this.multiUserRotationApplication,
      vpc: this.vpc,
      target: this,
    });
  }
}
/**
 * Represents an imported database cluster.
 *
 * Attributes not supplied at import time are left undefined; the corresponding
 * getters throw a descriptive error on access instead of returning undefined.
 */
class ImportedDatabaseCluster extends DatabaseClusterBase implements IDatabaseCluster {
  public readonly clusterIdentifier: string;
  public readonly connections: ec2.Connections;
  public readonly engine?: IClusterEngine;

  private readonly _clusterResourceIdentifier?: string;
  private readonly _clusterEndpoint?: Endpoint;
  private readonly _clusterReadEndpoint?: Endpoint;
  private readonly _instanceIdentifiers?: string[];
  private readonly _instanceEndpoints?: Endpoint[];

  constructor(scope: Construct, id: string, attrs: DatabaseClusterAttributes) {
    super(scope, id);

    const port = attrs.port;

    this.clusterIdentifier = attrs.clusterIdentifier;
    this._clusterResourceIdentifier = attrs.clusterResourceIdentifier;
    this.engine = attrs.engine;
    this._instanceIdentifiers = attrs.instanceIdentifiers;

    this.connections = new ec2.Connections({
      securityGroups: attrs.securityGroups,
      defaultPort: port ? ec2.Port.tcp(port) : undefined,
    });

    // Endpoints can only be materialized when both an address and a port were provided.
    if (attrs.clusterEndpointAddress && port) {
      this._clusterEndpoint = new Endpoint(attrs.clusterEndpointAddress, port);
    }
    if (attrs.readerEndpointAddress && port) {
      this._clusterReadEndpoint = new Endpoint(attrs.readerEndpointAddress, port);
    }
    if (attrs.instanceEndpointAddresses && port) {
      this._instanceEndpoints = attrs.instanceEndpointAddresses.map(address => new Endpoint(address, port));
    }
  }

  public get clusterResourceIdentifier() {
    return this.requireAttr(this._clusterResourceIdentifier,
      'Cannot access `clusterResourceIdentifier` of an imported cluster without a clusterResourceIdentifier');
  }

  public get clusterEndpoint() {
    return this.requireAttr(this._clusterEndpoint,
      'Cannot access `clusterEndpoint` of an imported cluster without an endpoint address and port');
  }

  public get clusterReadEndpoint() {
    return this.requireAttr(this._clusterReadEndpoint,
      'Cannot access `clusterReadEndpoint` of an imported cluster without a readerEndpointAddress and port');
  }

  public get instanceIdentifiers() {
    return this.requireAttr(this._instanceIdentifiers,
      'Cannot access `instanceIdentifiers` of an imported cluster without provided instanceIdentifiers');
  }

  public get instanceEndpoints() {
    return this.requireAttr(this._instanceEndpoints,
      'Cannot access `instanceEndpoints` of an imported cluster without instanceEndpointAddresses and port');
  }

  /** Returns the attribute value when present, otherwise throws `message`. */
  private requireAttr<T>(value: T | undefined, message: string): T {
    if (!value) {
      throw new Error(message);
    }
    return value;
  }
}
/**
 * Properties for a new database cluster.
 *
 * Extends the shared base properties with the admin credentials used to
 * create the cluster's master user.
 */
export interface DatabaseClusterProps extends DatabaseClusterBaseProps {
  /**
   * Credentials for the administrative user.
   *
   * When omitted, a password is generated and stored in SecretsManager.
   *
   * @default - A username of 'admin' (or 'postgres' for PostgreSQL) and SecretsManager-generated password
   */
  readonly credentials?: Credentials;
}
/**
* Create a clustered database with a given number of instances.
*
* @resource AWS::RDS::DBCluster
*/
export class DatabaseCluster extends DatabaseClusterNew {
/**
* Import an existing DatabaseCluster from properties
*/
public static fromDatabaseClusterAttributes(scope: Construct, id: string, attrs: DatabaseClusterAttributes): IDatabaseCluster {
return new ImportedDatabaseCluster(scope, id, attrs);
}
public readonly clusterIdentifier: string;
public readonly clusterResourceIdentifier: string;
public readonly clusterEndpoint: Endpoint;
public readonly clusterReadEndpoint: Endpoint;
public readonly connections: ec2.Connections;
public readonly instanceIdentifiers: string[];
public readonly instanceEndpoints: Endpoint[];
/**
* The secret attached to this cluster
*/
public readonly secret?: secretsmanager.ISecret;
constructor(scope: Construct, id: string, props: DatabaseClusterProps) {
super(scope, id, props);
const credentials = renderCredentials(this, props.engine, props.credentials);
const secret = credentials.secret;
const cluster = new CfnDBCluster(this, 'Resource', {
...this.newCfnProps,
// Admin
masterUsername: credentials.username,
masterUserPassword: credentials.password?.unsafeUnwrap(),
});
this.clusterIdentifier = cluster.ref;
this.clusterResourceIdentifier = cluster.attrDbClusterResourceId;
if (secret) {
this.secret = secret.attach(this);
}
// create a number token that represents the port of the cluster
const portAttribute = Token.asNumber(cluster.attrEndpointPort);
this.clusterEndpoint = new Endpoint(cluster.attrEndpointAddress, portAttribute);
this.clusterReadEndpoint = new Endpoint(cluster.attrReadEndpointAddress, portAttribute);
this.connections = new ec2.Connections({
securityGroups: this.securityGroups,
defaultPort: ec2.Port.tcp(this.clusterEndpoint.port),
});
cluster.applyRemovalPolicy(props.removalPolicy ?? RemovalPolicy.SNAPSHOT);
setLogRetention(this, props);
if ((props.writer || props.readers) && (props.instances || props.instanceProps)) {
throw new Error('Cannot provide writer or readers if instances or instanceProps are provided');