diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5bcc00d5f..c63e99922 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,15 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/#semantic-versioning-200).
+## [3.0.0] - TBD
+
+### :bug: Fixed
+
+### :crab: Changed
+- Breaking Change: Remove suggested ClusterId functionality. For applications that use a single database cluster **no changes are required**. For applications that access multiple database clusters, all connection strings **should be** reviewed and a mandatory `clusterId` parameter **should be added**. ([PR #1570](https://github.com/aws/aws-advanced-jdbc-wrapper/pull/1570)).
+
+### :magic_wand: Added
+
## [2.6.5] - 2025-10-16
### :magic_wand: Added
diff --git a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md
index 26d0161e0..46bab7c64 100644
--- a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md
+++ b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md
@@ -5,9 +5,9 @@ The JDBC Wrapper also supports [connection pooling](./DataSource.md#Using-the-Aw
## Using the AWS JDBC Driver with plain RDS databases
It is possible to use the AWS JDBC Driver with plain RDS databases, but individual features may or may not be compatible. For example, failover handling and enhanced failure monitoring are not compatible with plain RDS databases and the relevant plugins must be disabled. Plugins can be enabled or disabled as seen in the [Connection Plugin Manager Parameters](#connection-plugin-manager-parameters) section. Please note that some plugins have been enabled by default. Plugin compatibility can be verified in the [plugins table](#list-of-available-plugins).
-## Using the AWS JDBC Driver with custom endpoints and other non-standard URLs
+## Using the AWS JDBC Driver to access multiple database clusters
> [!WARNING]\
-> If connecting using a non-standard RDS URL (e.g. a custom endpoint, ip address, rds proxy, or custom domain URL), the clusterId property must be set. If the `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. For more information, please see the [AWS Advanced JDBC Driver Parameters](https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#aws-advanced-jdbc-driver-parameters) section.
+> If connecting to multiple database clusters within a single application, each connection string must set the `clusterId` property. If the `clusterId` is omitted, you may experience various issues. For more information, please see the [AWS Advanced JDBC Driver Parameters](https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#aws-advanced-jdbc-driver-parameters) section.
## Wrapper Protocol
The AWS JDBC Driver uses the protocol prefix `jdbc:aws-wrapper:`. Internally, the JDBC Wrapper will replace this protocol prefix with `jdbc:`, making the final protocol `jdbc:aws-wrapper:{suffix}` where `suffix` is specific to the desired underlying protocol. For example, to connect to a PostgreSQL database, you would use the protocol `jdbc:aws-wrapper:postgresql:`, and inside the AWS JDBC Driver, the final protocol that will be used to connect to a database will be `jdbc:postgresql:`.
@@ -78,7 +78,7 @@ These parameters are applicable to any instance of the AWS JDBC Driver.
| Parameter | Value | Required | Description | Default Value |
|---------------------------------------------------|-----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|
-| `clusterId` | `String` | If connecting using a non-standard RDS URL (e.g. an IP address, custom endpoint, rds proxy, or custom domain URL): Yes
Otherwise: No
:warning:If `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. | None |
+| `clusterId` | `String` | If connecting to multiple database clusters within a single application: Yes
Otherwise: No
:warning:If `clusterId` is omitted, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. This parameter is optional and defaults to `1`. When supporting multiple database clusters, this parameter becomes mandatory. Each connection string must include the `clusterId` parameter with a value that can be any number or string. However, all connection strings associated with the same database cluster must use identical `clusterId` values, while connection strings belonging to different database clusters must specify distinct values. Example values: `1`, `2`, `1234`, `abc-1`, `abc-2`. | `1` |
| `wrapperLoggerLevel` | `String` | No | Logger level of the AWS JDBC Driver.
If it is used, it must be one of the following values: `OFF`, `SEVERE`, `WARNING`, `INFO`, `CONFIG`, `FINE`, `FINER`, `FINEST`, `ALL`. | `null` |
| `database` | `String` | No | Database name. | `null` |
| `user` | `String` | No | Database username. | `null` |
diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md
index 01c3db395..3f9a67daa 100644
--- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md
+++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailover2Plugin.md
@@ -55,17 +55,17 @@ Verify plugin compatibility within your driver configuration using the [compatib
### Failover Plugin v2 Configuration Parameters
In addition to the parameters that you can configure for the underlying driver, you can pass the following parameters for the AWS JDBC Driver through the connection URL to specify additional failover behavior.
-| Parameter | Value | Required | Description | Default Value |
-|---------------------------------------|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `failoverMode` | String | No | Defines a mode for failover process. Failover process may prioritize nodes with different roles and connect to them. Possible values:
- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.
If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. |
-| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes
Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.
Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`
Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. |
-| `clusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds when a cluster is not in failover. It refers to the regular, slow monitoring rate explained above. | `30000` |
-| `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` |
-| `clusterTopologyHighRefreshRateMs` | Integer | No | Interval of time in milliseconds to wait between attempts to update cluster topology after the writer has come back online following a failover event. It corresponds to the increased monitoring rate described earlier. Usually, the topology monitoring component uses this increased monitoring rate for 30s after a new writer was detected. | `100` |
-| `failoverReaderHostSelectorStrategy` | String | No | Strategy used to select a reader node during failover. For more information on the available reader selection strategies, see this [table](../ReaderSelectionStrategies.md). | `random` |
-| `clusterId` | `String` | If connecting using a non-standard RDS URL (e.g. an IP address, custom endpoint, rds proxy, or custom domain URL): Yes
Otherwise: No
:warning:If `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. | None |
-| `telemetryFailoverAdditionalTopTrace` | Boolean | No | Allows the driver to produce an additional telemetry span associated with failover. Such span helps to facilitate telemetry analysis in AWS CloudWatch. | `false` |
-| `skipFailoverOnInterruptedThread` | Boolean | No | Enable to skip failover if the current thread is interrupted. This may leave the Connection in an invalid state so the Connection should be disposed. | `false` |
+| Parameter | Value | Required | Description | Default Value |
+|---------------------------------------|:--------:|:----------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `failoverMode` | String | No | Defines a mode for failover process. Failover process may prioritize nodes with different roles and connect to them. Possible values:
- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.
If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. |
+| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes
Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.
Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`
Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. |
+| `clusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds when a cluster is not in failover. It refers to the regular, slow monitoring rate explained above. | `30000` |
+| `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` |
+| `clusterTopologyHighRefreshRateMs` | Integer | No | Interval of time in milliseconds to wait between attempts to update cluster topology after the writer has come back online following a failover event. It corresponds to the increased monitoring rate described earlier. Usually, the topology monitoring component uses this increased monitoring rate for 30s after a new writer was detected. | `100` |
+| `failoverReaderHostSelectorStrategy` | String | No | Strategy used to select a reader node during failover. For more information on the available reader selection strategies, see this [table](../ReaderSelectionStrategies.md). | `random` |
+| `clusterId`                           | `String` | If connecting to multiple database clusters within a single application: Yes
Otherwise: No
:warning:If `clusterId` is omitted, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. This parameter is optional and defaults to `1`. When supporting multiple database clusters, this parameter becomes mandatory. Each connection string must include the `clusterId` parameter with a value that can be any number or string. However, all connection strings associated with the same database cluster must use identical `clusterId` values, while connection strings belonging to different database clusters must specify distinct values. Example values: `1`, `2`, `1234`, `abc-1`, `abc-2`. | `1` |
+| `telemetryFailoverAdditionalTopTrace` | Boolean | No | Allows the driver to produce an additional telemetry span associated with failover. Such span helps to facilitate telemetry analysis in AWS CloudWatch. | `false` |
+| `skipFailoverOnInterruptedThread` | Boolean | No | Enable to skip failover if the current thread is interrupted. This may leave the Connection in an invalid state so the Connection should be disposed. | `false` |
diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md
index 73b246fb6..cd26f54d6 100644
--- a/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md
+++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheFailoverPlugin.md
@@ -23,20 +23,20 @@ Verify plugin compatibility within your driver configuration using the [compatib
### Failover Parameters
In addition to the parameters that you can configure for the underlying driver, you can pass the following parameters to the AWS JDBC Driver through the connection URL to specify additional failover behavior.
-| Parameter | Value | Required | Description | Default Value |
-|----------------------------------------|:--------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `failoverMode` | String | No | Defines a mode for failover process. Failover process may prioritize nodes with different roles and connect to them. Possible values:
- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.
If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. |
-| `clusterId` | `String` | If connecting using a non-standard RDS URL (e.g. an IP address, custom endpoint, rds proxy, or custom domain URL): Yes
Otherwise: No
:warning:If `clusterId` is omitted when using a non-standard RDS URL, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. | None |
-| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes
Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.
Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`
Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. |
-| `enableClusterAwareFailover` | Boolean | No | Set to `true` to enable the fast failover behavior offered by the AWS Advanced JDBC Driver. Set to `false` for simple JDBC connections that do not require fast failover functionality. | `true` |
-| `failoverClusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds during a writer failover process. During the writer failover process, cluster topology may be refreshed at a faster pace than normal to speed up discovery of the newly promoted writer. | `2000` |
-| `failoverReaderConnectTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt to connect to a reader instance during a reader failover process. | `30000` |
-| `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` |
-| `failoverWriterReconnectIntervalMs` | Integer | No | Interval of time in milliseconds to wait between attempts to reconnect to a failed writer during a writer failover process. | `2000` |
-| `enableConnectFailover` | Boolean | No | Enables/disables cluster-aware failover if the initial connection to the database fails due to a network exception. Note that this may result in a connection to a different instance in the cluster than was specified by the URL. | `false` |
-| `skipFailoverOnInterruptedThread` | Boolean | No | Enable to skip failover if the current thread is interrupted. This may leave the Connection in an invalid state so the Connection should be disposed. | `false` |
-| ~~`keepSessionStateOnFailover`~~ | Boolean | No | This parameter is no longer available. If specified, it will be ignored by the driver. See [Session State](../SessionState.md) for more details. | `false` |
-| ~~`enableFailoverStrictReader`~~ | Boolean | No | This parameter is no longer available and, if specified, it will be ignored by the driver. See `failoverMode` (`reader-or-writer` or `strict-reader`) for more details. | |
+| Parameter | Value | Required | Description | Default Value |
+|----------------------------------------|:--------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `failoverMode` | String | No | Defines a mode for failover process. Failover process may prioritize nodes with different roles and connect to them. Possible values:
- `strict-writer` - Failover process follows writer node and connects to a new writer when it changes.
- `reader-or-writer` - During failover, the driver tries to connect to any available/accessible reader node. If no reader is available, the driver will connect to a writer node. This logic mimics the logic of the Aurora read-only cluster endpoint.
- `strict-reader` - During failover, the driver tries to connect to any available reader node. If no reader is available, the driver raises an error. Reader failover to a writer node will only be allowed for single-node clusters. This logic mimics the logic of the Aurora read-only cluster endpoint.
If this parameter is omitted, default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. | Default value depends on connection url. For Aurora read-only cluster endpoint, it's set to `reader-or-writer`. Otherwise, it's `strict-writer`. |
+| `clusterId` | `String` | If connecting to multiple database clusters within a single application: Yes
Otherwise: No
:warning:If `clusterId` is omitted, you may experience various issues. | A unique identifier for the cluster. Connections with the same cluster id share a cluster topology cache. This parameter is optional and defaults to `1`. When supporting multiple database clusters, this parameter becomes mandatory. Each connection string must include the `clusterId` parameter with a value that can be any number or string. However, all connection strings associated with the same database cluster must use identical `clusterId` values, while connection strings belonging to different database clusters must specify distinct values. Example values: `1`, `2`, `1234`, `abc-1`, `abc-2`. | `1` |
+| `clusterInstanceHostPattern` | String | If connecting using an IP address or custom domain URL: Yes
Otherwise: No | This parameter is not required unless connecting to an AWS RDS cluster via an IP address or custom domain URL. In those cases, this parameter specifies the cluster instance DNS pattern that will be used to build a complete instance endpoint. A "?" character in this pattern should be used as a placeholder for the DB instance identifiers of the instances in the cluster. See [here](#host-pattern) for more information.
Example: `?.my-domain.com`, `any-subdomain.?.my-domain.com:9999`
Use case Example: If your cluster instance endpoints follow this pattern:`instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc. and you want your initial connection to be to `customHost:1234`, then your connection string should look like this: `jdbc:aws-wrapper:mysql://customHost:1234/test?clusterInstanceHostPattern=?.customHost` | If the provided connection string is not an IP address or custom domain, the JDBC Driver will automatically acquire the cluster instance host pattern from the customer-provided connection string. |
+| `enableClusterAwareFailover` | Boolean | No | Set to `true` to enable the fast failover behavior offered by the AWS Advanced JDBC Driver. Set to `false` for simple JDBC connections that do not require fast failover functionality. | `true` |
+| `failoverClusterTopologyRefreshRateMs` | Integer | No | Cluster topology refresh rate in milliseconds during a writer failover process. During the writer failover process, cluster topology may be refreshed at a faster pace than normal to speed up discovery of the newly promoted writer. | `2000` |
+| `failoverReaderConnectTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt to connect to a reader instance during a reader failover process. | `30000` |
+| `failoverTimeoutMs` | Integer | No | Maximum allowed time in milliseconds to attempt reconnecting to a new writer or reader instance after a cluster failover is initiated. | `300000` |
+| `failoverWriterReconnectIntervalMs` | Integer | No | Interval of time in milliseconds to wait between attempts to reconnect to a failed writer during a writer failover process. | `2000` |
+| `enableConnectFailover` | Boolean | No | Enables/disables cluster-aware failover if the initial connection to the database fails due to a network exception. Note that this may result in a connection to a different instance in the cluster than was specified by the URL. | `false` |
+| `skipFailoverOnInterruptedThread` | Boolean | No | Enable to skip failover if the current thread is interrupted. This may leave the Connection in an invalid state so the Connection should be disposed. | `false` |
+| ~~`keepSessionStateOnFailover`~~ | Boolean | No | This parameter is no longer available. If specified, it will be ignored by the driver. See [Session State](../SessionState.md) for more details. | `false` |
+| ~~`enableFailoverStrictReader`~~ | Boolean | No | This parameter is no longer available and, if specified, it will be ignored by the driver. See `failoverMode` (`reader-or-writer` or `strict-reader`) for more details. | |
## Host Pattern
When connecting to Aurora clusters, the [`clusterInstanceHostPattern`](#failover-parameters) parameter is required if the connection string does not provide enough information about the database cluster domain name. If the Aurora cluster endpoint is used directly, the AWS JDBC Driver will recognize the standard Aurora domain name and can re-build a proper Aurora instance name when needed. In cases where the connection string uses an IP address, a custom domain name, or localhost, the driver won't know how to build a proper domain name for a database instance endpoint. For example, if a custom domain was being used and the cluster instance endpoints followed a pattern of `instanceIdentifier1.customHost`, `instanceIdentifier2.customHost`, etc., the driver would need to know how to construct the instance endpoints using the specified custom domain. Since there isn't enough information from the custom domain alone to create the instance endpoints, you should set the `clusterInstanceHostPattern` to `?.customHost`, making the connection string `jdbc:aws-wrapper:postgresql://customHost:1234/test?clusterInstanceHostPattern=?.customHost`. Refer to [this diagram](../../images/failover_behavior.png) about AWS JDBC Driver behavior during failover for different connection URLs and more details and examples.
diff --git a/wrapper/src/main/java/software/amazon/jdbc/Driver.java b/wrapper/src/main/java/software/amazon/jdbc/Driver.java
index 7d59e83ff..8616e0ae7 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/Driver.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/Driver.java
@@ -423,10 +423,8 @@ public static void resetConnectionInitFunc() {
public static void clearCaches() {
CoreServicesContainer.getInstance().getStorageService().clearAll();
RdsUtils.clearCache();
- RdsHostListProvider.clearAll();
PluginServiceImpl.clearCache();
DialectManager.resetEndpointCache();
- MonitoringRdsHostListProvider.clearCache();
CustomEndpointMonitorImpl.clearCache();
OpenedConnectionTracker.clearCache();
AwsSecretsManagerCacheHolder.clearCache();
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java
index 738eebcc3..ea372e5a2 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java
@@ -70,10 +70,10 @@ public class RdsHostListProvider implements DynamicHostListProvider {
+ "after which it will be updated during the next interaction with the connection.");
public static final AwsWrapperProperty CLUSTER_ID = new AwsWrapperProperty(
- "clusterId", "",
+ "clusterId", "1",
"A unique identifier for the cluster. "
+ "Connections with the same cluster id share a cluster topology cache. "
- + "If unspecified, a cluster id is automatically created for AWS RDS clusters.");
+ + "If unspecified, the cluster id defaults to '1'.");
public static final AwsWrapperProperty CLUSTER_INSTANCE_HOST_PATTERN =
new AwsWrapperProperty(
@@ -88,11 +88,8 @@ public class RdsHostListProvider implements DynamicHostListProvider {
protected static final RdsUtils rdsHelper = new RdsUtils();
protected static final ConnectionUrlParser connectionUrlParser = new ConnectionUrlParser();
protected static final int defaultTopologyQueryTimeoutMs = 5000;
- protected static final long suggestedClusterIdRefreshRateNano = TimeUnit.MINUTES.toNanos(10);
- protected static final CacheMap suggestedPrimaryClusterIdCache = new CacheMap<>();
- protected static final CacheMap primaryClusterIdCache = new CacheMap<>();
-
protected final FullServicesContainer servicesContainer;
+
protected final HostListProviderService hostListProviderService;
protected final String originalUrl;
protected final String topologyQuery;
@@ -110,10 +107,6 @@ public class RdsHostListProvider implements DynamicHostListProvider {
protected String clusterId;
protected HostSpec clusterInstanceTemplate;
- // A primary clusterId is a clusterId that is based off of a cluster endpoint URL
- // (rather than a GUID or a value provided by the user).
- protected boolean isPrimaryClusterId;
-
protected volatile boolean isInitialized = false;
protected Properties properties;
@@ -160,8 +153,7 @@ protected void init() throws SQLException {
this.initialHostSpec = this.initialHostList.get(0);
this.hostListProviderService.setInitialConnectionHostSpec(this.initialHostSpec);
- this.clusterId = UUID.randomUUID().toString();
- this.isPrimaryClusterId = false;
+ this.clusterId = CLUSTER_ID.getString(this.properties);
this.refreshRateNano =
TimeUnit.MILLISECONDS.toNanos(CLUSTER_TOPOLOGY_REFRESH_RATE_MS.getInteger(properties));
@@ -182,34 +174,8 @@ protected void init() throws SQLException {
validateHostPatternSetting(this.clusterInstanceTemplate.getHost());
this.rdsUrlType = rdsHelper.identifyRdsType(this.initialHostSpec.getHost());
-
- final String clusterIdSetting = CLUSTER_ID.getString(this.properties);
- if (!StringUtils.isNullOrEmpty(clusterIdSetting)) {
- this.clusterId = clusterIdSetting;
- } else if (rdsUrlType == RdsUrlType.RDS_PROXY) {
- // Each proxy is associated with a single cluster, so it's safe to use RDS Proxy Url as cluster
- // identification
- this.clusterId = this.initialHostSpec.getUrl();
- } else if (rdsUrlType.isRds()) {
- final ClusterSuggestedResult clusterSuggestedResult =
- getSuggestedClusterId(this.initialHostSpec.getHostAndPort());
- if (clusterSuggestedResult != null && !StringUtils.isNullOrEmpty(clusterSuggestedResult.clusterId)) {
- this.clusterId = clusterSuggestedResult.clusterId;
- this.isPrimaryClusterId = clusterSuggestedResult.isPrimaryClusterId;
- } else {
- final String clusterRdsHostUrl =
- rdsHelper.getRdsClusterHostUrl(this.initialHostSpec.getHost());
- if (!StringUtils.isNullOrEmpty(clusterRdsHostUrl)) {
- this.clusterId = this.clusterInstanceTemplate.isPortSpecified()
- ? String.format("%s:%s", clusterRdsHostUrl, this.clusterInstanceTemplate.getPort())
- : clusterRdsHostUrl;
- this.isPrimaryClusterId = true;
- primaryClusterIdCache.put(this.clusterId, true, suggestedClusterIdRefreshRateNano);
- }
- }
- }
-
this.isInitialized = true;
+
} finally {
lock.unlock();
}
@@ -230,25 +196,8 @@ protected void init() throws SQLException {
protected FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException {
init();
- final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(this.clusterId);
-
- // Change clusterId by accepting a suggested one
- if (!StringUtils.isNullOrEmpty(suggestedPrimaryClusterId)
- && !this.clusterId.equals(suggestedPrimaryClusterId)) {
-
- final String oldClusterId = this.clusterId;
- this.clusterId = suggestedPrimaryClusterId;
- this.isPrimaryClusterId = true;
- this.clusterIdChanged(oldClusterId);
- }
-
final List storedHosts = this.getStoredTopology();
- // This clusterId is a primary one and is about to create a new entry in the cache.
- // When a primary entry is created it needs to be suggested for other (non-primary) entries.
- // Remember a flag to do suggestion after cache is updated.
- final boolean needToSuggest = storedHosts == null && this.isPrimaryClusterId;
-
if (storedHosts == null || forceUpdate) {
// need to re-fetch topology
@@ -264,9 +213,6 @@ protected FetchTopologyResult getTopology(final Connection conn, final boolean f
if (!Utils.isNullOrEmpty(hosts)) {
this.servicesContainer.getStorageService().set(this.clusterId, new Topology(hosts));
- if (needToSuggest) {
- this.suggestPrimaryCluster(hosts);
- }
return new FetchTopologyResult(false, hosts);
}
}
@@ -279,73 +225,6 @@ protected FetchTopologyResult getTopology(final Connection conn, final boolean f
}
}
- protected void clusterIdChanged(final String oldClusterId) throws SQLException {
- // do nothing
- }
-
- protected ClusterSuggestedResult getSuggestedClusterId(final String url) {
- Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class);
- if (entries == null) {
- return null;
- }
-
- for (final Entry entry : entries.entrySet()) {
- final String key = entry.getKey(); // clusterId
- final List hosts = entry.getValue().getHosts();
- final boolean isPrimaryCluster = primaryClusterIdCache.get(key, false,
- suggestedClusterIdRefreshRateNano);
- if (key.equals(url)) {
- return new ClusterSuggestedResult(url, isPrimaryCluster);
- }
- if (hosts == null) {
- continue;
- }
- for (final HostSpec host : hosts) {
- if (host.getHostAndPort().equals(url)) {
- LOGGER.finest(() -> Messages.get("RdsHostListProvider.suggestedClusterId",
- new Object[] {key, url}));
- return new ClusterSuggestedResult(key, isPrimaryCluster);
- }
- }
- }
- return null;
- }
-
- protected void suggestPrimaryCluster(final @NonNull List primaryClusterHosts) {
- if (Utils.isNullOrEmpty(primaryClusterHosts)) {
- return;
- }
-
- Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class);
- if (entries == null) {
- return;
- }
-
- for (final Entry entry : entries.entrySet()) {
- final String clusterId = entry.getKey();
- final List clusterHosts = entry.getValue().getHosts();
- final boolean isPrimaryCluster = primaryClusterIdCache.get(clusterId, false,
- suggestedClusterIdRefreshRateNano);
- final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(clusterId);
- if (isPrimaryCluster
- || !StringUtils.isNullOrEmpty(suggestedPrimaryClusterId)
- || Utils.isNullOrEmpty(clusterHosts)) {
- continue;
- }
-
- // The entry is non-primary
- for (final HostSpec host : clusterHosts) {
- if (Utils.containsHostAndPort(primaryClusterHosts, host.getHostAndPort())) {
- // Instance on this cluster matches with one of the instance on primary cluster
- // Suggest the primary clusterId to this entry
- suggestedPrimaryClusterIdCache.put(clusterId, this.clusterId,
- suggestedClusterIdRefreshRateNano);
- break;
- }
- }
- }
- }
-
/**
* Obtain a cluster topology from database.
*
@@ -503,14 +382,6 @@ protected String getHostEndpoint(final String nodeName) {
return topology == null ? null : topology.getHosts();
}
- /**
- * Clear topology cache for all clusters.
- */
- public static void clearAll() {
- primaryClusterIdCache.clear();
- suggestedPrimaryClusterIdCache.clear();
- }
-
/**
* Clear topology cache for the current cluster.
*/
@@ -667,15 +538,4 @@ public String getClusterId() throws UnsupportedOperationException, SQLException
init();
return this.clusterId;
}
-
- public static class ClusterSuggestedResult {
-
- public String clusterId;
- public boolean isPrimaryClusterId;
-
- public ClusterSuggestedResult(final String clusterId, final boolean isPrimaryClusterId) {
- this.clusterId = clusterId;
- this.isPrimaryClusterId = isPrimaryClusterId;
- }
- }
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java
index 73ea62399..3ec9a9a87 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java
@@ -28,8 +28,6 @@ public interface ClusterTopologyMonitor extends Monitor {
boolean canDispose();
- void setClusterId(final String clusterId);
-
List forceRefresh(final boolean writerImportant, final long timeoutMs)
throws SQLException, TimeoutException;
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java
index 92227de29..4ddfa0fd0 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java
@@ -156,11 +156,6 @@ public boolean canDispose() {
return true;
}
- @Override
- public void setClusterId(String clusterId) {
- this.clusterId = clusterId;
- }
-
@Override
public List forceRefresh(final boolean shouldVerifyWriter, final long timeoutMs)
throws SQLException, TimeoutException {
diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java
index 0bc4cf897..438e6fca6 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java
@@ -71,10 +71,6 @@ public MonitoringRdsHostListProvider(
CLUSTER_TOPOLOGY_HIGH_REFRESH_RATE_MS.getLong(this.properties));
}
- public static void clearCache() {
- clearAll();
- }
-
@Override
protected void init() throws SQLException {
super.init();
@@ -114,31 +110,6 @@ protected List queryForTopology(final Connection conn) throws SQLExcep
}
}
- @Override
- protected void clusterIdChanged(final String oldClusterId) throws SQLException {
- MonitorService monitorService = this.servicesContainer.getMonitorService();
- final ClusterTopologyMonitorImpl existingMonitor =
- monitorService.get(ClusterTopologyMonitorImpl.class, oldClusterId);
- if (existingMonitor != null) {
- this.servicesContainer.getMonitorService().runIfAbsent(
- ClusterTopologyMonitorImpl.class,
- this.clusterId,
- this.servicesContainer,
- this.properties,
- (servicesContainer) -> existingMonitor);
- assert monitorService.get(ClusterTopologyMonitorImpl.class, this.clusterId) == existingMonitor;
- existingMonitor.setClusterId(this.clusterId);
- monitorService.remove(ClusterTopologyMonitorImpl.class, oldClusterId);
- }
-
- final StorageService storageService = this.servicesContainer.getStorageService();
- final Topology existingTopology = storageService.get(Topology.class, oldClusterId);
- final List existingHosts = existingTopology == null ? null : existingTopology.getHosts();
- if (existingHosts != null) {
- storageService.set(this.clusterId, new Topology(existingHosts));
- }
- }
-
@Override
public List forceRefresh(final boolean shouldVerifyWriter, final long timeoutMs)
throws SQLException, TimeoutException {
diff --git a/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java b/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java
index c35f6b0f8..5303763f7 100644
--- a/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java
+++ b/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java
@@ -26,8 +26,4 @@ public TestAuroraHostListProvider(
FullServicesContainer servicesContainer, Properties properties, String originalUrl) {
super(properties, originalUrl, servicesContainer, "", "", "");
}
-
- public static void clearCache() {
- AuroraHostListProvider.clearAll();
- }
}
diff --git a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java
index 38087f2dc..e2b19303c 100644
--- a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java
+++ b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java
@@ -69,6 +69,7 @@
import software.amazon.jdbc.plugin.efm.HostMonitorThreadContainer;
import software.amazon.jdbc.plugin.efm2.HostMonitorServiceImpl;
import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException;
+import software.amazon.jdbc.util.CoreServicesContainer;
import software.amazon.jdbc.util.StringUtils;
@TestMethodOrder(MethodOrderer.MethodName.class)
@@ -686,7 +687,7 @@ private void ensureClusterHealthy() throws InterruptedException {
auroraUtil.makeSureInstancesUp(TimeUnit.MINUTES.toSeconds(5));
- TestAuroraHostListProvider.clearCache();
+ CoreServicesContainer.getInstance().getStorageService().clearAll();
TestPluginServiceImpl.clearHostAvailabilityCache();
HostMonitorThreadContainer.releaseInstance();
HostMonitorServiceImpl.closeAllMonitors();
diff --git a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java
index 73e0b338c..68e131917 100644
--- a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java
+++ b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java
@@ -149,8 +149,6 @@ public void test_FailureDetectionTime_EnhancedMonitoringEnabled(final String efm
OpenedConnectionTracker.clearCache();
HostMonitorThreadContainer.releaseInstance();
HostMonitorServiceImpl.closeAllMonitors();
- AuroraHostListProvider.clearAll();
- MonitoringRdsHostListProvider.clearCache();
enhancedFailureMonitoringPerfDataList.clear();
@@ -231,8 +229,6 @@ public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(final
OpenedConnectionTracker.clearCache();
HostMonitorThreadContainer.releaseInstance();
HostMonitorServiceImpl.closeAllMonitors();
- AuroraHostListProvider.clearAll();
- MonitoringRdsHostListProvider.clearCache();
failoverWithEfmPerfDataList.clear();
@@ -319,8 +315,6 @@ private void test_FailoverTime_SocketTimeout(final String plugins) throws IOExce
OpenedConnectionTracker.clearCache();
HostMonitorThreadContainer.releaseInstance();
HostMonitorServiceImpl.closeAllMonitors();
- AuroraHostListProvider.clearAll();
- MonitoringRdsHostListProvider.clearCache();
failoverWithSocketTimeoutPerfDataList.clear();
diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java
index 797d151be..cc337d2a3 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java
@@ -17,20 +17,16 @@
package software.amazon.jdbc.hostlistprovider;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atMostOnce;
-import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -109,7 +105,6 @@ void setUp() throws SQLException {
@AfterEach
void tearDown() throws Exception {
- RdsHostListProvider.clearAll();
storageService.clearAll();
closeable.close();
}
@@ -234,8 +229,7 @@ void testGetCachedTopology_returnStoredTopology() throws SQLException {
}
@Test
- void testTopologyCache_NoSuggestedClusterId() throws SQLException {
- RdsHostListProvider.clearAll();
+ void testTopologyCache() throws SQLException {
RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.domain.com/"));
provider1.init();
@@ -256,8 +250,7 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException {
assertEquals(topologyClusterA, topologyProvider1);
RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-b.domain.com/"));
- provider2.init();
- assertNull(provider2.getStoredTopology());
+ assertNotNull(provider2.getStoredTopology());
final List topologyClusterB = Arrays.asList(
new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
@@ -268,162 +261,15 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException {
.host("instance-b-3.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build());
doReturn(topologyClusterB).when(provider2).queryForTopology(any(Connection.class));
- final List topologyProvider2 = provider2.refresh(mock(Connection.class));
- assertEquals(topologyClusterB, topologyProvider2);
-
- assertEquals(2, storageService.size(Topology.class));
- }
-
- @Test
- void testTopologyCache_SuggestedClusterIdForRds() throws SQLException {
- RdsHostListProvider.clearAll();
-
- RdsHostListProvider provider1 =
- Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider1.init();
- final List topologyClusterA = Arrays.asList(
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.WRITER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build());
-
- doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
-
- assertEquals(0, storageService.size(Topology.class));
-
- final List topologyProvider1 = provider1.refresh(mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
-
- RdsHostListProvider provider2 =
- Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider2.init();
-
- assertEquals(provider1.clusterId, provider2.clusterId);
- assertTrue(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
-
- final List topologyProvider2 = provider2.refresh(mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider2);
-
- assertEquals(1, storageService.size(Topology.class));
- }
-
- @Test
- void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException {
- RdsHostListProvider.clearAll();
-
- RdsHostListProvider provider1 =
- Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider1.init();
- final List topologyClusterA = Arrays.asList(
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.WRITER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build());
-
- doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
-
- assertEquals(0, storageService.size(Topology.class));
+ List topologyProvider2 = provider2.refresh(mock(Connection.class));
+ assertNotEquals(topologyClusterB, topologyProvider2);
- final List topologyProvider1 = provider1.refresh(mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
-
- RdsHostListProvider provider2 =
- Mockito.spy(getRdsHostListProvider("jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/"));
- provider2.init();
-
- assertEquals(provider1.clusterId, provider2.clusterId);
- assertTrue(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
-
- final List topologyProvider2 = provider2.refresh(mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider2);
+ topologyProvider2 = provider2.forceRefresh(mock(Connection.class));
+ assertEquals(topologyClusterB, topologyProvider2);
assertEquals(1, storageService.size(Topology.class));
}
- @Test
- void testTopologyCache_AcceptSuggestion() throws SQLException {
- RdsHostListProvider.clearAll();
-
- RdsHostListProvider provider1 =
- Mockito.spy(getRdsHostListProvider("jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/"));
- provider1.init();
- final List topologyClusterA = Arrays.asList(
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.WRITER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build());
-
- doAnswer(a -> topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
-
- assertEquals(0, storageService.size(Topology.class));
-
- List topologyProvider1 = provider1.refresh(mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
-
- // RdsHostListProvider.logCache();
-
- RdsHostListProvider provider2 =
- Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider2.init();
-
- doAnswer(a -> topologyClusterA).when(provider2).queryForTopology(any(Connection.class));
-
- final List topologyProvider2 = provider2.refresh(mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider2);
-
- assertNotEquals(provider1.clusterId, provider2.clusterId);
- assertFalse(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
- assertEquals(2, storageService.size(Topology.class));
- assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com",
- RdsHostListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId));
-
- // RdsHostListProvider.logCache();
-
- topologyProvider1 = provider1.forceRefresh(mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
- assertEquals(provider1.clusterId, provider2.clusterId);
- assertTrue(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
-
- // RdsHostListProvider.logCache();
- }
-
@Test
void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException {
rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url"));
@@ -605,25 +451,4 @@ void testGetTopology_returnsLatestWriter() throws SQLException {
assertEquals(expectedWriterHost.getHost(), result.hosts.get(0).getHost());
}
-
- @Test
- void testClusterUrlUsedAsDefaultClusterId() throws SQLException {
- String readerClusterUrl = "mycluster.cluster-ro-XYZ.us-east-1.rds.amazonaws.com";
- String expectedClusterId = "mycluster.cluster-XYZ.us-east-1.rds.amazonaws.com:1234";
- String connectionString = "jdbc:someprotocol://" + readerClusterUrl + ":1234/test";
- RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider(connectionString));
- assertEquals(expectedClusterId, provider1.getClusterId());
-
- List mockTopology =
- Collections.singletonList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("host").build());
- doReturn(mockTopology).when(provider1).queryForTopology(any(Connection.class));
- provider1.refresh();
- assertEquals(mockTopology, provider1.getStoredTopology());
- verify(provider1, times(1)).queryForTopology(mockConnection);
-
- RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider(connectionString));
- assertEquals(expectedClusterId, provider2.getClusterId());
- assertEquals(mockTopology, provider2.getStoredTopology());
- verify(provider2, never()).queryForTopology(mockConnection);
- }
}
diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java
index df6d6ee50..5c0343487 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java
@@ -17,16 +17,13 @@
package software.amazon.jdbc.hostlistprovider;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.atMostOnce;
-import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
@@ -102,7 +99,6 @@ void setUp() throws SQLException {
@AfterEach
void tearDown() throws Exception {
- RdsMultiAzDbClusterListProvider.clearAll();
storageService.clearAll();
closeable.close();
}
@@ -118,7 +114,7 @@ private RdsMultiAzDbClusterListProvider getRdsMazDbClusterHostListProvider(Strin
"fang",
"li");
provider.init();
- // provider.clusterId = "cluster-id";
+ // provider.clusterId = "1";
return provider;
}
@@ -205,8 +201,7 @@ void testGetCachedTopology_returnCachedTopology() throws SQLException {
}
@Test
- void testTopologyCache_NoSuggestedClusterId() throws SQLException {
- RdsMultiAzDbClusterListProvider.clearAll();
+ void testTopologyCache() throws SQLException {
RdsMultiAzDbClusterListProvider provider1 =
Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:something://cluster-a.domain.com/"));
@@ -229,8 +224,7 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException {
RdsMultiAzDbClusterListProvider provider2 =
Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:something://cluster-b.domain.com/"));
- provider2.init();
- assertNull(provider2.getStoredTopology());
+ assertNotNull(provider2.getStoredTopology());
final List topologyClusterB = Arrays.asList(
new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
@@ -241,168 +235,15 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException {
.host("instance-b-3.domain.com").port(HostSpec.NO_PORT).role(HostRole.READER).build());
doReturn(topologyClusterB).when(provider2).queryForTopology(any(Connection.class));
- final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterB, topologyProvider2);
-
- assertEquals(2, storageService.size(Topology.class));
- }
-
- @Test
- void testTopologyCache_SuggestedClusterIdForRds() throws SQLException {
- RdsMultiAzDbClusterListProvider.clearAll();
-
- RdsMultiAzDbClusterListProvider provider1 =
- Mockito.spy(getRdsMazDbClusterHostListProvider(
- "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider1.init();
- final List topologyClusterA = Arrays.asList(
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.WRITER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build());
-
- doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
-
- assertEquals(0, storageService.size(Topology.class));
-
- final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
-
- RdsMultiAzDbClusterListProvider provider2 =
- Mockito.spy(getRdsMazDbClusterHostListProvider(
- "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider2.init();
-
- assertEquals(provider1.clusterId, provider2.clusterId);
- assertTrue(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
-
- final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider2);
-
- assertEquals(1, storageService.size(Topology.class));
- }
-
- @Test
- void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException {
- RdsMultiAzDbClusterListProvider.clearAll();
+ List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
+ assertNotEquals(topologyClusterB, topologyProvider2);
- RdsMultiAzDbClusterListProvider provider1 =
- Mockito.spy(getRdsMazDbClusterHostListProvider(
- "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider1.init();
- final List topologyClusterA = Arrays.asList(
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.WRITER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build());
-
- doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
-
- assertEquals(0, storageService.size(Topology.class));
-
- final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
-
- RdsMultiAzDbClusterListProvider provider2 =
- Mockito.spy(getRdsMazDbClusterHostListProvider(
- "jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/"));
- provider2.init();
-
- assertEquals(provider1.clusterId, provider2.clusterId);
- assertTrue(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
-
- final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider2);
+ topologyProvider2 = provider2.forceRefresh(Mockito.mock(Connection.class));
+ assertEquals(topologyClusterB, topologyProvider2);
assertEquals(1, storageService.size(Topology.class));
}
- @Test
- void testTopologyCache_AcceptSuggestion() throws SQLException {
- RdsMultiAzDbClusterListProvider.clearAll();
-
- RdsMultiAzDbClusterListProvider provider1 =
- Mockito.spy(getRdsMazDbClusterHostListProvider(
- "jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/"));
- provider1.init();
- final List topologyClusterA = Arrays.asList(
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-1.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.WRITER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-2.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build(),
- new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
- .host("instance-a-3.xyz.us-east-2.rds.amazonaws.com")
- .port(HostSpec.NO_PORT)
- .role(HostRole.READER)
- .build());
-
- doAnswer(a -> topologyClusterA).when(provider1).queryForTopology(any(Connection.class));
-
- assertEquals(0, storageService.size(Topology.class));
-
- List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
-
- // RdsMultiAzDbClusterListProvider.logCache();
-
- RdsMultiAzDbClusterListProvider provider2 =
- Mockito.spy(getRdsMazDbClusterHostListProvider(
- "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/"));
- provider2.init();
-
- doAnswer(a -> topologyClusterA).when(provider2).queryForTopology(any(Connection.class));
-
- final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider2);
-
- assertNotEquals(provider1.clusterId, provider2.clusterId);
- assertFalse(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
- assertEquals(2, storageService.size(Topology.class));
- assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com",
- RdsMultiAzDbClusterListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId));
-
- // RdsMultiAzDbClusterListProvider.logCache();
-
- topologyProvider1 = provider1.forceRefresh(Mockito.mock(Connection.class));
- assertEquals(topologyClusterA, topologyProvider1);
- assertEquals(provider1.clusterId, provider2.clusterId);
- assertTrue(provider1.isPrimaryClusterId);
- assertTrue(provider2.isPrimaryClusterId);
-
- // RdsMultiAzDbClusterListProvider.logCache();
- }
-
@Test
void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException {
rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url"));