diff --git a/.github/workflows/mysql_advanced_performance.yml b/.github/workflows/mysql_advanced_performance.yml
new file mode 100644
index 000000000..8297abf14
--- /dev/null
+++ b/.github/workflows/mysql_advanced_performance.yml
@@ -0,0 +1,55 @@
+name: Run Aurora MySQL Advanced Performance Tests
+
+on:
+ workflow_dispatch:
+
+jobs:
+ aurora-mysql-performance-tests:
+ concurrency: AdvancedPerformanceTests-Aurora
+ name: 'Run Aurora MySQL container advanced performance tests'
+ runs-on: ubuntu-latest
+ steps:
+ - name: 'Clone repository'
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 50
+ - name: 'Set up JDK 8'
+ uses: actions/setup-java@v3
+ with:
+ distribution: 'corretto'
+ java-version: 8
+ - name: 'Configure AWS credentials'
+ uses: aws-actions/configure-aws-credentials@v4
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
+ - name: 'Set up temp AWS credentials'
+ run: |
+ creds=($(aws sts get-session-token \
+ --duration-seconds 21600 \
+ --query 'Credentials.[AccessKeyId, SecretAccessKey, SessionToken]' \
+ --output text \
+ | xargs));
+ echo "::add-mask::${creds[0]}"
+ echo "::add-mask::${creds[1]}"
+ echo "::add-mask::${creds[2]}"
+ echo "TEMP_AWS_ACCESS_KEY_ID=${creds[0]}" >> $GITHUB_ENV
+ echo "TEMP_AWS_SECRET_ACCESS_KEY=${creds[1]}" >> $GITHUB_ENV
+ echo "TEMP_AWS_SESSION_TOKEN=${creds[2]}" >> $GITHUB_ENV
+ - name: 'Run performance tests (OpenJDK)'
+ run: |
+ ./gradlew --no-parallel --no-daemon test-aurora-mysql-advanced-performance
+ env:
+ AURORA_CLUSTER_DOMAIN: ${{ secrets.DB_CONN_SUFFIX }}
+ AURORA_DB_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_ACCESS_KEY_ID: ${{ env.TEMP_AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ env.TEMP_AWS_SECRET_ACCESS_KEY }}
+ AWS_SESSION_TOKEN: ${{ env.TEMP_AWS_SESSION_TOKEN }}
+ - name: 'Archive Performance Results'
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: 'performance-results'
+ path: ./wrapper/build/reports/tests/
+ retention-days: 5
diff --git a/.github/workflows/mysql_performance.yml b/.github/workflows/mysql_performance.yml
index b43d6c770..dfe8ffd67 100644
--- a/.github/workflows/mysql_performance.yml
+++ b/.github/workflows/mysql_performance.yml
@@ -19,7 +19,7 @@ jobs:
distribution: 'corretto'
java-version: 8
- name: 'Configure AWS credentials'
- uses: aws-actions/configure-aws-credentials@v1
+ uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -48,8 +48,8 @@ jobs:
AWS_SESSION_TOKEN: ${{ env.TEMP_AWS_SESSION_TOKEN }}
- name: 'Archive Performance Results'
if: always()
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
- name: 'junit-report-performance'
+ name: 'performance-results'
path: ./wrapper/build/reports/tests/
retention-days: 5
diff --git a/.github/workflows/pg_advanced_performance.yml b/.github/workflows/pg_advanced_performance.yml
new file mode 100644
index 000000000..61473b2e8
--- /dev/null
+++ b/.github/workflows/pg_advanced_performance.yml
@@ -0,0 +1,55 @@
+name: Run Aurora Postgres Advanced Performance Tests
+
+on:
+ workflow_dispatch:
+
+jobs:
+ aurora-postgres-performance-tests:
+ concurrency: AdvancedPerformanceTests-Aurora
+ name: 'Run Aurora Postgres container advanced performance tests'
+ runs-on: ubuntu-latest
+ steps:
+ - name: 'Clone repository'
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 50
+ - name: 'Set up JDK 8'
+ uses: actions/setup-java@v3
+ with:
+ distribution: 'corretto'
+ java-version: 8
+ - name: 'Configure AWS credentials'
+ uses: aws-actions/configure-aws-credentials@v4
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
+ - name: 'Set up temp AWS credentials'
+ run: |
+ creds=($(aws sts get-session-token \
+ --duration-seconds 21600 \
+ --query 'Credentials.[AccessKeyId, SecretAccessKey, SessionToken]' \
+ --output text \
+ | xargs));
+ echo "::add-mask::${creds[0]}"
+ echo "::add-mask::${creds[1]}"
+ echo "::add-mask::${creds[2]}"
+ echo "TEMP_AWS_ACCESS_KEY_ID=${creds[0]}" >> $GITHUB_ENV
+ echo "TEMP_AWS_SECRET_ACCESS_KEY=${creds[1]}" >> $GITHUB_ENV
+ echo "TEMP_AWS_SESSION_TOKEN=${creds[2]}" >> $GITHUB_ENV
+ - name: 'Run performance tests (OpenJDK)'
+ run: |
+ ./gradlew --no-parallel --no-daemon test-aurora-pg-advanced-performance
+ env:
+ AURORA_CLUSTER_DOMAIN: ${{ secrets.DB_CONN_SUFFIX }}
+ AURORA_DB_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_ACCESS_KEY_ID: ${{ env.TEMP_AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ env.TEMP_AWS_SECRET_ACCESS_KEY }}
+ AWS_SESSION_TOKEN: ${{ env.TEMP_AWS_SESSION_TOKEN }}
+ - name: 'Archive Performance Results'
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: 'performance-results'
+ path: ./wrapper/build/reports/tests/
+ retention-days: 5
diff --git a/.github/workflows/pg_performance.yml b/.github/workflows/pg_performance.yml
index 8534b91a8..a94954f0e 100644
--- a/.github/workflows/pg_performance.yml
+++ b/.github/workflows/pg_performance.yml
@@ -19,7 +19,7 @@ jobs:
distribution: 'corretto'
java-version: 8
- name: 'Configure AWS credentials'
- uses: aws-actions/configure-aws-credentials@v1
+ uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -48,8 +48,8 @@ jobs:
AWS_SESSION_TOKEN: ${{ env.TEMP_AWS_SESSION_TOKEN }}
- name: 'Archive Performance Results'
if: always()
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
- name: 'junit-report-performance'
+ name: 'performance-results'
path: ./wrapper/build/reports/tests/
retention-days: 5
diff --git a/.github/workflows/run-autoscaling-tests.yml b/.github/workflows/run-autoscaling-tests.yml
index 5f4528649..d2baed3d0 100644
--- a/.github/workflows/run-autoscaling-tests.yml
+++ b/.github/workflows/run-autoscaling-tests.yml
@@ -22,7 +22,7 @@ jobs:
distribution: 'corretto'
java-version: 8
- name: 'Configure AWS credentials'
- uses: aws-actions/configure-aws-credentials@v1
+ uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
diff --git a/.github/workflows/run-integration-tests.yml b/.github/workflows/run-integration-tests.yml
index 807a1ac47..883a7f7a1 100644
--- a/.github/workflows/run-integration-tests.yml
+++ b/.github/workflows/run-integration-tests.yml
@@ -25,7 +25,7 @@ jobs:
distribution: 'corretto'
java-version: 8
- name: 'Configure AWS credentials'
- uses: aws-actions/configure-aws-credentials@v1
+ uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
diff --git a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md
index d8494e5dd..1878ff188 100644
--- a/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md
+++ b/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md
@@ -157,20 +157,21 @@ ConnectionProviderManager.setConnectionInitFunc((connection, protocol, hostSpec,
### List of Available Plugins
The AWS JDBC Driver has several built-in plugins that are available to use. Please visit the individual plugin page for more details.
-| Plugin name | Plugin Code | Database Compatibility | Description | Additional Required Dependencies |
-|------------------------------------------------------------------------------------------------|---------------------------|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [Failover Connection Plugin](./using-plugins/UsingTheFailoverPlugin.md) | `failover` | Aurora, RDS Multi-AZ DB Cluster | Enables the failover functionality supported by Amazon Aurora clusters and RDS Multi-AZ DB clusters. Prevents opening a wrong connection to an old writer node dues to stale DNS after failover event. This plugin is enabled by default. | None |
-| [Host Monitoring Connection Plugin](./using-plugins/UsingTheHostMonitoringPlugin.md) | `efm` | Aurora, RDS Multi-AZ DB Cluster | Enables enhanced host connection failure monitoring, allowing faster failure detection rates. This plugin is enabled by default. | None |
-| Data Cache Connection Plugin | `dataCache` | Any database | Caches results from SQL queries matching the regular expression specified in the `dataCacheTriggerCondition` configuration parameter. | None |
-| Execution Time Connection Plugin | `executionTime` | Any database | Logs the time taken to execute any JDBC method. | None |
-| Log Query Connection Plugin | `logQuery` | Any database | Tracks and logs the SQL statements to be executed. Sometimes SQL statements are not passed directly to the JDBC method as a parameter, such as [executeBatch()](https://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html#executeBatch--). Users can set `enhancedLogQueryEnabled` to `true`, allowing the JDBC Wrapper to obtain SQL statements via Java Reflection. <br>:warning:**Note:** Enabling Java Reflection may cause a performance degradation. | None |
-| [IAM Authentication Connection Plugin](./using-plugins/UsingTheIamAuthenticationPlugin.md) | `iam` | Any database | Enables users to connect to their Amazon Aurora clusters using AWS Identity and Access Management (IAM). | [AWS Java SDK RDS v2.x](https://central.sonatype.com/artifact/software.amazon.awssdk/rds) |
-| [AWS Secrets Manager Connection Plugin](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | `awsSecretsManager` | Any database | Enables fetching database credentials from the AWS Secrets Manager service. | [Jackson Databind](https://central.sonatype.com/artifact/com.fasterxml.jackson.core/jackson-databind) <br>[AWS Secrets Manager](https://central.sonatype.com/artifact/software.amazon.awssdk/secretsmanager) |
-| Aurora Stale DNS Plugin | `auroraStaleDns` | Aurora | Prevents incorrectly opening a new connection to an old writer node when DNS records have not yet updated after a recent failover event. <br>:warning:**Note:** Contrary to `failover` plugin, `auroraStaleDns` plugin doesn't implement failover support itself. It helps to eliminate opening wrong connections to an old writer node after cluster failover is completed. <br>:warning:**Note:** This logic is already included in `failover` plugin so you can omit using both plugins at the same time. | None |
-| [Aurora Connection Tracker Plugin](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | `auroraConnectionTracker` | Aurora, RDS Multi-AZ DB Cluster | Tracks all the opened connections. In the event of a cluster failover, the plugin will close all the impacted connections to the node. This plugin is enabled by default. | None |
-| [Driver Metadata Connection Plugin](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | `driverMetaData` | Any database | Allows user application to override the return value of `DatabaseMetaData#getDriverName` | None |
-| [Read Write Splitting Plugin](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | `readWriteSplitting` | Aurora | Enables read write splitting functionality where users can switch between database reader and writer instances. | None |
-| [Developer Plugin](./using-plugins/UsingTheDeveloperPlugin.md) | `dev` | Any database | Helps developers test various everyday scenarios including rare events like network outages and database cluster failover. The plugin allows injecting and raising an expected exception, then verifying how applications handle it. | None |
+| Plugin name | Plugin Code | Database Compatibility | Description | Additional Required Dependencies |
+|-------------------------------------------------------------------------------------------------------------------|---------------------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [Failover Connection Plugin](./using-plugins/UsingTheFailoverPlugin.md) | `failover` | Aurora, RDS Multi-AZ DB Cluster | Enables the failover functionality supported by Amazon Aurora clusters and RDS Multi-AZ DB clusters. Prevents opening a wrong connection to an old writer node due to stale DNS after a failover event. This plugin is enabled by default. | None |
+| [Host Monitoring Connection Plugin](./using-plugins/UsingTheHostMonitoringPlugin.md) | `efm` | Aurora, RDS Multi-AZ DB Cluster | Enables enhanced host connection failure monitoring, allowing faster failure detection rates. This plugin is enabled by default. | None |
+| [Host Monitoring Connection Plugin v2](./using-plugins/UsingTheHostMonitoringPlugin.md#host-monitoring-plugin-v2) | `efm2` | Aurora, RDS Multi-AZ DB Cluster | Enables enhanced host connection failure monitoring, allowing faster failure detection rates. This plugin is an alternative implementation for host health status monitoring. It is functionally the same as the `efm` plugin and uses the same configuration parameters. This plugin is experimental. | None |
+| Data Cache Connection Plugin | `dataCache` | Any database | Caches results from SQL queries matching the regular expression specified in the `dataCacheTriggerCondition` configuration parameter. | None |
+| Execution Time Connection Plugin | `executionTime` | Any database | Logs the time taken to execute any JDBC method. | None |
+| Log Query Connection Plugin | `logQuery` | Any database | Tracks and logs the SQL statements to be executed. Sometimes SQL statements are not passed directly to the JDBC method as a parameter, such as [executeBatch()](https://docs.oracle.com/javase/8/docs/api/java/sql/Statement.html#executeBatch--). Users can set `enhancedLogQueryEnabled` to `true`, allowing the JDBC Wrapper to obtain SQL statements via Java Reflection. <br>:warning:**Note:** Enabling Java Reflection may cause a performance degradation. | None |
+| [IAM Authentication Connection Plugin](./using-plugins/UsingTheIamAuthenticationPlugin.md) | `iam` | Any database | Enables users to connect to their Amazon Aurora clusters using AWS Identity and Access Management (IAM). | [AWS Java SDK RDS v2.x](https://central.sonatype.com/artifact/software.amazon.awssdk/rds) |
+| [AWS Secrets Manager Connection Plugin](./using-plugins/UsingTheAwsSecretsManagerPlugin.md) | `awsSecretsManager` | Any database | Enables fetching database credentials from the AWS Secrets Manager service. | [Jackson Databind](https://central.sonatype.com/artifact/com.fasterxml.jackson.core/jackson-databind) <br>[AWS Secrets Manager](https://central.sonatype.com/artifact/software.amazon.awssdk/secretsmanager) |
+| Aurora Stale DNS Plugin | `auroraStaleDns` | Aurora | Prevents incorrectly opening a new connection to an old writer node when DNS records have not yet updated after a recent failover event. <br>:warning:**Note:** Contrary to the `failover` plugin, the `auroraStaleDns` plugin doesn't implement failover support itself; it helps to avoid opening wrong connections to an old writer node after a cluster failover is completed. <br>:warning:**Note:** This logic is already included in the `failover` plugin, so there is no need to use both plugins at the same time. | None |
+| [Aurora Connection Tracker Plugin](./using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) | `auroraConnectionTracker` | Aurora, RDS Multi-AZ DB Cluster | Tracks all the opened connections. In the event of a cluster failover, the plugin will close all the impacted connections to the node. This plugin is enabled by default. | None |
+| [Driver Metadata Connection Plugin](./using-plugins/UsingTheDriverMetadataConnectionPlugin.md) | `driverMetaData` | Any database | Allows a user application to override the return value of `DatabaseMetaData#getDriverName`. | None |
+| [Read Write Splitting Plugin](./using-plugins/UsingTheReadWriteSplittingPlugin.md) | `readWriteSplitting` | Aurora | Enables read write splitting functionality where users can switch between database reader and writer instances. | None |
+| [Developer Plugin](./using-plugins/UsingTheDeveloperPlugin.md) | `dev` | Any database | Helps developers test various everyday scenarios including rare events like network outages and database cluster failover. The plugin allows injecting and raising an expected exception, then verifying how applications handle it. | None |
:exclamation: **NOTE**: As an enhancement, the wrapper is now able to automatically set the Aurora host list provider for connections to Aurora MySQL and Aurora PostgreSQL databases.
Aurora Host List Connection Plugin is deprecated. If you were using the Aurora Host List Connection Plugin, you can simply remove the plugin from the `wrapperPlugins` parameter.
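For reference, plugins are selected through the `wrapperPlugins` connection parameter mentioned above. The following is a minimal sketch, assuming placeholder credentials and an illustrative cluster endpoint, of how the new `efm2` plugin code from the table could be enabled together with `failover`:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class PluginSelectionExample {
  public static void main(String[] args) throws Exception {
    final Properties props = new Properties();
    props.setProperty("user", "admin");         // placeholder credentials
    props.setProperty("password", "password");

    // Replace the default `efm` entry with the experimental `efm2` plugin.
    props.setProperty("wrapperPlugins", "failover,efm2");

    try (Connection conn = DriverManager.getConnection(
        "jdbc:aws-wrapper:postgresql://database.cluster-xyz.us-east-1.rds.amazonaws.com:5432/postgres",
        props)) {
      System.out.println("Connected: " + conn.isValid(5));
    }
  }
}
```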
diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md
index e52f3d6a2..c658088c9 100644
--- a/docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md
+++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md
@@ -72,3 +72,21 @@ properties.setProperty("monitoring-socketTimeout", "10");
> We recommend you either disable the Host Monitoring Connection Plugin or avoid using RDS Proxy endpoints when the Host Monitoring Connection Plugin is active.
>
> Although using RDS Proxy endpoints with the AWS Advanced JDBC Driver with Enhanced Failure Monitoring doesn't cause any critical issues, we don't recommend this approach. The main reason is that RDS Proxy transparently re-routes requests to a single database instance. RDS Proxy decides which database instance is used based on many criteria (on a per-request basis). Switching between different instances makes the Host Monitoring Connection Plugin useless in terms of instance health monitoring because the plugin will be unable to identify which instance it's connected to, and which one it's monitoring. This could result in false positive failure detections. At the same time, the plugin will still proactively monitor network connectivity to RDS Proxy endpoints and report outages back to a user application if they occur.
+
+# **Experimental** Host Monitoring Plugin v2
+
+> [!WARNING] This plugin is experimental, and users should test it before using it in a production environment.
+
+Host Monitoring Plugin v2, also known as `efm2`, is an alternative implementation of enhanced failure monitoring that is functionally equivalent to the Host Monitoring Plugin described above. Both plugins share the same set of [configuration parameters](#enhanced-failure-monitoring-parameters), and the `efm2` plugin is designed to be a drop-in replacement for the `efm` plugin.
+The `efm2` plugin can be used in any scenario where the `efm` plugin is mentioned; a configuration sketch is shown at the end of this section.
+
+> [!NOTE] Because these are two separate plugins, users could enable both of them on a single connection. While this should not have any negative side effects, it is not recommended; use either the `efm` plugin or the `efm2` plugin, but not both.
+
+
+The `efm2` plugin is designed to address [some of the issues](https://github.com/awslabs/aws-advanced-jdbc-wrapper/issues/675) that have been reported by multiple users. The following changes have been made:
+- Used weak references to ease garbage collection
+- Split monitoring logic into two separate threads to increase overall monitoring stability
+- Reviewed locks for monitoring context
+- Reviewed and redesigned stopping of idle monitoring threads
+- Reviewed and simplified monitoring logic
+
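As a concrete illustration of the drop-in behavior described above, the following sketch switches an application from `efm` to `efm2` while reusing the same Enhanced Failure Monitoring parameters; the endpoint and credentials are placeholders, and the parameter values simply mirror the plugin defaults shown in this patch:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Properties;

public class Efm2MigrationExample {
  public static void main(String[] args) throws Exception {
    final Properties props = new Properties();
    props.setProperty("user", "admin");        // placeholder credentials
    props.setProperty("password", "password");

    // Only the plugin code changes: "failover,efm" becomes "failover,efm2".
    props.setProperty("wrapperPlugins", "failover,efm2");

    // Both efm and efm2 read the same failure detection parameters.
    props.setProperty("failureDetectionTime", "30000");     // ms before the first probe
    props.setProperty("failureDetectionInterval", "5000");  // ms between probes
    props.setProperty("failureDetectionCount", "3");        // failed probes before a node is considered unhealthy
    props.setProperty("monitoring-socketTimeout", "10");    // applies only to the monitoring connection

    try (Connection conn = DriverManager.getConnection(
             "jdbc:aws-wrapper:mysql://database.cluster-xyz.us-east-1.rds.amazonaws.com:3306/mydb",
             props);
         Statement stmt = conn.createStatement()) {
      // Network-bound calls on this connection are now monitored by the efm2 plugin.
      stmt.execute("SELECT 1");
    }
  }
}
```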
diff --git a/wrapper/build.gradle.kts b/wrapper/build.gradle.kts
index 37e9e2d46..87ed00590 100644
--- a/wrapper/build.gradle.kts
+++ b/wrapper/build.gradle.kts
@@ -250,7 +250,9 @@ tasks.withType<Test> {
outputs.upToDateWhen { false }
System.getProperties().forEach {
- if (it.key.toString().startsWith("test-no-")) {
+ if (it.key.toString().startsWith("test-no-")
+ || it.key.toString() == "test-include-tags"
+ || it.key.toString() == "test-exclude-tags") {
systemProperty(it.key.toString(), it.value.toString())
}
}
@@ -391,6 +393,10 @@ tasks.register("test-all-aurora-performance") {
systemProperty("test-no-hikari", "true")
systemProperty("test-no-secrets-manager", "true")
systemProperty("test-no-graalvm", "true")
+ systemProperty("test-no-openjdk8", "true")
+ systemProperty("test-no-instances-1", "true")
+ systemProperty("test-no-instances-2", "true")
+ systemProperty("test-exclude-tags", "advanced,rw-splitting")
}
}
@@ -403,10 +409,34 @@ tasks.register("test-aurora-pg-performance") {
systemProperty("test-no-hikari", "true")
systemProperty("test-no-secrets-manager", "true")
systemProperty("test-no-graalvm", "true")
+ systemProperty("test-no-openjdk8", "true")
systemProperty("test-no-mysql-driver", "true")
systemProperty("test-no-mysql-engine", "true")
systemProperty("test-no-mariadb-driver", "true")
systemProperty("test-no-mariadb-engine", "true")
+ systemProperty("test-no-instances-1", "true")
+ systemProperty("test-no-instances-2", "true")
+ systemProperty("test-exclude-tags", "advanced,rw-splitting")
+ }
+}
+
+tasks.register("debug-aurora-pg-performance") {
+ group = "verification"
+ filter.includeTestsMatching("integration.host.TestRunner.debugTests")
+ doFirst {
+ systemProperty("test-no-docker", "true")
+ systemProperty("test-no-iam", "true")
+ systemProperty("test-no-hikari", "true")
+ systemProperty("test-no-secrets-manager", "true")
+ systemProperty("test-no-graalvm", "true")
+ systemProperty("test-no-openjdk8", "true")
+ systemProperty("test-no-mysql-driver", "true")
+ systemProperty("test-no-mysql-engine", "true")
+ systemProperty("test-no-mariadb-driver", "true")
+ systemProperty("test-no-mariadb-engine", "true")
+ systemProperty("test-no-instances-1", "true")
+ systemProperty("test-no-instances-2", "true")
+ systemProperty("test-exclude-tags", "advanced,rw-splitting")
}
}
@@ -419,10 +449,74 @@ tasks.register("test-aurora-mysql-performance") {
systemProperty("test-no-hikari", "true")
systemProperty("test-no-secrets-manager", "true")
systemProperty("test-no-graalvm", "true")
+ systemProperty("test-no-openjdk8", "true")
+ systemProperty("test-no-pg-driver", "true")
+ systemProperty("test-no-pg-engine", "true")
+ systemProperty("test-no-mariadb-driver", "true")
+ systemProperty("test-no-mariadb-engine", "true")
+ systemProperty("test-no-instances-1", "true")
+ systemProperty("test-no-instances-2", "true")
+ systemProperty("test-exclude-tags", "advanced,rw-splitting")
+ }
+}
+
+tasks.register("debug-aurora-mysql-performance") {
+ group = "verification"
+ filter.includeTestsMatching("integration.host.TestRunner.debugTests")
+ doFirst {
+ systemProperty("test-no-docker", "true")
+ systemProperty("test-no-iam", "true")
+ systemProperty("test-no-hikari", "true")
+ systemProperty("test-no-secrets-manager", "true")
+ systemProperty("test-no-graalvm", "true")
+ systemProperty("test-no-openjdk8", "true")
+ systemProperty("test-no-pg-driver", "true")
+ systemProperty("test-no-pg-engine", "true")
+ systemProperty("test-no-mariadb-driver", "true")
+ systemProperty("test-no-mariadb-engine", "true")
+ systemProperty("test-no-instances-1", "true")
+ systemProperty("test-no-instances-2", "true")
+ systemProperty("test-exclude-tags", "advanced,rw-splitting")
+ }
+}
+
+tasks.register("test-aurora-pg-advanced-performance") {
+ group = "verification"
+ filter.includeTestsMatching("integration.host.TestRunner.runTests")
+ doFirst {
+ systemProperty("test-no-docker", "true")
+ systemProperty("test-no-iam", "true")
+ systemProperty("test-no-hikari", "true")
+ systemProperty("test-no-secrets-manager", "true")
+ systemProperty("test-no-graalvm", "true")
+ systemProperty("test-no-openjdk8", "true")
+ systemProperty("test-no-mysql-driver", "true")
+ systemProperty("test-no-mysql-engine", "true")
+ systemProperty("test-no-mariadb-driver", "true")
+ systemProperty("test-no-mariadb-engine", "true")
+ systemProperty("test-no-instances-1", "true")
+ systemProperty("test-no-instances-2", "true")
+ systemProperty("test-include-tags", "advanced")
+ }
+}
+
+tasks.register("test-aurora-mysql-advanced-performance") {
+ group = "verification"
+ filter.includeTestsMatching("integration.host.TestRunner.runTests")
+ doFirst {
+ systemProperty("test-no-docker", "true")
+ systemProperty("test-no-iam", "true")
+ systemProperty("test-no-hikari", "true")
+ systemProperty("test-no-secrets-manager", "true")
+ systemProperty("test-no-graalvm", "true")
+ systemProperty("test-no-openjdk8", "true")
systemProperty("test-no-pg-driver", "true")
systemProperty("test-no-pg-engine", "true")
systemProperty("test-no-mariadb-driver", "true")
systemProperty("test-no-mariadb-engine", "true")
+ systemProperty("test-no-instances-1", "true")
+ systemProperty("test-no-instances-2", "true")
+ systemProperty("test-include-tags", "advanced")
}
}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java
index cc9a3eda4..353e3eeb6 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java
@@ -62,6 +62,7 @@ public class ConnectionPluginChainBuilder {
put("logQuery", LogQueryConnectionPluginFactory.class);
put("dataCache", DataCacheConnectionPluginFactory.class);
put("efm", HostMonitoringConnectionPluginFactory.class);
+ put("efm2", software.amazon.jdbc.plugin.efm2.HostMonitoringConnectionPluginFactory.class);
put("failover", FailoverConnectionPluginFactory.class);
put("iam", IamAuthConnectionPluginFactory.class);
put("awsSecretsManager", AwsSecretsManagerConnectionPluginFactory.class);
@@ -91,6 +92,7 @@ public class ConnectionPluginChainBuilder {
put(ReadWriteSplittingPluginFactory.class, 600);
put(FailoverConnectionPluginFactory.class, 700);
put(HostMonitoringConnectionPluginFactory.class, 800);
+ put(software.amazon.jdbc.plugin.efm2.HostMonitoringConnectionPluginFactory.class, 810);
put(FastestResponseStrategyPluginFactory.class, 900);
put(IamAuthConnectionPluginFactory.class, 1000);
put(AwsSecretsManagerConnectionPluginFactory.class, 1100);
diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java
index 8be2aff07..39f659c1f 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java
@@ -71,6 +71,7 @@ public class ConnectionPluginManager implements CanReleaseResources, Wrapper {
put(LogQueryConnectionPlugin.class, "plugin:logQuery");
put(DataCacheConnectionPlugin.class, "plugin:dataCache");
put(HostMonitoringConnectionPlugin.class, "plugin:efm");
+ put(software.amazon.jdbc.plugin.efm2.HostMonitoringConnectionPlugin.class, "plugin:efm2");
put(FailoverConnectionPlugin.class, "plugin:failover");
put(IamAuthConnectionPlugin.class, "plugin:iam");
put(AwsSecretsManagerConnectionPlugin.class, "plugin:awsSecretsManager");
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPlugin.java
new file mode 100644
index 000000000..d54f3b623
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPlugin.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Properties;
+import java.util.Set;
+import java.util.function.Supplier;
+import java.util.logging.Logger;
+import org.checkerframework.checker.nullness.qual.NonNull;
+import software.amazon.jdbc.AwsWrapperProperty;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.JdbcCallable;
+import software.amazon.jdbc.NodeChangeOptions;
+import software.amazon.jdbc.OldConnectionSuggestedAction;
+import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.PropertyDefinition;
+import software.amazon.jdbc.cleanup.CanReleaseResources;
+import software.amazon.jdbc.plugin.AbstractConnectionPlugin;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.RdsUrlType;
+import software.amazon.jdbc.util.RdsUtils;
+import software.amazon.jdbc.util.SubscribedMethodHelper;
+
+/**
+ * Monitor the server while the connection is executing methods for more sophisticated failure
+ * detection.
+ */
+public class HostMonitoringConnectionPlugin extends AbstractConnectionPlugin
+ implements CanReleaseResources {
+
+ private static final Logger LOGGER =
+ Logger.getLogger(HostMonitoringConnectionPlugin.class.getName());
+
+ public static final AwsWrapperProperty FAILURE_DETECTION_ENABLED =
+ new AwsWrapperProperty(
+ "failureDetectionEnabled",
+ "true",
+ "Enable failure detection logic (aka node monitoring thread).");
+
+ public static final AwsWrapperProperty FAILURE_DETECTION_TIME =
+ new AwsWrapperProperty(
+ "failureDetectionTime",
+ "30000",
+ "Interval in millis between sending SQL to the server and the first probe to database node.");
+
+ public static final AwsWrapperProperty FAILURE_DETECTION_INTERVAL =
+ new AwsWrapperProperty(
+ "failureDetectionInterval",
+ "5000",
+ "Interval in millis between probes to database node.");
+
+ public static final AwsWrapperProperty FAILURE_DETECTION_COUNT =
+ new AwsWrapperProperty(
+ "failureDetectionCount",
+ "3",
+ "Number of failed connection checks before considering database node unhealthy.");
+
+ private static final Set<String> subscribedMethods =
+ Collections.unmodifiableSet(new HashSet<>(Collections.singletonList("*")));
+
+ protected @NonNull Properties properties;
+ private final @NonNull Supplier<MonitorService> monitorServiceSupplier;
+ private final @NonNull PluginService pluginService;
+ private MonitorService monitorService;
+ private final RdsUtils rdsHelper;
+ private HostSpec monitoringHostSpec;
+
+ static {
+ PropertyDefinition.registerPluginProperties(HostMonitoringConnectionPlugin.class);
+ PropertyDefinition.registerPluginProperties("monitoring-");
+ }
+
+ /**
+ * Initialize the node monitoring plugin.
+ *
+ * @param pluginService A service allowing the plugin to retrieve the current active connection
+ * and its connection settings.
+ * @param properties The property set used to initialize the active connection.
+ */
+ public HostMonitoringConnectionPlugin(
+ final @NonNull PluginService pluginService, final @NonNull Properties properties) {
+ this(pluginService, properties, () -> new MonitorServiceImpl(pluginService), new RdsUtils());
+ }
+
+ HostMonitoringConnectionPlugin(
+ final @NonNull PluginService pluginService,
+ final @NonNull Properties properties,
+ final @NonNull Supplier<MonitorService> monitorServiceSupplier,
+ final RdsUtils rdsHelper) {
+ if (pluginService == null) {
+ throw new IllegalArgumentException("pluginService");
+ }
+ if (properties == null) {
+ throw new IllegalArgumentException("properties");
+ }
+ if (monitorServiceSupplier == null) {
+ throw new IllegalArgumentException("monitorServiceSupplier");
+ }
+ this.pluginService = pluginService;
+ this.properties = properties;
+ this.monitorServiceSupplier = monitorServiceSupplier;
+ this.rdsHelper = rdsHelper;
+ }
+
+ @Override
+ public Set<String> getSubscribedMethods() {
+ return subscribedMethods;
+ }
+
+ /**
+ * Executes the given SQL function with {@link MonitorImpl} if connection monitoring is enabled.
+ * Otherwise, executes the SQL function directly.
+ */
+ @Override
+ public <T, E extends Exception> T execute(
+ final Class<T> resultClass,
+ final Class<E> exceptionClass,
+ final Object methodInvokeOn,
+ final String methodName,
+ final JdbcCallable<T, E> jdbcMethodFunc,
+ final Object[] jdbcMethodArgs)
+ throws E {
+
+ // update config settings since they may change
+ final boolean isEnabled = FAILURE_DETECTION_ENABLED.getBoolean(this.properties);
+
+ if (!isEnabled || !SubscribedMethodHelper.NETWORK_BOUND_METHODS.contains(methodName)) {
+ return jdbcMethodFunc.call();
+ }
+
+ final int failureDetectionTimeMillis = FAILURE_DETECTION_TIME.getInteger(this.properties);
+ final int failureDetectionIntervalMillis =
+ FAILURE_DETECTION_INTERVAL.getInteger(this.properties);
+ final int failureDetectionCount = FAILURE_DETECTION_COUNT.getInteger(this.properties);
+
+ initMonitorService();
+
+ T result;
+ MonitorConnectionContext monitorContext = null;
+
+ try {
+ LOGGER.finest(
+ () -> Messages.get(
+ "HostMonitoringConnectionPlugin.activatedMonitoring",
+ new Object[] {methodName}));
+
+ final HostSpec monitoringHostSpec = this.getMonitoringHostSpec();
+
+ monitorContext =
+ this.monitorService.startMonitoring(
+ this.pluginService.getCurrentConnection(), // abort this connection if needed
+ monitoringHostSpec,
+ this.properties,
+ failureDetectionTimeMillis,
+ failureDetectionIntervalMillis,
+ failureDetectionCount);
+
+ result = jdbcMethodFunc.call();
+
+ } finally {
+ if (monitorContext != null) {
+ this.monitorService.stopMonitoring(monitorContext, this.pluginService.getCurrentConnection());
+ }
+
+ LOGGER.finest(
+ () -> Messages.get(
+ "HostMonitoringConnectionPlugin.monitoringDeactivated",
+ new Object[] {methodName}));
+ }
+
+ return result;
+ }
+
+ private void initMonitorService() {
+ if (this.monitorService == null) {
+ this.monitorService = this.monitorServiceSupplier.get();
+ }
+ }
+
+ /** Call this plugin's monitor service to release all resources associated with this plugin. */
+ @Override
+ public void releaseResources() {
+ if (this.monitorService != null) {
+ this.monitorService.releaseResources();
+ }
+
+ this.monitorService = null;
+ }
+
+ @Override
+ public OldConnectionSuggestedAction notifyConnectionChanged(final EnumSet<NodeChangeOptions> changes) {
+ if (changes.contains(NodeChangeOptions.HOSTNAME)
+ || changes.contains(NodeChangeOptions.NODE_CHANGED)) {
+
+ // Reset monitoring HostSpec since the associated connection has changed.
+ this.monitoringHostSpec = null;
+ }
+
+ return OldConnectionSuggestedAction.NO_OPINION;
+ }
+
+ @Override
+ public Connection connect(
+ final @NonNull String driverProtocol,
+ final @NonNull HostSpec hostSpec,
+ final @NonNull Properties props,
+ final boolean isInitialConnection,
+ final @NonNull JdbcCallable<Connection, SQLException> connectFunc)
+ throws SQLException {
+ return connectInternal(driverProtocol, hostSpec, connectFunc);
+ }
+
+ private Connection connectInternal(String driverProtocol, HostSpec hostSpec,
+ JdbcCallable<Connection, SQLException> connectFunc) throws SQLException {
+ final Connection conn = connectFunc.call();
+
+ if (conn != null) {
+ final RdsUrlType type = this.rdsHelper.identifyRdsType(hostSpec.getHost());
+ if (type.isRdsCluster()) {
+ hostSpec.resetAliases();
+ this.pluginService.fillAliases(conn, hostSpec);
+ }
+ }
+
+ return conn;
+ }
+
+ @Override
+ public Connection forceConnect(
+ final @NonNull String driverProtocol,
+ final @NonNull HostSpec hostSpec,
+ final @NonNull Properties props,
+ final boolean isInitialConnection,
+ final @NonNull JdbcCallable<Connection, SQLException> forceConnectFunc)
+ throws SQLException {
+ return connectInternal(driverProtocol, hostSpec, forceConnectFunc);
+ }
+
+ public HostSpec getMonitoringHostSpec() {
+ if (this.monitoringHostSpec == null) {
+ this.monitoringHostSpec = this.pluginService.getCurrentHostSpec();
+ final RdsUrlType rdsUrlType = this.rdsHelper.identifyRdsType(monitoringHostSpec.getUrl());
+
+ try {
+ if (rdsUrlType.isRdsCluster()) {
+ LOGGER.finest("Monitoring HostSpec is associated with a cluster endpoint, "
+ + "plugin needs to identify the cluster connection.");
+ this.monitoringHostSpec = this.pluginService.identifyConnection(this.pluginService.getCurrentConnection());
+ if (this.monitoringHostSpec == null) {
+ throw new RuntimeException(Messages.get(
+ "HostMonitoringConnectionPlugin.unableToIdentifyConnection",
+ new Object[] {
+ this.pluginService.getCurrentHostSpec().getHost(),
+ this.pluginService.getHostListProvider()}));
+ }
+ this.pluginService.fillAliases(this.pluginService.getCurrentConnection(), monitoringHostSpec);
+ }
+ } catch (SQLException e) {
+ // Log and throw.
+ LOGGER.finest(Messages.get("HostMonitoringConnectionPlugin.errorIdentifyingConnection", new Object[] {e}));
+ throw new RuntimeException(e);
+ }
+ }
+ return this.monitoringHostSpec;
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPluginFactory.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPluginFactory.java
new file mode 100644
index 000000000..0dfdb79ca
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPluginFactory.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+import java.util.Properties;
+import software.amazon.jdbc.ConnectionPlugin;
+import software.amazon.jdbc.ConnectionPluginFactory;
+import software.amazon.jdbc.PluginService;
+
+/** Class initializing a {@link HostMonitoringConnectionPlugin}. */
+public class HostMonitoringConnectionPluginFactory implements ConnectionPluginFactory {
+ @Override
+ public ConnectionPlugin getInstance(final PluginService pluginService, final Properties props) {
+ return new HostMonitoringConnectionPlugin(pluginService, props);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/Monitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/Monitor.java
new file mode 100644
index 000000000..5689db2ce
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/Monitor.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+/**
+ * Interface for monitors. This class uses background threads to monitor servers with one or more
+ * connections for more efficient failure detection during method execution.
+ */
+public interface Monitor extends AutoCloseable, Runnable {
+
+ void startMonitoring(MonitorConnectionContext context);
+
+ boolean canDispose();
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorConnectionContext.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorConnectionContext.java
new file mode 100644
index 000000000..806047147
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorConnectionContext.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+import java.lang.ref.WeakReference;
+import java.sql.Connection;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Monitoring context for each connection. This contains each connection's criteria for whether a
+ * server should be considered unhealthy. The context is shared between the main thread and the monitor thread.
+ */
+public class MonitorConnectionContext {
+
+ private final AtomicReference<WeakReference<Connection>> connectionToAbortRef;
+ private final AtomicBoolean nodeUnhealthy = new AtomicBoolean(false);
+
+ /**
+ * Constructor.
+ *
+ * @param connectionToAbort A reference to the connection associated with this context that will be aborted.
+ */
+ public MonitorConnectionContext(final Connection connectionToAbort) {
+ this.connectionToAbortRef = new AtomicReference<>(new WeakReference<>(connectionToAbort));
+ }
+
+ public boolean isNodeUnhealthy() {
+ return this.nodeUnhealthy.get();
+ }
+
+ void setNodeUnhealthy(final boolean nodeUnhealthy) {
+ this.nodeUnhealthy.set(nodeUnhealthy);
+ }
+
+ public boolean shouldAbort() {
+ return this.nodeUnhealthy.get() && this.connectionToAbortRef.get() != null;
+ }
+
+ public void setInactive() {
+ this.connectionToAbortRef.set(null);
+ }
+
+ public Connection getConnection() {
+ WeakReference<Connection> copy = this.connectionToAbortRef.get();
+ return copy == null ? null : copy.get();
+ }
+
+ public boolean isActive() {
+ WeakReference<Connection> copy = this.connectionToAbortRef.get();
+ return copy != null && copy.get() != null;
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorImpl.java
new file mode 100644
index 000000000..ee4e7a506
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorImpl.java
@@ -0,0 +1,416 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+import java.lang.ref.WeakReference;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Properties;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import org.checkerframework.checker.nullness.qual.NonNull;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.hostavailability.HostAvailability;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.PropertyUtils;
+import software.amazon.jdbc.util.StringUtils;
+import software.amazon.jdbc.util.telemetry.TelemetryContext;
+import software.amazon.jdbc.util.telemetry.TelemetryCounter;
+import software.amazon.jdbc.util.telemetry.TelemetryFactory;
+import software.amazon.jdbc.util.telemetry.TelemetryGauge;
+import software.amazon.jdbc.util.telemetry.TelemetryTraceLevel;
+
+/**
+ * This class uses a background thread to monitor a particular server with one or more active {@link
+ * Connection}.
+ */
+public class MonitorImpl implements Monitor {
+
+ private static final Logger LOGGER = Logger.getLogger(MonitorImpl.class.getName());
+ private static final long THREAD_SLEEP_MILLIS = 1000;
+ private static final String MONITORING_PROPERTY_PREFIX = "monitoring-";
+
+ protected static final Executor ABORT_EXECUTOR = Executors.newSingleThreadExecutor();
+
+ private final Queue<WeakReference<MonitorConnectionContext>> activeContexts = new ConcurrentLinkedQueue<>();
+ private final HashMap<Long, Queue<WeakReference<MonitorConnectionContext>>> newContexts = new HashMap<>();
+ private final PluginService pluginService;
+ private final TelemetryFactory telemetryFactory;
+ private final Properties properties;
+ private final HostSpec hostSpec;
+ private final AtomicBoolean stopped = new AtomicBoolean(false);
+ private Connection monitoringConn = null;
+ private final ExecutorService threadPool = Executors.newFixedThreadPool(2, runnableTarget -> {
+ final Thread monitoringThread = new Thread(runnableTarget);
+ monitoringThread.setDaemon(true);
+ return monitoringThread;
+ });
+
+ private final long failureDetectionTimeNano;
+ private final long failureDetectionIntervalNano;
+ private final int failureDetectionCount;
+
+ private long invalidNodeStartTimeNano;
+ private long failureCount;
+ private boolean nodeUnhealthy = false;
+
+
+ private final TelemetryGauge newContextsSizeGauge;
+ private final TelemetryGauge activeContextsSizeGauge;
+ private final TelemetryGauge nodeHealthyGauge;
+ private final TelemetryCounter abortedConnectionsCounter;
+
+ /**
+ * Store the monitoring configuration for a connection.
+ *
+ * @param pluginService A service for creating new connections.
+ * @param hostSpec The {@link HostSpec} of the server this {@link MonitorImpl}
+ * instance is monitoring.
+ * @param properties The {@link Properties} containing additional monitoring
+ * configuration.
+ */
+ public MonitorImpl(
+ final @NonNull PluginService pluginService,
+ final @NonNull HostSpec hostSpec,
+ final @NonNull Properties properties,
+ final int failureDetectionTimeMillis,
+ final int failureDetectionIntervalMillis,
+ final int failureDetectionCount,
+ final TelemetryCounter abortedConnectionsCounter) {
+
+ this.pluginService = pluginService;
+ this.telemetryFactory = pluginService.getTelemetryFactory();
+ this.hostSpec = hostSpec;
+ this.properties = properties;
+ this.failureDetectionTimeNano = TimeUnit.MILLISECONDS.toNanos(failureDetectionTimeMillis);
+ this.failureDetectionIntervalNano = TimeUnit.MILLISECONDS.toNanos(failureDetectionIntervalMillis);
+ this.failureDetectionCount = failureDetectionCount;
+ this.abortedConnectionsCounter = abortedConnectionsCounter;
+
+ final String hostId = StringUtils.isNullOrEmpty(this.hostSpec.getHostId())
+ ? this.hostSpec.getHost()
+ : this.hostSpec.getHostId();
+
+ this.newContextsSizeGauge = telemetryFactory.createGauge(
+ String.format("efm2.newContexts.size.%s", hostId),
+ this::getActiveContextSize);
+
+ this.activeContextsSizeGauge = telemetryFactory.createGauge(
+ String.format("efm2.activeContexts.size.%s", hostId),
+ () -> (long) this.activeContexts.size());
+
+ this.nodeHealthyGauge = telemetryFactory.createGauge(
+ String.format("efm2.nodeHealthy.%s", hostId),
+ () -> this.nodeUnhealthy ? 0L : 1L);
+
+ this.threadPool.submit(this::newContextRun); // task to handle new contexts
+ this.threadPool.submit(this); // task to handle active monitoring contexts
+ this.threadPool.shutdown(); // No more tasks are accepted by pool.
+ }
+
+ @Override
+ public boolean canDispose() {
+ return this.activeContexts.isEmpty();
+ }
+
+ @Override
+ public void close() throws Exception {
+ this.stopped.set(true);
+
+ // Waiting for 30s gives a thread enough time to exit monitoring loop and close database connection.
+ if (!this.threadPool.awaitTermination(30, TimeUnit.SECONDS)) {
+ this.threadPool.shutdownNow();
+ }
+ LOGGER.finest(() -> Messages.get(
+ "MonitorImpl.stopped",
+ new Object[] {this.hostSpec.getHost()}));
+ }
+
+ protected long getActiveContextSize() {
+ return this.newContexts.values().stream().mapToLong(java.util.Collection::size).sum();
+ }
+
+ @Override
+ public void startMonitoring(final MonitorConnectionContext context) {
+ if (this.stopped.get()) {
+ LOGGER.warning(() -> Messages.get("MonitorImpl.monitorIsStopped", new Object[] {this.hostSpec.getHost()}));
+ }
+
+ final long currentTimeNano = this.getCurrentTimeNano();
+ long startMonitoringTimeNano = this.truncateNanoToSeconds(
+ currentTimeNano + this.failureDetectionTimeNano);
+
+ Queue<WeakReference<MonitorConnectionContext>> queue =
+ this.newContexts.computeIfAbsent(
+ startMonitoringTimeNano,
+ (key) -> new ConcurrentLinkedQueue<>());
+ queue.add(new WeakReference<>(context));
+ }
+
+ private long truncateNanoToSeconds(final long timeNano) {
+ return TimeUnit.SECONDS.toNanos(TimeUnit.NANOSECONDS.toSeconds(timeNano));
+ }
+
+ public void clearContexts() {
+ this.newContexts.clear();
+ this.activeContexts.clear();
+ }
+
+ // This method helps to organize unit tests.
+ long getCurrentTimeNano() {
+ return System.nanoTime();
+ }
+
+ public void newContextRun() {
+
+ final TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext(
+ "monitoring thread (new contexts)", TelemetryTraceLevel.TOP_LEVEL);
+ telemetryContext.setAttribute("url", this.hostSpec.getUrl());
+
+ try {
+ while (!this.stopped.get()) {
+
+ final long currentTimeNano = this.getCurrentTimeNano();
+
+ final ArrayList<Long> processedKeys = new ArrayList<>();
+ this.newContexts.entrySet().stream()
+ // Get entries with key (that is a time in nanos) less or equal than current time.
+ .filter(entry -> entry.getKey() < currentTimeNano)
+ .forEach(entry -> {
+ final Queue<WeakReference<MonitorConnectionContext>> queue = entry.getValue();
+ processedKeys.add(entry.getKey());
+ // Each value of found entry is a queue of monitoring contexts awaiting active monitoring.
+ // Add all contexts to an active monitoring contexts queue.
+ // Ignore disposed contexts.
+ WeakReference<MonitorConnectionContext> contextWeakRef;
+ while ((contextWeakRef = queue.poll()) != null) {
+ MonitorConnectionContext context = contextWeakRef.get();
+ if (context != null && context.isActive()) {
+ this.activeContexts.add(contextWeakRef);
+ }
+ }
+ });
+ processedKeys.forEach(this.newContexts::remove);
+
+ TimeUnit.SECONDS.sleep(1);
+ }
+ } catch (final InterruptedException intEx) {
+ // do nothing; just exit the thread
+ } catch (final Exception ex) {
+ // this should not be reached; log and exit thread
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(
+ Level.FINEST,
+ Messages.get(
+ "MonitorImpl.exceptionDuringMonitoringStop",
+ new Object[]{this.hostSpec.getHost()}),
+ ex); // We want to print full trace stack of the exception.
+ }
+ } finally {
+ telemetryContext.closeContext();
+ }
+ }
+
+ @Override
+ public void run() {
+ final TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext(
+ "monitoring thread", TelemetryTraceLevel.TOP_LEVEL);
+ telemetryContext.setAttribute("url", hostSpec.getUrl());
+
+ try {
+ while (!this.stopped.get()) {
+
+ if (this.activeContexts.isEmpty()) {
+ TimeUnit.MILLISECONDS.sleep(THREAD_SLEEP_MILLIS);
+ continue;
+ }
+
+ final long statusCheckStartTimeNano = this.getCurrentTimeNano();
+ final boolean isValid = this.checkConnectionStatus();
+ final long statusCheckEndTimeNano = this.getCurrentTimeNano();
+
+ this.updateNodeHealthStatus(isValid, statusCheckStartTimeNano, statusCheckEndTimeNano);
+
+ if (this.nodeUnhealthy) {
+ this.pluginService.setAvailability(this.hostSpec.asAliases(), HostAvailability.NOT_AVAILABLE);
+ }
+
+ final List<WeakReference<MonitorConnectionContext>> tmpActiveContexts = new ArrayList<>();
+ WeakReference<MonitorConnectionContext> monitorContextWeakRef;
+
+ while ((monitorContextWeakRef = this.activeContexts.poll()) != null) {
+ if (this.stopped.get()) {
+ break;
+ }
+
+ MonitorConnectionContext monitorContext = monitorContextWeakRef.get();
+ if (monitorContext == null) {
+ continue;
+ }
+
+ if (this.nodeUnhealthy) {
+ // Kill connection.
+ monitorContext.setNodeUnhealthy(true);
+ final Connection connectionToAbort = monitorContext.getConnection();
+ monitorContext.setInactive();
+ if (connectionToAbort != null) {
+ this.abortConnection(connectionToAbort);
+ this.abortedConnectionsCounter.inc();
+ }
+ } else if (monitorContext.isActive()) {
+ tmpActiveContexts.add(monitorContextWeakRef);
+ }
+ }
+
+ // activeContexts is empty now and tmpActiveContexts contains all yet active contexts
+ // Add active contexts back to the queue.
+ this.activeContexts.addAll(tmpActiveContexts);
+
+ TimeUnit.NANOSECONDS.sleep(this.failureDetectionIntervalNano);
+ }
+ } catch (final InterruptedException intEx) {
+ // do nothing
+ } catch (final Exception ex) {
+ // this should not be reached; log and exit thread
+ if (LOGGER.isLoggable(Level.FINEST)) {
+ LOGGER.log(
+ Level.FINEST,
+ Messages.get(
+ "MonitorImpl.exceptionDuringMonitoringStop",
+ new Object[]{this.hostSpec.getHost()}),
+ ex); // We want to print full trace stack of the exception.
+ }
+ } finally {
+ this.stopped.set(true);
+ if (this.monitoringConn != null) {
+ try {
+ this.monitoringConn.close();
+ } catch (final SQLException ex) {
+ // ignore
+ }
+ }
+ telemetryContext.closeContext();
+ }
+ }
+
+ /**
+ * Check the status of the monitored server by establishing a connection and sending a ping.
+ *
+ * @return True, if the server is still alive.
+ */
+ boolean checkConnectionStatus() {
+ TelemetryContext connectContext = telemetryFactory.openTelemetryContext(
+ "connection status check", TelemetryTraceLevel.NESTED);
+ try {
+ if (this.monitoringConn == null || this.monitoringConn.isClosed()) {
+ // open a new connection
+ final Properties monitoringConnProperties = PropertyUtils.copyProperties(this.properties);
+
+ this.properties.stringPropertyNames().stream()
+ .filter(p -> p.startsWith(MONITORING_PROPERTY_PREFIX))
+ .forEach(
+ p -> {
+ monitoringConnProperties.put(
+ p.substring(MONITORING_PROPERTY_PREFIX.length()),
+ this.properties.getProperty(p));
+ monitoringConnProperties.remove(p);
+ });
+
+ LOGGER.finest(() -> "Opening a monitoring connection to " + this.hostSpec.getUrl());
+ this.monitoringConn = this.pluginService.forceConnect(this.hostSpec, monitoringConnProperties);
+ LOGGER.finest(() -> "Opened monitoring connection: " + this.monitoringConn);
+ return true;
+ }
+
+ final boolean isValid = this.monitoringConn.isValid(
+ (int) TimeUnit.NANOSECONDS.toSeconds(this.failureDetectionIntervalNano));
+ return isValid;
+
+ } catch (final SQLException sqlEx) {
+ return false;
+
+ } finally {
+ connectContext.closeContext();
+ }
+ }
+
+ private void updateNodeHealthStatus(
+ final boolean connectionValid,
+ final long statusCheckStartNano,
+ final long statusCheckEndNano) {
+
+ if (!connectionValid) {
+ this.failureCount++;
+
+ if (this.invalidNodeStartTimeNano == 0) {
+ this.invalidNodeStartTimeNano = statusCheckStartNano;
+ }
+
+ final long invalidNodeDurationNano = statusCheckEndNano - this.invalidNodeStartTimeNano;
+ final long maxInvalidNodeDurationNano =
+ this.failureDetectionIntervalNano * Math.max(0, this.failureDetectionCount);
+
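+ // Mark the node unhealthy only after failed checks have persisted for the full detection window (interval * count).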
+ if (invalidNodeDurationNano >= maxInvalidNodeDurationNano) {
+ LOGGER.fine(() -> Messages.get("MonitorConnectionContext.hostDead", new Object[] {this.hostSpec.getHost()}));
+ this.nodeUnhealthy = true;
+ return;
+ }
+
+ LOGGER.finest(
+ () -> Messages.get(
+ "MonitorConnectionContext.hostNotResponding",
+ new Object[] {this.hostSpec.getHost(), this.failureCount}));
+ return;
+ }
+
+ if (this.failureCount > 0) {
+ // Node is back alive
+ LOGGER.finest(
+ () -> Messages.get("MonitorConnectionContext.hostAlive",
+ new Object[] {this.hostSpec.getHost()}));
+ }
+
+ this.failureCount = 0;
+ this.invalidNodeStartTimeNano = 0;
+ this.nodeUnhealthy = false;
+ }
+
+ private void abortConnection(final @NonNull Connection connectionToAbort) {
+ try {
+ connectionToAbort.abort(ABORT_EXECUTOR);
+ connectionToAbort.close();
+ } catch (final SQLException sqlEx) {
+ // ignore
+ LOGGER.finest(
+ () -> Messages.get(
+ "MonitorConnectionContext.exceptionAbortingConnection",
+ new Object[] {sqlEx.getMessage()}));
+ }
+ }
+
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorInitializer.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorInitializer.java
new file mode 100644
index 000000000..9027ccc5a
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorInitializer.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+import java.util.Properties;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.util.telemetry.TelemetryCounter;
+
+/** Interface for initializing a new {@link MonitorImpl}. */
+@FunctionalInterface
+public interface MonitorInitializer {
+ Monitor createMonitor(
+ HostSpec hostSpec,
+ Properties properties,
+ final int failureDetectionTimeMillis,
+ final int failureDetectionIntervalMillis,
+ final int failureDetectionCount,
+ final TelemetryCounter abortedConnectionsCounter);
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorService.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorService.java
new file mode 100644
index 000000000..6fd36bc87
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorService.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+import java.sql.Connection;
+import java.util.Properties;
+import software.amazon.jdbc.HostSpec;
+
+/**
+ * Interface for monitor services. Implementations provide ways to start and stop monitoring
+ * servers when connections are created.
+ */
+public interface MonitorService {
+
+ MonitorConnectionContext startMonitoring(
+ Connection connectionToAbort,
+ HostSpec hostSpec,
+ Properties properties,
+ int failureDetectionTimeMillis,
+ int failureDetectionIntervalMillis,
+ int failureDetectionCount);
+
+ /**
+ * Stop monitoring for a connection represented by the given {@link MonitorConnectionContext}.
+ * Removes the context from the {@link MonitorImpl}.
+ *
+ * @param context The {@link MonitorConnectionContext} representing a connection.
+ */
+ void stopMonitoring(MonitorConnectionContext context, Connection connectionToAbort);
+
+ void releaseResources();
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorServiceImpl.java
new file mode 100644
index 000000000..5b17fdac1
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorServiceImpl.java
@@ -0,0 +1,185 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.plugin.efm2;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.Properties;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
+import org.checkerframework.checker.nullness.qual.NonNull;
+import software.amazon.jdbc.AwsWrapperProperty;
+import software.amazon.jdbc.HostSpec;
+import software.amazon.jdbc.PluginService;
+import software.amazon.jdbc.util.Messages;
+import software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread;
+import software.amazon.jdbc.util.telemetry.TelemetryCounter;
+import software.amazon.jdbc.util.telemetry.TelemetryFactory;
+
+/**
+ * This class handles the creation and cleanup of monitoring threads for servers with one or more
+ * active connections.
+ */
+public class MonitorServiceImpl implements MonitorService {
+
+ private static final Logger LOGGER = Logger.getLogger(MonitorServiceImpl.class.getName());
+ public static final AwsWrapperProperty MONITOR_DISPOSAL_TIME_MS =
+ new AwsWrapperProperty(
+ "monitorDisposalTime",
+ "600000", // 10min
+ "Interval in milliseconds for a monitor to be considered inactive and to be disposed.");
+
+ protected static final long CACHE_CLEANUP_NANO = TimeUnit.MINUTES.toNanos(1);
+
+ protected static final Executor ABORT_EXECUTOR = Executors.newSingleThreadExecutor();
+
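+ // Monitors are shared across connections; a background cleanup thread closes any monitor that reports it can be disposed.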
+ protected static final SlidingExpirationCacheWithCleanupThread<String, Monitor> monitors =
+ new SlidingExpirationCacheWithCleanupThread<>(
+ Monitor::canDispose,
+ (monitor) -> {
+ try {
+ monitor.close();
+ } catch (Exception ex) {
+ // ignore
+ }
+ },
+ CACHE_CLEANUP_NANO);
+
+ protected final PluginService pluginService;
+ protected final MonitorInitializer monitorInitializer;
+ protected final TelemetryFactory telemetryFactory;
+ protected final TelemetryCounter abortedConnectionsCounter;
+
+ public MonitorServiceImpl(final @NonNull PluginService pluginService) {
+ this(
+ pluginService,
+ (hostSpec,
+ properties,
+ failureDetectionTimeMillis,
+ failureDetectionIntervalMillis,
+ failureDetectionCount,
+ abortedConnectionsCounter) ->
+ new MonitorImpl(
+ pluginService,
+ hostSpec,
+ properties,
+ failureDetectionTimeMillis,
+ failureDetectionIntervalMillis,
+ failureDetectionCount,
+ abortedConnectionsCounter));
+ }
+
+ MonitorServiceImpl(
+ final @NonNull PluginService pluginService,
+ final @NonNull MonitorInitializer monitorInitializer) {
+ this.pluginService = pluginService;
+ this.telemetryFactory = pluginService.getTelemetryFactory();
+ this.abortedConnectionsCounter = telemetryFactory.createCounter("efm2.connections.aborted");
+ this.monitorInitializer = monitorInitializer;
+ }
+
+ public static void clearCache() {
+ monitors.clear();
+ }
+
+ @Override
+ public MonitorConnectionContext startMonitoring(
+ final Connection connectionToAbort,
+ final HostSpec hostSpec,
+ final Properties properties,
+ final int failureDetectionTimeMillis,
+ final int failureDetectionIntervalMillis,
+ final int failureDetectionCount) {
+
+ final Monitor monitor = this.getMonitor(
+ hostSpec,
+ properties,
+ failureDetectionTimeMillis,
+ failureDetectionIntervalMillis,
+ failureDetectionCount);
+
+ final MonitorConnectionContext context = new MonitorConnectionContext(connectionToAbort);
+ monitor.startMonitoring(context);
+
+ return context;
+ }
+
+ @Override
+ public void stopMonitoring(
+ @NonNull final MonitorConnectionContext context,
+ @NonNull Connection connectionToAbort) {
+
+ if (context.shouldAbort()) {
+ context.setInactive();
+ try {
+ connectionToAbort.abort(ABORT_EXECUTOR);
+ connectionToAbort.close();
+ this.abortedConnectionsCounter.inc();
+ } catch (final SQLException sqlEx) {
+ // ignore
+ LOGGER.finest(
+ () -> Messages.get(
+ "MonitorConnectionContext.exceptionAbortingConnection",
+ new Object[] {sqlEx.getMessage()}));
+ }
+ } else {
+ context.setInactive();
+ }
+ }
+
+ @Override
+ public void releaseResources() {
+ // do nothing
+ }
+
+ /**
+ * Get or create a {@link MonitorImpl} for a server.
+ *
+ * @param hostSpec Information such as hostname of the server.
+ * @param properties The user configuration for the current connection.
+ * @return A {@link MonitorImpl} object associated with a specific server.
+ */
+ protected Monitor getMonitor(
+ final HostSpec hostSpec,
+ final Properties properties,
+ final int failureDetectionTimeMillis,
+ final int failureDetectionIntervalMillis,
+ final int failureDetectionCount) {
+
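+ // A monitor is shared only by connections that use the same failure detection settings and host URL.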
+ final String monitorKey = String.format("%d:%d:%d:%s",
+ failureDetectionTimeMillis,
+ failureDetectionIntervalMillis,
+ failureDetectionCount,
+ hostSpec.getUrl());
+
+ final long cacheExpirationNano = TimeUnit.MILLISECONDS.toNanos(
+ MONITOR_DISPOSAL_TIME_MS.getLong(properties));
+
+ return monitors.computeIfAbsent(
+ monitorKey,
+ (key) -> monitorInitializer.createMonitor(
+ hostSpec,
+ properties,
+ failureDetectionTimeMillis,
+ failureDetectionIntervalMillis,
+ failureDetectionCount,
+ this.abortedConnectionsCounter),
+ cacheExpirationNano);
+ }
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/telemetry/OpenTelemetryFactory.java b/wrapper/src/main/java/software/amazon/jdbc/util/telemetry/OpenTelemetryFactory.java
index 9181eb755..d39307faa 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/telemetry/OpenTelemetryFactory.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/telemetry/OpenTelemetryFactory.java
@@ -25,6 +25,14 @@ public class OpenTelemetryFactory implements TelemetryFactory {
private static final String INSTRUMENTATION_NAME = "aws-advanced-jdbc-wrapper";
+ /**
+ * Max allowed name length for counters and gauges.
+ *
+ * @see
+ * More details
+ */
+ private static final int NAME_MAX_LENGTH = 63;
+
private static OpenTelemetry openTelemetry;
private static Tracer tracer;
private static Meter meter;
@@ -57,13 +65,22 @@ public void postCopy(TelemetryContext telemetryContext, TelemetryTraceLevel trac
}
public TelemetryCounter createCounter(String name) {
+ if (name == null) {
+ throw new IllegalArgumentException("name");
+ }
meter = getOpenTelemetry().getMeter(INSTRUMENTATION_NAME);
- return new OpenTelemetryCounter(meter, name);
+ return new OpenTelemetryCounter(meter, trimName(name));
}
public TelemetryGauge createGauge(String name, GaugeCallable callback) {
+ if (name == null) {
+ throw new IllegalArgumentException("name");
+ }
meter = getOpenTelemetry().getMeter(INSTRUMENTATION_NAME);
- return new OpenTelemetryGauge(meter, name, callback);
+ return new OpenTelemetryGauge(meter, trimName(name), callback);
}
+ private String trimName(final String name) {
+ return (name.length() > NAME_MAX_LENGTH) ? name.substring(0, NAME_MAX_LENGTH) : name;
+ }
}
diff --git a/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties
index c3c4c46cb..b79c7d242 100644
--- a/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties
+++ b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties
@@ -195,6 +195,7 @@ MonitorImpl.interruptedExceptionDuringMonitoring=Monitoring thread for node {0}
MonitorImpl.exceptionDuringMonitoringContinue=Continuing monitoring after unhandled exception was thrown in monitoring thread for node {0}.
MonitorImpl.exceptionDuringMonitoringStop=Stopping monitoring after unhandled exception was thrown in monitoring thread for node {0}.
MonitorImpl.monitorIsStopped=Monitoring was already stopped for node {0}.
+MonitorImpl.stopped=Stopped monitoring thread for node ''{0}''.
# Monitor Service Impl
MonitorServiceImpl.emptyAliasSet=Empty alias set passed for ''{0}''. Set should not be empty.
diff --git a/wrapper/src/test/build.gradle.kts b/wrapper/src/test/build.gradle.kts
index b52eecc92..938e05852 100644
--- a/wrapper/src/test/build.gradle.kts
+++ b/wrapper/src/test/build.gradle.kts
@@ -64,7 +64,16 @@ tasks.withType<Test> {
classpath += fileTree("./libs") { include("*.jar") } + project.files("./test")
outputs.upToDateWhen { false }
- useJUnitPlatform()
+ useJUnitPlatform {
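+ // Tag filters come from the comma-separated "test-include-tags" / "test-exclude-tags" system properties.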
+ System.getProperty("test-include-tags")?.split(",")?.forEach { tag ->
+ includeTags(tag)
+ println("Include tests with tag: $tag")
+ }
+ System.getProperty("test-exclude-tags")?.split(",")?.forEach { tag ->
+ excludeTags(tag)
+ println("Exclude tests with tag: $tag")
+ }
+ }
testLogging {
events(PASSED, FAILED, SKIPPED)
diff --git a/wrapper/src/test/java/integration/container/ConnectionStringHelper.java b/wrapper/src/test/java/integration/container/ConnectionStringHelper.java
index 3ac06428b..f5b56355e 100644
--- a/wrapper/src/test/java/integration/container/ConnectionStringHelper.java
+++ b/wrapper/src/test/java/integration/container/ConnectionStringHelper.java
@@ -204,7 +204,7 @@ public static Properties getDefaultProperties() {
PropertyDefinition.TELEMETRY_METRICS_BACKEND.name,
features.contains(TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED) ? "otlp" : "none");
- DriverHelper.setTcpKeepAlive(TestEnvironment.getCurrent().getCurrentDriver(), props, false);
+ props.setProperty(PropertyDefinition.TCP_KEEP_ALIVE.name, "false");
return props;
}
diff --git a/wrapper/src/test/java/integration/container/TestDriverProvider.java b/wrapper/src/test/java/integration/container/TestDriverProvider.java
index fe5b25dbf..63fabeea2 100644
--- a/wrapper/src/test/java/integration/container/TestDriverProvider.java
+++ b/wrapper/src/test/java/integration/container/TestDriverProvider.java
@@ -54,6 +54,8 @@
import org.junit.jupiter.api.extension.TestTemplateInvocationContextProvider;
import org.junit.platform.commons.util.AnnotationUtils;
import software.amazon.jdbc.dialect.DialectManager;
+import software.amazon.jdbc.plugin.efm.MonitorThreadContainer;
+import software.amazon.jdbc.plugin.efm2.MonitorServiceImpl;
import software.amazon.jdbc.targetdriverdialect.TargetDriverDialectManager;
public class TestDriverProvider implements TestTemplateInvocationContextProvider {
@@ -212,6 +214,8 @@ public void beforeEach(ExtensionContext context) throws Exception {
TestPluginServiceImpl.clearHostAvailabilityCache();
DialectManager.resetEndpointCache();
TargetDriverDialectManager.resetCustomDialect();
+ MonitorThreadContainer.releaseInstance();
+ MonitorServiceImpl.clearCache();
}
if (tracesEnabled) {
AWSXRay.endSegment();
diff --git a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java
index 57320801e..2b4a1ba5b 100644
--- a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java
+++ b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java
@@ -18,13 +18,13 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
+import static software.amazon.jdbc.PropertyDefinition.CONNECT_TIMEOUT;
import static software.amazon.jdbc.PropertyDefinition.PLUGINS;
import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_COUNT;
import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_INTERVAL;
import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_TIME;
import static software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin.FAILOVER_TIMEOUT_MS;
-import integration.DriverHelper;
import integration.TestEnvironmentFeatures;
import integration.container.ConnectionStringHelper;
import integration.container.TestDriverProvider;
@@ -58,10 +58,14 @@
import org.apache.poi.xssf.usermodel.XSSFSheet;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.params.provider.Arguments;
+import software.amazon.jdbc.PropertyDefinition;
+import software.amazon.jdbc.plugin.efm.MonitorThreadContainer;
+import software.amazon.jdbc.plugin.efm2.MonitorServiceImpl;
import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException;
import software.amazon.jdbc.util.StringUtils;
@@ -71,10 +75,13 @@
TestEnvironmentFeatures.PERFORMANCE,
TestEnvironmentFeatures.FAILOVER_SUPPORTED
})
+@Tag("advanced")
public class AdvancedPerformanceTest {
private static final Logger LOGGER = Logger.getLogger(AdvancedPerformanceTest.class.getName());
+ private static final String MONITORING_CONNECTION_PREFIX = "monitoring-";
+
private static final int REPEAT_TIMES =
StringUtils.isNullOrEmpty(System.getenv("REPEAT_TIMES"))
? 5
@@ -178,9 +185,11 @@ public void test_AdvancedPerformance() throws IOException {
} finally {
doWritePerfDataToFile(
String.format(
- "./build/reports/tests/DbEngine_%s_Driver_%s_AdvancedPerformanceResults.xlsx",
+ "./build/reports/tests/AdvancedPerformanceResults_"
+ + "Db_%s_Driver_%s_Instances_%d.xlsx",
TestEnvironment.getCurrent().getInfo().getRequest().getDatabaseEngine(),
- TestEnvironment.getCurrent().getCurrentDriver()),
+ TestEnvironment.getCurrent().getCurrentDriver(),
+ TestEnvironment.getCurrent().getInfo().getRequest().getNumOfInstances()),
perfDataList);
perfDataList.clear();
}
@@ -188,21 +197,21 @@ public void test_AdvancedPerformance() throws IOException {
private void doMeasurePerformance(int sleepDelayMillis) throws InterruptedException {
- final AtomicLong downtime = new AtomicLong();
+ final AtomicLong downtimeNano = new AtomicLong();
final CountDownLatch startLatch = new CountDownLatch(5);
final CountDownLatch finishLatch = new CountDownLatch(5);
- downtime.set(0);
+ downtimeNano.set(0);
final Thread failoverThread =
- getThread_Failover(sleepDelayMillis, downtime, startLatch, finishLatch);
+ getThread_Failover(sleepDelayMillis, downtimeNano, startLatch, finishLatch);
final Thread pgThread =
- getThread_DirectDriver(sleepDelayMillis, downtime, startLatch, finishLatch);
+ getThread_DirectDriver(sleepDelayMillis, downtimeNano, startLatch, finishLatch);
final Thread wrapperEfmThread =
- getThread_WrapperEfm(sleepDelayMillis, downtime, startLatch, finishLatch);
+ getThread_WrapperEfm(sleepDelayMillis, downtimeNano, startLatch, finishLatch);
final Thread wrapperEfmFailoverThread =
- getThread_WrapperEfmFailover(sleepDelayMillis, downtime, startLatch, finishLatch);
- final Thread dnsThread = getThread_DNS(sleepDelayMillis, downtime, startLatch, finishLatch);
+ getThread_WrapperEfmFailover(sleepDelayMillis, downtimeNano, startLatch, finishLatch);
+ final Thread dnsThread = getThread_DNS(sleepDelayMillis, downtimeNano, startLatch, finishLatch);
failoverThread.start();
pgThread.start();
@@ -216,6 +225,8 @@ private void doMeasurePerformance(int sleepDelayMillis) throws InterruptedExcept
LOGGER.finest("Test is over.");
+ assertTrue(downtimeNano.get() > 0);
+
failoverThread.interrupt();
pgThread.interrupt();
wrapperEfmThread.interrupt();
@@ -269,7 +280,7 @@ private void ensureDnsHealthy() throws UnknownHostException, InterruptedExceptio
private Thread getThread_Failover(
final int sleepDelayMillis,
- final AtomicLong downtime,
+ final AtomicLong downtimeNano,
final CountDownLatch startLatch,
final CountDownLatch finishLatch) {
@@ -286,8 +297,8 @@ private Thread getThread_Failover(
LOGGER.finest("Trigger failover...");
// trigger failover
- auroraUtil.failoverClusterAndWaitUntilWriterChanged();
- downtime.set(System.nanoTime());
+ failoverCluster();
+ downtimeNano.set(System.nanoTime());
LOGGER.finest("Failover is started.");
} catch (InterruptedException interruptedException) {
@@ -303,13 +314,13 @@ private Thread getThread_Failover(
private Thread getThread_DirectDriver(
final int sleepDelayMillis,
- final AtomicLong downtime,
+ final AtomicLong downtimeNano,
final CountDownLatch startLatch,
final CountDownLatch finishLatch) {
return new Thread(
() -> {
- long failureTime = 0;
+ long failureTimeNano = 0;
try {
// DB_CONN_STR_PREFIX
final Properties props = ConnectionStringHelper.getDefaultProperties();
@@ -345,7 +356,8 @@ private Thread getThread_DirectDriver(
} catch (SQLException throwable) { // Catching executing query
LOGGER.finest("DirectDriver thread exception: " + throwable);
// Calculate and add detection time
- failureTime = (System.nanoTime() - downtime.get());
+ assertTrue(downtimeNano.get() > 0);
+ failureTimeNano = System.nanoTime() - downtimeNano.get();
}
} catch (InterruptedException interruptedException) {
@@ -357,7 +369,8 @@ private Thread getThread_DirectDriver(
data.paramFailoverDelayMillis = sleepDelayMillis;
data.paramDriverName =
"DirectDriver - " + TestEnvironment.getCurrent().getCurrentDriver();
- data.failureDetectionTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTime);
+ data.failureDetectionTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTimeNano);
+ LOGGER.finest("DirectDriver Collected data: " + data);
perfDataList.add(data);
LOGGER.finest(
"DirectDriver Failure detection time is " + data.failureDetectionTimeMillis + "ms");
@@ -370,18 +383,24 @@ private Thread getThread_DirectDriver(
private Thread getThread_WrapperEfm(
final int sleepDelayMillis,
- final AtomicLong downtime,
+ final AtomicLong downtimeNano,
final CountDownLatch startLatch,
final CountDownLatch finishLatch) {
return new Thread(
() -> {
- long failureTime = 0;
+ long failureTimeNano = 0;
try {
final Properties props = ConnectionStringHelper.getDefaultProperties();
- DriverHelper.setMonitoringConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setMonitoringSocketTimeout(props, TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
+
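+ // Properties prefixed with "monitoring-" apply only to the EFM monitoring connection.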
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.CONNECT_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.SOCKET_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(TIMEOUT_SEC)));
+ CONNECT_TIMEOUT.set(props, String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
+
FAILURE_DETECTION_TIME.set(props, Integer.toString(EFM_FAILURE_DETECTION_TIME_MS));
FAILURE_DETECTION_INTERVAL.set(props, Integer.toString(EFM_FAILURE_DETECTION_INTERVAL_MS));
FAILURE_DETECTION_COUNT.set(props, Integer.toString(EFM_FAILURE_DETECTION_COUNT));
@@ -419,7 +438,8 @@ private Thread getThread_WrapperEfm(
LOGGER.finest("WrapperEfm thread exception: " + throwable);
// Calculate and add detection time
- failureTime = (System.nanoTime() - downtime.get());
+ assertTrue(downtimeNano.get() > 0);
+ failureTimeNano = System.nanoTime() - downtimeNano.get();
}
} catch (InterruptedException interruptedException) {
@@ -432,7 +452,8 @@ private Thread getThread_WrapperEfm(
data.paramDriverName =
String.format(
"AWS Wrapper (%s, EFM)", TestEnvironment.getCurrent().getCurrentDriver());
- data.failureDetectionTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTime);
+ data.failureDetectionTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTimeNano);
+ LOGGER.finest("WrapperEfm Collected data: " + data);
perfDataList.add(data);
LOGGER.finest(
"WrapperEfm Failure detection time is " + data.failureDetectionTimeMillis + "ms");
@@ -445,18 +466,24 @@ private Thread getThread_WrapperEfm(
private Thread getThread_WrapperEfmFailover(
final int sleepDelayMillis,
- final AtomicLong downtime,
+ final AtomicLong downtimeNano,
final CountDownLatch startLatch,
final CountDownLatch finishLatch) {
return new Thread(
() -> {
- long failureTime = 0;
+ long failureTimeNano = 0;
try {
final Properties props = ConnectionStringHelper.getDefaultProperties();
- DriverHelper.setMonitoringConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setMonitoringSocketTimeout(props, TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
+
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.CONNECT_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.SOCKET_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(TIMEOUT_SEC)));
+ CONNECT_TIMEOUT.set(props, String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
+
FAILURE_DETECTION_TIME.set(props, Integer.toString(EFM_FAILURE_DETECTION_TIME_MS));
FAILURE_DETECTION_INTERVAL.set(props, Integer.toString(EFM_FAILURE_DETECTION_TIME_MS));
FAILURE_DETECTION_COUNT.set(props, Integer.toString(EFM_FAILURE_DETECTION_COUNT));
@@ -495,7 +522,8 @@ private Thread getThread_WrapperEfmFailover(
LOGGER.finest("WrapperEfmFailover thread exception: " + throwable);
if (throwable instanceof FailoverSuccessSQLException) {
// Calculate and add detection time
- failureTime = (System.nanoTime() - downtime.get());
+ assertTrue(downtimeNano.get() > 0);
+ failureTimeNano = System.nanoTime() - downtimeNano.get();
}
}
@@ -510,7 +538,8 @@ private Thread getThread_WrapperEfmFailover(
String.format(
"AWS Wrapper (%s, EFM, Failover)",
TestEnvironment.getCurrent().getCurrentDriver());
- data.reconnectTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTime);
+ data.reconnectTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTimeNano);
+ LOGGER.finest("WrapperEfmFailover Collected data: " + data);
perfDataList.add(data);
LOGGER.finest(
"WrapperEfmFailover Reconnect time is " + data.reconnectTimeMillis + "ms");
@@ -523,13 +552,13 @@ private Thread getThread_WrapperEfmFailover(
private Thread getThread_DNS(
final int sleepDelayMillis,
- final AtomicLong downtime,
+ final AtomicLong downtimeNano,
final CountDownLatch startLatch,
final CountDownLatch finishLatch) {
return new Thread(
() -> {
- long failureTime = 0;
+ long failureTimeNano = 0;
String currentClusterIpAddress;
try {
@@ -571,7 +600,8 @@ private Thread getThread_DNS(
// DNS data has changed
if (!clusterIpAddress.equals(currentClusterIpAddress)) {
- failureTime = (System.nanoTime() - downtime.get());
+ assertTrue(downtimeNano.get() > 0);
+ failureTimeNano = System.nanoTime() - downtimeNano.get();
}
} catch (InterruptedException interruptedException) {
@@ -582,7 +612,8 @@ private Thread getThread_DNS(
PerfStat data = new PerfStat();
data.paramFailoverDelayMillis = sleepDelayMillis;
data.paramDriverName = "DNS";
- data.dnsUpdateTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTime);
+ data.dnsUpdateTimeMillis = TimeUnit.NANOSECONDS.toMillis(failureTimeNano);
+ LOGGER.finest("DNS Collected data: " + data);
perfDataList.add(data);
LOGGER.finest("DNS Update time is " + data.dnsUpdateTimeMillis + "ms");
@@ -611,6 +642,12 @@ private Connection openConnectionWithRetry(String url, Properties props) {
return conn;
}
+ private void failoverCluster() throws InterruptedException {
+ String clusterId = TestEnvironment.getCurrent().getInfo().getAuroraClusterName();
+ String randomNode = auroraUtil.getRandomDBClusterReaderInstanceId(clusterId);
+ auroraUtil.failoverClusterToTarget(clusterId, randomNode);
+ }
+
private void ensureClusterHealthy() throws InterruptedException {
auroraUtil.waitUntilClusterHasRightState(
@@ -649,6 +686,8 @@ private void ensureClusterHealthy() throws InterruptedException {
TestAuroraHostListProvider.clearCache();
TestPluginServiceImpl.clearHostAvailabilityCache();
+ MonitorThreadContainer.releaseInstance();
+ MonitorServiceImpl.clearCache();
}
private static Stream<Arguments> generateParams() {
@@ -710,5 +749,17 @@ public void writeData(Row row) {
cell = row.createCell(4);
cell.setCellValue(this.dnsUpdateTimeMillis);
}
+
+ @Override
+ public String toString() {
+ return String.format("%s [\nparamDriverName=%s,\nparamFailoverDelayMillis=%d,\n"
+ + "failureDetectionTimeMillis=%d,\nreconnectTimeMillis=%d,\ndnsUpdateTimeMillis=%d ]",
+ super.toString(),
+ this.paramDriverName,
+ this.paramFailoverDelayMillis,
+ this.failureDetectionTimeMillis,
+ this.reconnectTimeMillis,
+ this.dnsUpdateTimeMillis);
+ }
}
}
diff --git a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java
index 13e3846d5..fa5aa19e0 100644
--- a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java
+++ b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java
@@ -17,13 +17,15 @@
package integration.container.tests;
import static org.junit.jupiter.api.Assertions.fail;
+import static software.amazon.jdbc.PropertyDefinition.CONNECT_TIMEOUT;
import static software.amazon.jdbc.PropertyDefinition.PLUGINS;
+import static software.amazon.jdbc.PropertyDefinition.SOCKET_TIMEOUT;
import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_COUNT;
import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_INTERVAL;
import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_TIME;
import static software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin.FAILOVER_TIMEOUT_MS;
-import integration.DriverHelper;
+import integration.DatabaseEngine;
import integration.TestEnvironmentFeatures;
import integration.container.ConnectionStringHelper;
import integration.container.ProxyHelper;
@@ -40,6 +42,7 @@
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
+import java.util.LongSummaryStatistics;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
@@ -50,10 +53,14 @@
import org.apache.poi.xssf.usermodel.XSSFSheet;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.params.provider.Arguments;
+import software.amazon.jdbc.PropertyDefinition;
+import software.amazon.jdbc.plugin.efm.MonitorThreadContainer;
+import software.amazon.jdbc.plugin.efm2.MonitorServiceImpl;
import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin;
import software.amazon.jdbc.util.StringUtils;
@@ -68,6 +75,8 @@ public class PerformanceTest {
private static final Logger LOGGER = Logger.getLogger(PerformanceTest.class.getName());
+ private static final String MONITORING_CONNECTION_PREFIX = "monitoring-";
+
private static final int REPEAT_TIMES =
StringUtils.isNullOrEmpty(System.getenv("REPEAT_TIMES"))
? 5
@@ -120,8 +129,21 @@ private void doWritePerfDataToFile(String fileName, List<? extends PerfStatBase>
}
@TestTemplate
+ @Tag("efm")
public void test_FailureDetectionTime_EnhancedMonitoringEnabled() throws IOException {
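+ // Measure both the original "efm" plugin and the new "efm2" plugin, clearing monitor caches between runs.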
+ MonitorThreadContainer.releaseInstance();
+ MonitorServiceImpl.clearCache();
+ test_FailureDetectionTime_EnhancedMonitoringEnabled("efm");
+
+ MonitorThreadContainer.releaseInstance();
+ MonitorServiceImpl.clearCache();
+ test_FailureDetectionTime_EnhancedMonitoringEnabled("efm2");
+ }
+
+ public void test_FailureDetectionTime_EnhancedMonitoringEnabled(final String efmPlugin)
+ throws IOException {
+
enhancedFailureMonitoringPerfDataList.clear();
try {
@@ -131,7 +153,7 @@ public void test_FailureDetectionTime_EnhancedMonitoringEnabled() throws IOExcep
try {
Object[] args = a.get();
execute_FailureDetectionTime_EnhancedMonitoringEnabled(
- (int) args[0], (int) args[1], (int) args[2], (int) args[3]);
+ efmPlugin, (int) args[0], (int) args[1], (int) args[2], (int) args[3]);
} catch (SQLException ex) {
throw new RuntimeException(ex);
}
@@ -140,39 +162,63 @@ public void test_FailureDetectionTime_EnhancedMonitoringEnabled() throws IOExcep
} finally {
doWritePerfDataToFile(
String.format(
- "./build/reports/tests/"
- + "DbEngine_%s_Driver_%s_"
- + "FailureDetectionPerformanceResults_EnhancedMonitoringEnabled.xlsx",
+ "./build/reports/tests/EnhancedMonitoringOnly_"
+ + "Db_%s_Driver_%s_Instances_%d_Plugin_%s.xlsx",
TestEnvironment.getCurrent().getInfo().getRequest().getDatabaseEngine(),
- TestEnvironment.getCurrent().getCurrentDriver()),
+ TestEnvironment.getCurrent().getCurrentDriver(),
+ TestEnvironment.getCurrent().getInfo().getRequest().getNumOfInstances(),
+ efmPlugin),
enhancedFailureMonitoringPerfDataList);
enhancedFailureMonitoringPerfDataList.clear();
}
}
private void execute_FailureDetectionTime_EnhancedMonitoringEnabled(
- int detectionTime, int detectionInterval, int detectionCount, int sleepDelayMillis)
+ final String efmPlugin,
+ int detectionTimeMillis,
+ int detectionIntervalMillis,
+ int detectionCount,
+ int sleepDelayMillis)
throws SQLException {
+
final Properties props = ConnectionStringHelper.getDefaultProperties();
- DriverHelper.setMonitoringConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setMonitoringSocketTimeout(props, TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.CONNECT_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.SOCKET_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(TIMEOUT_SEC)));
+ CONNECT_TIMEOUT.set(props, String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
+
// this performance test measures efm failure detection time after disconnecting the network
- FAILURE_DETECTION_TIME.set(props, Integer.toString(detectionTime));
- FAILURE_DETECTION_INTERVAL.set(props, Integer.toString(detectionInterval));
+ FAILURE_DETECTION_TIME.set(props, Integer.toString(detectionTimeMillis));
+ FAILURE_DETECTION_INTERVAL.set(props, Integer.toString(detectionIntervalMillis));
FAILURE_DETECTION_COUNT.set(props, Integer.toString(detectionCount));
- PLUGINS.set(props, "efm");
+ PLUGINS.set(props, efmPlugin);
final PerfStatMonitoring data = new PerfStatMonitoring();
doMeasurePerformance(sleepDelayMillis, REPEAT_TIMES, props, data);
- data.paramDetectionTime = detectionTime;
- data.paramDetectionInterval = detectionInterval;
+ data.paramDetectionTime = detectionTimeMillis;
+ data.paramDetectionInterval = detectionIntervalMillis;
data.paramDetectionCount = detectionCount;
enhancedFailureMonitoringPerfDataList.add(data);
}
@TestTemplate
+ @Tag("efm")
+ @Tag("failover")
public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled() throws IOException {
+ MonitorThreadContainer.releaseInstance();
+ MonitorServiceImpl.clearCache();
+ test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled("efm");
+
+ MonitorThreadContainer.releaseInstance();
+ MonitorServiceImpl.clearCache();
+ test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled("efm2");
+ }
+
+ public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(final String efmPlugin)
+ throws IOException {
failoverWithEfmPerfDataList.clear();
@@ -183,7 +229,7 @@ public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled() thr
try {
Object[] args = a.get();
execute_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(
- (int) args[0], (int) args[1], (int) args[2], (int) args[3]);
+ efmPlugin, (int) args[0], (int) args[1], (int) args[2], (int) args[3]);
} catch (SQLException ex) {
throw new RuntimeException(ex);
}
@@ -192,31 +238,40 @@ public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled() thr
} finally {
doWritePerfDataToFile(
String.format(
- "./build/reports/tests/"
- + "DbEngine_%s_Driver_%s_"
- + "FailureDetectionPerformanceResults_FailoverAndEnhancedMonitoringEnabled.xlsx",
+ "./build/reports/tests/FailoverWithEnhancedMonitoring_"
+ + "Db_%s_Driver_%s_Instances_%d_Plugin_%s.xlsx",
TestEnvironment.getCurrent().getInfo().getRequest().getDatabaseEngine(),
- TestEnvironment.getCurrent().getCurrentDriver()),
+ TestEnvironment.getCurrent().getCurrentDriver(),
+ TestEnvironment.getCurrent().getInfo().getRequest().getNumOfInstances(),
+ efmPlugin),
failoverWithEfmPerfDataList);
failoverWithEfmPerfDataList.clear();
}
}
private void execute_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(
- int detectionTime, int detectionInterval, int detectionCount, int sleepDelayMillis)
+ final String efmPlugin,
+ int detectionTime,
+ int detectionInterval,
+ int detectionCount,
+ int sleepDelayMillis)
throws SQLException {
final Properties props = ConnectionStringHelper.getDefaultProperties();
- DriverHelper.setMonitoringConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setMonitoringSocketTimeout(props, TIMEOUT_SEC, TimeUnit.SECONDS);
- DriverHelper.setConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.CONNECT_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
+ props.setProperty(
+ MONITORING_CONNECTION_PREFIX + PropertyDefinition.SOCKET_TIMEOUT.name,
+ String.valueOf(TimeUnit.SECONDS.toMillis(TIMEOUT_SEC)));
+ CONNECT_TIMEOUT.set(props, String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
// this performance test measures failover and efm failure detection time after disconnecting
// the network
FAILURE_DETECTION_TIME.set(props, Integer.toString(detectionTime));
FAILURE_DETECTION_INTERVAL.set(props, Integer.toString(detectionInterval));
FAILURE_DETECTION_COUNT.set(props, Integer.toString(detectionCount));
- PLUGINS.set(props, "failover,efm");
+ PLUGINS.set(props, "failover," + efmPlugin);
FAILOVER_TIMEOUT_MS.set(props, Integer.toString(PERF_FAILOVER_TIMEOUT_MS));
props.setProperty(
"clusterInstanceHostPattern",
@@ -236,6 +291,7 @@ private void execute_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(
}
@TestTemplate
+ @Tag("failover")
public void test_FailoverTime_SocketTimeout() throws IOException {
failoverWithSocketTimeoutPerfDataList.clear();
@@ -255,9 +311,11 @@ public void test_FailoverTime_SocketTimeout() throws IOException {
} finally {
doWritePerfDataToFile(
String.format(
- "./build/reports/tests/DbEngine_%s_Driver_%s_FailoverPerformanceResults_SocketTimeout.xlsx",
+ "./build/reports/tests/FailoverWithSocketTimeout_"
+ + "Db_%s_Driver_%s_Instances_%d.xlsx",
TestEnvironment.getCurrent().getInfo().getRequest().getDatabaseEngine(),
- TestEnvironment.getCurrent().getCurrentDriver()),
+ TestEnvironment.getCurrent().getCurrentDriver(),
+ TestEnvironment.getCurrent().getInfo().getRequest().getNumOfInstances()),
failoverWithSocketTimeoutPerfDataList);
failoverWithSocketTimeoutPerfDataList.clear();
}
@@ -268,8 +326,8 @@ private void execute_FailoverTime_SocketTimeout(int socketTimeout, int sleepDela
final Properties props = ConnectionStringHelper.getDefaultProperties();
// this performance test measures how socket timeout changes the overall failover time
- DriverHelper.setSocketTimeout(props, socketTimeout, TimeUnit.SECONDS);
- DriverHelper.setConnectTimeout(props, CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
+ SOCKET_TIMEOUT.set(props, String.valueOf(TimeUnit.SECONDS.toMillis(socketTimeout)));
+ CONNECT_TIMEOUT.set(props, String.valueOf(TimeUnit.SECONDS.toMillis(CONNECT_TIMEOUT_SEC)));
// Loads just failover plugin; don't load Enhanced Failure Monitoring plugin
props.setProperty("wrapperPlugins", "failover");
@@ -296,12 +354,11 @@ private void doMeasurePerformance(
PerfStatBase data)
throws SQLException {
- final String QUERY = "SELECT pg_sleep(600)"; // 600s -> 10min
- final AtomicLong downtime = new AtomicLong();
- final List<Long> elapsedTimes = new ArrayList<>(repeatTimes);
+ final AtomicLong downtimeNanos = new AtomicLong();
+ final List<Long> elapsedTimeMillis = new ArrayList<>(repeatTimes);
for (int i = 0; i < repeatTimes; i++) {
- downtime.set(0);
+ downtimeNanos.set(0);
// Thread to stop network
final Thread thread =
@@ -317,7 +374,8 @@ private void doMeasurePerformance(
.getInstances()
.get(0)
.getInstanceId());
- downtime.set(System.nanoTime());
+ downtimeNanos.set(System.nanoTime());
+ LOGGER.finest("Network outages started.");
} catch (InterruptedException interruptedException) {
// Ignore, stop the thread
}
@@ -329,12 +387,16 @@ private void doMeasurePerformance(
thread.start();
// Execute long query
- try (final ResultSet result = statement.executeQuery(QUERY)) {
+ try (final ResultSet result = statement.executeQuery(getQuerySql())) {
fail("Sleep query finished, should not be possible with network downed.");
} catch (SQLException ex) { // Catching executing query
// Calculate and add detection time
- final long failureTime = (System.nanoTime() - downtime.get());
- elapsedTimes.add(failureTime);
+ if (downtimeNanos.get() == 0) {
+ LOGGER.warning("Network outages start time is undefined!");
+ } else {
+ final long failureTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - downtimeNanos.get());
+ elapsedTimeMillis.add(failureTimeMillis);
+ }
}
} finally {
@@ -349,15 +411,13 @@ private void doMeasurePerformance(
}
}
- final long min = elapsedTimes.stream().min(Long::compare).orElse(0L);
- final long max = elapsedTimes.stream().max(Long::compare).orElse(0L);
- final long avg =
- (long) elapsedTimes.stream().mapToLong(a -> a).summaryStatistics().getAverage();
+ final LongSummaryStatistics stats = elapsedTimeMillis.stream().mapToLong(a -> a).summaryStatistics();
data.paramNetworkOutageDelayMillis = sleepDelayMillis;
- data.minFailureDetectionTimeMillis = TimeUnit.NANOSECONDS.toMillis(min);
- data.maxFailureDetectionTimeMillis = TimeUnit.NANOSECONDS.toMillis(max);
- data.avgFailureDetectionTimeMillis = TimeUnit.NANOSECONDS.toMillis(avg);
+ data.minFailureDetectionTimeMillis = stats.getMin();
+ data.maxFailureDetectionTimeMillis = stats.getMax();
+ data.avgFailureDetectionTimeMillis = Math.round(stats.getAverage());
+ LOGGER.finest("Collected data: " + data);
}
private Connection openConnectionWithRetry(Properties props) {
@@ -390,8 +450,22 @@ private Connection connectToInstance(Properties props) throws SQLException {
return DriverManager.getConnection(url, props);
}
+ private String getQuerySql() {
+ final DatabaseEngine databaseEngine =
+ TestEnvironment.getCurrent().getInfo().getRequest().getDatabaseEngine();
+ switch (databaseEngine) {
+ case PG:
+ return "SELECT pg_sleep(600)"; // 600s -> 10min
+ case MYSQL:
+ case MARIADB:
+ return "SELECT sleep(600)"; // 600s -> 10min
+ default:
+ throw new UnsupportedOperationException(databaseEngine.name());
+ }
+ }
+
private Stream<Arguments> generateFailureDetectionTimeParams() {
- // detectionTime, detectionInterval, detectionCount, sleepDelayMS
+ // detectionTimeMs, detectionIntervalMs, detectionCount, sleepDelayMs
return Stream.of(
// Defaults
Arguments.of(30000, 5000, 3, 5000),
@@ -482,6 +556,20 @@ public void writeData(Row row) {
cell = row.createCell(6);
cell.setCellValue(this.avgFailureDetectionTimeMillis);
}
+
+ @Override
+ public String toString() {
+ return String.format("%s [\nparamDetectionTime=%d,\nparamDetectionInterval=%d,\nparamDetectionCount=%d,\n"
+ + "paramNetworkOutageDelayMillis=%d,\nmin=%d,\nmax=%d,\navg=%d ]",
+ super.toString(),
+ this.paramDetectionTime,
+ this.paramDetectionInterval,
+ this.paramDetectionCount,
+ this.paramNetworkOutageDelayMillis,
+ this.minFailureDetectionTimeMillis,
+ this.maxFailureDetectionTimeMillis,
+ this.avgFailureDetectionTimeMillis);
+ }
}
private static class PerfStatSocketTimeout extends PerfStatBase {
@@ -515,5 +603,17 @@ public void writeData(Row row) {
cell = row.createCell(4);
cell.setCellValue(this.avgFailureDetectionTimeMillis);
}
+
+ @Override
+ public String toString() {
+ return String.format("%s [\nparamSocketTimeout=%d,\nparamNetworkOutageDelayMillis=%d,\n"
+ + "min=%d,\nmax=%d,\navg=%d ]",
+ super.toString(),
+ this.paramSocketTimeout,
+ this.paramNetworkOutageDelayMillis,
+ this.minFailureDetectionTimeMillis,
+ this.maxFailureDetectionTimeMillis,
+ this.avgFailureDetectionTimeMillis);
+ }
}
}
diff --git a/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingPerformanceTest.java b/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingPerformanceTest.java
index 096197b63..4ce2cd424 100644
--- a/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingPerformanceTest.java
+++ b/wrapper/src/test/java/integration/container/tests/ReadWriteSplittingPerformanceTest.java
@@ -41,6 +41,7 @@
import org.apache.poi.xssf.usermodel.XSSFSheet;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -55,6 +56,7 @@
@ExtendWith(TestDriverProvider.class)
@EnableOnTestFeature(TestEnvironmentFeatures.PERFORMANCE)
@EnableOnNumOfInstances(min = 5)
+@Tag("rw-splitting")
public class ReadWriteSplittingPerformanceTest {
private static final Logger LOGGER =
diff --git a/wrapper/src/test/java/integration/host/TestEnvironment.java b/wrapper/src/test/java/integration/host/TestEnvironment.java
index e7113e0c6..21408c884 100644
--- a/wrapper/src/test/java/integration/host/TestEnvironment.java
+++ b/wrapper/src/test/java/integration/host/TestEnvironment.java
@@ -819,7 +819,8 @@ public void runTests(String taskName) throws IOException, InterruptedException {
containerHelper.runCmd(this.testContainer, "./collect_test_results.sh");
assertEquals(0, exitCode, "Hibernate ORM tests failed");
} else {
- containerHelper.runTest(this.testContainer, taskName);
+ TestEnvironmentConfiguration config = new TestEnvironmentConfiguration();
+ containerHelper.runTest(this.testContainer, taskName, config.includeTags, config.excludeTags);
}
}
@@ -834,7 +835,8 @@ public void debugTests(String taskName) throws IOException, InterruptedException
containerHelper.runCmd(this.testContainer, "./collect_test_results.sh");
assertEquals(0, exitCode, "Hibernate ORM tests failed");
} else {
- containerHelper.debugTest(this.testContainer, taskName);
+ TestEnvironmentConfiguration config = new TestEnvironmentConfiguration();
+ containerHelper.debugTest(this.testContainer, taskName, config.includeTags, config.excludeTags);
}
}
diff --git a/wrapper/src/test/java/integration/host/TestEnvironmentConfiguration.java b/wrapper/src/test/java/integration/host/TestEnvironmentConfiguration.java
index 238a559ed..9fc5443f3 100644
--- a/wrapper/src/test/java/integration/host/TestEnvironmentConfiguration.java
+++ b/wrapper/src/test/java/integration/host/TestEnvironmentConfiguration.java
@@ -18,8 +18,10 @@
public class TestEnvironmentConfiguration {
- public boolean noDocker = Boolean.parseBoolean(System.getProperty("test-no-docker", "false"));
- public boolean noAurora = Boolean.parseBoolean(System.getProperty("test-no-aurora", "false"));
+ public boolean noDocker =
+ Boolean.parseBoolean(System.getProperty("test-no-docker", "false"));
+ public boolean noAurora =
+ Boolean.parseBoolean(System.getProperty("test-no-aurora", "false"));
public boolean noPerformance =
Boolean.parseBoolean(System.getProperty("test-no-performance", "false"));
public boolean noMysqlEngine =
@@ -36,14 +38,39 @@ public class TestEnvironmentConfiguration {
Boolean.parseBoolean(System.getProperty("test-no-mariadb-driver", "false"));
public boolean noFailover =
Boolean.parseBoolean(System.getProperty("test-no-failover", "false"));
- public boolean noIam = Boolean.parseBoolean(System.getProperty("test-no-iam", "false"));
+ public boolean noIam =
+ Boolean.parseBoolean(System.getProperty("test-no-iam", "false"));
public boolean noSecretsManager =
Boolean.parseBoolean(System.getProperty("test-no-secrets-manager", "false"));
- public boolean noHikari = Boolean.parseBoolean(System.getProperty("test-no-hikari", "false"));
- public boolean noGraalVm = Boolean.parseBoolean(System.getProperty("test-no-graalvm", "false"));
- public boolean noOpenJdk = Boolean.parseBoolean(System.getProperty("test-no-openjdk", "false"));
- public boolean testHibernateOnly = Boolean.parseBoolean(System.getProperty("test-hibernate-only", "false"));
- public boolean testAutoscalingOnly = Boolean.parseBoolean(System.getProperty("test-autoscaling-only", "false"));
+ public boolean noHikari =
+ Boolean.parseBoolean(System.getProperty("test-no-hikari", "false"));
+ public boolean noGraalVm =
+ Boolean.parseBoolean(System.getProperty("test-no-graalvm", "false"));
+ public boolean noOpenJdk =
+ Boolean.parseBoolean(System.getProperty("test-no-openjdk", "false"));
+ public boolean noOpenJdk8 =
+ Boolean.parseBoolean(System.getProperty("test-no-openjdk8", "false"));
+ public boolean noOpenJdk11 =
+ Boolean.parseBoolean(System.getProperty("test-no-openjdk11", "false"));
+ public boolean testHibernateOnly =
+ Boolean.parseBoolean(System.getProperty("test-hibernate-only", "false"));
+ public boolean testAutoscalingOnly =
+ Boolean.parseBoolean(System.getProperty("test-autoscaling-only", "false"));
+
+ public boolean noInstances1 =
+ Boolean.parseBoolean(System.getProperty("test-no-instances-1", "false"));
+ public boolean noInstances2 =
+ Boolean.parseBoolean(System.getProperty("test-no-instances-2", "false"));
+ public boolean noInstances5 =
+ Boolean.parseBoolean(System.getProperty("test-no-instances-5", "false"));
+
+ public boolean noTracesTelemetry =
+ Boolean.parseBoolean(System.getProperty("test-no-traces-telemetry", "false"));
+ public boolean noMetricsTelemetry =
+ Boolean.parseBoolean(System.getProperty("test-no-metrics-telemetry", "false"));
+
+ public String includeTags = System.getProperty("test-include-tags");
+ public String excludeTags = System.getProperty("test-exclude-tags");
public String auroraDbRegion = System.getenv("AURORA_DB_REGION");
diff --git a/wrapper/src/test/java/integration/host/TestEnvironmentProvider.java b/wrapper/src/test/java/integration/host/TestEnvironmentProvider.java
index 4efa52e50..5aa8da892 100644
--- a/wrapper/src/test/java/integration/host/TestEnvironmentProvider.java
+++ b/wrapper/src/test/java/integration/host/TestEnvironmentProvider.java
@@ -52,365 +52,106 @@ public Stream<TestTemplateInvocationContext> provideTestTemplateInvocationContex
preCreateInfos.clear();
ArrayList<TestTemplateInvocationContext> resultContextList = new ArrayList<>();
- final boolean noDocker = Boolean.parseBoolean(System.getProperty("test-no-docker", "false"));
- final boolean noAurora = Boolean.parseBoolean(System.getProperty("test-no-aurora", "false"));
- final boolean noPerformance =
- Boolean.parseBoolean(System.getProperty("test-no-performance", "false"));
- final boolean noMysqlEngine =
- Boolean.parseBoolean(System.getProperty("test-no-mysql-engine", "false"));
- final boolean noMysqlDriver =
- Boolean.parseBoolean(System.getProperty("test-no-mysql-driver", "false"));
- final boolean noPgEngine =
- Boolean.parseBoolean(System.getProperty("test-no-pg-engine", "false"));
- final boolean noPgDriver =
- Boolean.parseBoolean(System.getProperty("test-no-pg-driver", "false"));
- final boolean noMariadbEngine =
- Boolean.parseBoolean(System.getProperty("test-no-mariadb-engine", "false"));
- final boolean noMariadbDriver =
- Boolean.parseBoolean(System.getProperty("test-no-mariadb-driver", "false"));
- final boolean noFailover =
- Boolean.parseBoolean(System.getProperty("test-no-failover", "false"));
- final boolean noIam = Boolean.parseBoolean(System.getProperty("test-no-iam", "false"));
- final boolean noSecretsManager =
- Boolean.parseBoolean(System.getProperty("test-no-secrets-manager", "false"));
- final boolean noHikari = Boolean.parseBoolean(System.getProperty("test-no-hikari", "false"));
- final boolean noGraalVm = Boolean.parseBoolean(System.getProperty("test-no-graalvm", "false"));
- final boolean noOpenJdk = Boolean.parseBoolean(System.getProperty("test-no-openjdk", "false"));
- final boolean testHibernateOnly = Boolean.parseBoolean(System.getProperty("test-hibernate-only", "false"));
- final boolean testAutoscalingOnly = Boolean.parseBoolean(System.getProperty("test-autoscaling-only", "false"));
- final boolean noTracesTelemetry = Boolean.parseBoolean(System.getProperty("test-no-traces-telemetry", "false"));
- final boolean noMetricsTelemetry = Boolean.parseBoolean(System.getProperty("test-no-metrics-telemetry", "false"));
-
- if (!noDocker) {
- if (!noMysqlEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MYSQL,
- DatabaseInstances.SINGLE_INSTANCE,
- 1,
- DatabaseEngineDeployment.DOCKER,
- testHibernateOnly ? TargetJvm.OPENJDK11 : TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noPgEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.PG,
- DatabaseInstances.SINGLE_INSTANCE,
- 1,
- DatabaseEngineDeployment.DOCKER,
- testHibernateOnly ? TargetJvm.OPENJDK11 : TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noMariadbEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MARIADB,
- DatabaseInstances.SINGLE_INSTANCE,
- 1,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noMysqlEngine && !noGraalVm) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MYSQL,
- DatabaseInstances.SINGLE_INSTANCE,
- 1,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.GRAALVM,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noPgEngine && !noGraalVm) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.PG,
- DatabaseInstances.SINGLE_INSTANCE,
- 1,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.GRAALVM,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noMariadbEngine && !noGraalVm) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MARIADB,
- DatabaseInstances.SINGLE_INSTANCE,
- 1,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.GRAALVM,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
-
- // multiple instances
-
- if (!noMysqlEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MYSQL,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.DOCKER,
- testHibernateOnly ? TargetJvm.OPENJDK11 : TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noPgEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.PG,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.DOCKER,
- testHibernateOnly ? TargetJvm.OPENJDK11 : TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noMariadbEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MARIADB,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noMysqlEngine && !noGraalVm) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MYSQL,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.GRAALVM,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noPgEngine && !noGraalVm) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.PG,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.GRAALVM,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- if (!noMariadbEngine && !noGraalVm) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MARIADB,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.DOCKER,
- TargetJvm.GRAALVM,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED,
- // AWS credentials are required for XRay telemetry
- noTracesTelemetry && noMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)));
- }
- }
-
- if (!noAurora) {
- if (!noMysqlEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MYSQL,
- DatabaseInstances.MULTI_INSTANCE,
- 5,
- DatabaseEngineDeployment.AURORA,
- TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED,
- TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED,
- noIam ? null : TestEnvironmentFeatures.IAM,
- noSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noPerformance ? null : TestEnvironmentFeatures.PERFORMANCE,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED)));
-
- // Tests for HIKARI, IAM, SECRETS_MANAGER and PERFORMANCE are covered by
- // cluster configuration above, so it's safe to skip these tests for configurations below.
- // The main goal of the following cluster configurations is to check failover.
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.MYSQL,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.AURORA,
- TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED,
- TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED)));
- }
- if (!noPgEngine && !noOpenJdk) {
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.PG,
- DatabaseInstances.MULTI_INSTANCE,
- 5,
- DatabaseEngineDeployment.AURORA,
- TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED,
- TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED,
- noIam ? null : TestEnvironmentFeatures.IAM,
- noSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER,
- noHikari ? null : TestEnvironmentFeatures.HIKARI,
- noPerformance ? null : TestEnvironmentFeatures.PERFORMANCE,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED)));
-
- // Tests for HIKARI, IAM, SECRETS_MANAGER and PERFORMANCE are covered by
- // cluster configuration above, so it's safe to skip these tests for configurations below.
- // The main goal of the following cluster configurations is to check failover.
- resultContextList.add(
- getEnvironment(
- new TestEnvironmentRequest(
- DatabaseEngine.PG,
- DatabaseInstances.MULTI_INSTANCE,
- 2,
- DatabaseEngineDeployment.AURORA,
- TargetJvm.OPENJDK8,
- TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
- noFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED,
- TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED,
- noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
- noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
- noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
- testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
- noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
- noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED)));
+ TestEnvironmentConfiguration config = new TestEnvironmentConfiguration();
+
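+ // Build the full test-environment matrix: iterate over every deployment, database engine,
+ // instance topology, instance count, and target JVM, skipping any combination that is
+ // disabled by the configuration flags checked below.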
+ for (DatabaseEngineDeployment deployment : DatabaseEngineDeployment.values()) {
+ if (deployment == DatabaseEngineDeployment.DOCKER && config.noDocker) {
+ continue;
+ }
+ if (deployment == DatabaseEngineDeployment.AURORA && config.noAurora) {
+ continue;
+ }
+ if (deployment == DatabaseEngineDeployment.RDS) {
+ // Not in use.
+ continue;
+ }
+
+ for (DatabaseEngine engine : DatabaseEngine.values()) {
+ if (engine == DatabaseEngine.PG && config.noPgEngine) {
+ continue;
+ }
+ if (engine == DatabaseEngine.MYSQL && config.noMysqlEngine) {
+ continue;
+ }
+ if (engine == DatabaseEngine.MARIADB && config.noMariadbEngine) {
+ continue;
+ }
+
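+ // Docker deployments are only exercised with the single-instance topology.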
+ for (DatabaseInstances instances : DatabaseInstances.values()) {
+ if (deployment == DatabaseEngineDeployment.DOCKER
+ && instances != DatabaseInstances.SINGLE_INSTANCE) {
+ continue;
+ }
+
+ for (int numOfInstances : Arrays.asList(1, 2, 5)) {
+ if (instances == DatabaseInstances.SINGLE_INSTANCE && numOfInstances > 1) {
+ continue;
+ }
+ if (instances == DatabaseInstances.MULTI_INSTANCE && numOfInstances == 1) {
+ continue;
+ }
+ if (numOfInstances == 1 && config.noInstances1) {
+ continue;
+ }
+ if (numOfInstances == 2 && config.noInstances2) {
+ continue;
+ }
+ if (numOfInstances == 5 && config.noInstances5) {
+ continue;
+ }
+
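+ // Filter target JVMs: OpenJDK 8/11 and GraalVM can each be disabled via flags, and
+ // Hibernate-only runs are restricted to OpenJDK 11.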
+ for (TargetJvm jvm : TargetJvm.values()) {
+ if ((jvm == TargetJvm.OPENJDK8 || jvm == TargetJvm.OPENJDK11) && config.noOpenJdk) {
+ continue;
+ }
+ if (jvm == TargetJvm.OPENJDK8 && config.noOpenJdk8) {
+ continue;
+ }
+ if (jvm == TargetJvm.OPENJDK11 && config.noOpenJdk11) {
+ continue;
+ }
+ if (jvm != TargetJvm.OPENJDK11 && config.testHibernateOnly) {
+ // Run hibernate tests with OPENJDK11 only.
+ continue;
+ }
+ if (jvm == TargetJvm.GRAALVM && config.noGraalVm) {
+ continue;
+ }
+
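+ // All filters passed: register a test environment request for this combination.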
+ resultContextList.add(
+ getEnvironment(
+ new TestEnvironmentRequest(
+ engine,
+ instances,
+ instances == DatabaseInstances.SINGLE_INSTANCE ? 1 : numOfInstances,
+ deployment,
+ jvm,
+ TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED,
+ deployment == DatabaseEngineDeployment.DOCKER
+ && config.noTracesTelemetry
+ && config.noMetricsTelemetry
+ ? null
+ : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED,
+ deployment == DatabaseEngineDeployment.DOCKER || config.noFailover
+ ? null
+ : TestEnvironmentFeatures.FAILOVER_SUPPORTED,
+ deployment == DatabaseEngineDeployment.DOCKER || config.noIam
+ ? null
+ : TestEnvironmentFeatures.IAM,
+ config.noSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER,
+ config.noHikari ? null : TestEnvironmentFeatures.HIKARI,
+ config.noPerformance ? null : TestEnvironmentFeatures.PERFORMANCE,
+ config.noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null,
+ config.noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null,
+ config.noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null,
+ config.testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null,
+ config.testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null,
+ config.noTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED,
+ config.noMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED)));
+ }
+ }
+ }
}
}
diff --git a/wrapper/src/test/java/integration/util/ContainerHelper.java b/wrapper/src/test/java/integration/util/ContainerHelper.java
index 32bd30b97..26d63fec7 100644
--- a/wrapper/src/test/java/integration/util/ContainerHelper.java
+++ b/wrapper/src/test/java/integration/util/ContainerHelper.java
@@ -26,6 +26,7 @@
import eu.rekawek.toxiproxy.ToxiproxyClient;
import integration.TestInstanceInfo;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.function.Consumer;
import java.util.function.Function;
import org.testcontainers.DockerClientFactory;
@@ -89,25 +90,59 @@ public Long runCmdInDirectory(GenericContainer<?> container, String workingDirec
public void runTest(GenericContainer<?> container, String task)
throws IOException, InterruptedException {
+ runTest(container, task, null, null);
+ }
+
+ public void runTest(GenericContainer<?> container, String task, String includeTags, String excludeTags)
+ throws IOException, InterruptedException {
System.out.println("==== Container console feed ==== >>>>");
Consumer<OutputFrame> consumer = new ConsoleConsumer(true);
execInContainer(container, consumer, "printenv", "TEST_ENV_DESCRIPTION");
execInContainer(container, consumer, "java", "-version");
- Long exitCode =
- execInContainer(container, consumer, "./gradlew", task, "--no-parallel", "--no-daemon");
+
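+ // Assemble the Gradle command; optional include/exclude JUnit tag filters are forwarded
+ // to the in-container test run as system properties, with spaces stripped from the tag lists.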
+ ArrayList<String> commands = new ArrayList<>();
+ commands.add("./gradlew");
+ commands.add(task);
+ commands.add("--no-parallel");
+ commands.add("--no-daemon");
+ if (!StringUtils.isNullOrEmpty(includeTags)) {
+ commands.add(String.format("-Dtest-include-tags=%s", includeTags.replaceAll(" ", "")));
+ }
+ if (!StringUtils.isNullOrEmpty(excludeTags)) {
+ commands.add(String.format("-Dtest-exclude-tags=%s", excludeTags.replaceAll(" ", "")));
+ }
+
+ Long exitCode = execInContainer(container, consumer, commands.toArray(new String[0]));
System.out.println("==== Container console feed ==== <<<<");
assertEquals(0, exitCode, "Some tests failed.");
}
public void debugTest(GenericContainer<?> container, String task)
throws IOException, InterruptedException {
+ debugTest(container, task, null, null);
+ }
+
+ public void debugTest(GenericContainer<?> container, String task, String includeTags, String excludeTags)
+ throws IOException, InterruptedException {
System.out.println("==== Container console feed ==== >>>>");
Consumer<OutputFrame> consumer = new ConsoleConsumer();
execInContainer(container, consumer, "printenv", "TEST_ENV_DESCRIPTION");
execInContainer(container, consumer, "java", "-version");
- Long exitCode =
- execInContainer(
- container, consumer, "./gradlew", task, "--debug-jvm", "--no-parallel", "--no-daemon");
+
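+ // Same command assembly as runTest, with --debug-jvm added so a debugger can attach to the forked test JVM.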
+ ArrayList<String> commands = new ArrayList<>();
+ commands.add("./gradlew");
+ commands.add(task);
+ commands.add("--debug-jvm");
+ commands.add("--no-parallel");
+ commands.add("--no-daemon");
+ if (!StringUtils.isNullOrEmpty(includeTags)) {
+ commands.add(String.format("-Dtest-include-tags=%s", includeTags.replaceAll(" ", "")));
+ }
+ if (!StringUtils.isNullOrEmpty(excludeTags)) {
+ commands.add(String.format("-Dtest-exclude-tags=%s", excludeTags.replaceAll(" ", "")));
+ }
+
+ Long exitCode = execInContainer(container, consumer, commands.toArray(new String[0]));
System.out.println("==== Container console feed ==== <<<<");
assertEquals(0, exitCode, "Some tests failed.");
}