diff --git a/.github/workflows/aurora_performance.yml b/.github/workflows/aurora_performance.yml index 3f41f3b1..1ce1b126 100644 --- a/.github/workflows/aurora_performance.yml +++ b/.github/workflows/aurora_performance.yml @@ -55,8 +55,8 @@ jobs: run: | ./gradlew --no-parallel --no-daemon test-aurora-${{ matrix.db }}-performance --info env: - AURORA_CLUSTER_DOMAIN: ${{ secrets.DB_CONN_SUFFIX }} - AURORA_DB_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + RDS_CLUSTER_DOMAIN: ${{ secrets.DB_CONN_SUFFIX }} + RDS_DB_REGION: ${{ secrets.AWS_DEFAULT_REGION }} AWS_ACCESS_KEY_ID: ${{ env.TEMP_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ env.TEMP_AWS_SECRET_ACCESS_KEY }} AWS_SESSION_TOKEN: ${{ env.TEMP_AWS_SESSION_TOKEN }} diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index d6ff0ccf..5915f41a 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -22,9 +22,6 @@ jobs: steps: - name: Clone repository uses: actions/checkout@v4 - with: - fetch-depth: 50 - - name: "Set up JDK 8" uses: actions/setup-java@v3 with: diff --git a/.github/workflows/multi_az_integration_tests.yml b/.github/workflows/multi_az_integration_tests.yml new file mode 100644 index 00000000..acea264c --- /dev/null +++ b/.github/workflows/multi_az_integration_tests.yml @@ -0,0 +1,94 @@ +name: Integration Tests + +on: + workflow_dispatch: + push: + branches: + - main + - feat/taz-integration-tests + paths-ignore: + - "**/*.md" + - "**/*.jpg" + - "**/README.txt" + - "**/LICENSE.txt" + - "docs/**" + - "ISSUE_TEMPLATE/**" + - "**/remove-old-artifacts.yml" + +jobs: + run-integration-tests: + name: Run Integration Tests + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + dbEngine: [ "multi-az-mysql", "multi-az-postgres" ] + + steps: + - name: Clone repository + uses: actions/checkout@v4 + - name: "Set up JDK 8" + uses: actions/setup-java@v3 + with: + distribution: "corretto" + java-version: 8 + - name: Set up Node.js + uses: actions/setup-node@v3 + with: + node-version: "20.x" + - name: Install dependencies + run: npm install --no-save + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + + - name: Set up Temp AWS Credentials + run: | + creds=($(aws sts get-session-token \ + --duration-seconds 21600 \ + --query 'Credentials.[AccessKeyId, SecretAccessKey, SessionToken]' \ + --output text \ + | xargs)); + echo "::add-mask::${creds[0]}" + echo "::add-mask::${creds[1]}" + echo "::add-mask::${creds[2]}" + echo "TEMP_AWS_ACCESS_KEY_ID=${creds[0]}" >> $GITHUB_ENV + echo "TEMP_AWS_SECRET_ACCESS_KEY=${creds[1]}" >> $GITHUB_ENV + echo "TEMP_AWS_SESSION_TOKEN=${creds[2]}" >> $GITHUB_ENV + + - name: Run Integration Tests + run: | + ./gradlew --no-parallel --no-daemon test-${{ matrix.dbEngine }} --info + env: + RDS_CLUSTER_DOMAIN: ${{ secrets.DB_CONN_SUFFIX }} + RDS_DB_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_ACCESS_KEY_ID: ${{ env.TEMP_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ env.TEMP_AWS_SECRET_ACCESS_KEY }} + AWS_SESSION_TOKEN: ${{ env.TEMP_AWS_SESSION_TOKEN }} + + - name: "Get Github Action IP" + if: always() + id: ip + uses: haythem/public-ip@v1.3 + + - name: "Remove Github Action IP" + if: always() + run: | + aws ec2 revoke-security-group-ingress \ + --group-name default \ + --protocol -1 \ + --port -1 \ + --cidr ${{ 
steps.ip.outputs.ipv4 }}/32 \ + 2>&1 > /dev/null; + + - name: Archive results + if: always() + uses: actions/upload-artifact@v4 + with: + name: integration-report + path: ./tests/integration/container/reports + retention-days: 5 diff --git a/common/lib/database_dialect/database_dialect.ts b/common/lib/database_dialect/database_dialect.ts index 15a1f7f0..cc552606 100644 --- a/common/lib/database_dialect/database_dialect.ts +++ b/common/lib/database_dialect/database_dialect.ts @@ -17,6 +17,7 @@ import { HostListProvider } from "../host_list_provider/host_list_provider"; import { HostListProviderService } from "../host_list_provider_service"; import { ClientWrapper } from "../client_wrapper"; +import { FailoverRestriction } from "../plugins/failover/failover_restriction"; export enum DatabaseType { MYSQL, @@ -37,6 +38,7 @@ export interface DatabaseDialect { getConnectFunc(targetClient: any): () => Promise; getDatabaseType(): DatabaseType; getDialectName(): string; + getFailoverRestrictions(): FailoverRestriction[]; doesStatementSetReadOnly(statement: string): boolean | undefined; doesStatementSetTransactionIsolation(statement: string): number | undefined; doesStatementSetAutoCommit(statement: string): boolean | undefined; diff --git a/common/lib/plugins/failover/failover_restriction.ts b/common/lib/plugins/failover/failover_restriction.ts index ac401a35..770a0afb 100644 --- a/common/lib/plugins/failover/failover_restriction.ts +++ b/common/lib/plugins/failover/failover_restriction.ts @@ -15,5 +15,6 @@ */ export enum FailoverRestriction { - DISABLE_TASK_A + DISABLE_TASK_A, + ENABLE_WRITER_IN_TASK_B } diff --git a/common/lib/plugins/failover/reader_failover_handler.ts b/common/lib/plugins/failover/reader_failover_handler.ts index a0186389..70b71013 100644 --- a/common/lib/plugins/failover/reader_failover_handler.ts +++ b/common/lib/plugins/failover/reader_failover_handler.ts @@ -26,6 +26,7 @@ import { Messages } from "../../utils/messages"; import { WrapperProperties } from "../../wrapper_property"; import { ReaderTaskSelectorHandler } from "./reader_task_selector"; import { uniqueId } from "lodash"; +import { FailoverRestriction } from "./failover_restriction"; export interface ReaderFailoverHandler { failover(hosts: HostInfo[], currentHost: HostInfo): Promise; @@ -216,9 +217,11 @@ export class ClusterAwareReaderFailoverHandler implements ReaderFailoverHandler getReaderHostsByPriority(hosts: HostInfo[]): HostInfo[] { const activeReaders: HostInfo[] = []; const downHostList: HostInfo[] = []; + let writerHost: HostInfo | null = null; hosts.forEach((host) => { if (host.role === HostRole.WRITER) { + writerHost = host; return; } @@ -232,8 +235,15 @@ export class ClusterAwareReaderFailoverHandler implements ReaderFailoverHandler shuffleList(activeReaders); shuffleList(downHostList); + const numOfReaders = downHostList.length + activeReaders.length; const hostsByPriority: HostInfo[] = [...activeReaders]; hostsByPriority.push(...downHostList); + if ( + writerHost !== null && + (numOfReaders === 0 || this.pluginService.getDialect().getFailoverRestrictions().includes(FailoverRestriction.ENABLE_WRITER_IN_TASK_B)) + ) { + hostsByPriority.push(writerHost); + } return hostsByPriority; } diff --git a/common/lib/plugins/failover/writer_failover_handler.ts b/common/lib/plugins/failover/writer_failover_handler.ts index d0b89485..f64dfb64 100644 --- a/common/lib/plugins/failover/writer_failover_handler.ts +++ b/common/lib/plugins/failover/writer_failover_handler.ts @@ -20,12 +20,13 @@ import { 
ClusterAwareReaderFailoverHandler } from "./reader_failover_handler"; import { PluginService } from "../../plugin_service"; import { HostAvailability } from "../../host_availability/host_availability"; import { AwsWrapperError } from "../../utils/errors"; -import { getWriter, maskProperties } from "../../utils/utils"; +import { getWriter, logTopology, maskProperties } from "../../utils/utils"; import { ReaderFailoverResult } from "./reader_failover_result"; import { Messages } from "../../utils/messages"; import { logger } from "../../../logutils"; import { WrapperProperties } from "../../wrapper_property"; import { ClientWrapper } from "../../client_wrapper"; +import { FailoverRestriction } from "./failover_restriction"; export interface WriterFailoverHandler { failover(currentTopology: HostInfo[]): Promise; @@ -104,11 +105,14 @@ export class ClusterAwareWriterFailoverHandler implements WriterFailoverHandler const taskB = waitForNewWriterHandlerTask.call(); let selectedTask = ""; - const failoverTask = Promise.any([taskA, taskB]) + const singleTask: boolean = this.pluginService.getDialect().getFailoverRestrictions().includes(FailoverRestriction.DISABLE_TASK_A); + const failoverTaskPromise = singleTask ? taskB : Promise.any([taskA, taskB]); + + const failoverTask = failoverTaskPromise .then((result) => { selectedTask = result.taskName; // If the first resolved promise is connected or has an error, return it. - if (result.isConnected || result.exception) { + if (result.isConnected || result.exception || singleTask) { return result; } @@ -369,6 +373,8 @@ class WaitForNewWriterHandlerTask { } async refreshTopologyAndConnectToNewWriter(): Promise { + const allowOldWriter: boolean = this.pluginService.getDialect().getFailoverRestrictions().includes(FailoverRestriction.ENABLE_WRITER_IN_TASK_B); + while (this.pluginService.getCurrentClient() && Date.now() < this.endTime && !this.failoverCompleted) { try { if (this.currentReaderTargetClient) { @@ -390,8 +396,9 @@ class WaitForNewWriterHandlerTask { } else { this.currentTopology = topology; const writerCandidate = getWriter(this.currentTopology); - if (writerCandidate && !this.isSame(writerCandidate, this.originalWriterHost)) { + if (writerCandidate && (allowOldWriter || !this.isSame(writerCandidate, this.originalWriterHost))) { // new writer is available, and it's different from the previous writer + logger.debug(logTopology(this.currentTopology, "[Task B] ")); if (await this.connectToWriter(writerCandidate)) { return true; } diff --git a/mysql/lib/dialect/mysql_database_dialect.ts b/mysql/lib/dialect/mysql_database_dialect.ts index 31d575b8..b60a8a33 100644 --- a/mysql/lib/dialect/mysql_database_dialect.ts +++ b/mysql/lib/dialect/mysql_database_dialect.ts @@ -23,6 +23,7 @@ import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/datab import { TransactionIsolationLevel } from "../../../common/lib/utils/transaction_isolation_level"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { ClientUtils } from "../../../common/lib/utils/client_utils"; +import { FailoverRestriction } from "../../../common/lib/plugins/failover/failover_restriction"; export class MySQLDatabaseDialect implements DatabaseDialect { protected dialectName: string = this.constructor.name; @@ -118,6 +119,10 @@ export class MySQLDatabaseDialect implements DatabaseDialect { return this.dialectName; } + getFailoverRestrictions(): FailoverRestriction[] { + return []; + } + doesStatementSetReadOnly(statement: string): boolean | undefined { if 
(statement.includes("set session transaction read only")) { return true; diff --git a/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts b/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts index 89af4a5f..029b75bf 100644 --- a/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts +++ b/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts @@ -24,10 +24,8 @@ import { Messages } from "../../../common/lib/utils/messages"; import { logger } from "../../../common/logutils"; import { AwsWrapperError } from "../../../common/lib/utils/errors"; import { TopologyAwareDatabaseDialect } from "../../../common/lib/topology_aware_database_dialect"; -import { HostAvailability } from "../../../common/lib/host_availability/host_availability"; -import { HostInfoBuilder } from "../../../common/lib/host_info_builder"; -import { SimpleHostAvailabilityStrategy } from "../../../common/lib/host_availability/simple_host_availability_strategy"; import { RdsHostListProvider } from "../../../common/lib/host_list_provider/rds_host_list_provider"; +import { FailoverRestriction } from "../../../common/lib/plugins/failover/failover_restriction"; export class RdsMultiAZMySQLDatabaseDialect extends MySQLDatabaseDialect implements TopologyAwareDatabaseDialect { private static readonly TOPOLOGY_QUERY: string = "SELECT id, endpoint, port FROM mysql.rds_topology"; @@ -137,4 +135,8 @@ export class RdsMultiAZMySQLDatabaseDialect extends MySQLDatabaseDialect impleme getDialectUpdateCandidates(): string[] { return []; } + + getFailoverRestrictions(): FailoverRestriction[] { + return [FailoverRestriction.DISABLE_TASK_A, FailoverRestriction.ENABLE_WRITER_IN_TASK_B]; + } } diff --git a/pg/lib/dialect/pg_database_dialect.ts b/pg/lib/dialect/pg_database_dialect.ts index 9e4ae43d..482ecac5 100644 --- a/pg/lib/dialect/pg_database_dialect.ts +++ b/pg/lib/dialect/pg_database_dialect.ts @@ -22,6 +22,7 @@ import { AwsWrapperError } from "../../../common/lib/utils/errors"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { TransactionIsolationLevel } from "../../../common/lib/utils/transaction_isolation_level"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; +import { FailoverRestriction } from "../../../common/lib/plugins/failover/failover_restriction"; export class PgDatabaseDialect implements DatabaseDialect { protected dialectName: string = this.constructor.name; @@ -104,6 +105,10 @@ export class PgDatabaseDialect implements DatabaseDialect { return this.dialectName; } + getFailoverRestrictions(): FailoverRestriction[] { + return []; + } + doesStatementSetAutoCommit(statement: string): boolean | undefined { return undefined; } diff --git a/tests/integration/container/tests/aurora_failover.test.ts b/tests/integration/container/tests/aurora_failover.test.ts index 2fa351bf..8b1ad15b 100644 --- a/tests/integration/container/tests/aurora_failover.test.ts +++ b/tests/integration/container/tests/aurora_failover.test.ts @@ -58,7 +58,7 @@ describe("aurora failover", () => { logger.info(`Test started: ${expect.getState().currentTestName}`); env = await TestEnvironment.getCurrent(); - auroraTestUtility = new AuroraTestUtility(env.auroraRegion); + auroraTestUtility = new AuroraTestUtility(env.region); driver = DriverHelper.getDriverForDatabaseEngine(env.engine); initClientFunc = DriverHelper.getClient(driver); await ProxyHelper.enableAllConnectivity(); @@ -67,7 +67,7 @@ describe("aurora failover", () => { client = null; secondaryClient = null; await 
TestEnvironment.updateWriter(); - }); + }, 1000000); afterEach(async () => { if (client !== null) { diff --git a/tests/integration/container/tests/basic_connectivity.test.ts b/tests/integration/container/tests/basic_connectivity.test.ts index 3a1b58db..d1854c7c 100644 --- a/tests/integration/container/tests/basic_connectivity.test.ts +++ b/tests/integration/container/tests/basic_connectivity.test.ts @@ -22,26 +22,27 @@ import { logger } from "../../../../common/logutils"; import { DatabaseEngine } from "./utils/database_engine"; import { TestEnvironmentFeatures } from "./utils/test_environment_features"; import { features } from "./config"; +import { DatabaseEngineDeployment } from "./utils/database_engine_deployment"; const itIf = !features.includes(TestEnvironmentFeatures.PERFORMANCE) ? it : it.skip; let client: any; let auroraTestUtility: AuroraTestUtility; -async function executeInstanceQuery(client: any, engine: DatabaseEngine, props: any): Promise { +async function executeInstanceQuery(client: any, engine: DatabaseEngine, deployment: DatabaseEngineDeployment, props: any): Promise { client.on("error", (error: any) => { logger.debug(error.message); }); await client.connect(); - const res = await DriverHelper.executeInstanceQuery(engine, client); + const res = await DriverHelper.executeInstanceQuery(engine, deployment, client); expect(res).not.toBeNull(); } beforeEach(async () => { logger.info(`Test started: ${expect.getState().currentTestName}`); - auroraTestUtility = new AuroraTestUtility((await TestEnvironment.getCurrent()).auroraRegion); + auroraTestUtility = new AuroraTestUtility((await TestEnvironment.getCurrent()).region); await ProxyHelper.enableAllConnectivity(); client = null; }); @@ -76,7 +77,7 @@ describe("basic_connectivity", () => { props = DriverHelper.addDriverSpecificConfiguration(props, env.engine); client = initClientFunc(props); - await executeInstanceQuery(client, env.engine, props); + await executeInstanceQuery(client, env.engine, env.deployment, props); }, 1000000 ); @@ -99,12 +100,12 @@ describe("basic_connectivity", () => { props = DriverHelper.addDriverSpecificConfiguration(props, env.engine); client = initClientFunc(props); - await executeInstanceQuery(client, env.engine, props); + await executeInstanceQuery(client, env.engine, env.deployment, props); }, 1000000 ); - itIf( + itIf.skip( "wrapper with failover plugins instance endpoint", async () => { const env = await TestEnvironment.getCurrent(); @@ -122,12 +123,12 @@ describe("basic_connectivity", () => { props = DriverHelper.addDriverSpecificConfiguration(props, env.engine); client = initClientFunc(props); - await executeInstanceQuery(client, env.engine, props); + await executeInstanceQuery(client, env.engine, env.deployment, props); }, 1000000 ); - itIf( + itIf.skip( "wrapper", async () => { const env = await TestEnvironment.getCurrent(); @@ -150,14 +151,14 @@ describe("basic_connectivity", () => { }); await client.connect(); - const res = await DriverHelper.executeInstanceQuery(env.engine, client); + const res = await DriverHelper.executeInstanceQuery(env.engine, env.deployment, client); expect(res).not.toBeNull(); }, 1000000 ); - itIf( + itIf.skip( "wrapper_proxy", async () => { const env = await TestEnvironment.getCurrent(); diff --git a/tests/integration/container/tests/performance.test.ts b/tests/integration/container/tests/performance.test.ts index 810eb00f..4426b276 100644 --- a/tests/integration/container/tests/performance.test.ts +++ b/tests/integration/container/tests/performance.test.ts @@ -65,7 
+65,7 @@ let env: TestEnvironment; let driver; let initClientFunc: (props: any) => any; -let auroraTestUtility; +let auroraTestUtility: AuroraTestUtility; let enhancedFailureMonitoringPerfDataList: PerfStatMonitoring[] = []; async function initDefaultConfig(host: string, port: number): Promise { @@ -146,7 +146,7 @@ async function executeFailureDetectionTimeEfmEnabled( detectionCount: number, sleepDelayMillis: number ) { - auroraTestUtility = new AuroraTestUtility((await TestEnvironment.getCurrent()).auroraRegion); + auroraTestUtility = new AuroraTestUtility((await TestEnvironment.getCurrent()).region); const config = await initDefaultConfig(env.proxyDatabaseInfo.writerInstanceEndpoint, env.proxyDatabaseInfo.clusterEndpointPort); config["plugins"] = "efm"; config[WrapperProperties.FAILURE_DETECTION_TIME_MS.name] = detectionTimeMillis; diff --git a/tests/integration/container/tests/read_write_splitting.test.ts b/tests/integration/container/tests/read_write_splitting.test.ts index f30923ed..117282fe 100644 --- a/tests/integration/container/tests/read_write_splitting.test.ts +++ b/tests/integration/container/tests/read_write_splitting.test.ts @@ -72,14 +72,14 @@ describe("aurora read write splitting", () => { beforeEach(async () => { logger.info(`Test started: ${expect.getState().currentTestName}`); env = await TestEnvironment.getCurrent(); - auroraTestUtility = new AuroraTestUtility(env.auroraRegion); + auroraTestUtility = new AuroraTestUtility(env.region); driver = DriverHelper.getDriverForDatabaseEngine(env.engine); initClientFunc = DriverHelper.getClient(driver); await ProxyHelper.enableAllConnectivity(); client = null; await TestEnvironment.updateWriter(); - }); + }, 1000000); afterEach(async () => { if (client !== null) { diff --git a/tests/integration/container/tests/utils/aurora_test_utility.ts b/tests/integration/container/tests/utils/aurora_test_utility.ts index 5959ef27..8010a231 100644 --- a/tests/integration/container/tests/utils/aurora_test_utility.ts +++ b/tests/integration/container/tests/utils/aurora_test_utility.ts @@ -15,21 +15,19 @@ */ import { - CreateDBClusterCommand, - DeleteDBInstanceCommand, DescribeDBClustersCommand, DescribeDBInstancesCommand, FailoverDBClusterCommand, - RDSClient + RDSClient, + RebootDBInstanceCommand } from "@aws-sdk/client-rds"; -import { TestInstanceInfo } from "./test_instance_info"; import { TestEnvironment } from "./test_environment"; import * as dns from "dns"; import { DBInstance } from "@aws-sdk/client-rds/dist-types/models/models_0"; -import { DatabaseEngine } from "./database_engine"; import { AwsClient } from "../../../../../common/lib/aws_client"; import { DriverHelper } from "./driver_helper"; import { sleep } from "../../../../../common/lib/utils/utils"; +import { logger } from "../../../../../common/logutils"; export class AuroraTestUtility { private client: RDSClient; @@ -66,58 +64,20 @@ export class AuroraTestUtility { } } - async createDbInstance(instanceId: string): Promise { - const environment = await TestEnvironment.getCurrent(); - if (await this.doesDbInstanceExists(instanceId)) { - await this.deleteDbInstance(instanceId); + async waitUntilInstanceHasRightState(instanceId: string, ...allowedStatues: string[]) { + let instanceInfo = await this.getDbInstance(instanceId); + if (instanceInfo === null) { + throw new Error("invalid instance"); } - - const input = { - DBClusterIdentifier: environment.info.auroraClusterName, - DBInstanceIdentifier: instanceId, - DBInstanceClass: "db.r5.large", - Engine: 
this.getAuroraEngineName(environment.engine), - PubliclyAccessible: true - }; - const command = new CreateDBClusterCommand(input); - await this.client.send(command); - - const instance = await this.waitUntilInstanceHasDesiredStatus(instanceId); - if (instance == null) { - throw new Error("failed to create instance"); - } - - return new TestInstanceInfo(instance); - } - - async deleteDbInstance(instanceId: string) { - const input = { - DBInstanceIdentifier: instanceId - }; - const command = new DeleteDBInstanceCommand(input); - await this.client.send(command); - await this.waitUntilInstanceHasDesiredStatus(instanceId, "deleted"); - } - - async waitUntilInstanceHasDesiredStatus(instanceId: string, desiredStatus: string = "available", waitTimeMins = 15) { - const current = new Date(); - const stopTime = current.setMinutes(current.getMinutes() + waitTimeMins * 60); - while (new Date().getTime() <= stopTime) { - try { - const instance = await this.getDbInstance(instanceId); - if (!this.isNullOrUndefined(instance)) { - return instance; - } - } catch (err: any) { - if (err.name === "DBInstanceNotFoundFault" && desiredStatus === "deleted") { - return null; - } - } - + let status = instanceInfo["DBInstanceStatus"]; + const waitTilTime: number = Date.now() + 15 * 60 * 1000; // 15 minutes + while (status && !allowedStatues.includes(status.toLowerCase()) && waitTilTime > Date.now()) { await sleep(1000); + instanceInfo = await this.getDbInstance(instanceId); + if (instanceInfo !== null) { + status = instanceInfo["DBInstanceStatus"]; + } } - - throw new Error("instance description timed out"); } async waitUntilClusterHasDesiredStatus(clusterId: string, desiredStatus: string = "available") { @@ -135,6 +95,21 @@ export class AuroraTestUtility { } } + async rebootInstance(instanceId: string) { + let attempts = 5; + while (--attempts > 0) { + try { + const command = new RebootDBInstanceCommand({ + DBInstanceIdentifier: instanceId + }); + await this.client.send(command); + } catch (error: any) { + logger.debug(`rebootDbInstance ${instanceId} failed: ${error.message}`); + await sleep(1000); + } + } + } + async getDbCluster(clusterId: string) { const command = new DescribeDBClustersCommand({ DBClusterIdentifier: clusterId @@ -220,8 +195,8 @@ export class AuroraTestUtility { } async queryInstanceId(client: AwsClient) { - const engine = (await TestEnvironment.getCurrent()).engine; - return await DriverHelper.executeInstanceQuery(engine, client); + const testEnvironment: TestEnvironment = await TestEnvironment.getCurrent(); + return await DriverHelper.executeInstanceQuery(testEnvironment.engine, testEnvironment.deployment, client); } async isDbInstanceWriter(instanceId: string, clusterId?: string) { @@ -261,17 +236,6 @@ export class AuroraTestUtility { return instance.DBInstanceIdentifier; } - getAuroraEngineName(engine: DatabaseEngine) { - switch (engine) { - case DatabaseEngine.PG: - return "aurora-postgresql"; - case DatabaseEngine.MYSQL: - return "aurora-mysql"; - default: - throw new Error("invalid engine"); - } - } - isNullOrUndefined(value: any): boolean { return value === undefined || value === null; } diff --git a/tests/integration/container/tests/utils/database_engine_deployment.ts b/tests/integration/container/tests/utils/database_engine_deployment.ts index 4c52f1ac..2dfc1b7b 100644 --- a/tests/integration/container/tests/utils/database_engine_deployment.ts +++ b/tests/integration/container/tests/utils/database_engine_deployment.ts @@ -17,5 +17,6 @@ export enum DatabaseEngineDeployment { DOCKER = 
"DOCKER", RDS = "RDS", + RDS_MULTI_AZ_CLUSTER = "RDS_MULTI_AZ_CLUSTER", AURORA = "AURORA" } diff --git a/tests/integration/container/tests/utils/driver_helper.ts b/tests/integration/container/tests/utils/driver_helper.ts index a5cefe34..48e94f4b 100644 --- a/tests/integration/container/tests/utils/driver_helper.ts +++ b/tests/integration/container/tests/utils/driver_helper.ts @@ -19,6 +19,7 @@ import { AwsMySQLClient } from "../../../../../mysql/lib"; import { AwsPGClient } from "../../../../../pg/lib"; import { DatabaseEngine } from "./database_engine"; import { AwsClient } from "../../../../../common/lib/aws_client"; +import { DatabaseEngineDeployment } from "./database_engine_deployment"; export class DriverHelper { static getClient(driver: TestDriver) { @@ -43,24 +44,38 @@ export class DriverHelper { } } - static getInstanceIdSql(engine: DatabaseEngine): string { - switch (engine) { - case DatabaseEngine.PG: - return "SELECT aurora_db_instance_identifier()"; - case DatabaseEngine.MYSQL: - return "SELECT @@aurora_server_id as id"; + static getInstanceIdSql(engine: DatabaseEngine, deployment: DatabaseEngineDeployment): string { + switch (deployment) { + case DatabaseEngineDeployment.AURORA: + switch (engine) { + case DatabaseEngine.PG: + return "SELECT aurora_db_instance_identifier() as id"; + case DatabaseEngine.MYSQL: + return "SELECT @@aurora_server_id as id"; + default: + throw new Error("invalid engine"); + } + case DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER: + switch (engine) { + case DatabaseEngine.PG: + return "SELECT SUBSTRING(endpoint FROM 0 FOR POSITION('.' IN endpoint)) as id FROM rds_tools.show_topology() WHERE id IN (SELECT dbi_resource_id FROM rds_tools.dbi_resource_id())"; + case DatabaseEngine.MYSQL: + return "SELECT SUBSTRING_INDEX(endpoint, '.', 1) as id FROM mysql.rds_topology WHERE id=@@server_id"; + default: + throw new Error("invalid engine"); + } default: - throw new Error("invalid engine"); + throw new Error("invalid deployment"); } } - static async executeInstanceQuery(engine: DatabaseEngine, client: AwsClient) { - const sql = DriverHelper.getInstanceIdSql(engine); + static async executeInstanceQuery(engine: DatabaseEngine, deployment: DatabaseEngineDeployment, client: AwsClient) { + const sql = DriverHelper.getInstanceIdSql(engine, deployment); let result; switch (engine) { case DatabaseEngine.PG: return await (client as AwsPGClient).query(sql).then((result) => { - return result.rows[0]["aurora_db_instance_identifier"]; + return result.rows[0]["id"]; }); case DatabaseEngine.MYSQL: result = await (client as AwsMySQLClient).query({ sql: sql }); @@ -95,6 +110,8 @@ export class DriverHelper { static addDriverSpecificConfiguration(props: any, engine: DatabaseEngine, performance: boolean = false) { if (engine === DatabaseEngine.PG && !performance) { props["query_timeout"] = 10000; + // props["ssl"] = { ca: readFileSync("/app/global-bundle.pem").toString() }; + props["ssl"] = { rejectUnauthorized: false }; } else if (engine === DatabaseEngine.PG && performance) { props["query_timeout"] = 120000; props["connectionTimeoutMillis"] = 400; @@ -104,7 +121,6 @@ export class DriverHelper { props["monitoring_internal_query_timeout"] = 400; props["internal_query_timeout"] = 120000; } - return props; } } diff --git a/tests/integration/container/tests/utils/test_environment.ts b/tests/integration/container/tests/utils/test_environment.ts index c3ac243d..f3cae91e 100644 --- a/tests/integration/container/tests/utils/test_environment.ts +++ 
b/tests/integration/container/tests/utils/test_environment.ts @@ -45,11 +45,77 @@ export class TestEnvironment { static async updateWriter() { const info = TestEnvironment.env?.info; - if (info?.request.deployment === DatabaseEngineDeployment.AURORA) { - const auroraUtility = new AuroraTestUtility(info.auroraRegion); - await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName); - info.databaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.auroraClusterName)); - info.proxyDatabaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.auroraClusterName)); + if (info?.request.deployment === DatabaseEngineDeployment.AURORA || info?.request.deployment === DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + let remainingTries = 3; + let success = false; + + while (remainingTries-- > 0 && !success) { + try { + const auroraUtility = new AuroraTestUtility(info.region); + await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName); + info.databaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.auroraClusterName)); + info.proxyDatabaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.auroraClusterName)); + success = true; + } catch (error: any) { + switch (info?.request.deployment) { + case DatabaseEngineDeployment.AURORA: + await this.rebootAllClusterInstances(); + break; + case DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER: + await this.rebootCluster(); + break; + default: + throw new Error(`Unsupported deployment ${info?.request.deployment}`); + } + } + } + + if (!success) { + fail(`Cluster ${info.auroraClusterName} is not healthy`); + } + } + } + + static async rebootAllClusterInstances() { + const info = TestEnvironment.env?.info; + const auroraUtility = new AuroraTestUtility(info?.region); + if (!info?.auroraClusterName) { + fail(`Invalid cluster`); + } + await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName!); + + const instanceIds: (string | undefined)[] | undefined = info?.databaseInfo.instances.map((instance) => instance.instanceId); + for (const instance in instanceIds) { + await auroraUtility.rebootInstance(instance); + } + await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName!); + for (const instance in instanceIds) { + await auroraUtility.waitUntilInstanceHasRightState(instance); + } + } + + static async rebootCluster() { + const info = TestEnvironment.env?.info; + const auroraUtility = new AuroraTestUtility(info?.region); + if (!info?.auroraClusterName) { + fail(`Invalid cluster`); + } + await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName!); + + const instanceIds: (string | undefined)[] | undefined = info?.databaseInfo.instances.map((instance) => instance.instanceId); + for (const instance in instanceIds) { + await auroraUtility.waitUntilInstanceHasRightState( + instance, + "available", + "storage-optimization", + "incompatible-credentials", + "incompatible-parameters", + "unavailable" + ); + await auroraUtility.rebootInstance(instance); + } + for (const instance in instanceIds) { + await auroraUtility.waitUntilInstanceHasRightState(instance); } } @@ -167,14 +233,18 @@ export class TestEnvironment { return this.proxyInstances[0]; } - get auroraRegion(): string { - return this.info.auroraRegion; + get region(): string { + return this.info.region; } get engine(): DatabaseEngine { return this.info.request.engine; } + get deployment(): DatabaseEngineDeployment { + return 
this.info.request.deployment; + } + private static createProxyUrl(host: string, port: number) { return `http://${host}:${port}`; } diff --git a/tests/integration/container/tests/utils/test_environment_info.ts b/tests/integration/container/tests/utils/test_environment_info.ts index fd31ad7c..725244e0 100644 --- a/tests/integration/container/tests/utils/test_environment_info.ts +++ b/tests/integration/container/tests/utils/test_environment_info.ts @@ -23,7 +23,7 @@ export class TestEnvironmentInfo { private readonly _awsAccessKeyId: string; private readonly _awsSecretAccessKey: string; private readonly _awsSessionToken: string; - private readonly _auroraRegion: string; + private readonly _region: string; private readonly _auroraClusterName: string; private readonly _iamUserName: string; private readonly _databaseInfo: TestDatabaseInfo; @@ -34,7 +34,7 @@ export class TestEnvironmentInfo { this._awsAccessKeyId = String(testInfo["awsAccessKeyId"]); this._awsSecretAccessKey = String(testInfo["awsSecretAccessKey"]); this._awsSessionToken = String(testInfo["awsSessionToken"]); - this._auroraRegion = String(testInfo["auroraRegion"]); + this._region = String(testInfo["region"]); this._auroraClusterName = String(testInfo["auroraClusterName"]); this._iamUserName = String(testInfo["iamUsername"]); @@ -58,8 +58,8 @@ export class TestEnvironmentInfo { return this._awsSessionToken; } - get auroraRegion(): string { - return this._auroraRegion; + get region(): string { + return this._region; } get auroraClusterName(): string { diff --git a/tests/integration/host/build.gradle.kts b/tests/integration/host/build.gradle.kts index fb93ca9c..73f31bce 100644 --- a/tests/integration/host/build.gradle.kts +++ b/tests/integration/host/build.gradle.kts @@ -93,6 +93,7 @@ tasks.register("test-aurora-postgres") { systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-multi-az", "true") } } @@ -105,6 +106,7 @@ tasks.register("test-aurora-mysql") { systemProperty("exclude-performance", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-multi-az", "true") } } @@ -136,6 +138,31 @@ tasks.register("test-aurora-mysql-performance") { } } + +tasks.register("test-multi-az-postgres") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-aurora", "true") + } +} + +tasks.register("test-multi-az-mysql") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-aurora", "true") + } +} + // Debug tasks.register("debug-all-environments") { @@ -206,3 +233,26 @@ tasks.register("debug-aurora-mysql-performance") { } } +tasks.register("debug-multi-az-mysql") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.debugTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", 
"true") + systemProperty("exclude-aurora", "true") + } +} + +tasks.register("debug-multi-az-postgres") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.debugTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-aurora", "true") + } +} diff --git a/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java b/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java index fcf83c72..4d21fa6a 100644 --- a/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java +++ b/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java @@ -19,5 +19,6 @@ public enum DatabaseEngineDeployment { DOCKER, RDS, + RDS_MULTI_AZ_CLUSTER, AURORA } diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironment.java b/tests/integration/host/src/test/java/integration/host/TestEnvironment.java index d63a5c4e..b2b58f01 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironment.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironment.java @@ -189,11 +189,4 @@ public boolean isTestDriverAllowed(TestDriver testDriver) { } return true; } - - public static boolean isAwsDatabase() { - DatabaseEngineDeployment deployment = - getCurrent().getInfo().getRequest().getDatabaseEngineDeployment(); - return DatabaseEngineDeployment.AURORA.equals(deployment) - || DatabaseEngineDeployment.RDS.equals(deployment); - } } diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java index 284b4021..8d1622ff 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java @@ -2,11 +2,16 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; + import java.io.IOException; +import java.net.URISyntaxException; import java.net.UnknownHostException; +import java.sql.Connection; import java.sql.SQLException; +import java.sql.Statement; import java.util.ArrayList; import java.util.logging.Logger; + import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.Network; import org.testcontainers.containers.ToxiproxyContainer; @@ -53,7 +58,7 @@ private TestEnvironmentConfig(TestEnvironmentRequest request) { this.info.setRequest(request); } - public static TestEnvironmentConfig build(TestEnvironmentRequest request) { + public static TestEnvironmentConfig build(TestEnvironmentRequest request) throws URISyntaxException, SQLException { TestEnvironmentConfig env = new TestEnvironmentConfig(request); switch (request.getDatabaseEngineDeployment()) { @@ -72,10 +77,14 @@ public static TestEnvironmentConfig build(TestEnvironmentRequest request) { break; case AURORA: + case RDS_MULTI_AZ_CLUSTER: initDatabaseParams(env); - createAuroraDbCluster(env); + createDbCluster(env); if (request.getFeatures().contains(TestEnvironmentFeatures.IAM)) { + if (request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + throw new RuntimeException("IAM isn't supported by " + DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER); + } configureIamAccess(env); } 
@@ -163,13 +172,13 @@ private static void createDatabaseContainers(TestEnvironmentConfig env) { } } - private static void createAuroraDbCluster(TestEnvironmentConfig env) { + private static void createDbCluster(TestEnvironmentConfig env) throws URISyntaxException, SQLException { switch (env.info.getRequest().getDatabaseInstances()) { case SINGLE_INSTANCE: initAwsCredentials(env); env.numOfInstances = 1; - createAuroraDbCluster(env, 1); + createDbCluster(env, 1); break; case MULTI_INSTANCE: initAwsCredentials(env); @@ -182,34 +191,35 @@ private static void createAuroraDbCluster(TestEnvironmentConfig env) { env.numOfInstances = 5; } - createAuroraDbCluster(env, env.numOfInstances); + createDbCluster(env, env.numOfInstances); break; default: throw new NotImplementedException(env.info.getRequest().getDatabaseEngine().toString()); } } - private static void createAuroraDbCluster(TestEnvironmentConfig env, int numOfInstances) { + private static void createDbCluster(TestEnvironmentConfig env, int numOfInstances) throws URISyntaxException, SQLException { - env.info.setAuroraRegion( - !StringUtils.isNullOrEmpty(System.getenv("AURORA_DB_REGION")) - ? System.getenv("AURORA_DB_REGION") + env.info.setRegion( + !StringUtils.isNullOrEmpty(System.getenv("RDS_DB_REGION")) + ? System.getenv("RDS_DB_REGION") : "us-east-1"); env.reuseAuroraDbCluster = - !StringUtils.isNullOrEmpty(System.getenv("REUSE_AURORA_CLUSTER")) - && Boolean.parseBoolean(System.getenv("REUSE_AURORA_CLUSTER")); - env.auroraClusterName = System.getenv("AURORA_CLUSTER_NAME"); // "cluster-mysql" + !StringUtils.isNullOrEmpty(System.getenv("REUSE_RDS_CLUSTER")) + && Boolean.parseBoolean(System.getenv("REUSE_RDS_CLUSTER")); + env.auroraClusterName = System.getenv("RDS_CLUSTER_NAME"); // "cluster-mysql" env.auroraClusterDomain = - System.getenv("AURORA_CLUSTER_DOMAIN"); // "XYZ.us-west-2.rds.amazonaws.com" + System.getenv("RDS_CLUSTER_DOMAIN"); // "XYZ.us-west-2.rds.amazonaws.com" if (StringUtils.isNullOrEmpty(env.auroraClusterDomain)) { - throw new RuntimeException("Environment variable AURORA_CLUSTER_DOMAIN is required."); + throw new RuntimeException("Environment variable RDS_CLUSTER_DOMAIN is required."); } env.auroraUtil = new AuroraTestUtility( - env.info.getAuroraRegion(), + env.info.getRegion(), + env.info.getRdsEndpoint(), env.awsAccessKeyId, env.awsSecretAccessKey, env.awsSessionToken); @@ -250,9 +260,13 @@ private static void createAuroraDbCluster(TestEnvironmentConfig env, int numOfIn } try { - String engine = getAuroraDbEngine(env.info.getRequest()); - String engineVersion = getAuroraDbEngineVersion(env.info.getRequest()); - String instanceClass = getAuroraInstanceClass(env.info.getRequest()); + final TestEnvironmentRequest request = env.info.getRequest(); + String engine = getDbEngine(request); + String engineVersion = getDbEngineVersion(request); + if (StringUtils.isNullOrEmpty(engineVersion)) { + throw new RuntimeException("Failed to get engine version."); + } + String instanceClass = getDbInstanceClass(env.info.getRequest()); env.auroraClusterDomain = env.auroraUtil.createCluster( @@ -260,6 +274,7 @@ private static void createAuroraDbCluster(TestEnvironmentConfig env, int numOfIn env.info.getDatabaseInfo().getPassword(), env.info.getDatabaseInfo().getDefaultDbName(), env.auroraClusterName, + env.info.getRequest().getDatabaseEngineDeployment(), engine, instanceClass, engineVersion, @@ -304,6 +319,26 @@ private static void createAuroraDbCluster(TestEnvironmentConfig env, int numOfIn throw new RuntimeException(e); } 
env.auroraUtil.ec2AuthorizeIP(env.runnerIP); + + final DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + final DatabaseEngine engine = env.info.getRequest().getDatabaseEngine(); + final TestDatabaseInfo info = env.info.getDatabaseInfo(); + + if (DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER.equals(deployment) && DatabaseEngine.PG.equals(engine)) { + final String url = + String.format( + "%s%s:%d/%s", + DriverHelper.getDriverProtocol(engine), + info.getClusterEndpoint(), + info.getClusterEndpointPort(), + info.getDefaultDbName()); + + env.auroraUtil.createRdsExtension( + engine, + url, + info.getUsername(), + info.getPassword()); + } } private static String getRandomName(TestEnvironmentRequest request) { @@ -317,33 +352,81 @@ private static String getRandomName(TestEnvironmentRequest request) { } } - private static String getAuroraDbEngine(TestEnvironmentRequest request) { + private static String getDbEngine(TestEnvironmentRequest request) { + switch (request.getDatabaseEngineDeployment()) { + case AURORA: + return getAuroraDbEngine(request); + case RDS: + case RDS_MULTI_AZ_CLUSTER: + return getRdsEngine(request); + default: + throw new NotImplementedException(request.getDatabaseEngineDeployment().toString()); + } + } + + private static String getDbEngineVersion(TestEnvironmentRequest request) { + switch (request.getDatabaseEngineDeployment()) { + case AURORA: + return getAuroraDbEngineVersion(request); + case RDS: + case RDS_MULTI_AZ_CLUSTER: + return getRdsEngineVersion(request); + default: + throw new NotImplementedException(request.getDatabaseEngineDeployment().toString()); + } + } + + private static String getAuroraDbEngineVersion(TestEnvironmentRequest request) { switch (request.getDatabaseEngine()) { case MYSQL: - return "aurora-mysql"; + return "8.0.mysql_aurora.3.04.0"; case PG: - return "aurora-postgresql"; + return "15.2"; default: throw new NotImplementedException(request.getDatabaseEngine().toString()); } } - private static String getAuroraDbEngineVersion(TestEnvironmentRequest request) { + private static String getRdsEngineVersion(TestEnvironmentRequest request) { switch (request.getDatabaseEngine()) { case MYSQL: - return "8.0.mysql_aurora.3.04.0"; + return "8.0.33"; case PG: - return "15.2"; + return "15.4"; default: throw new NotImplementedException(request.getDatabaseEngine().toString()); } } - private static String getAuroraInstanceClass(TestEnvironmentRequest request) { + private static String getAuroraDbEngine(TestEnvironmentRequest request) { switch (request.getDatabaseEngine()) { case MYSQL: + return "aurora-mysql"; case PG: - return "db.r6g.large"; + return "aurora-postgresql"; + default: + throw new NotImplementedException(request.getDatabaseEngine().toString()); + } + } + + private static String getRdsEngine(TestEnvironmentRequest request) { + switch (request.getDatabaseEngine()) { + case MYSQL: + return "mysql"; + case PG: + return "postgres"; + default: + throw new NotImplementedException(request.getDatabaseEngine().toString()); + } + } + + private static String getDbInstanceClass(TestEnvironmentRequest request) { + switch (request.getDatabaseEngineDeployment()) { + case AURORA: + return "db.r5.large"; + case RDS: + case RDS_MULTI_AZ_CLUSTER: + return "db.m5d.large"; default: throw new NotImplementedException(request.getDatabaseEngine().toString()); } @@ -514,7 +597,7 @@ private static void createTestContainer(TestEnvironmentConfig env) { private static String getContainerBaseImageName(TestEnvironmentRequest request) { 
return "node:21"; } - + private static void configureIamAccess(TestEnvironmentConfig env) { @@ -528,15 +611,6 @@ private static void configureIamAccess(TestEnvironmentConfig env) { ? System.getenv("IAM_USER") : "jane_doe"); if (!env.reuseAuroraDbCluster) { - try { - Class.forName(DriverHelper.getDriverClassname(env.info.getRequest().getDatabaseEngine())); - } catch (ClassNotFoundException e) { - throw new RuntimeException( - "Driver not found: " - + DriverHelper.getDriverClassname(env.info.getRequest().getDatabaseEngine()), - e); - } - final String url = String.format( "%s%s:%d/%s", diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java index aa69b9a5..92af936e 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java @@ -24,7 +24,8 @@ public class TestEnvironmentInfo { private String awsSecretAccessKey; private String awsSessionToken; - private String auroraRegion; + private String region; + private String rdsEndpoint; private String auroraClusterName; private String iamUsername; @@ -65,8 +66,12 @@ public String getAwsSessionToken() { return this.awsSessionToken; } - public String getAuroraRegion() { - return this.auroraRegion; + public String getRegion() { + return this.region; + } + + public String getRdsEndpoint() { + return this.rdsEndpoint; } public String getAuroraClusterName() { @@ -81,8 +86,12 @@ public void setRequest(TestEnvironmentRequest request) { this.request = request; } - public void setAuroraRegion(String auroraRegion) { - this.auroraRegion = auroraRegion; + public void setRegion(String region) { + this.region = region; + } + + public void setRdsEndpoint(String rdsEndpoint) { + this.rdsEndpoint = rdsEndpoint; } public void setAuroraClusterName(String auroraClusterName) { diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java index aebd8d1c..1af1d5f1 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java @@ -30,7 +30,7 @@ public Stream provideTestTemplateInvocationContex final String numInstancesVar = System.getenv("NUM_INSTANCES"); final Integer numInstances = numInstancesVar == null ? 
null : Integer.parseInt(numInstancesVar); - final Set validNumInstances = new HashSet<>(Arrays.asList(1, 2, 5)); + final Set validNumInstances = new HashSet<>(Arrays.asList(1, 2, 3,5)); if (numInstances != null && !validNumInstances.contains(numInstances)) { throw new RuntimeException( String.format( @@ -44,6 +44,7 @@ public Stream provideTestTemplateInvocationContex final boolean excludeDocker = Boolean.parseBoolean(System.getProperty("exclude-docker", "false")); final boolean excludeAurora = Boolean.parseBoolean(System.getProperty("exclude-aurora", "false")); + final boolean excludeMultiAZ = Boolean.parseBoolean(System.getProperty("exclude-multi-az", "false")); final boolean excludePerformance = Boolean.parseBoolean(System.getProperty("exclude-performance", "false")); final boolean excludeMysqlEngine = @@ -61,148 +62,76 @@ public Stream provideTestTemplateInvocationContex Boolean.parseBoolean(System.getProperty("exclude-secrets-manager", "false")); final boolean testAutoscalingOnly = Boolean.parseBoolean(System.getProperty("test-autoscaling", "false")); - if (!excludeDocker) { - if (numInstances == null || numInstances == 1) { - if (!excludeMysqlEngine) { - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - DatabaseEngine.MYSQL, - DatabaseInstances.SINGLE_INSTANCE, - 1, - DatabaseEngineDeployment.DOCKER, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); - } - if (!excludePgEngine) { - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - DatabaseEngine.PG, - DatabaseInstances.SINGLE_INSTANCE, - 1, - DatabaseEngineDeployment.DOCKER, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); - } + for (DatabaseEngineDeployment deployment : DatabaseEngineDeployment.values()) { + if (deployment == DatabaseEngineDeployment.DOCKER && excludeDocker) { + continue; } - - // multiple instances - if (numInstances == null || numInstances == 2) { - if (!excludeMysqlEngine) { - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - DatabaseEngine.MYSQL, - DatabaseInstances.MULTI_INSTANCE, - 2, - DatabaseEngineDeployment.DOCKER, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); - } - if (!excludePgEngine) { - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - DatabaseEngine.PG, - DatabaseInstances.MULTI_INSTANCE, - 2, - DatabaseEngineDeployment.DOCKER, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? 
TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); - } + if (deployment == DatabaseEngineDeployment.AURORA && excludeAurora) { + continue; } - } - - if (!excludeAurora) { - if (!excludeMysqlEngine) { - if (numInstances == null || numInstances == 5) { - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - DatabaseEngine.MYSQL, - DatabaseInstances.MULTI_INSTANCE, - 5, - DatabaseEngineDeployment.AURORA, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - excludeFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED, - TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, - excludeIam ? null : TestEnvironmentFeatures.IAM, - excludeSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER, - excludePerformance ? null : TestEnvironmentFeatures.PERFORMANCE, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); + if (deployment == DatabaseEngineDeployment.RDS) { + // Not in use. + continue; + } + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER && excludeMultiAZ) { + continue; + } + for (DatabaseEngine engine : DatabaseEngine.values()) { + if (engine == DatabaseEngine.PG && excludePgEngine) { + continue; } - - if (numInstances == null || numInstances == 2) { - // Tests for IAM, SECRETS_MANAGER and PERFORMANCE are covered by - // cluster configuration above, so it's safe to skip these tests for configurations below. - // The main goal of the following cluster configurations is to check failover. - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - DatabaseEngine.MYSQL, - DatabaseInstances.MULTI_INSTANCE, - 2, - DatabaseEngineDeployment.AURORA, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - excludeFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED, - TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); + if (engine == DatabaseEngine.MYSQL && excludeMysqlEngine) { + continue; } - } - if (!excludePgEngine) { - if (numInstances == null || numInstances == 5) { - resultContextList.add( + + for (DatabaseInstances instances : DatabaseInstances.values()) { + if (deployment == DatabaseEngineDeployment.DOCKER + && instances != DatabaseInstances.SINGLE_INSTANCE) { + continue; + } + + for (int numOfInstances : Arrays.asList(1, 2, 3, 5)) { + if (instances == DatabaseInstances.SINGLE_INSTANCE && numOfInstances > 1) { + continue; + } + if (instances == DatabaseInstances.MULTI_INSTANCE && numOfInstances == 1) { + continue; + } + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER && numOfInstances != 3) { + // Multi-AZ clusters supports only 3 instances + continue; + } + if (deployment == DatabaseEngineDeployment.AURORA && numOfInstances == 3) { + // Aurora supports clusters with 3 instances but running such tests is similar + // to running tests on 5-instance cluster. + // Let's save some time and skip tests for this configuration + continue; + } + + resultContextList.add( getEnvironment( new TestEnvironmentRequest( - DatabaseEngine.PG, - DatabaseInstances.MULTI_INSTANCE, - 5, - DatabaseEngineDeployment.AURORA, + engine, + instances, + instances == DatabaseInstances.SINGLE_INSTANCE ? 
1 : numOfInstances, + deployment, TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED, - excludeFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED, - TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, - excludeIam ? null : TestEnvironmentFeatures.IAM, + deployment == DatabaseEngineDeployment.DOCKER ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, + deployment == DatabaseEngineDeployment.DOCKER || excludeFailover + ? null + : TestEnvironmentFeatures.FAILOVER_SUPPORTED, + deployment == DatabaseEngineDeployment.DOCKER + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER + || excludeIam + ? null + : TestEnvironmentFeatures.IAM, excludeSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER, excludePerformance ? null : TestEnvironmentFeatures.PERFORMANCE, excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); - } - - if (numInstances == null || numInstances == 2) { - // Tests for IAM, SECRETS_MANAGER and PERFORMANCE are covered by - // cluster configuration above, so it's safe to skip these tests for configurations below. - // The main goal of the following cluster configurations is to check failover. - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - DatabaseEngine.PG, - DatabaseInstances.MULTI_INSTANCE, - 2, - DatabaseEngineDeployment.AURORA, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED, - excludeFailover ? null : TestEnvironmentFeatures.FAILOVER_SUPPORTED, - TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? 
TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); + } } } } diff --git a/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java b/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java index d8bfad5c..01640557 100644 --- a/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java +++ b/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java @@ -16,64 +16,34 @@ package integration.host.util; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; - -import integration.host.DatabaseEngine; -import integration.host.TestDatabaseInfo; -import integration.host.TestEnvironment; -import integration.host.TestEnvironmentInfo; -import integration.host.TestInstanceInfo; +import integration.host.*; +import software.amazon.awssdk.auth.credentials.*; +import software.amazon.awssdk.core.waiters.WaiterResponse; +import software.amazon.awssdk.regions.Region; +import software.amazon.awssdk.services.ec2.Ec2Client; +import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsResponse; +import software.amazon.awssdk.services.ec2.model.Ec2Exception; +import software.amazon.awssdk.services.rds.RdsClient; +import software.amazon.awssdk.services.rds.RdsClientBuilder; +import software.amazon.awssdk.services.rds.model.*; +import software.amazon.awssdk.services.rds.waiters.RdsWaiter; + import java.io.BufferedReader; import java.io.InputStreamReader; -import java.net.InetAddress; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.net.UnknownHostException; import java.sql.Connection; import java.sql.DriverManager; -import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import java.util.Properties; -import java.util.Random; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; +import java.time.Instant; +import java.util.*; import java.util.concurrent.TimeUnit; -import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; -import software.amazon.awssdk.core.waiters.WaiterResponse; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.ec2.Ec2Client; -import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsResponse; -import software.amazon.awssdk.services.ec2.model.Ec2Exception; -import software.amazon.awssdk.services.rds.RdsClient; -import software.amazon.awssdk.services.rds.model.CreateDbClusterRequest; -import software.amazon.awssdk.services.rds.model.CreateDbInstanceRequest; -import software.amazon.awssdk.services.rds.model.DBCluster; -import software.amazon.awssdk.services.rds.model.DBClusterMember; -import software.amazon.awssdk.services.rds.model.DBInstance; -import software.amazon.awssdk.services.rds.model.DbClusterNotFoundException; -import 
software.amazon.awssdk.services.rds.model.DeleteDbClusterResponse; -import software.amazon.awssdk.services.rds.model.DeleteDbInstanceRequest; -import software.amazon.awssdk.services.rds.model.DescribeDbClustersRequest; -import software.amazon.awssdk.services.rds.model.DescribeDbClustersResponse; -import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; -import software.amazon.awssdk.services.rds.model.FailoverDbClusterResponse; -import software.amazon.awssdk.services.rds.model.Filter; -import software.amazon.awssdk.services.rds.model.Tag; -import software.amazon.awssdk.services.rds.waiters.RdsWaiter; /** * Creates and destroys AWS RDS Clusters and Instances. To use this functionality the following environment variables @@ -88,9 +58,13 @@ public class AuroraTestUtility { private String dbPassword = "my_test_password"; private String dbName = "test"; private String dbIdentifier = "test-identifier"; + private DatabaseEngineDeployment dbEngineDeployment; private String dbEngine = "aurora-postgresql"; private String dbEngineVersion = "13.9"; private String dbInstanceClass = "db.r5.large"; + private String storageType = "io1"; + private int allocatedStorage = 100; + private int iops = 1000; private final Region dbRegion; private final String dbSecGroup = "default"; private int numOfInstances = 5; @@ -102,39 +76,17 @@ public class AuroraTestUtility { private static final String DUPLICATE_IP_ERROR_CODE = "InvalidPermission.Duplicate"; - /** - * Initializes an AmazonRDS & AmazonEC2 client. RDS client used to create/destroy clusters & instances. EC2 client - * used to add/remove IP from security group. - */ - public AuroraTestUtility() { - this(Region.US_EAST_1, DefaultCredentialsProvider.create()); - } - - /** - * Initializes an AmazonRDS & AmazonEC2 client. - * - * @param region define AWS Regions, refer to - * https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html - */ - public AuroraTestUtility(Region region) { - this(region, DefaultCredentialsProvider.create()); - } - - /** - * Initializes an AmazonRDS & AmazonEC2 client. - * - * @param region define AWS Regions, refer to - * https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html - */ - public AuroraTestUtility(String region) { - this(getRegionInternal(region), DefaultCredentialsProvider.create()); + public AuroraTestUtility(String region, String endpoint) throws URISyntaxException { + this(getRegionInternal(region), endpoint, DefaultCredentialsProvider.create()); } public AuroraTestUtility( - String region, String awsAccessKeyId, String awsSecretAccessKey, String awsSessionToken) { + String region, String rdsEndpoint, String awsAccessKeyId, String awsSecretAccessKey, String awsSessionToken) + throws URISyntaxException { this( getRegionInternal(region), + rdsEndpoint, StaticCredentialsProvider.create( StringUtils.isNullOrEmpty(awsSessionToken) ? AwsBasicCredentials.create(awsAccessKeyId, awsSecretAccessKey) @@ -145,17 +97,27 @@ public AuroraTestUtility( * Initializes an AmazonRDS & AmazonEC2 client. 
* * @param region define AWS Regions, refer to - * https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + * Regions, + * Availability Zones, and Local Zones * @param credentialsProvider Specific AWS credential provider */ - public AuroraTestUtility(Region region, AwsCredentialsProvider credentialsProvider) { + public AuroraTestUtility(Region region, String rdsEndpoint, AwsCredentialsProvider credentialsProvider) + throws URISyntaxException { dbRegion = region; + final RdsClientBuilder rdsClientBuilder = RdsClient.builder() + .region(dbRegion) + .credentialsProvider(credentialsProvider); - rdsClient = - RdsClient.builder().region(dbRegion).credentialsProvider(credentialsProvider).build(); + if (!StringUtils.isNullOrEmpty(rdsEndpoint)) { + rdsClientBuilder.endpointOverride(new URI(rdsEndpoint)); + } - ec2Client = - Ec2Client.builder().region(dbRegion).credentialsProvider(credentialsProvider).build(); + rdsClient = rdsClientBuilder.build(); + ec2Client = Ec2Client.builder() + .region(dbRegion) + .credentialsProvider(credentialsProvider) + .build(); } protected static Region getRegionInternal(String rdsRegion) { @@ -188,22 +150,32 @@ public String createCluster( String password, String dbName, String identifier, + DatabaseEngineDeployment deployment, String engine, String instanceClass, String version, int numOfInstances, ArrayList instances) throws InterruptedException { - dbUsername = username; - dbPassword = password; + this.dbUsername = username; + this.dbPassword = password; this.dbName = dbName; - dbIdentifier = identifier; - dbEngine = engine; - dbInstanceClass = instanceClass; - dbEngineVersion = version; + this.dbIdentifier = identifier; + this.dbEngineDeployment = deployment; + this.dbEngine = engine; + this.dbInstanceClass = instanceClass; + this.dbEngineVersion = version; this.numOfInstances = numOfInstances; this.instances = instances; - return createCluster(); + + switch (this.dbEngineDeployment) { + case AURORA: + return createAuroraCluster(); + case RDS_MULTI_AZ_CLUSTER: + return createMultiAzCluster(); + default: + throw new UnsupportedOperationException(this.dbEngineDeployment.toString()); + } } /** @@ -212,7 +184,7 @@ public String createCluster( * @return An endpoint for one of the instances * @throws InterruptedException when clusters have not started after 30 minutes */ - public String createCluster() throws InterruptedException { + public String createAuroraCluster() throws InterruptedException { // Create Cluster final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build(); @@ -281,6 +253,161 @@ public String createCluster() throws InterruptedException { return clusterDomainPrefix; } + /** + * Creates RDS Cluster/Instances and waits until they are up, and proper IP whitelisting for databases. 
+ * + * @return An endpoint for one of the instances + * @throws InterruptedException when clusters have not started after 30 minutes + */ + public String createMultiAzCluster() throws InterruptedException { + // Create Cluster + final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build(); + CreateDbClusterRequest.Builder clusterBuilder = + CreateDbClusterRequest.builder() + .dbClusterIdentifier(dbIdentifier) + .publiclyAccessible(true) + .databaseName(dbName) + .masterUsername(dbUsername) + .masterUserPassword(dbPassword) + .sourceRegion(dbRegion.id()) + .engine(dbEngine) + .engineVersion(dbEngineVersion) + .enablePerformanceInsights(false) + .backupRetentionPeriod(1) + .storageEncrypted(true) + .tags(testRunnerTag); + + clusterBuilder = + clusterBuilder.allocatedStorage(allocatedStorage) + .dbClusterInstanceClass(dbInstanceClass) + .storageType(storageType) + .iops(iops); + + rdsClient.createDBCluster(clusterBuilder.build()); + + // For multi-AZ deployments, the cluster instances are created automatically. + + // Wait for all instances to be up + final RdsWaiter waiter = rdsClient.waiter(); + WaiterResponse waiterResponse = + waiter.waitUntilDBInstanceAvailable( + (requestBuilder) -> + requestBuilder.filters( + Filter.builder().name("db-cluster-id").values(dbIdentifier).build()), + (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(30))); + + if (waiterResponse.matched().exception().isPresent()) { + deleteCluster(); + throw new InterruptedException( + "Unable to start AWS RDS Cluster & Instances after waiting for 30 minutes"); + } + + final DescribeDbInstancesResponse dbInstancesResult = + rdsClient.describeDBInstances( + (builder) -> + builder.filters( + Filter.builder().name("db-cluster-id").values(dbIdentifier).build())); + final String endpoint = dbInstancesResult.dbInstances().get(0).endpoint().address(); + final String clusterDomainPrefix = endpoint.substring(endpoint.indexOf('.') + 1); + + for (DBInstance instance : dbInstancesResult.dbInstances()) { + this.instances.add( + new TestInstanceInfo( + instance.dbInstanceIdentifier(), + instance.endpoint().address(), + instance.endpoint().port())); + } + + return clusterDomainPrefix; + } + + /** + * Creates an RDS instance under the current cluster and waits until it is up. 
+   *
+   * @param instanceId the desired instance ID of the new instance
+   * @return the instance info of the new instance
+   * @throws InterruptedException if the new instance is not available within 15 minutes
+   */
+  public TestInstanceInfo createInstance(String instanceId) throws InterruptedException {
+    final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build();
+    final TestEnvironmentInfo info = TestEnvironment.getCurrent().getInfo();
+
+    rdsClient.createDBInstance(
+        CreateDbInstanceRequest.builder()
+            .dbClusterIdentifier(info.getAuroraClusterName())
+            .dbInstanceIdentifier(instanceId)
+            .dbInstanceClass(dbInstanceClass)
+            .engine(info.getDatabaseEngine())
+            .engineVersion(info.getDatabaseEngineVersion())
+            .publiclyAccessible(true)
+            .tags(testRunnerTag)
+            .build());
+
+    // Wait for the instance to become available
+    final RdsWaiter waiter = rdsClient.waiter();
+    WaiterResponse<DescribeDbInstancesResponse> waiterResponse =
+        waiter.waitUntilDBInstanceAvailable(
+            (requestBuilder) ->
+                requestBuilder.filters(
+                    Filter.builder().name("db-instance-id").values(instanceId).build()),
+            (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(15)));
+
+    if (waiterResponse.matched().exception().isPresent()) {
+      throw new InterruptedException(
+          "Instance creation timeout for " + instanceId
+              + ". The instance was not available within 15 minutes");
+    }
+
+    final DescribeDbInstancesResponse dbInstancesResult =
+        rdsClient.describeDBInstances(
+            (builder) ->
+                builder.filters(
+                    Filter.builder().name("db-instance-id").values(instanceId).build()));
+
+    if (dbInstancesResult.dbInstances().size() != 1) {
+      throw new RuntimeException(
+          "The describeDBInstances request for newly created instance " + instanceId
+              + " returned an unexpected number of instances: "
+              + dbInstancesResult.dbInstances().size());
+    }
+
+    DBInstance instance = dbInstancesResult.dbInstances().get(0);
+    TestInstanceInfo instanceInfo = new TestInstanceInfo(
+        instance.dbInstanceIdentifier(),
+        instance.endpoint().address(),
+        instance.endpoint().port());
+    this.instances.add(instanceInfo);
+    return instanceInfo;
+  }
+
+  /**
+   * Deletes an RDS instance.
+   *
+   * @param instanceToDelete the info for the instance to delete
+   * @throws InterruptedException if the instance has not been deleted within 15 minutes
+   */
+  public void deleteInstance(TestInstanceInfo instanceToDelete) throws InterruptedException {
+    rdsClient.deleteDBInstance(
+        DeleteDbInstanceRequest.builder()
+            .dbInstanceIdentifier(instanceToDelete.getInstanceId())
+            .skipFinalSnapshot(true)
+            .build());
+    this.instances.remove(instanceToDelete);
+
+    final RdsWaiter waiter = rdsClient.waiter();
+    WaiterResponse<DescribeDbInstancesResponse> waiterResponse = waiter.waitUntilDBInstanceDeleted(
+        (requestBuilder) -> requestBuilder.filters(
+            Filter.builder().name("db-instance-id").values(instanceToDelete.getInstanceId())
+                .build()),
+        (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(15)));
+
+    if (waiterResponse.matched().exception().isPresent()) {
+      throw new InterruptedException(
+          "Instance deletion timeout for " + instanceToDelete.getInstanceId()
+              + ". The instance was not deleted within 15 minutes");
+    }
+  }
+
   /**
    * Gets public IP.
* @@ -316,10 +443,14 @@ public void ec2AuthorizeIP(String ipAddress) { (builder) -> builder .groupName(dbSecGroup) - .cidrIp(ipAddress + "/32") - .ipProtocol("-1") // All protocols - .fromPort(0) // For all ports - .toPort(65535)); + .ipPermissions((permissionBuilder) -> + permissionBuilder.ipRanges((ipRangeBuilder) -> + ipRangeBuilder + .cidrIp(ipAddress + "/32") + .description("Test run at " + Instant.now())) + .ipProtocol("-1") // All protocols + .fromPort(0) // For all ports + .toPort(65535))); } catch (Ec2Exception exception) { if (!DUPLICATE_IP_ERROR_CODE.equalsIgnoreCase(exception.awsErrorDetails().errorCode())) { throw exception; @@ -377,6 +508,23 @@ public void deleteCluster(String identifier) { * Destroys all instances and clusters. Removes IP from EC2 whitelist. */ public void deleteCluster() { + + switch (this.dbEngineDeployment) { + case AURORA: + this.deleteAuroraCluster(); + break; + case RDS_MULTI_AZ_CLUSTER: + this.deleteMultiAzCluster(); + break; + default: + throw new UnsupportedOperationException(this.dbEngineDeployment.toString()); + } + } + + /** + * Destroys all instances and clusters. Removes IP from EC2 whitelist. + */ + public void deleteAuroraCluster() { // Tear down instances for (int i = 1; i <= numOfInstances; i++) { try { @@ -392,8 +540,46 @@ public void deleteCluster() { } // Tear down cluster - rdsClient.deleteDBCluster( - (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(dbIdentifier))); + int remainingAttempts = 5; + while (--remainingAttempts > 0) { + try { + DeleteDbClusterResponse response = rdsClient.deleteDBCluster( + (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(dbIdentifier))); + if (response.sdkHttpResponse().isSuccessful()) { + break; + } + TimeUnit.SECONDS.sleep(30); + + } catch (DbClusterNotFoundException ex) { + // ignore + } catch (Exception ex) { + LOGGER.warning("Error deleting db cluster " + dbIdentifier + ": " + ex); + } + } + } + + /** + * Destroys all instances and clusters. + */ + public void deleteMultiAzCluster() { + // deleteDBinstance requests are not necessary to delete a multi-az cluster. 
+    // Tear down cluster
+    int remainingAttempts = 5;
+    while (--remainingAttempts > 0) {
+      try {
+        DeleteDbClusterResponse response = rdsClient.deleteDBCluster(
+            (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(dbIdentifier)));
+        if (response.sdkHttpResponse().isSuccessful()) {
+          break;
+        }
+        TimeUnit.SECONDS.sleep(30);
+
+      } catch (DbClusterNotFoundException ex) {
+        // ignore
+      } catch (Exception ex) {
+        LOGGER.warning("Error deleting db cluster " + dbIdentifier + ": " + ex);
+      }
+    }
   }

   public boolean doesClusterExist(final String clusterId) {
@@ -421,8 +607,10 @@ public DBCluster getClusterInfo(final String clusterId) {
   public DatabaseEngine getClusterEngine(final DBCluster cluster) {
     switch (cluster.engine()) {
       case "aurora-postgresql":
+      case "postgres":
         return DatabaseEngine.PG;
       case "aurora-mysql":
+      case "mysql":
         return DatabaseEngine.MYSQL;
       default:
         throw new UnsupportedOperationException(cluster.engine());
@@ -446,6 +634,70 @@ public List<String> getClusterInstanceIds(final String clusterId) {
     return result;
   }

+  public void waitUntilClusterHasRightState(String clusterId) throws InterruptedException {
+    waitUntilClusterHasRightState(clusterId, "available");
+  }
+
+  public void waitUntilClusterHasRightState(String clusterId, String allowedStatus) throws InterruptedException {
+    String status = getDBCluster(clusterId).status();
+    LOGGER.finest("Cluster status: " + status + ", waiting for status: " + allowedStatus);
+    final Set<String> allowedStatuses = new HashSet<>(Arrays.asList(allowedStatus.toLowerCase()));
+    final long waitTillNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(10);
+    while (!allowedStatuses.contains(status.toLowerCase()) && waitTillNanoTime > System.nanoTime()) {
+      TimeUnit.MILLISECONDS.sleep(1000);
+      String tmpStatus = getDBCluster(clusterId).status();
+      if (!tmpStatus.equalsIgnoreCase(status)) {
+        LOGGER.finest("Cluster status (waiting): " + tmpStatus);
+      }
+      status = tmpStatus;
+    }
+    LOGGER.finest("Cluster status (after wait): " + status);
+  }
+
+  public DBCluster getDBCluster(String clusterId) {
+    final DescribeDbClustersResponse dbClustersResult =
+        rdsClient.describeDBClusters((builder) -> builder.dbClusterIdentifier(clusterId));
+    final List<DBCluster> dbClusterList = dbClustersResult.dbClusters();
+    return dbClusterList.get(0);
+  }
+
+  public Boolean isDBInstanceWriter(String instanceId) {
+    return isDBInstanceWriter(
+        TestEnvironment.getCurrent().getInfo().getAuroraClusterName(), instanceId);
+  }
+
+  public Boolean isDBInstanceWriter(String clusterId, String instanceId) {
+    return getMatchedDBClusterMember(clusterId, instanceId).isClusterWriter();
+  }
+
+  public DBClusterMember getMatchedDBClusterMember(String clusterId, String instanceId) {
+    final List<DBClusterMember> matchedMemberList =
+        getDBClusterMemberList(clusterId).stream()
+            .filter(dbClusterMember -> dbClusterMember.dbInstanceIdentifier().equals(instanceId))
+            .collect(Collectors.toList());
+    if (matchedMemberList.isEmpty()) {
+      throw new RuntimeException(
+          "Cannot find cluster member whose db instance identifier is " + instanceId);
+    }
+    return matchedMemberList.get(0);
+  }
+
+  public List<DBClusterMember> getDBClusterMemberList(String clusterId) {
+    final DBCluster dbCluster = getDBCluster(clusterId);
+    return dbCluster.dbClusterMembers();
+  }
+
+  public static void registerDriver(DatabaseEngine engine) {
+    try {
+      Class.forName(DriverHelper.getDriverClassname(engine));
+    } catch (ClassNotFoundException e) {
+      throw new RuntimeException(
+          "Driver not found: "
+              + DriverHelper.getDriverClassname(engine),
+          e);
+    }
+  }
+
   public 
void addAuroraAwsIamUser(
       DatabaseEngine databaseEngine,
       String connectionUrl,
@@ -454,9 +706,10 @@ public void addAuroraAwsIamUser(
       String dbUser,
       String databaseName)
       throws SQLException {
+    AuroraTestUtility.registerDriver(databaseEngine);

     try (final Connection conn = DriverManager.getConnection(connectionUrl, userName, password);
-        final Statement stmt = conn.createStatement()) {
+         final Statement stmt = conn.createStatement()) {

       switch (databaseEngine) {
         case MYSQL:
@@ -476,4 +729,36 @@ public void addAuroraAwsIamUser(
       }
     }
   }
+
+  public void createRdsExtension(
+      DatabaseEngine databaseEngine,
+      String connectionUrl,
+      String userName,
+      String password)
+      throws SQLException {
+    AuroraTestUtility.registerDriver(databaseEngine);
+
+    try (final Connection conn = DriverManager.getConnection(connectionUrl, userName, password);
+         final Statement stmt = conn.createStatement()) {
+      stmt.execute("CREATE EXTENSION IF NOT EXISTS rds_tools");
+    }
+  }
+
+  public List<String> getEngineVersions(String engine) {
+    final List<String> res = new ArrayList<>();
+    final DescribeDbEngineVersionsResponse versions = rdsClient.describeDBEngineVersions(
+        DescribeDbEngineVersionsRequest.builder().engine(engine).build()
+    );
+    for (DBEngineVersion version : versions.dbEngineVersions()) {
+      res.add(version.engineVersion());
+    }
+    return res;
+  }
+
+  public String getLatestVersion(String engine) {
+    return getEngineVersions(engine).stream()
+        .sorted(Comparator.reverseOrder())
+        .findFirst()
+        .orElse(null);
+  }
 }
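
Note (illustrative sketch, not part of the patch): the extended createCluster entry point above dispatches on DatabaseEngineDeployment, so a host-side caller might drive both supported deployments roughly as follows. The region, empty endpoint override, credentials, identifiers, engine versions and instance classes below are placeholder assumptions, and the integration.host package for DatabaseEngineDeployment is assumed from the existing layout.

    // Hypothetical driver for AuroraTestUtility.createCluster(); all literal values are placeholders.
    import integration.host.DatabaseEngineDeployment;
    import integration.host.TestInstanceInfo;
    import integration.host.util.AuroraTestUtility;

    import java.util.ArrayList;

    public class CreateClusterSketch {
      public static void main(String[] args) throws Exception {
        // An empty endpoint string keeps the default RDS endpoint (no endpointOverride is applied).
        AuroraTestUtility util = new AuroraTestUtility("us-east-1", "");

        // Aurora deployment: the utility creates each instance itself (createAuroraCluster path).
        ArrayList<TestInstanceInfo> auroraInstances = new ArrayList<>();
        String auroraDomain = util.createCluster(
            "admin", "my_test_password", "test", "example-aurora-cluster",
            DatabaseEngineDeployment.AURORA,
            "aurora-postgresql", "db.r5.large", "15.4", 5, auroraInstances);
        System.out.println("Aurora cluster domain: " + auroraDomain);

        // Multi-AZ cluster deployment: RDS provisions the three instances itself and
        // createMultiAzCluster() only waits for them, so numOfInstances is effectively 3.
        ArrayList<TestInstanceInfo> multiAzInstances = new ArrayList<>();
        String multiAzDomain = util.createCluster(
            "admin", "my_test_password", "test", "example-multi-az-cluster",
            DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER,
            "postgres", "db.m5d.large", "15.4", 3, multiAzInstances);
        System.out.println("Multi-AZ cluster domain: " + multiAzDomain);
      }
    }

In both cases the utility fills the supplied list with TestInstanceInfo entries for the instances it ends up with, which is what the test environment later uses to build its topology.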
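
Similarly, a minimal sketch of how the new createInstance/deleteInstance helpers could temporarily grow the current test cluster. It assumes a TestEnvironment is already initialized, since createInstance reads the cluster name and engine version from TestEnvironment.getCurrent(); the instance identifier is a placeholder.

    // Hypothetical scale-out/scale-in sketch around AuroraTestUtility.createInstance()/deleteInstance().
    import integration.host.TestInstanceInfo;
    import integration.host.util.AuroraTestUtility;

    public class InstanceScalingSketch {
      public static void main(String[] args) throws Exception {
        AuroraTestUtility util = new AuroraTestUtility("us-east-1", "");

        // Adds an instance to the cluster described by TestEnvironment.getCurrent()
        // and blocks until the waiter reports it available (or times out).
        TestInstanceInfo extra = util.createInstance("example-extra-instance-1");
        System.out.println("Created " + extra.getInstanceId());

        // ... exercise topology refresh / reader failover against the larger cluster ...

        // Removes the instance again and waits for the deletion to complete.
        util.deleteInstance(extra);
      }
    }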
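
One note on getLatestVersion above: Comparator.reverseOrder() orders the engine version strings lexicographically, so for a hypothetical pair such as "15.9" and "15.10" it would report "15.9" as the latest. If numeric ordering ever matters for the engines being queried, a segment-aware comparator along these lines could be swapped in (sketch only; the VersionOrdering helper and its names are invented for illustration, and non-numeric segments such as the "mysql_aurora" part of Aurora MySQL versions fall back to plain string comparison):

    // Illustrative dotted-segment version comparison; not part of the patch.
    import java.util.List;

    public final class VersionOrdering {
      private VersionOrdering() {}

      // Compares versions segment by segment, numerically where possible ("15.10" > "15.9").
      public static int compareVersions(String a, String b) {
        final String[] as = a.split("\\.");
        final String[] bs = b.split("\\.");
        for (int i = 0; i < Math.max(as.length, bs.length); i++) {
          final String x = i < as.length ? as[i] : "0";
          final String y = i < bs.length ? bs[i] : "0";
          int cmp;
          try {
            cmp = Integer.compare(Integer.parseInt(x), Integer.parseInt(y));
          } catch (NumberFormatException e) {
            cmp = x.compareTo(y); // non-numeric segment, e.g. "mysql_aurora"
          }
          if (cmp != 0) {
            return cmp;
          }
        }
        return 0;
      }

      // Example: pick the newest entry from a list such as the one returned by getEngineVersions(engine).
      public static String latest(List<String> versions) {
        return versions.stream()
            .max(VersionOrdering::compareVersions)
            .orElse(null);
      }
    }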