diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index 40139a40..c0ddaadd 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -5,7 +5,6 @@ on: push: branches: - main - - refactor/monitor-service paths-ignore: - "**/*.md" - "**/*.jpg" diff --git a/common/lib/connection_plugin_chain_builder.ts b/common/lib/connection_plugin_chain_builder.ts index 02e7544b..1cad14c7 100644 --- a/common/lib/connection_plugin_chain_builder.ts +++ b/common/lib/connection_plugin_chain_builder.ts @@ -43,6 +43,7 @@ import { CustomEndpointPluginFactory } from "./plugins/custom_endpoint/custom_en import { ConfigurationProfile } from "./profile/configuration_profile"; import { HostMonitoring2PluginFactory } from "./plugins/efm2/host_monitoring2_plugin_factory"; import { BlueGreenPluginFactory } from "./plugins/bluegreen/blue_green_plugin_factory"; +import { GlobalDbFailoverPluginFactory } from "./plugins/gdb_failover/global_db_failover_plugin_factory"; import { FullServicesContainer } from "./utils/full_services_container"; /* @@ -66,6 +67,7 @@ export class ConnectionPluginChainBuilder { ["readWriteSplitting", { factory: ReadWriteSplittingPluginFactory, weight: 600 }], ["failover", { factory: FailoverPluginFactory, weight: 700 }], ["failover2", { factory: Failover2PluginFactory, weight: 710 }], + ["gdbFailover", { factory: GlobalDbFailoverPluginFactory, weight: 720 }], ["efm", { factory: HostMonitoringPluginFactory, weight: 800 }], ["efm2", { factory: HostMonitoring2PluginFactory, weight: 810 }], ["fastestResponseStrategy", { factory: FastestResponseStrategyPluginFactory, weight: 900 }], @@ -87,6 +89,7 @@ export class ConnectionPluginChainBuilder { [ReadWriteSplittingPluginFactory, 600], [FailoverPluginFactory, 700], [Failover2PluginFactory, 710], + [GlobalDbFailoverPluginFactory, 720], [HostMonitoringPluginFactory, 800], [HostMonitoring2PluginFactory, 810], [LimitlessConnectionPluginFactory, 950], 
diff --git a/common/lib/database_dialect/database_dialect_codes.ts b/common/lib/database_dialect/database_dialect_codes.ts index 4815f48e..4351a6cb 100644 --- a/common/lib/database_dialect/database_dialect_codes.ts +++ b/common/lib/database_dialect/database_dialect_codes.ts @@ -15,11 +15,13 @@ */ export class DatabaseDialectCodes { + static readonly GLOBAL_AURORA_MYSQL: string = "global-aurora-mysql"; static readonly AURORA_MYSQL: string = "aurora-mysql"; static readonly RDS_MYSQL: string = "rds-mysql"; static readonly MYSQL: string = "mysql"; // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html static readonly RDS_MULTI_AZ_MYSQL: string = "rds-multi-az-mysql"; + static readonly GLOBAL_AURORA_PG: string = "global-aurora-pg"; static readonly AURORA_PG: string = "aurora-pg"; static readonly RDS_PG: string = "rds-pg"; // https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html diff --git a/common/lib/database_dialect/database_dialect_manager.ts b/common/lib/database_dialect/database_dialect_manager.ts index e8127f2a..b462e60b 100644 --- a/common/lib/database_dialect/database_dialect_manager.ts +++ b/common/lib/database_dialect/database_dialect_manager.ts @@ -95,6 +95,14 @@ export class DatabaseDialectManager implements DatabaseDialectProvider { if (this.dbType === DatabaseType.MYSQL) { const type = this.rdsHelper.identifyRdsType(host); + if (type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) { + this.canUpdate = false; + this.dialectCode = DatabaseDialectCodes.GLOBAL_AURORA_MYSQL; + this.dialect = this.knownDialectsByCode.get(DatabaseDialectCodes.GLOBAL_AURORA_MYSQL); + this.logCurrentDialect(); + return this.dialect; + } + if (type.isRdsCluster) { this.canUpdate = true; this.dialectCode = DatabaseDialectCodes.AURORA_MYSQL; @@ -128,6 +136,14 @@ export class DatabaseDialectManager implements DatabaseDialectProvider { return this.dialect; } + if (type == RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) { + 
this.canUpdate = false; + this.dialectCode = DatabaseDialectCodes.GLOBAL_AURORA_PG; + this.dialect = this.knownDialectsByCode.get(DatabaseDialectCodes.GLOBAL_AURORA_PG); + this.logCurrentDialect(); + return this.dialect; + } + if (type.isRdsCluster) { this.canUpdate = true; this.dialectCode = DatabaseDialectCodes.AURORA_PG; diff --git a/common/lib/host_list_provider/aurora_topology_utils.ts b/common/lib/host_list_provider/aurora_topology_utils.ts new file mode 100644 index 00000000..e86480d8 --- /dev/null +++ b/common/lib/host_list_provider/aurora_topology_utils.ts @@ -0,0 +1,68 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { TopologyQueryResult, TopologyUtils } from "./topology_utils"; +import { ClientWrapper } from "../client_wrapper"; +import { DatabaseDialect } from "../database_dialect/database_dialect"; +import { HostInfo } from "../host_info"; +import { isDialectTopologyAware } from "../utils/utils"; +import { Messages } from "../utils/messages"; + +/** + * TopologyUtils implementation for Aurora clusters using a single HostInfo template. 
+ */ +export class AuroraTopologyUtils extends TopologyUtils { + async queryForTopology( + targetClient: ClientWrapper, + dialect: DatabaseDialect, + initialHost: HostInfo, + clusterInstanceTemplate: HostInfo + ): Promise { + if (!isDialectTopologyAware(dialect)) { + throw new TypeError(Messages.get("RdsHostListProvider.incorrectDialect")); + } + + return await dialect + .queryForTopology(targetClient) + .then((res: TopologyQueryResult[]) => this.verifyWriter(this.createHosts(res, initialHost, clusterInstanceTemplate))); + } + + public createHosts(topologyQueryResults: TopologyQueryResult[], initialHost: HostInfo, clusterInstanceTemplate: HostInfo): HostInfo[] { + const hostsMap = new Map(); + topologyQueryResults.forEach((row) => { + const lastUpdateTime = row.lastUpdateTime ?? Date.now(); + + const host = this.createHost( + row.id, + row.host, + row.isWriter, + row.weight, + lastUpdateTime, + initialHost, + clusterInstanceTemplate, + row.endpoint, + row.port + ); + + const existing = hostsMap.get(host.host); + if (!existing || existing.lastUpdateTime < host.lastUpdateTime) { + hostsMap.set(host.host, host); + } + }); + + return Array.from(hostsMap.values()); + } +} diff --git a/common/lib/host_list_provider/connection_string_host_list_provider.ts b/common/lib/host_list_provider/connection_string_host_list_provider.ts index 646f679d..80187ee0 100644 --- a/common/lib/host_list_provider/connection_string_host_list_provider.ts +++ b/common/lib/host_list_provider/connection_string_host_list_provider.ts @@ -112,4 +112,8 @@ export class ConnectionStringHostListProvider implements StaticHostListProvider getClusterId(): string { throw new AwsWrapperError("ConnectionStringHostListProvider does not support getClusterId."); } + + forceMonitoringRefresh(shouldVerifyWriter: boolean, timeoutMs: number): Promise { + throw new AwsWrapperError("ConnectionStringHostListProvider does not support forceMonitoringRefresh."); + } } diff --git 
a/common/lib/host_list_provider/global_aurora_host_list_provider.ts b/common/lib/host_list_provider/global_aurora_host_list_provider.ts new file mode 100644 index 00000000..80b2153e --- /dev/null +++ b/common/lib/host_list_provider/global_aurora_host_list_provider.ts @@ -0,0 +1,67 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { RdsHostListProvider } from "./rds_host_list_provider"; +import { FullServicesContainer } from "../utils/full_services_container"; +import { HostInfo } from "../host_info"; +import { WrapperProperties } from "../wrapper_property"; +import { ClusterTopologyMonitor, ClusterTopologyMonitorImpl } from "./monitoring/cluster_topology_monitor"; +import { GlobalAuroraTopologyMonitor } from "./monitoring/global_aurora_topology_monitor"; +import { MonitorInitializer } from "../utils/monitoring/monitor"; +import { ClientWrapper } from "../client_wrapper"; +import { DatabaseDialect } from "../database_dialect/database_dialect"; +import { parseInstanceTemplates } from "../utils/utils"; + +export class GlobalAuroraHostListProvider extends RdsHostListProvider { + protected instanceTemplatesByRegion: Map; + protected override initSettings(): void { + super.initSettings(); + + const instanceTemplates = WrapperProperties.GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS.get(this.properties); + this.instanceTemplatesByRegion = parseInstanceTemplates( + instanceTemplates, + (hostPattern: string) => 
this.validateHostPatternSetting(hostPattern), + () => this.hostListProviderService.getHostInfoBuilder() + ); + } + + protected override async getOrCreateMonitor(): Promise { + const initializer: MonitorInitializer = { + createMonitor: (servicesContainer: FullServicesContainer): ClusterTopologyMonitor => { + return new GlobalAuroraTopologyMonitor( + servicesContainer, + this.topologyUtils, + this.clusterId, + this.initialHost, + this.properties, + this.clusterInstanceTemplate, + this.refreshRateNano, + this.highRefreshRateNano, + this.instanceTemplatesByRegion + ); + } + }; + + return await this.servicesContainers + .getMonitorService() + .runIfAbsent(ClusterTopologyMonitorImpl, this.clusterId, this.servicesContainers, this.properties, initializer); + } + + override async getCurrentTopology(targetClient: ClientWrapper, dialect: DatabaseDialect): Promise { + this.init(); + return await this.topologyUtils.queryForTopology(targetClient, dialect, this.initialHost, this.instanceTemplatesByRegion); + } +} diff --git a/common/lib/host_list_provider/global_topology_utils.ts b/common/lib/host_list_provider/global_topology_utils.ts index e08c54b3..69caa015 100644 --- a/common/lib/host_list_provider/global_topology_utils.ts +++ b/common/lib/host_list_provider/global_topology_utils.ts @@ -22,24 +22,19 @@ import { isDialectTopologyAware } from "../utils/utils"; import { Messages } from "../utils/messages"; import { AwsWrapperError } from "../utils/errors"; -export class GlobalTopologyUtils extends TopologyUtils { - async queryForTopology( - targetClient: ClientWrapper, - dialect: DatabaseDialect, - initialHost: HostInfo, - clusterInstanceTemplate: HostInfo - ): Promise { - throw new AwsWrapperError("Not implemented"); - } +export interface GdbTopologyUtils { + getRegion(instanceId: string, targetClient: ClientWrapper, dialect: DatabaseDialect): Promise; +} - async queryForTopologyWithRegion( +export class GlobalTopologyUtils extends TopologyUtils implements GdbTopologyUtils { + 
async queryForTopology( targetClient: ClientWrapper, dialect: DatabaseDialect, initialHost: HostInfo, instanceTemplateByRegion: Map ): Promise { if (!isDialectTopologyAware(dialect)) { - throw new TypeError(Messages.get("RdsHostListProvider.incorrectDialect")); + throw new AwsWrapperError(Messages.get("RdsHostListProvider.incorrectDialect")); } return await dialect @@ -47,6 +42,16 @@ export class GlobalTopologyUtils extends TopologyUtils { .then((res: TopologyQueryResult[]) => this.verifyWriter(this.createHostsWithTemplateMap(res, initialHost, instanceTemplateByRegion))); } + async getRegion(instanceId: string, targetClient: ClientWrapper, dialect: DatabaseDialect): Promise { + if (!isDialectTopologyAware(dialect)) { + throw new AwsWrapperError(Messages.get("RdsHostListProvider.incorrectDialect")); + } + + const results = await dialect.queryForTopology(targetClient); + const match = results.find((row) => row.id === instanceId); + return match?.awsRegion ?? null; + } + private createHostsWithTemplateMap( topologyQueryResults: TopologyQueryResult[], initialHost: HostInfo, diff --git a/common/lib/host_list_provider/monitoring/global_aurora_topology_monitor.ts b/common/lib/host_list_provider/monitoring/global_aurora_topology_monitor.ts new file mode 100644 index 00000000..9582c522 --- /dev/null +++ b/common/lib/host_list_provider/monitoring/global_aurora_topology_monitor.ts @@ -0,0 +1,69 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { ClusterTopologyMonitorImpl } from "./cluster_topology_monitor"; +import { GdbTopologyUtils, GlobalTopologyUtils } from "../global_topology_utils"; +import { FullServicesContainer } from "../../utils/full_services_container"; +import { HostInfo } from "../../host_info"; +import { ClientWrapper } from "../../client_wrapper"; +import { AwsWrapperError } from "../../utils/errors"; +import { Messages } from "../../utils/messages"; +import { TopologyUtils } from "../topology_utils"; + +function isGdbTopologyUtils(utils: TopologyUtils): utils is TopologyUtils & GdbTopologyUtils { + return "getRegion" in utils && typeof (utils as unknown as GdbTopologyUtils).getRegion === "function"; +} + +export class GlobalAuroraTopologyMonitor extends ClusterTopologyMonitorImpl { + protected readonly instanceTemplatesByRegion: Map; + declare public readonly topologyUtils: TopologyUtils; + + constructor( + servicesContainer: FullServicesContainer, + topologyUtils: TopologyUtils, + clusterId: string, + initialHostInfo: HostInfo, + properties: Map, + instanceTemplate: HostInfo, + refreshRateNano: number, + highRefreshRateNano: number, + instanceTemplatesByRegion: Map + ) { + super(servicesContainer, topologyUtils, clusterId, initialHostInfo, properties, instanceTemplate, refreshRateNano, highRefreshRateNano); + + this.instanceTemplatesByRegion = instanceTemplatesByRegion; + this.topologyUtils = topologyUtils; + } + + protected override async getInstanceTemplate(hostId: string, targetClient: ClientWrapper): Promise { + if (!isGdbTopologyUtils(this.topologyUtils)) { + throw new AwsWrapperError(Messages.get("GlobalAuroraTopologyMonitor.invalidTopologyUtils")); + } + + const dialect = this.hostListProviderService.getDialect(); + const region = await this.topologyUtils.getRegion(hostId, targetClient, dialect); + + if (region) { + const instanceTemplate = 
this.instanceTemplatesByRegion.get(region); + if (!instanceTemplate) { + throw new AwsWrapperError(Messages.get("GlobalAuroraTopologyMonitor.cannotFindRegionTemplate", region)); + } + return instanceTemplate; + } + + return this.instanceTemplate; + } +} diff --git a/common/lib/host_list_provider/rds_host_list_provider.ts b/common/lib/host_list_provider/rds_host_list_provider.ts index 168d4531..1af20677 100644 --- a/common/lib/host_list_provider/rds_host_list_provider.ts +++ b/common/lib/host_list_provider/rds_host_list_provider.ts @@ -257,14 +257,9 @@ export class RdsHostListProvider implements DynamicHostListProvider { return topology == null ? null : topology.hosts; } - static clearAll(): void { - // No-op - // TODO: remove if still not used after full service container refactoring - } - clear(): void { if (this.clusterId) { - CoreServicesContainer.getInstance().getStorageService().remove(Topology, this.clusterId); + this.servicesContainers.getStorageService().remove(Topology, this.clusterId); } } diff --git a/common/lib/host_list_provider/topology_utils.ts b/common/lib/host_list_provider/topology_utils.ts index 91c8be67..500f3046 100644 --- a/common/lib/host_list_provider/topology_utils.ts +++ b/common/lib/host_list_provider/topology_utils.ts @@ -17,7 +17,6 @@ import { ClientWrapper } from "../client_wrapper"; import { DatabaseDialect } from "../database_dialect/database_dialect"; import { HostInfo } from "../host_info"; -import { isDialectTopologyAware } from "../utils/utils"; import { Messages } from "../utils/messages"; import { HostRole } from "../host_role"; import { HostAvailability } from "../host_availability/host_availability"; @@ -25,6 +24,11 @@ import { HostInfoBuilder } from "../host_info_builder"; import { AwsWrapperError } from "../utils/errors"; import { TopologyAwareDatabaseDialect } from "../database_dialect/topology_aware_database_dialect"; +/** + * Type representing an instance template - either a single HostInfo or a Map of region to 
HostInfo. + */ +export type InstanceTemplate = HostInfo | Map; + /** * Options for creating a TopologyQueryResult instance. */ @@ -66,11 +70,11 @@ export class TopologyQueryResult { } /** - * A class defining utility methods that can be used to retrieve and process a variety of database topology + * An abstract class defining utility methods that can be used to retrieve and process a variety of database topology * information. This class can be overridden to define logic specific to various database engine deployments * (e.g. Aurora, Multi-AZ, Global Aurora etc.). */ -export class TopologyUtils { +export abstract class TopologyUtils { protected readonly dialect: TopologyAwareDatabaseDialect; protected readonly hostInfoBuilder: HostInfoBuilder; @@ -84,25 +88,17 @@ export class TopologyUtils { * * @param targetClient the client wrapper to use to query the database. * @param dialect the database dialect to use for the topology query. - * @param clusterInstanceTemplate the template {@link HostInfo} to use when constructing new {@link HostInfo} objects from - * the data returned by the topology query. + * @param initialHost the initial host info. + * @param instanceTemplate the template for constructing host info objects. * @returns a list of {@link HostInfo} objects representing the results of the topology query. * @throws TypeError if the dialect is not topology-aware. 
*/ - async queryForTopology( + abstract queryForTopology( targetClient: ClientWrapper, dialect: DatabaseDialect, initialHost: HostInfo, - clusterInstanceTemplate: HostInfo - ): Promise { - if (!isDialectTopologyAware(dialect)) { - throw new TypeError(Messages.get("RdsHostListProvider.incorrectDialect")); - } - - return await dialect - .queryForTopology(targetClient) - .then((res: TopologyQueryResult[]) => this.verifyWriter(this.createHosts(res, initialHost, clusterInstanceTemplate))); - } + instanceTemplate: InstanceTemplate + ): Promise; public createHost( instanceId: string | undefined, @@ -123,7 +119,6 @@ export class TopologyUtils { } const finalEndpoint = endpoint ?? this.getHostEndpoint(hostname, instanceTemplate) ?? ""; - const finalPort = port ?? (instanceTemplate?.isPortSpecified() ? instanceTemplate?.port : initialHost?.port); const host: HostInfo = this.hostInfoBuilder @@ -139,46 +134,8 @@ export class TopologyUtils { return host; } - /** - * Creates {@link HostInfo} objects from the given topology query results. - * - * @param topologyQueryResults the result set returned by the topology query describing the cluster topology - * @param initialHost the {@link HostInfo} describing the initial connection. - * @param clusterInstanceTemplate the template used to construct the new {@link HostInfo} objects. - * @returns a list of {@link HostInfo} objects representing the topology. - */ - public createHosts(topologyQueryResults: TopologyQueryResult[], initialHost: HostInfo, clusterInstanceTemplate: HostInfo): HostInfo[] { - const hostsMap = new Map(); - topologyQueryResults.forEach((row) => { - const lastUpdateTime = row.lastUpdateTime ?? 
Date.now(); - - const host = this.createHost( - row.id, - row.host, - row.isWriter, - row.weight, - lastUpdateTime, - initialHost, - clusterInstanceTemplate, - row.endpoint, - row.port - ); - - const existing = hostsMap.get(host.host); - if (!existing || existing.lastUpdateTime < host.lastUpdateTime) { - hostsMap.set(host.host, host); - } - }); - - return Array.from(hostsMap.values()); - } - /** * Gets the host endpoint by replacing the placeholder in the cluster instance template. - * - * @param hostName the host name to use in the endpoint. - * @param clusterInstanceTemplate the template containing the endpoint pattern. - * @returns the constructed endpoint, or null if the template is invalid. */ protected getHostEndpoint(hostName: string, clusterInstanceTemplate: HostInfo): string | null { if (!clusterInstanceTemplate || !clusterInstanceTemplate.host) { @@ -191,9 +148,6 @@ export class TopologyUtils { /** * Verifies that the topology contains exactly one writer instance. * If multiple writers are found, selects the most recently updated one. - * - * @param allHosts the list of all hosts from the topology query. - * @returns the verified list of hosts with exactly one writer, or null if no writer is found. 
*/ protected async verifyWriter(allHosts: HostInfo[]): Promise { if (allHosts === null || allHosts.length === 0) { diff --git a/common/lib/host_selector.ts b/common/lib/host_selector.ts index ac995cf3..e32fbb93 100644 --- a/common/lib/host_selector.ts +++ b/common/lib/host_selector.ts @@ -18,5 +18,5 @@ import { HostInfo } from "./host_info"; import { HostRole } from "./host_role"; export interface HostSelector { - getHost(hosts: HostInfo[], role: HostRole, props?: Map): HostInfo; + getHost(hosts: HostInfo[], role: HostRole | null, props?: Map): HostInfo; } diff --git a/common/lib/plugin_service.ts b/common/lib/plugin_service.ts index 3782a1ea..65a6d2dc 100644 --- a/common/lib/plugin_service.ts +++ b/common/lib/plugin_service.ts @@ -444,7 +444,6 @@ export class PluginServiceImpl implements PluginService, HostListProviderService ]; if (hostsToChange.length === 0) { - logger.debug(Messages.get("PluginService.hostsChangeListEmpty")); return; } diff --git a/common/lib/plugins/failover2/failover2_plugin.ts b/common/lib/plugins/failover2/failover2_plugin.ts index f4736330..953e6586 100644 --- a/common/lib/plugins/failover2/failover2_plugin.ts +++ b/common/lib/plugins/failover2/failover2_plugin.ts @@ -38,46 +38,47 @@ import { ClientWrapper } from "../../client_wrapper"; import { HostAvailability } from "../../host_availability/host_availability"; import { TelemetryTraceLevel } from "../../utils/telemetry/telemetry_trace_level"; import { HostRole } from "../../host_role"; -import { CanReleaseResources } from "../../can_release_resources"; import { ReaderFailoverResult } from "../failover/reader_failover_result"; -import { BlockingHostListProvider, HostListProvider } from "../../host_list_provider/host_list_provider"; import { logTopology } from "../../utils/utils"; +import { FullServicesContainer } from "../../utils/full_services_container"; -export class Failover2Plugin extends AbstractConnectionPlugin implements CanReleaseResources { +export class Failover2Plugin extends 
AbstractConnectionPlugin { private static readonly TELEMETRY_WRITER_FAILOVER = "failover to writer instance"; private static readonly TELEMETRY_READER_FAILOVER = "failover to reader"; private static readonly METHOD_END = "end"; private static readonly SUBSCRIBED_METHODS: Set = new Set(["initHostProvider", "connect", "query"]); private readonly _staleDnsHelper: StaleDnsHelper; - private readonly _properties: Map; - private readonly pluginService: PluginService; - private readonly _rdsHelper: RdsUtils; - private readonly failoverWriterTriggeredCounter: TelemetryCounter; - private readonly failoverWriterSuccessCounter: TelemetryCounter; - private readonly failoverWriterFailedCounter: TelemetryCounter; - private readonly failoverReaderTriggeredCounter: TelemetryCounter; - private readonly failoverReaderSuccessCounter: TelemetryCounter; - private readonly failoverReaderFailedCounter: TelemetryCounter; - private telemetryFailoverAdditionalTopTraceSetting: boolean = false; - private _rdsUrlType: RdsUrlType | null = null; - private _isInTransaction: boolean = false; + protected readonly properties: Map; + private readonly servicesContainer: FullServicesContainer; + protected readonly pluginService: PluginService; + protected readonly rdsHelper: RdsUtils; + protected readonly failoverWriterTriggeredCounter: TelemetryCounter; + protected readonly failoverWriterSuccessCounter: TelemetryCounter; + protected readonly failoverWriterFailedCounter: TelemetryCounter; + protected readonly failoverReaderTriggeredCounter: TelemetryCounter; + protected readonly failoverReaderSuccessCounter: TelemetryCounter; + protected readonly failoverReaderFailedCounter: TelemetryCounter; + protected telemetryFailoverAdditionalTopTraceSetting: boolean = false; + protected rdsUrlType: RdsUrlType | null = null; + protected _isInTransaction: boolean = false; private _lastError: any; failoverMode: FailoverMode = FailoverMode.UNKNOWN; - private hostListProviderService?: HostListProviderService; + 
protected hostListProviderService?: HostListProviderService; protected enableFailoverSetting: boolean = WrapperProperties.ENABLE_CLUSTER_AWARE_FAILOVER.defaultValue; - private readonly failoverTimeoutSettingMs: number = WrapperProperties.FAILOVER_TIMEOUT_MS.defaultValue; - private readonly failoverReaderHostSelectorStrategy: string = WrapperProperties.FAILOVER_READER_HOST_SELECTOR_STRATEGY.defaultValue; + protected readonly failoverTimeoutSettingMs: number = WrapperProperties.FAILOVER_TIMEOUT_MS.defaultValue; + protected readonly failoverReaderHostSelectorStrategy: string = WrapperProperties.FAILOVER_READER_HOST_SELECTOR_STRATEGY.defaultValue; - constructor(pluginService: PluginService, properties: Map, rdsHelper: RdsUtils) { + constructor(servicesContainer: FullServicesContainer, properties: Map, rdsHelper: RdsUtils) { super(); - this._properties = properties; - this.pluginService = pluginService; - this._rdsHelper = rdsHelper; + this.properties = properties; + this.servicesContainer = servicesContainer; + this.pluginService = servicesContainer.getPluginService(); + this.rdsHelper = rdsHelper; this._staleDnsHelper = new StaleDnsHelper(this.pluginService); - this.enableFailoverSetting = WrapperProperties.ENABLE_CLUSTER_AWARE_FAILOVER.get(this._properties); - this.failoverTimeoutSettingMs = WrapperProperties.FAILOVER_TIMEOUT_MS.get(this._properties); - this.failoverReaderHostSelectorStrategy = WrapperProperties.FAILOVER_READER_HOST_SELECTOR_STRATEGY.get(this._properties); + this.enableFailoverSetting = WrapperProperties.ENABLE_CLUSTER_AWARE_FAILOVER.get(this.properties); + this.failoverTimeoutSettingMs = WrapperProperties.FAILOVER_TIMEOUT_MS.get(this.properties); + this.failoverReaderHostSelectorStrategy = WrapperProperties.FAILOVER_READER_HOST_SELECTOR_STRATEGY.get(this.properties); const telemetryFactory = this.pluginService.getTelemetryFactory(); this.failoverWriterTriggeredCounter = telemetryFactory.createCounter("writerFailover.triggered.count"); @@ -104,21 
+105,12 @@ export class Failover2Plugin extends AbstractConnectionPlugin implements CanRele } initHostProviderFunc(); - - this.failoverMode = failoverModeFromValue(WrapperProperties.FAILOVER_MODE.get(props)); - this._rdsUrlType = this._rdsHelper.identifyRdsType(hostInfo.host); - - if (this.failoverMode === FailoverMode.UNKNOWN) { - this.failoverMode = this._rdsUrlType === RdsUrlType.RDS_READER_CLUSTER ? FailoverMode.READER_OR_WRITER : FailoverMode.STRICT_WRITER; - } - - logger.debug(Messages.get("Failover.parameterValue", "failoverMode", FailoverMode[this.failoverMode])); } - private isFailoverEnabled(): boolean { + protected isFailoverEnabled(): boolean { return ( this.enableFailoverSetting && - this._rdsUrlType !== RdsUrlType.RDS_PROXY && + this.rdsUrlType !== RdsUrlType.RDS_PROXY && this.pluginService.getAllHosts() && this.pluginService.getAllHosts().length > 0 ); @@ -130,6 +122,8 @@ export class Failover2Plugin extends AbstractConnectionPlugin implements CanRele isInitialConnection: boolean, connectFunc: () => Promise ): Promise { + this.initFailoverMode(); + if ( // Failover is not enabled, does not require additional processing. !this.enableFailoverSetting || @@ -231,6 +225,10 @@ export class Failover2Plugin extends AbstractConnectionPlugin implements CanRele await this.failoverReader(); } + this.throwFailoverSuccessException(); + } + + protected throwFailoverSuccessException(): void { if (this._isInTransaction || this.pluginService.isInTransaction()) { // "Transaction resolution unknown. Please re-configure session state if required and try // restarting transaction." 
@@ -430,7 +428,7 @@ export class Failover2Plugin extends AbstractConnectionPlugin implements CanRele } private async createConnectionForHost(hostInfo: HostInfo): Promise { - const copyProps = new Map(this._properties); + const copyProps = new Map(this.properties); copyProps.set(WrapperProperties.HOST.name, hostInfo.host); return await this.pluginService.connect(hostInfo, copyProps, this); } @@ -464,6 +462,22 @@ export class Failover2Plugin extends AbstractConnectionPlugin implements CanRele return methodName === Failover2Plugin.METHOD_END; } + protected initFailoverMode(): void { + if (this.rdsUrlType) { + return; + } + + this.failoverMode = failoverModeFromValue(WrapperProperties.FAILOVER_MODE.get(this.properties)); + const initialHostInfo: HostInfo = this.hostListProviderService.getInitialConnectionHostInfo(); + this.rdsUrlType = this.rdsHelper.identifyRdsType(initialHostInfo.host); + + if (this.failoverMode === FailoverMode.UNKNOWN) { + this.failoverMode = this.rdsUrlType === RdsUrlType.RDS_READER_CLUSTER ? 
FailoverMode.READER_OR_WRITER : FailoverMode.STRICT_WRITER; + } + + logger.debug(Messages.get("Failover.parameterValue", "failoverMode", FailoverMode[this.failoverMode])); + } + private shouldErrorTriggerClientSwitch(error: any): boolean { if (!this.isFailoverEnabled()) { logger.debug(Messages.get("Failover.failoverDisabled")); @@ -486,11 +500,4 @@ export class Failover2Plugin extends AbstractConnectionPlugin implements CanRele this.failoverWriterFailedCounter.inc(); throw new FailoverFailedError(errorMessage); } - - async releaseResources(): Promise { - const hostListProvider: HostListProvider = this.pluginService.getHostListProvider(); - if (this.hostListProviderService.isBlockingHostListProvider(hostListProvider)) { - await (hostListProvider as BlockingHostListProvider).clearAll(); - } - } } diff --git a/common/lib/plugins/failover2/failover2_plugin_factory.ts b/common/lib/plugins/failover2/failover2_plugin_factory.ts index c60b7956..d6068760 100644 --- a/common/lib/plugins/failover2/failover2_plugin_factory.ts +++ b/common/lib/plugins/failover2/failover2_plugin_factory.ts @@ -29,7 +29,7 @@ export class Failover2PluginFactory extends ConnectionPluginFactory { if (!Failover2PluginFactory.failover2Plugin) { Failover2PluginFactory.failover2Plugin = await import("./failover2_plugin"); } - return new Failover2PluginFactory.failover2Plugin.Failover2Plugin(servicesContainer.getPluginService(), properties, new RdsUtils()); + return new Failover2PluginFactory.failover2Plugin.Failover2Plugin(servicesContainer, properties, new RdsUtils()); } catch (error: any) { throw new AwsWrapperError(Messages.get("ConnectionPluginChainBuilder.errorImportingPlugin", error.message, "Failover2Plugin")); } diff --git a/common/lib/plugins/gdb_failover/global_db_failover_mode.ts b/common/lib/plugins/gdb_failover/global_db_failover_mode.ts new file mode 100644 index 00000000..b29c6a5f --- /dev/null +++ b/common/lib/plugins/gdb_failover/global_db_failover_mode.ts @@ -0,0 +1,42 @@ +/* + 
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +export enum GlobalDbFailoverMode { + STRICT_WRITER = "strict-writer", + STRICT_HOME_READER = "strict-home-reader", + STRICT_OUT_OF_HOME_READER = "strict-out-of-home-reader", + STRICT_ANY_READER = "strict-any-reader", + HOME_READER_OR_WRITER = "home-reader-or-writer", + OUT_OF_HOME_READER_OR_WRITER = "out-of-home-reader-or-writer", + ANY_READER_OR_WRITER = "any-reader-or-writer" +} + +const nameToValue = new Map([ + ["strict-writer", GlobalDbFailoverMode.STRICT_WRITER], + ["strict-home-reader", GlobalDbFailoverMode.STRICT_HOME_READER], + ["strict-out-of-home-reader", GlobalDbFailoverMode.STRICT_OUT_OF_HOME_READER], + ["strict-any-reader", GlobalDbFailoverMode.STRICT_ANY_READER], + ["home-reader-or-writer", GlobalDbFailoverMode.HOME_READER_OR_WRITER], + ["out-of-home-reader-or-writer", GlobalDbFailoverMode.OUT_OF_HOME_READER_OR_WRITER], + ["any-reader-or-writer", GlobalDbFailoverMode.ANY_READER_OR_WRITER] +]); + +export function globalDbFailoverModeFromValue(value: string | null | undefined): GlobalDbFailoverMode | null { + if (!value) { + return null; + } + return nameToValue.get(value.toLowerCase()) ?? 
null; +} diff --git a/common/lib/plugins/gdb_failover/global_db_failover_plugin.ts b/common/lib/plugins/gdb_failover/global_db_failover_plugin.ts new file mode 100644 index 00000000..6a0307b8 --- /dev/null +++ b/common/lib/plugins/gdb_failover/global_db_failover_plugin.ts @@ -0,0 +1,372 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { RdsUtils } from "../../utils/rds_utils"; +import { GlobalDbFailoverMode, globalDbFailoverModeFromValue } from "./global_db_failover_mode"; +import { HostInfo } from "../../host_info"; +import { WrapperProperties } from "../../wrapper_property"; +import { RdsUrlType } from "../../utils/rds_url_type"; +import { logger } from "../../../logutils"; +import { Messages } from "../../utils/messages"; +import { AwsTimeoutError, AwsWrapperError, FailoverFailedError, FailoverSuccessError, UnsupportedMethodError } from "../../utils/errors"; +import { ClientWrapper } from "../../client_wrapper"; +import { HostAvailability } from "../../host_availability/host_availability"; +import { TelemetryTraceLevel } from "../../utils/telemetry/telemetry_trace_level"; +import { HostRole } from "../../host_role"; +import { ReaderFailoverResult } from "../failover/reader_failover_result"; +import { containsHostAndPort, equalsIgnoreCase, getTimeInNanos, getWriter, logTopology } from "../../utils/utils"; +import { Failover2Plugin } from "../failover2/failover2_plugin"; +import { 
FullServicesContainer } from "../../utils/full_services_container"; + +export class GlobalDbFailoverPlugin extends Failover2Plugin { + private static readonly TELEMETRY_FAILOVER = "failover"; + + protected activeHomeFailoverMode: GlobalDbFailoverMode | null = null; + protected inactiveHomeFailoverMode: GlobalDbFailoverMode | null = null; + protected homeRegion: string | null = null; + + constructor(servicesContainer: FullServicesContainer, properties: Map, rdsHelper: RdsUtils) { + super(servicesContainer, properties, rdsHelper); + } + + protected initFailoverMode(): void { + if (this.rdsUrlType !== null) { + return; + } + + const initialHostInfo = this.hostListProviderService?.getInitialConnectionHostInfo(); + if (!initialHostInfo) { + throw new AwsWrapperError(Messages.get("GlobalDbFailoverPlugin.missingInitialHost")); + } + + this.rdsUrlType = this.rdsHelper.identifyRdsType(initialHostInfo.host); + + this.homeRegion = WrapperProperties.FAILOVER_HOME_REGION.get(this.properties) ?? null; + if (!this.homeRegion) { + if (!this.rdsUrlType.hasRegion) { + throw new AwsWrapperError(Messages.get("GlobalDbFailoverPlugin.missingHomeRegion")); + } + this.homeRegion = this.rdsHelper.getRdsRegion(initialHostInfo.host); + if (!this.homeRegion) { + throw new AwsWrapperError(Messages.get("GlobalDbFailoverPlugin.missingHomeRegion")); + } + } + + logger.debug(Messages.get("Failover.parameterValue", "failoverHomeRegion", this.homeRegion)); + + const activeHomeMode = WrapperProperties.ACTIVE_HOME_FAILOVER_MODE.get(this.properties); + const inactiveHomeMode = WrapperProperties.INACTIVE_HOME_FAILOVER_MODE.get(this.properties); + + this.activeHomeFailoverMode = globalDbFailoverModeFromValue(activeHomeMode); + this.inactiveHomeFailoverMode = globalDbFailoverModeFromValue(inactiveHomeMode); + + if (this.activeHomeFailoverMode === null) { + switch (this.rdsUrlType) { + case RdsUrlType.RDS_WRITER_CLUSTER: + case RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER: + this.activeHomeFailoverMode = 
GlobalDbFailoverMode.STRICT_WRITER; + break; + default: + this.activeHomeFailoverMode = GlobalDbFailoverMode.HOME_READER_OR_WRITER; + } + } + + if (this.inactiveHomeFailoverMode === null) { + switch (this.rdsUrlType) { + case RdsUrlType.RDS_WRITER_CLUSTER: + case RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER: + this.inactiveHomeFailoverMode = GlobalDbFailoverMode.STRICT_WRITER; + break; + default: + this.inactiveHomeFailoverMode = GlobalDbFailoverMode.HOME_READER_OR_WRITER; + } + } + + logger.debug(Messages.get("Failover.parameterValue", "activeHomeFailoverMode", this.activeHomeFailoverMode)); + logger.debug(Messages.get("Failover.parameterValue", "inactiveHomeFailoverMode", this.inactiveHomeFailoverMode)); + } + + override async failover(): Promise { + const telemetryFactory = this.pluginService.getTelemetryFactory(); + const telemetryContext = telemetryFactory.openTelemetryContext(GlobalDbFailoverPlugin.TELEMETRY_FAILOVER, TelemetryTraceLevel.NESTED); + + const failoverStartTimeNs = getTimeInNanos(); + const failoverEndTimeNs = failoverStartTimeNs + BigInt(this.failoverTimeoutSettingMs) * BigInt(1_000_000); + + try { + await telemetryContext.start(async () => { + logger.info(Messages.get("GlobalDbFailoverPlugin.startFailover")); + + // Force refresh host list and wait for topology to stabilize + const refreshResult = await this.pluginService.forceMonitoringRefresh(true, this.failoverTimeoutSettingMs); + if (!refreshResult) { + this.failoverWriterTriggeredCounter.inc(); + this.failoverWriterFailedCounter.inc(); + logger.error(Messages.get("Failover.unableToRefreshHostList")); + throw new FailoverFailedError(Messages.get("Failover.unableToRefreshHostList")); + } + + const updatedHosts = this.pluginService.getAllHosts(); + const writerCandidate = getWriter(updatedHosts); + + if (!writerCandidate) { + this.failoverWriterTriggeredCounter.inc(); + this.failoverWriterFailedCounter.inc(); + const message = logTopology(updatedHosts, 
Messages.get("Failover.unableToDetermineWriter")); + logger.error(message); + throw new FailoverFailedError(message); + } + + // Check writer region to determine failover mode + const writerRegion = this.rdsHelper.getRdsRegion(writerCandidate.host); + const isHomeRegion = equalsIgnoreCase(this.homeRegion, writerRegion); + logger.debug(Messages.get("GlobalDbFailoverPlugin.isHomeRegion", String(isHomeRegion))); + + const currentFailoverMode = isHomeRegion ? this.activeHomeFailoverMode : this.inactiveHomeFailoverMode; + logger.debug(Messages.get("GlobalDbFailoverPlugin.currentFailoverMode", String(currentFailoverMode))); + + switch (currentFailoverMode) { + case GlobalDbFailoverMode.STRICT_WRITER: + await this.failoverToWriter(writerCandidate); + break; + case GlobalDbFailoverMode.STRICT_HOME_READER: + await this.failoverToAllowedHost( + () => this.pluginService.getHosts().filter((x) => x.role === HostRole.READER && this.isHostInHomeRegion(x)), + HostRole.READER, + failoverEndTimeNs + ); + break; + case GlobalDbFailoverMode.STRICT_OUT_OF_HOME_READER: + await this.failoverToAllowedHost( + () => this.pluginService.getHosts().filter((x) => x.role === HostRole.READER && !this.isHostInHomeRegion(x)), + HostRole.READER, + failoverEndTimeNs + ); + break; + case GlobalDbFailoverMode.STRICT_ANY_READER: + await this.failoverToAllowedHost( + () => this.pluginService.getHosts().filter((x) => x.role === HostRole.READER), + HostRole.READER, + failoverEndTimeNs + ); + break; + case GlobalDbFailoverMode.HOME_READER_OR_WRITER: + await this.failoverToAllowedHost( + () => + this.pluginService.getHosts().filter((x) => x.role === HostRole.WRITER || (x.role === HostRole.READER && this.isHostInHomeRegion(x))), + null, + failoverEndTimeNs + ); + break; + case GlobalDbFailoverMode.OUT_OF_HOME_READER_OR_WRITER: + await this.failoverToAllowedHost( + () => + this.pluginService + .getHosts() + .filter((x) => x.role === HostRole.WRITER || (x.role === HostRole.READER && 
!this.isHostInHomeRegion(x))), + null, + failoverEndTimeNs + ); + break; + case GlobalDbFailoverMode.ANY_READER_OR_WRITER: + await this.failoverToAllowedHost(() => [...this.pluginService.getHosts()], null, failoverEndTimeNs); + break; + default: + throw new UnsupportedMethodError(`Unsupported failover mode: ${currentFailoverMode}`); + } + + logger.debug(Messages.get("Failover.establishedConnection", this.pluginService.getCurrentHostInfo()?.host ?? "unknown")); + this.throwFailoverSuccessException(); + }); + } finally { + logger.debug(Messages.get("GlobalDbFailoverPlugin.failoverElapsed", String(getTimeInNanos() - failoverStartTimeNs))); + + if (this.telemetryFailoverAdditionalTopTraceSetting && telemetryContext) { + await telemetryFactory.postCopy(telemetryContext, TelemetryTraceLevel.FORCE_TOP_LEVEL); + } + } + } + + private isHostInHomeRegion(host: HostInfo): boolean { + const hostRegion = this.rdsHelper.getRdsRegion(host.host); + return equalsIgnoreCase(hostRegion, this.homeRegion); + } + + protected async failoverToWriter(writerCandidate: HostInfo): Promise { + this.failoverWriterTriggeredCounter.inc(); + let writerCandidateConn: ClientWrapper | null = null; + + try { + const allowedHosts = this.pluginService.getHosts(); + if (!containsHostAndPort(allowedHosts, writerCandidate.hostAndPort)) { + this.failoverWriterFailedCounter.inc(); + const topologyString = logTopology(allowedHosts, ""); + logger.error(Messages.get("Failover.newWriterNotAllowed", writerCandidate.url, topologyString)); + throw new FailoverFailedError(Messages.get("Failover.newWriterNotAllowed", writerCandidate.url, topologyString)); + } + + try { + writerCandidateConn = await this.pluginService.connect(writerCandidate, this.properties, this); + } catch (error) { + this.failoverWriterFailedCounter.inc(); + logger.error(Messages.get("Failover.unableToConnectToWriterDueToError", writerCandidate.host, error.message)); + throw new 
FailoverFailedError(Messages.get("Failover.unableToConnectToWriterDueToError", writerCandidate.host, error.message)); + } + + const role = await this.pluginService.getHostRole(writerCandidateConn); + if (role !== HostRole.WRITER) { + await writerCandidateConn?.abort(); + writerCandidateConn = null; + this.failoverWriterFailedCounter.inc(); + logger.error(Messages.get("Failover.unexpectedReaderRole", writerCandidate.host, String(role))); + throw new FailoverFailedError(Messages.get("Failover.unexpectedReaderRole", writerCandidate.host, String(role))); + } + + await this.pluginService.setCurrentClient(writerCandidateConn, writerCandidate); + writerCandidateConn = null; // Prevent connection from being closed in finally block + + this.failoverWriterSuccessCounter.inc(); + } catch (ex) { + if (!(ex instanceof FailoverFailedError)) { + this.failoverWriterFailedCounter.inc(); + } + throw ex; + } finally { + if (writerCandidateConn && this.pluginService.getCurrentClient().targetClient !== writerCandidateConn) { + await writerCandidateConn.abort(); + } + } + } + + protected async failoverToAllowedHost(getAllowedHosts: () => HostInfo[], verifyRole: HostRole | null, failoverEndTimeNs: bigint): Promise { + this.failoverReaderTriggeredCounter.inc(); + + let result: ReaderFailoverResult | null = null; + try { + try { + result = await this.getAllowedFailoverConnection(getAllowedHosts, verifyRole, failoverEndTimeNs); + await this.pluginService.setCurrentClient(result.client!, result.newHost!); + result = null; + } catch (e) { + if (e instanceof AwsTimeoutError) { + logger.error(Messages.get("Failover.unableToConnectToReader")); + throw new FailoverFailedError(Messages.get("Failover.unableToConnectToReader")); + } + throw e; + } + + logger.info(Messages.get("Failover.establishedConnection", this.pluginService.getCurrentHostInfo()?.host ?? 
"unknown")); + this.throwFailoverSuccessException(); + } catch (ex) { + if (ex instanceof FailoverSuccessError) { + this.failoverReaderSuccessCounter.inc(); + } else { + this.failoverReaderFailedCounter.inc(); + } + throw ex; + } finally { + if (result?.client !== this.pluginService.getCurrentClient().targetClient) { + await result?.client.abort(); + } + } + } + + protected async getAllowedFailoverConnection( + getAllowedHosts: () => HostInfo[], + verifyRole: HostRole | null, + failoverEndTimeNs: bigint + ): Promise { + do { + await this.pluginService.refreshHostList(); + let updatedAllowedHosts = getAllowedHosts(); + + // Make a copy of hosts and set their availability + updatedAllowedHosts = updatedAllowedHosts.map((x) => + this.pluginService.getHostInfoBuilder().copyFrom(x).withAvailability(HostAvailability.AVAILABLE).build() + ); + + const remainingAllowedHosts = [...updatedAllowedHosts]; + + if (remainingAllowedHosts.length === 0) { + await this.shortDelay(); + continue; + } + + while (remainingAllowedHosts.length > 0 && getTimeInNanos() < failoverEndTimeNs) { + let candidateHost: HostInfo | undefined; + try { + candidateHost = this.pluginService.getHostInfoByStrategy(verifyRole, this.failoverReaderHostSelectorStrategy, remainingAllowedHosts); + } catch { + // Strategy can't get a host according to requested conditions. + // Do nothing + } + + if (!candidateHost) { + logger.debug(logTopology(remainingAllowedHosts, `${Messages.get("GlobalDbFailoverPlugin.candidateNull", String(verifyRole))} `)); + await this.shortDelay(); + break; + } + + let candidateConn: ClientWrapper | null = null; + try { + candidateConn = await this.pluginService.connect(candidateHost, this.properties, this); + // Since the roles in the host list might not be accurate, we execute a query to check the instance's role + const role = verifyRole === null ? 
null : await this.pluginService.getHostRole(candidateConn); + + if (verifyRole === null || verifyRole === role) { + const updatedHostSpec = this.pluginService + .getHostInfoBuilder() + .copyFrom(candidateHost) + .withRole(role ?? candidateHost.role) + .build(); + return new ReaderFailoverResult(candidateConn, updatedHostSpec, true); + } + + // The role is not as expected, so the connection is not valid + const index = remainingAllowedHosts.findIndex((h) => h.hostAndPort === candidateHost!.hostAndPort); + if (index !== -1) { + remainingAllowedHosts.splice(index, 1); + } + await candidateConn.abort(); + candidateConn = null; + } catch { + const index = remainingAllowedHosts.findIndex((h) => h.hostAndPort === candidateHost!.hostAndPort); + if (index !== -1) { + remainingAllowedHosts.splice(index, 1); + } + if (candidateConn) { + await candidateConn.abort(); + } + } + } + } while (getTimeInNanos() < failoverEndTimeNs); // All hosts failed. Keep trying until we hit the timeout. + + throw new AwsTimeoutError(Messages.get("Failover.failoverReaderTimeout")); + } + + protected shortDelay(): Promise { + return new Promise((resolve) => setTimeout(resolve, 100)); + } + + override async failoverReader(): Promise { + throw new UnsupportedMethodError("This method should not be used in this class. See failover() method for implementation details."); + } + + override async failoverWriter(): Promise { + // This method should not be used in this class. See failover() method for implementation details. + throw new UnsupportedMethodError("This method should not be used in this class. See failover() method for implementation details."); + } +} diff --git a/common/lib/plugins/gdb_failover/global_db_failover_plugin_factory.ts b/common/lib/plugins/gdb_failover/global_db_failover_plugin_factory.ts new file mode 100644 index 00000000..25950509 --- /dev/null +++ b/common/lib/plugins/gdb_failover/global_db_failover_plugin_factory.ts @@ -0,0 +1,38 @@ +/* + Copyright Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { ConnectionPluginFactory } from "../../plugin_factory"; +import { PluginService } from "../../plugin_service"; +import { ConnectionPlugin } from "../../connection_plugin"; +import { RdsUtils } from "../../utils/rds_utils"; +import { AwsWrapperError } from "../../utils/errors"; +import { Messages } from "../../utils/messages"; +import { FullServicesContainer } from "../../utils/full_services_container"; + +export class GlobalDbFailoverPluginFactory extends ConnectionPluginFactory { + private static globalDbFailoverPlugin: any; + + async getInstance(servicesContainer: FullServicesContainer, properties: Map): Promise { + try { + if (!GlobalDbFailoverPluginFactory.globalDbFailoverPlugin) { + GlobalDbFailoverPluginFactory.globalDbFailoverPlugin = await import("./global_db_failover_plugin"); + } + return new GlobalDbFailoverPluginFactory.globalDbFailoverPlugin.GlobalDbFailoverPlugin(servicesContainer, properties, new RdsUtils()); + } catch (error: any) { + throw new AwsWrapperError(Messages.get("ConnectionPluginChainBuilder.errorImportingPlugin", error.message, "GlobalDbFailoverPlugin")); + } + } +} diff --git a/common/lib/plugins/stale_dns/stale_dns_helper.ts b/common/lib/plugins/stale_dns/stale_dns_helper.ts index a1f9dcd5..a66b892d 100644 --- a/common/lib/plugins/stale_dns/stale_dns_helper.ts +++ b/common/lib/plugins/stale_dns/stale_dns_helper.ts @@ -23,13 +23,14 @@ import { 
Messages } from "../../utils/messages"; import { RdsUtils } from "../../utils/rds_utils"; import { lookup, LookupAddress } from "dns"; import { promisify } from "util"; -import { AwsWrapperError } from "../../utils/errors"; import { HostChangeOptions } from "../../host_change_options"; import { WrapperProperties } from "../../wrapper_property"; import { ClientWrapper } from "../../client_wrapper"; -import { getWriter, logTopology } from "../../utils/utils"; +import { containsHostAndPort, getWriter, logTopology } from "../../utils/utils"; import { TelemetryFactory } from "../../utils/telemetry/telemetry_factory"; import { TelemetryCounter } from "../../utils/telemetry/telemetry_counter"; +import { RdsUrlType } from "../../utils/rds_url_type"; +import { AwsWrapperError } from "../../utils/errors"; export class StaleDnsHelper { private readonly pluginService: PluginService; @@ -53,33 +54,38 @@ export class StaleDnsHelper { props: Map, connectFunc: () => Promise ): Promise { - if (!this.rdsUtils.isWriterClusterDns(host)) { - return connectFunc(); - } + const type: RdsUrlType = this.rdsUtils.identifyRdsType(host); - const currentTargetClient = await connectFunc(); - - let clusterInetAddress = ""; - try { - const lookupResult = await this.lookupResult(host); - clusterInetAddress = lookupResult.address; - } catch (error) { - // ignore + if (type !== RdsUrlType.RDS_WRITER_CLUSTER && type !== RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER) { + return connectFunc(); } - const hostInetAddress = clusterInetAddress; - logger.debug(Messages.get("StaleDnsHelper.clusterEndpointDns", hostInetAddress)); - - if (!clusterInetAddress) { - return currentTargetClient; + if (type === RdsUrlType.RDS_WRITER_CLUSTER) { + const writer = getWriter(this.pluginService.getAllHosts()); + if (writer != null && this.rdsUtils.isRdsInstance(writer.host)) { + if ( + isInitialConnection && + WrapperProperties.SKIP_INACTIVE_WRITER_CLUSTER_CHECK.get(props) && + !this.rdsUtils.isSameRegion(writer.host, host) + ) { + 
// The cluster writer endpoint belongs to a different region than the current writer region. + // It means that the cluster is Aurora Global Database and cluster writer endpoint is in secondary region. + // In this case the cluster writer endpoint is in inactive state and doesn't represent the current writer + // so any connection check should be skipped. + // Continue with a normal workflow. + return connectFunc(); + } + } else { + // No writer is available. It could be the case with the first connection when topology isn't yet available. + // Continue with a normal workflow. + return connectFunc(); + } } - const currentHostInfo = this.pluginService.getCurrentHostInfo(); - if (!currentHostInfo) { - throw new AwsWrapperError("Stale DNS Helper: Current hostInfo was null."); - } + const currentTargetClient = await connectFunc(); - if (currentHostInfo && currentHostInfo.role === HostRole.READER) { + const isConnectedToReader: boolean = (await this.pluginService.getHostRole(currentTargetClient)) === HostRole.READER; + if (isConnectedToReader) { // This is if-statement is only reached if the connection url is a writer cluster endpoint. // If the new connection resolves to a reader instance, this means the topology is outdated. // Force refresh to update the topology. @@ -104,27 +110,18 @@ export class StaleDnsHelper { return currentTargetClient; } - if (!this.writerHostAddress) { - try { - const lookupResult = await this.lookupResult(this.writerHostInfo.host); - this.writerHostAddress = lookupResult.address; - } catch (error) { - // ignore - } - } - - logger.debug(Messages.get("StaleDnsHelper.writerInetAddress", this.writerHostAddress)); - - if (!this.writerHostAddress) { - return currentTargetClient; - } + if (isConnectedToReader) { + // Reconnect to writer host if current connection is reader. 
- if (this.writerHostAddress !== clusterInetAddress) { - // DNS resolves a cluster endpoint to a wrong writer - // opens a connection to a proper writer host logger.debug(Messages.get("StaleDnsHelper.staleDnsDetected", this.writerHostInfo.host)); this.staleDNSDetectedCounter.inc(); + const allowedHosts: HostInfo[] = this.pluginService.getHosts(); + + if (!containsHostAndPort(allowedHosts, this.writerHostInfo.hostAndPort)) { + throw new AwsWrapperError(Messages.get("StaleDnsHelper.currentWriterNotAllowed", this.writerHostInfo.host, logTopology(allowedHosts, ""))); + } + let targetClient = null; try { const newProps = new Map(props); @@ -165,8 +162,4 @@ export class StaleDnsHelper { } return Promise.resolve(); } - - lookupResult(host: string): Promise { - return promisify(lookup)(host, {}); - } } diff --git a/common/lib/random_host_selector.ts b/common/lib/random_host_selector.ts index d38d985c..5d35f0eb 100644 --- a/common/lib/random_host_selector.ts +++ b/common/lib/random_host_selector.ts @@ -25,7 +25,9 @@ export class RandomHostSelector implements HostSelector { public static STRATEGY_NAME = "random"; getHost(hosts: HostInfo[], role: HostRole, props?: Map): HostInfo { - const eligibleHosts = hosts.filter((hostInfo: HostInfo) => hostInfo.role === role && hostInfo.getAvailability() === HostAvailability.AVAILABLE); + const eligibleHosts = hosts.filter( + (hostInfo: HostInfo) => (role === null || hostInfo.role === role) && hostInfo.getAvailability() === HostAvailability.AVAILABLE + ); if (eligibleHosts.length === 0) { throw new AwsWrapperError(Messages.get("HostSelector.noHostsMatchingRole", role)); } diff --git a/common/lib/utils/messages.ts b/common/lib/utils/messages.ts index 06b55200..875c9da0 100644 --- a/common/lib/utils/messages.ts +++ b/common/lib/utils/messages.ts @@ -89,7 +89,11 @@ const MESSAGES: Record = { "Failover.unableToConnectToWriter": "Unable to establish SQL connection to the writer instance.", "Failover.unableToConnectToWriterDueToError": "Unable 
to establish SQL connection to the writer instance: %s due to error: %s.", "Failover.unableToConnectToReader": "Unable to establish SQL connection to the reader instance.", + "Failover.unableToRefreshHostList": "The request to discover the new topology timed out or was unsuccessful.", "Failover.unableToDetermineWriter": "Unable to determine the current writer instance.", + "Failover.unexpectedReaderRole": "The new writer was identified to be '%s', but querying the instance for its role returned a role of %s.", + "Failover.strictReaderUnknownHostRole": + "Unable to determine host role for '%s'. Since failover mode is set to STRICT_READER and the host may be a writer, it will not be selected for reader failover.", "Failover.detectedError": "[Failover] Detected an error while executing a command: %s", "Failover.failoverDisabled": "Cluster-aware failover is disabled.", "Failover.establishedConnection": "[Failover] Connected to %s", @@ -99,6 +103,7 @@ const MESSAGES: Record = { "Failover.noOperationsAfterConnectionClosed": "No operations allowed after client ended.", "Failover.transactionResolutionUnknownError": "Unknown transaction resolution error occurred during failover.", "Failover.connectionExplicitlyClosed": "Unable to failover on an explicitly closed connection.", + "Failover.failoverReaderTimeout": "The reader failover process was not able to establish a connection before timing out.", "Failover.timeoutError": "Internal failover task has timed out.", "Failover.newWriterNotAllowed": "The failover process identified the new writer but the host is not in the list of allowed hosts. New writer host: '%s'. Allowed hosts: '%s'.", @@ -109,12 +114,9 @@ const MESSAGES: Record = { "StaleDnsHelper.staleDnsDetected": "Stale DNS data detected. 
Opening a connection to '%s'.", "StaleDnsHelper.reset": "Reset stored writer host.", "StaleDnsPlugin.requireDynamicProvider": "Dynamic host list provider is required.", + "StaleDnsHelper.currentWriterNotAllowed": "The current writer is not in the list of allowed hosts. Current host: '%s'. Allowed hosts: %s", "Client.methodNotSupported": "Method '%s' not supported.", "Client.invalidTransactionIsolationLevel": "An invalid transaction isolation level was provided: '%s'.", - "AuroraStaleDnsHelper.clusterEndpointDns": "Cluster endpoint resolves to '%s'.", - "AuroraStaleDnsHelper.writerHostSpec": "Writer host: '%s'.", - "AuroraStaleDnsHelper.writerInetAddress": "Writer host address: '%s'", - "AuroraStaleDnsHelper.staleDnsDetected": "Stale DNS data detected. Opening a connection to '%s'.", "ReadWriteSplittingPlugin.setReadOnlyOnClosedClient": "setReadOnly cannot be called on a closed client '%s'.", "ReadWriteSplittingPlugin.errorSwitchingToCachedReader": "An error occurred while trying to switch to a cached reader client: '%s'. Error message: '%s'. The driver will attempt to establish a new reader client.", @@ -138,7 +140,8 @@ const MESSAGES: Record = { "ReadWriteSplittingPlugin.failoverErrorWhileExecutingCommand": "Detected a failover error while executing a command: '%s'", "ReadWriteSplittingPlugin.noReadersAvailable": "The plugin was unable to establish a reader client to any reader instance.", "ReadWriteSplittingPlugin.successfullyConnectedToReader": "Successfully connected to a new reader host: '%s'", - "ReadWriteSplittingPlugin.previousReaderNotAllowed": "The previous reader connection cannot be used because it is no longer in the list of allowed hosts. Previous reader: %s. Allowed hosts: %s", + "ReadWriteSplittingPlugin.previousReaderNotAllowed": + "The previous reader connection cannot be used because it is no longer in the list of allowed hosts. Previous reader: %s. 
Allowed hosts: %s", "ReadWriteSplittingPlugin.failedToConnectToReader": "Failed to connect to reader host: '%s'", "ReadWriteSplittingPlugin.unsupportedHostSelectorStrategy": "Unsupported host selection strategy '%s' specified in plugin configuration parameter 'readerHostSelectorStrategy'. Please visit the Read/Write Splitting Plugin documentation for all supported strategies.", @@ -205,7 +208,6 @@ const MESSAGES: Record = { "MonitorService.cleanupTaskInterrupted": "Monitor service cleanup task interrupted.", "PluginService.hostListEmpty": "Current host list is empty.", "PluginService.releaseResources": "Releasing resources.", - "PluginService.hostsChangeListEmpty": "There are no changes in the hosts' availability.", "PluginService.failedToRetrieveHostPort": "Could not retrieve Host:Port for connection.", "PluginService.nonEmptyAliases": "fillAliases called when HostInfo already contains the following aliases: '%s'.", "PluginService.forceMonitoringRefreshTimeout": "A timeout error occurred after waiting '%s' ms for refreshed topology.", @@ -311,8 +313,7 @@ const MESSAGES: Record = { "ClusterTopologyMonitor.stopHostMonitoringTask": "Stop cluster topology monitoring task for '%s'.", "ClusterTopologyMonitor.errorDuringMonitoring": "Error thrown during cluster topology monitoring: '%s'.", "ClusterTopologyMonitor.endMonitoring": "Stop cluster topology monitoring.", - "ClusterTopologyMonitor.matchingReaderTopologies": - "Reader topologies have been consistent for '%s' ms. Updating topology cache.", + "ClusterTopologyMonitor.matchingReaderTopologies": "Reader topologies have been consistent for '%s' ms. 
Updating topology cache.", "ClusterTopologyMonitor.reset": "[clusterId: '%s'] Resetting cluster topology monitor for '%s'.", "ClusterTopologyMonitor.resetEventReceived": "MonitorResetEvent received.", "HostMonitor.startMonitoring": "Host monitor '%s' started.", @@ -402,11 +403,21 @@ const MESSAGES: Record = { "TopologyUtils.errorGettingHostRole": "An error occurred while trying to get the host role.", "GlobalTopologyUtils.missingRegion": "Host '%s' is missing region information in the topology query result.", "GlobalTopologyUtils.missingTemplateForRegion": "No cluster instance template found for region '%s' when processing host '%s'.", - "GlobalTopologyUtils.globalClusterInstanceHostPatternsRequired": + "Utils.globalClusterInstanceHostPatternsRequired": "The 'globalClusterInstanceHostPatterns' property is required for Global Aurora Databases.", - "GlobalTopologyUtils.invalidPatternFormat": + "Utils.invalidPatternFormat": "Invalid pattern format '%s'. Expected format: 'region:host-pattern' (e.g., 'us-east-1:?.cluster-xyz.us-east-1.rds.amazonaws.com').", - "GlobalAuroraTopologyMonitor.cannotFindRegionTemplate": "Cannot find cluster instance template for region '%s'." 
+ "GlobalAuroraTopologyMonitor.cannotFindRegionTemplate": "Cannot find cluster instance template for region '%s'.", + "GlobalAuroraTopologyMonitor.invalidTopologyUtils": "TopologyUtils must implement GdbTopologyUtils for GlobalAuroraTopologyMonitor.", + "GlobalDbFailoverPlugin.missingHomeRegion": + "The 'failoverHomeRegion' property is required when connecting to a Global Aurora Database without a region in the URL.", + "GlobalDbFailoverPlugin.missingInitialHost": "Unable to determine the initial connection host.", + "GlobalDbFailoverPlugin.startFailover": "Starting Global DB failover procedure.", + "GlobalDbFailoverPlugin.isHomeRegion": "Is home region: %s", + "GlobalDbFailoverPlugin.currentFailoverMode": "Current Global DB failover mode: %s", + "GlobalDbFailoverPlugin.failoverElapsed": "Global DB failover elapsed time: %s ms", + "GlobalDbFailoverPlugin.candidateNull": "Candidate host is null for role: %s", + "GlobalDbFailoverPlugin.unableToConnect": "Unable to establish a connection during Global DB failover." 
}; export class Messages { diff --git a/common/lib/utils/rds_url_type.ts b/common/lib/utils/rds_url_type.ts index 46300354..64955089 100644 --- a/common/lib/utils/rds_url_type.ts +++ b/common/lib/utils/rds_url_type.ts @@ -20,6 +20,7 @@ export class RdsUrlType { public static readonly RDS_READER_CLUSTER = new RdsUrlType(true, true, true); public static readonly RDS_CUSTOM_CLUSTER = new RdsUrlType(true, false, true); public static readonly RDS_PROXY = new RdsUrlType(true, false, true); + public static readonly RDS_PROXY_ENDPOINT = new RdsUrlType(true, false, true); public static readonly RDS_INSTANCE = new RdsUrlType(true, false, true); public static readonly RDS_AURORA_LIMITLESS_DB_SHARD_GROUP = new RdsUrlType(true, false, true); public static readonly RDS_GLOBAL_WRITER_CLUSTER = new RdsUrlType(true, true, false); diff --git a/common/lib/utils/rds_utils.ts b/common/lib/utils/rds_utils.ts index 66543349..95c894d7 100644 --- a/common/lib/utils/rds_utils.ts +++ b/common/lib/utils/rds_utils.ts @@ -22,12 +22,13 @@ export class RdsUtils { // can be found at // https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html // - // Details how to use RDS Proxy endpoints can be found at + // Details how to use RDS Proxy endpoints can be found at // https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-proxy-endpoints.html // - // Values like "<...>" depend on particular Aurora cluster. + // Values like "<...>" depend on particular Aurora cluster. // For example: "" // + // // Cluster (Writer) Endpoint: .cluster-..rds.amazonaws.com // Example: test-postgres.cluster-123456789012.us-east-2.rds.amazonaws.com // @@ -41,7 +42,10 @@ export class RdsUtils { // Example: test-postgres-instance-1.123456789012.us-east-2.rds.amazonaws.com // // + // // Similar endpoints for China regions have different structure and are presented below. 
+ // https://docs.amazonaws.cn/en_us/aws/latest/userguide/endpoints-Ningxia.html + // https://docs.amazonaws.cn/en_us/aws/latest/userguide/endpoints-Beijing.html // // Cluster (Writer) Endpoint: .cluster-.rds..amazonaws.com.cn // Example: test-postgres.cluster-123456789012.rds.cn-northwest-1.amazonaws.com.cn @@ -59,52 +63,51 @@ export class RdsUtils { // Governmental endpoints // https://aws.amazon.com/compliance/fips/#FIPS_Endpoints_by_Service // https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/model/Region.html - + // + // + // Aurora Global Database // https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.Aurora_Fea_Regions_DB-eng.Feature.GlobalDatabase.html + // Global Database Endpoint: .global-.global.rds.amazonaws.com + // Example: test-global-db-name.global-123456789012.global.rds.amazonaws.com + // + // + // RDS Proxy + // RDS Proxy Endpoint: .proxy-..rds.amazonaws.com + // Example: test-rds-proxy-name.proxy-123456789012.us-east-2.rds.amazonaws.com + // + // RDS Proxy Custom Endpoint: .endpoint.proxy-..rds.amazonaws.com + // Example: test-custom-endpoint-name.endpoint.proxy-123456789012.us-east-2.rds.amazonaws.com + private static readonly AURORA_GLOBAL_WRITER_DNS_PATTERN = /^(?.+)\.(?global-)?(?[a-zA-Z0-9]+\.global\.rds\.amazonaws\.com\.?)$/i; private static readonly AURORA_DNS_PATTERN = - /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com)$/i; - private static readonly AURORA_INSTANCE_PATTERN = /^(?.+)\.(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com)$/i; + /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.(rds|rds-fips)\.amazonaws\.(com|au|eu|uk)\.?)$/i; private static readonly AURORA_CLUSTER_PATTERN = - /^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com)$/i; - private static readonly AURORA_CUSTOM_CLUSTER_PATTERN = - 
/^(?.+)\.(?cluster-custom-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com)$/i; + /^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.(rds|rds-fips)\.amazonaws\.(com|au|eu|uk)\.?)$/i; private static readonly AURORA_LIMITLESS_CLUSTER_PATTERN = - /^(?.+)\.(?shardgrp-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.(amazonaws\.com(\.cn)?|sc2s\.sgov\.gov|c2s\.ic\.gov))$/i; - private static readonly AURORA_PROXY_DNS_PATTERN = - /^(?.+)\.(?proxy-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com)$/i; + /^(?.+)\.(?shardgrp-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.(rds|rds-fips)\.(amazonaws\.com\.?|amazonaws\.eu\.?|amazonaws\.au\.?|amazonaws\.uk\.?|amazonaws\.com\.cn\.?|sc2s\.sgov\.gov\.?|c2s\.ic\.gov\.?))$/i; private static readonly AURORA_CHINA_DNS_PATTERN = - /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn)$/i; + /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.(rds|rds-fips)\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn\.?)$/i; private static readonly AURORA_OLD_CHINA_DNS_PATTERN = - /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_CHINA_INSTANCE_PATTERN = - /^(?.+)\.(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_OLD_CHINA_INSTANCE_PATTERN = - /^(?.+)\.(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com\.cn)$/i; + /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.(rds|rds-fips)\.amazonaws\.com\.cn\.?)$/i; private static readonly AURORA_CHINA_CLUSTER_PATTERN = - /^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_CHINA_LIMITLESS_CLUSTER_PATTERN = - /^(?.+)\.(?shardgrp-)?(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn)$/i; + 
/^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.(rds|rds-fips)\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn\.?)$/i; private static readonly AURORA_OLD_CHINA_CLUSTER_PATTERN = - /^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_OLD_CHINA_LIMITLESS_CLUSTER_PATTERN = - /^(?.+)\.(?shardgrp-)?(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_CHINA_CUSTOM_CLUSTER_PATTERN = - /^(?.+)\.(?cluster-custom-)+(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_OLD_CHINA_CUSTOM_CLUSTER_PATTERN = - /^(?.+)\.(?cluster-custom-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_CHINA_PROXY_DNS_PATTERN = - /^(?.+)\.(?proxy-)+(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-])+\.amazonaws\.com\.cn)$/i; - private static readonly AURORA_OLD_CHINA_PROXY_DNS_PATTERN = - /^(?.+)\.(?proxy-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-])+\.rds\.amazonaws\.com\.cn)$/i; - + /^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.(rds|rds-fips)\.amazonaws\.com\.cn\.?)$/i; private static readonly AURORA_GOV_DNS_PATTERN = - /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.(amazonaws\.com|c2s\.ic\.gov|sc2s\.sgov\.gov))$/i; + /^(?.+)\.(?proxy-|cluster-|cluster-ro-|cluster-custom-|shardgrp-)?(?[a-zA-Z0-9]+\.(rds|rds-fips)\.(?[a-zA-Z0-9-]+)\.(amazonaws\.com\.?|c2s\.ic\.gov\.?|sc2s\.sgov\.gov\.?))$/i; private static readonly AURORA_GOV_CLUSTER_PATTERN = - /^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.(amazonaws\.com|c2s\.ic\.gov|sc2s\.sgov\.gov))$/i; + /^(?.+)\.(?cluster-|cluster-ro-)+(?[a-zA-Z0-9]+\.(rds|rds-fips)\.(?[a-zA-Z0-9-]+)\.(amazonaws\.com\.?|c2s\.ic\.gov\.?|sc2s\.sgov\.gov\.?))$/i; + + // RDS Proxy Custom Endpoint: .endpoint.proxy-..rds.amazonaws.com + private static readonly RDS_PROXY_ENDPOINT_DNS_PATTERN = + 
/^(?.+)\.endpoint\.(?proxy-)?(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com\.?)$/i; + private static readonly RDS_PROXY_ENDPOINT_CHINA_DNS_PATTERN = + /^(?.+)\.endpoint\.(?proxy-)+(?[a-zA-Z0-9]+\.rds\.(?[a-zA-Z0-9-]+)\.amazonaws\.com\.cn\.?)$/i; + private static readonly RDS_PROXY_ENDPOINT_OLD_CHINA_DNS_PATTERN = + /^(?.+)\.endpoint\.(?proxy-)?(?[a-zA-Z0-9]+\.(?[a-zA-Z0-9-]+)\.rds\.amazonaws\.com\.cn\.?)$/i; private static readonly ELB_PATTERN = /^(?.+)\.elb\.((?[a-zA-Z0-9-]+)\.amazonaws\.com)$/i; private static readonly IP_V4 = @@ -121,20 +124,24 @@ export class RdsUtils { private static readonly cachedPatterns = new Map(); private static readonly cachedDnsPatterns = new Map(); + private static prepareHostFunc?: (host: string) => string; public isRdsClusterDns(host: string): boolean { - const dnsGroup = this.getDnsGroup(host); + const preparedHost = RdsUtils.getPreparedHost(host); + const dnsGroup = this.getDnsGroup(preparedHost); return equalsIgnoreCase(dnsGroup, "cluster-") || equalsIgnoreCase(dnsGroup, "cluster-ro-"); } public isRdsCustomClusterDns(host: string): boolean { - const dnsGroup = this.getDnsGroup(host); + const preparedHost = RdsUtils.getPreparedHost(host); + const dnsGroup = this.getDnsGroup(preparedHost); return equalsIgnoreCase(dnsGroup, "cluster-custom-"); } public isRdsDns(host: string): boolean { + const preparedHost = RdsUtils.getPreparedHost(host); const matcher = this.cacheMatcher( - host, + preparedHost, RdsUtils.AURORA_DNS_PATTERN, RdsUtils.AURORA_CHINA_DNS_PATTERN, RdsUtils.AURORA_OLD_CHINA_DNS_PATTERN, @@ -143,24 +150,46 @@ export class RdsUtils { const group = this.getRegexGroup(matcher, RdsUtils.DNS_GROUP); if (group) { - RdsUtils.cachedDnsPatterns.set(host, group); + RdsUtils.cachedDnsPatterns.set(preparedHost, group); } return matcher != null; } public isRdsInstance(host: string): boolean { - return !this.getDnsGroup(host) && this.isRdsDns(host); + const preparedHost = RdsUtils.getPreparedHost(host); + return 
!this.getDnsGroup(preparedHost) && this.isRdsDns(preparedHost); } isRdsProxyDns(host: string) { - const dnsGroup = this.getDnsGroup(host); + const preparedHost = RdsUtils.getPreparedHost(host); + const dnsGroup = this.getDnsGroup(preparedHost); return dnsGroup && dnsGroup.startsWith("proxy-"); } + isRdsProxyEndpointDns(host: string): boolean { + if (!host) { + return false; + } + + const preparedHost = RdsUtils.getPreparedHost(host); + const matcher = this.cacheMatcher( + preparedHost, + RdsUtils.RDS_PROXY_ENDPOINT_DNS_PATTERN, + RdsUtils.RDS_PROXY_ENDPOINT_CHINA_DNS_PATTERN, + RdsUtils.RDS_PROXY_ENDPOINT_OLD_CHINA_DNS_PATTERN + ); + if (this.getRegexGroup(matcher, RdsUtils.DNS_GROUP) !== null) { + return this.getRegexGroup(matcher, RdsUtils.INSTANCE_GROUP) !== null; + } + + return false; + } + getRdsClusterId(host: string): string | null { + const preparedHost = RdsUtils.getPreparedHost(host); const matcher = this.cacheMatcher( - host, + preparedHost, RdsUtils.AURORA_DNS_PATTERN, RdsUtils.AURORA_CHINA_DNS_PATTERN, RdsUtils.AURORA_OLD_CHINA_DNS_PATTERN, @@ -179,8 +208,9 @@ export class RdsUtils { return null; } + const preparedHost = RdsUtils.getPreparedHost(host); const matcher = this.cacheMatcher( - host, + preparedHost, RdsUtils.AURORA_DNS_PATTERN, RdsUtils.AURORA_CHINA_DNS_PATTERN, RdsUtils.AURORA_OLD_CHINA_DNS_PATTERN, @@ -198,8 +228,9 @@ export class RdsUtils { return "?"; } + const preparedHost = RdsUtils.getPreparedHost(host); const matcher = this.cacheMatcher( - host, + preparedHost, RdsUtils.AURORA_DNS_PATTERN, RdsUtils.AURORA_CHINA_DNS_PATTERN, RdsUtils.AURORA_OLD_CHINA_DNS_PATTERN, @@ -214,8 +245,9 @@ export class RdsUtils { return null; } + const preparedHost = RdsUtils.getPreparedHost(host); const matcher = this.cacheMatcher( - host, + preparedHost, RdsUtils.AURORA_DNS_PATTERN, RdsUtils.AURORA_CHINA_DNS_PATTERN, RdsUtils.AURORA_OLD_CHINA_DNS_PATTERN, @@ -227,7 +259,7 @@ export class RdsUtils { return group; } - const elbMatcher = 
host.match(RdsUtils.ELB_PATTERN); + const elbMatcher = preparedHost.match(RdsUtils.ELB_PATTERN); if (elbMatcher && elbMatcher.length > 0) { return this.getRegexGroup(elbMatcher, RdsUtils.REGION_GROUP); } @@ -235,23 +267,36 @@ export class RdsUtils { return null; } + public isSameRegion(host1: string | null, host2: string | null): boolean { + if (!host1 || !host2) { + return false; + } + const host1Region = this.getRdsRegion(host1); + const host2Region = this.getRdsRegion(host2); + return host1Region !== null && equalsIgnoreCase(host1Region, host2Region); + } + public isGlobalDbWriterClusterDns(host: string): boolean { - const dnsGroup = this.getDnsGroup(host); + const preparedHost = RdsUtils.getPreparedHost(host); + const dnsGroup = this.getDnsGroup(preparedHost); return equalsIgnoreCase(dnsGroup, "global-"); } public isWriterClusterDns(host: string): boolean { - const dnsGroup = this.getDnsGroup(host); + const preparedHost = RdsUtils.getPreparedHost(host); + const dnsGroup = this.getDnsGroup(preparedHost); return equalsIgnoreCase(dnsGroup, "cluster-"); } public isReaderClusterDns(host: string): boolean { - const dnsGroup = this.getDnsGroup(host); + const preparedHost = RdsUtils.getPreparedHost(host); + const dnsGroup = this.getDnsGroup(preparedHost); return equalsIgnoreCase(dnsGroup, "cluster-ro-"); } public isLimitlessDbShardGroupDns(host: string): boolean { - const dnsGroup = this.getDnsGroup(host); + const preparedHost = RdsUtils.getPreparedHost(host); + const dnsGroup = this.getDnsGroup(preparedHost); if (!dnsGroup) { return false; } @@ -263,25 +308,26 @@ export class RdsUtils { return null; } - const matcher = host.match(RdsUtils.AURORA_CLUSTER_PATTERN); + const preparedHost = RdsUtils.getPreparedHost(host); + const matcher = preparedHost.match(RdsUtils.AURORA_CLUSTER_PATTERN); if (matcher) { - return host.replace(RdsUtils.AURORA_CLUSTER_PATTERN, "$.cluster-$"); + return preparedHost.replace(RdsUtils.AURORA_CLUSTER_PATTERN, "$.cluster-$"); } - const 
limitlessMatcher = host.match(RdsUtils.AURORA_LIMITLESS_CLUSTER_PATTERN); + const limitlessMatcher = preparedHost.match(RdsUtils.AURORA_LIMITLESS_CLUSTER_PATTERN); if (limitlessMatcher) { - return host.replace(RdsUtils.AURORA_LIMITLESS_CLUSTER_PATTERN, "$.cluster-$"); + return preparedHost.replace(RdsUtils.AURORA_LIMITLESS_CLUSTER_PATTERN, "$.cluster-$"); } - const chinaMatcher = host.match(RdsUtils.AURORA_CHINA_CLUSTER_PATTERN); + const chinaMatcher = preparedHost.match(RdsUtils.AURORA_CHINA_CLUSTER_PATTERN); if (chinaMatcher) { - return host.replace(RdsUtils.AURORA_CHINA_CLUSTER_PATTERN, "$.cluster-$"); + return preparedHost.replace(RdsUtils.AURORA_CHINA_CLUSTER_PATTERN, "$.cluster-$"); } - const oldChinaMatcher = host.match(RdsUtils.AURORA_OLD_CHINA_CLUSTER_PATTERN); + const oldChinaMatcher = preparedHost.match(RdsUtils.AURORA_OLD_CHINA_CLUSTER_PATTERN); if (oldChinaMatcher) { - return host.replace(RdsUtils.AURORA_OLD_CHINA_CLUSTER_PATTERN, "$.cluster-$"); + return preparedHost.replace(RdsUtils.AURORA_OLD_CHINA_CLUSTER_PATTERN, "$.cluster-$"); } - const govMatcher = host.match(RdsUtils.AURORA_GOV_CLUSTER_PATTERN); + const govMatcher = preparedHost.match(RdsUtils.AURORA_GOV_CLUSTER_PATTERN); if (govMatcher) { - return host.replace(RdsUtils.AURORA_GOV_CLUSTER_PATTERN, "$.cluster-$"); + return preparedHost.replace(RdsUtils.AURORA_GOV_CLUSTER_PATTERN, "$.cluster-$"); } return null; } @@ -307,21 +353,24 @@ export class RdsUtils { return RdsUrlType.OTHER; } - if (this.isIPv4(host) || this.isIPv6(host)) { + const preparedHost = RdsUtils.getPreparedHost(host); + if (this.isIPv4(preparedHost) || this.isIPv6(preparedHost)) { return RdsUrlType.IP_ADDRESS; - } else if (this.isGlobalDbWriterClusterDns(host)) { + } else if (this.isGlobalDbWriterClusterDns(preparedHost)) { return RdsUrlType.RDS_GLOBAL_WRITER_CLUSTER; - } else if (this.isWriterClusterDns(host)) { + } else if (this.isWriterClusterDns(preparedHost)) { return RdsUrlType.RDS_WRITER_CLUSTER; - } else if 
(this.isReaderClusterDns(host)) { + } else if (this.isReaderClusterDns(preparedHost)) { return RdsUrlType.RDS_READER_CLUSTER; - } else if (this.isRdsCustomClusterDns(host)) { + } else if (this.isRdsCustomClusterDns(preparedHost)) { return RdsUrlType.RDS_CUSTOM_CLUSTER; - } else if (this.isLimitlessDbShardGroupDns(host)) { + } else if (this.isLimitlessDbShardGroupDns(preparedHost)) { return RdsUrlType.RDS_AURORA_LIMITLESS_DB_SHARD_GROUP; - } else if (this.isRdsProxyDns(host)) { + } else if (this.isRdsProxyDns(preparedHost)) { return RdsUrlType.RDS_PROXY; - } else if (this.isRdsDns(host)) { + } else if (this.isRdsProxyEndpointDns(preparedHost)) { + return RdsUrlType.RDS_PROXY_ENDPOINT; + } else if (this.isRdsDns(preparedHost)) { return RdsUrlType.RDS_INSTANCE; } else { // ELB URLs will also be classified as other @@ -330,23 +379,27 @@ export class RdsUtils { } public isGreenInstance(host: string) { - return host && RdsUtils.BG_GREEN_HOST_PATTERN.test(host); + const preparedHost = RdsUtils.getPreparedHost(host); + return preparedHost && RdsUtils.BG_GREEN_HOST_PATTERN.test(preparedHost); } public isOldInstance(host: string): boolean { - return !!host && RdsUtils.BG_OLD_HOST_PATTERN.test(host); + const preparedHost = RdsUtils.getPreparedHost(host); + return !!preparedHost && RdsUtils.BG_OLD_HOST_PATTERN.test(preparedHost); } public isNotOldInstance(host: string): boolean { if (!host) { return true; } - return !RdsUtils.BG_OLD_HOST_PATTERN.test(host); + const preparedHost = RdsUtils.getPreparedHost(host); + return !RdsUtils.BG_OLD_HOST_PATTERN.test(preparedHost); } // Verify that provided host is a blue host name and contains neither green prefix nor old prefix. 
public isNotGreenAndOldPrefixInstance(host: string): boolean { - return !!host && !RdsUtils.BG_GREEN_HOST_PATTERN.test(host) && !RdsUtils.BG_OLD_HOST_PATTERN.test(host); + const preparedHost = RdsUtils.getPreparedHost(host); + return !!preparedHost && !RdsUtils.BG_GREEN_HOST_PATTERN.test(preparedHost) && !RdsUtils.BG_OLD_HOST_PATTERN.test(preparedHost); } public removeGreenInstancePrefix(host: string): string { @@ -354,7 +407,8 @@ export class RdsUtils { return host; } - const matcher = host.match(RdsUtils.BG_GREEN_HOST_PATTERN); + const preparedHost = RdsUtils.getPreparedHost(host); + const matcher = preparedHost.match(RdsUtils.BG_GREEN_HOST_PATTERN); if (!matcher || matcher.length === 0) { return host; } @@ -427,4 +481,20 @@ export class RdsUtils { RdsUtils.cachedPatterns.clear(); RdsUtils.cachedDnsPatterns.clear(); } + + static setPrepareHostFunc(func?: (host: string) => string) { + RdsUtils.prepareHostFunc = func; + } + + static resetPrepareHostFunc() { + RdsUtils.prepareHostFunc = undefined; + } + + private static getPreparedHost(host: string): string { + const func = RdsUtils.prepareHostFunc; + if (!func) { + return host; + } + return func(host) ?? 
host; + } } diff --git a/common/lib/utils/utils.ts b/common/lib/utils/utils.ts index 00851271..908949b4 100644 --- a/common/lib/utils/utils.ts +++ b/common/lib/utils/utils.ts @@ -142,3 +142,40 @@ export class Pair { return this._right; } } + +export function parseInstanceTemplates( + instanceTemplatesString: string | null, + hostValidator: (hostPattern: string) => void, + hostInfoBuilderFunc: () => { withHost(host: string): { build(): HostInfo } } +): Map { + if (!instanceTemplatesString) { + throw new AwsWrapperError(Messages.get("Utils.globalClusterInstanceHostPatternsRequired")); + } + + const instanceTemplates = new Map(); + const patterns = instanceTemplatesString.split(","); + + for (const pattern of patterns) { + const trimmedPattern = pattern.trim(); + const colonIndex = trimmedPattern.indexOf(":"); + if (colonIndex === -1) { + throw new AwsWrapperError(Messages.get("Utils.invalidPatternFormat", trimmedPattern)); + } + + const region = trimmedPattern.substring(0, colonIndex).trim(); + const hostPattern = trimmedPattern.substring(colonIndex + 1).trim(); + + if (!region || !hostPattern) { + throw new AwsWrapperError(Messages.get("Utils.invalidPatternFormat", trimmedPattern)); + } + + hostValidator(hostPattern); + + const hostInfo = hostInfoBuilderFunc().withHost(hostPattern).build(); + instanceTemplates.set(region, hostInfo); + } + + logger.debug(`Detected Global Database patterns: ${JSON.stringify(Array.from(instanceTemplates.entries()))}`); + + return instanceTemplates; +} diff --git a/common/lib/wrapper_property.ts b/common/lib/wrapper_property.ts index 40f73988..ad9d2589 100644 --- a/common/lib/wrapper_property.ts +++ b/common/lib/wrapper_property.ts @@ -18,16 +18,19 @@ import { ConnectionProvider } from "./connection_provider"; import { DatabaseDialect } from "./database_dialect/database_dialect"; import { ClusterTopologyMonitorImpl } from "./host_list_provider/monitoring/cluster_topology_monitor"; import { BlueGreenStatusProvider } from 
"./plugins/bluegreen/blue_green_status_provider"; +import { AwsWrapperError } from "./utils/errors"; export class WrapperProperty { name: string; description: string; defaultValue: any; + allowedValues?: T[]; - constructor(name: string, description: string, defaultValue?: any) { + constructor(name: string, description: string, defaultValue?: any, allowedValues?: T[]) { this.name = name; this.description = description; this.defaultValue = defaultValue; + this.allowedValues = allowedValues; } get(props: Map): T { @@ -36,10 +39,25 @@ export class WrapperProperty { return this.defaultValue; } + if (val != null && this.allowedValues?.length > 0) { + if (!this.allowedValues.includes(val)) { + throw new AwsWrapperError( + `Invalid value '${val}' for property '${this.name}'. Allowed values: ${this.allowedValues.join(", ")}` + ); + } + } + return val; } set(props: Map, val: T) { + if (val != null && this.allowedValues?.length > 0) { + if (!this.allowedValues.includes(val)) { + throw new AwsWrapperError( + `Invalid value '${val}' for property '${this.name}'. 
Allowed values: ${this.allowedValues.join(", ")}` + ); + } + } props.set(this.name, val); } } @@ -210,6 +228,38 @@ export class WrapperProperties { ); static readonly FAILOVER_MODE = new WrapperProperty("failoverMode", "Set host role to follow during failover.", ""); + static readonly FAILOVER_HOME_REGION = new WrapperProperty("failoverHomeRegion", "Set home region for failover.", null); + + static readonly ACTIVE_HOME_FAILOVER_MODE = new WrapperProperty( + "activeHomeFailoverMode", + "Set host role to follow during failover when GDB primary region is in home region.", + null, + [ + "strict-writer", + "strict-home-reader", + "strict-out-of-home-reader", + "strict-any-reader", + "home-reader-or-writer", + "out-of-home-reader-or-writer", + "any-reader-or-writer" + ] + ); + + static readonly INACTIVE_HOME_FAILOVER_MODE = new WrapperProperty( + "inactiveHomeFailoverMode", + "Set host role to follow during failover when GDB primary region is not in home region.", + null, + [ + "strict-writer", + "strict-home-reader", + "strict-out-of-home-reader", + "strict-any-reader", + "home-reader-or-writer", + "out-of-home-reader-or-writer", + "any-reader-or-writer" + ] + ); + static readonly FAILOVER_READER_HOST_SELECTOR_STRATEGY = new WrapperProperty( "failoverReaderHostSelectorStrategy", "The strategy that should be used to select a new reader host while opening a new connection.", @@ -244,6 +294,16 @@ export class WrapperProperties { "clusters. Otherwise, if unspecified, the pattern will be automatically created for AWS RDS clusters." ); + static readonly GLOBAL_CLUSTER_INSTANCE_HOST_PATTERNS = new WrapperProperty( + "globalClusterInstanceHostPatterns", + "Comma-separated list of the cluster instance DNS patterns that will be used to " + + "build complete instance endpoints. " + + 'A "?" character in these patterns should be used as a placeholder for cluster instance names. ' + + "This parameter is required for Global Aurora Databases. 
" + + "Each region in the Global Aurora Database should be specified in the list. " + + "Format: region1:pattern1,region2:pattern2" + ); + static readonly SINGLE_WRITER_CONNECTION_STRING = new WrapperProperty( "singleWriterConnectionString", "Set to true if you are providing a connection string with multiple comma-delimited hosts and your cluster has only one writer. The writer must be the first host in the connection string", @@ -477,6 +537,32 @@ export class WrapperProperties { "Default value 0 means the Wrapper will keep reusing the same cached reader connection.", 0 ); + static readonly SKIP_INACTIVE_WRITER_CLUSTER_CHECK = new WrapperProperty( + "skipInactiveWriterClusterEndpointCheck", + "Allows to avoid connection check for inactive cluster writer endpoint.", + false + ); + + static readonly INACTIVE_CLUSTER_WRITER_SUBSTITUTION_ROLE = new WrapperProperty( + "inactiveClusterWriterEndpointSubstitutionRole", + "Defines whether or not the inactive cluster writer endpoint in the initial connection URL should be replaced with a writer instance URL from the topology info when available.", + "writer", + ["writer", "none"] + ); + + static readonly VERIFY_OPENED_CONNECTION_ROLE = new WrapperProperty( + "verifyOpenedConnectionType", + "Defines whether an opened connection should be verified to be a writer or reader, or if no role verification should be performed.", + null, + ["writer", "reader", "none"] + ); + + static readonly VERIFY_INACTIVE_CLUSTER_WRITER_CONNECTION_ROLE = new WrapperProperty( + "verifyInactiveClusterWriterEndpointConnectionType", + "Defines whether inactive cluster writer connection should be verified to be a writer, or if no role verification should be performed.", + "writer", + ["writer", "none"] + ); private static readonly PREFIXES = [ WrapperProperties.MONITORING_PROPERTY_PREFIX, diff --git a/mysql/lib/client.ts b/mysql/lib/client.ts index 5374d20e..5ae0e284 100644 --- a/mysql/lib/client.ts +++ b/mysql/lib/client.ts @@ -42,12 +42,14 @@ import { 
MySQL2DriverDialect } from "./dialect/mysql2_driver_dialect"; import { isDialectTopologyAware } from "../../common/lib/utils/utils"; import { MySQLClient, MySQLPoolClient } from "./mysql_client"; import { DriverConnectionProvider } from "../../common/lib/driver_connection_provider"; +import { GlobalAuroraMySQLDatabaseDialect } from "./dialect/global_aurora_mysql_database_dialect"; class BaseAwsMySQLClient extends AwsClient implements MySQLClient { private static readonly knownDialectsByCode: Map = new Map([ [DatabaseDialectCodes.MYSQL, new MySQLDatabaseDialect()], [DatabaseDialectCodes.RDS_MYSQL, new RdsMySQLDatabaseDialect()], [DatabaseDialectCodes.AURORA_MYSQL, new AuroraMySQLDatabaseDialect()], + [DatabaseDialectCodes.GLOBAL_AURORA_MYSQL, new GlobalAuroraMySQLDatabaseDialect()], [DatabaseDialectCodes.RDS_MULTI_AZ_MYSQL, new RdsMultiAZClusterMySQLDatabaseDialect()] ]); diff --git a/mysql/lib/dialect/aurora_mysql_database_dialect.ts b/mysql/lib/dialect/aurora_mysql_database_dialect.ts index a7eddfab..5dec3d14 100644 --- a/mysql/lib/dialect/aurora_mysql_database_dialect.ts +++ b/mysql/lib/dialect/aurora_mysql_database_dialect.ts @@ -15,18 +15,15 @@ */ import { MySQLDatabaseDialect } from "./mysql_database_dialect"; -import { HostListProviderService } from "../../../common/lib/host_list_provider_service"; import { HostListProvider } from "../../../common/lib/host_list_provider/host_list_provider"; import { RdsHostListProvider } from "../../../common/lib/host_list_provider/rds_host_list_provider"; import { TopologyAwareDatabaseDialect } from "../../../common/lib/database_dialect/topology_aware_database_dialect"; import { HostRole } from "../../../common/lib/host_role"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; -import { WrapperProperties } from "../../../common/lib/wrapper_property"; -import { MonitoringRdsHostListProvider } from 
"../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; -import { PluginService } from "../../../common/lib/plugin_service"; import { BlueGreenDialect, BlueGreenResult } from "../../../common/lib/database_dialect/blue_green_dialect"; -import { TopologyQueryResult, TopologyUtils } from "../../../common/lib/host_list_provider/topology_utils"; +import { TopologyQueryResult } from "../../../common/lib/host_list_provider/topology_utils"; +import { AuroraTopologyUtils } from "../../../common/lib/host_list_provider/aurora_topology_utils"; import { FullServicesContainer } from "../../../common/lib/utils/full_services_container"; export class AuroraMySQLDatabaseDialect extends MySQLDatabaseDialect implements TopologyAwareDatabaseDialect, BlueGreenDialect { @@ -50,7 +47,7 @@ export class AuroraMySQLDatabaseDialect extends MySQLDatabaseDialect implements "SELECT 1 AS tmp FROM information_schema.tables WHERE table_schema = 'mysql' AND table_name = 'rds_topology'"; getHostListProvider(props: Map, originalUrl: string, servicesContainer: FullServicesContainer): HostListProvider { - const topologyUtils: TopologyUtils = new TopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); + const topologyUtils = new AuroraTopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); return new RdsHostListProvider(props, originalUrl, topologyUtils, servicesContainer); } @@ -132,7 +129,7 @@ export class AuroraMySQLDatabaseDialect extends MySQLDatabaseDialect implements } getDialectUpdateCandidates(): string[] { - return [DatabaseDialectCodes.RDS_MULTI_AZ_MYSQL]; + return [DatabaseDialectCodes.GLOBAL_AURORA_MYSQL, DatabaseDialectCodes.RDS_MULTI_AZ_MYSQL]; } async isBlueGreenStatusAvailable(clientWrapper: ClientWrapper): Promise { diff --git a/mysql/lib/dialect/global_aurora_mysql_database_dialect.ts b/mysql/lib/dialect/global_aurora_mysql_database_dialect.ts index ff90ae00..71fe8bb2 100644 --- 
a/mysql/lib/dialect/global_aurora_mysql_database_dialect.ts +++ b/mysql/lib/dialect/global_aurora_mysql_database_dialect.ts @@ -18,6 +18,10 @@ import { AuroraMySQLDatabaseDialect } from "./aurora_mysql_database_dialect"; import { GlobalAuroraTopologyDialect } from "../../../common/lib/database_dialect/topology_aware_database_dialect"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { TopologyQueryResult } from "../../../common/lib/host_list_provider/topology_utils"; +import { FullServicesContainer } from "../../../common/lib/utils/full_services_container"; +import { HostListProvider } from "../../../common/lib/host_list_provider/host_list_provider"; +import { GlobalAuroraHostListProvider } from "../../../common/lib/host_list_provider/global_aurora_host_list_provider"; +import { GlobalTopologyUtils } from "../../../common/lib/host_list_provider/global_topology_utils"; export class GlobalAuroraMySQLDatabaseDialect extends AuroraMySQLDatabaseDialect implements GlobalAuroraTopologyDialect { private static readonly GLOBAL_STATUS_TABLE_EXISTS_QUERY = @@ -29,8 +33,8 @@ export class GlobalAuroraMySQLDatabaseDialect extends AuroraMySQLDatabaseDialect " upper(table_schema) = 'INFORMATION_SCHEMA' AND upper(table_name) = 'AURORA_GLOBAL_DB_INSTANCE_STATUS'"; private static readonly GLOBAL_TOPOLOGY_QUERY = - "SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END AS IS_WRITER, " + - "VISIBILITY_LAG_IN_MSEC, AWS_REGION " + + "SELECT server_id, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END AS is_writer, " + + "visibility_lag_in_msec, aws_region " + "FROM information_schema.aurora_global_db_instance_status"; private static readonly REGION_COUNT_QUERY = "SELECT count(1) FROM information_schema.aurora_global_db_status"; @@ -68,7 +72,14 @@ export class GlobalAuroraMySQLDatabaseDialect extends AuroraMySQLDatabaseDialect return []; } - // TODO: implement GetHostListProvider once GDBHostListProvider is 
implemented + getHostListProvider(props: Map, originalUrl: string, servicesContainer: FullServicesContainer): HostListProvider { + return new GlobalAuroraHostListProvider( + props, + originalUrl, + new GlobalTopologyUtils(this, servicesContainer.getPluginService().getHostInfoBuilder()), + servicesContainer + ); + } async queryForTopology(targetClient: ClientWrapper): Promise { const res = await targetClient.query(GlobalAuroraMySQLDatabaseDialect.GLOBAL_TOPOLOGY_QUERY); @@ -77,7 +88,7 @@ export class GlobalAuroraMySQLDatabaseDialect extends AuroraMySQLDatabaseDialect rows.forEach((row) => { const hostName: string = row["server_id"]; const isWriter: boolean = row["is_writer"]; - const hostLag: number = row["visibility_lag_in_msec"] ?? 0; // visibility_lag_in_msec is nullable. + const hostLag: number = row["visibility_lag_in_msec"] ?? 0; // visibility_lag_in_msec is nullable. const awsRegion: string = row["aws_region"]; const host: TopologyQueryResult = new TopologyQueryResult({ diff --git a/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts b/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts index 4bbe7b35..cb53fb10 100644 --- a/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts +++ b/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts @@ -15,7 +15,6 @@ */ import { MySQLDatabaseDialect } from "./mysql_database_dialect"; -import { HostListProviderService } from "../../../common/lib/host_list_provider_service"; import { HostListProvider } from "../../../common/lib/host_list_provider/host_list_provider"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { HostRole } from "../../../common/lib/host_role"; @@ -24,10 +23,8 @@ import { AwsWrapperError } from "../../../common/lib/utils/errors"; import { TopologyAwareDatabaseDialect } from "../../../common/lib/database_dialect/topology_aware_database_dialect"; import { RdsHostListProvider } from "../../../common/lib/host_list_provider/rds_host_list_provider"; import {
FailoverRestriction } from "../../../common/lib/plugins/failover/failover_restriction"; -import { WrapperProperties } from "../../../common/lib/wrapper_property"; -import { PluginService } from "../../../common/lib/plugin_service"; -import { MonitoringRdsHostListProvider } from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; -import { TopologyQueryResult, TopologyUtils } from "../../../common/lib/host_list_provider/topology_utils"; +import { TopologyQueryResult } from "../../../common/lib/host_list_provider/topology_utils"; +import { AuroraTopologyUtils } from "../../../common/lib/host_list_provider/aurora_topology_utils"; import { FullServicesContainer } from "../../../common/lib/utils/full_services_container"; export class RdsMultiAZClusterMySQLDatabaseDialect extends MySQLDatabaseDialect implements TopologyAwareDatabaseDialect { @@ -73,7 +70,7 @@ export class RdsMultiAZClusterMySQLDatabaseDialect extends MySQLDatabaseDialect } getHostListProvider(props: Map, originalUrl: string, servicesContainer: FullServicesContainer): HostListProvider { - const topologyUtils: TopologyUtils = new TopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); + const topologyUtils = new AuroraTopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); return new RdsHostListProvider(props, originalUrl, topologyUtils, servicesContainer); } diff --git a/pg/lib/client.ts b/pg/lib/client.ts index 4ffab723..77e38978 100644 --- a/pg/lib/client.ts +++ b/pg/lib/client.ts @@ -41,12 +41,14 @@ import { NodePostgresDriverDialect } from "./dialect/node_postgres_driver_dialec import { isDialectTopologyAware } from "../../common/lib/utils/utils"; import { PGClient, PGPoolClient } from "./pg_client"; import { DriverConnectionProvider } from "../../common/lib/driver_connection_provider"; +import { GlobalAuroraPgDatabaseDialect } from "./dialect/global_aurora_pg_database_dialect"; class BaseAwsPgClient 
extends AwsClient implements PGClient { private static readonly knownDialectsByCode: Map = new Map([ [DatabaseDialectCodes.PG, new PgDatabaseDialect()], [DatabaseDialectCodes.RDS_PG, new RdsPgDatabaseDialect()], [DatabaseDialectCodes.AURORA_PG, new AuroraPgDatabaseDialect()], + [DatabaseDialectCodes.GLOBAL_AURORA_PG, new GlobalAuroraPgDatabaseDialect()], [DatabaseDialectCodes.RDS_MULTI_AZ_PG, new RdsMultiAZClusterPgDatabaseDialect()] ]); @@ -82,7 +84,7 @@ class BaseAwsPgClient extends AwsClient implements PGClient { return result; } - isReadOnly(): boolean { + isReadOnly(): boolean | undefined { return this.pluginService.getSessionStateService().getReadOnly(); } @@ -120,7 +122,7 @@ class BaseAwsPgClient extends AwsClient implements PGClient { this.pluginService.getSessionStateService().setTransactionIsolation(level); } - getTransactionIsolation(): TransactionIsolationLevel { + getTransactionIsolation(): TransactionIsolationLevel | undefined { return this.pluginService.getSessionStateService().getTransactionIsolation(); } @@ -147,7 +149,7 @@ class BaseAwsPgClient extends AwsClient implements PGClient { return result; } - getSchema(): string { + getSchema(): string | undefined { return this.pluginService.getSessionStateService().getSchema(); } @@ -399,7 +401,7 @@ export class AwsPgPoolClient implements PGPoolClient { await awsPGPooledConnection.connect(); const res = await awsPGPooledConnection.query(queryTextOrConfig as any, values); await awsPGPooledConnection.end(); - return res; + return res as any; } catch (error: any) { if (!(error instanceof FailoverSuccessError)) { // Release pooled connection. 
diff --git a/pg/lib/dialect/aurora_pg_database_dialect.ts b/pg/lib/dialect/aurora_pg_database_dialect.ts index 55e733ea..403b9912 100644 --- a/pg/lib/dialect/aurora_pg_database_dialect.ts +++ b/pg/lib/dialect/aurora_pg_database_dialect.ts @@ -23,7 +23,8 @@ import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { LimitlessDatabaseDialect } from "../../../common/lib/database_dialect/limitless_database_dialect"; import { BlueGreenDialect, BlueGreenResult } from "../../../common/lib/database_dialect/blue_green_dialect"; -import { TopologyQueryResult, TopologyUtils } from "../../../common/lib/host_list_provider/topology_utils"; +import { TopologyQueryResult } from "../../../common/lib/host_list_provider/topology_utils"; +import { AuroraTopologyUtils } from "../../../common/lib/host_list_provider/aurora_topology_utils"; import { FullServicesContainer } from "../../../common/lib/utils/full_services_container"; export class AuroraPgDatabaseDialect extends PgDatabaseDialect implements TopologyAwareDatabaseDialect, LimitlessDatabaseDialect, BlueGreenDialect { @@ -51,7 +52,7 @@ export class AuroraPgDatabaseDialect extends PgDatabaseDialect implements Topolo private static readonly TOPOLOGY_TABLE_EXIST_QUERY: string = "SELECT pg_catalog.'get_blue_green_fast_switchover_metadata'::regproc"; getHostListProvider(props: Map, originalUrl: string, servicesContainer: FullServicesContainer): HostListProvider { - const topologyUtils: TopologyUtils = new TopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); + const topologyUtils = new AuroraTopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); return new RdsHostListProvider(props, originalUrl, topologyUtils, servicesContainer); } @@ -137,7 +138,7 @@ export class AuroraPgDatabaseDialect extends PgDatabaseDialect implements Topolo } 
getDialectUpdateCandidates(): string[] { - return [DatabaseDialectCodes.RDS_MULTI_AZ_PG]; + return [DatabaseDialectCodes.GLOBAL_AURORA_PG, DatabaseDialectCodes.RDS_MULTI_AZ_PG]; } getLimitlessRoutersQuery(): string { diff --git a/pg/lib/dialect/global_aurora_pg_database_dialect.ts b/pg/lib/dialect/global_aurora_pg_database_dialect.ts index fc718616..c837d380 100644 --- a/pg/lib/dialect/global_aurora_pg_database_dialect.ts +++ b/pg/lib/dialect/global_aurora_pg_database_dialect.ts @@ -18,6 +18,10 @@ import { AuroraPgDatabaseDialect } from "./aurora_pg_database_dialect"; import { GlobalAuroraTopologyDialect } from "../../../common/lib/database_dialect/topology_aware_database_dialect"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { TopologyQueryResult } from "../../../common/lib/host_list_provider/topology_utils"; +import { FullServicesContainer } from "../../../common/lib/utils/full_services_container"; +import { HostListProvider } from "../../../common/lib/host_list_provider/host_list_provider"; +import { GlobalAuroraHostListProvider } from "../../../common/lib/host_list_provider/global_aurora_host_list_provider"; +import { GlobalTopologyUtils } from "../../../common/lib/host_list_provider/global_topology_utils"; export class GlobalAuroraPgDatabaseDialect extends AuroraPgDatabaseDialect implements GlobalAuroraTopologyDialect { private static readonly GLOBAL_STATUS_FUNC_EXISTS_QUERY = "select 'aurora_global_db_status'::regproc"; @@ -77,7 +81,14 @@ export class GlobalAuroraPgDatabaseDialect extends AuroraPgDatabaseDialect imple return []; } - // TODO: implement GetHostListProvider once GDBHostListProvider is implemented + getHostListProvider(props: Map, originalUrl: string, servicesContainer: FullServicesContainer): HostListProvider { + return new GlobalAuroraHostListProvider( + props, + originalUrl, + new GlobalTopologyUtils(this, servicesContainer.getPluginService().getHostInfoBuilder()), + servicesContainer + ); + } async 
queryForTopology(targetClient: ClientWrapper): Promise { const res = await targetClient.queryWithTimeout(GlobalAuroraPgDatabaseDialect.GLOBAL_TOPOLOGY_QUERY); diff --git a/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts b/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts index 87a7ce8e..87eb8623 100644 --- a/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts +++ b/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts @@ -14,7 +14,6 @@ limitations under the License. */ -import { HostListProviderService } from "../../../common/lib/host_list_provider_service"; import { HostListProvider } from "../../../common/lib/host_list_provider/host_list_provider"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { AwsWrapperError, HostRole } from "../../../common/lib"; @@ -24,10 +23,8 @@ import { RdsHostListProvider } from "../../../common/lib/host_list_provider/rds_ import { PgDatabaseDialect } from "./pg_database_dialect"; import { ErrorHandler } from "../../../common/lib/error_handler"; import { MultiAzPgErrorHandler } from "../multi_az_pg_error_handler"; -import { WrapperProperties } from "../../../common/lib/wrapper_property"; -import { PluginService } from "../../../common/lib/plugin_service"; -import { MonitoringRdsHostListProvider } from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; -import { TopologyQueryResult, TopologyUtils } from "../../../common/lib/host_list_provider/topology_utils"; +import { TopologyQueryResult } from "../../../common/lib/host_list_provider/topology_utils"; +import { AuroraTopologyUtils } from "../../../common/lib/host_list_provider/aurora_topology_utils"; import { FullServicesContainer } from "../../../common/lib/utils/full_services_container"; export class RdsMultiAZClusterPgDatabaseDialect extends PgDatabaseDialect implements TopologyAwareDatabaseDialect { @@ -66,7 +63,7 @@ export class RdsMultiAZClusterPgDatabaseDialect extends PgDatabaseDialect implem } getHostListProvider(props: 
Map, originalUrl: string, servicesContainer: FullServicesContainer): HostListProvider { - const topologyUtils: TopologyUtils = new TopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); + const topologyUtils = new AuroraTopologyUtils(this, servicesContainer.getHostListProviderService().getHostInfoBuilder()); return new RdsHostListProvider(props, originalUrl, topologyUtils, servicesContainer); } diff --git a/tests/integration/container/tests/aurora_failover.test.ts b/tests/integration/container/tests/aurora_failover.test.ts deleted file mode 100644 index f580bc1f..00000000 --- a/tests/integration/container/tests/aurora_failover.test.ts +++ /dev/null @@ -1,315 +0,0 @@ -/* - Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"). - You may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -import { TestEnvironment } from "./utils/test_environment"; -import { DriverHelper } from "./utils/driver_helper"; -import { AuroraTestUtility } from "./utils/aurora_test_utility"; -import { FailoverSuccessError, PluginManager, TransactionIsolationLevel, TransactionResolutionUnknownError } from "../../../../index"; -import { DatabaseEngine } from "./utils/database_engine"; -import { QueryResult } from "pg"; -import { ProxyHelper } from "./utils/proxy_helper"; -import { logger } from "../../../../common/logutils"; -import { features, instanceCount } from "./config"; -import { TestEnvironmentFeatures } from "./utils/test_environment_features"; -import { RdsUtils } from "../../../../common/lib/utils/rds_utils"; - -const itIf = - features.includes(TestEnvironmentFeatures.FAILOVER_SUPPORTED) && - !features.includes(TestEnvironmentFeatures.PERFORMANCE) && - !features.includes(TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY) && - instanceCount >= 2 - ? it - : it.skip; -const itIfTwoInstance = instanceCount == 2 ? itIf : it.skip; -const itIfThreeInstanceAuroraCluster = instanceCount == 3 && !features.includes(TestEnvironmentFeatures.RDS_MULTI_AZ_SUPPORTED) ? it : it.skip; - -let env: TestEnvironment; -let driver; -let client: any; -let secondaryClient: any; -let initClientFunc: (props: any) => any; - -let auroraTestUtility: AuroraTestUtility; - -async function initDefaultConfig(host: string, port: number, connectToProxy: boolean): Promise { - let config: any = { - user: env.databaseInfo.username, - host: host, - database: env.databaseInfo.defaultDbName, - password: env.databaseInfo.password, - port: port, - plugins: "failover", - failoverTimeoutMs: 250000, - enableTelemetry: true, - telemetryTracesBackend: "OTLP", - telemetryMetricsBackend: "OTLP" - }; - if (connectToProxy) { - config["clusterInstanceHostPattern"] = "?." 
+ env.proxyDatabaseInfo.instanceEndpointSuffix; - } - config = DriverHelper.addDriverSpecificConfiguration(config, env.engine); - return config; -} - -async function initConfigWithEFM2(host: string, port: number, connectToProxy: boolean): Promise { - const config: any = await initDefaultConfig(host, port, connectToProxy); - config["plugins"] = "failover,efm2"; - config["failoverTimeoutMs"] = 20000; - config["failureDetectionCount"] = 2; - config["failureDetectionInterval"] = 1000; - config["failureDetectionTime"] = 2000; - config["connectTimeout"] = 10000; - config["wrapperQueryTimeout"] = 20000; - config["monitoring_wrapperQueryTimeout"] = 3000; - config["monitoring_wrapperConnectTimeout"] = 3000; - return config; -} - -describe("aurora failover", () => { - beforeEach(async () => { - logger.info(`Test started: ${expect.getState().currentTestName}`); - env = await TestEnvironment.getCurrent(); - - auroraTestUtility = new AuroraTestUtility(env.region); - driver = DriverHelper.getDriverForDatabaseEngine(env.engine); - initClientFunc = DriverHelper.getClient(driver); - await ProxyHelper.enableAllConnectivity(); - await TestEnvironment.verifyClusterStatus(); - - client = null; - secondaryClient = null; - }, 1320000); - - afterEach(async () => { - if (client !== null) { - try { - await client.end(); - } catch (error) { - // pass - } - } - - if (secondaryClient !== null) { - try { - await secondaryClient.end(); - } catch (error) { - // pass - } - } - await PluginManager.releaseResources(); - logger.info(`Test finished: ${expect.getState().currentTestName}`); - }, 1320000); - - itIfThreeInstanceAuroraCluster( - "writer failover efm", - async () => { - // Connect to writer instance. 
- const writerConfig = await initDefaultConfig(env.proxyDatabaseInfo.writerInstanceEndpoint, env.proxyDatabaseInfo.instanceEndpointPort, true); - writerConfig["failoverMode"] = "reader-or-writer"; - - client = initClientFunc(writerConfig); - await client.connect(); - - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); - const instances = env.databaseInfo.instances; - const readerInstance = instances[1].instanceId; - await ProxyHelper.disableAllConnectivity(env.engine); - - try { - await ProxyHelper.enableConnectivity(initialWriterId); - - // Sleep query activates monitoring connection after monitoring_wrapperQueryTimeout time is reached. - await auroraTestUtility.queryInstanceIdWithSleep(client); - - await ProxyHelper.enableConnectivity(readerInstance); - await ProxyHelper.disableConnectivity(env.engine, initialWriterId); - } catch (error) { - fail("The disable connectivity task was unexpectedly interrupted."); - } - // Failure occurs on connection invocation. 
- await expect(async () => { - await auroraTestUtility.queryInstanceId(client); - }).rejects.toThrow(FailoverSuccessError); - - const currentConnectionId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(false); - expect(currentConnectionId).not.toBe(initialWriterId); - }, - 1320000 - ); - - itIf( - "fails from writer to new writer on connection invocation", - async () => { - const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); - client = initClientFunc(config); - - await client.connect(); - - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); - - // Crash instance 1 and nominate a new writer - await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); - - await expect(async () => { - await auroraTestUtility.queryInstanceId(client); - }).rejects.toThrow(FailoverSuccessError); - - // Assert that we are connected to the new writer after failover happens - const currentConnectionId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - expect(currentConnectionId).not.toBe(initialWriterId); - }, - 1320000 - ); - - itIf( - "writer fails within transaction", - async () => { - const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); - client = initClientFunc(config); - - await client.connect(); - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); - - await DriverHelper.executeQuery(env.engine, client, "DROP TABLE IF EXISTS test3_3"); - await DriverHelper.executeQuery(env.engine, client, "CREATE TABLE test3_3 (id int not null primary key, 
test3_3_field varchar(255) not null)"); - - await DriverHelper.executeQuery(env.engine, client, "START TRANSACTION"); // start transaction - await DriverHelper.executeQuery(env.engine, client, "INSERT INTO test3_3 VALUES (1, 'test field string 1')"); - - // Crash instance 1 and nominate a new writer - await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); - - await expect(async () => { - await DriverHelper.executeQuery(env.engine, client, "INSERT INTO test3_3 VALUES (2, 'test field string 2')"); - }).rejects.toThrow(TransactionResolutionUnknownError); - - // Attempt to query the instance id. - const currentConnectionId = await auroraTestUtility.queryInstanceId(client); - - // Assert that we are connected to the new writer after failover happens. - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - - const nextClusterWriterId = await auroraTestUtility.getClusterWriterInstanceId(); - expect(currentConnectionId).toBe(nextClusterWriterId); - expect(initialWriterId).not.toBe(nextClusterWriterId); - - // Assert that NO row has been inserted to the table. 
- const result = await DriverHelper.executeQuery(env.engine, client, "SELECT count(*) from test3_3"); - if (env.engine === DatabaseEngine.PG) { - expect((result as QueryResult).rows[0]["count"]).toBe("0"); - } else if (env.engine === DatabaseEngine.MYSQL) { - expect(JSON.parse(JSON.stringify(result))[0][0]["count(*)"]).toBe(0); - } - - await DriverHelper.executeQuery(env.engine, client, "DROP TABLE IF EXISTS test3_3"); - }, - 2000000 - ); - - itIf( - "fails from writer and transfers session state", - async () => { - const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); - client = initClientFunc(config); - - await client.connect(); - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toBe(true); - - await client.setReadOnly(true); - await client.setTransactionIsolation(TransactionIsolationLevel.TRANSACTION_SERIALIZABLE); - - if (driver === DatabaseEngine.PG) { - await client.setSchema(env.databaseInfo.defaultDbName); - } else if (driver === DatabaseEngine.MYSQL) { - await client.setAutoCommit(false); - await client.setCatalog(env.databaseInfo.defaultDbName); - } - - // Failover cluster and nominate a new writer - await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); - - await expect(async () => { - await auroraTestUtility.queryInstanceId(client); - }).rejects.toThrow(FailoverSuccessError); - - // Assert that we are connected to the new writer after failover happens - const currentConnectionId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - expect(currentConnectionId).not.toBe(initialWriterId); - expect(client.isReadOnly()).toBe(true); - expect(client.getTransactionIsolation()).toBe(TransactionIsolationLevel.TRANSACTION_SERIALIZABLE); - if (driver === DatabaseEngine.PG) { - 
expect(client.getSchema()).toBe(env.databaseInfo.defaultDbName); - } else if (driver === DatabaseEngine.MYSQL) { - expect(client.getAutoCommit()).toBe(false); - expect(client.getCatalog()).toBe(env.databaseInfo.defaultDbName); - } - }, - 1320000 - ); - - itIfTwoInstance( - "fails from reader to writer", - async () => { - // Connect to writer instance - const writerConfig = await initDefaultConfig(env.proxyDatabaseInfo.writerInstanceEndpoint, env.proxyDatabaseInfo.instanceEndpointPort, true); - client = initClientFunc(writerConfig); - await client.connect(); - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); - - // Get a reader instance - let readerInstanceHost; - for (const host of env.proxyDatabaseInfo.instances) { - if (host.instanceId && host.instanceId !== initialWriterId) { - readerInstanceHost = host.host; - } - } - if (!readerInstanceHost) { - throw new Error("Could not find a reader instance"); - } - const readerConfig = await initDefaultConfig(readerInstanceHost, env.proxyDatabaseInfo.instanceEndpointPort, true); - - secondaryClient = initClientFunc(readerConfig); - await secondaryClient.connect(); - - // Crash the reader instance - const rdsUtils = new RdsUtils(); - const readerInstanceId = rdsUtils.getRdsInstanceId(readerInstanceHost); - if (readerInstanceId) { - await ProxyHelper.disableConnectivity(env.engine, readerInstanceId); - - await expect(async () => { - await auroraTestUtility.queryInstanceId(secondaryClient); - }).rejects.toThrow(FailoverSuccessError); - - await ProxyHelper.enableConnectivity(readerInstanceId); - - // Assert that we are currently connected to the writer instance - const currentConnectionId = await auroraTestUtility.queryInstanceId(secondaryClient); - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - expect(currentConnectionId).toBe(initialWriterId); - } - }, - 1320000 - ); -}); 
diff --git a/tests/integration/container/tests/aurora_failover2.test.ts b/tests/integration/container/tests/aurora_failover2.test.ts deleted file mode 100644 index 61ebf362..00000000 --- a/tests/integration/container/tests/aurora_failover2.test.ts +++ /dev/null @@ -1,259 +0,0 @@ -/* - Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"). - You may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -import { TestEnvironment } from "./utils/test_environment"; -import { DriverHelper } from "./utils/driver_helper"; -import { AuroraTestUtility } from "./utils/aurora_test_utility"; -import { FailoverSuccessError, PluginManager, TransactionIsolationLevel, TransactionResolutionUnknownError } from "../../../../index"; -import { DatabaseEngine } from "./utils/database_engine"; -import { QueryResult } from "pg"; -import { ProxyHelper } from "./utils/proxy_helper"; -import { logger } from "../../../../common/logutils"; -import { features, instanceCount } from "./config"; -import { TestEnvironmentFeatures } from "./utils/test_environment_features"; -import { RdsUtils } from "../../../../common/lib/utils/rds_utils"; - -const itIf = - features.includes(TestEnvironmentFeatures.FAILOVER_SUPPORTED) && - !features.includes(TestEnvironmentFeatures.PERFORMANCE) && - !features.includes(TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY) && - instanceCount >= 2 - ? it - : it.skip; -const itIfTwoInstance = instanceCount == 2 ? 
itIf : it.skip; - -let env: TestEnvironment; -let driver; -let client: any; -let secondaryClient: any; -let initClientFunc: (props: any) => any; - -let auroraTestUtility: AuroraTestUtility; - -async function initDefaultConfig(host: string, port: number, connectToProxy: boolean): Promise { - let config: any = { - user: env.databaseInfo.username, - host: host, - database: env.databaseInfo.defaultDbName, - password: env.databaseInfo.password, - port: port, - plugins: "failover2", - failoverTimeoutMs: 250000, - enableTelemetry: true, - telemetryTracesBackend: "OTLP", - telemetryMetricsBackend: "OTLP" - }; - if (connectToProxy) { - config["clusterInstanceHostPattern"] = "?." + env.proxyDatabaseInfo.instanceEndpointSuffix; - } - config = DriverHelper.addDriverSpecificConfiguration(config, env.engine); - return config; -} - -describe("aurora failover2", () => { - beforeEach(async () => { - logger.info(`Test started: ${expect.getState().currentTestName}`); - env = await TestEnvironment.getCurrent(); - - auroraTestUtility = new AuroraTestUtility(env.region); - driver = DriverHelper.getDriverForDatabaseEngine(env.engine); - initClientFunc = DriverHelper.getClient(driver); - await ProxyHelper.enableAllConnectivity(); - await TestEnvironment.verifyClusterStatus(); - - client = null; - secondaryClient = null; - }, 1320000); - - afterEach(async () => { - if (client !== null) { - try { - await client.end(); - } catch (error) { - // pass - } - } - - if (secondaryClient !== null) { - try { - await secondaryClient.end(); - } catch (error) { - // pass - } - } - await PluginManager.releaseResources(); - logger.info(`Test finished: ${expect.getState().currentTestName}`); - }, 1320000); - - itIf( - "fails from writer to new writer on connection invocation", - async () => { - const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); - client = initClientFunc(config); - - await client.connect(); - - const initialWriterId 
= await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); - - // Crash instance 1 and nominate a new writer. - await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); - - await expect(async () => { - await auroraTestUtility.queryInstanceId(client); - }).rejects.toThrow(FailoverSuccessError); - - // Assert that we are connected to the new writer after failover happens. - const currentConnectionId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - expect(currentConnectionId).not.toBe(initialWriterId); - }, - 1320000 - ); - - itIf( - "writer fails within transaction", - async () => { - const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); - client = initClientFunc(config); - - await client.connect(); - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); - - await DriverHelper.executeQuery(env.engine, client, "DROP TABLE IF EXISTS test3_3"); - await DriverHelper.executeQuery(env.engine, client, "CREATE TABLE test3_3 (id int not null primary key, test3_3_field varchar(255) not null)"); - - await DriverHelper.executeQuery(env.engine, client, "START TRANSACTION"); // start transaction - await DriverHelper.executeQuery(env.engine, client, "INSERT INTO test3_3 VALUES (1, 'test field string 1')"); - - // Crash instance 1 and nominate a new writer. 
- await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); - - await expect(async () => { - await DriverHelper.executeQuery(env.engine, client, "INSERT INTO test3_3 VALUES (2, 'test field string 2')"); - }).rejects.toThrow(TransactionResolutionUnknownError); - - const currentConnectionId = await auroraTestUtility.queryInstanceId(client); - // Assert that we are connected to the new writer after failover happens. - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - - const nextClusterWriterId = await auroraTestUtility.getClusterWriterInstanceId(); - expect(currentConnectionId).toBe(nextClusterWriterId); - expect(initialWriterId).not.toBe(nextClusterWriterId); - - // Assert that NO row has been inserted to the table. - const result = await DriverHelper.executeQuery(env.engine, client, "SELECT count(*) from test3_3"); - if (env.engine === DatabaseEngine.PG) { - expect((result as QueryResult).rows[0]["count"]).toBe("0"); - } else if (env.engine === DatabaseEngine.MYSQL) { - expect(JSON.parse(JSON.stringify(result))[0][0]["count(*)"]).toBe(0); - } - - await DriverHelper.executeQuery(env.engine, client, "DROP TABLE IF EXISTS test3_3"); - }, - 2000000 - ); - - itIf( - "fails from writer and transfers session state", - async () => { - const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); - client = initClientFunc(config); - - await client.connect(); - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toBe(true); - - await client.setReadOnly(true); - await client.setTransactionIsolation(TransactionIsolationLevel.TRANSACTION_SERIALIZABLE); - - if (driver === DatabaseEngine.PG) { - await client.setSchema(env.databaseInfo.defaultDbName); - } else if (driver === DatabaseEngine.MYSQL) { - await client.setAutoCommit(false); - await 
client.setCatalog(env.databaseInfo.defaultDbName); - } - - // Failover cluster and nominate a new writer. - await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); - - await expect(async () => { - await auroraTestUtility.queryInstanceId(client); - }).rejects.toThrow(FailoverSuccessError); - - // Assert that we are connected to the new writer after failover happens. - const currentConnectionId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - expect(currentConnectionId).not.toBe(initialWriterId); - expect(client.isReadOnly()).toBe(true); - expect(client.getTransactionIsolation()).toBe(TransactionIsolationLevel.TRANSACTION_SERIALIZABLE); - if (driver === DatabaseEngine.PG) { - expect(client.getSchema()).toBe(env.databaseInfo.defaultDbName); - } else if (driver === DatabaseEngine.MYSQL) { - expect(client.getAutoCommit()).toBe(false); - expect(client.getCatalog()).toBe(env.databaseInfo.defaultDbName); - } - }, - 1320000 - ); - - itIfTwoInstance( - "fails from reader to writer", - async () => { - // Connect to writer instance. - const writerConfig = await initDefaultConfig(env.proxyDatabaseInfo.writerInstanceEndpoint, env.proxyDatabaseInfo.instanceEndpointPort, true); - client = initClientFunc(writerConfig); - await client.connect(); - const initialWriterId = await auroraTestUtility.queryInstanceId(client); - expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); - - // Get a reader instance. 
- let readerInstanceHost; - for (const host of env.proxyDatabaseInfo.instances) { - if (host.instanceId && host.instanceId !== initialWriterId) { - readerInstanceHost = host.host; - } - } - if (!readerInstanceHost) { - throw new Error("Could not find a reader instance"); - } - const readerConfig = await initDefaultConfig(readerInstanceHost, env.proxyDatabaseInfo.instanceEndpointPort, true); - - secondaryClient = initClientFunc(readerConfig); - await secondaryClient.connect(); - - // Crash the reader instance. - const rdsUtils = new RdsUtils(); - const readerInstanceId = rdsUtils.getRdsInstanceId(readerInstanceHost); - if (readerInstanceId) { - await ProxyHelper.disableConnectivity(env.engine, readerInstanceId); - - await expect(async () => { - await auroraTestUtility.queryInstanceId(secondaryClient); - }).rejects.toThrow(FailoverSuccessError); - - await ProxyHelper.enableConnectivity(readerInstanceId); - - // Assert that we are currently connected to the writer instance. - const currentConnectionId = await auroraTestUtility.queryInstanceId(secondaryClient); - expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); - expect(currentConnectionId).toBe(initialWriterId); - } - }, - 1320000 - ); -}); diff --git a/tests/integration/container/tests/failover/aurora_failover.test.ts b/tests/integration/container/tests/failover/aurora_failover.test.ts new file mode 100644 index 00000000..6346ee90 --- /dev/null +++ b/tests/integration/container/tests/failover/aurora_failover.test.ts @@ -0,0 +1,125 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +import { TestEnvironment } from "../utils/test_environment"; +import { DriverHelper } from "../utils/driver_helper"; +import { AuroraTestUtility } from "../utils/aurora_test_utility"; +import { FailoverSuccessError, PluginManager } from "../../../../../index"; +import { ProxyHelper } from "../utils/proxy_helper"; +import { logger } from "../../../../../common/logutils"; +import { features, instanceCount } from "../config"; +import { TestEnvironmentFeatures } from "../utils/test_environment_features"; +import { createFailoverTests } from "./failover_tests"; + +const itIfThreeInstanceAuroraCluster = instanceCount == 3 && !features.includes(TestEnvironmentFeatures.RDS_MULTI_AZ_SUPPORTED) ? 
it : it.skip; + +describe("aurora failover", createFailoverTests({ plugins: "failover" })); + +describe("aurora failover - efm specific", () => { + let env: TestEnvironment; + let client: any; + let initClientFunc: (props: any) => any; + let auroraTestUtility: AuroraTestUtility; + + async function initConfigWithEFM2(host: string, port: number, connectToProxy: boolean): Promise { + let config: any = { + user: env.databaseInfo.username, + host: host, + database: env.databaseInfo.defaultDbName, + password: env.databaseInfo.password, + port: port, + plugins: "failover,efm2", + failoverTimeoutMs: 20000, + failureDetectionCount: 2, + failureDetectionInterval: 1000, + failureDetectionTime: 2000, + connectTimeout: 10000, + wrapperQueryTimeout: 20000, + monitoring_wrapperQueryTimeout: 3000, + monitoring_wrapperConnectTimeout: 3000, + enableTelemetry: true, + telemetryTracesBackend: "OTLP", + telemetryMetricsBackend: "OTLP" + }; + if (connectToProxy) { + config["clusterInstanceHostPattern"] = "?." 
+ env.proxyDatabaseInfo.instanceEndpointSuffix; + } + config = DriverHelper.addDriverSpecificConfiguration(config, env.engine); + return config; + } + + beforeEach(async () => { + logger.info(`Test started: ${expect.getState().currentTestName}`); + env = await TestEnvironment.getCurrent(); + auroraTestUtility = new AuroraTestUtility(env.region); + const driver = DriverHelper.getDriverForDatabaseEngine(env.engine); + initClientFunc = DriverHelper.getClient(driver); + await ProxyHelper.enableAllConnectivity(); + await TestEnvironment.verifyClusterStatus(); + client = null; + }, 1320000); + + afterEach(async () => { + if (client !== null) { + try { + await client.end(); + } catch (error) { + // pass + } + } + await PluginManager.releaseResources(); + logger.info(`Test finished: ${expect.getState().currentTestName}`); + }, 1320000); + + itIfThreeInstanceAuroraCluster( + "writer failover efm", + async () => { + // Connect to writer instance + const writerConfig = await initConfigWithEFM2(env.proxyDatabaseInfo.writerInstanceEndpoint, env.proxyDatabaseInfo.instanceEndpointPort, true); + writerConfig["failoverMode"] = "reader-or-writer"; + + client = initClientFunc(writerConfig); + await client.connect(); + + const initialWriterId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); + const instances = env.databaseInfo.instances; + const readerInstance = instances[1].instanceId; + await ProxyHelper.disableAllConnectivity(env.engine); + + try { + await ProxyHelper.enableConnectivity(initialWriterId); + + // Sleep query activates monitoring connection after monitoring_wrapperQueryTimeout time is reached + await auroraTestUtility.queryInstanceIdWithSleep(client); + + await ProxyHelper.enableConnectivity(readerInstance); + await ProxyHelper.disableConnectivity(env.engine, initialWriterId); + } catch (error) { + fail("The disable connectivity task was unexpectedly interrupted."); + } + // 
Failure occurs on connection invocation + await expect(async () => { + await auroraTestUtility.queryInstanceId(client); + }).rejects.toThrow(FailoverSuccessError); + + const currentConnectionId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(false); + expect(currentConnectionId).not.toBe(initialWriterId); + }, + 1320000 + ); +}); diff --git a/tests/integration/container/tests/failover/aurora_failover2.test.ts b/tests/integration/container/tests/failover/aurora_failover2.test.ts new file mode 100644 index 00000000..7f7adb3e --- /dev/null +++ b/tests/integration/container/tests/failover/aurora_failover2.test.ts @@ -0,0 +1,19 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + */ + +import { createFailoverTests } from "./failover_tests"; + +describe("aurora failover2", createFailoverTests({ plugins: "failover2" })); diff --git a/tests/integration/container/tests/failover/failover_tests.ts b/tests/integration/container/tests/failover/failover_tests.ts new file mode 100644 index 00000000..42ed0eab --- /dev/null +++ b/tests/integration/container/tests/failover/failover_tests.ts @@ -0,0 +1,269 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { TestEnvironment } from "../utils/test_environment"; +import { DriverHelper } from "../utils/driver_helper"; +import { AuroraTestUtility } from "../utils/aurora_test_utility"; +import { FailoverSuccessError, PluginManager, TransactionIsolationLevel, TransactionResolutionUnknownError } from "../../../../../index"; +import { DatabaseEngine } from "../utils/database_engine"; +import { QueryResult } from "pg"; +import { ProxyHelper } from "../utils/proxy_helper"; +import { logger } from "../../../../../common/logutils"; +import { features, instanceCount } from "../config"; +import { TestEnvironmentFeatures } from "../utils/test_environment_features"; +import { RdsUtils } from "../../../../../common/lib/utils/rds_utils"; + +export interface FailoverTestOptions { + plugins: string; + getExtraConfig?: () => Record; +} + +export function createFailoverTests(options: FailoverTestOptions) { + const itIf = + features.includes(TestEnvironmentFeatures.FAILOVER_SUPPORTED) && + !features.includes(TestEnvironmentFeatures.PERFORMANCE) && + !features.includes(TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY) && + instanceCount >= 2 + ? it + : it.skip; + const itIfTwoInstance = instanceCount == 2 ? 
itIf : it.skip; + + return () => { + let env: TestEnvironment; + let driver: any; + let client: any; + let secondaryClient: any; + let initClientFunc: (props: any) => any; + let auroraTestUtility: AuroraTestUtility; + + async function initDefaultConfig(host: string, port: number, connectToProxy: boolean): Promise { + let config: any = { + user: env.databaseInfo.username, + host: host, + database: env.databaseInfo.defaultDbName, + password: env.databaseInfo.password, + port: port, + plugins: options.plugins, + failoverTimeoutMs: 250000, + enableTelemetry: true, + telemetryTracesBackend: "OTLP", + telemetryMetricsBackend: "OTLP", + ...options.getExtraConfig?.() + }; + if (connectToProxy) { + config["clusterInstanceHostPattern"] = "?." + env.proxyDatabaseInfo.instanceEndpointSuffix; + } + config = DriverHelper.addDriverSpecificConfiguration(config, env.engine); + return config; + } + + beforeEach(async () => { + logger.info(`Test started: ${expect.getState().currentTestName}`); + env = await TestEnvironment.getCurrent(); + auroraTestUtility = new AuroraTestUtility(env.region); + driver = DriverHelper.getDriverForDatabaseEngine(env.engine); + initClientFunc = DriverHelper.getClient(driver); + await ProxyHelper.enableAllConnectivity(); + await TestEnvironment.verifyClusterStatus(); + client = null; + secondaryClient = null; + }, 1320000); + + afterEach(async () => { + if (client !== null) { + try { + await client.end(); + } catch (error) { + // pass + } + } + if (secondaryClient !== null) { + try { + await secondaryClient.end(); + } catch (error) { + // pass + } + } + await PluginManager.releaseResources(); + logger.info(`Test finished: ${expect.getState().currentTestName}`); + }, 1320000); + + itIf( + "fails from writer to new writer on connection invocation", + async () => { + const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); + client = initClientFunc(config); + + await client.connect(); + + 
const initialWriterId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); + + // Crash instance 1 and nominate a new writer + await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); + + await expect(async () => { + await auroraTestUtility.queryInstanceId(client); + }).rejects.toThrow(FailoverSuccessError); + + // Assert that we are connected to the new writer after failover happens + const currentConnectionId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); + expect(currentConnectionId).not.toBe(initialWriterId); + }, + 1320000 + ); + + itIf( + "writer fails within transaction", + async () => { + const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); + client = initClientFunc(config); + + await client.connect(); + const initialWriterId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); + + await DriverHelper.executeQuery(env.engine, client, "DROP TABLE IF EXISTS test3_3"); + await DriverHelper.executeQuery( + env.engine, + client, + "CREATE TABLE test3_3 (id int not null primary key, test3_3_field varchar(255) not null)" + ); + + await DriverHelper.executeQuery(env.engine, client, "START TRANSACTION"); + await DriverHelper.executeQuery(env.engine, client, "INSERT INTO test3_3 VALUES (1, 'test field string 1')"); + + // Crash instance 1 and nominate a new writer + await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); + + await expect(async () => { + await DriverHelper.executeQuery(env.engine, client, "INSERT INTO test3_3 VALUES (2, 'test field string 2')"); + }).rejects.toThrow(TransactionResolutionUnknownError); + + // Attempt to query the instance id + const currentConnectionId = await 
auroraTestUtility.queryInstanceId(client); + + // Assert that we are connected to the new writer after failover happens + expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); + + const nextClusterWriterId = await auroraTestUtility.getClusterWriterInstanceId(); + expect(currentConnectionId).toBe(nextClusterWriterId); + expect(initialWriterId).not.toBe(nextClusterWriterId); + + // Assert that NO row has been inserted to the table + const result = await DriverHelper.executeQuery(env.engine, client, "SELECT count(*) from test3_3"); + if (env.engine === DatabaseEngine.PG) { + expect((result as QueryResult).rows[0]["count"]).toBe("0"); + } else if (env.engine === DatabaseEngine.MYSQL) { + expect(JSON.parse(JSON.stringify(result))[0][0]["count(*)"]).toBe(0); + } + + await DriverHelper.executeQuery(env.engine, client, "DROP TABLE IF EXISTS test3_3"); + }, + 2000000 + ); + + itIf( + "fails from writer and transfers session state", + async () => { + const config = await initDefaultConfig(env.databaseInfo.writerInstanceEndpoint, env.databaseInfo.instanceEndpointPort, false); + client = initClientFunc(config); + + await client.connect(); + const initialWriterId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toBe(true); + + await client.setReadOnly(true); + await client.setTransactionIsolation(TransactionIsolationLevel.TRANSACTION_SERIALIZABLE); + + if (driver === DatabaseEngine.PG) { + await client.setSchema(env.databaseInfo.defaultDbName); + } else if (driver === DatabaseEngine.MYSQL) { + await client.setAutoCommit(false); + await client.setCatalog(env.databaseInfo.defaultDbName); + } + + // Failover cluster and nominate a new writer + await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(); + + await expect(async () => { + await auroraTestUtility.queryInstanceId(client); + }).rejects.toThrow(FailoverSuccessError); + + // Assert that we are connected to the new 
writer after failover happens + const currentConnectionId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); + expect(currentConnectionId).not.toBe(initialWriterId); + expect(client.isReadOnly()).toBe(true); + expect(client.getTransactionIsolation()).toBe(TransactionIsolationLevel.TRANSACTION_SERIALIZABLE); + if (driver === DatabaseEngine.PG) { + expect(client.getSchema()).toBe(env.databaseInfo.defaultDbName); + } else if (driver === DatabaseEngine.MYSQL) { + expect(client.getAutoCommit()).toBe(false); + expect(client.getCatalog()).toBe(env.databaseInfo.defaultDbName); + } + }, + 1320000 + ); + + itIfTwoInstance( + "fails from reader to writer", + async () => { + // Connect to writer instance + const writerConfig = await initDefaultConfig(env.proxyDatabaseInfo.writerInstanceEndpoint, env.proxyDatabaseInfo.instanceEndpointPort, true); + client = initClientFunc(writerConfig); + await client.connect(); + const initialWriterId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(initialWriterId)).toStrictEqual(true); + + // Get a reader instance + let readerInstanceHost; + for (const host of env.proxyDatabaseInfo.instances) { + if (host.instanceId && host.instanceId !== initialWriterId) { + readerInstanceHost = host.host; + } + } + if (!readerInstanceHost) { + throw new Error("Could not find a reader instance"); + } + const readerConfig = await initDefaultConfig(readerInstanceHost, env.proxyDatabaseInfo.instanceEndpointPort, true); + + secondaryClient = initClientFunc(readerConfig); + await secondaryClient.connect(); + + // Crash the reader instance + const rdsUtils = new RdsUtils(); + const readerInstanceId = rdsUtils.getRdsInstanceId(readerInstanceHost); + if (readerInstanceId) { + await ProxyHelper.disableConnectivity(env.engine, readerInstanceId); + + await expect(async () => { + await 
auroraTestUtility.queryInstanceId(secondaryClient); + }).rejects.toThrow(FailoverSuccessError); + + await ProxyHelper.enableConnectivity(readerInstanceId); + + // Assert that we are currently connected to the writer instance + const currentConnectionId = await auroraTestUtility.queryInstanceId(secondaryClient); + expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(true); + expect(currentConnectionId).toBe(initialWriterId); + } + }, + 1320000 + ); + }; +} diff --git a/tests/integration/container/tests/failover/gdb_failover.test.ts b/tests/integration/container/tests/failover/gdb_failover.test.ts new file mode 100644 index 00000000..d0719971 --- /dev/null +++ b/tests/integration/container/tests/failover/gdb_failover.test.ts @@ -0,0 +1,181 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ */ + +import { TestEnvironment } from "../utils/test_environment"; +import { DriverHelper } from "../utils/driver_helper"; +import { AuroraTestUtility } from "../utils/aurora_test_utility"; +import { FailoverSuccessError, PluginManager } from "../../../../../index"; +import { ProxyHelper } from "../utils/proxy_helper"; +import { logger } from "../../../../../common/logutils"; +import { features, instanceCount } from "../config"; +import { TestEnvironmentFeatures } from "../utils/test_environment_features"; +import { createFailoverTests } from "./failover_tests"; + +const itIf = + features.includes(TestEnvironmentFeatures.FAILOVER_SUPPORTED) && + !features.includes(TestEnvironmentFeatures.PERFORMANCE) && + !features.includes(TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY) && + instanceCount >= 2 + ? it + : it.skip; +const itIfNetworkOutages = features.includes(TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED) && instanceCount >= 2 ? itIf : it.skip; + +let env: TestEnvironment; +let driver: any; +let client: any; +let initClientFunc: (props: any) => any; + +let auroraTestUtility: AuroraTestUtility; + +async function initDefaultConfig(host: string, port: number, connectToProxy: boolean): Promise { + let config: any = { + user: env.databaseInfo.username, + host: host, + database: env.databaseInfo.defaultDbName, + password: env.databaseInfo.password, + port: port, + plugins: "gdbFailover", + failoverTimeoutMs: 250000, + activeHomeFailoverMode: "strict-writer", + inactiveHomeFailoverMode: "strict-writer", + enableTelemetry: true, + telemetryTracesBackend: "OTLP", + telemetryMetricsBackend: "OTLP" + }; + if (connectToProxy) { + config["clusterInstanceHostPattern"] = "?." 
+ env.proxyDatabaseInfo.instanceEndpointSuffix; + } + config = DriverHelper.addDriverSpecificConfiguration(config, env.engine); + return config; +} + +describe("gdb failover", () => { + // Inherit failover failover tests with GDB-specific configuration + // This mirrors the Java pattern where GdbFailoverTest extends FailoverTest + describe( + "failover tests", + createFailoverTests({ + plugins: "gdbFailover", + getExtraConfig: () => ({ + // These settings mimic failover/failover2 plugin logic when connecting to non-GDB Aurora or RDS DB clusters. + activeHomeFailoverMode: "strict-writer", + inactiveHomeFailoverMode: "strict-writer" + }) + }) + ); + + // GDB-specific tests (overrides from Java GdbFailoverTest) + describe("gdb-specific tests", () => { + beforeEach(async () => { + logger.info(`Test started: ${expect.getState().currentTestName}`); + env = await TestEnvironment.getCurrent(); + + auroraTestUtility = new AuroraTestUtility(env.region); + driver = DriverHelper.getDriverForDatabaseEngine(env.engine); + initClientFunc = DriverHelper.getClient(driver); + await ProxyHelper.enableAllConnectivity(); + await TestEnvironment.verifyClusterStatus(); + + client = null; + }, 1320000); + + afterEach(async () => { + if (client !== null) { + try { + await client.end(); + } catch (error) { + // pass + } + } + await PluginManager.releaseResources(); + logger.info(`Test finished: ${expect.getState().currentTestName}`); + }, 1320000); + + itIfNetworkOutages( + "reader failover with home-reader-or-writer mode", + async () => { + const initialWriterId = env.proxyDatabaseInfo.writerInstanceId; + const initialWriterHost = env.proxyDatabaseInfo.writerInstanceEndpoint; + const initialWriterPort = env.proxyDatabaseInfo.instanceEndpointPort; + + const config = await initDefaultConfig(initialWriterHost, initialWriterPort, true); + config["activeHomeFailoverMode"] = "home-reader-or-writer"; + config["inactiveHomeFailoverMode"] = "home-reader-or-writer"; + + client = 
initClientFunc(config); + await client.connect(); + + await ProxyHelper.disableConnectivity(env.engine, initialWriterId!); + + await expect(async () => { + await auroraTestUtility.queryInstanceId(client); + }).rejects.toThrow(FailoverSuccessError); + }, + 1320000 + ); + + itIfNetworkOutages( + "reader failover with strict-home-reader mode", + async () => { + const initialWriterId = env.proxyDatabaseInfo.writerInstanceId; + const initialWriterHost = env.proxyDatabaseInfo.writerInstanceEndpoint; + const initialWriterPort = env.proxyDatabaseInfo.instanceEndpointPort; + + const config = await initDefaultConfig(initialWriterHost, initialWriterPort, true); + config["activeHomeFailoverMode"] = "strict-home-reader"; + config["inactiveHomeFailoverMode"] = "strict-home-reader"; + + client = initClientFunc(config); + await client.connect(); + + await ProxyHelper.disableConnectivity(env.engine, initialWriterId!); + + await expect(async () => { + await auroraTestUtility.queryInstanceId(client); + }).rejects.toThrow(FailoverSuccessError); + + const currentConnectionId = await auroraTestUtility.queryInstanceId(client); + expect(await auroraTestUtility.isDbInstanceWriter(currentConnectionId)).toBe(false); + }, + 1320000 + ); + + itIfNetworkOutages( + "writer reelected with home-reader-or-writer mode", + async () => { + const initialWriterId = env.proxyDatabaseInfo.writerInstanceId; + const initialWriterHost = env.proxyDatabaseInfo.writerInstanceEndpoint; + const initialWriterPort = env.proxyDatabaseInfo.instanceEndpointPort; + + const config = await initDefaultConfig(initialWriterHost, initialWriterPort, true); + config["activeHomeFailoverMode"] = "home-reader-or-writer"; + config["inactiveHomeFailoverMode"] = "home-reader-or-writer"; + + client = initClientFunc(config); + await client.connect(); + + // Failover usually changes the writer instance, but we want to test re-election of the same writer, so we will + // simulate this by temporarily disabling connectivity to the writer. 
+ await auroraTestUtility.simulateTemporaryFailure(initialWriterId!); + + await expect(async () => { + await auroraTestUtility.queryInstanceId(client); + }).rejects.toThrow(FailoverSuccessError); + }, + 1320000 + ); + }); +}); diff --git a/tests/integration/container/tests/utils/aurora_test_utility.ts b/tests/integration/container/tests/utils/aurora_test_utility.ts index b0e0bad8..bdfb9c71 100644 --- a/tests/integration/container/tests/utils/aurora_test_utility.ts +++ b/tests/integration/container/tests/utils/aurora_test_utility.ts @@ -42,6 +42,7 @@ import { TestInstanceInfo } from "./test_instance_info"; import { TestEnvironmentInfo } from "./test_environment_info"; import { DatabaseEngine } from "./database_engine"; import { DatabaseEngineDeployment } from "./database_engine_deployment"; +import { ProxyHelper } from "./proxy_helper"; const instanceClass: string = "db.r5.large"; @@ -492,4 +493,38 @@ export class AuroraTestUtility { logger.debug("switchoverBlueGreenDeployment request is sent."); } } + + async simulateTemporaryFailure(instanceName: string, delayMs: number = 0, failureDurationMs: number = 5000): Promise { + const env = await TestEnvironment.getCurrent(); + const deployment = env.deployment; + const clusterEndpoint = env.proxyDatabaseInfo.clusterEndpoint; + const clusterReadOnlyEndpoint = env.proxyDatabaseInfo.clusterReadOnlyEndpoint; + + (async () => { + try { + if (delayMs > 0) { + await sleep(delayMs); + } + + await ProxyHelper.disableConnectivity(env.engine, instanceName); + + if (deployment === DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + await ProxyHelper.disableConnectivity(env.engine, clusterEndpoint); + await ProxyHelper.disableConnectivity(env.engine, clusterReadOnlyEndpoint); + } + + await sleep(failureDurationMs); + + await ProxyHelper.enableConnectivity(instanceName); + if (deployment === DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + await ProxyHelper.enableConnectivity(clusterEndpoint); + await 
ProxyHelper.enableConnectivity(clusterReadOnlyEndpoint); + } + } catch (e: any) { + logger.error(`Error during simulateTemporaryFailure: ${e.message}`); + } + })(); + + await sleep(500); + } } diff --git a/tests/integration/container/tests/utils/test_environment.ts b/tests/integration/container/tests/utils/test_environment.ts index cc2d52e9..459229fa 100644 --- a/tests/integration/container/tests/utils/test_environment.ts +++ b/tests/integration/container/tests/utils/test_environment.ts @@ -37,6 +37,7 @@ import { ATTR_SERVICE_NAME } from "@opentelemetry/semantic-conventions"; import { PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics"; import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-grpc"; import { logger } from "../../../../../common/logutils"; +import { RdsUtils } from "../../../../../common/lib/utils/rds_utils"; import pkgPg from "pg"; import { ConnectionOptions, createConnection } from "mysql2/promise"; import { readFileSync } from "fs"; @@ -238,6 +239,14 @@ export class TestEnvironment { await TestEnvironment.initProxies(env); } + // Helps to eliminate problem with proxied endpoints. 
+ RdsUtils.setPrepareHostFunc((host: string) => { + if (host.endsWith(".proxied")) { + return host.substring(0, host.length - ".proxied".length); + } + return host; + }); + const contextManager = new AsyncHooksContextManager(); contextManager.enable(); context.setGlobalContextManager(contextManager); diff --git a/tests/unit/connection_plugin_chain_builder.test.ts b/tests/unit/connection_plugin_chain_builder.test.ts index a4315aa8..bae3f3e8 100644 --- a/tests/unit/connection_plugin_chain_builder.test.ts +++ b/tests/unit/connection_plugin_chain_builder.test.ts @@ -42,6 +42,10 @@ describe("testConnectionPluginChainBuilder", () => { when(mockPluginService.getTelemetryFactory()).thenReturn(new NullTelemetryFactory()); }); + afterEach(async () => { + await PluginManager.releaseResources(); + }); + it.each([["iam,staleDns,failover"], ["iam, staleDns, failover"]])("sort plugins", async (plugins) => { const props = new Map(); props.set(WrapperProperties.PLUGINS.name, plugins); diff --git a/tests/unit/iam_authentication_plugin.test.ts b/tests/unit/iam_authentication_plugin.test.ts index f2ee620f..58d7ad7b 100644 --- a/tests/unit/iam_authentication_plugin.test.ts +++ b/tests/unit/iam_authentication_plugin.test.ts @@ -110,6 +110,7 @@ describe("testIamAuth", () => { afterEach(() => { reset(spyIamAuthUtils); + PluginManager.releaseResources(); }); it("testPostgresConnectValidTokenInCache", async () => { diff --git a/tests/unit/notification_pipeline.test.ts b/tests/unit/notification_pipeline.test.ts index 51f7d4b8..0d4d95c5 100644 --- a/tests/unit/notification_pipeline.test.ts +++ b/tests/unit/notification_pipeline.test.ts @@ -64,6 +64,10 @@ describe("notificationPipelineTest", () => { pluginManager["_plugins"] = [plugin]; }); + afterEach(async () => { + await PluginManager.releaseResources(); + }); + it("test_notifyConnectionChanged", async () => { const result: Set = await pluginManager.notifyConnectionChanged(connectionChanges, null); expect(plugin.counter).toBe(1); diff --git 
a/tests/unit/stale_dns_helper.test.ts b/tests/unit/stale_dns_helper.test.ts index 433b477c..7c9569e1 100644 --- a/tests/unit/stale_dns_helper.test.ts +++ b/tests/unit/stale_dns_helper.test.ts @@ -34,14 +34,6 @@ const mockHostListProviderService = mock(); const props: Map = new Map(); const writerInstance = new HostInfo("writer-host.XYZ.us-west-2.rds.amazonaws.com", 1234, HostRole.WRITER); -const writerCluster = new HostInfo("my-cluster.cluster-XYZ.us-west-2.rds.amazonaws.com", 1234, HostRole.WRITER); -const writerClusterInvalidClusterInetAddress = new HostInfo("my-cluster.cluster-invalid.us-west-2.rds.amazonaws.com", 1234, HostRole.WRITER); -const readerA = new HostInfo("reader-a-host.XYZ.us-west-2.rds.amazonaws.com", 1234, HostRole.READER, HostAvailability.AVAILABLE); -const readerB = new HostInfo("reader-b-host.XYZ.us-west-2.rds.amazonaws.com", 1234, HostRole.READER, HostAvailability.AVAILABLE); - -const clusterHostList = [writerCluster, readerA, readerB]; -const readerHostList = [readerA, readerB]; -const instanceHostList = [writerInstance, readerA, readerB]; const mockInitialConn = mock(AwsClient); const mockHostInfo = mock(HostInfo); @@ -87,208 +79,6 @@ describe("test_stale_dns_helper", () => { expect(returnConn).toBe(mockInitialClientWrapper); }); - it("test_get_verified_connection_cluster_inet_address_none", async () => { - const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const targetInstance = instance(target); - - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - - when(target.lookupResult(anything())).thenReturn(); - - const returnConn = await targetInstance.getVerifiedConnection( - writerClusterInvalidClusterInetAddress.host, - true, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - expect(mockInitialClientWrapper).toBe(returnConn); - expect(mockConnectFunc).toHaveBeenCalled(); - }); - - it("test_get_verified_connection__no_writer_hostinfo", async () => { - 
const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const targetInstance = instance(target); - - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - when(mockPluginService.getHosts()).thenReturn(readerHostList); - when(mockPluginService.getAllHosts()).thenReturn(readerHostList); - - when(mockPluginService.getCurrentHostInfo()).thenReturn(readerA); - - const lookupAddress = { address: "2.2.2.2", family: 0 }; - when(target.lookupResult(anything())).thenResolve(lookupAddress); - - const returnConn = await targetInstance.getVerifiedConnection( - writerCluster.host, - true, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - expect(mockConnectFunc).toHaveBeenCalled(); - expect(readerA.role).toBe(HostRole.READER); - verify(mockPluginService.forceRefreshHostList()).once(); - expect(mockInitialClientWrapper).toBe(returnConn); - }); - - it("test_get_verified_connection__writer_rds_cluster_dns_true", async () => { - const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const targetInstance = instance(target); - - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - - when(mockPluginService.getHosts()).thenReturn(clusterHostList); - when(mockPluginService.getAllHosts()).thenReturn(clusterHostList); - - const lookupAddress = { address: "5.5.5.5", family: 0 }; - when(target.lookupResult(anything())).thenResolve(lookupAddress); - - const returnConn = await targetInstance.getVerifiedConnection( - writerCluster.host, - true, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - expect(mockConnectFunc).toHaveBeenCalled(); - verify(mockPluginService.refreshHostList()).once(); - expect(mockInitialClientWrapper).toBe(returnConn); - }); - - it("test_get_verified_connection__writer_host_address_none", async () => { - const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const 
targetInstance = instance(target); - when(mockPluginService.getHosts()).thenReturn(instanceHostList); - when(mockPluginService.getAllHosts()).thenReturn(instanceHostList); - - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - - const firstCall = { address: "5.5.5.5", family: 0 }; - const secondCall = { address: "", family: 0 }; - - when(target.lookupResult(anything())).thenResolve(firstCall, secondCall); - - const returnConn = await targetInstance.getVerifiedConnection( - writerCluster.host, - true, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - expect(mockConnectFunc).toHaveBeenCalled(); - expect(mockInitialClientWrapper).toBe(returnConn); - }); - - it("test_get_verified_connection__writer_host_info_none", async () => { - const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const targetInstance = instance(target); - when(mockPluginService.getHosts()).thenReturn(readerHostList); - when(mockPluginService.getAllHosts()).thenReturn(readerHostList); - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - - const firstCall = { address: "5.5.5.5", family: 0 }; - const secondCall = { address: "", family: 0 }; - - when(target.lookupResult(anything())).thenResolve(firstCall, secondCall); - - const returnConn = await targetInstance.getVerifiedConnection( - writerCluster.host, - true, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - expect(mockConnectFunc).toHaveBeenCalled(); - expect(mockInitialClientWrapper).toBe(returnConn); - verify(mockPluginService.connect(anything(), anything())).never(); - }); - - it("test_get_verified_connection__writer_host_address_equals_cluster_inet_address", async () => { - const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const targetInstance = instance(target); - when(mockPluginService.getHosts()).thenReturn(instanceHostList); - 
when(mockPluginService.getAllHosts()).thenReturn(instanceHostList); - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - - const firstCall = { address: "5.5.5.5", family: 0 }; - const secondCall = { address: "5.5.5.5", family: 0 }; - - when(target.lookupResult(anything())).thenResolve(firstCall, secondCall); - - const returnConn = await targetInstance.getVerifiedConnection( - writerCluster.host, - true, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - expect(mockConnectFunc).toHaveBeenCalled(); - expect(mockInitialClientWrapper).toBe(returnConn); - verify(mockPluginService.connect(anything(), anything())).never(); - }); - - it("test_get_verified_connection__writer_host_address_not_equals_cluster_inet_address", async () => { - const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const targetInstance = instance(target); - - when(mockPluginService.getHosts()).thenReturn(clusterHostList); - when(mockPluginService.getAllHosts()).thenReturn(clusterHostList); - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - targetInstance["writerHostInfo"] = writerCluster; - - const firstCall = { address: "5.5.5.5", family: 0 }; - const secondCall = { address: "8.8.8.8", family: 0 }; - - when(target.lookupResult(anything())).thenResolve(firstCall, secondCall); - - const returnConn = await targetInstance.getVerifiedConnection( - writerCluster.host, - false, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - expect(mockInitialConn.targetClient).not.toBe(returnConn); - expect(mockConnectFunc).toHaveBeenCalled(); - verify(mockPluginService.connect(anything(), anything())).once(); - }); - - it("test_get_verified_connection__initial_connection_writer_host_address_not_equals_cluster_inet_address", async () => { - const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); - const targetInstance = instance(target); - - 
when(mockPluginService.getHosts()).thenReturn(clusterHostList); - when(mockPluginService.getAllHosts()).thenReturn(clusterHostList); - const mockHostListProviderServiceInstance = instance(mockHostListProviderService); - targetInstance["writerHostInfo"] = writerCluster; - when(mockHostListProviderService.getInitialConnectionHostInfo()).thenReturn(writerCluster); - - const firstCall = { address: "5.5.5.5", family: 0 }; - const secondCall = { address: "8.8.8.8", family: 0 }; - - when(target.lookupResult(anything())).thenResolve(firstCall, secondCall); - - const returnConn = await targetInstance.getVerifiedConnection( - writerCluster.host, - true, - mockHostListProviderServiceInstance, - props, - mockConnectFunc - ); - - verify(mockPluginService.connect(anything(), anything())).once(); - expect(targetInstance["writerHostInfo"]).toBe(mockHostListProviderServiceInstance.getInitialConnectionHostInfo()); - expect(mockInitialConn.targetClient).not.toBe(returnConn); - }); - it("test_notify_host_list_changed", () => { const target: StaleDnsHelper = spy(new StaleDnsHelper(instance(mockPluginService))); const targetInstance = instance(target); diff --git a/tests/unit/topology_utils.test.ts b/tests/unit/topology_utils.test.ts index a757c8a6..b3e927c0 100644 --- a/tests/unit/topology_utils.test.ts +++ b/tests/unit/topology_utils.test.ts @@ -14,9 +14,10 @@ limitations under the License. 
*/ -import { TopologyQueryResult, TopologyUtils } from "../../common/lib/host_list_provider/topology_utils"; +import { TopologyQueryResult } from "../../common/lib/host_list_provider/topology_utils"; +import { AuroraTopologyUtils } from "../../common/lib/host_list_provider/aurora_topology_utils"; import { anything, instance, mock, reset, when } from "ts-mockito"; -import { HostInfo, HostInfoBuilder } from "../../common/lib"; +import { HostInfo, HostInfoBuilder, PluginManager } from "../../common/lib"; import { SimpleHostAvailabilityStrategy } from "../../common/lib/host_availability/simple_host_availability_strategy"; import { AuroraPgDatabaseDialect } from "../../pg/lib/dialect/aurora_pg_database_dialect"; import { ClientWrapper } from "../../common/lib/client_wrapper"; @@ -43,8 +44,8 @@ function createHost(config: any): HostInfo { return info.build(); } -function getTopologyUtils(): TopologyUtils { - return new TopologyUtils(instance(mockDialect), hostInfoBuilder); +function getTopologyUtils(): AuroraTopologyUtils { + return new AuroraTopologyUtils(instance(mockDialect), hostInfoBuilder); } describe("testTopologyUtils", () => { @@ -54,9 +55,13 @@ describe("testTopologyUtils", () => { reset(mockNonTopologyDialect); }); + afterEach(async () => { + await PluginManager.releaseResources(); + }); + it("testQueryForTopology_withNonTopologyAwareDialect_throwsError", async () => { const hostInfoBuilder = new HostInfoBuilder({ hostAvailabilityStrategy: new SimpleHostAvailabilityStrategy() }); - const topologyUtils = new TopologyUtils(instance(mockNonTopologyDialect) as any, hostInfoBuilder); + const topologyUtils = new AuroraTopologyUtils(instance(mockNonTopologyDialect) as any, hostInfoBuilder); const initialHost = createHost({ host: "initial-host",