diff --git a/packages/network-controller/CHANGELOG.md b/packages/network-controller/CHANGELOG.md index 2946597ef25..588331acfda 100644 --- a/packages/network-controller/CHANGELOG.md +++ b/packages/network-controller/CHANGELOG.md @@ -7,8 +7,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Add `NetworkController:rpcEndpointChainAvailable` messenger event ([#7166](https://github.com/MetaMask/core/pull/7166)) + - This is a counterpart to the (new) `NetworkController:rpcEndpointChainUnavailable` and `NetworkController:rpcEndpointChainDegraded` events, but is published when a successful request is made to an endpoint within a chain of endpoints, either for the first time or after the chain was previously marked as degraded or unavailable. + ### Changed +- **BREAKING:** Split up and update payload data for `NetworkController:rpcEndpointDegraded` and `NetworkController:rpcEndpointUnavailable` ([#7166](https://github.com/MetaMask/core/pull/7166)) + - `NetworkController:rpcEndpointDegraded` and `NetworkController:rpcEndpointUnavailable` still exist and retain the same behavior as before. + - New events are `NetworkController:rpcEndpointChainDegraded` and `NetworkController:rpcEndpointChainUnavailable`, and are designed to represent an entire chain of endpoints. They are also guaranteed not to be published multiple times in a row. In particular, `NetworkController:rpcEndpointChainUnavailable` is published only after trying all of the endpoints for a chain and when the underlying circuit for the last endpoint breaks, not each time a primary's or failover's circuit breaks. + - The event payloads for all events have been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` has been added to the payload. +- **BREAKING:** Rename and update payload data for `NetworkController:rpcEndpointRequestRetried` ([#7166](https://github.com/MetaMask/core/pull/7166)) + - This event is now called `NetworkController:rpcEndpointRetried`. + - The event payload has been changed as well: `failoverEndpointUrl` has been removed, and `primaryEndpointUrl` has been added. In addition, `networkClientId` and `attempt` have been added to the payload. +- **BREAKING:** Update `AbstractRpcService`/`RpcServiceRequestable` to remove `{ isolated: true }` from the `onBreak` event data type ([#7166](https://github.com/MetaMask/core/pull/7166)) + - This represented the error produced when `isolate` is called on a Cockatiel circuit breaker policy.
This never happens for our service (we use `isolate` internally, but this error is suppressed and cannot trigger `onBreak`) - Move peer dependencies for controller and service packages to direct dependencies ([#7209](https://github.com/MetaMask/core/pull/7209)) - The dependencies moved are: - `@metamask/error-reporting-service` (^3.0.0) diff --git a/packages/network-controller/package.json b/packages/network-controller/package.json index 4c1e0e86d6c..a002870cd01 100644 --- a/packages/network-controller/package.json +++ b/packages/network-controller/package.json @@ -78,6 +78,7 @@ "@types/jest-when": "^2.7.3", "@types/lodash": "^4.14.191", "@types/node-fetch": "^2.6.12", + "cockatiel": "^3.1.2", "deep-freeze-strict": "^1.1.1", "deepmerge": "^4.2.2", "jest": "^27.5.1", diff --git a/packages/network-controller/src/NetworkController.ts b/packages/network-controller/src/NetworkController.ts index 2bb04f387ad..0b721924031 100644 --- a/packages/network-controller/src/NetworkController.ts +++ b/packages/network-controller/src/NetworkController.ts @@ -443,9 +443,48 @@ export type NetworkControllerNetworkRemovedEvent = { }; /** - * `rpcEndpointUnavailable` is published after an attempt to make a request to - * an RPC endpoint fails too many times in a row (because of a connection error - * or an unusable response). + * `NetworkController:rpcEndpointChainUnavailable` is published when, after + * trying all endpoints in an endpoint chain, the last failover reaches a + * maximum number of consecutive 5xx responses, breaking the underlying circuit. + * + * In other words, this event will not be published if a failover is available, + * even if the primary is not. + * + * @param payload - The event payload. + * @param payload.chainId - The target network's chain ID. + * @param payload.error - The last error produced by the last failover in the + * endpoint chain. + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. + */ +export type NetworkControllerRpcEndpointChainUnavailableEvent = { + type: 'NetworkController:rpcEndpointChainUnavailable'; + payload: [ + { + chainId: Hex; + error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; + }, + ]; +}; + +/** + * `NetworkController:rpcEndpointUnavailable` is published when any + * endpoint in an endpoint chain reaches a maximum number of consecutive 5xx + * responses, breaking the underlying circuit. + * + * In other words, this event will be published if a primary is not available, + * even if a failover is. + * + * @param payload - The event payload. + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint which reached the + * maximum number of consecutive 5xx responses. You can compare this to + * `primaryEndpointUrl` to know whether it was a failover or a primary. + * @param payload.error - The last error produced by the endpoint. + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. 
*/ export type NetworkControllerRpcEndpointUnavailableEvent = { type: 'NetworkController:rpcEndpointUnavailable'; @@ -453,15 +492,70 @@ export type NetworkControllerRpcEndpointUnavailableEvent = { { chainId: Hex; endpointUrl: string; - failoverEndpointUrl?: string; error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; }, ]; }; /** - * `rpcEndpointDegraded` is published after a request to an RPC endpoint - * responds successfully but takes too long. + * `NetworkController:rpcEndpointChainDegraded` is published for any of the + * endpoints in an endpoint chain when one of the following two conditions hold + * (and the chain is not already in a degraded state): + * + * 1. A successful (2xx) request, even after being retried, cannot be made to + * the endpoint. + * 2. A successful (2xx) request can be made to the endpoint, but it takes + * longer than expected to complete. + * + * Note that this event will be published even if there are local connectivity + * issues which prevent requests from being initiated. This is intentional. + * + * @param payload - The event payload. + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint for which requests + * failed or were slow to complete. + * @param payload.error - The last error produced by the endpoint (or + * `undefined` if the request was slow). + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. + */ +export type NetworkControllerRpcEndpointChainDegradedEvent = { + type: 'NetworkController:rpcEndpointChainDegraded'; + payload: [ + { + chainId: Hex; + endpointUrl: string; + error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; + }, + ]; +}; + +/** + * + * `NetworkController:rpcEndpointDegraded` is published for any of the endpoints + * in an endpoint chain when: + * + * 1. A successful (2xx) request, even after being retried, cannot be made to + * the endpoint. + * 2. A successful (2xx) request can be made to the endpoint, but it takes + * longer than expected to complete. + * + * Note that this event will be published even if there are local connectivity + * issues which prevent requests from being initiated. This is intentional. + * + * @param payload - The event payload. + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint for which requests + * failed or were slow to complete. You can compare this to `primaryEndpointUrl` + * to know whether it was a failover or a primary. + * @param payload.error - The last error produced by the endpoint (or + * `undefined` if the request was slow). + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. */ export type NetworkControllerRpcEndpointDegradedEvent = { type: 'NetworkController:rpcEndpointDegraded'; @@ -470,20 +564,64 @@ export type NetworkControllerRpcEndpointDegradedEvent = { chainId: Hex; endpointUrl: string; error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; }, ]; }; /** - * `rpcEndpointRequestRetried` is published after a request to an RPC endpoint - * is retried following a connection error or an unusable response. + * `NetworkController:rpcEndpointChainAvailable` is published in one of two + * cases: + * + * 1. The first time that a 2xx request is made to any of the endpoints in an + * endpoint chain. 
+ * 2. When requests to any of the endpoints previously failed (placing the + * endpoint in a degraded or unavailable status), but are now succeeding again. + * + * @param payload - The event payload. + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint which meets either of + * the above conditions. + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. */ -export type NetworkControllerRpcEndpointRequestRetriedEvent = { - type: 'NetworkController:rpcEndpointRequestRetried'; +export type NetworkControllerRpcEndpointChainAvailableEvent = { + type: 'NetworkController:rpcEndpointChainAvailable'; payload: [ { + chainId: Hex; endpointUrl: string; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; + }, + ]; +}; + +/** + * `NetworkController:rpcEndpointRetried` is published before a request to any + * endpoint in an endpoint chain is retried. + * + * This is mainly useful for tests. + * + * @param payload - The event payload. + * @param payload.attempt - The current attempt counter for the endpoint + * (starting from 0). + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint being retried. + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. + * @see {@link RpcService} for the list of retriable errors. + */ +export type NetworkControllerRpcEndpointRetriedEvent = { + type: 'NetworkController:rpcEndpointRetried'; + payload: [ + { attempt: number; + chainId: Hex; + endpointUrl: string; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; }, ]; }; @@ -496,9 +634,12 @@ export type NetworkControllerEvents = | NetworkControllerInfuraIsUnblockedEvent | NetworkControllerNetworkAddedEvent | NetworkControllerNetworkRemovedEvent + | NetworkControllerRpcEndpointChainUnavailableEvent | NetworkControllerRpcEndpointUnavailableEvent + | NetworkControllerRpcEndpointChainDegradedEvent | NetworkControllerRpcEndpointDegradedEvent - | NetworkControllerRpcEndpointRequestRetriedEvent; + | NetworkControllerRpcEndpointChainAvailableEvent + | NetworkControllerRpcEndpointRetriedEvent; /** * All events that {@link NetworkController} calls internally. 
@@ -2800,6 +2941,7 @@ export class NetworkController extends BaseController< autoManagedNetworkClientRegistry[NetworkClientType.Infura][ addedRpcEndpoint.networkClientId ] = createAutoManagedNetworkClient({ + networkClientId: addedRpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Infura, chainId: networkFields.chainId, @@ -2818,6 +2960,7 @@ export class NetworkController extends BaseController< autoManagedNetworkClientRegistry[NetworkClientType.Custom][ addedRpcEndpoint.networkClientId ] = createAutoManagedNetworkClient({ + networkClientId: addedRpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Custom, chainId: networkFields.chainId, @@ -2980,6 +3123,7 @@ export class NetworkController extends BaseController< return [ rpcEndpoint.networkClientId, createAutoManagedNetworkClient({ + networkClientId: rpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Infura, network: infuraNetworkName, @@ -2999,6 +3143,7 @@ export class NetworkController extends BaseController< return [ rpcEndpoint.networkClientId, createAutoManagedNetworkClient({ + networkClientId: rpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Custom, chainId: networkConfiguration.chainId, diff --git a/packages/network-controller/src/create-auto-managed-network-client.test.ts b/packages/network-controller/src/create-auto-managed-network-client.test.ts index 3b49ccda1f5..c30208ce167 100644 --- a/packages/network-controller/src/create-auto-managed-network-client.test.ts +++ b/packages/network-controller/src/create-auto-managed-network-client.test.ts @@ -35,6 +35,7 @@ describe('createAutoManagedNetworkClient', () => { describe(`given configuration for a ${networkClientConfiguration.type} network client`, () => { it('allows the network client configuration to be accessed', () => { const { configuration } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -51,6 +52,7 @@ describe('createAutoManagedNetworkClient', () => { // If unexpected requests occurred, then Nock would throw expect(() => { createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -64,6 +66,7 @@ describe('createAutoManagedNetworkClient', () => { it('returns a provider proxy that has the same interface as a provider', () => { const { provider } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -97,6 +100,7 @@ describe('createAutoManagedNetworkClient', () => { }); const { provider } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -145,6 +149,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const { provider } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -166,6 +171,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenCalledTimes(1); expect(createNetworkClientMock).toHaveBeenCalledWith({ + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -204,6 +210,7 @@ 
describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -227,6 +234,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -234,6 +242,7 @@ describe('createAutoManagedNetworkClient', () => { isRpcFailoverEnabled: false, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -272,6 +281,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -295,6 +305,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -302,6 +313,7 @@ describe('createAutoManagedNetworkClient', () => { isRpcFailoverEnabled: true, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -313,6 +325,7 @@ describe('createAutoManagedNetworkClient', () => { it('returns a block tracker proxy that has the same interface as a block tracker', () => { const { blockTracker } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -372,6 +385,7 @@ describe('createAutoManagedNetworkClient', () => { }); const { blockTracker } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -441,6 +455,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const { blockTracker } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -458,6 +473,7 @@ describe('createAutoManagedNetworkClient', () => { await blockTracker.checkForLatestBlock(); expect(createNetworkClientMock).toHaveBeenCalledTimes(1); expect(createNetworkClientMock).toHaveBeenCalledWith({ + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -496,6 +512,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -513,6 +530,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -520,6 +538,7 @@ describe('createAutoManagedNetworkClient', () => { 
isRpcFailoverEnabled: false, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -558,6 +577,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -575,6 +595,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -582,6 +603,7 @@ describe('createAutoManagedNetworkClient', () => { isRpcFailoverEnabled: true, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -608,6 +630,7 @@ describe('createAutoManagedNetworkClient', () => { ], }); const { blockTracker, destroy } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, diff --git a/packages/network-controller/src/create-auto-managed-network-client.ts b/packages/network-controller/src/create-auto-managed-network-client.ts index 5ab700a1737..3bdded89d83 100644 --- a/packages/network-controller/src/create-auto-managed-network-client.ts +++ b/packages/network-controller/src/create-auto-managed-network-client.ts @@ -3,7 +3,10 @@ import type { Logger } from 'loglevel'; import type { NetworkClient } from './create-network-client'; import { createNetworkClient } from './create-network-client'; -import type { NetworkControllerMessenger } from './NetworkController'; +import type { + NetworkClientId, + NetworkControllerMessenger, +} from './NetworkController'; import type { RpcServiceOptions } from './rpc-service/rpc-service'; import type { BlockTracker, @@ -65,6 +68,8 @@ const UNINITIALIZED_TARGET = { __UNINITIALIZED__: true }; * then cached for subsequent usages. * * @param args - The arguments. + * @param args.networkClientId - The ID that will be assigned to the new network + * client in the registry. * @param args.networkClientConfiguration - The configuration object that will be * used to instantiate the network client when it is needed. 
* @param args.getRpcServiceOptions - Factory for constructing RPC service @@ -81,6 +86,7 @@ const UNINITIALIZED_TARGET = { __UNINITIALIZED__: true }; export function createAutoManagedNetworkClient< Configuration extends NetworkClientConfiguration, >({ + networkClientId, networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions = () => ({}), @@ -88,6 +94,7 @@ export function createAutoManagedNetworkClient< isRpcFailoverEnabled: givenIsRpcFailoverEnabled, logger, }: { + networkClientId: NetworkClientId; networkClientConfiguration: Configuration; getRpcServiceOptions: ( rpcEndpointUrl: string, @@ -104,6 +111,7 @@ export function createAutoManagedNetworkClient< const ensureNetworkClientCreated = (): NetworkClient => { networkClient ??= createNetworkClient({ + id: networkClientId, configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, diff --git a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts new file mode 100644 index 00000000000..d843a3476cf --- /dev/null +++ b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts @@ -0,0 +1,1256 @@ +import { + ConstantBackoff, + DEFAULT_DEGRADED_THRESHOLD, + HttpError, +} from '@metamask/controller-utils'; +import { errorCodes } from '@metamask/rpc-errors'; + +import { buildRootMessenger } from '../../tests/helpers'; +import { + withMockedCommunications, + withNetworkClient, +} from '../../tests/network-client/helpers'; +import { DEFAULT_MAX_CONSECUTIVE_FAILURES } from '../rpc-service/rpc-service'; +import { NetworkClientType } from '../types'; + +describe('createNetworkClient - RPC endpoint events', () => { + for (const networkClientType of Object.values(NetworkClientType)) { + describe(`${networkClientType}`, () => { + const blockNumber = '0x100'; + const backoffDuration = 100; + + describe('with RPC failover', () => { + it('publishes the NetworkController:rpcEndpointChainUnavailable event only when the max number of consecutive request failures is reached for all of the endpoints in a chain of endpoints', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedUnavailableError = new HttpError(503); + + const messenger = buildRootMessenger(); + const rpcEndpointChainUnavailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointChainUnavailable', + rpcEndpointChainUnavailableEventHandler, + ); + + await withNetworkClient( + { + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + providerType: networkClientType, + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + mockedEndpoints: [ + { providerType: networkClientType }, + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + ], + }, + async ({ makeRpcCall, clock, chainId, rpcUrl, comms }) => { + if (!comms) { + throw new Error( + 'comms should be defined when mockedEndpoints is provided', + ); + } + const [primaryComms, failoverComms] = comms; + + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ primaryComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries, + // breaking the circuit; then hit the failover and exceed + // the max of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the failover and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the failover and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + + expect( + rpcEndpointChainUnavailableEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainUnavailableEventHandler, + ).toHaveBeenCalledWith({ + chainId, + error: expectedUnavailableError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointUnavailable event each time the max number of consecutive request failures is reached for any of the endpoints in a chain of endpoints', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedUnavailableError = new HttpError(503); + + const messenger = buildRootMessenger(); + const rpcEndpointUnavailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointUnavailable', + rpcEndpointUnavailableEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + mockedEndpoints: [ + { providerType: networkClientType }, + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + ], + }, + async ({ makeRpcCall, clock, chainId, rpcUrl, comms }) => { + if (!comms) { + throw new Error( + 'comms should be defined when mockedEndpoints is provided', + ); + } + const [primaryComms, failoverComms] = comms; + + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ primaryComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries, + // breaking the circuit; then hit the failover and exceed + // the max of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the failover and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the failover and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + + expect(rpcEndpointUnavailableEventHandler).toHaveBeenCalledTimes( + 2, + ); + expect(rpcEndpointUnavailableEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedUnavailableError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect(rpcEndpointUnavailableEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: failoverEndpointUrl, + error: expectedUnavailableError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }); + + it('does not publish the NetworkController:rpcEndpointChainDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + const messenger = buildRootMessenger(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + mockedEndpoints: [ + { providerType: networkClientType }, + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + ], + }, + async ({ makeRpcCall, clock, chainId, rpcUrl, comms }) => { + if (!comms) { + throw new Error( + 'comms should be defined when mockedEndpoints is provided', + ); + } + const [primaryComms, failoverComms] = comms; + + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ primaryComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: 5, + response: { + httpStatus: 503, + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover and exceed the max + // number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect(rpcEndpointChainDegradedEventHandler).toHaveBeenCalledWith( + { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + }, + ); + }); + + it('does not publish the NetworkController:rpcEndpointChainDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + const messenger = buildRootMessenger(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + mockedEndpoints: [ + { providerType: networkClientType }, + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + ], + }, + async ({ makeRpcCall, clock, chainId, rpcUrl, comms }) => { + if (!comms) { + throw new Error( + 'comms should be defined when mockedEndpoints is provided', + ); + } + const [primaryComms, failoverComms] = comms; + + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: '0x1', + }; + }, + }); + failoverComms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover + await makeRpcCall(request); + + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect(rpcEndpointChainDegradedEventHandler).toHaveBeenCalledWith( + { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + mockedEndpoints: [ + { providerType: networkClientType }, + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + ], + }, + async ({ makeRpcCall, clock, chainId, rpcUrl, comms }) => { + if (!comms) { + throw new Error( + 'comms should be defined when mockedEndpoints is provided', + ); + } + const [primaryComms, failoverComms] = comms; + + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: 5, + response: { + httpStatus: 503, + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover and exceed the max + // number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes(3); + expect(rpcEndpointDegradedEventHandler).toHaveBeenNthCalledWith( + 1, + { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenNthCalledWith( + 2, + { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenNthCalledWith( + 3, + { + chainId, + endpointUrl: failoverEndpointUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + mockedEndpoints: [ + { providerType: networkClientType }, + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + ], + }, + async ({ makeRpcCall, clock, chainId, rpcUrl, comms }) => { + if (!comms) { + throw new Error( + 'comms should be defined when mockedEndpoints is provided', + ); + } + const [primaryComms, failoverComms] = comms; + + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: '0x1', + }; + }, + }); + failoverComms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover + await makeRpcCall(request); + + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes(4); + expect(rpcEndpointDegradedEventHandler).toHaveBeenNthCalledWith( + 1, + { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenNthCalledWith( + 2, + { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenNthCalledWith( + 3, + { + chainId, + endpointUrl: failoverEndpointUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenNthCalledWith( + 4, + { + chainId, + endpointUrl: failoverEndpointUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointChainAvailable event the first time a successful request to a failover endpoint is made', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + + const messenger = buildRootMessenger(); + const rpcEndpointChainAvailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointChainAvailable', + rpcEndpointChainAvailableEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + mockedEndpoints: [ + { providerType: networkClientType }, + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + ], + }, + async ({ makeRpcCall, clock, chainId, rpcUrl, comms }) => { + if (!comms) { + throw new Error( + 'comms should be defined when mockedEndpoints is provided', + ); + } + const [primaryComms, failoverComms] = comms; + + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: { + result: '0x1', + }, + }); + failoverComms.mockRpcCall({ + request, + response: { + result: 'ok', + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow(expectedError); + // Hit the endpoint and exceed the max number of retries, + // breaking the circuit; hit the failover + await makeRpcCall(request); + + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: failoverEndpointUrl, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }); + }); + + describe('without RPC failover', () => { + it('publishes the NetworkController:rpcEndpointChainDegraded event only once, even if the max number of retries is continually reached in making requests to a primary endpoint', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + comms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + const messenger = buildRootMessenger(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointChainDegraded event only once, even if the time to complete a request to a primary endpoint is continually too long', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + const messenger = buildRootMessenger(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + comms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: '0x1', + }; + }, + }); + comms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + times: 2, + }); + + await makeRpcCall(request); + await makeRpcCall(request); + + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointDegraded event each time the max number of retries is reached in making requests to a primary endpoint', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ comms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + messenger.subscribe( + 'NetworkController:rpcEndpointRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( + 2, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointDegraded event when the time to complete a request to a primary endpoint is continually too long', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getBlockTrackerOptions: () => ({ + pollingInterval: 10000, + }), + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ comms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: '0x1', + }; + }, + }); + comms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + }); + + await makeRpcCall(request); + + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( + 2, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointChainAvailable event the first time a successful request to a (primary) RPC endpoint is made', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + comms.mockNextBlockTrackerRequest({ + blockNumber, + }); + comms.mockRpcCall({ + request, + response: { + result: 'ok', + }, + }); + + const messenger = buildRootMessenger(); + const rpcEndpointChainAvailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointChainAvailable', + rpcEndpointChainAvailableEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, chainId, rpcUrl }) => { + await makeRpcCall(request); + + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + }); + }); + } +}); + +/** + * Creates a "resource unavailable" RPC error for testing. + * + * @param httpStatus - The HTTP status that the error represents. + * @returns The RPC error. 
+ */ +function createResourceUnavailableError(httpStatus: number) { + return expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: 'RPC endpoint not found or unavailable.', + data: { + httpStatus, + }, + }); +} diff --git a/packages/network-controller/src/create-network-client.ts b/packages/network-controller/src/create-network-client.ts index 8ae4565b767..2f2e2d0b87a 100644 --- a/packages/network-controller/src/create-network-client.ts +++ b/packages/network-controller/src/create-network-client.ts @@ -26,7 +26,10 @@ import type { import type { Hex, Json, JsonRpcRequest } from '@metamask/utils'; import type { Logger } from 'loglevel'; -import type { NetworkControllerMessenger } from './NetworkController'; +import type { + NetworkClientId, + NetworkControllerMessenger, +} from './NetworkController'; import type { RpcServiceOptions } from './rpc-service/rpc-service'; import { RpcServiceChain } from './rpc-service/rpc-service-chain'; import type { @@ -59,6 +62,8 @@ type RpcApiMiddleware = JsonRpcMiddleware< * Create a JSON RPC network client for a specific network. * * @param args - The arguments. + * @param args.id - The ID that will be assigned to the new network client in + * the registry. * @param args.configuration - The network configuration. * @param args.getRpcServiceOptions - Factory for constructing RPC service * options. See {@link NetworkControllerOptions.getRpcServiceOptions}. @@ -74,6 +79,7 @@ type RpcApiMiddleware = JsonRpcMiddleware< * @returns The network client. */ export function createNetworkClient({ + id, configuration, getRpcServiceOptions, getBlockTrackerOptions, @@ -81,6 +87,7 @@ export function createNetworkClient({ isRpcFailoverEnabled, logger, }: { + id: NetworkClientId; configuration: NetworkClientConfiguration; getRpcServiceOptions: ( rpcEndpointUrl: string, @@ -96,50 +103,14 @@ export function createNetworkClient({ configuration.type === NetworkClientType.Infura ? `https://${configuration.network}.infura.io/v3/${configuration.infuraProjectId}` : configuration.rpcUrl; - const availableEndpointUrls = isRpcFailoverEnabled - ? [primaryEndpointUrl, ...(configuration.failoverRpcUrls ?? 
[])] - : [primaryEndpointUrl]; - const rpcServiceChain = new RpcServiceChain( - availableEndpointUrls.map((endpointUrl) => ({ - ...getRpcServiceOptions(endpointUrl), - endpointUrl, - logger, - })), - ); - rpcServiceChain.onBreak(({ endpointUrl, failoverEndpointUrl, ...rest }) => { - let error: unknown; - if ('error' in rest) { - error = rest.error; - } else if ('value' in rest) { - error = rest.value; - } - - messenger.publish('NetworkController:rpcEndpointUnavailable', { - chainId: configuration.chainId, - endpointUrl, - failoverEndpointUrl, - error, - }); - }); - rpcServiceChain.onDegraded(({ endpointUrl, ...rest }) => { - let error: unknown; - if ('error' in rest) { - error = rest.error; - } else if ('value' in rest) { - error = rest.value; - } - - messenger.publish('NetworkController:rpcEndpointDegraded', { - chainId: configuration.chainId, - endpointUrl, - error, - }); - }); - rpcServiceChain.onRetry(({ endpointUrl, attempt }) => { - messenger.publish('NetworkController:rpcEndpointRequestRetried', { - endpointUrl, - attempt, - }); + const rpcServiceChain = createRpcServiceChain({ + id, + primaryEndpointUrl, + configuration, + getRpcServiceOptions, + messenger, + isRpcFailoverEnabled, + logger, }); let rpcApiMiddleware: RpcApiMiddleware; @@ -194,6 +165,189 @@ export function createNetworkClient({ return { configuration, provider, blockTracker, destroy }; } +/** + * Creates an RPC service chain, which represents the primary endpoint URL for + * the network as well as its failover URLs. + * + * @param args - The arguments. + * @param args.id - The ID that will be assigned to the new network client in + * the registry. + * @param args.primaryEndpointUrl - The primary endpoint URL. + * @param args.configuration - The network configuration. + * @param args.getRpcServiceOptions - Factory for constructing RPC service + * options. See {@link NetworkControllerOptions.getRpcServiceOptions}. + * @param args.messenger - The network controller messenger. + * @param args.isRpcFailoverEnabled - Whether or not requests sent to the + * primary RPC endpoint for this network should be automatically diverted to + * provided failover endpoints if the primary is unavailable. This effectively + * causes the `failoverRpcUrls` property of the network client configuration + * to be honored or ignored. + * @param args.logger - A `loglevel` logger. + * @returns The RPC service chain. + */ +function createRpcServiceChain({ + id, + primaryEndpointUrl, + configuration, + getRpcServiceOptions, + messenger, + isRpcFailoverEnabled, + logger, +}: { + id: NetworkClientId; + primaryEndpointUrl: string; + configuration: NetworkClientConfiguration; + getRpcServiceOptions: ( + rpcEndpointUrl: string, + ) => Omit; + messenger: NetworkControllerMessenger; + isRpcFailoverEnabled: boolean; + logger?: Logger; +}) { + const availableEndpointUrls: [string, ...string[]] = isRpcFailoverEnabled + ? [primaryEndpointUrl, ...(configuration.failoverRpcUrls ?? [])] + : [primaryEndpointUrl]; + const rpcServiceConfigurations = availableEndpointUrls.map((endpointUrl) => ({ + ...getRpcServiceOptions(endpointUrl), + endpointUrl, + logger, + })); + + /** + * Extracts the error from Cockatiel's `FailureReason` type received in + * circuit breaker event handlers. + * + * The `FailureReason` object can have two possible shapes: + * - `{ error: Error }` - When the RPC service throws an error (the common + * case for RPC failures). + * - `{ value: T }` - When the RPC service returns a value that the retry + * filter policy considers a failure. 
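For reference, the two shapes can be handled with a small standalone helper like the sketch below. The local `SketchFailureReason` alias and the `extractFailure` name are illustrative assumptions; they only approximate Cockatiel's `FailureReason` union and the inline `getError` helper added here.

```ts
// Illustrative approximation of Cockatiel's FailureReason union (assumption;
// the real type is defined by the `cockatiel` package).
type SketchFailureReason<Value> = { error: Error } | { value: Value };

// Prefer `error`, fall back to `value`, and return `undefined` if neither
// property is present, mirroring the extraction described above.
function extractFailure(reason: object): unknown {
  if ('error' in reason) {
    return reason.error;
  }
  if ('value' in reason) {
    return reason.value;
  }
  return undefined;
}

// Error-shaped failure, e.g. a 5xx response surfaced as an Error:
const errorShaped: SketchFailureReason<never> = {
  error: new Error("Fetch failed with status '503'"),
};
extractFailure(errorShaped); // => the Error instance

// Value-shaped failure: a response the retry filter treats as a failure:
extractFailure({ value: { id: 1, jsonrpc: '2.0', result: null } });
```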
+ * + * @param value - The event data object from the circuit breaker event + * listener (after destructuring known properties like `endpointUrl` and + * `primaryEndpointUrl`). This represents Cockatiel's `FailureReason` type. + * @returns The error or failure value, or `undefined` if neither property + * exists (which shouldn't happen in practice unless the circuit breaker is + * manually isolated). + */ + const getError = (value: object) => { + if ('error' in value) { + return value.error; + } else if ('value' in value) { + return value.value; + } + return undefined; + }; + + const rpcServiceChain = new RpcServiceChain([ + rpcServiceConfigurations[0], + ...rpcServiceConfigurations.slice(1), + ]); + + rpcServiceChain.onBreak( + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { + const error = getError(rest); + + if (error === undefined) { + // This error shouldn't happen in practice because we never call `.isolate` + // on the circuit breaker policy, but we need to appease TypeScript. + throw new Error('Could not make request to endpoint.'); + } + + messenger.publish('NetworkController:rpcEndpointChainUnavailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + error, + }); + }, + ); + + rpcServiceChain.onServiceBreak( + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { + const error = getError(rest); + messenger.publish('NetworkController:rpcEndpointUnavailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + endpointUrl, + error, + }); + }, + ); + + rpcServiceChain.onDegraded( + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { + const error = getError(rest); + messenger.publish('NetworkController:rpcEndpointChainDegraded', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + endpointUrl, + error, + }); + }, + ); + + rpcServiceChain.onServiceDegraded( + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { + const error = getError(rest); + messenger.publish('NetworkController:rpcEndpointDegraded', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + endpointUrl, + error, + }); + }, + ); + + rpcServiceChain.onAvailable( + ({ endpointUrl, primaryEndpointUrl: primaryEndpointUrlFromEvent }) => { + messenger.publish('NetworkController:rpcEndpointChainAvailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + endpointUrl, + }); + }, + ); + + rpcServiceChain.onServiceRetry( + ({ + attempt, + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + }) => { + messenger.publish('NetworkController:rpcEndpointRetried', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + endpointUrl, + attempt, + }); + }, + ); + + return rpcServiceChain; +} + /** * Create the block tracker for the network. 
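To show how the events published above might be consumed, here is a hedged sketch of a subscriber that tracks chain-level status per network client. The `ChainStatusMessenger` shape and the `trackChainStatus` helper are assumptions for illustration; in the controller tests above, subscriptions go through the root messenger returned by `buildRootMessenger()`.

```ts
// Minimal structural stand-in for a messenger that allows these events
// (assumption; real consumers would use their own restricted messenger).
type ChainStatusMessenger = {
  subscribe(
    event:
      | 'NetworkController:rpcEndpointChainAvailable'
      | 'NetworkController:rpcEndpointChainDegraded'
      | 'NetworkController:rpcEndpointChainUnavailable',
    handler: (payload: {
      chainId: string; // hex chain ID
      networkClientId: string;
      primaryEndpointUrl: string;
      endpointUrl?: string;
      error?: unknown;
    }) => void,
  ): void;
};

// Hypothetical consumer: keep a per-network status map up to date as the
// chain-level events fire.
function trackChainStatus(messenger: ChainStatusMessenger) {
  const statusByNetworkClientId = new Map<
    string,
    'available' | 'degraded' | 'unavailable'
  >();

  messenger.subscribe(
    'NetworkController:rpcEndpointChainAvailable',
    ({ networkClientId }) => {
      statusByNetworkClientId.set(networkClientId, 'available');
    },
  );
  messenger.subscribe(
    'NetworkController:rpcEndpointChainDegraded',
    ({ networkClientId }) => {
      statusByNetworkClientId.set(networkClientId, 'degraded');
    },
  );
  messenger.subscribe(
    'NetworkController:rpcEndpointChainUnavailable',
    ({ networkClientId }) => {
      statusByNetworkClientId.set(networkClientId, 'unavailable');
    },
  );

  return statusByNetworkClientId;
}
```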
* diff --git a/packages/network-controller/src/index.ts b/packages/network-controller/src/index.ts index 96d93fb02d9..98153162fe7 100644 --- a/packages/network-controller/src/index.ts +++ b/packages/network-controller/src/index.ts @@ -36,9 +36,12 @@ export type { NetworkControllerActions, NetworkControllerMessenger, NetworkControllerOptions, + NetworkControllerRpcEndpointChainUnavailableEvent, NetworkControllerRpcEndpointUnavailableEvent, + NetworkControllerRpcEndpointChainDegradedEvent, NetworkControllerRpcEndpointDegradedEvent, - NetworkControllerRpcEndpointRequestRetriedEvent, + NetworkControllerRpcEndpointChainAvailableEvent, + NetworkControllerRpcEndpointRetriedEvent, } from './NetworkController'; export { getDefaultNetworkControllerState, diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts index 3a2c31bfd55..0b6ff504504 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts @@ -1,17 +1,43 @@ +import { + DEFAULT_CIRCUIT_BREAK_DURATION, + DEFAULT_DEGRADED_THRESHOLD, + HttpError, +} from '@metamask/controller-utils'; import { errorCodes } from '@metamask/rpc-errors'; import nock from 'nock'; import { useFakeTimers } from 'sinon'; import type { SinonFakeTimers } from 'sinon'; +import { + DEFAULT_MAX_CONSECUTIVE_FAILURES, + DEFAULT_MAX_RETRIES, +} from './rpc-service'; import { RpcServiceChain } from './rpc-service-chain'; -const RESOURCE_UNAVAILABLE_ERROR = expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: 'RPC endpoint not found or unavailable.', - data: { - httpStatus: 503, - }, -}); +/** + * The number of fetch requests made for a single request to an RPC service, using default max + * retry attempts. + */ +const DEFAULT_REQUEST_ATTEMPTS = 1 + DEFAULT_MAX_RETRIES; + +/** + * Number of attempts required to break the circuit of an RPC service using default retry attempts + * and max consecutive failures. + * + * Note: This calculation and later ones assume that there is no remainder. + */ +const DEFAULT_RPC_SERVICE_ATTEMPTS_UNTIL_BREAK = + DEFAULT_MAX_CONSECUTIVE_FAILURES / DEFAULT_REQUEST_ATTEMPTS; + +/** + * Number of attempts required to break the circuit of an RPC service chain (with a single + * failover) that uses default retry attempts and max consecutive failures. + * + * The value is one less than double the number of attempts needed to break a single circuit + * because on failure of the primary, the request gets forwarded to the failover immediately. 
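As a concrete check on the arithmetic described in these doc comments, the following sketch plugs in the default values the surrounding tests appear to rely on (4 retries per request and 15 consecutive failures to break a circuit). These numbers are inferred from the tests, so verify them against the constants exported by `./rpc-service`.

```ts
// Values below are assumptions inferred from the surrounding tests (e.g.
// `.times(DEFAULT_MAX_CONSECUTIVE_FAILURES)` replacing `.times(15)`, and retry
// attempts cycling 1 through 4); check the real exported constants before
// relying on them.
const ASSUMED_MAX_RETRIES = 4;
const ASSUMED_MAX_CONSECUTIVE_FAILURES = 15;

// 1 initial attempt + 4 retries = 5 fetches per `request()` call.
const requestAttempts = 1 + ASSUMED_MAX_RETRIES; // 5

// 15 consecutive failures / 5 fetches per call = 3 calls to break one service.
const serviceAttemptsUntilBreak =
  ASSUMED_MAX_CONSECUTIVE_FAILURES / requestAttempts; // 3

// With one failover, the call that breaks the primary immediately forwards to
// the failover, so the chain breaks after 2 * 3 - 1 = 5 calls, not 6.
const chainAttemptsUntilBreak = 2 * serviceAttemptsUntilBreak - 1; // 5
```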
+ */ +const DEFAULT_RPC_CHAIN_ATTEMPTS_UNTIL_BREAK = + 2 * DEFAULT_RPC_SERVICE_ATTEMPTS_UNTIL_BREAK - 1; describe('RpcServiceChain', () => { let clock: SinonFakeTimers; @@ -24,7 +50,7 @@ describe('RpcServiceChain', () => { clock.restore(); }); - describe('onRetry', () => { + describe('onServiceRetry', () => { it('returns a listener which can be disposed', () => { const rpcServiceChain = new RpcServiceChain([ { @@ -34,10 +60,10 @@ describe('RpcServiceChain', () => { }, ]); - const onRetryListener = rpcServiceChain.onRetry(() => { + const onServiceRetryListener = rpcServiceChain.onServiceRetry(() => { // do whatever }); - expect(onRetryListener.dispose()).toBeUndefined(); + expect(onServiceRetryListener.dispose()).toBeUndefined(); }); }); @@ -58,6 +84,23 @@ describe('RpcServiceChain', () => { }); }); + describe('onServiceBreak', () => { + it('returns a listener which can be disposed', () => { + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }, + ]); + + const onServiceBreakListener = rpcServiceChain.onServiceBreak(() => { + // do whatever + }); + expect(onServiceBreakListener.dispose()).toBeUndefined(); + }); + }); + describe('onDegraded', () => { it('returns a listener which can be disposed', () => { const rpcServiceChain = new RpcServiceChain([ @@ -75,9 +118,45 @@ describe('RpcServiceChain', () => { }); }); + describe('onServiceDegraded', () => { + it('returns a listener which can be disposed', () => { + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }, + ]); + + const onServiceDegradedListener = rpcServiceChain.onServiceDegraded( + () => { + // do whatever + }, + ); + expect(onServiceDegradedListener.dispose()).toBeUndefined(); + }); + }); + + describe('onAvailable', () => { + it('returns a listener which can be disposed', () => { + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }, + ]); + + const onAvailableListener = rpcServiceChain.onAvailable(() => { + // do whatever + }); + expect(onAvailableListener.dispose()).toBeUndefined(); + }); + }); + describe('request', () => { it('returns what the first RPC service in the chain returns, if it succeeds', async () => { - nock('https://first.chain') + nock('https://first.endpoint') .post('/', { id: 1, jsonrpc: '2.0', @@ -94,12 +173,12 @@ describe('RpcServiceChain', () => { { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: 'https://first.endpoint', }, { fetch, btoa, - endpointUrl: 'https://second.chain', + endpointUrl: 'https://second.endpoint', fetchOptions: { headers: { 'X-Foo': 'Bar', @@ -127,30 +206,24 @@ describe('RpcServiceChain', () => { }); }); - it('uses the other RPC services in the chain as failovers', async () => { - nock('https://first.chain') - .post( - '/', - { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }, - { - reqheaders: {}, - }, - ) - .times(15) + it('returns what a failover service returns, if the primary is unavailable and the failover is not', async () => { + nock('https://first.endpoint') + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock('https://second.endpoint') .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); nock('https://third.chain') .post('/', { @@ -164,22 
+237,17 @@ describe('RpcServiceChain', () => { jsonrpc: '2.0', result: 'ok', }); - + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: 'https://first.endpoint', }, { fetch, btoa, - endpointUrl: 'https://second.chain', - fetchOptions: { - headers: { - 'X-Foo': 'Bar', - }, - }, + endpointUrl: 'https://second.endpoint', }, { fetch, @@ -187,11 +255,8 @@ describe('RpcServiceChain', () => { endpointUrl: 'https://third.chain', }, ]); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); const jsonRpcRequest = { @@ -202,22 +267,22 @@ describe('RpcServiceChain', () => { }; // Retry the first endpoint until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint again, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. 
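For orientation before the remaining cases, here is a minimal sketch of how the chain under test is constructed and queried. The endpoint URLs are placeholders, and `fetch`/`btoa` are the platform globals the tests pass in.

```ts
import { RpcServiceChain } from './rpc-service-chain';

// Primary first, then failovers, mirroring the test setup above.
const chain = new RpcServiceChain([
  { fetch, btoa, endpointUrl: 'https://primary.example' },
  { fetch, btoa, endpointUrl: 'https://failover.example' },
]);

// Per-service events ("onService*") fire for whichever endpoint handled the
// request; onBreak, onDegraded, and onAvailable describe the chain as a whole.
chain.onServiceRetry(({ endpointUrl, attempt }) => {
  console.log(`retrying ${endpointUrl} (attempt ${attempt})`);
});

// A request is retried against the primary and, once the primary's circuit
// breaks, forwarded to the next service in the chain.
async function fetchChainId() {
  return await chain.request({
    id: 1,
    jsonrpc: '2.0' as const,
    method: 'eth_chainId',
    params: [],
  });
}
```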
@@ -233,7 +298,7 @@ describe('RpcServiceChain', () => { }); it("allows each RPC service's fetch options to be configured separately, yet passes the fetch options given to request to all of them", async () => { - const firstEndpointScope = nock('https://first.chain', { + const firstEndpointScope = nock('https://first.endpoint', { reqheaders: { 'X-Fizz': 'Buzz', }, @@ -244,11 +309,10 @@ describe('RpcServiceChain', () => { method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - const secondEndpointScope = nock('https://second.chain', { + const secondEndpointScope = nock('https://second.endpoint', { reqheaders: { - 'X-Foo': 'Bar', 'X-Fizz': 'Buzz', }, }) @@ -258,11 +322,10 @@ describe('RpcServiceChain', () => { method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); const thirdEndpointScope = nock('https://third.chain', { reqheaders: { - 'X-Foo': 'Bar', 'X-Fizz': 'Buzz', }, }) @@ -277,17 +340,17 @@ describe('RpcServiceChain', () => { jsonrpc: '2.0', result: 'ok', }); - + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: 'https://first.endpoint', }, { fetch, btoa, - endpointUrl: 'https://second.chain', + endpointUrl: 'https://second.endpoint', fetchOptions: { headers: { 'X-Foo': 'Bar', @@ -303,11 +366,8 @@ describe('RpcServiceChain', () => { }, }, ]); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); const jsonRpcRequest = { @@ -324,22 +384,22 @@ describe('RpcServiceChain', () => { // Retry the first endpoint until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Retry the first endpoint again, until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. 
// The circuit will break on the last time, and the third endpoint will @@ -351,26 +411,79 @@ describe('RpcServiceChain', () => { expect(thirdEndpointScope.isDone()).toBe(true); }); - it('calls onRetry each time an RPC service in the chain retries its request', async () => { - nock('https://first.chain') + it("throws a custom error if a request is attempted while a service's circuit is open", async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Attempt the endpoint again. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + 'RPC endpoint returned too many errors', + ); + }); + + it('calls onServiceRetry each time an RPC service in the chain retries its request', async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + const tertiaryEndpointUrl = 'https://third.chain'; + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock(secondaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://third.chain') + nock(tertiaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', @@ -382,17 +495,18 @@ describe('RpcServiceChain', () => { jsonrpc: '2.0', result: 'ok', }); - + const expectedError = createResourceUnavailableError(503); + const expectedRetryError = new HttpError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: primaryEndpointUrl, }, { fetch, btoa, - endpointUrl: 'https://second.chain', + endpointUrl: secondaryEndpointUrl, fetchOptions: { headers: { 'X-Foo': 'Bar', @@ -402,19 +516,13 @@ describe('RpcServiceChain', () => { { fetch, btoa, - endpointUrl: 'https://third.chain', + endpointUrl: tertiaryEndpointUrl, }, ]); - const onRetryListener = jest.fn< - ReturnType[0]>, - Parameters[0]> - >(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + const onServiceRetryListener = jest.fn(() => { + clock.next(); }); - rpcServiceChain.onRetry(onRetryListener); + rpcServiceChain.onServiceRetry(onServiceRetryListener); const jsonRpcRequest = { id: 1, @@ -424,22 +532,22 @@ describe('RpcServiceChain', () => { }; // Retry the first endpoint until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint again, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. @@ -447,85 +555,119 @@ describe('RpcServiceChain', () => { // be hit. This is finally a success. await rpcServiceChain.request(jsonRpcRequest); - const onRetryListenerCallCountsByEndpointUrl = - onRetryListener.mock.calls.reduce( - (memo, call) => { - const { endpointUrl } = call[0]; - memo[endpointUrl] = (memo[endpointUrl] ?? 0) + 1; - return memo; - }, - {} as Record, - ); - - expect(onRetryListenerCallCountsByEndpointUrl).toStrictEqual({ - 'https://first.chain/': 12, - 'https://second.chain/': 12, - }); + for (let attempt = 0; attempt < 24; attempt++) { + expect(onServiceRetryListener).toHaveBeenNthCalledWith(attempt + 1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: + attempt >= 12 + ? `${secondaryEndpointUrl}/` + : `${primaryEndpointUrl}/`, + attempt: (attempt % 4) + 1, + delay: expect.any(Number), + error: expectedRetryError, + }); + } }); - it('calls onBreak each time the underlying circuit for each RPC service in the chain breaks', async () => { - nock('https://first.chain') + it('does not call onBreak if the primary service circuit breaks and the request to its failover fails but its circuit has not broken yet', async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock(secondaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) - .reply(503); - nock('https://third.chain') + .reply(500); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit (unsuccessfully). + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(500), + ); + + expect(onBreakListener).not.toHaveBeenCalled(); + }); + + it("calls onBreak when all of the RPC services' circuits have broken", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .reply(200, { + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { id: 1, jsonrpc: '2.0', - result: 'ok', - }); - + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', - }, - { - fetch, - btoa, - endpointUrl: 'https://second.chain', - fetchOptions: { - headers: { - 'X-Foo': 'Bar', - }, - }, + endpointUrl: primaryEndpointUrl, }, { fetch, btoa, - endpointUrl: 'https://third.chain', + endpointUrl: secondaryEndpointUrl, }, ]); - const onBreakListener = jest.fn< - ReturnType[0]>, - Parameters[0]> - >(); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); rpcServiceChain.onBreak(onBreakListener); @@ -537,64 +679,152 @@ describe('RpcServiceChain', () => { }; // Retry the first endpoint until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint again, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the - // second endpoint, until max retries is hit. - // The circuit will break on the last time, and the third endpoint will - // be hit. This is finally a success. 
- await rpcServiceChain.request(jsonRpcRequest); - - expect(onBreakListener).toHaveBeenCalledTimes(2); - expect(onBreakListener).toHaveBeenNthCalledWith( - 1, - expect.objectContaining({ - endpointUrl: 'https://first.chain/', - }), - ); - expect(onBreakListener).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - endpointUrl: 'https://second.chain/', - }), + // second endpoint, until max retries is hit. The circuit will break on + // the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, ); + + expect(onBreakListener).toHaveBeenCalledTimes(1); + expect(onBreakListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); }); - it('calls onDegraded each time an RPC service in the chain gives up before the circuit breaks or responds successfully but slowly', async () => { - nock('https://first.chain') + it("calls onBreak again if all services' circuits break, the primary service responds successfully, and all services' circuits break again", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock(secondaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(30) .reply(503); - nock('https://third.chain') + const expectedError = createResourceUnavailableError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until its circuit breaks, then retry the + // second endpoint until *its* circuit breaks. + for (let i = 0; i < DEFAULT_RPC_CHAIN_ATTEMPTS_UNTIL_BREAK; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + // Wait until the circuit break duration passes, try the first endpoint + // and see that it succeeds. + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + // Do it again: retry the first endpoint until its circuit breaks, then + // retry the second endpoint until *its* circuit breaks. 
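A note on the fake-timer plumbing used throughout these tests: retries are scheduled on a backoff, so each test advances sinon's fake clock from inside the `onServiceRetry` listener and then jumps past the circuit-break window explicitly. A condensed sketch of that pattern (request calls omitted) might look like this:

```ts
import { DEFAULT_CIRCUIT_BREAK_DURATION } from '@metamask/controller-utils';
import { useFakeTimers } from 'sinon';
import type { SinonFakeTimers } from 'sinon';

import { RpcServiceChain } from './rpc-service-chain';

const clock: SinonFakeTimers = useFakeTimers();
const chain = new RpcServiceChain([
  // Placeholder endpoint; `fetch` and `btoa` are platform globals.
  { fetch, btoa, endpointUrl: 'https://some.endpoint' },
]);

// Each retry would otherwise sit on a real backoff timer; firing the next
// scheduled timer from the retry listener keeps the test moving.
chain.onServiceRetry(() => {
  clock.next();
});

// ...issue requests until the circuit breaks, as in the tests above...

// Then jump past the cool-down so the next request is allowed through again.
clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION);

clock.restore();
```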
+ for (let i = 0; i < DEFAULT_RPC_CHAIN_ATTEMPTS_UNTIL_BREAK; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + + expect(onBreakListener).toHaveBeenCalledTimes(2); + expect(onBreakListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onBreakListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + }); + + it("calls onBreak again if all services' circuits break, the primary service responds successfully but slowly, and all circuits break again", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', @@ -602,47 +832,49 @@ describe('RpcServiceChain', () => { params: [], }) .reply(200, () => { - clock.tick(6000); + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); return { id: 1, jsonrpc: '2.0', result: '0x1', }; }); - + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(30) + .reply(503); + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', - }, - { - fetch, - btoa, - endpointUrl: 'https://second.chain', - fetchOptions: { - headers: { - 'X-Foo': 'Bar', - }, - }, + endpointUrl: primaryEndpointUrl, }, { fetch, btoa, - endpointUrl: 'https://third.chain', + endpointUrl: secondaryEndpointUrl, }, ]); - const onDegradedListener = jest.fn< - ReturnType[0]>, - Parameters[0]> - >(); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); - rpcServiceChain.onDegraded(onDegradedListener); + rpcServiceChain.onBreak(onBreakListener); const jsonRpcRequest = { id: 1, @@ -650,46 +882,1243 @@ describe('RpcServiceChain', () => { method: 'eth_chainId', params: [], }; - // Retry the first endpoint until max retries is hit. - await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, - ); - // Retry the first endpoint again, until max retries is hit. - await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, - ); - // Retry the first endpoint for a third time, until max retries is hit. - // The circuit will break on the last time, and the second endpoint will - // be retried, until max retries is hit. - await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + // Retry the first endpoint until its circuit breaks, then retry the + // second endpoint until *its* circuit breaks. 
+ for (let i = 0; i < DEFAULT_RPC_CHAIN_ATTEMPTS_UNTIL_BREAK; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + // Wait until the circuit break duration passes, try the first endpoint + // and see that it succeeds. + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + // Do it again: retry the first endpoint until its circuit breaks, then + // retry the second endpoint until *its* circuit breaks. + for (let i = 0; i < DEFAULT_RPC_CHAIN_ATTEMPTS_UNTIL_BREAK; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + + expect(onBreakListener).toHaveBeenCalledTimes(2); + expect(onBreakListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onBreakListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + }); + + it('calls onServiceBreak each time the circuit of an RPC service in the chain breaks', async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + const tertiaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(tertiaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: tertiaryEndpointUrl, + }, + ]); + const onServiceBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceBreak(onServiceBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the third endpoint will + // be hit, until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the third endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the third endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onServiceBreakListener).toHaveBeenCalledTimes(3); + expect(onServiceBreakListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onServiceBreakListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onServiceBreakListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${tertiaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + }); + + it("calls onDegraded only once even if a service's maximum number of retries is reached multiple times", async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it('calls onDegraded only once even if the time to complete a request via a service is continually slow', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(2) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it('calls onDegraded only once even if a service runs out of retries and then responds successfully but slowly, or vice versa', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Try the endpoint again, and see that it succeeds. + await rpcServiceChain.request(jsonRpcRequest); + // Retry the endpoint again until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it("does not call onDegraded again when the primary service's circuit breaks and its failover responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, albeit slowly. + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it("calls onDegraded again when a service's underlying circuit breaks, and then after waiting, the service responds successfully but slowly", async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Wait until the circuit break duration passes, try the endpoint again, + // and see that it succeeds, but slowly. + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(2); + expect(onDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it("calls onDegraded again when a failover service's underlying circuit breaks, and then after waiting, the primary responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + // Hit the first endpoint again, and see that it succeeds, but slowly + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(2); + expect(onDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + }); + }); + + it('calls onServiceDegraded each time a service continually runs out of retries (but before its circuit breaks)', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(2); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it('calls onServiceDegraded each time a service continually responds slowly', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(2) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(2); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it('calls onServiceDegraded each time a service runs out of retries and then responds successfully but slowly, or vice versa', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Try the endpoint again, and see that it succeeds. + await rpcServiceChain.request(jsonRpcRequest); + // Retry the endpoint again until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(3); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it("calls onServiceDegraded again when the primary service's circuit breaks and its failover responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, ); - // Try the first endpoint, see that the circuit is broken, and retry the - // second endpoint, until max retries is hit. + // Retry the first endpoint again, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); - // Try the first endpoint, see that the circuit is broken, and retry the - // second endpoint, until max retries is hit. - // The circuit will break on the last time, and the third endpoint will - // be hit. This is finally a success. + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, albeit slowly. await rpcServiceChain.request(jsonRpcRequest); - const onDegradedListenerCallCountsByEndpointUrl = - onDegradedListener.mock.calls.reduce( - (memo: Record, call) => { - const { endpointUrl } = call[0]; - memo[endpointUrl] = (memo[endpointUrl] ?? 
0) + 1; - return memo; - }, - {}, - ); + expect(onServiceDegradedListener).toHaveBeenCalledTimes(3); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + }); + }); + + it("calls onServiceDegraded again when a service's underlying circuit breaks, and then after waiting, the service responds successfully but slowly", async () => { + const endpointUrl = 'https://first.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Wait until the circuit break duration passes, try the endpoint again, + // and see that it succeeds, but slowly. 
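+ // (Once the break duration has elapsed, the circuit moves to half-open and
+ // lets this next request through.)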
+ clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(3); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it("calls onServiceDegraded again when a failover service's underlying circuit breaks, and then after waiting, the primary responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
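+ // After that, both circuits in the chain are open until the circuit break
+ // duration elapses.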
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + // Hit the first endpoint again, and see that it succeeds, but slowly + await rpcServiceChain.request(jsonRpcRequest); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(5); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(4, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(5, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + }); + }); + + it('calls onAvailable only once, even if a service continually responds successfully', async () => { + const endpointUrl = 'https://first.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(3) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it("calls onAvailable once, after the primary service's circuit has broken, the request to the failover succeeds", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit. + await rpcServiceChain.request(jsonRpcRequest); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + }); + }); + + it('calls onAvailable when a service becomes degraded by responding slowly, and then recovers', async () => { + const endpointUrl = 'https://first.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); - expect(onDegradedListenerCallCountsByEndpointUrl).toStrictEqual({ - 'https://first.chain/': 2, - 'https://second.chain/': 2, - 'https://third.chain/': 1, + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, }); }); }); }); + +/** + * Creates a "resource unavailable" RPC error for testing. + * + * @param httpStatus - The HTTP status that the error represents. + * @returns The RPC error. 
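+ *
+ * @example
+ * // As used in the chain tests above:
+ * await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow(
+ *   createResourceUnavailableError(503),
+ * );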
+ */
+function createResourceUnavailableError(httpStatus: number) {
+ return expect.objectContaining({
+ code: errorCodes.rpc.resourceUnavailable,
+ message: 'RPC endpoint not found or unavailable.',
+ data: {
+ httpStatus,
+ },
+ });
+}
diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts
index f8e04a69dbc..3853f52fe05 100644
--- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts
+++ b/packages/network-controller/src/rpc-service/rpc-service-chain.ts
@@ -1,3 +1,7 @@
+import {
+ CircuitState,
+ CockatielEventEmitter,
+} from '@metamask/controller-utils';
 import type {
 Json,
 JsonRpcParams,
@@ -7,18 +11,78 @@ import type {
 import { RpcService } from './rpc-service';
 import type { RpcServiceOptions } from './rpc-service';
-import type { RpcServiceRequestable } from './rpc-service-requestable';
-import type { FetchOptions } from './shared';
+import type {
+ CockatielEventToEventEmitterWithData,
+ CockatielEventToEventListenerWithData,
+ ExtendCockatielEventData,
+ ExtractCockatielEventData,
+ FetchOptions,
+} from './shared';
+import { projectLogger, createModuleLogger } from '../logger';
+
+const log = createModuleLogger(projectLogger, 'RpcServiceChain');
+
+/**
+ * Statuses that the RPC service chain can be in.
+ */
+const STATUSES = {
+ Available: 'available',
+ Degraded: 'degraded',
+ Unknown: 'unknown',
+ Unavailable: 'unavailable',
+} as const;
 
 /**
- * This class constructs a chain of RpcService objects which represent a
- * particular network. The first object in the chain is intended to be the
- * primary way of reaching the network and the remaining objects are used as
- * failovers.
+ * A status that the RPC service chain can be in.
  */
-export class RpcServiceChain implements RpcServiceRequestable {
+type Status = (typeof STATUSES)[keyof typeof STATUSES];
+
+/**
+ * This class constructs and manages requests to a chain of RpcService objects
+ * which represent RPC endpoints with which to access a particular network. The
+ * first service in the chain is intended to be the primary way of hitting the
+ * network and the remaining services are used as failovers.
+ */
+export class RpcServiceChain {
+ /**
+ * The event emitter for the `onAvailable` event.
+ */
+ readonly #onAvailableEventEmitter: CockatielEventToEventEmitterWithData<
+ RpcService['onAvailable'],
+ { primaryEndpointUrl: string }
+ >;
+
+ /**
+ * The event emitter for the `onBreak` event.
+ */
+ readonly #onBreakEventEmitter: CockatielEventToEventEmitterWithData<
+ RpcService['onBreak'],
+ { primaryEndpointUrl: string }
+ >;
+
+ /**
+ * The event emitter for the `onDegraded` event.
+ */
+ readonly #onDegradedEventEmitter: CockatielEventToEventEmitterWithData<
+ RpcService['onDegraded'],
+ { primaryEndpointUrl: string }
+ >;
+
+ /**
+ * The first RPC service that requests will be sent to.
+ */
+ readonly #primaryService: RpcService;
+
+ /**
+ * The RPC services in the chain.
+ */
 readonly #services: RpcService[];
 
+ /**
+ * The status of the RPC service chain.
+ */
+ #status: Status;
+
 /**
 * Constructs a new RpcServiceChain object.
 *
 * @param rpcServiceConfigurations - The options for the RPC services that
 * you want to construct. Each object in this array is the same as
@@ -27,20 +91,72 @@ export class RpcServiceChain implements RpcServiceRequestable {
 * {@link RpcServiceOptions}.
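+ *
+ * @example
+ * // A chain with a primary and one failover (hypothetical endpoints, mirroring
+ * // the tests for this class):
+ * const rpcServiceChain = new RpcServiceChain([
+ *   { fetch, btoa, endpointUrl: 'https://first.endpoint' },
+ *   { fetch, btoa, endpointUrl: 'https://second.endpoint' },
+ * ]);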
*/ constructor( - rpcServiceConfigurations: Omit[], + rpcServiceConfigurations: [RpcServiceOptions, ...RpcServiceOptions[]], ) { - this.#services = this.#buildRpcServiceChain(rpcServiceConfigurations); + this.#services = rpcServiceConfigurations.map( + (rpcServiceConfiguration) => new RpcService(rpcServiceConfiguration), + ); + this.#primaryService = this.#services[0]; + + this.#status = STATUSES.Unknown; + this.#onBreakEventEmitter = new CockatielEventEmitter< + ExtendCockatielEventData< + ExtractCockatielEventData, + { primaryEndpointUrl: string } + > + >(); + + this.#onDegradedEventEmitter = new CockatielEventEmitter(); + for (const service of this.#services) { + service.onDegraded((data) => { + if (this.#status !== STATUSES.Degraded) { + log('Updating status to "degraded"', data); + this.#status = STATUSES.Degraded; + this.#onDegradedEventEmitter.emit({ + ...data, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + }); + } + }); + } + + this.#onAvailableEventEmitter = new CockatielEventEmitter(); + for (const service of this.#services) { + service.onAvailable((data) => { + if (this.#status !== STATUSES.Available) { + log('Updating status to "available"', data); + this.#status = STATUSES.Available; + this.#onAvailableEventEmitter.emit({ + ...data, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + }); + } + }); + } } /** - * Listens for when any of the RPC services retry a request. + * Calls the provided callback when any of the RPC services is retried. + * + * This is mainly useful for tests. * - * @param listener - The callback to be called when the retry occurs. - * @returns What {@link RpcService.onRetry} returns. + * @param listener - The callback to be called. + * @returns An object with a `dispose` method which can be used to unregister + * the event listener. */ - onRetry(listener: Parameters[0]) { + onServiceRetry( + listener: CockatielEventToEventListenerWithData< + RpcService['onRetry'], + { primaryEndpointUrl: string } + >, + ) { const disposables = this.#services.map((service) => - service.onRetry(listener), + service.onRetry((data) => { + listener({ + ...data, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + }); + }), ); return { @@ -51,15 +167,51 @@ export class RpcServiceChain implements RpcServiceRequestable { } /** - * Listens for when any of the RPC services retry the request too many times - * in a row. + * Calls the provided callback only when the maximum number of failed + * consecutive attempts to receive a 2xx response has been reached for all + * RPC services in the chain, and all services' underlying circuits have + * broken. + * + * The callback will not be called if a service's circuit breaks but its + * failover does not. Use `onServiceBreak` if you'd like a lower level of + * granularity. + * + * @param listener - The callback to be called. + * @returns An object with a `dispose` method which can be used to unregister + * the callback. + */ + onBreak( + listener: CockatielEventToEventListenerWithData< + RpcService['onBreak'], + { primaryEndpointUrl: string } + >, + ) { + return this.#onBreakEventEmitter.addListener(listener); + } + + /** + * Calls the provided callback each time when, for *any* of the RPC services + * in this chain, the maximum number of failed consecutive attempts to receive + * a 2xx response has been reached and the underlying circuit has broken. A + * more granular version of `onBreak`. * - * @param listener - The callback to be called when the retry occurs. 
- * @returns What {@link RpcService.onBreak} returns.
+ * @param listener - The callback to be called.
+ * @returns An object with a `dispose` method which can be used to unregister
+ * the callback.
 */
- onBreak(listener: Parameters[0]) {
+ onServiceBreak(
+ listener: CockatielEventToEventListenerWithData<
+ RpcService['onBreak'],
+ { primaryEndpointUrl: string }
+ >,
+ ) {
 const disposables = this.#services.map((service) =>
- service.onBreak(listener),
+ service.onBreak((data) => {
+ listener({
+ ...data,
+ primaryEndpointUrl: this.#primaryService.endpointUrl.toString(),
+ });
+ }),
 );
 return {
@@ -70,14 +222,70 @@
 }
 
 /**
- * Listens for when any of the RPC services send a slow request.
+ * Calls the provided callback if no requests have been initiated yet, or if
+ * all requests to RPC services in this chain have responded successfully in a
+ * timely fashion, and then one of the two conditions applies:
+ *
+ * 1. When a retriable error is encountered making a request to an RPC
+ * service, and the request is retried until a set maximum is reached.
+ * 2. When an RPC service responds successfully, but the request takes longer
+ * than a set number of seconds to complete.
+ *
+ * Note that the callback will be called even if there are local connectivity
+ * issues which prevent requests from being initiated. This is intentional.
+ *
+ * Also note that this callback will only be called if the RPC service chain as
+ * a whole is in a "degraded" state, and will then only be called once (e.g., it
+ * will not be called if a failover service falls into a degraded state, then
+ * the primary comes back online, but it is slow). Use `onServiceDegraded` if
+ * you'd like a lower level of granularity.
+ *
+ * @param listener - The callback to be called.
+ * @returns An object with a `dispose` method which can be used to unregister
+ * the callback.
+ */
+ onDegraded(
+ listener: CockatielEventToEventListenerWithData<
+ RpcService['onDegraded'],
+ { primaryEndpointUrl: string }
+ >,
+ ) {
+ return this.#onDegradedEventEmitter.addListener(listener);
+ }
+
+ /**
+ * Calls the provided callback each time one of the two conditions applies:
 *
- * @param listener - The callback to be called when the retry occurs.
- * @returns What {@link RpcService.onRetry} returns.
+ * 1. When a retriable error is encountered making a request to an RPC
+ * service, and the request is retried until a set maximum is reached.
+ * 2. When an RPC service responds successfully, but the request takes longer
+ * than a set number of seconds to complete.
+ *
+ * Note that the callback will be called even if there are local connectivity
+ * issues which prevent requests from being initiated. This is intentional.
+ *
+ * This is a more granular version of `onDegraded`. The callback will be
+ * called for each slow request to an RPC service. It may also be called again
+ * if a failover service falls into a degraded state, then the primary comes
+ * back online, but it is slow.
+ *
+ * @param listener - The callback to be called.
+ * @returns An object with a `dispose` method which can be used to unregister
+ * the callback.
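+ *
+ * @example
+ * // Hypothetical listener; the payload fields mirror those asserted in the
+ * // tests for this class:
+ * rpcServiceChain.onServiceDegraded(({ endpointUrl, primaryEndpointUrl, error }) => {
+ *   console.warn(`Degraded response from ${endpointUrl}`, { primaryEndpointUrl, error });
+ * });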
 */
- onDegraded(listener: Parameters[0]) {
+ onServiceDegraded(
+ listener: CockatielEventToEventListenerWithData<
+ RpcService['onDegraded'],
+ { primaryEndpointUrl: string }
+ >,
+ ) {
 const disposables = this.#services.map((service) =>
- service.onDegraded(listener),
+ service.onDegraded((data) => {
+ listener({
+ ...data,
+ primaryEndpointUrl: this.#primaryService.endpointUrl.toString(),
+ });
+ }),
 );
 return {
@@ -88,8 +296,33 @@
 }
 
 /**
- * Makes a request to the first RPC service in the chain. If this service is
- * down, then the request is forwarded to the next service in the chain, etc.
+ * Calls the provided callback under one of the following two conditions:
+ *
+ * 1. The first time that a 2xx request is made to any of the RPC services in
+ * this chain.
+ * 2. When requests to any of the failover RPC services in this chain were
+ * failing such that they were degraded or their underlying circuits broke,
+ * but the first request to the primary succeeds again.
+ *
+ * Note that this callback will only be called if the RPC service chain as a
+ * whole is in an "available" state.
+ *
+ * @param listener - The callback to be called.
+ * @returns An object with a `dispose` method which can be used to unregister
+ * the callback.
+ */
+ onAvailable(
+ listener: CockatielEventToEventListenerWithData<
+ RpcService['onAvailable'],
+ { primaryEndpointUrl: string }
+ >,
+ ) {
+ return this.#onAvailableEventEmitter.addListener(listener);
+ }
+
+ /**
+ * Uses the RPC services in the chain to make a request, using each service
+ * after the first as a fallback to the previous one as necessary.
 *
 * This overload is specifically designed for `eth_getBlockByNumber`, which
 * can return a `result` of `null` despite an expected `Result` being
@@ -113,8 +346,8 @@
 ): Promise | JsonRpcResponse>;
 
 /**
- * Makes a request to the first RPC service in the chain. If this service is
- * down, then the request is forwarded to the next service in the chain, etc.
+ * Uses the RPC services in the chain to make a request, using each service
+ * after the first as a fallback to the previous one as necessary.
 *
 * This overload is designed for all RPC methods except for
 * `eth_getBlockByNumber`, which are expected to return a `result` of the
@@ -139,31 +372,94 @@
 jsonRpcRequest: Readonly>,
 fetchOptions: FetchOptions = {},
 ): Promise> {
- return this.#services[0].request(jsonRpcRequest, fetchOptions);
- }
+ // Start with the primary (first) service and switch to failovers as the
+ // need arises. This is a bit confusing, so keep reading for more on how
+ // this works.
- /**
- * Constructs the chain of RPC services. The second RPC service is
- * configured as the failover for the first, the third service is
- * configured as the failover for the second, etc.
- *
- * @param rpcServiceConfigurations - The options for the RPC services that
- * you want to construct. Each object in this array is the same as
- * {@link RpcServiceOptions}.
- * @returns The constructed chain of RPC services.
- */
- #buildRpcServiceChain(
- rpcServiceConfigurations: Omit[],
- ): RpcService[] {
- return [...rpcServiceConfigurations]
- .reverse()
- .reduce((workingServices: RpcService[], serviceConfiguration, index) => {
- const failoverService = index > 0 ?
workingServices[0] : undefined; - const service = new RpcService({ - ...serviceConfiguration, - failoverService, - }); - return [service, ...workingServices]; - }, []); + let availableServiceIndex: number | undefined; + let response: JsonRpcResponse | undefined; + + for (const [i, service] of this.#services.entries()) { + log(`Trying service #${i + 1}...`); + const previousCircuitState = service.getCircuitState(); + + try { + // Try making the request through the service. + response = await service.request( + jsonRpcRequest, + fetchOptions, + ); + log('Service successfully received request.'); + availableServiceIndex = i; + break; + } catch (error) { + // Oops, that didn't work. + // Capture this error so that we can handle it later. + + const { lastError } = service; + const isCircuitOpen = service.getCircuitState() === CircuitState.Open; + + log('Service failed! error =', error, 'lastError = ', lastError); + + if (isCircuitOpen) { + if (i < this.#services.length - 1) { + log( + "This service's circuit is open. Proceeding to next service...", + ); + continue; + } + + if ( + previousCircuitState !== CircuitState.Open && + this.#status !== STATUSES.Unavailable && + lastError !== undefined + ) { + // If the service's circuit just broke and it's the last one in the + // chain, then trigger the onBreak event. (But if for some reason we + // have already done this, then don't do it.) + log( + 'This service\'s circuit just opened and it is the last service. Updating status to "unavailable" and triggering onBreak.', + ); + this.#status = STATUSES.Unavailable; + this.#onBreakEventEmitter.emit({ + error: lastError, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + endpointUrl: service.endpointUrl.toString(), + }); + } + } + + // The service failed, and we throw whatever the error is. The calling + // code can try again if it so desires. + log( + `${isCircuitOpen ? '' : "This service's circuit is closed. "}Re-throwing error.`, + ); + throw error; + } + } + + if (response) { + // If one of the services is available, reset all of the circuits of the + // following services. If we didn't do this and the service became + // unavailable in the future, and any of the failovers' circuits were + // open (due to previous failures), we would receive a "circuit broken" + // error when we attempted to divert traffic to the failovers again. + // + if (availableServiceIndex !== undefined) { + for (const [i, service] of [...this.#services.entries()].slice( + availableServiceIndex + 1, + )) { + log(`Resetting policy for service #${i + 1}.`); + service.resetPolicy(); + } + } + + return response; + } + + // The only way we can end up here is if there are no services to loop over. + // That is not possible due to the types on the constructor, but TypeScript + // doesn't know this, so we have to appease it. 
+ throw new Error('Nothing to return'); } } diff --git a/packages/network-controller/src/rpc-service/rpc-service-requestable.ts b/packages/network-controller/src/rpc-service/rpc-service-requestable.ts index c3dbcb4a495..85ff60e176c 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-requestable.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-requestable.ts @@ -6,7 +6,13 @@ import type { JsonRpcResponse, } from '@metamask/utils'; -import type { AddToCockatielEventData, FetchOptions } from './shared'; +import type { + CockatielEventToEventListenerWithData, + ExcludeCockatielEventData, + ExtendCockatielEventData, + ExtractCockatielEventData, + FetchOptions, +} from './shared'; /** * The interface for a service class responsible for making a request to a @@ -22,8 +28,8 @@ export type RpcServiceRequestable = { * @see {@link createServicePolicy} */ onRetry( - listener: AddToCockatielEventData< - Parameters[0], + listener: CockatielEventToEventListenerWithData< + ServicePolicy['onRetry'], { endpointUrl: string } >, ): ReturnType; @@ -37,10 +43,15 @@ export type RpcServiceRequestable = { * @see {@link createServicePolicy} */ onBreak( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string } - >, + listener: ( + data: ExcludeCockatielEventData< + ExtendCockatielEventData< + ExtractCockatielEventData, + { endpointUrl: string } + >, + 'isolated' + >, + ) => void, ): ReturnType; /** @@ -52,12 +63,26 @@ export type RpcServiceRequestable = { * @see {@link createServicePolicy} */ onDegraded( - listener: AddToCockatielEventData< - Parameters[0], + listener: CockatielEventToEventListenerWithData< + ServicePolicy['onDegraded'], { endpointUrl: string } >, ): ReturnType; + /** + * Listens for when the policy underlying this RPC service is available. + * + * @param listener - The callback to be called when the request is available. + * @returns What {@link ServicePolicy.onDegraded} returns. + * @see {@link createServicePolicy} + */ + onAvailable( + listener: CockatielEventToEventListenerWithData< + ServicePolicy['onAvailable'], + { endpointUrl: string } + >, + ): ReturnType; + /** * Makes a request to the target. 
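+ *
+ * @example
+ * // Hypothetical call on an implementation of this interface; the request
+ * // shape mirrors the tests in this package:
+ * const response = await rpcService.request({
+ *   id: 1,
+ *   jsonrpc: '2.0',
+ *   method: 'eth_chainId',
+ *   params: [],
+ * });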
*/ diff --git a/packages/network-controller/src/rpc-service/rpc-service.test.ts b/packages/network-controller/src/rpc-service/rpc-service.test.ts index 6faaa7799f3..18a96e4d070 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.test.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.test.ts @@ -1,14 +1,21 @@ -import { HttpError } from '@metamask/controller-utils'; +import { + DEFAULT_CIRCUIT_BREAK_DURATION, + DEFAULT_DEGRADED_THRESHOLD, + HttpError, +} from '@metamask/controller-utils'; import { errorCodes } from '@metamask/rpc-errors'; +import { CircuitState } from 'cockatiel'; import deepFreeze from 'deep-freeze-strict'; import nock from 'nock'; import { FetchError } from 'node-fetch'; import { useFakeTimers } from 'sinon'; import type { SinonFakeTimers } from 'sinon'; -import type { AbstractRpcService } from './abstract-rpc-service'; -import { CUSTOM_RPC_ERRORS, RpcService } from './rpc-service'; -import { DEFAULT_CIRCUIT_BREAK_DURATION } from '../../../controller-utils/src/create-service-policy'; +import { + CUSTOM_RPC_ERRORS, + DEFAULT_MAX_RETRIES, + RpcService, +} from './rpc-service'; describe('RpcService', () => { let clock: SinonFakeTimers; @@ -21,6 +28,320 @@ describe('RpcService', () => { clock.restore(); }); + describe('resetPolicy', () => { + it('resets the state of the circuit to "closed"', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + expect(service.getCircuitState()).toBe(CircuitState.Open); + + service.resetPolicy(); + + expect(service.getCircuitState()).toBe(CircuitState.Closed); + }); + + it('allows making a successful request to the service if its circuit has broken', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + service.resetPolicy(); + + expect(await service.request(jsonRpcRequest)).toStrictEqual({ + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + }); + + it('calls onAvailable listeners if the service was executed successfully, 
its circuit broke, it was reset, and executes successfully again', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + const onAvailableListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + service.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + + // Make a successful requst + await service.request(jsonRpcRequest); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + service.resetPolicy(); + + // Make another successful requst + await service.request(jsonRpcRequest); + expect(onAvailableListener).toHaveBeenCalledTimes(2); + }); + + it('allows making an unsuccessful request to the service if its circuit has broken', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(500); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + service.resetPolicy(); + + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + 'RPC endpoint not found or unavailable', + ); + }); + + it('does not call onBreak listeners', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(500); + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + expect(onBreakListener).toHaveBeenCalledTimes(1); + + service.resetPolicy(); + 
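+ // resetPolicy only re-closes the circuit; it does not emit onBreak again, so
+ // the listener call count below is unchanged.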
expect(onBreakListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('getCircuitState', () => { + it('returns the state of the underlying circuit', async () => { + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl).post('/', jsonRpcRequest).times(15).reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(500); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + expect(service.getCircuitState()).toBe(CircuitState.Closed); + + // Retry until we break the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + expect(service.getCircuitState()).toBe(CircuitState.Open); + + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + const promise = ignoreRejection(service.request(jsonRpcRequest)); + expect(service.getCircuitState()).toBe(CircuitState.HalfOpen); + await promise; + expect(service.getCircuitState()).toBe(CircuitState.Open); + }); + }); + describe('request', () => { // NOTE: Keep this list synced with CONNECTION_ERRORS describe.each([ @@ -61,7 +382,7 @@ describe('RpcService', () => { message: 'terminated', }, ])( - `if making the request throws the $message error`, + `if making the request throws the "$message" error`, ({ constructorName, message }) => { let error; switch (constructorName) { @@ -83,7 +404,7 @@ describe('RpcService', () => { ); describe.each(['ETIMEDOUT', 'ECONNRESET'])( - 'if making the request throws a %s error', + 'if making the request throws a "%s" error', (errorCode) => { const error = new Error('timed out'); // @ts-expect-error `code` does not exist on the Error type, but is @@ -99,210 +420,42 @@ describe('RpcService', () => { ); describe('if the endpoint URL was not mocked via Nock', () => { - it('re-throws the error without retrying the request', async () => { - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow('Nock: Disallowed net connect'); - }); - - it('does not forward the request to a failover service if given one', async () => { - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + testsForNonRetriableErrors({ + expectedError: 'Nock: Disallowed net connect', }); }); describe('if the endpoint URL was mocked via Nock, but not the RPC method', () => { - it('re-throws 
the error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_incorrectMethod', - params: [], - }) - .reply(500); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow('Nock: No match for request'); - }); - - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_incorrectMethod', - params: [], - }) - .reply(500); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_incorrectMethod', - params: [], - }) - .reply(500); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl }) => { + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_incorrectMethod', + params: [], + }) + .reply(500); + }, + rpcMethod: 'eth_chainId', + expectedError: 'Nock: No match for request', }); }); describe('if making the request throws an unknown error', () => { - it('re-throws the error without retrying the request', async () => { - const error = new Error('oops'); - const mockFetch = jest.fn(() => { - throw error; - }); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow(error); - expect(mockFetch).toHaveBeenCalledTimes(1); - }); - - it('does not forward the request to a failover service if given one', async () => { - const error = new Error('oops'); - const mockFetch = jest.fn(() => { - throw error; - }); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const error = new Error('oops'); - const mockFetch = jest.fn(() => { - throw error; - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - 
params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + testsForNonRetriableErrors({ + createService: ({ endpointUrl, expectedError }) => { + return new RpcService({ + fetch: () => { + // This error could be anything. + // eslint-disable-next-line @typescript-eslint/only-throw-error + throw expectedError; + }, + btoa, + endpointUrl, + }); + }, + expectedError: new Error('oops'), }); }); @@ -325,374 +478,97 @@ describe('RpcService', () => { ); describe('if the endpoint has a 401 response', () => { - it('throws an unauthorized error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(401); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: CUSTOM_RPC_ERRORS.unauthorized, - message: 'Unauthorized.', - data: { - httpStatus: 401, - }, - }), - ); - }); - - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(401); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(429); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); - }); - }); - - describe.each([402, 404, 500, 501, 505, 506, 507, 508, 510, 511])( - 'if the endpoint has a %d response', - (httpStatus) => { - it('throws a resource unavailable error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { nock(endpointUrl) .post('/', { id: 1, jsonrpc: '2.0', - method: 'eth_unknownMethod', + method: rpcMethod, params: [], }) - .reply(httpStatus); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); + .reply(401); + }, + expectedError: expect.objectContaining({ + code: CUSTOM_RPC_ERRORS.unauthorized, + message: 'Unauthorized.', + data: { + httpStatus: 401, + }, + }), + }); + }); - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_unknownMethod', - params: [], - }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: 'RPC endpoint not found or unavailable.', - data: { - httpStatus, - }, - }), - ); + describe.each([402, 404, 500, 501, 505, 506, 507, 508, 510, 511])( + 'if the endpoint has a %d 
response', + (httpStatus) => { + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: rpcMethod, + params: [], + }) + .reply(httpStatus); + }, + expectedError: expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: 'RPC endpoint not found or unavailable.', + data: { + httpStatus, + }, + }), }); + }, + ); - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; + describe('if the endpoint has a 429 response', () => { + const httpStatus = 429; + + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { nock(endpointUrl) .post('/', { id: 1, jsonrpc: '2.0', - method: 'eth_unknownMethod', + method: rpcMethod, params: [], }) .reply(httpStatus); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); + }, + expectedError: expect.objectContaining({ + code: errorCodes.rpc.limitExceeded, + message: 'Request is being rate limited.', + data: { + httpStatus, + }, + }), + }); + }); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_unknownMethod', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); + describe('when the endpoint has a 4xx response that is not 401, 402, 404, or 429', () => { + const httpStatus = 422; - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { nock(endpointUrl) .post('/', { id: 1, jsonrpc: '2.0', - method: 'eth_unknownMethod', + method: rpcMethod, params: [], }) .reply(httpStatus); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_unknownMethod', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); - }); - }, - ); - - describe('if the endpoint has a 429 response', () => { - it('throws a rate-limiting error without retrying the request', async () => { - const httpStatus = 429; - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.limitExceeded, - message: 'Request is being rate limited.', - data: { - httpStatus, - }, - }), - ); - }); - - it('does not forward the request to a failover service if given one', async () => { - const httpStatus = 429; - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await 
ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const httpStatus = 429; - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); - }); - }); - - describe('when the endpoint has a 4xx response that is not 401, 402, 404, or 429', () => { - const httpStatus = 422; - - it('throws a generic HTTP client error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: CUSTOM_RPC_ERRORS.httpClientError, - message: 'RPC endpoint returned HTTP client error.', - data: { - httpStatus, - }, - }), - ); - }); - - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + }, + expectedError: expect.objectContaining({ + code: CUSTOM_RPC_ERRORS.httpClientError, + message: 'RPC endpoint returned HTTP client error.', + data: { + httpStatus, + }, + }), }); }); @@ -1018,7 +894,7 @@ describe('RpcService', () => { params: [], }) .reply(200, () => { - clock.tick(6000); + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); return { id: 1, jsonrpc: '2.0', @@ -1041,6 +917,103 @@ describe('RpcService', () => { }); expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + }); + }); + + it('calls the onAvailable callback the first time a successful request occurs', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + 
.reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const onAvailableListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onAvailable(onAvailableListener); + + await service.request({ + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + }); + }); + + it('calls the onAvailable callback if the endpoint takes more than 5 seconds to respond and then speeds up again', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const onAvailableListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onAvailable(onAvailableListener); + + await service.request({ + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }); + await service.request({ + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + }); }); }); }); @@ -1062,526 +1035,394 @@ async function ignoreRejection( /** * These are tests that exercise logic for cases in which the request cannot be - * made because the `fetch` calls throws a specific error. + * made because some kind of error is thrown, and the request is not retried. * - * @param args - The arguments - * @param args.getClock - A function that returns the Sinon clock, set in - * `beforeEach`. - * @param args.producedError - The error produced when `fetch` is called. + * @param args - The arguments. + * @param args.beforeCreateService - A function that is run before the service + * is created. + * @param args.createService - A function that is run to create the service. + * @param args.endpointUrl - The URL that is hit. + * @param args.rpcMethod - The RPC method that is used. (Defaults to + * `eth_chainId`). * @param args.expectedError - The error that a call to the service's `request` * method is expected to produce. 
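+ *
+ * @example
+ * // As used above for the case where the endpoint URL was not mocked:
+ * testsForNonRetriableErrors({
+ *   expectedError: 'Nock: Disallowed net connect',
+ * });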
*/ -function testsForRetriableFetchErrors({ - getClock, - producedError, +function testsForNonRetriableErrors({ + beforeCreateService = () => { + // do nothing + }, + createService = (args) => { + return new RpcService({ fetch, btoa, endpointUrl: args.endpointUrl }); + }, + endpointUrl = 'https://rpc.example.chain', + rpcMethod = `eth_chainId`, expectedError, }: { - getClock: () => SinonFakeTimers; - producedError: Error; - expectedError: string | jest.Constructable | RegExp | Error; + beforeCreateService?: (args: { + endpointUrl: string; + rpcMethod: string; + }) => void; + createService?: (args: { + endpointUrl: string; + expectedError: string | RegExp | Error | jest.Constructable | undefined; + }) => RpcService; + endpointUrl?: string; + rpcMethod?: string; + expectedError: string | RegExp | Error | jest.Constructable | undefined; }) { - describe('if there is no failover service provided', () => { - it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(5); - }); + /* eslint-disable jest/require-top-level-describe */ - it('still re-throws the error even after the circuit breaks', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + it('re-throws the error without retrying the request', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const service = createService({ endpointUrl, expectedError }); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); + const promise = service.request({ + id: 1, + jsonrpc: '2.0', + method: rpcMethod, + params: [], }); - it('calls the onBreak callback once after the circuit breaks', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - - expect(onBreakListener).toHaveBeenCalledTimes(1); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedError, - endpointUrl: `${endpointUrl}/`, - }); - }); + await expect(promise).rejects.toThrow(expectedError); + }); - it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const logger = { warn: jest.fn() }; - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - logger, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + it('does not call onRetry', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onRetryListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onRetry(onRetryListener); - const jsonRpcRequest = { + await ignoreRejection( + service.request({ id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', + jsonrpc: '2.0', + method: rpcMethod, params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - - clock.tick(60000); - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: - 'RPC endpoint returned too many errors, retrying in 29 minutes. Consider using a different RPC endpoint.', - }), - ); - }); + }), + ); + expect(onRetryListener).not.toHaveBeenCalled(); + }); - it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const logger = { warn: jest.fn() }; - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - logger, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + it('does not call onBreak', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onBreakListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onBreak(onBreakListener); - const jsonRpcRequest = { + await ignoreRejection( + service.request({ id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', + jsonrpc: '2.0', + method: rpcMethod, params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - - expect(logger.warn).toHaveBeenCalledWith( - expect.objectContaining({ - message: 'Execution prevented because the circuit breaker is open', - }), - ); - }); + }), + ); + expect(onBreakListener).not.toHaveBeenCalled(); }); - describe('if a failover service is provided', () => { - it('still retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + it('does not call onDegraded', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onDegradedListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onDegraded(onDegradedListener); - const jsonRpcRequest = { + await ignoreRejection( + service.request({ id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', + jsonrpc: '2.0', + method: rpcMethod, params: [], - }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(5); - }); + }), + ); + expect(onDegradedListener).not.toHaveBeenCalled(); + }); - it('forwards the request to the failover service in addition to the primary endpoint while the circuit is broken, stopping when the primary endpoint recovers', async () => { - const clock = getClock(); - const jsonRpcRequest = { + it('does not call onAvailable', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onAvailableListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onAvailable(onAvailableListener); + + await ignoreRejection( + service.request({ id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', + jsonrpc: '2.0', + method: rpcMethod, params: [], - }; - let invocationCounter = 0; - const mockFetch = jest.fn(async () => { - invocationCounter += 1; - if (invocationCounter === 17) { - return new Response( - JSON.stringify({ - id: jsonRpcRequest.id, - jsonrpc: jsonRpcRequest.jsonrpc, - result: 'ok', - }), - ); - } - throw producedError; - }); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - fetchOptions: { - headers: { - 'X-Foo': 'bar', - }, - }, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this 
promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + }), + ); + expect(onAvailableListener).not.toHaveBeenCalled(); + }); - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(5); + /* eslint-enable jest/require-top-level-describe */ +} - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(10); +/** + * These are tests that exercise logic for cases in which the request cannot be + * made because the `fetch` calls throws a specific error. + * + * @param args - The arguments + * @param args.getClock - A function that returns the Sinon clock, set in + * `beforeEach`. + * @param args.producedError - The error produced when `fetch` is called. + * @param args.expectedError - The error that a call to the service's `request` + * method is expected to produce. + */ +function testsForRetriableFetchErrors({ + getClock, + producedError, + expectedError, +}: { + getClock: () => SinonFakeTimers; + producedError: Error; + expectedError: string | jest.Constructable | RegExp | Error; +}) { + // This function is designed to be used inside of a describe, so this won't be + // a problem in practice. + /* eslint-disable jest/require-top-level-describe */ - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - expect(mockFetch).toHaveBeenCalledTimes(15); - expect(failoverService.request).toHaveBeenCalledTimes(1); - expect(failoverService.request).toHaveBeenNthCalledWith( - 1, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); + it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); + }); - await service.request(jsonRpcRequest); - // The circuit is broken, so the `fetch` is not attempted - expect(mockFetch).toHaveBeenCalledTimes(15); - expect(failoverService.request).toHaveBeenCalledTimes(2); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + expect(mockFetch).toHaveBeenCalledTimes(5); + }); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - await service.request(jsonRpcRequest); - expect(mockFetch).toHaveBeenCalledTimes(16); - // The circuit breaks again - expect(failoverService.request).toHaveBeenCalledTimes(3); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); + it('calls the onDegraded callback once for each retry round', async () => { + const clock = 
getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const onDegradedListener = jest.fn(); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // Finally the request succeeds - const response = await service.request(jsonRpcRequest); - expect(response).toStrictEqual({ - id: 1, - jsonrpc: '2.0', - result: 'ok', - }); - expect(mockFetch).toHaveBeenCalledTimes(17); - expect(failoverService.request).toHaveBeenCalledTimes(3); + service.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(onDegradedListener).toHaveBeenCalledTimes(2); + expect(onDegradedListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + error: expectedError, }); + }); - it('still calls onBreak each time the circuit breaks from the perspective of the primary endpoint', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const failoverService = buildMockRpcService({ - endpointUrl: new URL(failoverEndpointUrl), - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); + it('still re-throws the error even after the circuit breaks', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); + }); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // The circuit breaks again - await service.request(jsonRpcRequest); + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + }); - expect(onBreakListener).toHaveBeenCalledTimes(2); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedError, - endpointUrl: `${endpointUrl}/`, - failoverEndpointUrl: `${failoverEndpointUrl}/`, - }); + it('calls the onBreak callback once after the circuit breaks', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; }); + const endpointUrl = 'https://rpc.example.chain'; + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(onBreakListener).toHaveBeenCalledTimes(1); + expect(onBreakListener).toHaveBeenCalledWith({ + error: expectedError, + endpointUrl: `${endpointUrl}/`, + }); + }); - it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const logger = { warn: jest.fn() }; - const failoverService = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: failoverEndpointUrl, - logger, - }); - failoverService.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - failoverService, - logger, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - // Get through the first two rounds of retries on the primary - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit and sends the request to the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - // Get through the first two rounds of retries on the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - - // The last retry breaks the circuit on the failover - clock.tick(60000); - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: - 'RPC endpoint returned too many errors, retrying in 29 minutes. Consider using a different RPC endpoint.', - }), - ); - expect(logger.warn).toHaveBeenCalledWith( - expect.objectContaining({ - message: 'Execution prevented because the circuit breaker is open', - }), - ); + it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const logger = { warn: jest.fn() }; + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + logger, + }); + service.onRetry(() => { + clock.next(); }); - it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const logger = { warn: jest.fn() }; - const failoverService = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: failoverEndpointUrl, - logger, - }); - failoverService.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - failoverService, - logger, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + // Advance a minute to test that the message updates dynamically as time passes + clock.tick(60000); + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: + 'RPC endpoint returned too many errors, retrying in 29 minutes. 
Consider using a different RPC endpoint.', + }), + ); + }); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - // Get through the first two rounds of retries on the primary - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit and sends the request to the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - // Get through the first two rounds of retries on the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - - // The last retry breaks the circuit on the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - expect(logger.warn).toHaveBeenCalledWith( - expect.objectContaining({ - message: 'Execution prevented because the circuit breaker is open', - }), - ); + it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const logger = { warn: jest.fn() }; + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + logger, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(logger.warn).toHaveBeenCalledWith( + expect.objectContaining({ + message: 'Execution prevented because the circuit breaker is open', + }), + ); + }); + + it('calls the onAvailable callback if the endpoint becomes degraded via errors and then recovers', async () => { + const clock = getClock(); + let invocationIndex = -1; + const mockFetch = jest.fn(async () => { + invocationIndex += 1; + if (invocationIndex === DEFAULT_MAX_RETRIES + 1) { + return new Response( + JSON.stringify({ + id: 1, + jsonrpc: '2.0', + result: { some: 'data' }, + }), + ); + } + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const onAvailableListener = jest.fn(); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + }); + service.onAvailable(onAvailableListener); + service.onRetry(() => { + clock.next(); }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Cause the retry policy to give up + await ignoreRejection(service.request(jsonRpcRequest)); + await service.request(jsonRpcRequest); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); }); + + /* eslint-enable jest/require-top-level-describe */ } /** @@ -1613,363 +1454,203 @@ function testsForRetriableResponses({ }) { // This function is designed to be used inside of a describe, so this won't be // a problem in practice. 
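As an illustration of the comment above (this example is not part of the diff), the response-based helper would be invoked from a `describe` in the main test file roughly as follows; the status code, response body, and error expectations are invented, and `clock` is assumed to be the Sinon fake timer set up in `beforeEach`:

```ts
describe('when the endpoint returns a retriable 503 response', () => {
  testsForRetriableResponses({
    getClock: () => clock,
    httpStatus: 503,
    responseBody: 'Service temporarily unavailable',
    expectedError: /503/u,
    expectedOnBreakError: expect.any(Error),
  });
});
```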
- /* eslint-disable jest/no-identical-title */ - - describe('if there is no failover service provided', () => { - it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const scope = nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(5) - .reply(httpStatus, responseBody); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + /* eslint-disable jest/require-top-level-describe,jest/no-identical-title */ - const jsonRpcRequest = { + it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { + const clock = getClock(); + const scope = nock('https://rpc.example.chain') + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(scope.isDone()).toBe(true); + }) + .times(5) + .reply(httpStatus, responseBody); + const service = new RpcService({ + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); }); - it('still re-throws the error even after the circuit breaks', async () => { - const clock = getClock(); - nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(15) - .reply(httpStatus, responseBody); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + expect(scope.isDone()).toBe(true); + }); - const jsonRpcRequest = { + it('still re-throws the error even after the circuit breaks', async () => { + const clock = getClock(); + nock('https://rpc.example.chain') + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); + }) + .times(15) + .reply(httpStatus, responseBody); + const service = new RpcService({ + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); }); - it('calls the onBreak callback once after the circuit breaks', async () => { - const clock = getClock(); - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(15) - .reply(httpStatus, responseBody); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + }); - const jsonRpcRequest = { + it('calls the onBreak callback once after the circuit breaks', async () => { + const clock = getClock(); + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - - expect(onBreakListener).toHaveBeenCalledTimes(1); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedOnBreakError, - endpointUrl: `${endpointUrl}/`, - }); + }) + .times(15) + .reply(httpStatus, responseBody); + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(onBreakListener).toHaveBeenCalledTimes(1); + expect(onBreakListener).toHaveBeenCalledWith({ + error: expectedOnBreakError, + endpointUrl: `${endpointUrl}/`, }); }); - describe('if a 
failover service is provided', () => { - it('still retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const scope = nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(5) - .reply(httpStatus, responseBody); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - - const jsonRpcRequest = { + it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(scope.isDone()).toBe(true); + }) + .times(15) + .reply(httpStatus, responseBody); + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, }); - - it('forwards the request to the failover service in addition to the primary endpoint while the circuit is broken, stopping when the primary endpoint recovers', async () => { - const clock = getClock(); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - let invocationCounter = 0; - nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(17) - .reply(() => { - invocationCounter += 1; - if (invocationCounter === 17) { - return [ - 200, - JSON.stringify({ - id: jsonRpcRequest.id, - jsonrpc: jsonRpcRequest.jsonrpc, - result: 'ok', - }), - ]; - } - return [httpStatus, responseBody]; - }); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - fetchOptions: { - headers: { - 'X-Foo': 'bar', - }, - }, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(invocationCounter).toBe(5); - - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(invocationCounter).toBe(10); - - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - expect(invocationCounter).toBe(15); - expect(failoverService.request).toHaveBeenCalledTimes(1); - expect(failoverService.request).toHaveBeenNthCalledWith( - 1, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); - - await service.request(jsonRpcRequest); - // The circuit is broken, so the `fetch` is not attempted - expect(invocationCounter).toBe(15); - expect(failoverService.request).toHaveBeenCalledTimes(2); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); - - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - await service.request(jsonRpcRequest); - expect(invocationCounter).toBe(16); - // The circuit breaks again - expect(failoverService.request).toHaveBeenCalledTimes(3); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); - - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // Finally the request succeeds - const response = await service.request(jsonRpcRequest); - expect(response).toStrictEqual({ - id: 1, - jsonrpc: '2.0', - result: 'ok', - }); - expect(invocationCounter).toBe(17); - expect(failoverService.request).toHaveBeenCalledTimes(3); + service.onRetry(() => { + clock.next(); }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + // Advance a minute to test that the message updates dynamically as time passes + clock.tick(60000); + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: + 'RPC endpoint returned too many errors, retrying in 29 minutes. 
Consider using a different RPC endpoint.', + }), + ); + }); - it('still calls onBreak each time the circuit breaks from the perspective of the primary endpoint', async () => { - const clock = getClock(); - nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(16) - .reply(httpStatus, responseBody); - const endpointUrl = 'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const failoverService = buildMockRpcService({ - endpointUrl: new URL(failoverEndpointUrl), - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); - - const jsonRpcRequest = { + it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // The circuit breaks again - await service.request(jsonRpcRequest); - - expect(onBreakListener).toHaveBeenCalledTimes(2); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedOnBreakError, - endpointUrl: `${endpointUrl}/`, - failoverEndpointUrl: `${failoverEndpointUrl}/`, - }); + }) + .times(15) + .reply(httpStatus, responseBody); + const logger = { warn: jest.fn() }; + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + logger, + }); + service.onRetry(() => { + clock.next(); }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(logger.warn).toHaveBeenCalledWith( + expect.objectContaining({ + message: 'Execution prevented because the circuit breaker is open', + }), + ); }); - /* eslint-enable jest/no-identical-title */ -} - -/** - * Constructs a fake RPC service for use as a failover in tests. - * - * @param overrides - The overrides. - * @returns The fake failover service. 
- */ -function buildMockRpcService( - overrides?: Partial, -): AbstractRpcService { - return { - endpointUrl: new URL('https://test.example'), - request: jest.fn(), - onRetry: jest.fn(), - onBreak: jest.fn(), - onDegraded: jest.fn(), - ...overrides, - }; + /* eslint-enable jest/require-top-level-describe,jest/no-identical-title */ } diff --git a/packages/network-controller/src/rpc-service/rpc-service.ts b/packages/network-controller/src/rpc-service/rpc-service.ts index 7021e8167cd..b170989fc2e 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.ts @@ -4,7 +4,6 @@ import type { } from '@metamask/controller-utils'; import { BrokenCircuitError, - CircuitState, HttpError, createServicePolicy, handleWhen, @@ -23,7 +22,8 @@ import deepmerge from 'deepmerge'; import type { Logger } from 'loglevel'; import type { AbstractRpcService } from './abstract-rpc-service'; -import type { AddToCockatielEventData, FetchOptions } from './shared'; +import type { FetchOptions } from './shared'; +import { projectLogger, createModuleLogger } from '../logger'; /** * Options for the RpcService constructor. @@ -38,11 +38,6 @@ export type RpcServiceOptions = { * The URL of the RPC endpoint to hit. */ endpointUrl: URL | string; - /** - * An RPC service that represents a failover endpoint which will be invoked - * while the circuit for _this_ service is open. - */ - failoverService?: AbstractRpcService; /** * A function that can be used to make an HTTP request. If your JavaScript * environment supports `fetch` natively, you'll probably want to pass that; @@ -65,6 +60,8 @@ export type RpcServiceOptions = { policyOptions?: Omit; }; +const log = createModuleLogger(projectLogger, 'RpcService'); + /** * The maximum number of times that a failing service should be re-run before * giving up. @@ -238,25 +235,25 @@ function stripCredentialsFromUrl(url: URL): URL { */ export class RpcService implements AbstractRpcService { /** - * The function used to make an HTTP request. + * The URL of the RPC endpoint. */ - readonly #fetch: typeof fetch; + readonly endpointUrl: URL; /** - * The URL of the RPC endpoint. + * The last error that the retry policy captured (or `undefined` if the last + * execution of the service was successful). */ - readonly endpointUrl: URL; + lastError: Error | undefined; /** - * A common set of options that the request options will extend. + * The function used to make an HTTP request. */ - readonly #fetchOptions: FetchOptions; + readonly #fetch: typeof fetch; /** - * An RPC service that represents a failover endpoint which will be invoked - * while the circuit for _this_ service is open. + * A common set of options that the request options will extend. */ - readonly #failoverService: RpcServiceOptions['failoverService']; + readonly #fetchOptions: FetchOptions; /** * A `loglevel` logger. 
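To make the reshaped API concrete, here is a small sketch (not from the diff) of how a caller might wire two independently constructed services now that `failoverService` has been removed from `RpcServiceOptions`. The option and method names come from this diff; the import path, URLs, and logging are illustrative only:

```ts
import { RpcService } from './rpc-service';

// The primary and the failover are now separate, self-contained services; a
// higher-level "chain" decides when to fall back, rather than RpcService
// itself forwarding requests.
const primary = new RpcService({
  fetch,
  btoa,
  endpointUrl: 'https://primary.example.chain',
});
const failover = new RpcService({
  fetch,
  btoa,
  endpointUrl: 'https://failover.example.chain',
});

primary.onBreak(({ endpointUrl, ...rest }) => {
  console.debug(`Circuit broke for ${endpointUrl}`, rest);
});
primary.onAvailable(({ endpointUrl }) => {
  // Once the primary recovers, the failover's circuit state is stale, so it
  // can be cleared via the `resetPolicy` method added further down in this
  // diff.
  failover.resetPolicy();
  console.debug(`${endpointUrl} is available again`);
});
```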
@@ -277,7 +274,6 @@ export class RpcService implements AbstractRpcService { const { btoa: givenBtoa, endpointUrl, - failoverService, fetch: givenFetch, logger, fetchOptions = {}, @@ -292,10 +288,9 @@ export class RpcService implements AbstractRpcService { givenBtoa, ); this.endpointUrl = stripCredentialsFromUrl(normalizedUrl); - this.#failoverService = failoverService; this.#logger = logger; - const policy = createServicePolicy({ + this.#policy = createServicePolicy({ maxRetries: DEFAULT_MAX_RETRIES, maxConsecutiveFailures: DEFAULT_MAX_CONSECUTIVE_FAILURES, ...policyOptions, @@ -315,7 +310,24 @@ export class RpcService implements AbstractRpcService { ); }), }); - this.#policy = policy; + } + + /** + * Resets the underlying composite Cockatiel policy. + * + * This is useful in a collection of RpcServices where some act as failovers + * for others where you effectively want to invalidate the failovers when the + * primary recovers. + */ + resetPolicy() { + this.#policy.reset(); + } + + /** + * @returns The state of the underlying circuit. + */ + getCircuitState() { + return this.#policy.getCircuitState(); } /** @@ -325,12 +337,7 @@ export class RpcService implements AbstractRpcService { * @returns What {@link ServicePolicy.onRetry} returns. * @see {@link createServicePolicy} */ - onRetry( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string } - >, - ) { + onRetry(listener: Parameters[0]) { return this.#policy.onRetry((data) => { listener({ ...data, endpointUrl: this.endpointUrl.toString() }); }); @@ -338,26 +345,28 @@ export class RpcService implements AbstractRpcService { /** * Listens for when the RPC service retries the request too many times in a - * row. + * row, causing the underlying circuit to break. * * @param listener - The callback to be called when the circuit is broken. * @returns What {@link ServicePolicy.onBreak} returns. * @see {@link createServicePolicy} */ - onBreak( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string; failoverEndpointUrl?: string } - >, - ) { + onBreak(listener: Parameters[0]) { return this.#policy.onBreak((data) => { - listener({ - ...data, - endpointUrl: this.endpointUrl.toString(), - failoverEndpointUrl: this.#failoverService - ? this.#failoverService.endpointUrl.toString() - : undefined, - }); + // `{ isolated: true }` is a special object that shows up when `isolate` + // is called on the circuit breaker. Usually `isolate` is used to hold the + // circuit open, but we (ab)use this method in `createServicePolicy` to + // reset the circuit breaker policy. When we do this, we don't want to + // call `onBreak` handlers, because then it causes + // `NetworkController:rpcEndpointUnavailable` and + // `NetworkController:rpcEndpointChainUnavailable` to be published. So we + // have to ignore that object here. The consequence is that `isolate` + // doesn't function the way it is intended, at least in the context of an + // RpcService. However, we are making a bet that we won't need to use it + // other than how we are already using it. + if (!('isolated' in data)) { + listener({ ...data, endpointUrl: this.endpointUrl.toString() }); + } }); } @@ -369,21 +378,27 @@ export class RpcService implements AbstractRpcService { * @returns What {@link ServicePolicy.onDegraded} returns. 
* @see {@link createServicePolicy} */ - onDegraded( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string } - >, - ) { + onDegraded(listener: Parameters[0]) { return this.#policy.onDegraded((data) => { listener({ ...(data ?? {}), endpointUrl: this.endpointUrl.toString() }); }); } /** - * Makes a request to the RPC endpoint. If the circuit is open because this - * request has failed too many times, the request is forwarded to a failover - * service (if provided). + * Listens for when the policy underlying this RPC service is available. + * + * @param listener - The callback to be called when the request is available. + * @returns What {@link ServicePolicy.onAvailable} returns. + * @see {@link createServicePolicy} + */ + onAvailable(listener: Parameters[0]) { + return this.#policy.onAvailable(() => { + listener({ endpointUrl: this.endpointUrl.toString() }); + }); + } + + /** + * Makes a request to the RPC endpoint. * * This overload is specifically designed for `eth_getBlockByNumber`, which * can return a `result` of `null` despite an expected `Result` being @@ -405,9 +420,7 @@ export class RpcService implements AbstractRpcService { ): Promise | JsonRpcResponse>; /** - * Makes a request to the RPC endpoint. If the circuit is open because this - * request has failed too many times, the request is forwarded to a failover - * service (if provided). + * Makes a request to the RPC endpoint. * * This overload is designed for all RPC methods except for * `eth_getBlockByNumber`, which are expected to return a `result` of the @@ -437,21 +450,7 @@ export class RpcService implements AbstractRpcService { jsonRpcRequest, fetchOptions, ); - - try { - return await this.#processRequest(completeFetchOptions); - } catch (error) { - if ( - this.#policy.circuitBreakerPolicy.state === CircuitState.Open && - this.#failoverService !== undefined - ) { - return await this.#failoverService.request( - jsonRpcRequest, - completeFetchOptions, - ); - } - throw error; - } + return await this.#executeAndProcessRequest(completeFetchOptions); } /** @@ -528,19 +527,46 @@ export class RpcService implements AbstractRpcService { * @throws A generic HTTP client JSON-RPC error (code -32050) for any other 4xx HTTP status codes. * @throws A "parse" JSON-RPC error (code -32700) if the response is not valid JSON. */ - async #processRequest( + async #executeAndProcessRequest( fetchOptions: FetchOptions, ): Promise | JsonRpcResponse> { let response: Response | undefined; try { - return await this.#policy.execute(async () => { - response = await this.#fetch(this.endpointUrl, fetchOptions); - if (!response.ok) { - throw new HttpError(response.status); - } - return await response.json(); - }); + log( + `[${this.endpointUrl}] Circuit state`, + this.#policy.getCircuitState(), + ); + const jsonDecodedResponse = await this.#policy.execute( + async (context) => { + log( + 'REQUEST INITIATED:', + this.endpointUrl.toString(), + '::', + fetchOptions, + // @ts-expect-error This property _is_ here, the type of + // ServicePolicy is just wrong. + `(attempt ${context.attempt + 1})`, + ); + response = await this.#fetch(this.endpointUrl, fetchOptions); + if (!response.ok) { + throw new HttpError(response.status); + } + log( + 'REQUEST SUCCESSFUL:', + this.endpointUrl.toString(), + response.status, + ); + return await response.json(); + }, + ); + this.lastError = undefined; + return jsonDecodedResponse; } catch (error) { + log('REQUEST ERROR:', this.endpointUrl.toString(), error); + + this.lastError = + error instanceof Error ? 
error : new Error(getErrorMessage(error)); + if (error instanceof HttpError) { const status = error.httpStatus; if (status === 401) {
diff --git a/packages/network-controller/src/rpc-service/shared.ts b/packages/network-controller/src/rpc-service/shared.ts index e33ae6129ad..c66cb1082c8 100644 --- a/packages/network-controller/src/rpc-service/shared.ts +++ b/packages/network-controller/src/rpc-service/shared.ts @@ -1,13 +1,58 @@ +import type { + CockatielEvent, + CockatielEventEmitter, +} from '@metamask/controller-utils'; + /** * Equivalent to the built-in `FetchOptions` type, but renamed for clarity. */ export type FetchOptions = RequestInit; /** - * Extends an event listener that Cockatiel uses so that when it is called, more - * data can be supplied in the event object. + * Converts a Cockatiel event type to an event emitter type. */ -export type AddToCockatielEventData<EventListener, AdditionalData> = - EventListener extends (data: infer Data) => void - ? (data: Data extends void ? AdditionalData : Data & AdditionalData) => void +export type CockatielEventToEventEmitter<Event> = + Event extends CockatielEvent<infer Data> + ? CockatielEventEmitter<Data> : never; +
+/** + * Obtains the event data type from a Cockatiel event or event listener type. + */ +export type ExtractCockatielEventData<CockatielEventOrEventListener> = + CockatielEventOrEventListener extends CockatielEvent<infer Data> + ? Data + : CockatielEventOrEventListener extends (data: infer Data) => void + ? Data + : never; +
+/** + * Extends the data that a Cockatiel event listener is called with, merging in + * additional data. + */ +export type ExtendCockatielEventData<OriginalData, AdditionalData> = + OriginalData extends void ? AdditionalData : OriginalData & AdditionalData; +
+/** + * Removes keys from the data that a Cockatiel event listener is called with. + */ +export type ExcludeCockatielEventData< + OriginalData, + Keys extends PropertyKey, +> = OriginalData extends void ? void : Omit<OriginalData, Keys>; +
+/** + * Converts a Cockatiel event type to an event listener type, adding the + * requested data. + */ +export type CockatielEventToEventListenerWithData<Event, Data> = ( + data: ExtendCockatielEventData<ExtractCockatielEventData<Event>, Data>, +) => void; +
+/** + * Converts a Cockatiel event listener type to an event emitter type.
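To illustrate the intent of these utility types (example only, not part of the diff; the concrete event data shapes are assumptions based on the listener payloads used in the tests above):

```ts
// Data carried by a hypothetical circuit-break event.
type BreakData = { error: Error };

// Mix `endpointUrl` into what listeners receive:
type ServiceBreakData = ExtendCockatielEventData<
  BreakData,
  { endpointUrl: string }
>;
// => { error: Error } & { endpointUrl: string }

// A `void` event (no payload) resolves to just the additional data:
type AvailableData = ExtendCockatielEventData<void, { endpointUrl: string }>;
// => { endpointUrl: string }

// And the reverse direction, dropping a key again:
type BreakDataWithoutUrl = ExcludeCockatielEventData<
  ServiceBreakData,
  'endpointUrl'
>;
// => Omit<{ error: Error } & { endpointUrl: string }, 'endpointUrl'>
```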
+ */ +export type CockatielEventToEventEmitterWithData = + CockatielEventEmitter< + ExtendCockatielEventData, Data> + >; diff --git a/packages/network-controller/tests/NetworkController.test.ts b/packages/network-controller/tests/NetworkController.test.ts index 2e85aaddaad..6383310fdca 100644 --- a/packages/network-controller/tests/NetworkController.test.ts +++ b/packages/network-controller/tests/NetworkController.test.ts @@ -4637,6 +4637,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 2, { + networkClientId: infuraNetworkType, networkClientConfiguration: { infuraProjectId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -4654,6 +4655,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 3, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -4670,6 +4672,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://third.failover.endpoint'], @@ -6047,6 +6050,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(3, { + networkClientId: infuraNetworkType, networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://failover.endpoint'], @@ -6278,6 +6282,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(3, { + networkClientId: 'AAAA-AAAA-AAAA-AAAA', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -6293,6 +6298,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(4, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -7265,6 +7271,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(3, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://failover.endpoint'], @@ -8135,6 +8142,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 3, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://first.failover.endpoint'], @@ -8151,6 +8159,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://second.failover.endpoint'], @@ -9136,6 +9145,7 @@ describe('NetworkController', () => { }); expect(createAutoManagedNetworkClientSpy).toHaveBeenCalledWith({ + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://failover.endpoint'], @@ -10292,6 +10302,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -10307,6 +10318,7 @@ describe('NetworkController', () => { 
expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(5, { + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -11008,6 +11020,7 @@ describe('NetworkController', () => { ); expect(createAutoManagedNetworkClientSpy).toHaveBeenCalledWith({ + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://first.failover.endpoint'], @@ -11021,6 +11034,7 @@ describe('NetworkController', () => { isRpcFailoverEnabled: true, }); expect(createAutoManagedNetworkClientSpy).toHaveBeenCalledWith({ + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://second.failover.endpoint'], @@ -11737,6 +11751,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(6, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: anotherInfuraChainId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -11752,6 +11767,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(7, { + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: anotherInfuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -12434,6 +12450,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: '0x2448', failoverRpcUrls: ['https://first.failover.endpoint'], @@ -12450,6 +12467,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 5, { + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: '0x2448', failoverRpcUrls: ['https://second.failover.endpoint'], diff --git a/packages/network-controller/tests/network-client/helpers.ts b/packages/network-controller/tests/network-client/helpers.ts index 4f0dcb128ea..6d3e6b60beb 100644 --- a/packages/network-controller/tests/network-client/helpers.ts +++ b/packages/network-controller/tests/network-client/helpers.ts @@ -3,13 +3,16 @@ import type { InfuraNetworkType } from '@metamask/controller-utils'; import { BUILT_IN_NETWORKS } from '@metamask/controller-utils'; import type { BlockTracker } from '@metamask/eth-block-tracker'; import EthQuery from '@metamask/eth-query'; -import type { Hex } from '@metamask/utils'; +import type { Hex, Json, JsonRpcRequest } from '@metamask/utils'; import nock, { isDone as nockIsDone } from 'nock'; import type { Scope as NockScope } from 'nock'; import { useFakeTimers } from 'sinon'; import { createNetworkClient } from '../../src/create-network-client'; -import type { NetworkControllerOptions } from '../../src/NetworkController'; +import type { + NetworkClientId, + NetworkControllerOptions, +} from '../../src/NetworkController'; import type { NetworkClientConfiguration, Provider } from '../../src/types'; import { NetworkClientType } from '../../src/types'; import type { RootMessenger } from '../helpers'; @@ -45,9 +48,7 @@ const DEFAULT_LATEST_BLOCK_NUMBER = '0x42'; * * @param args - The arguments that `console.log` takes. 
*/ -// TODO: Replace `any` with type -// eslint-disable-next-line @typescript-eslint/no-explicit-any -function debug(...args: any) { +function debug(...args: unknown[]) { /* eslint-disable-next-line n/no-process-env */ if (process.env.DEBUG_PROVIDER_TESTS === '1') { console.log(...args); @@ -71,21 +72,18 @@ function buildScopeForMockingRequests( }); } -// TODO: Replace `any` with type -// eslint-disable-next-line @typescript-eslint/no-explicit-any -export type MockRequest = { method: string; params?: any[] }; +export type MockRequest = { method: string; params?: Json[] }; type Response = { id?: number | string; jsonrpc?: '2.0'; - // TODO: Replace `any` with type - // eslint-disable-next-line @typescript-eslint/no-explicit-any - error?: any; - // TODO: Replace `any` with type - // eslint-disable-next-line @typescript-eslint/no-explicit-any - result?: any; + error?: unknown; + result?: unknown; httpStatus?: number; }; -export type MockResponse = { body: JSONRPCResponse | string } | Response; +export type MockResponse = + | { body: JSONRPCResponse | string } + | Response + | (() => Response | Promise); type CurriedMockRpcCallOptions = { request: MockRequest; // The response data. @@ -147,22 +145,12 @@ function mockRpcCall({ // eth-query always passes `params`, so even if we don't supply this property, // for consistency with makeRpcCall, assume that the `body` contains it const { method, params = [], ...rest } = request; - let httpStatus = 200; - let completeResponse: JSONRPCResponse | string = { id: 2, jsonrpc: '2.0' }; - if (response !== undefined) { - if ('body' in response) { - completeResponse = response.body; - } else { - if (response.error) { - completeResponse.error = response.error; - } else { - completeResponse.result = response.result; - } - if (response.httpStatus) { - httpStatus = response.httpStatus; - } - } - } + const httpStatus = + (typeof response === 'object' && + 'httpStatus' in response && + response.httpStatus) || + 200; + /* @ts-expect-error The types for Nock do not include `basePath` in the interface for Nock.Scope. */ const url = nockScope.basePath.includes('infura.io') ? 
`/v3/${MOCK_INFURA_PROJECT_ID}` @@ -196,26 +184,42 @@ function mockRpcCall({ if (error !== undefined) { return nockRequest.replyWithError(error); - } else if (completeResponse !== undefined) { - // TODO: Replace `any` with type - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return nockRequest.reply(httpStatus, (_, requestBody: any) => { - if (typeof completeResponse === 'string') { - return completeResponse; - } - - if (response !== undefined && !('body' in response)) { - if (response.id === undefined) { - completeResponse.id = requestBody.id; - } else { - completeResponse.id = response.id; - } - } - debug('Nock returning Response', completeResponse); - return completeResponse; - }); } - return nockRequest; + + return nockRequest.reply(async (_uri, requestBody) => { + const jsonRpcRequest = requestBody as JsonRpcRequest; + let resolvedResponse: Response | string | JSONRPCResponse | undefined; + if (typeof response === 'function') { + resolvedResponse = await response(); + } else if (response !== undefined && 'body' in response) { + resolvedResponse = response.body; + } else { + resolvedResponse = response; + } + + if ( + typeof resolvedResponse === 'string' || + resolvedResponse === undefined + ) { + return [httpStatus, resolvedResponse]; + } + + const { + id: jsonRpcId = jsonRpcRequest.id, + jsonrpc: jsonRpcVersion = jsonRpcRequest.jsonrpc, + result: jsonRpcResult, + error: jsonRpcError, + } = resolvedResponse; + + const completeResponse = { + id: jsonRpcId, + jsonrpc: jsonRpcVersion, + result: jsonRpcResult, + error: jsonRpcError, + }; + debug('Nock returning Response', completeResponse); + return [httpStatus, completeResponse]; + }); } type MockBlockTrackerRequestOptions = { @@ -227,7 +231,7 @@ type MockBlockTrackerRequestOptions = { * The block number that the block tracker should report, as a 0x-prefixed hex * string. */ - blockNumber: string; + blockNumber?: string; }; /** @@ -285,12 +289,16 @@ async function mockAllBlockTrackerRequests({ * response if it is successful or rejects with the error from the JSON-RPC * response otherwise. */ -function makeRpcCall(ethQuery: EthQuery, request: MockRequest) { +function makeRpcCall( + ethQuery: EthQuery, + request: MockRequest, +): Promise { return new Promise((resolve, reject) => { debug('[makeRpcCall] making request', request); - // TODO: Replace `any` with type + // ethQuery.sendAsync expects Json[] params, but our test helpers need to + // support undefined values in params for certain test scenarios. // eslint-disable-next-line @typescript-eslint/no-explicit-any - ethQuery.sendAsync(request, (error: any, result: any) => { + ethQuery.sendAsync(request as any, (error: unknown, result: unknown) => { debug('[makeRpcCall > ethQuery handler] error', error, 'result', result); if (error) { // This should be an error, but we will allow it to be whatever it is. 
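As a usage note (not from the diff), the new function form of `MockResponse` lets a test compute the reply lazily, for example to advance the fake clock before responding. The helper and option names match those above; the provider type, method, and values are illustrative, and `clock` is assumed to come from the test's fake-timer setup:

```ts
await withMockedCommunications(
  { providerType: 'custom', customRpcUrl: 'https://rpc.example.chain' },
  async (comms) => {
    comms.mockRpcCall({
      request: { method: 'eth_blockNumber' },
      response: () => {
        // Runs at reply time, so the fake clock can be advanced first to
        // simulate a slow endpoint.
        clock.tick(6000);
        return { result: '0x42' };
      },
    });
    // ...exercise the network client and assert on the result...
  },
);
```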
@@ -316,21 +324,34 @@ export type MockOptions = {
   getBlockTrackerOptions?: NetworkControllerOptions['getBlockTrackerOptions'];
   expectedHeaders?: Record<string, string>;
   messenger?: RootMessenger;
+  networkClientId?: NetworkClientId;
   isRpcFailoverEnabled?: boolean;
 };
 
+type MockBlockTrackerRequestOptionsWithoutScope = Omit<
+  MockBlockTrackerRequestOptions,
+  'nockScope'
+>;
+
 export type MockCommunications = {
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  mockNextBlockTrackerRequest: (options?: any) => void;
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  mockAllBlockTrackerRequests: (options?: any) => void;
+  mockNextBlockTrackerRequest: (
+    options?: MockBlockTrackerRequestOptionsWithoutScope,
+  ) => void;
+  mockAllBlockTrackerRequests: (
+    options?: MockBlockTrackerRequestOptionsWithoutScope,
+  ) => void | Promise<void>;
   mockRpcCall: (options: CurriedMockRpcCallOptions) => MockRpcCallResult;
   rpcUrl: string;
   infuraNetwork: InfuraNetworkType;
 };
 
+export type MockedEndpointConfig = {
+  providerType: ProviderType;
+  infuraNetwork?: InfuraNetworkType;
+  customRpcUrl?: string;
+  expectedHeaders?: Record<string, string>;
+};
+
 /**
  * Sets up request mocks for requests to the provider.
  *
@@ -359,17 +380,13 @@ export async function withMockedCommunications(
     ? `https://${infuraNetwork}.infura.io`
     : customRpcUrl;
   const nockScope = buildScopeForMockingRequests(rpcUrl, expectedHeaders);
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  const curriedMockNextBlockTrackerRequest = (localOptions: any) =>
-    mockNextBlockTrackerRequest({ nockScope, ...localOptions });
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  const curriedMockAllBlockTrackerRequests = (localOptions: any) =>
-    mockAllBlockTrackerRequests({ nockScope, ...localOptions });
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  const curriedMockRpcCall = (localOptions: any) =>
+  const curriedMockNextBlockTrackerRequest = (
+    localOptions?: MockBlockTrackerRequestOptionsWithoutScope,
+  ) => mockNextBlockTrackerRequest({ nockScope, ...localOptions });
+  const curriedMockAllBlockTrackerRequests = (
+    localOptions?: MockBlockTrackerRequestOptionsWithoutScope,
+  ) => mockAllBlockTrackerRequests({ nockScope, ...localOptions });
+  const curriedMockRpcCall = (localOptions: CurriedMockRpcCallOptions) =>
     mockRpcCall({ nockScope, ...localOptions });
 
   const comms = {
@@ -391,15 +408,12 @@ type MockNetworkClient = {
   blockTracker: BlockTracker;
   provider: Provider;
   clock: sinon.SinonFakeTimers;
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  makeRpcCall: (request: MockRequest) => Promise<any>;
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  makeRpcCallsInSeries: (requests: MockRequest[]) => Promise<any>;
+  makeRpcCall: (request: MockRequest) => Promise<unknown>;
+  makeRpcCallsInSeries: (requests: MockRequest[]) => Promise<unknown[]>;
   messenger: RootMessenger;
   chainId: Hex;
   rpcUrl: string;
+  comms?: MockCommunications[];
 };
 
 /**
@@ -419,21 +433,15 @@ type MockNetworkClient = {
  * `setTimeout` handler.
  * @returns The given promise.
 */
-export async function waitForPromiseToBeFulfilledAfterRunningAllTimers(
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  promise: any,
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  clock: any,
-) {
+export async function waitForPromiseToBeFulfilledAfterRunningAllTimers<T>(
+  promise: Promise<T>,
+  clock: sinon.SinonFakeTimers,
+): Promise<T> {
   let hasPromiseBeenFulfilled = false;
   let numTimesClockHasBeenAdvanced = 0;
 
   promise
-    // TODO: Replace `any` with type
-    // eslint-disable-next-line @typescript-eslint/no-explicit-any
-    .catch((error: any) => {
+    .catch((error: unknown) => {
       // This is used to silence Node.js warnings about the rejection
       // being handled asynchronously. The error is handled later when
      // `promise` is awaited, but we log it here anyway in case it gets
@@ -474,8 +482,13 @@ export async function waitForPromiseToBeFulfilledAfterRunningAllTimers(
  * @param options.getRpcServiceOptions - RPC service options factory.
  * @param options.getBlockTrackerOptions - Block tracker options factory.
  * @param options.messenger - The root messenger to use in tests.
+ * @param options.networkClientId - The ID of the new network client.
  * @param options.isRpcFailoverEnabled - Whether or not the RPC failover
  * functionality is enabled.
+ * @param options.mockedEndpoints - Optional array of endpoint configurations to
+ * mock. When provided, eliminates the need for nested withMockedCommunications
+ * calls. The first config is for the primary endpoint, subsequent ones are for
+ * failover endpoints.
  * @param fn - A function which will be called with an object that allows
  * interaction with the network client.
  * @returns The return value of the given function.
@@ -491,11 +504,11 @@ export async function withNetworkClient(
     getRpcServiceOptions = () => ({ fetch, btoa }),
     getBlockTrackerOptions = () => ({}),
     messenger = buildRootMessenger(),
+    networkClientId = 'some-network-client-id',
     isRpcFailoverEnabled = false,
-  }: MockOptions,
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  fn: (client: MockNetworkClient) => Promise<any>,
+    mockedEndpoints,
+  }: MockOptions & { mockedEndpoints?: MockedEndpointConfig[] },
+  fn: (client: MockNetworkClient) => Promise<unknown>,
 ) {
   // Faking timers ends up doing two things:
   // 1. Halting the block tracker (which depends on `setTimeout` to periodically
@@ -504,6 +517,37 @@ export async function withNetworkClient(
   // 2. Halting the retry logic (which also
   //    depends on `setTimeout`)
   const clock = useFakeTimers();
+  // Setup mocked communications if mockedEndpoints is provided
+  const comms: MockCommunications[] = [];
+  if (mockedEndpoints) {
+    for (const endpointConfig of mockedEndpoints) {
+      const endpointRpcUrl =
+        endpointConfig.providerType === 'infura'
+          ? `https://${endpointConfig.infuraNetwork ?? 'mainnet'}.infura.io`
+          : (endpointConfig.customRpcUrl ?? MOCK_RPC_URL);
+      const nockScope = buildScopeForMockingRequests(
+        endpointRpcUrl,
+        endpointConfig.expectedHeaders ?? {},
{}, + ); + const curriedMockNextBlockTrackerRequest = ( + localOptions?: MockBlockTrackerRequestOptionsWithoutScope, + ) => mockNextBlockTrackerRequest({ nockScope, ...localOptions }); + const curriedMockAllBlockTrackerRequests = ( + localOptions?: MockBlockTrackerRequestOptionsWithoutScope, + ) => mockAllBlockTrackerRequests({ nockScope, ...localOptions }); + const curriedMockRpcCall = (localOptions: CurriedMockRpcCallOptions) => + mockRpcCall({ nockScope, ...localOptions }); + + comms.push({ + mockNextBlockTrackerRequest: curriedMockNextBlockTrackerRequest, + mockAllBlockTrackerRequests: curriedMockAllBlockTrackerRequests, + mockRpcCall: curriedMockRpcCall, + rpcUrl: endpointRpcUrl, + infuraNetwork: endpointConfig.infuraNetwork ?? 'mainnet', + }); + } + } + const networkControllerMessenger = buildNetworkControllerMessenger(messenger); // The JSON-RPC client wraps `eth_estimateGas` so that it takes 2 seconds longer @@ -540,6 +584,7 @@ export async function withNetworkClient( : `https://${infuraNetwork}.infura.io/v3/${MOCK_INFURA_PROJECT_ID}`; const networkClient = createNetworkClient({ + id: networkClientId, configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -571,6 +616,7 @@ export async function withNetworkClient( messenger, chainId, rpcUrl, + comms: comms.length > 0 ? comms : undefined, }; try { @@ -579,14 +625,17 @@ export async function withNetworkClient( await blockTracker.destroy(); clock.restore(); + + // Ensure all nock mocks were called + if (mockedEndpoints) { + nockIsDone(); + } } } type BuildMockParamsOptions = { // The block parameter value to set. - // TODO: Replace `any` with type - // eslint-disable-next-line @typescript-eslint/no-explicit-any - blockParam: any; + blockParam: Json | undefined; // The index of the block parameter. 
   blockParamIndex: number;
 };
 
@@ -608,9 +657,12 @@ type BuildMockParamsOptions = {
 export function buildMockParams({
   blockParam,
   blockParamIndex,
-}: BuildMockParamsOptions) {
-  const params = new Array(blockParamIndex).fill('some value');
-  params[blockParamIndex] = blockParam;
+}: BuildMockParamsOptions): Json[] {
+  const params: Json[] = new Array(blockParamIndex).fill('some value');
+  // If blockParam is undefined, don't set it (resulting in a shorter array)
+  if (blockParam !== undefined) {
+    params[blockParamIndex] = blockParam;
+  }
 
   return params;
 }
@@ -630,11 +682,14 @@ export function buildMockParams({
 export function buildRequestWithReplacedBlockParam(
   { method, params = [] }: MockRequest,
   blockParamIndex: number,
-  // TODO: Replace `any` with type
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  blockParam: any,
-) {
-  const updatedParams = params.slice();
-  updatedParams[blockParamIndex] = blockParam;
+  blockParam: Json | undefined,
+): MockRequest {
+  const updatedParams: Json[] = params.slice();
+  // If blockParam is undefined, truncate the array at that index
+  if (blockParam !== undefined) {
+    updatedParams[blockParamIndex] = blockParam;
+  } else {
+    updatedParams.length = blockParamIndex;
+  }
   return { method, params: updatedParams };
 }
diff --git a/packages/network-controller/tests/network-client/rpc-failover.ts b/packages/network-controller/tests/network-client/rpc-failover.ts
index f214c939cb3..da09d22b9ae 100644
--- a/packages/network-controller/tests/network-client/rpc-failover.ts
+++ b/packages/network-controller/tests/network-client/rpc-failover.ts
@@ -26,7 +26,6 @@ export function testsForRpcFailoverBehavior({
   failure,
   isRetriableFailure,
   getExpectedError,
-  getExpectedBreakError = getExpectedError,
 }: {
   providerType: ProviderType;
   requestToCall: MockRequest;
@@ -96,7 +95,7 @@ export function testsForRpcFailoverBehavior({
         },
         async ({ makeRpcCall, clock }) => {
           messenger.subscribe(
-            'NetworkController:rpcEndpointRequestRetried',
+            'NetworkController:rpcEndpointRetried',
             () => {
               // Ensure that we advance to the next RPC request
               // retry, not the next block tracker request.
@@ -120,188 +119,6 @@ export function testsForRpcFailoverBehavior({
       });
     });
 
-    it('publishes the NetworkController:rpcEndpointUnavailable event when the failover occurs', async () => {
-      const failoverEndpointUrl = 'https://failover.endpoint/';
-
-      await withMockedCommunications({ providerType }, async (primaryComms) => {
-        await withMockedCommunications(
-          {
-            providerType: 'custom',
-            customRpcUrl: failoverEndpointUrl,
-          },
-          async (failoverComms) => {
-            const request = requestToCall;
-            const requestToMock = getRequestToMock(request, blockNumber);
-            const additionalMockRpcCallOptions =
-              failure instanceof Error || typeof failure === 'string'
-                ? { error: failure }
-                : { response: failure };
-
-            // The first time a block-cacheable request is made, the
-            // latest block number is retrieved through the block
-            // tracker first.
-            primaryComms.mockNextBlockTrackerRequest({
-              blockNumber,
-            });
-            primaryComms.mockRpcCall({
-              request: requestToMock,
-              times: maxConsecutiveFailures,
-              ...additionalMockRpcCallOptions,
-            });
-            failoverComms.mockRpcCall({
-              request: requestToMock,
-              response: {
-                result: 'ok',
-              },
-            });
-
-            const messenger = buildRootMessenger();
-            const rpcEndpointUnavailableEventHandler = jest.fn();
-            messenger.subscribe(
-              'NetworkController:rpcEndpointUnavailable',
-              rpcEndpointUnavailableEventHandler,
-            );
-
-            await withNetworkClient(
-              {
-                providerType,
-                isRpcFailoverEnabled: true,
-                failoverRpcUrls: [failoverEndpointUrl],
-                messenger,
-                getRpcServiceOptions: () => ({
-                  fetch,
-                  btoa,
-                  policyOptions: {
-                    backoff: new ConstantBackoff(backoffDuration),
-                  },
-                }),
-              },
-              async ({ makeRpcCall, clock, chainId, rpcUrl }) => {
-                messenger.subscribe(
-                  'NetworkController:rpcEndpointRequestRetried',
-                  () => {
-                    // Ensure that we advance to the next RPC request
-                    // retry, not the next block tracker request.
-                    // We also don't need to await this, it just needs to
-                    // be added to the promise queue.
-                    // eslint-disable-next-line @typescript-eslint/no-floating-promises
-                    clock.tickAsync(backoffDuration);
-                  },
-                );
-
-                for (let i = 0; i < numRequestsToMake - 1; i++) {
-                  await ignoreRejection(makeRpcCall(request));
-                }
-                await makeRpcCall(request);
-
-                expect(rpcEndpointUnavailableEventHandler).toHaveBeenCalledWith(
-                  {
-                    chainId,
-                    endpointUrl: rpcUrl,
-                    failoverEndpointUrl,
-                    error: getExpectedBreakError(rpcUrl),
-                  },
-                );
-              },
-            );
-          },
-        );
-      });
-    });
-
-    it('publishes the NetworkController:rpcEndpointUnavailable event when the failover becomes unavailable', async () => {
-      const failoverEndpointUrl = 'https://failover.endpoint/';
-
-      await withMockedCommunications({ providerType }, async (primaryComms) => {
-        await withMockedCommunications(
-          {
-            providerType: 'custom',
-            customRpcUrl: failoverEndpointUrl,
-          },
-          async (failoverComms) => {
-            const request = requestToCall;
-            const requestToMock = getRequestToMock(request, blockNumber);
-            const additionalMockRpcCallOptions =
-              failure instanceof Error || typeof failure === 'string'
-                ? { error: failure }
-                : { response: failure };
-
-            // The first time a block-cacheable request is made, the
-            // latest block number is retrieved through the block
-            // tracker first.
-            primaryComms.mockNextBlockTrackerRequest({
-              blockNumber,
-            });
-            primaryComms.mockRpcCall({
-              request: requestToMock,
-              times: maxConsecutiveFailures,
-              ...additionalMockRpcCallOptions,
-            });
-            failoverComms.mockRpcCall({
-              request: requestToMock,
-              times: maxConsecutiveFailures,
-              ...additionalMockRpcCallOptions,
-            });
-            // Block tracker requests on the primary will fail over
-            failoverComms.mockNextBlockTrackerRequest({
-              blockNumber,
-            });
-
-            const messenger = buildRootMessenger();
-            const rpcEndpointUnavailableEventHandler = jest.fn();
-            messenger.subscribe(
-              'NetworkController:rpcEndpointUnavailable',
-              rpcEndpointUnavailableEventHandler,
-            );
-
-            await withNetworkClient(
-              {
-                providerType,
-                isRpcFailoverEnabled: true,
-                failoverRpcUrls: [failoverEndpointUrl],
-                messenger,
-                getRpcServiceOptions: () => ({
-                  fetch,
-                  btoa,
-                  policyOptions: {
-                    backoff: new ConstantBackoff(backoffDuration),
-                  },
-                }),
-              },
-              async ({ makeRpcCall, clock, chainId }) => {
-                messenger.subscribe(
-                  'NetworkController:rpcEndpointRequestRetried',
-                  () => {
-                    // Ensure that we advance to the next RPC request
-                    // retry, not the next block tracker request.
-                    // We also don't need to await this, it just needs to
-                    // be added to the promise queue.
-                    // eslint-disable-next-line @typescript-eslint/no-floating-promises
-                    clock.tickAsync(backoffDuration);
-                  },
-                );
-
-                for (let i = 0; i < maxConsecutiveFailures - 1; i++) {
-                  await ignoreRejection(makeRpcCall(request));
-                }
-                for (let i = 0; i < maxConsecutiveFailures; i++) {
-                  await ignoreRejection(makeRpcCall(request));
-                }
-
-                expect(
-                  rpcEndpointUnavailableEventHandler,
-                ).toHaveBeenNthCalledWith(2, {
-                  chainId,
-                  endpointUrl: failoverEndpointUrl,
-                  error: getExpectedBreakError(failoverEndpointUrl),
-                });
-              },
-            );
-          },
-        );
-      });
-    });
-
     it('allows RPC service options to be customized', async () => {
       const customMaxConsecutiveFailures = 6;
       const customMaxRetries = 2;
@@ -390,7 +207,7 @@ export function testsForRpcFailoverBehavior({
         },
         async ({ makeRpcCall, clock }) => {
           messenger.subscribe(
-            'NetworkController:rpcEndpointRequestRetried',
+            'NetworkController:rpcEndpointRetried',
            () => {
               // Ensure that we advance to the next RPC request
               // retry, not the next block tracker request.
@@ -452,17 +269,14 @@ export function testsForRpcFailoverBehavior({
            }),
          },
          async ({ makeRpcCall, clock, rpcUrl }) => {
-            messenger.subscribe(
-              'NetworkController:rpcEndpointRequestRetried',
-              () => {
-                // Ensure that we advance to the next RPC request
-                // retry, not the next block tracker request.
-                // We also don't need to await this, it just needs to
-                // be added to the promise queue.
-                // eslint-disable-next-line @typescript-eslint/no-floating-promises
-                clock.tickAsync(backoffDuration);
-              },
-            );
+            messenger.subscribe('NetworkController:rpcEndpointRetried', () => {
+              // Ensure that we advance to the next RPC request
+              // retry, not the next block tracker request.
+              // We also don't need to await this, it just needs to
+              // be added to the promise queue.
+              // eslint-disable-next-line @typescript-eslint/no-floating-promises
+              clock.tickAsync(backoffDuration);
+            });
 
            for (let i = 0; i < numRequestsToMake - 1; i++) {
              await ignoreRejection(makeRpcCall(request));
diff --git a/yarn.lock b/yarn.lock
index ae19c08550f..1faccfd873c 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -4208,6 +4208,7 @@ __metadata:
     "@types/lodash": "npm:^4.14.191"
     "@types/node-fetch": "npm:^2.6.12"
     async-mutex: "npm:^0.5.0"
+    cockatiel: "npm:^3.1.2"
     deep-freeze-strict: "npm:^1.1.1"
     deepmerge: "npm:^4.2.2"
     fast-deep-equal: "npm:^3.1.3"
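
Taken together, the helper changes above let a single `withNetworkClient` call stand in for the nested `withMockedCommunications` blocks that the deleted tests relied on: the `mockedEndpoints` option produces one `MockCommunications` object per endpoint (exposed on `comms`), and `MockResponse` now also accepts a function so a response can be built at the moment Nock intercepts the request. Below is a minimal sketch of how a test might use this, assuming the exports shown in the diff; the `./helpers` import path, RPC method, block number, and failover URL are illustrative, and the retry/clock handling a real failover test needs (see `rpc-failover.ts`) is omitted.

```ts
// Hypothetical usage sketch, not part of this PR. Assumes the helper exports
// shown above; the import path and concrete values are illustrative only.
import { withNetworkClient } from './helpers';

it('can mock a primary and a failover endpoint without nesting', async () => {
  const failoverEndpointUrl = 'https://failover.endpoint/';

  await withNetworkClient(
    {
      providerType: 'infura',
      infuraNetwork: 'mainnet',
      isRpcFailoverEnabled: true,
      failoverRpcUrls: [failoverEndpointUrl],
      // One config per endpoint in the chain: the primary first, then failovers.
      mockedEndpoints: [
        { providerType: 'infura', infuraNetwork: 'mainnet' },
        { providerType: 'custom', customRpcUrl: failoverEndpointUrl },
      ],
    },
    async ({ comms, makeRpcCall }) => {
      // `comms` holds one MockCommunications object per mocked endpoint.
      const [primaryComms, failoverComms] = comms ?? [];
      primaryComms?.mockNextBlockTrackerRequest({ blockNumber: '0x100' });
      // The new function form of MockResponse builds the response lazily,
      // when Nock intercepts the request.
      primaryComms?.mockRpcCall({
        request: { method: 'eth_chainId', params: [] },
        response: () => ({ result: '0x1' }),
      });
      // A failover test would point the same request at `failoverComms` after
      // mocking enough primary failures to break the circuit.
      expect(failoverComms).toBeDefined();

      await makeRpcCall({ method: 'eth_chainId', params: [] });
    },
  );
});
```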