From 2c5fceac674343ffc62fde3d3c521624b66f4f73 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 11 Feb 2022 13:39:41 -0500 Subject: [PATCH 01/10] fix(NODE-3769): align retryable error logic with spec --- .eslintrc.json | 7 +- .evergreen/run-kms-servers.sh | 6 +- .evergreen/run-tests.sh | 2 +- etc/charts/README.md | 11 - etc/charts/build_images.sh | 13 -- etc/charts/imgs/MongoError.svg | 1 - etc/charts/mermaid/MongoError.mmd | 22 -- etc/notes/errors.md | 36 +++- global.d.ts | 51 +++-- src/bulk/common.ts | 8 +- src/bulk/unordered.ts | 2 +- src/cmap/auth/gssapi.ts | 7 +- src/cmap/auth/mongocr.ts | 2 +- src/cmap/auth/mongodb_aws.ts | 4 +- src/cmap/auth/scram.ts | 4 +- src/cmap/connect.ts | 16 +- src/cmap/connection.ts | 29 ++- src/error.ts | 149 +++++++++---- src/index.ts | 10 +- src/operations/execute_operation.ts | 171 +++++++-------- src/operations/rename.ts | 4 +- src/sdam/common.ts | 6 - src/sdam/monitor.ts | 12 +- src/sdam/server.ts | 160 +++++++------- src/sdam/topology.ts | 45 +--- src/sdam/topology_description.ts | 3 - src/sessions.ts | 61 +++--- src/utils.ts | 28 ++- .../change-streams/change_stream.test.js | 9 +- .../client_side_encryption.prose.test.js | 14 +- test/integration/crud/bulk.test.js | 102 +++++---- .../load_balancers.spec.test.js | 1 - .../retryable_writes.spec.prose.test.ts | 56 +++++ .../retryable_writes.spec.test.js | 199 ------------------ .../retryable_writes.spec.test.ts | 198 +++++++++++++++++ test/readme.md | 46 +++- test/tools/spec-runner/index.js | 55 +++-- test/tools/unified-spec-runner/match.ts | 6 +- test/tools/unified-spec-runner/runner.ts | 11 +- test/types/enum.test-d.ts | 2 + test/unit/error.test.ts | 173 ++++++++++++--- test/unit/sdam/topology.test.js | 8 +- 42 files changed, 991 insertions(+), 759 deletions(-) delete mode 100644 etc/charts/README.md delete mode 100755 etc/charts/build_images.sh delete mode 100644 etc/charts/imgs/MongoError.svg delete mode 100644 etc/charts/mermaid/MongoError.mmd create mode 100644 test/integration/retryable-writes/retryable_writes.spec.prose.test.ts delete mode 100644 test/integration/retryable-writes/retryable_writes.spec.test.js create mode 100644 test/integration/retryable-writes/retryable_writes.spec.test.ts diff --git a/.eslintrc.json b/.eslintrc.json index bc8863df6d..3eb33dd8b9 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -66,6 +66,10 @@ { "name": ".", "message": "Please import directly from the relevant file instead." + }, + { + "name": "..", + "message": "Please import directly from the relevant file instead." } ] } @@ -158,7 +162,8 @@ "parser": "@typescript-eslint/parser", "rules": { "no-console": "off", - "no-restricted-syntax": "off" + "no-restricted-syntax": "off", + "typescript-eslint/ban-ts-comment": "off" } }, { diff --git a/.evergreen/run-kms-servers.sh b/.evergreen/run-kms-servers.sh index 76ef6ac258..3fde63cbd3 100644 --- a/.evergreen/run-kms-servers.sh +++ b/.evergreen/run-kms-servers.sh @@ -2,6 +2,6 @@ cd ${DRIVERS_TOOLS}/.evergreen/csfle . 
./activate_venv.sh # by default it always runs on port 5698 ./kmstlsvenv/bin/python3 -u kms_kmip_server.py & -./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & -./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & -./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert & +./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 9000 & +./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 9001 & +./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 9002 --require_client_cert & diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index a6a349e4f3..a2da41890b 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -47,7 +47,7 @@ else source "$DRIVERS_TOOLS"/.evergreen/csfle/set-temp-creds.sh fi -npm install mongodb-client-encryption@">=2.0.0-beta.4" +npm install mongodb-client-encryption export AUTH=$AUTH export SINGLE_MONGOS_LB_URI=${SINGLE_MONGOS_LB_URI} diff --git a/etc/charts/README.md b/etc/charts/README.md deleted file mode 100644 index c2086d8c71..0000000000 --- a/etc/charts/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Charts and mermaid code - -The `mermaid` directory contains the [mermaid](https://mermaid-js.github.io/mermaid/#/) files which serve as the source code for the svg files included in the `../errors.md` - -To generate these files, there is an included script, `build_images.sh` which builds images for all the mermaid files in the `mermaid` directory. - -To use this script, the [mermaid cli](https://github.com/mermaid-js/mermaid-cli) must be installed and be accessible via your $PATH variable. - -**Note on mermaid installation** - -It is preferable to install mermaid via npm rather than brew since brew will install node as a dependency which could interfere with nvm. diff --git a/etc/charts/build_images.sh b/etc/charts/build_images.sh deleted file mode 100755 index 4a9a34f7dc..0000000000 --- a/etc/charts/build_images.sh +++ /dev/null @@ -1,13 +0,0 @@ -#! /bin/bash - -echo "Building svgs..." -cd mermaid -for f in *.mmd -do - echo "Processing $f" - outname="${f%%.*}" - mmdc -i $f -o ../imgs/$outname.svg & -done -wait - -echo "Done" diff --git a/etc/charts/imgs/MongoError.svg b/etc/charts/imgs/MongoError.svg deleted file mode 100644 index 13d86e58c0..0000000000 --- a/etc/charts/imgs/MongoError.svg +++ /dev/null @@ -1 +0,0 @@ -
[SVG markup not preserved — the deleted diagram showed the MongoError class hierarchy: MongoError with children MongoDriverError, MongoNetworkError, MongoServerError, MongoSystemError; MongoDriverError with children MongoAPIError, MongoRuntimeError]
\ No newline at end of file diff --git a/etc/charts/mermaid/MongoError.mmd b/etc/charts/mermaid/MongoError.mmd deleted file mode 100644 index 0592b5c564..0000000000 --- a/etc/charts/mermaid/MongoError.mmd +++ /dev/null @@ -1,22 +0,0 @@ -graph TD - MongoError --- MongoDriverError - MongoError --- MongoNetworkError - MongoError --- MongoServerError - MongoError --- MongoSystemError - MongoDriverError --- MongoAPIError - MongoDriverError --- MongoRuntimeError - -linkStyle 0 stroke:#116149 -linkStyle 1 stroke:#116149 -linkStyle 2 stroke:#116149 -linkStyle 3 stroke:#116149 -linkStyle 4 stroke:#116149 -linkStyle 5 stroke:#116149 - -style MongoError fill:#13aa52,stroke:#21313c,color:#FAFBFC -style MongoSystemError fill:#13aa52,stroke:#21313c,color:#FAFBFC -style MongoNetworkError fill:#13aa52,stroke:#21313c,color:#FAFBFC -style MongoServerError fill:#13aa52,stroke:#21313c,color:#FAFBFC -style MongoDriverError fill:#13aa52,stroke:#21313c,color:#FAFBFC -style MongoAPIError fill:#13aa52,stroke:#21313c,color:#FAFBFC -style MongoRuntimeError fill:#13aa52,stroke:#21313c,color:#FAFBFC diff --git a/etc/notes/errors.md b/etc/notes/errors.md index 6feee91f72..03c47e7373 100644 --- a/etc/notes/errors.md +++ b/etc/notes/errors.md @@ -14,6 +14,7 @@ - [`MongoDriverError`](#MongoDriverError) - [`MongoAPIError`](#MongoAPIError) - [`MongoRuntimeError`](#MongoRuntimeError) + - [`MongoUnexpectedServerResponseError`](#MongoUnexpectedServerResponseError) - [`MongoNetworkError`](#MongoNetworkError) - [`MongoServerError`](#MongoServerError) - [`MongoSystemError`](#MongoSystemError) @@ -38,7 +39,31 @@ There are four main error classes which stem from `MongoError`: `MongoDriverErro The base class from which all errors in the Node driver subclass. `MongoError` should **never** be be directly instantiated. -![(MongoError hierarchy tree)](../charts/imgs/MongoError.svg) + +```mermaid +graph TD + MongoError --- MongoDriverError + MongoError --- MongoNetworkError + MongoError --- MongoServerError + MongoError --- MongoSystemError + MongoDriverError --- MongoAPIError + MongoDriverError --- MongoRuntimeError + +linkStyle 0 stroke:#116149 +linkStyle 1 stroke:#116149 +linkStyle 2 stroke:#116149 +linkStyle 3 stroke:#116149 +linkStyle 4 stroke:#116149 +linkStyle 5 stroke:#116149 + +style MongoError fill:#13aa52,stroke:#21313c,color:#FAFBFC +style MongoSystemError fill:#13aa52,stroke:#21313c,color:#FAFBFC +style MongoNetworkError fill:#13aa52,stroke:#21313c,color:#FAFBFC +style MongoServerError fill:#13aa52,stroke:#21313c,color:#FAFBFC +style MongoDriverError fill:#13aa52,stroke:#21313c,color:#FAFBFC +style MongoAPIError fill:#13aa52,stroke:#21313c,color:#FAFBFC +style MongoRuntimeError fill:#13aa52,stroke:#21313c,color:#FAFBFC +``` Children of `MongoError` include: @@ -90,6 +115,15 @@ This class should **never** be directly instantiated. | **MongoChangeStreamError** | Thrown when an error is encountered when operating on a ChangeStream. | | **MongoGridFSStreamError** | Thrown when an unexpected state is reached when operating on a GridFS Stream. | | **MongoGridFSChunkError** | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. | +| **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. | + +### MongoUnexpectedServerResponseError + +Intended for the scenario where the MongoDB returns an unexpected response in relation to some state the driver is in. 
+This error should **NOT** represent a response that couldn't be parsed due to errors in protocol formatting. + +Ex. Server selection results in a feature detection change: this is not usually an unexpected response, but if while retrying an operation serverSelection returns a server with a lower wireVersion than expected, we can no longer proceed with the retry, so the response is unexpected in that case. + ### `MongoNetworkError` diff --git a/global.d.ts b/global.d.ts index 3f7f03eb62..4e1e26ec33 100644 --- a/global.d.ts +++ b/global.d.ts @@ -1,32 +1,40 @@ import { OneOrMore } from './src/mongo_types'; import type { TestConfiguration } from './test/tools/runner/config'; -type WithExclusion = `!${T}` +type WithExclusion = `!${T}`; /** Defined in test/tools/runner/filters/mongodb_topology_filter.js (topologyTypeToString) */ type TopologyTypes = 'single' | 'replicaset' | 'sharded' | 'load-balanced'; -type TopologyTypeRequirement = OneOrMore | OneOrMore> +type TopologyTypeRequirement = OneOrMore | OneOrMore>; declare global { -interface MongoDBMetadataUI { - requires?: { - topology?: TopologyTypeRequirement; - mongodb?: string; - os?: NodeJS.Platform | `!${NodeJS.Platform}`; - apiVersion?: '1'; - clientSideEncryption?: boolean; - serverless?: 'forbid' | 'allow' | 'require'; - auth: 'enabled' | 'disabled' - }; + interface MongoDBMetadataUI { + requires?: { + topology?: TopologyTypeRequirement; + mongodb?: string; + os?: NodeJS.Platform | `!${NodeJS.Platform}`; + apiVersion?: '1'; + clientSideEncryption?: boolean; + serverless?: 'forbid' | 'allow' | 'require'; + auth: 'enabled' | 'disabled'; + }; - sessions?: { - skipLeakTests?: boolean; - }; -} + sessions?: { + skipLeakTests?: boolean; + }; + } + + interface MetadataAndTest { + metadata: MongoDBMetadataUI; + test: Fn; + } + + namespace Chai { + interface Assertion { + /** @deprecated Used only by the legacy spec runner, the unified runner implements the unified spec expectations */ + matchMongoSpec: (anything: any) => Chai.Assertion; + } + } -interface MetadataAndTest { - metadata: MongoDBMetadataUI; - test: Fn; -} namespace Mocha { interface TestFunction { (title: string, metadata: MongoDBMetadataUI, fn: Mocha.Func): Mocha.Test; @@ -42,6 +50,9 @@ interface MetadataAndTest { interface Test { metadata: MongoDBMetadataUI; + + /** @deprecated Attach spec to a test if you need access to it in a beforeEach hook, not recommended?? */ + spec: Record; } interface Runnable { diff --git a/src/bulk/common.ts b/src/bulk/common.ts index 3ca0ece0be..fa9e5eb1ce 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -1289,10 +1289,7 @@ export abstract class BulkOperationBase { * Handles the write error before executing commands * @internal */ - handleWriteError( - callback: Callback, - writeResult: BulkWriteResult - ): boolean | undefined { + handleWriteError(callback: Callback, writeResult: BulkWriteResult): boolean { if (this.s.bulkResult.writeErrors.length > 0) { const msg = this.s.bulkResult.writeErrors[0].errmsg ? 
this.s.bulkResult.writeErrors[0].errmsg @@ -1317,7 +1314,8 @@ export abstract class BulkOperationBase { callback(new MongoBulkWriteError(writeConcernError, writeResult)); return true; } - return; + + return false; } abstract addToOperationsList( diff --git a/src/bulk/unordered.ts b/src/bulk/unordered.ts index f808ccc06c..b4227e8d03 100644 --- a/src/bulk/unordered.ts +++ b/src/bulk/unordered.ts @@ -13,7 +13,7 @@ export class UnorderedBulkOperation extends BulkOperationBase { super(collection, options, false); } - override handleWriteError(callback: Callback, writeResult: BulkWriteResult): boolean | undefined { + override handleWriteError(callback: Callback, writeResult: BulkWriteResult): boolean { if (this.s.batches.length) { return false; } diff --git a/src/cmap/auth/gssapi.ts b/src/cmap/auth/gssapi.ts index 689d6e9512..d13e2e00ea 100644 --- a/src/cmap/auth/gssapi.ts +++ b/src/cmap/auth/gssapi.ts @@ -42,11 +42,8 @@ export class GSSAPI extends AuthProvider { new MongoMissingCredentialsError('Credentials required for GSSAPI authentication') ); const { username } = credentials; - function externalCommand( - command: Document, - cb: Callback<{ payload: string; conversationId: any }> - ) { - return connection.command(ns('$external.$cmd'), command, undefined, cb); + function externalCommand(command: Document, callback: Callback) { + return connection.command(ns('$external.$cmd'), command, undefined, callback); } makeKerberosClient(authContext, (err, client) => { if (err) return callback(err); diff --git a/src/cmap/auth/mongocr.ts b/src/cmap/auth/mongocr.ts index 232378f0d4..0359dba889 100644 --- a/src/cmap/auth/mongocr.ts +++ b/src/cmap/auth/mongocr.ts @@ -18,7 +18,7 @@ export class MongoCR extends AuthProvider { let key = null; // Get nonce - if (err == null) { + if (err == null && r != null) { nonce = r.nonce; // Use node md5 generator diff --git a/src/cmap/auth/mongodb_aws.ts b/src/cmap/auth/mongodb_aws.ts index ad2014c4fc..028ae75208 100644 --- a/src/cmap/auth/mongodb_aws.ts +++ b/src/cmap/auth/mongodb_aws.ts @@ -90,7 +90,9 @@ export class MongoDBAWS extends AuthProvider { }; connection.command(ns(`${db}.$cmd`), saslStart, undefined, (err, res) => { - if (err) return callback(err); + if (err || !res) { + return callback(err); + } const serverResponse = BSON.deserialize(res.payload.buffer, bsonOptions) as { s: Binary; diff --git a/src/cmap/auth/scram.ts b/src/cmap/auth/scram.ts index 2aac6f8e95..3551c49aab 100644 --- a/src/cmap/auth/scram.ts +++ b/src/cmap/auth/scram.ts @@ -122,7 +122,7 @@ function executeScram(cryptoMethod: CryptoMethod, authContext: AuthContext, call const saslStartCmd = makeFirstMessage(cryptoMethod, credentials, nonce); connection.command(ns(`${db}.$cmd`), saslStartCmd, undefined, (_err, result) => { const err = resolveError(_err, result); - if (err) { + if (err || !result) { return callback(err); } @@ -213,7 +213,7 @@ function continueScramConversation( connection.command(ns(`${db}.$cmd`), saslContinueCmd, undefined, (_err, r) => { const err = resolveError(_err, r); - if (err) { + if (err || !r) { return callback(err); } diff --git a/src/cmap/connect.ts b/src/cmap/connect.ts index 7b007b5fa2..596319fccf 100644 --- a/src/cmap/connect.ts +++ b/src/cmap/connect.ts @@ -16,14 +16,7 @@ import { MongoRuntimeError, MongoServerError } from '../error'; -import { - Callback, - CallbackWithType, - ClientMetadata, - HostAddress, - makeClientMetadata, - ns -} from '../utils'; +import { Callback, ClientMetadata, HostAddress, makeClientMetadata, ns } from '../utils'; import { 
AuthContext, AuthProvider } from './auth/auth_provider'; import { GSSAPI } from './auth/gssapi'; import { MongoCR } from './auth/mongocr'; @@ -133,7 +126,7 @@ function performInitialHandshake( const start = new Date().getTime(); conn.command(ns('admin.$cmd'), handshakeDoc, handshakeOptions, (err, response) => { - if (err) { + if (err || !response) { callback(err); return; } @@ -336,10 +329,7 @@ const SOCKET_ERROR_EVENT_LIST = ['error', 'close', 'timeout', 'parseError'] as c type ErrorHandlerEventName = typeof SOCKET_ERROR_EVENT_LIST[number] | 'cancel'; const SOCKET_ERROR_EVENTS = new Set(SOCKET_ERROR_EVENT_LIST); -function makeConnection( - options: MakeConnectionOptions, - _callback: CallbackWithType -) { +function makeConnection(options: MakeConnectionOptions, _callback: Callback) { const useTLS = options.tls ?? false; const keepAlive = options.keepAlive ?? true; const socketTimeoutMS = options.socketTimeoutMS ?? Reflect.get(options, 'socketTimeout') ?? 0; diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index 5e0ecb7968..eea4edfd1c 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -22,6 +22,7 @@ import { import type { ServerApi, SupportedNodeConnectionOptions } from '../mongo_client'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, ReadPreferenceLike } from '../read_preference'; +import type { HandleOperationResultCallback } from '../sdam/server'; import { applySession, ClientSession, updateSessionFromResponse } from '../sessions'; import { calculateDurationInMs, @@ -33,7 +34,7 @@ import { now, uuidV4 } from '../utils'; -import type { W, WriteConcern, WriteConcernOptions } from '../write_concern'; +import type { WriteConcern } from '../write_concern'; import type { MongoCredentials } from './auth/mongo_credentials'; import { CommandFailedEvent, @@ -109,11 +110,12 @@ export interface CommandOptions extends BSONSerializeOptions { noResponse?: boolean; omitReadPreference?: boolean; - // FIXME: NODE-2802 - willRetryWrite?: boolean; + // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint from executeOperation that the txnNum should be applied to this command. + // Applying a session to a command should happen as part of command construction, most likely in the CommandOperation#executeCommand method, + // where we have access to the details we need to determine if a txnNum should also be applied. 
+ willRetryWrite?: true; - // FIXME: NODE-2781 - writeConcern?: WriteConcernOptions | WriteConcern | W; + writeConcern?: WriteConcern; } /** @internal */ @@ -381,7 +383,7 @@ export class Connection extends TypedEventEmitter { ns: MongoDBNamespace, cmd: Document, options: CommandOptions | undefined, - callback: Callback + callback: HandleOperationResultCallback ): void { if (!(ns instanceof MongoDBNamespace)) { // TODO(NODE-3483): Replace this with a MongoCommandError @@ -411,7 +413,7 @@ export class Connection extends TypedEventEmitter { clusterTime = session.clusterTime; } - const err = applySession(session, finalCmd, options as CommandOptions); + const err = applySession(session, finalCmd, options); if (err) { return callback(err); } @@ -454,7 +456,12 @@ export class Connection extends TypedEventEmitter { } /** @internal */ - query(ns: MongoDBNamespace, cmd: Document, options: QueryOptions, callback: Callback): void { + query( + ns: MongoDBNamespace, + cmd: Document, + options: QueryOptions, + callback: HandleOperationResultCallback + ): void { const isExplain = cmd.$explain != null; const readPreference = options.readPreference ?? ReadPreference.primary; const batchSize = options.batchSize || 0; @@ -531,7 +538,7 @@ export class Connection extends TypedEventEmitter { ns: MongoDBNamespace, cursorId: Long, options: GetMoreOptions, - callback: Callback + callback: HandleOperationResultCallback ): void { const fullResult = !!options[kFullResult]; const wireVersion = maxWireVersion(this); @@ -588,7 +595,7 @@ export class Connection extends TypedEventEmitter { ns: MongoDBNamespace, cursorIds: Long[], options: CommandOptions, - callback: Callback + callback: HandleOperationResultCallback ): void { if (!cursorIds || !Array.isArray(cursorIds)) { // TODO(NODE-3483): Replace this with a MongoCommandError @@ -617,7 +624,7 @@ export class Connection extends TypedEventEmitter { (err, response) => { if (err || !response) return callback(err); if (response.cursorNotFound) { - return callback(new MongoNetworkError('cursor killed or timed out'), null); + return callback(new MongoNetworkError('cursor killed or timed out')); } if (!Array.isArray(response.documents) || response.documents.length === 0) { diff --git a/src/error.ts b/src/error.ts index af29237c31..74d2f1af5f 100644 --- a/src/error.ts +++ b/src/error.ts @@ -13,21 +13,24 @@ const kErrorLabels = Symbol('errorLabels'); * The legacy error message from the server that indicates the node is not a writable primary * https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering */ -export const LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = 'not master'; +export const LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = new RegExp('not master', 'i'); /** * @internal * The legacy error message from the server that indicates the node is not a primary or secondary * https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering */ -export const LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = 'not master or secondary'; +export const LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = new RegExp( + 'not master or secondary', + 'i' +); /** * @internal * The error message from the server that indicates the node is recovering * 
https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering */ -export const NODE_IS_RECOVERING_ERROR_MESSAGE = 'node is recovering'; +export const NODE_IS_RECOVERING_ERROR_MESSAGE = new RegExp('node is recovering', 'i'); /** @internal MongoDB Error Codes */ export const MONGODB_ERROR_CODES = Object.freeze({ @@ -80,6 +83,17 @@ export const GET_MORE_RESUMABLE_CODES = new Set([ MONGODB_ERROR_CODES.CursorNotFound ]); +/** @public */ +export const MongoErrorLabel = Object.freeze({ + RetryableWriteError: 'RetryableWriteError', + TransientTransactionError: 'TransientTransactionError', + UnknownTransactionCommitResult: 'UnknownTransactionCommitResult', + ResumableChangeStreamError: 'ResumableChangeStreamError' +} as const); + +/** @public */ +export type MongoErrorLabel = typeof MongoErrorLabel[keyof typeof MongoErrorLabel]; + /** @public */ export interface ErrorDescription extends Document { message?: string; @@ -238,7 +252,7 @@ export class MongoRuntimeError extends MongoDriverError { } /** - * An error generated when a batch command is reexecuted after one of the commands in the batch + * An error generated when a batch command is re-executed after one of the commands in the batch * has failed * * @public @@ -403,6 +417,32 @@ export class MongoGridFSChunkError extends MongoRuntimeError { } } +/** + * An error generated when a **parsable** unexpected response comes from the server. + * This is generally an error where the driver in a state expecting a certain behavior to occur in + * the next message from MongoDB but it receives something else. + * This error **does not** represent an issue with wire message formatting. + * + * #### Example + * When an operation fails, it is the driver's job to retry it. It must perform serverSelection + * again to make sure that it attempts the operation against a server in a good state. If server + * selection returns a server that does not support retryable operations, this error is used. + * This scenario is unlikely as retryable support would also have been determined on the first attempt + * but it is possible the state change could report a selectable server that does not support retries. 
+ * + * @public + * @category Error + */ +export class MongoUnexpectedServerResponseError extends MongoRuntimeError { + constructor(message: string) { + super(message); + } + + get name(): string { + return 'MongoUnexpectedServerResponseError'; + } +} + /** * An error thrown when the user attempts to add options to a cursor that has already been * initialized @@ -689,8 +729,8 @@ export class MongoWriteConcernError extends MongoServerError { } } -// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms -const RETRYABLE_ERROR_CODES = new Set([ +// https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.rst#retryable-error +const RETRYABLE_READ_ERROR_CODES = new Set([ MONGODB_ERROR_CODES.HostUnreachable, MONGODB_ERROR_CODES.HostNotFound, MONGODB_ERROR_CODES.NetworkTimeout, @@ -704,41 +744,74 @@ const RETRYABLE_ERROR_CODES = new Set([ MONGODB_ERROR_CODES.NotPrimaryOrSecondary ]); +// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms const RETRYABLE_WRITE_ERROR_CODES = new Set([ - MONGODB_ERROR_CODES.InterruptedAtShutdown, - MONGODB_ERROR_CODES.InterruptedDueToReplStateChange, - MONGODB_ERROR_CODES.NotWritablePrimary, - MONGODB_ERROR_CODES.NotPrimaryNoSecondaryOk, - MONGODB_ERROR_CODES.NotPrimaryOrSecondary, - MONGODB_ERROR_CODES.PrimarySteppedDown, - MONGODB_ERROR_CODES.ShutdownInProgress, - MONGODB_ERROR_CODES.HostNotFound, - MONGODB_ERROR_CODES.HostUnreachable, - MONGODB_ERROR_CODES.NetworkTimeout, - MONGODB_ERROR_CODES.SocketException, + ...RETRYABLE_READ_ERROR_CODES, MONGODB_ERROR_CODES.ExceededTimeLimit ]); -export function isRetryableEndTransactionError(error: MongoError): boolean { - return error.hasErrorLabel('RetryableWriteError'); -} +export function needsRetryableWriteLabel(error: Error, maxWireVersion: number): boolean { + if (maxWireVersion >= 9) { + // 4.4+ servers attach their own retryable write error + return false; + } + // pre-4.4 server, then the driver adds an error label for every valid case + // execute operation will only inspect the label, code/message logic is handled here + + if (error instanceof MongoNetworkError) { + return true; + } + + if (error instanceof MongoError && error.hasErrorLabel(MongoErrorLabel.RetryableWriteError)) { + // Before 4.4 the error label can be one way of identifying retry + // so we can return true if we have the label, but fall back to code checking below + return true; + } -export function isRetryableWriteError(error: MongoError): boolean { if (error instanceof MongoWriteConcernError) { return RETRYABLE_WRITE_ERROR_CODES.has(error.result?.code ?? error.code ?? 
0); } - return typeof error.code === 'number' && RETRYABLE_WRITE_ERROR_CODES.has(error.code); + + if (error instanceof MongoError && typeof error.code === 'number') { + return RETRYABLE_WRITE_ERROR_CODES.has(error.code); + } + + const isNotWritablePrimaryError = LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.test(error.message); + if (isNotWritablePrimaryError) { + return true; + } + + const isNodeIsRecoveringError = NODE_IS_RECOVERING_ERROR_MESSAGE.test(error.message); + if (isNodeIsRecoveringError) { + return true; + } + + return false; } /** Determines whether an error is something the driver should attempt to retry */ -export function isRetryableError(error: MongoError): boolean { - return ( - // eslint-disable-next-line @typescript-eslint/no-non-null-assertion - (typeof error.code === 'number' && RETRYABLE_ERROR_CODES.has(error.code!)) || - error instanceof MongoNetworkError || - !!error.message.match(new RegExp(LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE)) || - !!error.message.match(new RegExp(NODE_IS_RECOVERING_ERROR_MESSAGE)) - ); +export function isRetryableReadError(error: MongoError): boolean { + const hasRetryableErrorCode = + typeof error.code === 'number' ? RETRYABLE_READ_ERROR_CODES.has(error.code) : false; + if (hasRetryableErrorCode) { + return true; + } + + if (error instanceof MongoNetworkError) { + return true; + } + + const isNotWritablePrimaryError = LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.test(error.message); + if (isNotWritablePrimaryError) { + return true; + } + + const isNodeIsRecoveringError = NODE_IS_RECOVERING_ERROR_MESSAGE.test(error.message); + if (isNodeIsRecoveringError) { + return true; + } + + return false; } const SDAM_RECOVERING_CODES = new Set([ @@ -749,7 +822,7 @@ const SDAM_RECOVERING_CODES = new Set([ MONGODB_ERROR_CODES.NotPrimaryOrSecondary ]); -const SDAM_NOTPRIMARY_CODES = new Set([ +const SDAM_NOT_PRIMARY_CODES = new Set([ MONGODB_ERROR_CODES.NotWritablePrimary, MONGODB_ERROR_CODES.NotPrimaryNoSecondaryOk, MONGODB_ERROR_CODES.LegacyNotPrimary @@ -767,22 +840,22 @@ function isRecoveringError(err: MongoError) { } return ( - new RegExp(LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE).test(err.message) || - new RegExp(NODE_IS_RECOVERING_ERROR_MESSAGE).test(err.message) + LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE.test(err.message) || + NODE_IS_RECOVERING_ERROR_MESSAGE.test(err.message) ); } function isNotWritablePrimaryError(err: MongoError) { if (typeof err.code === 'number') { // If any error code exists, we ignore the error.message - return SDAM_NOTPRIMARY_CODES.has(err.code); + return SDAM_NOT_PRIMARY_CODES.has(err.code); } if (isRecoveringError(err)) { return false; } - return new RegExp(LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE).test(err.message); + return LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.test(err.message); } export function isNodeShuttingDownError(err: MongoError): boolean { @@ -829,10 +902,12 @@ export function isResumableError(error?: MongoError, wireVersion?: number): bool if (wireVersion != null && wireVersion >= 9) { // DRIVERS-1308: For 4.4 drivers running against 4.4 servers, drivers will add a special case to treat the CursorNotFound error code as resumable - if (error && error instanceof MongoError && error.code === 43) { + if (error && error instanceof MongoError && error.code === MONGODB_ERROR_CODES.CursorNotFound) { return true; } - return error instanceof MongoError && error.hasErrorLabel('ResumableChangeStreamError'); + return ( + error instanceof MongoError && 
error.hasErrorLabel(MongoErrorLabel.ResumableChangeStreamError) + ); } if (error && typeof error.code === 'number') { diff --git a/src/index.ts b/src/index.ts index 0502109519..f996118a42 100644 --- a/src/index.ts +++ b/src/index.ts @@ -65,6 +65,7 @@ export { MongoTailableCursorError, MongoTopologyClosedError, MongoTransactionError, + MongoUnexpectedServerResponseError, MongoWriteConcernError } from './error'; export { @@ -92,6 +93,7 @@ export { AuthMechanism } from './cmap/auth/providers'; export { Compressor } from './cmap/wire_protocol/compression'; export { CURSOR_FLAGS } from './cursor/abstract_cursor'; export { AutoEncryptionLoggerLevel } from './deps'; +export { MongoErrorLabel } from './error'; export { ExplainVerbosity } from './explain'; export { LoggerLevel } from './logger'; export { ServerApiVersion } from './mongo_client'; @@ -396,7 +398,13 @@ export type { RTTPinger, RTTPingerOptions } from './sdam/monitor'; -export type { Server, ServerEvents, ServerOptions, ServerPrivate } from './sdam/server'; +export type { + HandleOperationResultCallback, + Server, + ServerEvents, + ServerOptions, + ServerPrivate +} from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index b273c5c0fa..225a1be4d6 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -1,14 +1,16 @@ import type { Document } from '../bson'; import { - isRetryableError, + isRetryableReadError, MongoCompatibilityError, MONGODB_ERROR_CODES, MongoError, + MongoErrorLabel, MongoExpiredSessionError, MongoNetworkError, MongoRuntimeError, MongoServerError, - MongoTransactionError + MongoTransactionError, + MongoUnexpectedServerResponseError } from '../error'; import { ReadPreference } from '../read_preference'; import type { Server } from '../sdam/server'; @@ -19,7 +21,7 @@ import { } from '../sdam/server_selection'; import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; -import { Callback, maxWireVersion, maybePromise, supportsRetryableWrites } from '../utils'; +import { Callback, maybePromise, supportsRetryableWrites } from '../utils'; import { AbstractOperation, Aspect } from './operation'; const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; @@ -72,16 +74,16 @@ export function executeOperation< TResult = ResultTypeFromOperation >(topology: Topology, operation: T, callback?: Callback): Promise | void { if (!(operation instanceof AbstractOperation)) { - // TODO(NODE-3483) + // TODO(NODE-3483): Extend MongoRuntimeError throw new MongoRuntimeError('This method requires a valid operation instance'); } - return maybePromise(callback, cb => { + return maybePromise(callback, callback => { if (topology.shouldCheckForSessionSupport()) { return topology.selectServer(ReadPreference.primaryPreferred, err => { - if (err) return cb(err); + if (err) return callback(err); - executeOperation(topology, operation, cb); + executeOperation(topology, operation, callback); }); } @@ -94,63 +96,51 @@ export function executeOperation< owner = Symbol(); session = topology.startSession({ owner, explicit: false }); } else if (session.hasEnded) { - return cb(new MongoExpiredSessionError('Use of expired sessions is not permitted')); + return callback(new MongoExpiredSessionError('Use of expired sessions is not permitted')); } else if (session.snapshotEnabled && !topology.capabilities.supportsSnapshotReads) { - return cb(new 
MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later')); + return callback(new MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later')); } } else if (session) { // If the user passed an explicit session and we are still, after server selection, // trying to run against a topology that doesn't support sessions we error out. - return cb(new MongoCompatibilityError('Current topology does not support sessions')); + return callback(new MongoCompatibilityError('Current topology does not support sessions')); } try { - executeWithServerSelection(topology, session, operation, (err, result) => { - if (session && session.owner && session.owner === owner) { - return session.endSession(err2 => cb(err2 || err, result)); + executeWithServerSelection(topology, session, operation, (error, result) => { + if (session?.owner != null && session.owner === owner) { + return session.endSession(endSessionError => callback(endSessionError ?? error, result)); } - cb(err, result); + callback(error, result); }); - } catch (e) { - if (session && session.owner && session.owner === owner) { + } catch (error) { + if (session?.owner != null && session.owner === owner) { session.endSession(); } - - throw e; + throw error; } }); } -function supportsRetryableReads(server: Server) { - return maxWireVersion(server) >= 6; -} - -function executeWithServerSelection( +function executeWithServerSelection( topology: Topology, - session: ClientSession | undefined, + session: ClientSession, operation: AbstractOperation, - callback: Callback + callback: Callback ) { - const readPreference = operation.readPreference || ReadPreference.primary; - const inTransaction = session && session.inTransaction(); + const readPreference = operation.readPreference ?? ReadPreference.primary; + const inTransaction = !!session?.inTransaction(); if (inTransaction && !readPreference.equals(ReadPreference.primary)) { - callback( + return callback( new MongoTransactionError( `Read preference in a transaction must be primary, not: ${readPreference.mode}` ) ); - - return; } - if ( - session && - session.isPinned && - session.transaction.isCommitted && - !operation.bypassPinningCheck - ) { + if (session?.isPinned && session.transaction.isCommitted && !operation.bypassPinningCheck) { session.unpin(); } @@ -170,60 +160,54 @@ function executeWithServerSelection( } const serverSelectionOptions = { session }; - function callbackWithRetry(err?: any, result?: any) { - if (err == null) { - return callback(undefined, result); - } + function retryOperation(originalError: MongoError) { + const isWriteOperation = operation.hasAspect(Aspect.WRITE_OPERATION); + const isReadOperation = operation.hasAspect(Aspect.READ_OPERATION); - const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION); - const hasWriteAspect = operation.hasAspect(Aspect.WRITE_OPERATION); - const itShouldRetryWrite = shouldRetryWrite(err); - - if ((hasReadAspect && !isRetryableError(err)) || (hasWriteAspect && !itShouldRetryWrite)) { - return callback(err); - } - - if ( - hasWriteAspect && - itShouldRetryWrite && - err.code === MMAPv1_RETRY_WRITES_ERROR_CODE && - err.errmsg.match(/Transaction numbers/) - ) { - callback( + if (isWriteOperation && originalError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) { + return callback( new MongoServerError({ message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE, errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE, - originalError: err + originalError }) ); + } - return; + if (isWriteOperation && 
!originalError.hasErrorLabel(MongoErrorLabel.RetryableWriteError)) { + return callback(originalError); } - // select a new server, and attempt to retry the operation - topology.selectServer(selector, serverSelectionOptions, (e?: any, server?: any) => { - if ( - e || - (operation.hasAspect(Aspect.READ_OPERATION) && !supportsRetryableReads(server)) || - (operation.hasAspect(Aspect.WRITE_OPERATION) && !supportsRetryableWrites(server)) - ) { - callback(e); - return; - } + if (isReadOperation && !isRetryableReadError(originalError)) { + return callback(originalError); + } + if ( + originalError instanceof MongoNetworkError && + session?.isPinned && + !session.inTransaction() && + operation.hasAspect(Aspect.CURSOR_CREATING) + ) { // If we have a cursor and the initial command fails with a network error, // we can retry it on another connection. So we need to check it back in, clear the // pool for the service id, and retry again. - if ( - err && - err instanceof MongoNetworkError && - server.loadBalanced && - session && - session.isPinned && - !session.inTransaction() && - operation.hasAspect(Aspect.CURSOR_CREATING) - ) { - session.unpin({ force: true, forceClear: true }); + session.unpin({ force: true, forceClear: true }); + } + + // select a new server, and attempt to retry the operation + topology.selectServer(selector, serverSelectionOptions, (error?: Error, server?: Server) => { + if (!error && isWriteOperation && !supportsRetryableWrites(server)) { + return callback( + new MongoUnexpectedServerResponseError( + 'Selected server does not support retryable writes' + ) + ); + } + + if (error || !server) { + return callback( + error ?? new MongoUnexpectedServerResponseError('Server selection failed without error') + ); } operation.execute(server, session, callback); @@ -233,8 +217,7 @@ function executeWithServerSelection( if ( readPreference && !readPreference.equals(ReadPreference.primary) && - session && - session.inTransaction() + session?.inTransaction() ) { callback( new MongoTransactionError( @@ -246,21 +229,17 @@ function executeWithServerSelection( } // select a server, and execute the operation against it - topology.selectServer(selector, serverSelectionOptions, (err?: any, server?: any) => { - if (err) { - callback(err); - return; + topology.selectServer(selector, serverSelectionOptions, (error, server) => { + if (error || !server) { + return callback(error); } if (session && operation.hasAspect(Aspect.RETRYABLE)) { const willRetryRead = - topology.s.options.retryReads !== false && - !inTransaction && - supportsRetryableReads(server) && - operation.canRetryRead; + topology.s.options.retryReads && !inTransaction && operation.canRetryRead; const willRetryWrite = - topology.s.options.retryWrites === true && + topology.s.options.retryWrites && !inTransaction && supportsRetryableWrites(server) && operation.canRetryWrite; @@ -274,15 +253,17 @@ function executeWithServerSelection( session.incrementTransactionNumber(); } - operation.execute(server, session, callbackWithRetry); - return; + return operation.execute(server, session, (error, result) => { + if (error instanceof MongoError) { + return retryOperation(error); + } else if (error) { + return callback(error); + } + callback(undefined, result); + }); } } - operation.execute(server, session, callback); + return operation.execute(server, session, callback); }); } - -function shouldRetryWrite(err: any) { - return err instanceof MongoError && err.hasErrorLabel('RetryableWriteError'); -} diff --git a/src/operations/rename.ts 
b/src/operations/rename.ts index 02e27a5bf7..00f3ee3118 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -1,6 +1,6 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; -import { MongoServerError } from '../error'; +import { MongoRuntimeError, MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { Callback, checkCollectionName } from '../utils'; @@ -46,7 +46,7 @@ export class RenameOperation extends RunAdminCommandOperation { const coll = this.collection; super.execute(server, session, (err, doc) => { - if (err) return callback(err); + if (err || !doc) return callback(err ? err : new MongoRuntimeError('No result')); // We have an error if (doc?.errmsg) { return callback(new MongoServerError(doc)); diff --git a/src/sdam/common.ts b/src/sdam/common.ts index 9a9a5450a7..6738ad9eb8 100644 --- a/src/sdam/common.ts +++ b/src/sdam/common.ts @@ -53,12 +53,6 @@ export function drainTimerQueue(queue: TimerQueue): void { queue.clear(); } -/** @internal */ -export function clearAndRemoveTimerFrom(timer: NodeJS.Timeout, timers: TimerQueue): boolean { - clearTimeout(timer); - return timers.delete(timer); -} - /** @public */ export interface ClusterTime { clusterTime: Timestamp; diff --git a/src/sdam/monitor.ts b/src/sdam/monitor.ts index 9f7f786b3e..f734d52423 100644 --- a/src/sdam/monitor.ts +++ b/src/sdam/monitor.ts @@ -2,7 +2,7 @@ import { Document, Long } from '../bson'; import { connect } from '../cmap/connect'; import { Connection, ConnectionOptions } from '../cmap/connection'; import { LEGACY_HELLO_COMMAND } from '../constants'; -import { AnyError, MongoNetworkError } from '../error'; +import { MongoNetworkError, MongoUnexpectedServerResponseError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Callback, InterruptibleAsyncInterval } from '../utils'; import { @@ -211,7 +211,7 @@ function checkServer(monitor: Monitor, callback: Callback) { let start = now(); monitor.emit(Server.SERVER_HEARTBEAT_STARTED, new ServerHeartbeatStartedEvent(monitor.address)); - function failureHandler(err: AnyError) { + function failureHandler(err: Error) { monitor[kConnection]?.destroy({ force: true }); monitor[kConnection] = undefined; @@ -259,8 +259,12 @@ function checkServer(monitor: Monitor, callback: Callback) { connection.command(ns('admin.$cmd'), cmd, options, (err, hello) => { if (err) { - failureHandler(err); - return; + return failureHandler(err); + } + if (!hello) { + return failureHandler( + new MongoUnexpectedServerResponseError('Empty response without error') + ); } if (!('isWritablePrimary' in hello)) { diff --git a/src/sdam/server.ts b/src/sdam/server.ts index e13f793de1..b822b69e24 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -27,14 +27,16 @@ import type { AutoEncrypter } from '../deps'; import { isNetworkErrorBeforeHandshake, isNodeShuttingDownError, - isRetryableWriteError, isSDAMUnrecoverableError, MongoCompatibilityError, MongoError, + MongoErrorLabel, MongoInvalidArgumentError, MongoNetworkError, MongoNetworkTimeoutError, - MongoServerClosedError + MongoServerClosedError, + MongoUnexpectedServerResponseError, + needsRetryableWriteLabel } from '../error'; import { Logger } from '../logger'; import type { ServerApi } from '../mongo_client'; @@ -43,7 +45,6 @@ import type { ClientSession } from '../sessions'; import { isTransactionCommand } from '../transactions'; import { Callback, - 
CallbackWithType, collationNotSupported, EventEmitterWithState, makeStateMachine, @@ -283,24 +284,12 @@ export class Server extends TypedEventEmitter { * Execute a command * @internal */ - command(ns: MongoDBNamespace, cmd: Document, callback: Callback): void; - /** @internal */ command( ns: MongoDBNamespace, cmd: Document, options: CommandOptions, callback: Callback - ): void; - command( - ns: MongoDBNamespace, - cmd: Document, - options?: CommandOptions | Callback, - callback?: Callback ): void { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options ?? {}); - } - if (callback == null) { throw new MongoInvalidArgumentError('Callback must be provided'); } @@ -345,7 +334,7 @@ export class Server extends TypedEventEmitter { } session.pin(checkedOut); - this.command(ns, cmd, finalOptions, callback as Callback); + this.command(ns, cmd, finalOptions, callback); }); return; @@ -363,7 +352,7 @@ export class Server extends TypedEventEmitter { ns, cmd, finalOptions, - makeOperationHandler(this, conn, cmd, finalOptions, cb) as Callback + makeOperationHandler(this, conn, cmd, finalOptions, cb) ); }, callback @@ -388,12 +377,7 @@ export class Server extends TypedEventEmitter { return cb(err); } - conn.query( - ns, - cmd, - options, - makeOperationHandler(this, conn, cmd, options, cb) as Callback - ); + conn.query(ns, cmd, options, makeOperationHandler(this, conn, cmd, options, cb)); }, callback ); @@ -422,12 +406,7 @@ export class Server extends TypedEventEmitter { return cb(err); } - conn.getMore( - ns, - cursorId, - options, - makeOperationHandler(this, conn, {}, options, cb) as Callback - ); + conn.getMore(ns, cursorId, options, makeOperationHandler(this, conn, {}, options, cb)); }, callback ); @@ -463,7 +442,7 @@ export class Server extends TypedEventEmitter { ns, cursorIds, options, - makeOperationHandler(this, conn, {}, undefined, cb) as Callback + makeOperationHandler(this, conn, {}, undefined, cb) ); }, callback @@ -541,73 +520,98 @@ function isRetryableWritesEnabled(topology: Topology) { return topology.s.options.retryWrites !== false; } +/** @internal */ +export interface HandleOperationResultCallback { + (error: MongoError | Error | undefined, result?: Document): void; +} + function makeOperationHandler( server: Server, connection: Connection, cmd: Document, options: CommandOptions | GetMoreOptions | undefined, callback: Callback -): CallbackWithType { +): HandleOperationResultCallback { const session = options?.session; - return function handleOperationResult(err, result) { - if (err && !connectionIsStale(server.s.pool, connection)) { - if (err instanceof MongoNetworkError) { - if (session && !session.hasEnded && session.serverSession) { - session.serverSession.isDirty = true; - } + return function handleOperationResult(error, result) { + if (result != null) { + return callback(undefined, result); + } - // inActiveTransaction check handles commit and abort. 
- if (inActiveTransaction(session, cmd) && !err.hasErrorLabel('TransientTransactionError')) { - err.addErrorLabel('TransientTransactionError'); - } + if (!error) { + return callback(new MongoUnexpectedServerResponseError('Empty response with no error')); + } - if ( - (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) && - supportsRetryableWrites(server) && - !inActiveTransaction(session, cmd) - ) { - err.addErrorLabel('RetryableWriteError'); - } + if (!(error instanceof MongoError)) { + // Node.js or some other error we have not special handling for + return callback(error); + } - if (!(err instanceof MongoNetworkTimeoutError) || isNetworkErrorBeforeHandshake(err)) { - // In load balanced mode we never mark the server as unknown and always - // clear for the specific service id. + if (connectionIsStale(server.s.pool, connection)) { + return callback(error); + } - server.s.pool.clear(connection.serviceId); - if (!server.loadBalanced) { - markServerUnknown(server, err); - } - } - } else { - // if pre-4.4 server, then add error label if its a retryable write error - if ( - (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) && - maxWireVersion(server) < 9 && - isRetryableWriteError(err) && - !inActiveTransaction(session, cmd) - ) { - err.addErrorLabel('RetryableWriteError'); + if (error instanceof MongoNetworkError) { + if (session && !session.hasEnded && session.serverSession) { + session.serverSession.isDirty = true; + } + + // inActiveTransaction check handles commit and abort. + if ( + inActiveTransaction(session, cmd) && + !error.hasErrorLabel(MongoErrorLabel.TransientTransactionError) + ) { + error.addErrorLabel(MongoErrorLabel.TransientTransactionError); + } + + if ( + (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) && + supportsRetryableWrites(server) && + !inActiveTransaction(session, cmd) + ) { + error.addErrorLabel(MongoErrorLabel.RetryableWriteError); + } + + if (!(error instanceof MongoNetworkTimeoutError) || isNetworkErrorBeforeHandshake(error)) { + // In load balanced mode we never mark the server as unknown and always + // clear for the specific service id. 
+ + server.s.pool.clear(connection.serviceId); + if (!server.loadBalanced) { + markServerUnknown(server, error); } + } + } else { + if ( + (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) && + needsRetryableWriteLabel(error, maxWireVersion(server)) && + !inActiveTransaction(session, cmd) + ) { + error.addErrorLabel(MongoErrorLabel.RetryableWriteError); + } - if (isSDAMUnrecoverableError(err)) { - if (shouldHandleStateChangeError(server, err)) { - if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(err)) { - server.s.pool.clear(connection.serviceId); - } + if (isSDAMUnrecoverableError(error)) { + if (shouldHandleStateChangeError(server, error)) { + if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(error)) { + server.s.pool.clear(connection.serviceId); + } - if (!server.loadBalanced) { - markServerUnknown(server, err); - process.nextTick(() => server.requestCheck()); - } + if (!server.loadBalanced) { + markServerUnknown(server, error); + process.nextTick(() => server.requestCheck()); } } } + } - if (session && session.isPinned && err.hasErrorLabel('TransientTransactionError')) { - session.unpin({ force: true }); - } + if ( + session && + session.isPinned && + error.hasErrorLabel(MongoErrorLabel.TransientTransactionError) + ) { + session.unpin({ force: true }); } - callback(err, result); + return callback(error); }; } diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 4d9a067eaa..6b23547288 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -50,7 +50,6 @@ import { } from '../utils'; import { _advanceClusterTime, - clearAndRemoveTimerFrom, ClusterTime, drainTimerQueue, ServerType, @@ -435,7 +434,12 @@ export class Topology extends TypedEventEmitter { // connect all known servers, then attempt server selection to connect const serverDescriptions = Array.from(this.s.description.servers.values()); - connectServers(this, serverDescriptions); + this.s.servers = new Map( + serverDescriptions.map(serverDescription => [ + serverDescription.address, + createAndConnectServer(this, serverDescription) + ]) + ); // In load balancer mode we need to fake a server description getting // emitted from the monitor, since the monitor doesn't exist. @@ -459,7 +463,7 @@ export class Topology extends TypedEventEmitter { // TODO: NODE-2471 if (server && this.s.credentials) { - server.command(ns('admin.$cmd'), { ping: 1 }, err => { + server.command(ns('admin.$cmd'), { ping: 1 }, {}, err => { if (err) { typeof callback === 'function' ? 
callback(err) : this.emit(Topology.ERROR, err); return; @@ -875,13 +879,8 @@ function randomSelection(array: ServerDescription[]): ServerDescription { * * @param topology - The topology that this server belongs to * @param serverDescription - The description for the server to initialize and connect to - * @param connectDelay - Time to wait before attempting initial connection */ -function createAndConnectServer( - topology: Topology, - serverDescription: ServerDescription, - connectDelay?: number -) { +function createAndConnectServer(topology: Topology, serverDescription: ServerDescription) { topology.emit( Topology.SERVER_OPENING, new ServerOpeningEvent(topology.s.id, serverDescription.address) @@ -894,38 +893,10 @@ function createAndConnectServer( server.on(Server.DESCRIPTION_RECEIVED, description => topology.serverUpdateHandler(description)); - if (connectDelay) { - const connectTimer = setTimeout(() => { - clearAndRemoveTimerFrom(connectTimer, topology.s.connectionTimers); - server.connect(); - }, connectDelay); - - topology.s.connectionTimers.add(connectTimer); - return server; - } - server.connect(); return server; } -/** - * Create `Server` instances for all initially known servers, connect them, and assign - * them to the passed in `Topology`. - * - * @param topology - The topology responsible for the servers - * @param serverDescriptions - A list of server descriptions to connect - */ -function connectServers(topology: Topology, serverDescriptions: ServerDescription[]) { - topology.s.servers = serverDescriptions.reduce( - (servers: Map, serverDescription: ServerDescription) => { - const server = createAndConnectServer(topology, serverDescription); - servers.set(serverDescription.address, server); - return servers; - }, - new Map() - ); -} - /** * @param topology - Topology to update. * @param incomingServerDescription - New server description. diff --git a/src/sdam/topology_description.ts b/src/sdam/topology_description.ts index e5e5e55356..5e2019d56d 100644 --- a/src/sdam/topology_description.ts +++ b/src/sdam/topology_description.ts @@ -58,9 +58,6 @@ export class TopologyDescription { ) { options = options ?? {}; - // TODO: consider assigning all these values to a temporary value `s` which - // we use `Object.freeze` on, ensuring the internal state of this type - // is immutable. this.type = topologyType ?? TopologyType.Unknown; this.servers = serverDescriptions ?? 
new Map(); this.stale = false; diff --git a/src/sessions.ts b/src/sessions.ts index 1ae404a272..7b05edee40 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -6,16 +6,14 @@ import { PINNED, UNPINNED } from './constants'; import type { AbstractCursor } from './cursor/abstract_cursor'; import { AnyError, - isRetryableEndTransactionError, - isRetryableError, MongoAPIError, MongoCompatibilityError, MONGODB_ERROR_CODES, MongoDriverError, MongoError, + MongoErrorLabel, MongoExpiredSessionError, MongoInvalidArgumentError, - MongoNetworkError, MongoRuntimeError, MongoServerError, MongoTransactionError, @@ -41,7 +39,6 @@ import { now, uuidV4 } from './utils'; -import type { WriteConcern } from './write_concern'; const minWireVersionForShardedTransactions = 8; @@ -507,7 +504,7 @@ export function maybeClearPinnedConnection( session.inTransaction() && error && error instanceof MongoError && - error.hasErrorLabel('TransientTransactionError') + error.hasErrorLabel(MongoErrorLabel.TransientTransactionError) ) { return; } @@ -559,11 +556,11 @@ function attemptTransactionCommit( hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT) && !isMaxTimeMSExpiredError(err) ) { - if (err.hasErrorLabel('UnknownTransactionCommitResult')) { + if (err.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult)) { return attemptTransactionCommit(session, startTime, fn, options); } - if (err.hasErrorLabel('TransientTransactionError')) { + if (err.hasErrorLabel(MongoErrorLabel.TransientTransactionError)) { return attemptTransaction(session, startTime, fn, options); } } @@ -617,14 +614,14 @@ function attemptTransaction( function maybeRetryOrThrow(err: MongoError): Promise { if ( err instanceof MongoError && - err.hasErrorLabel('TransientTransactionError') && + err.hasErrorLabel(MongoErrorLabel.TransientTransactionError) && hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT) ) { return attemptTransaction(session, startTime, fn, options); } if (isMaxTimeMSExpiredError(err)) { - err.addErrorLabel('UnknownTransactionCommitResult'); + err.addErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult); } throw err; @@ -639,7 +636,11 @@ function attemptTransaction( ); } -function endTransaction(session: ClientSession, commandName: string, callback: Callback) { +function endTransaction( + session: ClientSession, + commandName: 'abortTransaction' | 'commitTransaction', + callback: Callback +) { if (!assertAlive(session, callback)) { // checking result in case callback was called return; @@ -717,7 +718,7 @@ function endTransaction(session: ClientSession, commandName: string, callback: C Object.assign(command, { maxTimeMS: session.transaction.options.maxTimeMS }); } - function commandHandler(e?: MongoError, r?: Document) { + function commandHandler(error?: Error, result?: Document) { if (commandName !== 'commitTransaction') { session.transaction.transition(TxnState.TRANSACTION_ABORTED); if (session.loadBalanced) { @@ -729,25 +730,24 @@ function endTransaction(session: ClientSession, commandName: string, callback: C } session.transaction.transition(TxnState.TRANSACTION_COMMITTED); - if (e) { + if (error instanceof MongoError) { if ( - e instanceof MongoNetworkError || - e instanceof MongoWriteConcernError || - isRetryableError(e) || - isMaxTimeMSExpiredError(e) + error.hasErrorLabel(MongoErrorLabel.RetryableWriteError) || + error instanceof MongoWriteConcernError || + isMaxTimeMSExpiredError(error) ) { - if (isUnknownTransactionCommitResult(e)) { - e.addErrorLabel('UnknownTransactionCommitResult'); + if 
(isUnknownTransactionCommitResult(error)) { + error.addErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult); // per txns spec, must unpin session in this case - session.unpin({ error: e }); + session.unpin({ error }); } - } else if (e.hasErrorLabel('TransientTransactionError')) { - session.unpin({ error: e }); + } else if (error.hasErrorLabel(MongoErrorLabel.TransientTransactionError)) { + session.unpin({ error }); } } - callback(e, r); + callback(error, result); } // Assumption here that commandName is "commitTransaction" or "abortTransaction" @@ -763,13 +763,13 @@ function endTransaction(session: ClientSession, commandName: string, callback: C readPreference: ReadPreference.primary, bypassPinningCheck: true }), - (err, reply) => { + (error, result) => { if (command.abortTransaction) { // always unpin on abort regardless of command outcome session.unpin(); } - if (err && isRetryableEndTransactionError(err as MongoError)) { + if (error instanceof MongoError && error.hasErrorLabel(MongoErrorLabel.RetryableWriteError)) { // SPEC-1185: apply majority write concern when retrying commitTransaction if (command.commitTransaction) { // per txns spec, must unpin session in this case @@ -787,11 +787,11 @@ function endTransaction(session: ClientSession, commandName: string, callback: C readPreference: ReadPreference.primary, bypassPinningCheck: true }), - (_err, _reply) => commandHandler(_err as MongoError, _reply) + commandHandler ); } - commandHandler(err as MongoError, reply); + commandHandler(error, result); } ); } @@ -936,11 +936,13 @@ export class ServerSessionPool { * @param session - the session tracking transaction state * @param command - the command to decorate * @param options - Optional settings passed to calling operation + * + * @internal */ export function applySession( session: ClientSession, command: Document, - options?: CommandOptions + options: CommandOptions ): MongoDriverError | undefined { // TODO: merge this with `assertAlive`, did not want to throw a try/catch here if (session.hasEnded) { @@ -952,10 +954,9 @@ export function applySession( return new MongoRuntimeError('Unable to acquire server session'); } - // SPEC-1019: silently ignore explicit session with unacknowledged write for backwards compatibility - // FIXME: NODE-2781, this check for write concern shouldn't be happening here, but instead during command construction - if (options && options.writeConcern && (options.writeConcern as WriteConcern).w === 0) { + if (options.writeConcern?.w === 0) { if (session && session.explicit) { + // Error if user provided an explicit session to an unacknowledged write (SPEC-1019) return new MongoAPIError('Cannot have explicit session with unacknowledged writes'); } return; diff --git a/src/utils.ts b/src/utils.ts index 207c43314d..5fe1edbfbc 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -35,8 +35,6 @@ import { W, WriteConcern, WriteConcernOptions } from './write_concern'; * @public */ export type Callback = (error?: AnyError, result?: T) => void; -/** @public */ -export type CallbackWithType = (error?: E, result?: T0) => void; export const MAX_JS_INT = Number.MAX_SAFE_INTEGER + 1; @@ -1313,13 +1311,25 @@ export function enumToString(en: Record): string { * * @internal */ -export function supportsRetryableWrites(server: Server): boolean { - return ( - !!server.loadBalanced || - (server.description.maxWireVersion >= 6 && - !!server.description.logicalSessionTimeoutMinutes && - server.description.type !== ServerType.Standalone) - ); +export function 
supportsRetryableWrites(server?: Server): boolean { + if (!server) { + return false; + } + + if (server.loadBalanced) { + // Loadbalanced topologies will always support retry writes + return true; + } + + if (server.description.logicalSessionTimeoutMinutes != null) { + // that supports sessions + if (server.description.type !== ServerType.Standalone) { + // and that is not a standalone + return true; + } + } + + return false; } export function parsePackageVersion({ version }: { version: string }): { diff --git a/test/integration/change-streams/change_stream.test.js b/test/integration/change-streams/change_stream.test.js index 892bc9e215..35e91112af 100644 --- a/test/integration/change-streams/change_stream.test.js +++ b/test/integration/change-streams/change_stream.test.js @@ -1422,6 +1422,7 @@ describe('Change Streams', function () { let client; let coll; let startAfter; + let changeStream; beforeEach(function (done) { const configuration = this.configuration; @@ -1449,15 +1450,15 @@ describe('Change Streams', function () { }); }); - afterEach(function (done) { - client.close(done); + afterEach(async function () { + await changeStream.close(); + await client.close(); }); it('should work with events', { metadata: { requires: { topology: 'replicaset', mongodb: '>=4.1.1' } }, test: function (done) { - const changeStream = coll.watch([], { startAfter }); - this.defer(() => changeStream.close()); + changeStream = coll.watch([], { startAfter }); coll.insertOne({ x: 2 }, { writeConcern: { w: 'majority', j: true } }, err => { expect(err).to.not.exist; diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.test.js b/test/integration/client-side-encryption/client_side_encryption.prose.test.js index d33ee1c2b0..ed51ba402a 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.test.js +++ b/test/integration/client-side-encryption/client_side_encryption.prose.test.js @@ -1142,12 +1142,12 @@ describe('Client Side Encryption Prose Tests', metadata, function () { }; const clientNoTlsOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, null, '127.0.0.1:8002', '127.0.0.1:8002'), + kmsProviders: getKmsProviders(null, null, '127.0.0.1:9002', '127.0.0.1:9002'), tlsOptions: tlsCaOptions }; const clientWithTlsOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, null, '127.0.0.1:8002', '127.0.0.1:8002'), + kmsProviders: getKmsProviders(null, null, '127.0.0.1:9002', '127.0.0.1:9002'), tlsOptions: { aws: { tlsCAFile: process.env.KMIP_TLS_CA_FILE, @@ -1169,12 +1169,12 @@ describe('Client Side Encryption Prose Tests', metadata, function () { }; const clientWithTlsExpiredOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, '127.0.0.1:8000', '127.0.0.1:8000', '127.0.0.1:8000'), + kmsProviders: getKmsProviders(null, '127.0.0.1:9000', '127.0.0.1:9000', '127.0.0.1:9000'), tlsOptions: tlsCaOptions }; const clientWithInvalidHostnameOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, '127.0.0.1:8001', '127.0.0.1:8001', '127.0.0.1:8001'), + kmsProviders: getKmsProviders(null, '127.0.0.1:9001', '127.0.0.1:9001', '127.0.0.1:9001'), tlsOptions: tlsCaOptions }; const mongodbClientEncryption = this.configuration.mongodbClientEncryption; @@ -1245,10 +1245,10 @@ describe('Client Side Encryption Prose Tests', metadata, function () { const masterKey = { region: 'us-east-1', key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', - endpoint: '127.0.0.1:8002' + endpoint: '127.0.0.1:9002' }; 
- const masterKeyExpired = { ...masterKey, endpoint: '127.0.0.1:8000' }; - const masterKeyInvalidHostname = { ...masterKey, endpoint: '127.0.0.1:8001' }; + const masterKeyExpired = { ...masterKey, endpoint: '127.0.0.1:9000' }; + const masterKeyInvalidHostname = { ...masterKey, endpoint: '127.0.0.1:9001' }; it('should fail with no TLS', metadata, async function () { try { diff --git a/test/integration/crud/bulk.test.js b/test/integration/crud/bulk.test.js index b3170cf1f1..bfd9a897ee 100644 --- a/test/integration/crud/bulk.test.js +++ b/test/integration/crud/bulk.test.js @@ -17,6 +17,8 @@ chai.use(require('chai-subset')); const MAX_BSON_SIZE = 16777216; describe('Bulk', function () { + /** @type {MongoClient} */ + let client; before(function () { return setupDatabase(this.configuration); }); @@ -96,6 +98,17 @@ describe('Bulk', function () { }); }); + beforeEach(async function () { + client = this.configuration.newClient(this.configuration.writeConcernMax(), { + maxPoolSize: 1 + }); + await client.connect(); + }); + + afterEach(async function () { + if (client) await client.close(); + }); + it('should correctly handle ordered single batch api write command error', { metadata: { requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } @@ -490,61 +503,40 @@ describe('Bulk', function () { } }); - it( - 'should Correctly Execute Ordered Batch of Write Operations with duplicate key errors on updates', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function (done) { - var self = this; - var client = self.configuration.newClient(self.configuration.writeConcernMax(), { - maxPoolSize: 1 - }); - - client.connect(function (err, client) { - var db = client.db(self.configuration.db); - var col = db.collection('batch_write_ordered_ops_6'); - - // Add unique index on b field causing all updates to fail - col.createIndex({ b: 1 }, { unique: true, sparse: false }, function (err) { - expect(err).to.not.exist; - - var batch = col.initializeOrderedBulkOp(); - - // Add some operations to be executed in order - batch.insert({ a: 1 }); - batch.find({ a: 1 }).update({ $set: { b: 1 } }); - batch.insert({ b: 1 }); - - // Execute the operations - batch.execute(function (err, result) { - expect(err).to.exist; - expect(result).to.not.exist; - - // Test basic settings - result = err.result; - test.equal(1, result.nInserted); - test.equal(1, result.nMatched); - test.ok(1 === result.nModified || result.nModified == null); - test.equal(true, result.hasWriteErrors()); - test.ok(1, result.getWriteErrorCount()); - - // Individual error checking - var error = result.getWriteErrorAt(0); - test.equal(2, error.index); - test.equal(11000, error.code); - test.ok(error.errmsg != null); - test.equal(1, error.getOperation().b); - - client.close(done); - }); - }); - }); - } - } - ); + it('should Correctly Execute Ordered Batch of Write Operations with duplicate key errors on updates', async function () { + const db = client.db(this.configuration.db); + const col = db.collection('batch_write_ordered_ops_6'); + // Add unique index on b field causing all updates to fail + await col.createIndex({ b: 1 }, { unique: true, sparse: false }); + const batch = col.initializeOrderedBulkOp(); + + // Add some operations to be executed in order + batch.insert({ a: 1 }); + batch.find({ a: 1 }).update({ $set: { b: 1 } }); + batch.insert({ b: 1 }); + + const thrownError = await batch.execute().catch(error => error); + 
expect(thrownError).to.instanceOf(Error); + + // Test basic settings + const result = thrownError.result; + expect(result).to.have.property('nInserted', 1); + expect(result).to.have.property('nMatched', 1); + expect(result) + .to.have.property('nModified') + .that.satisfies(v => v == null || v === 1); + expect(result).to.have.property('hasWriteErrors').that.is.a('function'); + expect(result).to.have.property('getWriteErrorCount').that.is.a('function'); + expect(result.hasWriteErrors()).to.be.true; + expect(result.getWriteErrorCount()).to.equal(1); + + // Individual error checking + const writeError = result.getWriteErrorAt(0); + expect(writeError).to.have.property('index', 2); + expect(writeError).to.have.property('code', 11000); + expect(writeError).to.have.property('errmsg').that.is.a('string'); + expect(writeError.getOperation()).to.have.property('b', 1); + }); it( 'should Correctly Execute Ordered Batch of Write Operations with upserts causing duplicate key errors on updates', diff --git a/test/integration/load-balancers/load_balancers.spec.test.js b/test/integration/load-balancers/load_balancers.spec.test.js index 1446a5e42c..ae8abc298d 100644 --- a/test/integration/load-balancers/load_balancers.spec.test.js +++ b/test/integration/load-balancers/load_balancers.spec.test.js @@ -61,6 +61,5 @@ const SKIP = [ ]; describe('Load Balancer Unified Tests', function () { - this.timeout(10000); runUnifiedSuite(loadSpecTests(path.join('load-balancers')), SKIP); }); diff --git a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts new file mode 100644 index 0000000000..35008d13de --- /dev/null +++ b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts @@ -0,0 +1,56 @@ +import { expect } from 'chai'; + +import { MongoError, MongoServerError } from '../../../src'; + +describe('Retryable Writes Spec Prose', () => { + /** + * 1 Test that retryable writes raise an exception when using the MMAPv1 storage engine. + * For this test, execute a write operation, such as insertOne, which should generate an exception and the error code is 20. + * Assert that the error message is the replacement error message: + * + * ``` + * This MongoDB deployment does not support retryable writes. Please add + * retryWrites=false to your connection string. + * ``` + * Note: Drivers that rely on serverStatus to determine the storage engine in use MAY skip this test for sharded clusters, since mongos does not report this information in its serverStatus response. 
+ */ + let client; + + beforeEach(async function () { + client = this.configuration.newClient(); + await client.connect(); + }); + + afterEach(async () => { + await client?.close(); + }); + + it.skip('retryable writes raise an exception when using the MMAPv1 storage engine', async () => { + const failPoint = await client.db('admin').command({ + configureFailPoint: 'failCommand', + mode: { times: 1 }, + data: { + failCommands: ['insert'], + errorCode: 20, // MMAP Error code, + closeConnection: false + } + }); + + expect(failPoint).to.have.property('ok', 1); + + const error = await client + .db('test') + .collection('test') + .insertOne({ a: 1 }) + .catch(error => error); + + expect(error).to.exist; + expect(error).to.be.instanceOf(MongoServerError); + expect(error).to.have.property('originalError').that.instanceOf(MongoError); + expect(error.originalError).to.have.property('code', 20); + expect(error).to.have.property( + 'message', + 'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.' + ); + }).skipReason = 'TODO, might need to limit server versions? 3.6 fails, 4.x has different shape'; +}); diff --git a/test/integration/retryable-writes/retryable_writes.spec.test.js b/test/integration/retryable-writes/retryable_writes.spec.test.js deleted file mode 100644 index d85f0881f0..0000000000 --- a/test/integration/retryable-writes/retryable_writes.spec.test.js +++ /dev/null @@ -1,199 +0,0 @@ -'use strict'; - -const { expect } = require('chai'); -const { loadSpecTests } = require('../../spec'); -const { legacyRunOnToRunOnRequirement } = require('../../tools/spec-runner'); -const { isAnyRequirementSatisfied } = require('../../tools/unified-spec-runner/unified-utils'); - -describe('Legacy Retryable Writes specs', function () { - let ctx = {}; - const retryableWrites = loadSpecTests('retryable-writes/legacy'); - - for (const suite of retryableWrites) { - describe(suite.name, function () { - beforeEach(async function () { - let utilClient; - if (this.configuration.isLoadBalanced) { - // The util client can always point at the single mongos LB frontend. - utilClient = this.configuration.newClient(this.configuration.singleMongosLoadBalancerUri); - } else { - utilClient = this.configuration.newClient(); - } - - await utilClient.connect(); - - const allRequirements = suite.runOn.map(legacyRunOnToRunOnRequirement); - - const someRequirementMet = - !allRequirements.length || - (await isAnyRequirementSatisfied(this.currentTest.ctx, allRequirements, utilClient)); - - await utilClient.close(); - - if (!someRequirementMet) this.skip(); - }); - - afterEach(async function () { - // Step 3: Test Teardown. Turn off failpoints, and close client - if (!ctx.db || !ctx.client) { - return; - } - - if (ctx.failPointName) { - await turnOffFailPoint(ctx.client, ctx.failPointName); - } - await ctx.client.close(); - ctx = {}; // reset context - }); - - for (const test of suite.tests) { - it(test.description, async function () { - // Step 1: Test Setup. 
Includes a lot of boilerplate stuff - // like creating a client, dropping and refilling data collections, - // and enabling failpoints - await executeScenarioSetup(suite, test, this.configuration, ctx); - // Step 2: Run the test - await executeScenarioTest(test, ctx); - }); - } - }); - } -}); - -function executeScenarioSetup(scenario, test, config, ctx) { - const url = config.url(); - const options = Object.assign({}, test.clientOptions, { - heartbeatFrequencyMS: 100, - monitorCommands: true, - minPoolSize: 10 - }); - - ctx.failPointName = test.failPoint && test.failPoint.configureFailPoint; - - const client = config.newClient(url, options); - return client - .connect() - .then(client => (ctx.client = client)) - .then(() => (ctx.db = ctx.client.db(config.db))) - .then( - () => - (ctx.collection = ctx.db.collection( - `retryable_writes_test_${config.name}_${test.operation.name}` - )) - ) - .then(() => ctx.collection.drop()) - .catch(err => { - if (!err.message.match(/ns not found/)) { - throw err; - } - }) - .then(() => - Array.isArray(scenario.data) && scenario.data.length - ? ctx.collection.insertMany(scenario.data) - : {} - ) - .then(() => (test.failPoint ? ctx.client.db('admin').command(test.failPoint) : {})); -} - -function executeScenarioTest(test, ctx) { - return Promise.resolve() - .then(() => { - const args = generateArguments(test); - - let result = ctx.collection[test.operation.name].apply(ctx.collection, args); - const outcome = test.outcome && test.outcome.result; - const errorLabelsContain = outcome && outcome.errorLabelsContain; - const errorLabelsOmit = outcome && outcome.errorLabelsOmit; - const hasResult = outcome && !errorLabelsContain && !errorLabelsOmit; - if (test.outcome.error) { - result = result - .then(() => expect(false).to.be.true) - .catch(err => { - expect(err).to.exist; - expect(err.message, 'expected operations to fail, but they succeeded').to.not.match( - /expected false to be true/ - ); - if (hasResult) expect(err.result).to.matchMongoSpec(test.outcome.result); - if (errorLabelsContain) expect(err.errorLabels).to.include.members(errorLabelsContain); - if (errorLabelsOmit) { - errorLabelsOmit.forEach(label => { - expect(err.errorLabels).to.not.contain(label); - }); - } - }); - } else if (test.outcome.result) { - const expected = test.outcome.result; - result = result.then(transformToResultValue).then(r => expect(r).to.deep.include(expected)); - } - - return result; - }) - .then(() => { - if (test.outcome.collection) { - return ctx.collection - .find({}) - .toArray() - .then(collectionResults => { - expect(collectionResults).to.eql(test.outcome.collection.data); - }); - } - }); -} - -// Helper Functions - -/** - * Transforms the arguments from a test into actual arguments for our function calls - * - * @param {any} test - */ -function generateArguments(test) { - const args = []; - - if (test.operation.arguments) { - const options = {}; - Object.keys(test.operation.arguments).forEach(arg => { - if (arg === 'requests') { - args.push(test.operation.arguments[arg].map(convertBulkWriteOperation)); - } else if (arg === 'upsert') { - options.upsert = test.operation.arguments[arg]; - } else if (arg === 'returnDocument') { - options.returnDocument = test.operation.arguments[arg].toLowerCase(); - } else { - args.push(test.operation.arguments[arg]); - } - }); - - if (Object.keys(options).length > 0) { - args.push(options); - } - } - - return args; -} - -/** - * Transforms a request arg into a bulk write operation - * - * @param {any} op - */ -function 
convertBulkWriteOperation(op) { - return { [op.name]: op.arguments }; -} - -/** - * Transforms output of a bulk write to conform to the test format - * - * @param {any} result - */ -function transformToResultValue(result) { - return result && result.value ? result.value : result; -} - -/** Runs a command that turns off a fail point */ -function turnOffFailPoint(client, name) { - return client.db('admin').command({ - configureFailPoint: name, - mode: 'off' - }); -} diff --git a/test/integration/retryable-writes/retryable_writes.spec.test.ts b/test/integration/retryable-writes/retryable_writes.spec.test.ts new file mode 100644 index 0000000000..17dbf62145 --- /dev/null +++ b/test/integration/retryable-writes/retryable_writes.spec.test.ts @@ -0,0 +1,198 @@ +import { expect } from 'chai'; + +import type { Collection, Db, MongoClient } from '../../../src'; +import { loadSpecTests } from '../../spec'; +import { legacyRunOnToRunOnRequirement } from '../../tools/spec-runner'; +import { isAnyRequirementSatisfied } from '../../tools/unified-spec-runner/unified-utils'; + +interface RetryableWriteTestContext { + client?: MongoClient; + db?: Db; + collection?: Collection; + failPointName?: any; +} + +describe('Legacy Retryable Writes Specs', function () { + let ctx: RetryableWriteTestContext = {}; + + const retryableWrites = loadSpecTests('retryable-writes', 'legacy'); + + for (const suite of retryableWrites) { + describe(suite.name, function () { + beforeEach(async function () { + let utilClient: MongoClient; + if (this.configuration.isLoadBalanced) { + // The util client can always point at the single mongos LB frontend. + utilClient = this.configuration.newClient(this.configuration.singleMongosLoadBalancerUri); + } else { + utilClient = this.configuration.newClient(); + } + + await utilClient.connect(); + + const allRequirements = suite.runOn.map(legacyRunOnToRunOnRequirement); + + const someRequirementMet = + !allRequirements.length || + (await isAnyRequirementSatisfied(this.currentTest.ctx, allRequirements, utilClient)); + + await utilClient.close(); + + if (!someRequirementMet) this.skip(); + }); + + beforeEach(async function () { + // Step 1: Test Setup. Includes a lot of boilerplate stuff + // like creating a client, dropping and refilling data collections, + // and enabling failpoints + const { spec } = this.currentTest; + await executeScenarioSetup(suite, spec, this.configuration, ctx); + }); + + afterEach(async function () { + // Step 3: Test Teardown. 
Turn off failpoints, and close client + if (!ctx.db || !ctx.client) { + return; + } + + if (ctx.failPointName) { + await turnOffFailPoint(ctx.client, ctx.failPointName); + } + await ctx.client.close(); + ctx = {}; // reset context + }); + + for (const spec of suite.tests) { + // Step 2: Run the test + const mochaTest = it(spec.description, async () => await executeScenarioTest(spec, ctx)); + + // A pattern we don't need to repeat for unified tests + // In order to give the beforeEach hook access to the + // spec test so it can be responsible for skipping it + // and executeScenarioSetup + mochaTest.spec = spec; + } + }); + } +}); + +async function executeScenarioSetup(scenario, test, config, ctx) { + const url = config.url(); + const options = { + ...test.clientOptions, + heartbeatFrequencyMS: 100, + monitorCommands: true, + minPoolSize: 10 + }; + + ctx.failPointName = test.failPoint && test.failPoint.configureFailPoint; + + const client = config.newClient(url, options); + await client.connect(); + + ctx.client = client; + ctx.db = client.db(config.db); + ctx.collection = ctx.db.collection(`retryable_writes_test_${config.name}_${test.operation.name}`); + + try { + await ctx.collection.drop(); + } catch (error) { + if (!error.message.match(/ns not found/)) { + throw error; + } + } + + if (Array.isArray(scenario.data) && scenario.data.length) { + await ctx.collection.insertMany(scenario.data); + } + + if (test.failPoint) { + await ctx.client.db('admin').command(test.failPoint); + } +} + +async function executeScenarioTest(test, ctx: RetryableWriteTestContext) { + const args = generateArguments(test); + + // In case the spec files or our API changes + expect(ctx.collection).to.have.property(test.operation.name).that.is.a('function'); + + // TODO(NODE-4033): Collect command started events and assert txn number existence or omission + // have to add logic to handle bulkWrites which emit multiple command started events + const resultOrError = await ctx.collection[test.operation.name](...args).catch(error => error); + + const outcome = test.outcome && test.outcome.result; + const errorLabelsContain = outcome && outcome.errorLabelsContain; + const errorLabelsOmit = outcome && outcome.errorLabelsOmit; + const hasResult = outcome && !errorLabelsContain && !errorLabelsOmit; + if (test.outcome.error) { + const thrownError = resultOrError; + expect(thrownError, `${test.operation.name} was supposed to fail but did not!`).to.exist; + expect(thrownError).to.have.property('message'); + + if (hasResult) { + expect(thrownError.result).to.matchMongoSpec(test.outcome.result); + } + + if (errorLabelsContain) { + expect(thrownError.errorLabels).to.include.members(errorLabelsContain); + } + + if (errorLabelsOmit) { + for (const label of errorLabelsOmit) { + expect(thrownError.errorLabels).to.not.contain(label); + } + } + } else if (test.outcome.result) { + expect(resultOrError, resultOrError.stack).to.not.be.instanceOf(Error); + const result = resultOrError; + const expected = test.outcome.result; + + // TODO(NODE-4034): Make CRUD results spec compliant + expect(result.value ?? 
result).to.deep.include(expected); + } + + if (test.outcome.collection) { + const collectionResults = await ctx.collection.find({}).toArray(); + expect(collectionResults).to.deep.equal(test.outcome.collection.data); + } +} + +// Helper Functions + +/** Transforms the arguments from a test into actual arguments for our function calls */ +function generateArguments(test) { + const args = []; + + if (test.operation.arguments) { + const options: Record = {}; + for (const arg of Object.keys(test.operation.arguments)) { + if (arg === 'requests') { + /** Transforms a request arg into a bulk write operation */ + args.push( + test.operation.arguments[arg].map(({ name, arguments: args }) => ({ [name]: args })) + ); + } else if (arg === 'upsert') { + options.upsert = test.operation.arguments[arg]; + } else if (arg === 'returnDocument') { + options.returnDocument = test.operation.arguments[arg].toLowerCase(); + } else { + args.push(test.operation.arguments[arg]); + } + } + + if (Object.keys(options).length > 0) { + args.push(options); + } + } + + return args; +} + +/** Runs a command that turns off a fail point */ +async function turnOffFailPoint(client, name) { + return await client.db('admin').command({ + configureFailPoint: name, + mode: 'off' + }); +} diff --git a/test/readme.md b/test/readme.md index b5105234b9..b433be60e8 100644 --- a/test/readme.md +++ b/test/readme.md @@ -13,6 +13,43 @@ about the types of tests and how to run them. - [Writing Tests](#writing-tests) - [Testing with Special Environments](#testing-with-special-environments) +## Complex deployments + +Some of the topologies mentioned in this testing guide require a bit of "orchestration". +For that, we have [mongo-orchestration](https://github.com/10gen/mongo-orchestration). +We advise cloning this repository (instead of installing from pip). Inside the repo run: + +```sh +# Create a virtual environment so you can make sure m-orch has all of its dependencies versioned separate from your global environment +python -m .venv.com +# There are activates for a few different shells, use the one that matches your shell +source ./.venv.com/bin/activate +``` + +Your working directory must still be at the root of the mongo-orchestration repository. +With the virtual environment activated install mongo-orchestration with the following command. + +```sh +# "-e ." installs the package as an editable python module +# the benefit is that the source code in the repo is the same code that is run when you launch orchestration +# instead of a new copy being installed elsewhere +pip install -e . +``` + +I prefer launching mongo-orchestration in the foreground so it's easily stopped with ctrl-C but the pid file can be found in the current directory. + +```sh +mongo-orchestration start --no-fork +``` + +Posting a configuration can look something like this: + +```sh +curl --silent --show-error --max-time 60 --fail http://localhost:8889/v1/sharded_clusters --data @"$DRIVERS_TOOLS/.evergreen/orchestration/configs/sharded_clusters/load-balancer.json" +``` + +Where `$DRIVERS_TOOLS` is the path to drivers evergreen tools. + ## About the Tests All of our test automation is powered by the [Mocha test framework][mocha]. @@ -263,7 +300,8 @@ The following steps will walk you through how to create and test a MongoDB Serve The following steps will walk you through how to start and test a load balancer. -1. Start a sharded cluster. You can use the [cluster_setup.sh](tools/cluster_setup.sh) script to do so: `./test/tools/cluster_setup.sh sharded_cluster`. 
The tool should create a cluster with two mongos, so you have a URI similar to `MONGODB_URI=mongodb://host1,host2/`. +1. Start a sharded cluster with `--setParameter featureFlagLoadBalancer=true` and `--setParameter loadBalancerPort=27051` flags passed to the mongos processes. + 1. Follow the instructions here to setup [mongo-orchestration]() 1. Create an environment variable named `MONGODB_URI` that stores the URI of the sharded cluster you just created. For example: `export MONGODB_URI="mongodb://host1,host2/"` 1. Install the HAProxy load balancer. For those on macOS, you can install HAProxy with `brew install haproxy`. 1. Start the load balancer by using the [run-load-balancer script](https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-load-balancer.sh) provided in drivers-evergreen-tools. @@ -354,9 +392,9 @@ The following steps will walk you through how to run the tests for CSFLE. 1. This will install all the dependencies needed to run a python kms_kmip simulated server 3. In 4 separate terminals launch the following: - `./kmstlsvenv/bin/python3 -u kms_kmip_server.py` # by default it always runs on port 5698 - - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000` - - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001` - - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert` + - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 9000` + - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 9001` + - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 9002 --require_client_cert` 4. 
Set the following environment variables: - `export KMIP_TLS_CA_FILE="${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem"` - `export KMIP_TLS_CERT_FILE="${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem"` diff --git a/test/tools/spec-runner/index.js b/test/tools/spec-runner/index.js index 0ebee503d0..c9878a6a4a 100644 --- a/test/tools/spec-runner/index.js +++ b/test/tools/spec-runner/index.js @@ -458,26 +458,25 @@ function validateExpectations(commandEvents, spec, savedSessionData) { const actualEvents = normalizeCommandShapes(commandEvents); const rawExpectedEvents = spec.expectations.map(x => x.command_started_event); const expectedEvents = normalizeCommandShapes(rawExpectedEvents); - expect(actualEvents).to.have.length(expectedEvents.length); - expectedEvents.forEach((expected, idx) => { - const actual = actualEvents[idx]; + for (const [idx, expectedEvent] of expectedEvents.entries()) { + const actualEvent = actualEvents[idx]; - if (expected.commandName != null) { - expect(actual.commandName).to.equal(expected.commandName); + if (typeof expectedEvent.commandName === 'string') { + expect(actualEvent).to.have.property('commandName', expectedEvent.commandName); } - if (expected.databaseName != null) { - expect(actual.databaseName).to.equal(expected.databaseName); + if (typeof expectedEvent.databaseName === 'string') { + expect(actualEvent).to.have.property('databaseName', expectedEvent.databaseName); } - const actualCommand = actual.command; - const expectedCommand = expected.command; + const actualCommand = actualEvent.command; + const expectedCommand = expectedEvent.command; if (expectedCommand.sort) { - // TODO: This is a workaround that works because all sorts in the specs + // TODO(NODE-3235): This is a workaround that works because all sorts in the specs // are objects with one key; ideally we'd want to adjust the spec definitions // to indicate whether order matters for any given key and set general - // expectations accordingly (see NODE-3235) + // expectations accordingly expect(Object.keys(expectedCommand.sort)).to.have.lengthOf(1); expect(actualCommand.sort).to.be.instanceOf(Map); expect(actualCommand.sort.size).to.equal(1); @@ -487,7 +486,7 @@ function validateExpectations(commandEvents, spec, savedSessionData) { } expect(actualCommand).withSessionData(savedSessionData).to.matchMongoSpec(expectedCommand); - }); + } } function normalizeCommandShapes(commands) { @@ -875,25 +874,23 @@ function convertCollectionOptions(options) { return result; } -function testOperations(testData, operationContext, options) { +async function testOperations(testData, operationContext, options) { options = options || { swallowOperationErrors: true }; - return testData.operations.reduce((combined, operation) => { - return combined.then(() => { - const object = operation.object || 'collection'; - if (object === 'collection') { - const db = operationContext.database; - const collectionName = operationContext.collectionName; - const collectionOptions = operation.collectionOptions || {}; - - operationContext[object] = db.collection( - collectionName, - convertCollectionOptions(collectionOptions) - ); - } - return testOperation(operation, operationContext[object], operationContext, options); - }); - }, Promise.resolve()); + for (const operation of testData.operations) { + const object = operation.object || 'collection'; + if (object === 'collection') { + const db = operationContext.database; + const collectionName = operationContext.collectionName; + const collectionOptions = operation.collectionOptions || {}; + + 
operationContext[object] = db.collection( + collectionName, + convertCollectionOptions(collectionOptions) + ); + } + await testOperation(operation, operationContext[object], operationContext, options); + } } module.exports = { diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts index 39a83c7036..ce0370e79a 100644 --- a/test/tools/unified-spec-runner/match.ts +++ b/test/tools/unified-spec-runner/match.ts @@ -272,7 +272,7 @@ function validEmptyCmapEvent( export function matchesEvents( expected: (ExpectedCommandEvent & ExpectedCmapEvent)[], - actual: (CommandEvent & CmapEvent)[], + actual: CommandEvent[] | CmapEvent[], entities: EntitiesMap ): void { if (actual.length !== expected.length) { @@ -300,7 +300,9 @@ export function matchesEvents( ]); } else if (expectedEvent.commandFailedEvent) { expect(actualEvent).to.be.instanceOf(CommandFailedEvent); - expect(actualEvent.commandName).to.equal(expectedEvent.commandFailedEvent.commandName); + expect(actualEvent) + .to.have.property('commandName') + .that.equals(expectedEvent.commandFailedEvent.commandName); } else if (expectedEvent.connectionClosedEvent) { expect(actualEvent).to.be.instanceOf(ConnectionClosedEvent); if (expectedEvent.connectionClosedEvent.hasServiceId) { diff --git a/test/tools/unified-spec-runner/runner.ts b/test/tools/unified-spec-runner/runner.ts index 34964caf67..f057a2070e 100644 --- a/test/tools/unified-spec-runner/runner.ts +++ b/test/tools/unified-spec-runner/runner.ts @@ -69,7 +69,7 @@ export async function runUnifiedTest( utilClient = ctx.configuration.newClient(); } - let entities; + let entities: EntitiesMap; try { trace('\n starting test:'); try { @@ -187,14 +187,11 @@ export async function runUnifiedTest( for (const expectedEventList of test.expectEvents) { const clientId = expectedEventList.client; const eventType = expectedEventList.eventType; - let actualEvents; // If no event type is provided it defaults to 'command', so just // check for 'cmap' here for now. - if (eventType === 'cmap') { - actualEvents = clientCmapEvents.get(clientId); - } else { - actualEvents = clientCommandEvents.get(clientId); - } + const actualEvents = + eventType === 'cmap' ? 
clientCmapEvents.get(clientId) : clientCommandEvents.get(clientId); + expect(actualEvents, `No client entity found with id ${clientId}`).to.exist; matchesEvents(expectedEventList.events, actualEvents, entities); } diff --git a/test/types/enum.test-d.ts b/test/types/enum.test-d.ts index 225282f618..69ac0894ef 100644 --- a/test/types/enum.test-d.ts +++ b/test/types/enum.test-d.ts @@ -12,6 +12,7 @@ import { ExplainVerbosity, GSSAPICanonicalizationValue, LoggerLevel, + MongoErrorLabel, ProfilingLevel, ReadConcernLevel, ReadPreferenceMode, @@ -45,3 +46,4 @@ expectType(Object.values(ReturnDocument)[num]); expectType(Object.values(ServerApiVersion)[num]); expectType(Object.values(ServerType)[num]); expectType(Object.values(TopologyType)[num]); +expectType(Object.values(MongoErrorLabel)[num]); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index 2bdb5f8479..c7a6b48524 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -5,11 +5,12 @@ import { WaitQueueTimeoutError as MongoWaitQueueTimeoutError } from '../../src/cmap/errors'; import { - isRetryableEndTransactionError, + isRetryableReadError, isSDAMUnrecoverableError, LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE, LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE, MongoSystemError, + needsRetryableWriteLabel, NODE_IS_RECOVERING_ERROR_MESSAGE } from '../../src/error'; import * as importsFromErrorSrc from '../../src/error'; @@ -146,34 +147,6 @@ describe('MongoErrors', () => { }); }); - describe('#isRetryableEndTransactionError', function () { - context('when the error has a RetryableWriteError label', function () { - const error = new MongoNetworkError(''); - error.addErrorLabel('RetryableWriteError'); - - it('returns true', function () { - expect(isRetryableEndTransactionError(error)).to.be.true; - }); - }); - - context('when the error does not have a RetryableWriteError label', function () { - const error = new MongoNetworkError(''); - error.addErrorLabel('InvalidLabel'); - - it('returns false', function () { - expect(isRetryableEndTransactionError(error)).to.be.false; - }); - }); - - context('when the error does not have any label', function () { - const error = new MongoNetworkError(''); - - it('returns false', function () { - expect(isRetryableEndTransactionError(error)).to.be.false; - }); - }); - }); - describe('#isSDAMUnrecoverableError', function () { context('when the error is a MongoParseError', function () { it('returns true', function () { @@ -211,7 +184,7 @@ describe('MongoErrors', () => { function () { it('returns false', function () { // If the response includes an error code, it MUST be solely used to determine if error is a "node is recovering" or "not writable primary" error. 
- const error = new MongoError(NODE_IS_RECOVERING_ERROR_MESSAGE); + const error = new MongoError(NODE_IS_RECOVERING_ERROR_MESSAGE.source); error.code = 555; expect(isSDAMUnrecoverableError(error)).to.be.false; }); @@ -222,7 +195,9 @@ describe('MongoErrors', () => { 'when the error message contains the legacy "not primary" message and no error code is used', function () { it('returns true', function () { - const error = new MongoError(`this is ${LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE}.`); + const error = new MongoError( + `this is ${LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.source}.` + ); expect(isSDAMUnrecoverableError(error)).to.be.true; }); } @@ -365,7 +340,7 @@ describe('MongoErrors', () => { topology.selectServer('primary', (err, server) => { expect(err).to.not.exist; - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), err => { + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}, err => { let _err; try { expect(err).to.be.an.instanceOf(MongoWriteConcernError); @@ -406,7 +381,7 @@ describe('MongoErrors', () => { topology.selectServer('primary', (err, server) => { expect(err).to.not.exist; - server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), err => { + server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}, err => { let _err; try { expect(err).to.be.an.instanceOf(MongoWriteConcernError); @@ -424,4 +399,136 @@ describe('MongoErrors', () => { }); }); }); + + describe('retryable errors', () => { + describe('#needsRetryableWriteLabel', () => { + // Note the wireVersions used below are used to represent + // 8 - below server version 4.4 + // 9 - above server version 4.4 + + const tests: { + description: string; + result: boolean; + error: Error; + maxWireVersion: number; + }[] = [ + { + description: 'a plain error', + result: false, + error: new Error('do not retry me!'), + maxWireVersion: 8 + }, + { + description: 'a MongoError with no code nor label', + result: false, + error: new MongoError('do not retry me!'), + maxWireVersion: 8 + }, + { + description: 'network error', + result: true, + error: new MongoNetworkError('socket bad, try again'), + maxWireVersion: 8 + }, + { + description: 'a MongoWriteConcernError with no code nor label', + result: false, + error: new MongoWriteConcernError({ message: 'empty wc error' }), + maxWireVersion: 8 + }, + { + description: 'a MongoWriteConcernError with a random label', + result: false, + error: new MongoWriteConcernError( + { message: 'random label' }, + { errorLabels: ['myLabel'] } + ), + maxWireVersion: 8 + }, + { + description: 'a MongoWriteConcernError with a retryable code above server 4.4', + result: false, + error: new MongoWriteConcernError({}, { code: 262 }), + maxWireVersion: 9 + }, + { + description: 'a MongoWriteConcernError with a retryable code below server 4.4', + result: true, + error: new MongoWriteConcernError({}, { code: 262 }), + maxWireVersion: 8 + }, + { + description: 'a MongoWriteConcernError with a RetryableWriteError label below server 4.4', + result: true, + error: new MongoWriteConcernError({}, { errorLabels: ['RetryableWriteError'] }), + maxWireVersion: 8 + }, + { + description: 'a MongoWriteConcernError with a RetryableWriteError label above server 4.4', + result: false, + error: new MongoWriteConcernError({}, { errorLabels: ['RetryableWriteError'] }), + maxWireVersion: 9 + }, + { + description: 'any MongoError with a RetryableWriteError label', + result: false, + error: (() => { + // These tests all use MongoWriteConcernError 
because + // its constructor is easier to call but any MongoError should work + const error = new MongoError(''); + error.addErrorLabel('RetryableWriteError'); + return error; + })(), + maxWireVersion: 9 + } + ]; + for (const { description, result, error, maxWireVersion } of tests) { + it(`${description} ${result ? 'needs' : 'does not need'} a retryable write label`, () => { + expect(needsRetryableWriteLabel(error, maxWireVersion)).to.be.equal(result); + }); + } + }); + + describe('#isRetryableReadError', () => { + const tests: { description: string; result: boolean; error: MongoError }[] = [ + { + description: 'plain error', + result: false, + // @ts-expect-error: passing in a plain error to test false case + error: new Error('do not retry me!') + }, + { + description: 'An error code that is not retryable', + result: false, + error: new MongoServerError({ message: '', code: 1 }) + }, + { + description: 'An error code that is retryable', + result: true, + error: new MongoServerError({ message: '', code: 91 }) + }, + { + description: 'network error', + result: true, + error: new MongoNetworkError('socket bad, try again') + }, + { + description: 'error with legacy not writable primary error message', + result: true, + error: new MongoError(LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.source) + }, + { + description: 'error with node is recovering error message', + result: true, + error: new MongoError('node is recovering') + } + ]; + + for (const { description, result, error } of tests) { + it(`${description} is${result ? '' : ' not'} a retryable read`, () => { + expect(isRetryableReadError(error)).to.be.equal(result); + }); + } + }); + }); }); diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js index 55a3d12ce9..e642302e7a 100644 --- a/test/unit/sdam/topology.test.js +++ b/test/unit/sdam/topology.test.js @@ -218,7 +218,7 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - server.command(ns('test.test'), { insert: { a: 42 } }, (err, result) => { + server.command(ns('test.test'), { insert: { a: 42 } }, {}, (err, result) => { expect(result).to.not.exist; expect(err).to.exist; expect(err).to.eql(serverDescription.error); @@ -235,7 +235,7 @@ describe('Topology (unit)', function () { if (isHello(doc)) { request.reply(Object.assign({}, mock.HELLO, { maxWireVersion: 9 })); } else if (doc.insert) { - request.reply({ ok: 0, message: LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE }); + request.reply({ ok: 0, message: LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.source }); } else { request.reply({ ok: 1 }); } @@ -254,7 +254,7 @@ describe('Topology (unit)', function () { let poolCleared = false; topology.on('connectionPoolCleared', () => (poolCleared = true)); - server.command(ns('test.test'), { insert: { a: 42 } }, (err, result) => { + server.command(ns('test.test'), { insert: { a: 42 } }, {}, (err, result) => { expect(result).to.not.exist; expect(err).to.exist; expect(err).to.eql(serverDescription.error); @@ -287,7 +287,7 @@ describe('Topology (unit)', function () { let serverDescription; server.on('descriptionReceived', sd => (serverDescription = sd)); - server.command(ns('test.test'), { insert: { a: 42 } }, (err, result) => { + server.command(ns('test.test'), { insert: { a: 42 } }, {}, (err, result) => { expect(result).to.not.exist; expect(err).to.exist; expect(err).to.eql(serverDescription.error); From d43abe7b26eeb3a32e4ccff5610078a84cb21514 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Wed, 9 
Mar 2022 11:45:13 -0500 Subject: [PATCH 02/10] fix: new override rules --- src/bulk/common.ts | 5 +++-- src/error.ts | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/bulk/common.ts b/src/bulk/common.ts index fa9e5eb1ce..1fed3680ad 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -1,4 +1,3 @@ -import type { ClientSession, Server } from '..'; import { BSONSerializeOptions, Document, @@ -24,7 +23,9 @@ import { InsertOperation } from '../operations/insert'; import { AbstractOperation, Hint } from '../operations/operation'; import { makeUpdateStatement, UpdateOperation, UpdateStatement } from '../operations/update'; import { PromiseProvider } from '../promise_provider'; +import type { Server } from '../sdam/server'; import type { Topology } from '../sdam/topology'; +import type { ClientSession } from '../sessions'; import { applyRetryableWrites, Callback, @@ -935,7 +936,7 @@ class BulkWriteShimOperation extends AbstractOperation { this.bulkOperation = bulkOperation; } - execute(server: Server, session: ClientSession, callback: Callback): void { + execute(server: Server, session: ClientSession | undefined, callback: Callback): void { if (this.options.session == null) { // An implicit session could have been created by 'executeOperation' // So if we stick it on finalOptions here, each bulk operation diff --git a/src/error.ts b/src/error.ts index 74d2f1af5f..30ddd47d0a 100644 --- a/src/error.ts +++ b/src/error.ts @@ -438,7 +438,7 @@ export class MongoUnexpectedServerResponseError extends MongoRuntimeError { super(message); } - get name(): string { + override get name(): string { return 'MongoUnexpectedServerResponseError'; } } From 16d834fefe4e882808dff402938644462e34f7d0 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 10 Mar 2022 15:02:24 -0500 Subject: [PATCH 03/10] fix: undo some changes --- src/cmap/auth/gssapi.ts | 7 +++++-- src/cmap/auth/mongocr.ts | 2 +- src/cmap/auth/mongodb_aws.ts | 4 +--- src/cmap/auth/scram.ts | 4 ++-- src/cmap/connection.ts | 18 ++++++------------ src/index.ts | 8 +------- src/sdam/server.ts | 7 +------ 7 files changed, 17 insertions(+), 33 deletions(-) diff --git a/src/cmap/auth/gssapi.ts b/src/cmap/auth/gssapi.ts index d13e2e00ea..689d6e9512 100644 --- a/src/cmap/auth/gssapi.ts +++ b/src/cmap/auth/gssapi.ts @@ -42,8 +42,11 @@ export class GSSAPI extends AuthProvider { new MongoMissingCredentialsError('Credentials required for GSSAPI authentication') ); const { username } = credentials; - function externalCommand(command: Document, callback: Callback) { - return connection.command(ns('$external.$cmd'), command, undefined, callback); + function externalCommand( + command: Document, + cb: Callback<{ payload: string; conversationId: any }> + ) { + return connection.command(ns('$external.$cmd'), command, undefined, cb); } makeKerberosClient(authContext, (err, client) => { if (err) return callback(err); diff --git a/src/cmap/auth/mongocr.ts b/src/cmap/auth/mongocr.ts index 0359dba889..232378f0d4 100644 --- a/src/cmap/auth/mongocr.ts +++ b/src/cmap/auth/mongocr.ts @@ -18,7 +18,7 @@ export class MongoCR extends AuthProvider { let key = null; // Get nonce - if (err == null && r != null) { + if (err == null) { nonce = r.nonce; // Use node md5 generator diff --git a/src/cmap/auth/mongodb_aws.ts b/src/cmap/auth/mongodb_aws.ts index 028ae75208..ad2014c4fc 100644 --- a/src/cmap/auth/mongodb_aws.ts +++ b/src/cmap/auth/mongodb_aws.ts @@ -90,9 +90,7 @@ export class MongoDBAWS extends AuthProvider { }; connection.command(ns(`${db}.$cmd`), 
saslStart, undefined, (err, res) => { - if (err || !res) { - return callback(err); - } + if (err) return callback(err); const serverResponse = BSON.deserialize(res.payload.buffer, bsonOptions) as { s: Binary; diff --git a/src/cmap/auth/scram.ts b/src/cmap/auth/scram.ts index 3551c49aab..2aac6f8e95 100644 --- a/src/cmap/auth/scram.ts +++ b/src/cmap/auth/scram.ts @@ -122,7 +122,7 @@ function executeScram(cryptoMethod: CryptoMethod, authContext: AuthContext, call const saslStartCmd = makeFirstMessage(cryptoMethod, credentials, nonce); connection.command(ns(`${db}.$cmd`), saslStartCmd, undefined, (_err, result) => { const err = resolveError(_err, result); - if (err || !result) { + if (err) { return callback(err); } @@ -213,7 +213,7 @@ function continueScramConversation( connection.command(ns(`${db}.$cmd`), saslContinueCmd, undefined, (_err, r) => { const err = resolveError(_err, r); - if (err || !r) { + if (err) { return callback(err); } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index eea4edfd1c..cfec2a6c15 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -22,7 +22,6 @@ import { import type { ServerApi, SupportedNodeConnectionOptions } from '../mongo_client'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import { ReadPreference, ReadPreferenceLike } from '../read_preference'; -import type { HandleOperationResultCallback } from '../sdam/server'; import { applySession, ClientSession, updateSessionFromResponse } from '../sessions'; import { calculateDurationInMs, @@ -383,7 +382,7 @@ export class Connection extends TypedEventEmitter { ns: MongoDBNamespace, cmd: Document, options: CommandOptions | undefined, - callback: HandleOperationResultCallback + callback: Callback ): void { if (!(ns instanceof MongoDBNamespace)) { // TODO(NODE-3483): Replace this with a MongoCommandError @@ -413,7 +412,7 @@ export class Connection extends TypedEventEmitter { clusterTime = session.clusterTime; } - const err = applySession(session, finalCmd, options); + const err = applySession(session, finalCmd, options as CommandOptions); if (err) { return callback(err); } @@ -456,12 +455,7 @@ export class Connection extends TypedEventEmitter { } /** @internal */ - query( - ns: MongoDBNamespace, - cmd: Document, - options: QueryOptions, - callback: HandleOperationResultCallback - ): void { + query(ns: MongoDBNamespace, cmd: Document, options: QueryOptions, callback: Callback): void { const isExplain = cmd.$explain != null; const readPreference = options.readPreference ?? 
ReadPreference.primary; const batchSize = options.batchSize || 0; @@ -538,7 +532,7 @@ export class Connection extends TypedEventEmitter { ns: MongoDBNamespace, cursorId: Long, options: GetMoreOptions, - callback: HandleOperationResultCallback + callback: Callback ): void { const fullResult = !!options[kFullResult]; const wireVersion = maxWireVersion(this); @@ -595,7 +589,7 @@ export class Connection extends TypedEventEmitter { ns: MongoDBNamespace, cursorIds: Long[], options: CommandOptions, - callback: HandleOperationResultCallback + callback: Callback ): void { if (!cursorIds || !Array.isArray(cursorIds)) { // TODO(NODE-3483): Replace this with a MongoCommandError @@ -624,7 +618,7 @@ export class Connection extends TypedEventEmitter { (err, response) => { if (err || !response) return callback(err); if (response.cursorNotFound) { - return callback(new MongoNetworkError('cursor killed or timed out')); + return callback(new MongoNetworkError('cursor killed or timed out'), null); } if (!Array.isArray(response.documents) || response.documents.length === 0) { diff --git a/src/index.ts b/src/index.ts index f996118a42..da8cc2614d 100644 --- a/src/index.ts +++ b/src/index.ts @@ -398,13 +398,7 @@ export type { RTTPinger, RTTPingerOptions } from './sdam/monitor'; -export type { - HandleOperationResultCallback, - Server, - ServerEvents, - ServerOptions, - ServerPrivate -} from './sdam/server'; +export type { Server, ServerEvents, ServerOptions, ServerPrivate } from './sdam/server'; export type { ServerDescription, ServerDescriptionOptions, diff --git a/src/sdam/server.ts b/src/sdam/server.ts index b822b69e24..a14a392d60 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -520,18 +520,13 @@ function isRetryableWritesEnabled(topology: Topology) { return topology.s.options.retryWrites !== false; } -/** @internal */ -export interface HandleOperationResultCallback { - (error: MongoError | Error | undefined, result?: Document): void; -} - function makeOperationHandler( server: Server, connection: Connection, cmd: Document, options: CommandOptions | GetMoreOptions | undefined, callback: Callback -): HandleOperationResultCallback { +): Callback { const session = options?.session; return function handleOperationResult(error, result) { if (result != null) { From fc0721b25bfa9e3c8ba9144128b79359e4dc57cb Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 10 Mar 2022 15:30:10 -0500 Subject: [PATCH 04/10] fix: more removals --- .evergreen/run-kms-servers.sh | 6 ++--- .evergreen/run-tests.sh | 2 +- src/cmap/connect.ts | 2 +- src/operations/rename.ts | 4 +-- src/sdam/monitor.ts | 7 +----- test/readme.md | 46 +++-------------------------------- 6 files changed, 12 insertions(+), 55 deletions(-) diff --git a/.evergreen/run-kms-servers.sh b/.evergreen/run-kms-servers.sh index 3fde63cbd3..76ef6ac258 100644 --- a/.evergreen/run-kms-servers.sh +++ b/.evergreen/run-kms-servers.sh @@ -2,6 +2,6 @@ cd ${DRIVERS_TOOLS}/.evergreen/csfle . 
./activate_venv.sh # by default it always runs on port 5698 ./kmstlsvenv/bin/python3 -u kms_kmip_server.py & -./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 9000 & -./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 9001 & -./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 9002 --require_client_cert & +./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & +./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & +./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert & diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index a2da41890b..a6a349e4f3 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -47,7 +47,7 @@ else source "$DRIVERS_TOOLS"/.evergreen/csfle/set-temp-creds.sh fi -npm install mongodb-client-encryption +npm install mongodb-client-encryption@">=2.0.0-beta.4" export AUTH=$AUTH export SINGLE_MONGOS_LB_URI=${SINGLE_MONGOS_LB_URI} diff --git a/src/cmap/connect.ts b/src/cmap/connect.ts index 596319fccf..0520c5d07e 100644 --- a/src/cmap/connect.ts +++ b/src/cmap/connect.ts @@ -126,7 +126,7 @@ function performInitialHandshake( const start = new Date().getTime(); conn.command(ns('admin.$cmd'), handshakeDoc, handshakeOptions, (err, response) => { - if (err || !response) { + if (err) { callback(err); return; } diff --git a/src/operations/rename.ts b/src/operations/rename.ts index 00f3ee3118..02e27a5bf7 100644 --- a/src/operations/rename.ts +++ b/src/operations/rename.ts @@ -1,6 +1,6 @@ import type { Document } from '../bson'; import { Collection } from '../collection'; -import { MongoRuntimeError, MongoServerError } from '../error'; +import { MongoServerError } from '../error'; import type { Server } from '../sdam/server'; import type { ClientSession } from '../sessions'; import { Callback, checkCollectionName } from '../utils'; @@ -46,7 +46,7 @@ export class RenameOperation extends RunAdminCommandOperation { const coll = this.collection; super.execute(server, session, (err, doc) => { - if (err || !doc) return callback(err ? 
err : new MongoRuntimeError('No result')); + if (err) return callback(err); // We have an error if (doc?.errmsg) { return callback(new MongoServerError(doc)); diff --git a/src/sdam/monitor.ts b/src/sdam/monitor.ts index f734d52423..d43bc3e49e 100644 --- a/src/sdam/monitor.ts +++ b/src/sdam/monitor.ts @@ -2,7 +2,7 @@ import { Document, Long } from '../bson'; import { connect } from '../cmap/connect'; import { Connection, ConnectionOptions } from '../cmap/connection'; import { LEGACY_HELLO_COMMAND } from '../constants'; -import { MongoNetworkError, MongoUnexpectedServerResponseError } from '../error'; +import { MongoNetworkError } from '../error'; import { CancellationToken, TypedEventEmitter } from '../mongo_types'; import type { Callback, InterruptibleAsyncInterval } from '../utils'; import { @@ -261,11 +261,6 @@ function checkServer(monitor: Monitor, callback: Callback) { if (err) { return failureHandler(err); } - if (!hello) { - return failureHandler( - new MongoUnexpectedServerResponseError('Empty response without error') - ); - } if (!('isWritablePrimary' in hello)) { // Provide hello-style response document. diff --git a/test/readme.md b/test/readme.md index b433be60e8..b5105234b9 100644 --- a/test/readme.md +++ b/test/readme.md @@ -13,43 +13,6 @@ about the types of tests and how to run them. - [Writing Tests](#writing-tests) - [Testing with Special Environments](#testing-with-special-environments) -## Complex deployments - -Some of the topologies mentioned in this testing guide require a bit of "orchestration". -For that, we have [mongo-orchestration](https://github.com/10gen/mongo-orchestration). -We advise cloning this repository (instead of installing from pip). Inside the repo run: - -```sh -# Create a virtual environment so you can make sure m-orch has all of its dependencies versioned separate from your global environment -python -m .venv.com -# There are activates for a few different shells, use the one that matches your shell -source ./.venv.com/bin/activate -``` - -Your working directory must still be at the root of the mongo-orchestration repository. -With the virtual environment activated install mongo-orchestration with the following command. - -```sh -# "-e ." installs the package as an editable python module -# the benefit is that the source code in the repo is the same code that is run when you launch orchestration -# instead of a new copy being installed elsewhere -pip install -e . -``` - -I prefer launching mongo-orchestration in the foreground so it's easily stopped with ctrl-C but the pid file can be found in the current directory. - -```sh -mongo-orchestration start --no-fork -``` - -Posting a configuration can look something like this: - -```sh -curl --silent --show-error --max-time 60 --fail http://localhost:8889/v1/sharded_clusters --data @"$DRIVERS_TOOLS/.evergreen/orchestration/configs/sharded_clusters/load-balancer.json" -``` - -Where `$DRIVERS_TOOLS` is the path to drivers evergreen tools. - ## About the Tests All of our test automation is powered by the [Mocha test framework][mocha]. @@ -300,8 +263,7 @@ The following steps will walk you through how to create and test a MongoDB Serve The following steps will walk you through how to start and test a load balancer. -1. Start a sharded cluster with `--setParameter featureFlagLoadBalancer=true` and `--setParameter loadBalancerPort=27051` flags passed to the mongos processes. - 1. Follow the instructions here to setup [mongo-orchestration]() +1. Start a sharded cluster. 
You can use the [cluster_setup.sh](tools/cluster_setup.sh) script to do so: `./test/tools/cluster_setup.sh sharded_cluster`. The tool should create a cluster with two mongos, so you have a URI similar to `MONGODB_URI=mongodb://host1,host2/`. 1. Create an environment variable named `MONGODB_URI` that stores the URI of the sharded cluster you just created. For example: `export MONGODB_URI="mongodb://host1,host2/"` 1. Install the HAProxy load balancer. For those on macOS, you can install HAProxy with `brew install haproxy`. 1. Start the load balancer by using the [run-load-balancer script](https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-load-balancer.sh) provided in drivers-evergreen-tools. @@ -392,9 +354,9 @@ The following steps will walk you through how to run the tests for CSFLE. 1. This will install all the dependencies needed to run a python kms_kmip simulated server 3. In 4 separate terminals launch the following: - `./kmstlsvenv/bin/python3 -u kms_kmip_server.py` # by default it always runs on port 5698 - - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 9000` - - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 9001` - - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 9002 --require_client_cert` + - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000` + - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001` + - `./kmstlsvenv/bin/python3 -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert` 4. 
Set the following environment variables: - `export KMIP_TLS_CA_FILE="${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem"` - `export KMIP_TLS_CERT_FILE="${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem"` From f017a072b1a1d8898b63963b8b06d7a839151073 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 10 Mar 2022 15:36:08 -0500 Subject: [PATCH 05/10] fix: undo port changes --- .../change-streams/change_stream.test.js | 9 ++++----- .../client_side_encryption.prose.test.js | 14 +++++++------- .../load-balancers/load_balancers.spec.test.js | 1 + 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/test/integration/change-streams/change_stream.test.js b/test/integration/change-streams/change_stream.test.js index 35e91112af..892bc9e215 100644 --- a/test/integration/change-streams/change_stream.test.js +++ b/test/integration/change-streams/change_stream.test.js @@ -1422,7 +1422,6 @@ describe('Change Streams', function () { let client; let coll; let startAfter; - let changeStream; beforeEach(function (done) { const configuration = this.configuration; @@ -1450,15 +1449,15 @@ describe('Change Streams', function () { }); }); - afterEach(async function () { - await changeStream.close(); - await client.close(); + afterEach(function (done) { + client.close(done); }); it('should work with events', { metadata: { requires: { topology: 'replicaset', mongodb: '>=4.1.1' } }, test: function (done) { - changeStream = coll.watch([], { startAfter }); + const changeStream = coll.watch([], { startAfter }); + this.defer(() => changeStream.close()); coll.insertOne({ x: 2 }, { writeConcern: { w: 'majority', j: true } }, err => { expect(err).to.not.exist; diff --git a/test/integration/client-side-encryption/client_side_encryption.prose.test.js b/test/integration/client-side-encryption/client_side_encryption.prose.test.js index ed51ba402a..d33ee1c2b0 100644 --- a/test/integration/client-side-encryption/client_side_encryption.prose.test.js +++ b/test/integration/client-side-encryption/client_side_encryption.prose.test.js @@ -1142,12 +1142,12 @@ describe('Client Side Encryption Prose Tests', metadata, function () { }; const clientNoTlsOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, null, '127.0.0.1:9002', '127.0.0.1:9002'), + kmsProviders: getKmsProviders(null, null, '127.0.0.1:8002', '127.0.0.1:8002'), tlsOptions: tlsCaOptions }; const clientWithTlsOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, null, '127.0.0.1:9002', '127.0.0.1:9002'), + kmsProviders: getKmsProviders(null, null, '127.0.0.1:8002', '127.0.0.1:8002'), tlsOptions: { aws: { tlsCAFile: process.env.KMIP_TLS_CA_FILE, @@ -1169,12 +1169,12 @@ describe('Client Side Encryption Prose Tests', metadata, function () { }; const clientWithTlsExpiredOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, '127.0.0.1:9000', '127.0.0.1:9000', '127.0.0.1:9000'), + kmsProviders: getKmsProviders(null, '127.0.0.1:8000', '127.0.0.1:8000', '127.0.0.1:8000'), tlsOptions: tlsCaOptions }; const clientWithInvalidHostnameOptions = { keyVaultNamespace, - kmsProviders: getKmsProviders(null, '127.0.0.1:9001', '127.0.0.1:9001', '127.0.0.1:9001'), + kmsProviders: getKmsProviders(null, '127.0.0.1:8001', '127.0.0.1:8001', '127.0.0.1:8001'), tlsOptions: tlsCaOptions }; const mongodbClientEncryption = this.configuration.mongodbClientEncryption; @@ -1245,10 +1245,10 @@ describe('Client Side Encryption Prose Tests', metadata, function () { const masterKey = { region: 'us-east-1', key: 
'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', - endpoint: '127.0.0.1:9002' + endpoint: '127.0.0.1:8002' }; - const masterKeyExpired = { ...masterKey, endpoint: '127.0.0.1:9000' }; - const masterKeyInvalidHostname = { ...masterKey, endpoint: '127.0.0.1:9001' }; + const masterKeyExpired = { ...masterKey, endpoint: '127.0.0.1:8000' }; + const masterKeyInvalidHostname = { ...masterKey, endpoint: '127.0.0.1:8001' }; it('should fail with no TLS', metadata, async function () { try { diff --git a/test/integration/load-balancers/load_balancers.spec.test.js b/test/integration/load-balancers/load_balancers.spec.test.js index ae8abc298d..1446a5e42c 100644 --- a/test/integration/load-balancers/load_balancers.spec.test.js +++ b/test/integration/load-balancers/load_balancers.spec.test.js @@ -61,5 +61,6 @@ const SKIP = [ ]; describe('Load Balancer Unified Tests', function () { + this.timeout(10000); runUnifiedSuite(loadSpecTests(path.join('load-balancers')), SKIP); }); From 4ff8642e7a07c159f8c964866e4eb98fd1d954bb Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 10 Mar 2022 16:23:19 -0500 Subject: [PATCH 06/10] test: unskip MMAPv1 error prose test --- .../retryable-writes/retryable_writes.spec.prose.test.ts | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts index 35008d13de..9f7e6d6127 100644 --- a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts +++ b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts @@ -17,6 +17,11 @@ describe('Retryable Writes Spec Prose', () => { let client; beforeEach(async function () { + if (this.configuration.buildInfo.versionArray[0] < 4) { + this.currentTest.skipReason = + 'configureFailPoint only works on server versions greater than 4'; + this.skip(); + } client = this.configuration.newClient(); await client.connect(); }); @@ -25,7 +30,7 @@ describe('Retryable Writes Spec Prose', () => { await client?.close(); }); - it.skip('retryable writes raise an exception when using the MMAPv1 storage engine', async () => { + it('retryable writes raise an exception when using the MMAPv1 storage engine', async () => { const failPoint = await client.db('admin').command({ configureFailPoint: 'failCommand', mode: { times: 1 }, @@ -52,5 +57,5 @@ describe('Retryable Writes Spec Prose', () => { 'message', 'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.' ); - }).skipReason = 'TODO, might need to limit server versions? 
3.6 fails, 4.x has different shape'; + }); }); From 3392dbfbf89e5c9f556a5f1bb302f592466b5cff Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Thu, 10 Mar 2022 17:58:08 -0500 Subject: [PATCH 07/10] test: only on replica_set --- .../retryable-writes/retryable_writes.spec.prose.test.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts index 9f7e6d6127..00d5c69fce 100644 --- a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts +++ b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai'; -import { MongoError, MongoServerError } from '../../../src'; +import { MongoError, MongoServerError, TopologyType } from '../../../src'; describe('Retryable Writes Spec Prose', () => { /** @@ -17,7 +17,10 @@ describe('Retryable Writes Spec Prose', () => { let client; beforeEach(async function () { - if (this.configuration.buildInfo.versionArray[0] < 4) { + if ( + this.configuration.buildInfo.versionArray[0] < 4 || + this.configuration.topologyType !== TopologyType.ReplicaSetWithPrimary + ) { this.currentTest.skipReason = 'configureFailPoint only works on server versions greater than 4'; this.skip(); From 019d7a7d62f879f0dbc35b7c189a4ca8ece11d48 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 11 Mar 2022 13:37:22 -0500 Subject: [PATCH 08/10] daria's comments --- global.d.ts | 1 - .../retryable_writes.spec.prose.test.ts | 2 +- test/unit/error.test.ts | 23 +++++++++++-------- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/global.d.ts b/global.d.ts index 4e1e26ec33..145acdf283 100644 --- a/global.d.ts +++ b/global.d.ts @@ -51,7 +51,6 @@ declare global { interface Test { metadata: MongoDBMetadataUI; - /** @deprecated Attach spec to a test if you need access to it in a beforeEach hook, not recommended?? 
*/ spec: Record; } diff --git a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts index 00d5c69fce..2171885f85 100644 --- a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts +++ b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts @@ -53,7 +53,7 @@ describe('Retryable Writes Spec Prose', () => { .catch(error => error); expect(error).to.exist; - expect(error).to.be.instanceOf(MongoServerError); + expect(error).that.is.instanceOf(MongoServerError); expect(error).to.have.property('originalError').that.instanceOf(MongoError); expect(error.originalError).to.have.property('code', 20); expect(error).to.have.property( diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index c7a6b48524..ff3337c11a 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -406,6 +406,9 @@ describe('MongoErrors', () => { // 8 - below server version 4.4 // 9 - above server version 4.4 + const ABOVE_4_4 = 9; + const BELOW_4_4 = 8; + const tests: { description: string; result: boolean; @@ -416,25 +419,25 @@ describe('MongoErrors', () => { description: 'a plain error', result: false, error: new Error('do not retry me!'), - maxWireVersion: 8 + maxWireVersion: BELOW_4_4 }, { description: 'a MongoError with no code nor label', result: false, error: new MongoError('do not retry me!'), - maxWireVersion: 8 + maxWireVersion: BELOW_4_4 }, { description: 'network error', result: true, error: new MongoNetworkError('socket bad, try again'), - maxWireVersion: 8 + maxWireVersion: BELOW_4_4 }, { description: 'a MongoWriteConcernError with no code nor label', result: false, error: new MongoWriteConcernError({ message: 'empty wc error' }), - maxWireVersion: 8 + maxWireVersion: BELOW_4_4 }, { description: 'a MongoWriteConcernError with a random label', @@ -443,31 +446,31 @@ describe('MongoErrors', () => { { message: 'random label' }, { errorLabels: ['myLabel'] } ), - maxWireVersion: 8 + maxWireVersion: BELOW_4_4 }, { description: 'a MongoWriteConcernError with a retryable code above server 4.4', result: false, error: new MongoWriteConcernError({}, { code: 262 }), - maxWireVersion: 9 + maxWireVersion: ABOVE_4_4 }, { description: 'a MongoWriteConcernError with a retryable code below server 4.4', result: true, error: new MongoWriteConcernError({}, { code: 262 }), - maxWireVersion: 8 + maxWireVersion: BELOW_4_4 }, { description: 'a MongoWriteConcernError with a RetryableWriteError label below server 4.4', result: true, error: new MongoWriteConcernError({}, { errorLabels: ['RetryableWriteError'] }), - maxWireVersion: 8 + maxWireVersion: BELOW_4_4 }, { description: 'a MongoWriteConcernError with a RetryableWriteError label above server 4.4', result: false, error: new MongoWriteConcernError({}, { errorLabels: ['RetryableWriteError'] }), - maxWireVersion: 9 + maxWireVersion: ABOVE_4_4 }, { description: 'any MongoError with a RetryableWriteError label', @@ -479,7 +482,7 @@ describe('MongoErrors', () => { error.addErrorLabel('RetryableWriteError'); return error; })(), - maxWireVersion: 9 + maxWireVersion: ABOVE_4_4 } ]; for (const { description, result, error, maxWireVersion } of tests) { From 0264e4702f40436917ad9cd4bcc75e5c708ee239 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 11 Mar 2022 13:59:34 -0500 Subject: [PATCH 09/10] fix: Bailey's comments --- src/cmap/connection.ts | 8 +++-- src/cursor/abstract_cursor.ts | 2 +- src/operations/execute_operation.ts | 6 ++-- 
src/operations/operation.ts | 8 ++--- src/sdam/topology.ts | 17 +--------- test/unit/error.test.ts | 4 +-- test/unit/sdam/topology.test.js | 50 +++++++++++++++++------------ 7 files changed, 45 insertions(+), 50 deletions(-) diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index cfec2a6c15..e5665e9116 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -109,9 +109,11 @@ export interface CommandOptions extends BSONSerializeOptions { noResponse?: boolean; omitReadPreference?: boolean; - // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint from executeOperation that the txnNum should be applied to this command. - // Applying a session to a command should happen as part of command construction, most likely in the CommandOperation#executeCommand method, - // where we have access to the details we need to determine if a txnNum should also be applied. + // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint + // from executeOperation that the txnNum should be applied to this command. + // Applying a session to a command should happen as part of command construction, + // most likely in the CommandOperation#executeCommand method, where we have access to + // the details we need to determine if a txnNum should also be applied. willRetryWrite?: true; writeConcern?: WriteConcern; diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts index c4969abc27..ea8e769495 100644 --- a/src/cursor/abstract_cursor.ts +++ b/src/cursor/abstract_cursor.ts @@ -655,7 +655,7 @@ function next(cursor: AbstractCursor, blocking: boolean, callback: Callback { + return cursor[kTopology].selectServer(ReadPreference.primaryPreferred, {}, err => { if (err) return callback(err); return next(cursor, blocking, callback); }); diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index 225a1be4d6..c1750726fc 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -80,7 +80,7 @@ export function executeOperation< return maybePromise(callback, callback => { if (topology.shouldCheckForSessionSupport()) { - return topology.selectServer(ReadPreference.primaryPreferred, err => { + return topology.selectServer(ReadPreference.primaryPreferred, {}, err => { if (err) return callback(err); executeOperation(topology, operation, callback); @@ -89,7 +89,7 @@ export function executeOperation< // The driver sessions spec mandates that we implicitly create sessions for operations // that are not explicitly provided with a session. 
- let session: ClientSession | undefined = operation.session; + let session = operation.session; let owner: symbol | undefined; if (topology.hasSessionSupport()) { if (session == null) { @@ -125,7 +125,7 @@ export function executeOperation< function executeWithServerSelection( topology: Topology, - session: ClientSession, + session: ClientSession | undefined, operation: AbstractOperation, callback: Callback ) { diff --git a/src/operations/operation.ts b/src/operations/operation.ts index d4ca5d011d..82265d06f3 100644 --- a/src/operations/operation.ts +++ b/src/operations/operation.ts @@ -59,7 +59,7 @@ export abstract class AbstractOperation { // TODO: Each operation defines its own options, there should be better typing here options: Document; - [kSession]: ClientSession; + [kSession]: ClientSession | undefined; constructor(options: OperationOptions = {}) { this.readPreference = this.hasAspect(Aspect.WRITE_OPERATION) @@ -69,9 +69,7 @@ export abstract class AbstractOperation { // Pull the BSON serialize options from the already-resolved options this.bsonOptions = resolveBSONOptions(options); - if (options.session) { - this[kSession] = options.session; - } + this[kSession] = options.session != null ? options.session : undefined; this.options = options; this.bypassPinningCheck = !!options.bypassPinningCheck; @@ -93,7 +91,7 @@ export abstract class AbstractOperation { return ctor.aspects.has(aspect); } - get session(): ClientSession { + get session(): ClientSession | undefined { return this[kSession]; } diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 6b23547288..ccb8371332 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -553,27 +553,11 @@ export class Topology extends TypedEventEmitter { * @param callback - The callback used to indicate success or failure * @returns An instance of a `Server` meeting the criteria of the predicate provided */ - selectServer(options: SelectServerOptions, callback: Callback): void; - selectServer( - selector: string | ReadPreference | ServerSelector, - callback: Callback - ): void; selectServer( selector: string | ReadPreference | ServerSelector, options: SelectServerOptions, callback: Callback - ): void; - selectServer( - selector: string | ReadPreference | ServerSelector | SelectServerOptions, - _options?: SelectServerOptions | Callback, - _callback?: Callback ): void { - let options = _options as SelectServerOptions; - const callback = (_callback ?? 
_options) as Callback; - if (typeof options === 'function') { - options = {}; - } - let serverSelector; if (typeof selector !== 'function') { if (typeof selector === 'string') { @@ -671,6 +655,7 @@ export class Topology extends TypedEventEmitter { this.selectServer( readPreferenceServerSelector(ReadPreference.primaryPreferred), + {}, (err, server) => { if (err || !server) { if (typeof callback === 'function') callback(err); diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts index ff3337c11a..9c66b92ed2 100644 --- a/test/unit/error.test.ts +++ b/test/unit/error.test.ts @@ -337,7 +337,7 @@ describe('MongoErrors', () => { return cleanup(err); } - topology.selectServer('primary', (err, server) => { + topology.selectServer('primary', {}, (err, server) => { expect(err).to.not.exist; server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}, err => { @@ -378,7 +378,7 @@ describe('MongoErrors', () => { return cleanup(err); } - topology.selectServer('primary', (err, server) => { + topology.selectServer('primary', {}, (err, server) => { expect(err).to.not.exist; server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}, err => { diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js index e642302e7a..7d9d28d6c1 100644 --- a/test/unit/sdam/topology.test.js +++ b/test/unit/sdam/topology.test.js @@ -173,7 +173,7 @@ describe('Topology (unit)', function () { topology.connect(err => { expect(err).to.not.exist; - topology.selectServer('primary', (err, server) => { + topology.selectServer('primary', {}, (err, server) => { expect(err).to.not.exist; server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }, (err, result) => { @@ -209,7 +209,7 @@ describe('Topology (unit)', function () { topology.connect(err => { expect(err).to.not.exist; - topology.selectServer('primary', (err, server) => { + topology.selectServer('primary', {}, (err, server) => { expect(err).to.not.exist; let serverDescription; @@ -245,7 +245,7 @@ describe('Topology (unit)', function () { topology.connect(err => { expect(err).to.not.exist; - topology.selectServer('primary', (err, server) => { + topology.selectServer('primary', {}, (err, server) => { expect(err).to.not.exist; let serverDescription; @@ -281,7 +281,7 @@ describe('Topology (unit)', function () { topology.connect(err => { expect(err).to.not.exist; - topology.selectServer('primary', (err, server) => { + topology.selectServer('primary', {}, (err, server) => { expect(err).to.not.exist; let serverDescription; @@ -472,17 +472,22 @@ describe('Topology (unit)', function () { }); topology.connect(() => { - topology.selectServer(ReadPreference.secondary, { serverSelectionTimeoutMS: 1000 }, err => { - expect(err).to.exist; - expect(err).to.match(/Server selection timed out/); - expect(err).to.have.property('reason'); + topology.selectServer( + ReadPreference.secondary, + {}, + { serverSelectionTimeoutMS: 1000 }, + err => { + expect(err).to.exist; + expect(err).to.match(/Server selection timed out/); + expect(err).to.have.property('reason'); - // When server is created `connect` is called on the monitor. When server selection - // occurs `requestCheck` will be called for an immediate check. - expect(requestCheck).property('callCount').to.equal(1); + // When server is created `connect` is called on the monitor. When server selection + // occurs `requestCheck` will be called for an immediate check. 
+ expect(requestCheck).property('callCount').to.equal(1); - topology.close(done); - }); + topology.close(done); + } + ); }); }); @@ -494,11 +499,16 @@ describe('Topology (unit)', function () { }); topology.close(() => { - topology.selectServer(ReadPreference.primary, { serverSelectionTimeoutMS: 2000 }, err => { - expect(err).to.exist; - expect(err).to.match(/Topology is closed/); - done(); - }); + topology.selectServer( + ReadPreference.primary, + {}, + { serverSelectionTimeoutMS: 2000 }, + err => { + expect(err).to.exist; + expect(err).to.match(/Topology is closed/); + done(); + } + ); }); }); @@ -550,11 +560,11 @@ describe('Topology (unit)', function () { preventSelection = true; for (let i = 0; i < toSelect - 1; ++i) { - topology.selectServer(i % 5 === 0 ? failingSelector : anySelector, finish); + topology.selectServer(i % 5 === 0 ? failingSelector : anySelector, {}, finish); } preventSelection = false; - topology.selectServer(anySelector, finish); + topology.selectServer(anySelector, {}, finish); }); }); }); From 3cdebcbcec0ba0da7b932c90df10a28438ae5a19 Mon Sep 17 00:00:00 2001 From: Neal Beeken Date: Fri, 11 Mar 2022 14:31:15 -0500 Subject: [PATCH 10/10] fix: unit --- test/unit/sdam/topology.test.js | 38 ++++++++++++--------------------- 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js index 7d9d28d6c1..1f690c5ec7 100644 --- a/test/unit/sdam/topology.test.js +++ b/test/unit/sdam/topology.test.js @@ -472,22 +472,17 @@ describe('Topology (unit)', function () { }); topology.connect(() => { - topology.selectServer( - ReadPreference.secondary, - {}, - { serverSelectionTimeoutMS: 1000 }, - err => { - expect(err).to.exist; - expect(err).to.match(/Server selection timed out/); - expect(err).to.have.property('reason'); + topology.selectServer(ReadPreference.secondary, { serverSelectionTimeoutMS: 1000 }, err => { + expect(err).to.exist; + expect(err).to.match(/Server selection timed out/); + expect(err).to.have.property('reason'); - // When server is created `connect` is called on the monitor. When server selection - // occurs `requestCheck` will be called for an immediate check. - expect(requestCheck).property('callCount').to.equal(1); + // When server is created `connect` is called on the monitor. When server selection + // occurs `requestCheck` will be called for an immediate check. + expect(requestCheck).property('callCount').to.equal(1); - topology.close(done); - } - ); + topology.close(done); + }); }); }); @@ -499,16 +494,11 @@ describe('Topology (unit)', function () { }); topology.close(() => { - topology.selectServer( - ReadPreference.primary, - {}, - { serverSelectionTimeoutMS: 2000 }, - err => { - expect(err).to.exist; - expect(err).to.match(/Topology is closed/); - done(); - } - ); + topology.selectServer(ReadPreference.primary, { serverSelectionTimeoutMS: 2000 }, err => { + expect(err).to.exist; + expect(err).to.match(/Topology is closed/); + done(); + }); }); });