diff --git a/.eslintrc.json b/.eslintrc.json
index bc8863df6d..3eb33dd8b9 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -66,6 +66,10 @@
{
"name": ".",
"message": "Please import directly from the relevant file instead."
+ },
+ {
+ "name": "..",
+ "message": "Please import directly from the relevant file instead."
}
]
}
@@ -158,7 +162,8 @@
"parser": "@typescript-eslint/parser",
"rules": {
"no-console": "off",
- "no-restricted-syntax": "off"
+ "no-restricted-syntax": "off",
+ "@typescript-eslint/ban-ts-comment": "off"
}
},
{
diff --git a/etc/charts/README.md b/etc/charts/README.md
deleted file mode 100644
index c2086d8c71..0000000000
--- a/etc/charts/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Charts and mermaid code
-
-The `mermaid` directory contains the [mermaid](https://mermaid-js.github.io/mermaid/#/) files which serve as the source code for the svg files included in the `../errors.md`
-
-To generate these files, there is an included script, `build_images.sh` which builds images for all the mermaid files in the `mermaid` directory.
-
-To use this script, the [mermaid cli](https://github.com/mermaid-js/mermaid-cli) must be installed and be accessible via your $PATH variable.
-
-**Note on mermaid installation**
-
-It is preferable to install mermaid via npm rather than brew since brew will install node as a dependency which could interfere with nvm.
diff --git a/etc/charts/build_images.sh b/etc/charts/build_images.sh
deleted file mode 100755
index 4a9a34f7dc..0000000000
--- a/etc/charts/build_images.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#! /bin/bash
-
-echo "Building svgs..."
-cd mermaid
-for f in *.mmd
-do
- echo "Processing $f"
- outname="${f%%.*}"
- mmdc -i $f -o ../imgs/$outname.svg &
-done
-wait
-
-echo "Done"
diff --git a/etc/charts/imgs/MongoError.svg b/etc/charts/imgs/MongoError.svg
deleted file mode 100644
index 13d86e58c0..0000000000
--- a/etc/charts/imgs/MongoError.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/etc/charts/mermaid/MongoError.mmd b/etc/charts/mermaid/MongoError.mmd
deleted file mode 100644
index 0592b5c564..0000000000
--- a/etc/charts/mermaid/MongoError.mmd
+++ /dev/null
@@ -1,22 +0,0 @@
-graph TD
- MongoError --- MongoDriverError
- MongoError --- MongoNetworkError
- MongoError --- MongoServerError
- MongoError --- MongoSystemError
- MongoDriverError --- MongoAPIError
- MongoDriverError --- MongoRuntimeError
-
-linkStyle 0 stroke:#116149
-linkStyle 1 stroke:#116149
-linkStyle 2 stroke:#116149
-linkStyle 3 stroke:#116149
-linkStyle 4 stroke:#116149
-linkStyle 5 stroke:#116149
-
-style MongoError fill:#13aa52,stroke:#21313c,color:#FAFBFC
-style MongoSystemError fill:#13aa52,stroke:#21313c,color:#FAFBFC
-style MongoNetworkError fill:#13aa52,stroke:#21313c,color:#FAFBFC
-style MongoServerError fill:#13aa52,stroke:#21313c,color:#FAFBFC
-style MongoDriverError fill:#13aa52,stroke:#21313c,color:#FAFBFC
-style MongoAPIError fill:#13aa52,stroke:#21313c,color:#FAFBFC
-style MongoRuntimeError fill:#13aa52,stroke:#21313c,color:#FAFBFC
diff --git a/etc/notes/errors.md b/etc/notes/errors.md
index 6feee91f72..03c47e7373 100644
--- a/etc/notes/errors.md
+++ b/etc/notes/errors.md
@@ -14,6 +14,7 @@
- [`MongoDriverError`](#MongoDriverError)
- [`MongoAPIError`](#MongoAPIError)
- [`MongoRuntimeError`](#MongoRuntimeError)
+ - [`MongoUnexpectedServerResponseError`](#MongoUnexpectedServerResponseError)
- [`MongoNetworkError`](#MongoNetworkError)
- [`MongoServerError`](#MongoServerError)
- [`MongoSystemError`](#MongoSystemError)
@@ -38,7 +39,31 @@ There are four main error classes which stem from `MongoError`: `MongoDriverErro
The base class from which all errors in the Node driver subclass.
`MongoError` should **never** be be directly instantiated.
-![(MongoError hierarchy tree)](../charts/imgs/MongoError.svg)
+
+```mermaid
+graph TD
+ MongoError --- MongoDriverError
+ MongoError --- MongoNetworkError
+ MongoError --- MongoServerError
+ MongoError --- MongoSystemError
+ MongoDriverError --- MongoAPIError
+ MongoDriverError --- MongoRuntimeError
+
+linkStyle 0 stroke:#116149
+linkStyle 1 stroke:#116149
+linkStyle 2 stroke:#116149
+linkStyle 3 stroke:#116149
+linkStyle 4 stroke:#116149
+linkStyle 5 stroke:#116149
+
+style MongoError fill:#13aa52,stroke:#21313c,color:#FAFBFC
+style MongoSystemError fill:#13aa52,stroke:#21313c,color:#FAFBFC
+style MongoNetworkError fill:#13aa52,stroke:#21313c,color:#FAFBFC
+style MongoServerError fill:#13aa52,stroke:#21313c,color:#FAFBFC
+style MongoDriverError fill:#13aa52,stroke:#21313c,color:#FAFBFC
+style MongoAPIError fill:#13aa52,stroke:#21313c,color:#FAFBFC
+style MongoRuntimeError fill:#13aa52,stroke:#21313c,color:#FAFBFC
+```
Children of `MongoError` include:
@@ -90,6 +115,15 @@ This class should **never** be directly instantiated.
| **MongoChangeStreamError** | Thrown when an error is encountered when operating on a ChangeStream. |
| **MongoGridFSStreamError** | Thrown when an unexpected state is reached when operating on a GridFS Stream. |
| **MongoGridFSChunkError** | Thrown when a malformed or invalid chunk is encountered when reading from a GridFS Stream. |
+| **MongoUnexpectedServerResponseError** | Thrown when the driver receives a **parsable** response it did not expect from the server. |
+
+### MongoUnexpectedServerResponseError
+
+Intended for the scenario where MongoDB returns an unexpected response in relation to some state the driver is in.
+This error should **NOT** represent a response that couldn't be parsed due to errors in protocol formatting.
+
+Ex. Server selection results in a feature detection change: this is not usually an unexpected response, but if while retrying an operation serverSelection returns a server with a lower wireVersion than expected, we can no longer proceed with the retry, so the response is unexpected in that case.
+
### `MongoNetworkError`
diff --git a/global.d.ts b/global.d.ts
index 3f7f03eb62..145acdf283 100644
--- a/global.d.ts
+++ b/global.d.ts
@@ -1,32 +1,40 @@
import { OneOrMore } from './src/mongo_types';
import type { TestConfiguration } from './test/tools/runner/config';
-type WithExclusion<T extends string> = `!${T}`
+type WithExclusion<T extends string> = `!${T}`;
/** Defined in test/tools/runner/filters/mongodb_topology_filter.js (topologyTypeToString) */
type TopologyTypes = 'single' | 'replicaset' | 'sharded' | 'load-balanced';
-type TopologyTypeRequirement = OneOrMore<TopologyTypes> | OneOrMore<WithExclusion<TopologyTypes>>
+type TopologyTypeRequirement = OneOrMore<TopologyTypes> | OneOrMore<WithExclusion<TopologyTypes>>;
declare global {
-interface MongoDBMetadataUI {
- requires?: {
- topology?: TopologyTypeRequirement;
- mongodb?: string;
- os?: NodeJS.Platform | `!${NodeJS.Platform}`;
- apiVersion?: '1';
- clientSideEncryption?: boolean;
- serverless?: 'forbid' | 'allow' | 'require';
- auth: 'enabled' | 'disabled'
- };
+ interface MongoDBMetadataUI {
+ requires?: {
+ topology?: TopologyTypeRequirement;
+ mongodb?: string;
+ os?: NodeJS.Platform | `!${NodeJS.Platform}`;
+ apiVersion?: '1';
+ clientSideEncryption?: boolean;
+ serverless?: 'forbid' | 'allow' | 'require';
+ auth: 'enabled' | 'disabled';
+ };
- sessions?: {
- skipLeakTests?: boolean;
- };
-}
+ sessions?: {
+ skipLeakTests?: boolean;
+ };
+ }
+
+ interface MetadataAndTest<Fn> {
+ metadata: MongoDBMetadataUI;
+ test: Fn;
+ }
+
+ namespace Chai {
+ interface Assertion {
+ /** @deprecated Used only by the legacy spec runner, the unified runner implements the unified spec expectations */
+ matchMongoSpec: (anything: any) => Chai.Assertion;
+ }
+ }
-interface MetadataAndTest<Fn> {
- metadata: MongoDBMetadataUI;
- test: Fn;
-}
namespace Mocha {
interface TestFunction {
(title: string, metadata: MongoDBMetadataUI, fn: Mocha.Func): Mocha.Test;
@@ -42,6 +50,8 @@ interface MetadataAndTest {
interface Test {
metadata: MongoDBMetadataUI;
+
+ spec: Record<string, any>;
}
interface Runnable {
diff --git a/src/bulk/common.ts b/src/bulk/common.ts
index 3ca0ece0be..1fed3680ad 100644
--- a/src/bulk/common.ts
+++ b/src/bulk/common.ts
@@ -1,4 +1,3 @@
-import type { ClientSession, Server } from '..';
import {
BSONSerializeOptions,
Document,
@@ -24,7 +23,9 @@ import { InsertOperation } from '../operations/insert';
import { AbstractOperation, Hint } from '../operations/operation';
import { makeUpdateStatement, UpdateOperation, UpdateStatement } from '../operations/update';
import { PromiseProvider } from '../promise_provider';
+import type { Server } from '../sdam/server';
import type { Topology } from '../sdam/topology';
+import type { ClientSession } from '../sessions';
import {
applyRetryableWrites,
Callback,
@@ -935,7 +936,7 @@ class BulkWriteShimOperation extends AbstractOperation {
this.bulkOperation = bulkOperation;
}
- execute(server: Server, session: ClientSession, callback: Callback): void {
+ execute(server: Server, session: ClientSession | undefined, callback: Callback): void {
if (this.options.session == null) {
// An implicit session could have been created by 'executeOperation'
// So if we stick it on finalOptions here, each bulk operation
@@ -1289,10 +1290,7 @@ export abstract class BulkOperationBase {
* Handles the write error before executing commands
* @internal
*/
- handleWriteError(
- callback: Callback,
- writeResult: BulkWriteResult
- ): boolean | undefined {
+ handleWriteError(callback: Callback, writeResult: BulkWriteResult): boolean {
if (this.s.bulkResult.writeErrors.length > 0) {
const msg = this.s.bulkResult.writeErrors[0].errmsg
? this.s.bulkResult.writeErrors[0].errmsg
@@ -1317,7 +1315,8 @@ export abstract class BulkOperationBase {
callback(new MongoBulkWriteError(writeConcernError, writeResult));
return true;
}
- return;
+
+ return false;
}
abstract addToOperationsList(
diff --git a/src/bulk/unordered.ts b/src/bulk/unordered.ts
index f808ccc06c..b4227e8d03 100644
--- a/src/bulk/unordered.ts
+++ b/src/bulk/unordered.ts
@@ -13,7 +13,7 @@ export class UnorderedBulkOperation extends BulkOperationBase {
super(collection, options, false);
}
- override handleWriteError(callback: Callback, writeResult: BulkWriteResult): boolean | undefined {
+ override handleWriteError(callback: Callback, writeResult: BulkWriteResult): boolean {
if (this.s.batches.length) {
return false;
}
diff --git a/src/cmap/connect.ts b/src/cmap/connect.ts
index 7b007b5fa2..0520c5d07e 100644
--- a/src/cmap/connect.ts
+++ b/src/cmap/connect.ts
@@ -16,14 +16,7 @@ import {
MongoRuntimeError,
MongoServerError
} from '../error';
-import {
- Callback,
- CallbackWithType,
- ClientMetadata,
- HostAddress,
- makeClientMetadata,
- ns
-} from '../utils';
+import { Callback, ClientMetadata, HostAddress, makeClientMetadata, ns } from '../utils';
import { AuthContext, AuthProvider } from './auth/auth_provider';
import { GSSAPI } from './auth/gssapi';
import { MongoCR } from './auth/mongocr';
@@ -336,10 +329,7 @@ const SOCKET_ERROR_EVENT_LIST = ['error', 'close', 'timeout', 'parseError'] as c
type ErrorHandlerEventName = typeof SOCKET_ERROR_EVENT_LIST[number] | 'cancel';
const SOCKET_ERROR_EVENTS = new Set(SOCKET_ERROR_EVENT_LIST);
-function makeConnection(
- options: MakeConnectionOptions,
- _callback: CallbackWithType
-) {
+function makeConnection(options: MakeConnectionOptions, _callback: Callback) {
const useTLS = options.tls ?? false;
const keepAlive = options.keepAlive ?? true;
const socketTimeoutMS = options.socketTimeoutMS ?? Reflect.get(options, 'socketTimeout') ?? 0;
diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts
index 5e0ecb7968..e5665e9116 100644
--- a/src/cmap/connection.ts
+++ b/src/cmap/connection.ts
@@ -33,7 +33,7 @@ import {
now,
uuidV4
} from '../utils';
-import type { W, WriteConcern, WriteConcernOptions } from '../write_concern';
+import type { WriteConcern } from '../write_concern';
import type { MongoCredentials } from './auth/mongo_credentials';
import {
CommandFailedEvent,
@@ -109,11 +109,14 @@ export interface CommandOptions extends BSONSerializeOptions {
noResponse?: boolean;
omitReadPreference?: boolean;
- // FIXME: NODE-2802
- willRetryWrite?: boolean;
+ // TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint
+ // from executeOperation that the txnNum should be applied to this command.
+ // Applying a session to a command should happen as part of command construction,
+ // most likely in the CommandOperation#executeCommand method, where we have access to
+ // the details we need to determine if a txnNum should also be applied.
+ willRetryWrite?: true;
- // FIXME: NODE-2781
- writeConcern?: WriteConcernOptions | WriteConcern | W;
+ writeConcern?: WriteConcern;
}
/** @internal */
diff --git a/src/cursor/abstract_cursor.ts b/src/cursor/abstract_cursor.ts
index c4969abc27..ea8e769495 100644
--- a/src/cursor/abstract_cursor.ts
+++ b/src/cursor/abstract_cursor.ts
@@ -655,7 +655,7 @@ function next(cursor: AbstractCursor, blocking: boolean, callback: Callback {
+ return cursor[kTopology].selectServer(ReadPreference.primaryPreferred, {}, err => {
if (err) return callback(err);
return next(cursor, blocking, callback);
});
diff --git a/src/error.ts b/src/error.ts
index af29237c31..30ddd47d0a 100644
--- a/src/error.ts
+++ b/src/error.ts
@@ -13,21 +13,24 @@ const kErrorLabels = Symbol('errorLabels');
* The legacy error message from the server that indicates the node is not a writable primary
* https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering
*/
-export const LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = 'not master';
+export const LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE = new RegExp('not master', 'i');
/**
* @internal
* The legacy error message from the server that indicates the node is not a primary or secondary
* https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering
*/
-export const LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = 'not master or secondary';
+export const LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE = new RegExp(
+ 'not master or secondary',
+ 'i'
+);
/**
* @internal
* The error message from the server that indicates the node is recovering
* https://github.com/mongodb/specifications/blob/b07c26dc40d04ac20349f989db531c9845fdd755/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-writable-primary-and-node-is-recovering
*/
-export const NODE_IS_RECOVERING_ERROR_MESSAGE = 'node is recovering';
+export const NODE_IS_RECOVERING_ERROR_MESSAGE = new RegExp('node is recovering', 'i');
/** @internal MongoDB Error Codes */
export const MONGODB_ERROR_CODES = Object.freeze({
@@ -80,6 +83,17 @@ export const GET_MORE_RESUMABLE_CODES = new Set([
MONGODB_ERROR_CODES.CursorNotFound
]);
+/** @public */
+export const MongoErrorLabel = Object.freeze({
+ RetryableWriteError: 'RetryableWriteError',
+ TransientTransactionError: 'TransientTransactionError',
+ UnknownTransactionCommitResult: 'UnknownTransactionCommitResult',
+ ResumableChangeStreamError: 'ResumableChangeStreamError'
+} as const);
+
+/** @public */
+export type MongoErrorLabel = typeof MongoErrorLabel[keyof typeof MongoErrorLabel];
+
/** @public */
export interface ErrorDescription extends Document {
message?: string;
@@ -238,7 +252,7 @@ export class MongoRuntimeError extends MongoDriverError {
}
/**
- * An error generated when a batch command is reexecuted after one of the commands in the batch
+ * An error generated when a batch command is re-executed after one of the commands in the batch
* has failed
*
* @public
@@ -403,6 +417,32 @@ export class MongoGridFSChunkError extends MongoRuntimeError {
}
}
+/**
+ * An error generated when a **parsable** unexpected response comes from the server.
+ * This is generally an error where the driver in a state expecting a certain behavior to occur in
+ * the next message from MongoDB but it receives something else.
+ * This error **does not** represent an issue with wire message formatting.
+ *
+ * #### Example
+ * When an operation fails, it is the driver's job to retry it. It must perform serverSelection
+ * again to make sure that it attempts the operation against a server in a good state. If server
+ * selection returns a server that does not support retryable operations, this error is used.
+ * This scenario is unlikely as retryable support would also have been determined on the first attempt
+ * but it is possible the state change could report a selectable server that does not support retries.
+ *
+ * @public
+ * @category Error
+ */
+export class MongoUnexpectedServerResponseError extends MongoRuntimeError {
+ constructor(message: string) {
+ super(message);
+ }
+
+ override get name(): string {
+ return 'MongoUnexpectedServerResponseError';
+ }
+}
+
/**
* An error thrown when the user attempts to add options to a cursor that has already been
* initialized
@@ -689,8 +729,8 @@ export class MongoWriteConcernError extends MongoServerError {
}
}
-// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
-const RETRYABLE_ERROR_CODES = new Set([
+// https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.rst#retryable-error
+const RETRYABLE_READ_ERROR_CODES = new Set([
MONGODB_ERROR_CODES.HostUnreachable,
MONGODB_ERROR_CODES.HostNotFound,
MONGODB_ERROR_CODES.NetworkTimeout,
@@ -704,41 +744,74 @@ const RETRYABLE_ERROR_CODES = new Set([
MONGODB_ERROR_CODES.NotPrimaryOrSecondary
]);
+// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
const RETRYABLE_WRITE_ERROR_CODES = new Set([
- MONGODB_ERROR_CODES.InterruptedAtShutdown,
- MONGODB_ERROR_CODES.InterruptedDueToReplStateChange,
- MONGODB_ERROR_CODES.NotWritablePrimary,
- MONGODB_ERROR_CODES.NotPrimaryNoSecondaryOk,
- MONGODB_ERROR_CODES.NotPrimaryOrSecondary,
- MONGODB_ERROR_CODES.PrimarySteppedDown,
- MONGODB_ERROR_CODES.ShutdownInProgress,
- MONGODB_ERROR_CODES.HostNotFound,
- MONGODB_ERROR_CODES.HostUnreachable,
- MONGODB_ERROR_CODES.NetworkTimeout,
- MONGODB_ERROR_CODES.SocketException,
+ ...RETRYABLE_READ_ERROR_CODES,
MONGODB_ERROR_CODES.ExceededTimeLimit
]);
-export function isRetryableEndTransactionError(error: MongoError): boolean {
- return error.hasErrorLabel('RetryableWriteError');
-}
+export function needsRetryableWriteLabel(error: Error, maxWireVersion: number): boolean {
+ if (maxWireVersion >= 9) {
+ // 4.4+ servers attach their own retryable write error
+ return false;
+ }
+ // pre-4.4 server, then the driver adds an error label for every valid case
+ // execute operation will only inspect the label, code/message logic is handled here
+
+ if (error instanceof MongoNetworkError) {
+ return true;
+ }
+
+ if (error instanceof MongoError && error.hasErrorLabel(MongoErrorLabel.RetryableWriteError)) {
+ // Before 4.4 the error label can be one way of identifying retry
+ // so we can return true if we have the label, but fall back to code checking below
+ return true;
+ }
-export function isRetryableWriteError(error: MongoError): boolean {
if (error instanceof MongoWriteConcernError) {
return RETRYABLE_WRITE_ERROR_CODES.has(error.result?.code ?? error.code ?? 0);
}
- return typeof error.code === 'number' && RETRYABLE_WRITE_ERROR_CODES.has(error.code);
+
+ if (error instanceof MongoError && typeof error.code === 'number') {
+ return RETRYABLE_WRITE_ERROR_CODES.has(error.code);
+ }
+
+ const isNotWritablePrimaryError = LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.test(error.message);
+ if (isNotWritablePrimaryError) {
+ return true;
+ }
+
+ const isNodeIsRecoveringError = NODE_IS_RECOVERING_ERROR_MESSAGE.test(error.message);
+ if (isNodeIsRecoveringError) {
+ return true;
+ }
+
+ return false;
}
/** Determines whether an error is something the driver should attempt to retry */
-export function isRetryableError(error: MongoError): boolean {
- return (
- // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
- (typeof error.code === 'number' && RETRYABLE_ERROR_CODES.has(error.code!)) ||
- error instanceof MongoNetworkError ||
- !!error.message.match(new RegExp(LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE)) ||
- !!error.message.match(new RegExp(NODE_IS_RECOVERING_ERROR_MESSAGE))
- );
+export function isRetryableReadError(error: MongoError): boolean {
+ const hasRetryableErrorCode =
+ typeof error.code === 'number' ? RETRYABLE_READ_ERROR_CODES.has(error.code) : false;
+ if (hasRetryableErrorCode) {
+ return true;
+ }
+
+ if (error instanceof MongoNetworkError) {
+ return true;
+ }
+
+ const isNotWritablePrimaryError = LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.test(error.message);
+ if (isNotWritablePrimaryError) {
+ return true;
+ }
+
+ const isNodeIsRecoveringError = NODE_IS_RECOVERING_ERROR_MESSAGE.test(error.message);
+ if (isNodeIsRecoveringError) {
+ return true;
+ }
+
+ return false;
}
const SDAM_RECOVERING_CODES = new Set([
@@ -749,7 +822,7 @@ const SDAM_RECOVERING_CODES = new Set([
MONGODB_ERROR_CODES.NotPrimaryOrSecondary
]);
-const SDAM_NOTPRIMARY_CODES = new Set([
+const SDAM_NOT_PRIMARY_CODES = new Set([
MONGODB_ERROR_CODES.NotWritablePrimary,
MONGODB_ERROR_CODES.NotPrimaryNoSecondaryOk,
MONGODB_ERROR_CODES.LegacyNotPrimary
@@ -767,22 +840,22 @@ function isRecoveringError(err: MongoError) {
}
return (
- new RegExp(LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE).test(err.message) ||
- new RegExp(NODE_IS_RECOVERING_ERROR_MESSAGE).test(err.message)
+ LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE.test(err.message) ||
+ NODE_IS_RECOVERING_ERROR_MESSAGE.test(err.message)
);
}
function isNotWritablePrimaryError(err: MongoError) {
if (typeof err.code === 'number') {
// If any error code exists, we ignore the error.message
- return SDAM_NOTPRIMARY_CODES.has(err.code);
+ return SDAM_NOT_PRIMARY_CODES.has(err.code);
}
if (isRecoveringError(err)) {
return false;
}
- return new RegExp(LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE).test(err.message);
+ return LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.test(err.message);
}
export function isNodeShuttingDownError(err: MongoError): boolean {
@@ -829,10 +902,12 @@ export function isResumableError(error?: MongoError, wireVersion?: number): bool
if (wireVersion != null && wireVersion >= 9) {
// DRIVERS-1308: For 4.4 drivers running against 4.4 servers, drivers will add a special case to treat the CursorNotFound error code as resumable
- if (error && error instanceof MongoError && error.code === 43) {
+ if (error && error instanceof MongoError && error.code === MONGODB_ERROR_CODES.CursorNotFound) {
return true;
}
- return error instanceof MongoError && error.hasErrorLabel('ResumableChangeStreamError');
+ return (
+ error instanceof MongoError && error.hasErrorLabel(MongoErrorLabel.ResumableChangeStreamError)
+ );
}
if (error && typeof error.code === 'number') {
diff --git a/src/index.ts b/src/index.ts
index 0502109519..da8cc2614d 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -65,6 +65,7 @@ export {
MongoTailableCursorError,
MongoTopologyClosedError,
MongoTransactionError,
+ MongoUnexpectedServerResponseError,
MongoWriteConcernError
} from './error';
export {
@@ -92,6 +93,7 @@ export { AuthMechanism } from './cmap/auth/providers';
export { Compressor } from './cmap/wire_protocol/compression';
export { CURSOR_FLAGS } from './cursor/abstract_cursor';
export { AutoEncryptionLoggerLevel } from './deps';
+export { MongoErrorLabel } from './error';
export { ExplainVerbosity } from './explain';
export { LoggerLevel } from './logger';
export { ServerApiVersion } from './mongo_client';
diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts
index b273c5c0fa..c1750726fc 100644
--- a/src/operations/execute_operation.ts
+++ b/src/operations/execute_operation.ts
@@ -1,14 +1,16 @@
import type { Document } from '../bson';
import {
- isRetryableError,
+ isRetryableReadError,
MongoCompatibilityError,
MONGODB_ERROR_CODES,
MongoError,
+ MongoErrorLabel,
MongoExpiredSessionError,
MongoNetworkError,
MongoRuntimeError,
MongoServerError,
- MongoTransactionError
+ MongoTransactionError,
+ MongoUnexpectedServerResponseError
} from '../error';
import { ReadPreference } from '../read_preference';
import type { Server } from '../sdam/server';
@@ -19,7 +21,7 @@ import {
} from '../sdam/server_selection';
import type { Topology } from '../sdam/topology';
import type { ClientSession } from '../sessions';
-import { Callback, maxWireVersion, maybePromise, supportsRetryableWrites } from '../utils';
+import { Callback, maybePromise, supportsRetryableWrites } from '../utils';
import { AbstractOperation, Aspect } from './operation';
const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation;
@@ -72,85 +74,73 @@ export function executeOperation<
TResult = ResultTypeFromOperation<T>
>(topology: Topology, operation: T, callback?: Callback<TResult>): Promise<TResult> | void {
if (!(operation instanceof AbstractOperation)) {
- // TODO(NODE-3483)
+ // TODO(NODE-3483): Extend MongoRuntimeError
throw new MongoRuntimeError('This method requires a valid operation instance');
}
- return maybePromise(callback, cb => {
+ return maybePromise(callback, callback => {
if (topology.shouldCheckForSessionSupport()) {
- return topology.selectServer(ReadPreference.primaryPreferred, err => {
- if (err) return cb(err);
+ return topology.selectServer(ReadPreference.primaryPreferred, {}, err => {
+ if (err) return callback(err);
- executeOperation(topology, operation, cb);
+ executeOperation(topology, operation, callback);
});
}
// The driver sessions spec mandates that we implicitly create sessions for operations
// that are not explicitly provided with a session.
- let session: ClientSession | undefined = operation.session;
+ let session = operation.session;
let owner: symbol | undefined;
if (topology.hasSessionSupport()) {
if (session == null) {
owner = Symbol();
session = topology.startSession({ owner, explicit: false });
} else if (session.hasEnded) {
- return cb(new MongoExpiredSessionError('Use of expired sessions is not permitted'));
+ return callback(new MongoExpiredSessionError('Use of expired sessions is not permitted'));
} else if (session.snapshotEnabled && !topology.capabilities.supportsSnapshotReads) {
- return cb(new MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later'));
+ return callback(new MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later'));
}
} else if (session) {
// If the user passed an explicit session and we are still, after server selection,
// trying to run against a topology that doesn't support sessions we error out.
- return cb(new MongoCompatibilityError('Current topology does not support sessions'));
+ return callback(new MongoCompatibilityError('Current topology does not support sessions'));
}
try {
- executeWithServerSelection(topology, session, operation, (err, result) => {
- if (session && session.owner && session.owner === owner) {
- return session.endSession(err2 => cb(err2 || err, result));
+ executeWithServerSelection(topology, session, operation, (error, result) => {
+ if (session?.owner != null && session.owner === owner) {
+ return session.endSession(endSessionError => callback(endSessionError ?? error, result));
}
- cb(err, result);
+ callback(error, result);
});
- } catch (e) {
- if (session && session.owner && session.owner === owner) {
+ } catch (error) {
+ if (session?.owner != null && session.owner === owner) {
session.endSession();
}
-
- throw e;
+ throw error;
}
});
}
-function supportsRetryableReads(server: Server) {
- return maxWireVersion(server) >= 6;
-}
-
-function executeWithServerSelection(
+function executeWithServerSelection(
topology: Topology,
session: ClientSession | undefined,
operation: AbstractOperation,
- callback: Callback
+ callback: Callback
) {
- const readPreference = operation.readPreference || ReadPreference.primary;
- const inTransaction = session && session.inTransaction();
+ const readPreference = operation.readPreference ?? ReadPreference.primary;
+ const inTransaction = !!session?.inTransaction();
if (inTransaction && !readPreference.equals(ReadPreference.primary)) {
- callback(
+ return callback(
new MongoTransactionError(
`Read preference in a transaction must be primary, not: ${readPreference.mode}`
)
);
-
- return;
}
- if (
- session &&
- session.isPinned &&
- session.transaction.isCommitted &&
- !operation.bypassPinningCheck
- ) {
+ if (session?.isPinned && session.transaction.isCommitted && !operation.bypassPinningCheck) {
session.unpin();
}
@@ -170,60 +160,54 @@ function executeWithServerSelection(
}
const serverSelectionOptions = { session };
- function callbackWithRetry(err?: any, result?: any) {
- if (err == null) {
- return callback(undefined, result);
- }
+ function retryOperation(originalError: MongoError) {
+ const isWriteOperation = operation.hasAspect(Aspect.WRITE_OPERATION);
+ const isReadOperation = operation.hasAspect(Aspect.READ_OPERATION);
- const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION);
- const hasWriteAspect = operation.hasAspect(Aspect.WRITE_OPERATION);
- const itShouldRetryWrite = shouldRetryWrite(err);
-
- if ((hasReadAspect && !isRetryableError(err)) || (hasWriteAspect && !itShouldRetryWrite)) {
- return callback(err);
- }
-
- if (
- hasWriteAspect &&
- itShouldRetryWrite &&
- err.code === MMAPv1_RETRY_WRITES_ERROR_CODE &&
- err.errmsg.match(/Transaction numbers/)
- ) {
- callback(
+ if (isWriteOperation && originalError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) {
+ return callback(
new MongoServerError({
message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
- originalError: err
+ originalError
})
);
+ }
- return;
+ if (isWriteOperation && !originalError.hasErrorLabel(MongoErrorLabel.RetryableWriteError)) {
+ return callback(originalError);
}
- // select a new server, and attempt to retry the operation
- topology.selectServer(selector, serverSelectionOptions, (e?: any, server?: any) => {
- if (
- e ||
- (operation.hasAspect(Aspect.READ_OPERATION) && !supportsRetryableReads(server)) ||
- (operation.hasAspect(Aspect.WRITE_OPERATION) && !supportsRetryableWrites(server))
- ) {
- callback(e);
- return;
- }
+ if (isReadOperation && !isRetryableReadError(originalError)) {
+ return callback(originalError);
+ }
+ if (
+ originalError instanceof MongoNetworkError &&
+ session?.isPinned &&
+ !session.inTransaction() &&
+ operation.hasAspect(Aspect.CURSOR_CREATING)
+ ) {
// If we have a cursor and the initial command fails with a network error,
// we can retry it on another connection. So we need to check it back in, clear the
// pool for the service id, and retry again.
- if (
- err &&
- err instanceof MongoNetworkError &&
- server.loadBalanced &&
- session &&
- session.isPinned &&
- !session.inTransaction() &&
- operation.hasAspect(Aspect.CURSOR_CREATING)
- ) {
- session.unpin({ force: true, forceClear: true });
+ session.unpin({ force: true, forceClear: true });
+ }
+
+ // select a new server, and attempt to retry the operation
+ topology.selectServer(selector, serverSelectionOptions, (error?: Error, server?: Server) => {
+ if (!error && isWriteOperation && !supportsRetryableWrites(server)) {
+ return callback(
+ new MongoUnexpectedServerResponseError(
+ 'Selected server does not support retryable writes'
+ )
+ );
+ }
+
+ if (error || !server) {
+ return callback(
+ error ?? new MongoUnexpectedServerResponseError('Server selection failed without error')
+ );
}
operation.execute(server, session, callback);
@@ -233,8 +217,7 @@ function executeWithServerSelection(
if (
readPreference &&
!readPreference.equals(ReadPreference.primary) &&
- session &&
- session.inTransaction()
+ session?.inTransaction()
) {
callback(
new MongoTransactionError(
@@ -246,21 +229,17 @@ function executeWithServerSelection(
}
// select a server, and execute the operation against it
- topology.selectServer(selector, serverSelectionOptions, (err?: any, server?: any) => {
- if (err) {
- callback(err);
- return;
+ topology.selectServer(selector, serverSelectionOptions, (error, server) => {
+ if (error || !server) {
+ return callback(error);
}
if (session && operation.hasAspect(Aspect.RETRYABLE)) {
const willRetryRead =
- topology.s.options.retryReads !== false &&
- !inTransaction &&
- supportsRetryableReads(server) &&
- operation.canRetryRead;
+ topology.s.options.retryReads && !inTransaction && operation.canRetryRead;
const willRetryWrite =
- topology.s.options.retryWrites === true &&
+ topology.s.options.retryWrites &&
!inTransaction &&
supportsRetryableWrites(server) &&
operation.canRetryWrite;
@@ -274,15 +253,17 @@ function executeWithServerSelection(
session.incrementTransactionNumber();
}
- operation.execute(server, session, callbackWithRetry);
- return;
+ return operation.execute(server, session, (error, result) => {
+ if (error instanceof MongoError) {
+ return retryOperation(error);
+ } else if (error) {
+ return callback(error);
+ }
+ callback(undefined, result);
+ });
}
}
- operation.execute(server, session, callback);
+ return operation.execute(server, session, callback);
});
}
-
-function shouldRetryWrite(err: any) {
- return err instanceof MongoError && err.hasErrorLabel('RetryableWriteError');
-}
diff --git a/src/operations/operation.ts b/src/operations/operation.ts
index d4ca5d011d..82265d06f3 100644
--- a/src/operations/operation.ts
+++ b/src/operations/operation.ts
@@ -59,7 +59,7 @@ export abstract class AbstractOperation {
// TODO: Each operation defines its own options, there should be better typing here
options: Document;
- [kSession]: ClientSession;
+ [kSession]: ClientSession | undefined;
constructor(options: OperationOptions = {}) {
this.readPreference = this.hasAspect(Aspect.WRITE_OPERATION)
@@ -69,9 +69,7 @@ export abstract class AbstractOperation {
// Pull the BSON serialize options from the already-resolved options
this.bsonOptions = resolveBSONOptions(options);
- if (options.session) {
- this[kSession] = options.session;
- }
+ this[kSession] = options.session != null ? options.session : undefined;
this.options = options;
this.bypassPinningCheck = !!options.bypassPinningCheck;
@@ -93,7 +91,7 @@ export abstract class AbstractOperation {
return ctor.aspects.has(aspect);
}
- get session(): ClientSession {
+ get session(): ClientSession | undefined {
return this[kSession];
}
diff --git a/src/sdam/common.ts b/src/sdam/common.ts
index 9a9a5450a7..6738ad9eb8 100644
--- a/src/sdam/common.ts
+++ b/src/sdam/common.ts
@@ -53,12 +53,6 @@ export function drainTimerQueue(queue: TimerQueue): void {
queue.clear();
}
-/** @internal */
-export function clearAndRemoveTimerFrom(timer: NodeJS.Timeout, timers: TimerQueue): boolean {
- clearTimeout(timer);
- return timers.delete(timer);
-}
-
/** @public */
export interface ClusterTime {
clusterTime: Timestamp;
diff --git a/src/sdam/monitor.ts b/src/sdam/monitor.ts
index 9f7f786b3e..d43bc3e49e 100644
--- a/src/sdam/monitor.ts
+++ b/src/sdam/monitor.ts
@@ -2,7 +2,7 @@ import { Document, Long } from '../bson';
import { connect } from '../cmap/connect';
import { Connection, ConnectionOptions } from '../cmap/connection';
import { LEGACY_HELLO_COMMAND } from '../constants';
-import { AnyError, MongoNetworkError } from '../error';
+import { MongoNetworkError } from '../error';
import { CancellationToken, TypedEventEmitter } from '../mongo_types';
import type { Callback, InterruptibleAsyncInterval } from '../utils';
import {
@@ -211,7 +211,7 @@ function checkServer(monitor: Monitor, callback: Callback) {
let start = now();
monitor.emit(Server.SERVER_HEARTBEAT_STARTED, new ServerHeartbeatStartedEvent(monitor.address));
- function failureHandler(err: AnyError) {
+ function failureHandler(err: Error) {
monitor[kConnection]?.destroy({ force: true });
monitor[kConnection] = undefined;
@@ -259,8 +259,7 @@ function checkServer(monitor: Monitor, callback: Callback) {
connection.command(ns('admin.$cmd'), cmd, options, (err, hello) => {
if (err) {
- failureHandler(err);
- return;
+ return failureHandler(err);
}
if (!('isWritablePrimary' in hello)) {
diff --git a/src/sdam/server.ts b/src/sdam/server.ts
index e13f793de1..a14a392d60 100644
--- a/src/sdam/server.ts
+++ b/src/sdam/server.ts
@@ -27,14 +27,16 @@ import type { AutoEncrypter } from '../deps';
import {
isNetworkErrorBeforeHandshake,
isNodeShuttingDownError,
- isRetryableWriteError,
isSDAMUnrecoverableError,
MongoCompatibilityError,
MongoError,
+ MongoErrorLabel,
MongoInvalidArgumentError,
MongoNetworkError,
MongoNetworkTimeoutError,
- MongoServerClosedError
+ MongoServerClosedError,
+ MongoUnexpectedServerResponseError,
+ needsRetryableWriteLabel
} from '../error';
import { Logger } from '../logger';
import type { ServerApi } from '../mongo_client';
@@ -43,7 +45,6 @@ import type { ClientSession } from '../sessions';
import { isTransactionCommand } from '../transactions';
import {
Callback,
- CallbackWithType,
collationNotSupported,
EventEmitterWithState,
makeStateMachine,
@@ -283,24 +284,12 @@ export class Server extends TypedEventEmitter {
* Execute a command
* @internal
*/
- command(ns: MongoDBNamespace, cmd: Document, callback: Callback): void;
- /** @internal */
command(
ns: MongoDBNamespace,
cmd: Document,
options: CommandOptions,
callback: Callback
- ): void;
- command(
- ns: MongoDBNamespace,
- cmd: Document,
- options?: CommandOptions | Callback,
- callback?: Callback
): void {
- if (typeof options === 'function') {
- (callback = options), (options = {}), (options = options ?? {});
- }
-
if (callback == null) {
throw new MongoInvalidArgumentError('Callback must be provided');
}
@@ -345,7 +334,7 @@ export class Server extends TypedEventEmitter {
}
session.pin(checkedOut);
- this.command(ns, cmd, finalOptions, callback as Callback);
+ this.command(ns, cmd, finalOptions, callback);
});
return;
@@ -363,7 +352,7 @@ export class Server extends TypedEventEmitter {
ns,
cmd,
finalOptions,
- makeOperationHandler(this, conn, cmd, finalOptions, cb) as Callback
+ makeOperationHandler(this, conn, cmd, finalOptions, cb)
);
},
callback
@@ -388,12 +377,7 @@ export class Server extends TypedEventEmitter {
return cb(err);
}
- conn.query(
- ns,
- cmd,
- options,
- makeOperationHandler(this, conn, cmd, options, cb) as Callback
- );
+ conn.query(ns, cmd, options, makeOperationHandler(this, conn, cmd, options, cb));
},
callback
);
@@ -422,12 +406,7 @@ export class Server extends TypedEventEmitter {
return cb(err);
}
- conn.getMore(
- ns,
- cursorId,
- options,
- makeOperationHandler(this, conn, {}, options, cb) as Callback
- );
+ conn.getMore(ns, cursorId, options, makeOperationHandler(this, conn, {}, options, cb));
},
callback
);
@@ -463,7 +442,7 @@ export class Server extends TypedEventEmitter {
ns,
cursorIds,
options,
- makeOperationHandler(this, conn, {}, undefined, cb) as Callback
+ makeOperationHandler(this, conn, {}, undefined, cb)
);
},
callback
@@ -547,67 +526,87 @@ function makeOperationHandler(
cmd: Document,
options: CommandOptions | GetMoreOptions | undefined,
callback: Callback
-): CallbackWithType {
+): Callback {
const session = options?.session;
- return function handleOperationResult(err, result) {
- if (err && !connectionIsStale(server.s.pool, connection)) {
- if (err instanceof MongoNetworkError) {
- if (session && !session.hasEnded && session.serverSession) {
- session.serverSession.isDirty = true;
- }
+ return function handleOperationResult(error, result) {
+ if (result != null) {
+ return callback(undefined, result);
+ }
- // inActiveTransaction check handles commit and abort.
- if (inActiveTransaction(session, cmd) && !err.hasErrorLabel('TransientTransactionError')) {
- err.addErrorLabel('TransientTransactionError');
- }
+ if (!error) {
+ return callback(new MongoUnexpectedServerResponseError('Empty response with no error'));
+ }
- if (
- (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) &&
- supportsRetryableWrites(server) &&
- !inActiveTransaction(session, cmd)
- ) {
- err.addErrorLabel('RetryableWriteError');
- }
+ if (!(error instanceof MongoError)) {
+ // Node.js or some other error we have not special handling for
+ return callback(error);
+ }
- if (!(err instanceof MongoNetworkTimeoutError) || isNetworkErrorBeforeHandshake(err)) {
- // In load balanced mode we never mark the server as unknown and always
- // clear for the specific service id.
+ if (connectionIsStale(server.s.pool, connection)) {
+ return callback(error);
+ }
- server.s.pool.clear(connection.serviceId);
- if (!server.loadBalanced) {
- markServerUnknown(server, err);
- }
- }
- } else {
- // if pre-4.4 server, then add error label if its a retryable write error
- if (
- (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) &&
- maxWireVersion(server) < 9 &&
- isRetryableWriteError(err) &&
- !inActiveTransaction(session, cmd)
- ) {
- err.addErrorLabel('RetryableWriteError');
+ if (error instanceof MongoNetworkError) {
+ if (session && !session.hasEnded && session.serverSession) {
+ session.serverSession.isDirty = true;
+ }
+
+ // inActiveTransaction check handles commit and abort.
+ if (
+ inActiveTransaction(session, cmd) &&
+ !error.hasErrorLabel(MongoErrorLabel.TransientTransactionError)
+ ) {
+ error.addErrorLabel(MongoErrorLabel.TransientTransactionError);
+ }
+
+ if (
+ (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) &&
+ supportsRetryableWrites(server) &&
+ !inActiveTransaction(session, cmd)
+ ) {
+ error.addErrorLabel(MongoErrorLabel.RetryableWriteError);
+ }
+
+ if (!(error instanceof MongoNetworkTimeoutError) || isNetworkErrorBeforeHandshake(error)) {
+ // In load balanced mode we never mark the server as unknown and always
+ // clear for the specific service id.
+
+ server.s.pool.clear(connection.serviceId);
+ if (!server.loadBalanced) {
+ markServerUnknown(server, error);
}
+ }
+ } else {
+ if (
+ (isRetryableWritesEnabled(server.s.topology) || isTransactionCommand(cmd)) &&
+ needsRetryableWriteLabel(error, maxWireVersion(server)) &&
+ !inActiveTransaction(session, cmd)
+ ) {
+ error.addErrorLabel(MongoErrorLabel.RetryableWriteError);
+ }
- if (isSDAMUnrecoverableError(err)) {
- if (shouldHandleStateChangeError(server, err)) {
- if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(err)) {
- server.s.pool.clear(connection.serviceId);
- }
+ if (isSDAMUnrecoverableError(error)) {
+ if (shouldHandleStateChangeError(server, error)) {
+ if (maxWireVersion(server) <= 7 || isNodeShuttingDownError(error)) {
+ server.s.pool.clear(connection.serviceId);
+ }
- if (!server.loadBalanced) {
- markServerUnknown(server, err);
- process.nextTick(() => server.requestCheck());
- }
+ if (!server.loadBalanced) {
+ markServerUnknown(server, error);
+ process.nextTick(() => server.requestCheck());
}
}
}
+ }
- if (session && session.isPinned && err.hasErrorLabel('TransientTransactionError')) {
- session.unpin({ force: true });
- }
+ if (
+ session &&
+ session.isPinned &&
+ error.hasErrorLabel(MongoErrorLabel.TransientTransactionError)
+ ) {
+ session.unpin({ force: true });
}
- callback(err, result);
+ return callback(error);
};
}
diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts
index 4d9a067eaa..ccb8371332 100644
--- a/src/sdam/topology.ts
+++ b/src/sdam/topology.ts
@@ -50,7 +50,6 @@ import {
} from '../utils';
import {
_advanceClusterTime,
- clearAndRemoveTimerFrom,
ClusterTime,
drainTimerQueue,
ServerType,
@@ -435,7 +434,12 @@ export class Topology extends TypedEventEmitter {
// connect all known servers, then attempt server selection to connect
const serverDescriptions = Array.from(this.s.description.servers.values());
- connectServers(this, serverDescriptions);
+ this.s.servers = new Map(
+ serverDescriptions.map(serverDescription => [
+ serverDescription.address,
+ createAndConnectServer(this, serverDescription)
+ ])
+ );
// In load balancer mode we need to fake a server description getting
// emitted from the monitor, since the monitor doesn't exist.
@@ -459,7 +463,7 @@ export class Topology extends TypedEventEmitter {
// TODO: NODE-2471
if (server && this.s.credentials) {
- server.command(ns('admin.$cmd'), { ping: 1 }, err => {
+ server.command(ns('admin.$cmd'), { ping: 1 }, {}, err => {
if (err) {
typeof callback === 'function' ? callback(err) : this.emit(Topology.ERROR, err);
return;
@@ -549,27 +553,11 @@ export class Topology extends TypedEventEmitter {
* @param callback - The callback used to indicate success or failure
* @returns An instance of a `Server` meeting the criteria of the predicate provided
*/
- selectServer(options: SelectServerOptions, callback: Callback): void;
- selectServer(
- selector: string | ReadPreference | ServerSelector,
- callback: Callback
- ): void;
selectServer(
selector: string | ReadPreference | ServerSelector,
options: SelectServerOptions,
callback: Callback
- ): void;
- selectServer(
- selector: string | ReadPreference | ServerSelector | SelectServerOptions,
- _options?: SelectServerOptions | Callback,
- _callback?: Callback
): void {
- let options = _options as SelectServerOptions;
- const callback = (_callback ?? _options) as Callback;
- if (typeof options === 'function') {
- options = {};
- }
-
let serverSelector;
if (typeof selector !== 'function') {
if (typeof selector === 'string') {
@@ -667,6 +655,7 @@ export class Topology extends TypedEventEmitter {
this.selectServer(
readPreferenceServerSelector(ReadPreference.primaryPreferred),
+ {},
(err, server) => {
if (err || !server) {
if (typeof callback === 'function') callback(err);
@@ -875,13 +864,8 @@ function randomSelection(array: ServerDescription[]): ServerDescription {
*
* @param topology - The topology that this server belongs to
* @param serverDescription - The description for the server to initialize and connect to
- * @param connectDelay - Time to wait before attempting initial connection
*/
-function createAndConnectServer(
- topology: Topology,
- serverDescription: ServerDescription,
- connectDelay?: number
-) {
+function createAndConnectServer(topology: Topology, serverDescription: ServerDescription) {
topology.emit(
Topology.SERVER_OPENING,
new ServerOpeningEvent(topology.s.id, serverDescription.address)
@@ -894,38 +878,10 @@ function createAndConnectServer(
server.on(Server.DESCRIPTION_RECEIVED, description => topology.serverUpdateHandler(description));
- if (connectDelay) {
- const connectTimer = setTimeout(() => {
- clearAndRemoveTimerFrom(connectTimer, topology.s.connectionTimers);
- server.connect();
- }, connectDelay);
-
- topology.s.connectionTimers.add(connectTimer);
- return server;
- }
-
server.connect();
return server;
}
-/**
- * Create `Server` instances for all initially known servers, connect them, and assign
- * them to the passed in `Topology`.
- *
- * @param topology - The topology responsible for the servers
- * @param serverDescriptions - A list of server descriptions to connect
- */
-function connectServers(topology: Topology, serverDescriptions: ServerDescription[]) {
- topology.s.servers = serverDescriptions.reduce(
- (servers: Map, serverDescription: ServerDescription) => {
- const server = createAndConnectServer(topology, serverDescription);
- servers.set(serverDescription.address, server);
- return servers;
- },
- new Map()
- );
-}
-
/**
* @param topology - Topology to update.
* @param incomingServerDescription - New server description.
diff --git a/src/sdam/topology_description.ts b/src/sdam/topology_description.ts
index e5e5e55356..5e2019d56d 100644
--- a/src/sdam/topology_description.ts
+++ b/src/sdam/topology_description.ts
@@ -58,9 +58,6 @@ export class TopologyDescription {
) {
options = options ?? {};
- // TODO: consider assigning all these values to a temporary value `s` which
- // we use `Object.freeze` on, ensuring the internal state of this type
- // is immutable.
this.type = topologyType ?? TopologyType.Unknown;
this.servers = serverDescriptions ?? new Map();
this.stale = false;
diff --git a/src/sessions.ts b/src/sessions.ts
index 1ae404a272..7b05edee40 100644
--- a/src/sessions.ts
+++ b/src/sessions.ts
@@ -6,16 +6,14 @@ import { PINNED, UNPINNED } from './constants';
import type { AbstractCursor } from './cursor/abstract_cursor';
import {
AnyError,
- isRetryableEndTransactionError,
- isRetryableError,
MongoAPIError,
MongoCompatibilityError,
MONGODB_ERROR_CODES,
MongoDriverError,
MongoError,
+ MongoErrorLabel,
MongoExpiredSessionError,
MongoInvalidArgumentError,
- MongoNetworkError,
MongoRuntimeError,
MongoServerError,
MongoTransactionError,
@@ -41,7 +39,6 @@ import {
now,
uuidV4
} from './utils';
-import type { WriteConcern } from './write_concern';
const minWireVersionForShardedTransactions = 8;
@@ -507,7 +504,7 @@ export function maybeClearPinnedConnection(
session.inTransaction() &&
error &&
error instanceof MongoError &&
- error.hasErrorLabel('TransientTransactionError')
+ error.hasErrorLabel(MongoErrorLabel.TransientTransactionError)
) {
return;
}
@@ -559,11 +556,11 @@ function attemptTransactionCommit(
hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT) &&
!isMaxTimeMSExpiredError(err)
) {
- if (err.hasErrorLabel('UnknownTransactionCommitResult')) {
+ if (err.hasErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult)) {
return attemptTransactionCommit(session, startTime, fn, options);
}
- if (err.hasErrorLabel('TransientTransactionError')) {
+ if (err.hasErrorLabel(MongoErrorLabel.TransientTransactionError)) {
return attemptTransaction(session, startTime, fn, options);
}
}
@@ -617,14 +614,14 @@ function attemptTransaction(
function maybeRetryOrThrow(err: MongoError): Promise {
if (
err instanceof MongoError &&
- err.hasErrorLabel('TransientTransactionError') &&
+ err.hasErrorLabel(MongoErrorLabel.TransientTransactionError) &&
hasNotTimedOut(startTime, MAX_WITH_TRANSACTION_TIMEOUT)
) {
return attemptTransaction(session, startTime, fn, options);
}
if (isMaxTimeMSExpiredError(err)) {
- err.addErrorLabel('UnknownTransactionCommitResult');
+ err.addErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult);
}
throw err;
@@ -639,7 +636,11 @@ function attemptTransaction(
);
}
-function endTransaction(session: ClientSession, commandName: string, callback: Callback) {
+function endTransaction(
+ session: ClientSession,
+ commandName: 'abortTransaction' | 'commitTransaction',
+ callback: Callback
+) {
if (!assertAlive(session, callback)) {
// checking result in case callback was called
return;
@@ -717,7 +718,7 @@ function endTransaction(session: ClientSession, commandName: string, callback: C
Object.assign(command, { maxTimeMS: session.transaction.options.maxTimeMS });
}
- function commandHandler(e?: MongoError, r?: Document) {
+ function commandHandler(error?: Error, result?: Document) {
if (commandName !== 'commitTransaction') {
session.transaction.transition(TxnState.TRANSACTION_ABORTED);
if (session.loadBalanced) {
@@ -729,25 +730,24 @@ function endTransaction(session: ClientSession, commandName: string, callback: C
}
session.transaction.transition(TxnState.TRANSACTION_COMMITTED);
- if (e) {
+ if (error instanceof MongoError) {
if (
- e instanceof MongoNetworkError ||
- e instanceof MongoWriteConcernError ||
- isRetryableError(e) ||
- isMaxTimeMSExpiredError(e)
+ error.hasErrorLabel(MongoErrorLabel.RetryableWriteError) ||
+ error instanceof MongoWriteConcernError ||
+ isMaxTimeMSExpiredError(error)
) {
- if (isUnknownTransactionCommitResult(e)) {
- e.addErrorLabel('UnknownTransactionCommitResult');
+ if (isUnknownTransactionCommitResult(error)) {
+ error.addErrorLabel(MongoErrorLabel.UnknownTransactionCommitResult);
// per txns spec, must unpin session in this case
- session.unpin({ error: e });
+ session.unpin({ error });
}
- } else if (e.hasErrorLabel('TransientTransactionError')) {
- session.unpin({ error: e });
+ } else if (error.hasErrorLabel(MongoErrorLabel.TransientTransactionError)) {
+ session.unpin({ error });
}
}
- callback(e, r);
+ callback(error, result);
}
// Assumption here that commandName is "commitTransaction" or "abortTransaction"
@@ -763,13 +763,13 @@ function endTransaction(session: ClientSession, commandName: string, callback: C
readPreference: ReadPreference.primary,
bypassPinningCheck: true
}),
- (err, reply) => {
+ (error, result) => {
if (command.abortTransaction) {
// always unpin on abort regardless of command outcome
session.unpin();
}
- if (err && isRetryableEndTransactionError(err as MongoError)) {
+ if (error instanceof MongoError && error.hasErrorLabel(MongoErrorLabel.RetryableWriteError)) {
// SPEC-1185: apply majority write concern when retrying commitTransaction
if (command.commitTransaction) {
// per txns spec, must unpin session in this case
@@ -787,11 +787,11 @@ function endTransaction(session: ClientSession, commandName: string, callback: C
readPreference: ReadPreference.primary,
bypassPinningCheck: true
}),
- (_err, _reply) => commandHandler(_err as MongoError, _reply)
+ commandHandler
);
}
- commandHandler(err as MongoError, reply);
+ commandHandler(error, result);
}
);
}
@@ -936,11 +936,13 @@ export class ServerSessionPool {
* @param session - the session tracking transaction state
* @param command - the command to decorate
* @param options - Optional settings passed to calling operation
+ *
+ * @internal
*/
export function applySession(
session: ClientSession,
command: Document,
- options?: CommandOptions
+ options: CommandOptions
): MongoDriverError | undefined {
// TODO: merge this with `assertAlive`, did not want to throw a try/catch here
if (session.hasEnded) {
@@ -952,10 +954,9 @@ export function applySession(
return new MongoRuntimeError('Unable to acquire server session');
}
- // SPEC-1019: silently ignore explicit session with unacknowledged write for backwards compatibility
- // FIXME: NODE-2781, this check for write concern shouldn't be happening here, but instead during command construction
- if (options && options.writeConcern && (options.writeConcern as WriteConcern).w === 0) {
+ if (options.writeConcern?.w === 0) {
if (session && session.explicit) {
+ // Error if user provided an explicit session to an unacknowledged write (SPEC-1019)
return new MongoAPIError('Cannot have explicit session with unacknowledged writes');
}
return;
diff --git a/src/utils.ts b/src/utils.ts
index 207c43314d..5fe1edbfbc 100644
--- a/src/utils.ts
+++ b/src/utils.ts
@@ -35,8 +35,6 @@ import { W, WriteConcern, WriteConcernOptions } from './write_concern';
* @public
*/
export type Callback = (error?: AnyError, result?: T) => void;
-/** @public */
-export type CallbackWithType = (error?: E, result?: T0) => void;
export const MAX_JS_INT = Number.MAX_SAFE_INTEGER + 1;
@@ -1313,13 +1311,25 @@ export function enumToString(en: Record): string {
*
* @internal
*/
-export function supportsRetryableWrites(server: Server): boolean {
- return (
- !!server.loadBalanced ||
- (server.description.maxWireVersion >= 6 &&
- !!server.description.logicalSessionTimeoutMinutes &&
- server.description.type !== ServerType.Standalone)
- );
+export function supportsRetryableWrites(server?: Server): boolean {
+ if (!server) {
+ return false;
+ }
+
+ if (server.loadBalanced) {
+ // Loadbalanced topologies will always support retry writes
+ return true;
+ }
+
+ if (server.description.logicalSessionTimeoutMinutes != null) {
+ // that supports sessions
+ if (server.description.type !== ServerType.Standalone) {
+ // and that is not a standalone
+ return true;
+ }
+ }
+
+ return false;
}
export function parsePackageVersion({ version }: { version: string }): {
diff --git a/test/integration/crud/bulk.test.js b/test/integration/crud/bulk.test.js
index b3170cf1f1..bfd9a897ee 100644
--- a/test/integration/crud/bulk.test.js
+++ b/test/integration/crud/bulk.test.js
@@ -17,6 +17,8 @@ chai.use(require('chai-subset'));
const MAX_BSON_SIZE = 16777216;
describe('Bulk', function () {
+ /** @type {MongoClient} */
+ let client;
before(function () {
return setupDatabase(this.configuration);
});
@@ -96,6 +98,17 @@ describe('Bulk', function () {
});
});
+ beforeEach(async function () {
+ client = this.configuration.newClient(this.configuration.writeConcernMax(), {
+ maxPoolSize: 1
+ });
+ await client.connect();
+ });
+
+ afterEach(async function () {
+ if (client) await client.close();
+ });
+
it('should correctly handle ordered single batch api write command error', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
@@ -490,61 +503,40 @@ describe('Bulk', function () {
}
});
- it(
- 'should Correctly Execute Ordered Batch of Write Operations with duplicate key errors on updates',
- {
- metadata: {
- requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
- },
-
- test: function (done) {
- var self = this;
- var client = self.configuration.newClient(self.configuration.writeConcernMax(), {
- maxPoolSize: 1
- });
-
- client.connect(function (err, client) {
- var db = client.db(self.configuration.db);
- var col = db.collection('batch_write_ordered_ops_6');
-
- // Add unique index on b field causing all updates to fail
- col.createIndex({ b: 1 }, { unique: true, sparse: false }, function (err) {
- expect(err).to.not.exist;
-
- var batch = col.initializeOrderedBulkOp();
-
- // Add some operations to be executed in order
- batch.insert({ a: 1 });
- batch.find({ a: 1 }).update({ $set: { b: 1 } });
- batch.insert({ b: 1 });
-
- // Execute the operations
- batch.execute(function (err, result) {
- expect(err).to.exist;
- expect(result).to.not.exist;
-
- // Test basic settings
- result = err.result;
- test.equal(1, result.nInserted);
- test.equal(1, result.nMatched);
- test.ok(1 === result.nModified || result.nModified == null);
- test.equal(true, result.hasWriteErrors());
- test.ok(1, result.getWriteErrorCount());
-
- // Individual error checking
- var error = result.getWriteErrorAt(0);
- test.equal(2, error.index);
- test.equal(11000, error.code);
- test.ok(error.errmsg != null);
- test.equal(1, error.getOperation().b);
-
- client.close(done);
- });
- });
- });
- }
- }
- );
+ it('should Correctly Execute Ordered Batch of Write Operations with duplicate key errors on updates', async function () {
+ const db = client.db(this.configuration.db);
+ const col = db.collection('batch_write_ordered_ops_6');
+ // Add unique index on b field causing all updates to fail
+ await col.createIndex({ b: 1 }, { unique: true, sparse: false });
+ const batch = col.initializeOrderedBulkOp();
+
+ // Add some operations to be executed in order
+ batch.insert({ a: 1 });
+ batch.find({ a: 1 }).update({ $set: { b: 1 } });
+ batch.insert({ b: 1 });
+
+ const thrownError = await batch.execute().catch(error => error);
+ expect(thrownError).to.instanceOf(Error);
+
+ // Test basic settings
+ const result = thrownError.result;
+ expect(result).to.have.property('nInserted', 1);
+ expect(result).to.have.property('nMatched', 1);
+ expect(result)
+ .to.have.property('nModified')
+ .that.satisfies(v => v == null || v === 1);
+ expect(result).to.have.property('hasWriteErrors').that.is.a('function');
+ expect(result).to.have.property('getWriteErrorCount').that.is.a('function');
+ expect(result.hasWriteErrors()).to.be.true;
+ expect(result.getWriteErrorCount()).to.equal(1);
+
+ // Individual error checking
+ const writeError = result.getWriteErrorAt(0);
+ expect(writeError).to.have.property('index', 2);
+ expect(writeError).to.have.property('code', 11000);
+ expect(writeError).to.have.property('errmsg').that.is.a('string');
+ expect(writeError.getOperation()).to.have.property('b', 1);
+ });
it(
'should Correctly Execute Ordered Batch of Write Operations with upserts causing duplicate key errors on updates',
diff --git a/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts
new file mode 100644
index 0000000000..2171885f85
--- /dev/null
+++ b/test/integration/retryable-writes/retryable_writes.spec.prose.test.ts
@@ -0,0 +1,64 @@
+import { expect } from 'chai';
+
+import { MongoError, MongoServerError, TopologyType } from '../../../src';
+
+describe('Retryable Writes Spec Prose', () => {
+ /**
+ * 1 Test that retryable writes raise an exception when using the MMAPv1 storage engine.
+ * For this test, execute a write operation, such as insertOne, which should generate an exception and the error code is 20.
+ * Assert that the error message is the replacement error message:
+ *
+ * ```
+ * This MongoDB deployment does not support retryable writes. Please add
+ * retryWrites=false to your connection string.
+ * ```
+ * Note: Drivers that rely on serverStatus to determine the storage engine in use MAY skip this test for sharded clusters, since mongos does not report this information in its serverStatus response.
+ */
+ let client;
+
+ beforeEach(async function () {
+ if (
+ this.configuration.buildInfo.versionArray[0] < 4 ||
+ this.configuration.topologyType !== TopologyType.ReplicaSetWithPrimary
+ ) {
+ this.currentTest.skipReason =
+ 'configureFailPoint only works on server versions greater than 4';
+ this.skip();
+ }
+ client = this.configuration.newClient();
+ await client.connect();
+ });
+
+ afterEach(async () => {
+ await client?.close();
+ });
+
+ it('retryable writes raise an exception when using the MMAPv1 storage engine', async () => {
+ const failPoint = await client.db('admin').command({
+ configureFailPoint: 'failCommand',
+ mode: { times: 1 },
+ data: {
+ failCommands: ['insert'],
+ errorCode: 20, // MMAP Error code,
+ closeConnection: false
+ }
+ });
+
+ expect(failPoint).to.have.property('ok', 1);
+
+ const error = await client
+ .db('test')
+ .collection('test')
+ .insertOne({ a: 1 })
+ .catch(error => error);
+
+ expect(error).to.exist;
+ expect(error).that.is.instanceOf(MongoServerError);
+ expect(error).to.have.property('originalError').that.instanceOf(MongoError);
+ expect(error.originalError).to.have.property('code', 20);
+ expect(error).to.have.property(
+ 'message',
+ 'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.'
+ );
+ });
+});
diff --git a/test/integration/retryable-writes/retryable_writes.spec.test.js b/test/integration/retryable-writes/retryable_writes.spec.test.js
deleted file mode 100644
index d85f0881f0..0000000000
--- a/test/integration/retryable-writes/retryable_writes.spec.test.js
+++ /dev/null
@@ -1,199 +0,0 @@
-'use strict';
-
-const { expect } = require('chai');
-const { loadSpecTests } = require('../../spec');
-const { legacyRunOnToRunOnRequirement } = require('../../tools/spec-runner');
-const { isAnyRequirementSatisfied } = require('../../tools/unified-spec-runner/unified-utils');
-
-describe('Legacy Retryable Writes specs', function () {
- let ctx = {};
- const retryableWrites = loadSpecTests('retryable-writes/legacy');
-
- for (const suite of retryableWrites) {
- describe(suite.name, function () {
- beforeEach(async function () {
- let utilClient;
- if (this.configuration.isLoadBalanced) {
- // The util client can always point at the single mongos LB frontend.
- utilClient = this.configuration.newClient(this.configuration.singleMongosLoadBalancerUri);
- } else {
- utilClient = this.configuration.newClient();
- }
-
- await utilClient.connect();
-
- const allRequirements = suite.runOn.map(legacyRunOnToRunOnRequirement);
-
- const someRequirementMet =
- !allRequirements.length ||
- (await isAnyRequirementSatisfied(this.currentTest.ctx, allRequirements, utilClient));
-
- await utilClient.close();
-
- if (!someRequirementMet) this.skip();
- });
-
- afterEach(async function () {
- // Step 3: Test Teardown. Turn off failpoints, and close client
- if (!ctx.db || !ctx.client) {
- return;
- }
-
- if (ctx.failPointName) {
- await turnOffFailPoint(ctx.client, ctx.failPointName);
- }
- await ctx.client.close();
- ctx = {}; // reset context
- });
-
- for (const test of suite.tests) {
- it(test.description, async function () {
- // Step 1: Test Setup. Includes a lot of boilerplate stuff
- // like creating a client, dropping and refilling data collections,
- // and enabling failpoints
- await executeScenarioSetup(suite, test, this.configuration, ctx);
- // Step 2: Run the test
- await executeScenarioTest(test, ctx);
- });
- }
- });
- }
-});
-
-function executeScenarioSetup(scenario, test, config, ctx) {
- const url = config.url();
- const options = Object.assign({}, test.clientOptions, {
- heartbeatFrequencyMS: 100,
- monitorCommands: true,
- minPoolSize: 10
- });
-
- ctx.failPointName = test.failPoint && test.failPoint.configureFailPoint;
-
- const client = config.newClient(url, options);
- return client
- .connect()
- .then(client => (ctx.client = client))
- .then(() => (ctx.db = ctx.client.db(config.db)))
- .then(
- () =>
- (ctx.collection = ctx.db.collection(
- `retryable_writes_test_${config.name}_${test.operation.name}`
- ))
- )
- .then(() => ctx.collection.drop())
- .catch(err => {
- if (!err.message.match(/ns not found/)) {
- throw err;
- }
- })
- .then(() =>
- Array.isArray(scenario.data) && scenario.data.length
- ? ctx.collection.insertMany(scenario.data)
- : {}
- )
- .then(() => (test.failPoint ? ctx.client.db('admin').command(test.failPoint) : {}));
-}
-
-function executeScenarioTest(test, ctx) {
- return Promise.resolve()
- .then(() => {
- const args = generateArguments(test);
-
- let result = ctx.collection[test.operation.name].apply(ctx.collection, args);
- const outcome = test.outcome && test.outcome.result;
- const errorLabelsContain = outcome && outcome.errorLabelsContain;
- const errorLabelsOmit = outcome && outcome.errorLabelsOmit;
- const hasResult = outcome && !errorLabelsContain && !errorLabelsOmit;
- if (test.outcome.error) {
- result = result
- .then(() => expect(false).to.be.true)
- .catch(err => {
- expect(err).to.exist;
- expect(err.message, 'expected operations to fail, but they succeeded').to.not.match(
- /expected false to be true/
- );
- if (hasResult) expect(err.result).to.matchMongoSpec(test.outcome.result);
- if (errorLabelsContain) expect(err.errorLabels).to.include.members(errorLabelsContain);
- if (errorLabelsOmit) {
- errorLabelsOmit.forEach(label => {
- expect(err.errorLabels).to.not.contain(label);
- });
- }
- });
- } else if (test.outcome.result) {
- const expected = test.outcome.result;
- result = result.then(transformToResultValue).then(r => expect(r).to.deep.include(expected));
- }
-
- return result;
- })
- .then(() => {
- if (test.outcome.collection) {
- return ctx.collection
- .find({})
- .toArray()
- .then(collectionResults => {
- expect(collectionResults).to.eql(test.outcome.collection.data);
- });
- }
- });
-}
-
-// Helper Functions
-
-/**
- * Transforms the arguments from a test into actual arguments for our function calls
- *
- * @param {any} test
- */
-function generateArguments(test) {
- const args = [];
-
- if (test.operation.arguments) {
- const options = {};
- Object.keys(test.operation.arguments).forEach(arg => {
- if (arg === 'requests') {
- args.push(test.operation.arguments[arg].map(convertBulkWriteOperation));
- } else if (arg === 'upsert') {
- options.upsert = test.operation.arguments[arg];
- } else if (arg === 'returnDocument') {
- options.returnDocument = test.operation.arguments[arg].toLowerCase();
- } else {
- args.push(test.operation.arguments[arg]);
- }
- });
-
- if (Object.keys(options).length > 0) {
- args.push(options);
- }
- }
-
- return args;
-}
-
-/**
- * Transforms a request arg into a bulk write operation
- *
- * @param {any} op
- */
-function convertBulkWriteOperation(op) {
- return { [op.name]: op.arguments };
-}
-
-/**
- * Transforms output of a bulk write to conform to the test format
- *
- * @param {any} result
- */
-function transformToResultValue(result) {
- return result && result.value ? result.value : result;
-}
-
-/** Runs a command that turns off a fail point */
-function turnOffFailPoint(client, name) {
- return client.db('admin').command({
- configureFailPoint: name,
- mode: 'off'
- });
-}
diff --git a/test/integration/retryable-writes/retryable_writes.spec.test.ts b/test/integration/retryable-writes/retryable_writes.spec.test.ts
new file mode 100644
index 0000000000..17dbf62145
--- /dev/null
+++ b/test/integration/retryable-writes/retryable_writes.spec.test.ts
@@ -0,0 +1,198 @@
+import { expect } from 'chai';
+
+import type { Collection, Db, MongoClient } from '../../../src';
+import { loadSpecTests } from '../../spec';
+import { legacyRunOnToRunOnRequirement } from '../../tools/spec-runner';
+import { isAnyRequirementSatisfied } from '../../tools/unified-spec-runner/unified-utils';
+
+interface RetryableWriteTestContext {
+ client?: MongoClient;
+ db?: Db;
+ collection?: Collection;
+ failPointName?: any;
+}
+
+describe('Legacy Retryable Writes Specs', function () {
+ let ctx: RetryableWriteTestContext = {};
+
+ const retryableWrites = loadSpecTests('retryable-writes', 'legacy');
+
+ for (const suite of retryableWrites) {
+ describe(suite.name, function () {
+ beforeEach(async function () {
+ let utilClient: MongoClient;
+ if (this.configuration.isLoadBalanced) {
+ // The util client can always point at the single mongos LB frontend.
+ utilClient = this.configuration.newClient(this.configuration.singleMongosLoadBalancerUri);
+ } else {
+ utilClient = this.configuration.newClient();
+ }
+
+ await utilClient.connect();
+
+ const allRequirements = suite.runOn.map(legacyRunOnToRunOnRequirement);
+
+ const someRequirementMet =
+ !allRequirements.length ||
+ (await isAnyRequirementSatisfied(this.currentTest.ctx, allRequirements, utilClient));
+
+ await utilClient.close();
+
+ if (!someRequirementMet) this.skip();
+ });
+
+ beforeEach(async function () {
+ // Step 1: Test Setup. Includes a lot of boilerplate stuff
+ // like creating a client, dropping and refilling data collections,
+ // and enabling failpoints
+ const { spec } = this.currentTest;
+ await executeScenarioSetup(suite, spec, this.configuration, ctx);
+ });
+
+ afterEach(async function () {
+ // Step 3: Test Teardown. Turn off failpoints, and close client
+ if (!ctx.db || !ctx.client) {
+ return;
+ }
+
+ if (ctx.failPointName) {
+ await turnOffFailPoint(ctx.client, ctx.failPointName);
+ }
+ await ctx.client.close();
+ ctx = {}; // reset context
+ });
+
+ for (const spec of suite.tests) {
+ // Step 2: Run the test
+ const mochaTest = it(spec.description, async () => await executeScenarioTest(spec, ctx));
+
+ // A pattern we don't need to repeat for unified tests
+ // In order to give the beforeEach hook access to the
+ // spec test so it can be responsible for skipping it
+ // and executeScenarioSetup
+ mochaTest.spec = spec;
+ }
+ });
+ }
+});
+
+async function executeScenarioSetup(scenario, test, config, ctx) {
+ const url = config.url();
+ const options = {
+ ...test.clientOptions,
+ heartbeatFrequencyMS: 100,
+ monitorCommands: true,
+ minPoolSize: 10
+ };
+
+ ctx.failPointName = test.failPoint && test.failPoint.configureFailPoint;
+
+ const client = config.newClient(url, options);
+ await client.connect();
+
+ ctx.client = client;
+ ctx.db = client.db(config.db);
+ ctx.collection = ctx.db.collection(`retryable_writes_test_${config.name}_${test.operation.name}`);
+
+ try {
+ await ctx.collection.drop();
+ } catch (error) {
+ if (!error.message.match(/ns not found/)) {
+ throw error;
+ }
+ }
+
+ if (Array.isArray(scenario.data) && scenario.data.length) {
+ await ctx.collection.insertMany(scenario.data);
+ }
+
+ if (test.failPoint) {
+ await ctx.client.db('admin').command(test.failPoint);
+ }
+}
+
+async function executeScenarioTest(test, ctx: RetryableWriteTestContext) {
+ const args = generateArguments(test);
+
+ // In case the spec files or our API changes
+ expect(ctx.collection).to.have.property(test.operation.name).that.is.a('function');
+
+ // TODO(NODE-4033): Collect command started events and assert txn number existence or omission
+ // have to add logic to handle bulkWrites which emit multiple command started events
+ const resultOrError = await ctx.collection[test.operation.name](...args).catch(error => error);
+
+ const outcome = test.outcome && test.outcome.result;
+ const errorLabelsContain = outcome && outcome.errorLabelsContain;
+ const errorLabelsOmit = outcome && outcome.errorLabelsOmit;
+ const hasResult = outcome && !errorLabelsContain && !errorLabelsOmit;
+ if (test.outcome.error) {
+ const thrownError = resultOrError;
+ expect(thrownError, `${test.operation.name} was supposed to fail but did not!`).to.exist;
+ expect(thrownError).to.have.property('message');
+
+ if (hasResult) {
+ expect(thrownError.result).to.matchMongoSpec(test.outcome.result);
+ }
+
+ if (errorLabelsContain) {
+ expect(thrownError.errorLabels).to.include.members(errorLabelsContain);
+ }
+
+ if (errorLabelsOmit) {
+ for (const label of errorLabelsOmit) {
+ expect(thrownError.errorLabels).to.not.contain(label);
+ }
+ }
+ } else if (test.outcome.result) {
+ expect(resultOrError, resultOrError.stack).to.not.be.instanceOf(Error);
+ const result = resultOrError;
+ const expected = test.outcome.result;
+
+ // TODO(NODE-4034): Make CRUD results spec compliant
+ expect(result.value ?? result).to.deep.include(expected);
+ }
+
+ if (test.outcome.collection) {
+ const collectionResults = await ctx.collection.find({}).toArray();
+ expect(collectionResults).to.deep.equal(test.outcome.collection.data);
+ }
+}
+
+// Helper Functions
+
+/** Transforms the arguments from a test into actual arguments for our function calls */
+function generateArguments(test) {
+ const args = [];
+
+ if (test.operation.arguments) {
+      const options: Record<string, any> = {};
+ for (const arg of Object.keys(test.operation.arguments)) {
+ if (arg === 'requests') {
+ /** Transforms a request arg into a bulk write operation */
+ args.push(
+ test.operation.arguments[arg].map(({ name, arguments: args }) => ({ [name]: args }))
+ );
+ } else if (arg === 'upsert') {
+ options.upsert = test.operation.arguments[arg];
+ } else if (arg === 'returnDocument') {
+ options.returnDocument = test.operation.arguments[arg].toLowerCase();
+ } else {
+ args.push(test.operation.arguments[arg]);
+ }
+ }
+
+ if (Object.keys(options).length > 0) {
+ args.push(options);
+ }
+ }
+
+ return args;
+}
+
+/** Runs a command that turns off a fail point */
+async function turnOffFailPoint(client, name) {
+ return await client.db('admin').command({
+ configureFailPoint: name,
+ mode: 'off'
+ });
+}
diff --git a/test/tools/spec-runner/index.js b/test/tools/spec-runner/index.js
index 0ebee503d0..c9878a6a4a 100644
--- a/test/tools/spec-runner/index.js
+++ b/test/tools/spec-runner/index.js
@@ -458,26 +458,25 @@ function validateExpectations(commandEvents, spec, savedSessionData) {
const actualEvents = normalizeCommandShapes(commandEvents);
const rawExpectedEvents = spec.expectations.map(x => x.command_started_event);
const expectedEvents = normalizeCommandShapes(rawExpectedEvents);
- expect(actualEvents).to.have.length(expectedEvents.length);
- expectedEvents.forEach((expected, idx) => {
- const actual = actualEvents[idx];
+ for (const [idx, expectedEvent] of expectedEvents.entries()) {
+ const actualEvent = actualEvents[idx];
- if (expected.commandName != null) {
- expect(actual.commandName).to.equal(expected.commandName);
+ if (typeof expectedEvent.commandName === 'string') {
+ expect(actualEvent).to.have.property('commandName', expectedEvent.commandName);
}
- if (expected.databaseName != null) {
- expect(actual.databaseName).to.equal(expected.databaseName);
+ if (typeof expectedEvent.databaseName === 'string') {
+ expect(actualEvent).to.have.property('databaseName', expectedEvent.databaseName);
}
- const actualCommand = actual.command;
- const expectedCommand = expected.command;
+ const actualCommand = actualEvent.command;
+ const expectedCommand = expectedEvent.command;
if (expectedCommand.sort) {
- // TODO: This is a workaround that works because all sorts in the specs
+ // TODO(NODE-3235): This is a workaround that works because all sorts in the specs
// are objects with one key; ideally we'd want to adjust the spec definitions
// to indicate whether order matters for any given key and set general
- // expectations accordingly (see NODE-3235)
+ // expectations accordingly
expect(Object.keys(expectedCommand.sort)).to.have.lengthOf(1);
expect(actualCommand.sort).to.be.instanceOf(Map);
expect(actualCommand.sort.size).to.equal(1);
@@ -487,7 +486,7 @@ function validateExpectations(commandEvents, spec, savedSessionData) {
}
expect(actualCommand).withSessionData(savedSessionData).to.matchMongoSpec(expectedCommand);
- });
+ }
}
function normalizeCommandShapes(commands) {
@@ -875,25 +874,23 @@ function convertCollectionOptions(options) {
return result;
}
-function testOperations(testData, operationContext, options) {
+async function testOperations(testData, operationContext, options) {
options = options || { swallowOperationErrors: true };
- return testData.operations.reduce((combined, operation) => {
- return combined.then(() => {
- const object = operation.object || 'collection';
- if (object === 'collection') {
- const db = operationContext.database;
- const collectionName = operationContext.collectionName;
- const collectionOptions = operation.collectionOptions || {};
-
- operationContext[object] = db.collection(
- collectionName,
- convertCollectionOptions(collectionOptions)
- );
- }
- return testOperation(operation, operationContext[object], operationContext, options);
- });
- }, Promise.resolve());
+ for (const operation of testData.operations) {
+ const object = operation.object || 'collection';
+ if (object === 'collection') {
+ const db = operationContext.database;
+ const collectionName = operationContext.collectionName;
+ const collectionOptions = operation.collectionOptions || {};
+
+ operationContext[object] = db.collection(
+ collectionName,
+ convertCollectionOptions(collectionOptions)
+ );
+ }
+ await testOperation(operation, operationContext[object], operationContext, options);
+ }
}
module.exports = {
diff --git a/test/tools/unified-spec-runner/match.ts b/test/tools/unified-spec-runner/match.ts
index 39a83c7036..ce0370e79a 100644
--- a/test/tools/unified-spec-runner/match.ts
+++ b/test/tools/unified-spec-runner/match.ts
@@ -272,7 +272,7 @@ function validEmptyCmapEvent(
export function matchesEvents(
expected: (ExpectedCommandEvent & ExpectedCmapEvent)[],
- actual: (CommandEvent & CmapEvent)[],
+ actual: CommandEvent[] | CmapEvent[],
entities: EntitiesMap
): void {
if (actual.length !== expected.length) {
@@ -300,7 +300,9 @@ export function matchesEvents(
]);
} else if (expectedEvent.commandFailedEvent) {
expect(actualEvent).to.be.instanceOf(CommandFailedEvent);
- expect(actualEvent.commandName).to.equal(expectedEvent.commandFailedEvent.commandName);
+ expect(actualEvent)
+ .to.have.property('commandName')
+ .that.equals(expectedEvent.commandFailedEvent.commandName);
} else if (expectedEvent.connectionClosedEvent) {
expect(actualEvent).to.be.instanceOf(ConnectionClosedEvent);
if (expectedEvent.connectionClosedEvent.hasServiceId) {
diff --git a/test/tools/unified-spec-runner/runner.ts b/test/tools/unified-spec-runner/runner.ts
index 34964caf67..f057a2070e 100644
--- a/test/tools/unified-spec-runner/runner.ts
+++ b/test/tools/unified-spec-runner/runner.ts
@@ -69,7 +69,7 @@ export async function runUnifiedTest(
utilClient = ctx.configuration.newClient();
}
- let entities;
+ let entities: EntitiesMap;
try {
trace('\n starting test:');
try {
@@ -187,14 +187,11 @@ export async function runUnifiedTest(
for (const expectedEventList of test.expectEvents) {
const clientId = expectedEventList.client;
const eventType = expectedEventList.eventType;
- let actualEvents;
// If no event type is provided it defaults to 'command', so just
// check for 'cmap' here for now.
- if (eventType === 'cmap') {
- actualEvents = clientCmapEvents.get(clientId);
- } else {
- actualEvents = clientCommandEvents.get(clientId);
- }
+ const actualEvents =
+ eventType === 'cmap' ? clientCmapEvents.get(clientId) : clientCommandEvents.get(clientId);
+
expect(actualEvents, `No client entity found with id ${clientId}`).to.exist;
matchesEvents(expectedEventList.events, actualEvents, entities);
}
diff --git a/test/types/enum.test-d.ts b/test/types/enum.test-d.ts
index 225282f618..69ac0894ef 100644
--- a/test/types/enum.test-d.ts
+++ b/test/types/enum.test-d.ts
@@ -12,6 +12,7 @@ import {
ExplainVerbosity,
GSSAPICanonicalizationValue,
LoggerLevel,
+ MongoErrorLabel,
ProfilingLevel,
ReadConcernLevel,
ReadPreferenceMode,
expectType<ReturnDocument>(Object.values(ReturnDocument)[num]);
expectType<ServerApiVersion>(Object.values(ServerApiVersion)[num]);
expectType<ServerType>(Object.values(ServerType)[num]);
expectType<TopologyType>(Object.values(TopologyType)[num]);
+expectType<MongoErrorLabel>(Object.values(MongoErrorLabel)[num]);
diff --git a/test/unit/error.test.ts b/test/unit/error.test.ts
index 2bdb5f8479..9c66b92ed2 100644
--- a/test/unit/error.test.ts
+++ b/test/unit/error.test.ts
@@ -5,11 +5,12 @@ import {
WaitQueueTimeoutError as MongoWaitQueueTimeoutError
} from '../../src/cmap/errors';
import {
- isRetryableEndTransactionError,
+ isRetryableReadError,
isSDAMUnrecoverableError,
LEGACY_NOT_PRIMARY_OR_SECONDARY_ERROR_MESSAGE,
LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE,
MongoSystemError,
+ needsRetryableWriteLabel,
NODE_IS_RECOVERING_ERROR_MESSAGE
} from '../../src/error';
import * as importsFromErrorSrc from '../../src/error';
@@ -146,34 +147,6 @@ describe('MongoErrors', () => {
});
});
- describe('#isRetryableEndTransactionError', function () {
- context('when the error has a RetryableWriteError label', function () {
- const error = new MongoNetworkError('');
- error.addErrorLabel('RetryableWriteError');
-
- it('returns true', function () {
- expect(isRetryableEndTransactionError(error)).to.be.true;
- });
- });
-
- context('when the error does not have a RetryableWriteError label', function () {
- const error = new MongoNetworkError('');
- error.addErrorLabel('InvalidLabel');
-
- it('returns false', function () {
- expect(isRetryableEndTransactionError(error)).to.be.false;
- });
- });
-
- context('when the error does not have any label', function () {
- const error = new MongoNetworkError('');
-
- it('returns false', function () {
- expect(isRetryableEndTransactionError(error)).to.be.false;
- });
- });
- });
-
describe('#isSDAMUnrecoverableError', function () {
context('when the error is a MongoParseError', function () {
it('returns true', function () {
@@ -211,7 +184,7 @@ describe('MongoErrors', () => {
function () {
it('returns false', function () {
// If the response includes an error code, it MUST be solely used to determine if error is a "node is recovering" or "not writable primary" error.
- const error = new MongoError(NODE_IS_RECOVERING_ERROR_MESSAGE);
+ const error = new MongoError(NODE_IS_RECOVERING_ERROR_MESSAGE.source);
error.code = 555;
expect(isSDAMUnrecoverableError(error)).to.be.false;
});
@@ -222,7 +195,9 @@ describe('MongoErrors', () => {
'when the error message contains the legacy "not primary" message and no error code is used',
function () {
it('returns true', function () {
- const error = new MongoError(`this is ${LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE}.`);
+ const error = new MongoError(
+ `this is ${LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.source}.`
+ );
expect(isSDAMUnrecoverableError(error)).to.be.true;
});
}
@@ -362,10 +337,10 @@ describe('MongoErrors', () => {
return cleanup(err);
}
- topology.selectServer('primary', (err, server) => {
+ topology.selectServer('primary', {}, (err, server) => {
expect(err).to.not.exist;
- server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), err => {
+ server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}, err => {
let _err;
try {
expect(err).to.be.an.instanceOf(MongoWriteConcernError);
@@ -403,10 +378,10 @@ describe('MongoErrors', () => {
return cleanup(err);
}
- topology.selectServer('primary', (err, server) => {
+ topology.selectServer('primary', {}, (err, server) => {
expect(err).to.not.exist;
- server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), err => {
+ server.command(ns('db1'), Object.assign({}, RAW_USER_WRITE_CONCERN_CMD), {}, err => {
let _err;
try {
expect(err).to.be.an.instanceOf(MongoWriteConcernError);
@@ -424,4 +399,139 @@ describe('MongoErrors', () => {
});
});
});
+
+ describe('retryable errors', () => {
+ describe('#needsRetryableWriteLabel', () => {
+ // Note the wireVersions used below are used to represent
+ // 8 - below server version 4.4
+ // 9 - above server version 4.4
+
+ const ABOVE_4_4 = 9;
+ const BELOW_4_4 = 8;
+
+ const tests: {
+ description: string;
+ result: boolean;
+ error: Error;
+ maxWireVersion: number;
+ }[] = [
+ {
+ description: 'a plain error',
+ result: false,
+ error: new Error('do not retry me!'),
+ maxWireVersion: BELOW_4_4
+ },
+ {
+ description: 'a MongoError with no code nor label',
+ result: false,
+ error: new MongoError('do not retry me!'),
+ maxWireVersion: BELOW_4_4
+ },
+ {
+ description: 'network error',
+ result: true,
+ error: new MongoNetworkError('socket bad, try again'),
+ maxWireVersion: BELOW_4_4
+ },
+ {
+ description: 'a MongoWriteConcernError with no code nor label',
+ result: false,
+ error: new MongoWriteConcernError({ message: 'empty wc error' }),
+ maxWireVersion: BELOW_4_4
+ },
+ {
+ description: 'a MongoWriteConcernError with a random label',
+ result: false,
+ error: new MongoWriteConcernError(
+ { message: 'random label' },
+ { errorLabels: ['myLabel'] }
+ ),
+ maxWireVersion: BELOW_4_4
+ },
+ {
+ description: 'a MongoWriteConcernError with a retryable code above server 4.4',
+ result: false,
+ error: new MongoWriteConcernError({}, { code: 262 }),
+ maxWireVersion: ABOVE_4_4
+ },
+ {
+ description: 'a MongoWriteConcernError with a retryable code below server 4.4',
+ result: true,
+ error: new MongoWriteConcernError({}, { code: 262 }),
+ maxWireVersion: BELOW_4_4
+ },
+ {
+ description: 'a MongoWriteConcernError with a RetryableWriteError label below server 4.4',
+ result: true,
+ error: new MongoWriteConcernError({}, { errorLabels: ['RetryableWriteError'] }),
+ maxWireVersion: BELOW_4_4
+ },
+ {
+ description: 'a MongoWriteConcernError with a RetryableWriteError label above server 4.4',
+ result: false,
+ error: new MongoWriteConcernError({}, { errorLabels: ['RetryableWriteError'] }),
+ maxWireVersion: ABOVE_4_4
+ },
+ {
+ description: 'any MongoError with a RetryableWriteError label',
+ result: false,
+ error: (() => {
+ // These tests all use MongoWriteConcernError because
+ // its constructor is easier to call but any MongoError should work
+ const error = new MongoError('');
+ error.addErrorLabel('RetryableWriteError');
+ return error;
+ })(),
+ maxWireVersion: ABOVE_4_4
+ }
+ ];
+ for (const { description, result, error, maxWireVersion } of tests) {
+ it(`${description} ${result ? 'needs' : 'does not need'} a retryable write label`, () => {
+ expect(needsRetryableWriteLabel(error, maxWireVersion)).to.be.equal(result);
+ });
+ }
+ });
+
+ describe('#isRetryableReadError', () => {
+ const tests: { description: string; result: boolean; error: MongoError }[] = [
+ {
+ description: 'plain error',
+ result: false,
+ // @ts-expect-error: passing in a plain error to test false case
+ error: new Error('do not retry me!')
+ },
+ {
+ description: 'An error code that is not retryable',
+ result: false,
+ error: new MongoServerError({ message: '', code: 1 })
+ },
+ {
+ description: 'An error code that is retryable',
+ result: true,
+ error: new MongoServerError({ message: '', code: 91 })
+ },
+ {
+ description: 'network error',
+ result: true,
+ error: new MongoNetworkError('socket bad, try again')
+ },
+ {
+ description: 'error with legacy not writable primary error message',
+ result: true,
+ error: new MongoError(LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.source)
+ },
+ {
+ description: 'error with node is recovering error message',
+ result: true,
+ error: new MongoError('node is recovering')
+ }
+ ];
+
+ for (const { description, result, error } of tests) {
+ it(`${description} is${result ? '' : ' not'} a retryable read`, () => {
+ expect(isRetryableReadError(error)).to.be.equal(result);
+ });
+ }
+ });
+ });
});
diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js
index 55a3d12ce9..1f690c5ec7 100644
--- a/test/unit/sdam/topology.test.js
+++ b/test/unit/sdam/topology.test.js
@@ -173,7 +173,7 @@ describe('Topology (unit)', function () {
topology.connect(err => {
expect(err).to.not.exist;
- topology.selectServer('primary', (err, server) => {
+ topology.selectServer('primary', {}, (err, server) => {
expect(err).to.not.exist;
server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }, (err, result) => {
@@ -209,7 +209,7 @@ describe('Topology (unit)', function () {
topology.connect(err => {
expect(err).to.not.exist;
- topology.selectServer('primary', (err, server) => {
+ topology.selectServer('primary', {}, (err, server) => {
expect(err).to.not.exist;
let serverDescription;
@@ -218,7 +218,7 @@ describe('Topology (unit)', function () {
let poolCleared = false;
topology.on('connectionPoolCleared', () => (poolCleared = true));
- server.command(ns('test.test'), { insert: { a: 42 } }, (err, result) => {
+ server.command(ns('test.test'), { insert: { a: 42 } }, {}, (err, result) => {
expect(result).to.not.exist;
expect(err).to.exist;
expect(err).to.eql(serverDescription.error);
@@ -235,7 +235,7 @@ describe('Topology (unit)', function () {
if (isHello(doc)) {
request.reply(Object.assign({}, mock.HELLO, { maxWireVersion: 9 }));
} else if (doc.insert) {
- request.reply({ ok: 0, message: LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE });
+ request.reply({ ok: 0, message: LEGACY_NOT_WRITABLE_PRIMARY_ERROR_MESSAGE.source });
} else {
request.reply({ ok: 1 });
}
@@ -245,7 +245,7 @@ describe('Topology (unit)', function () {
topology.connect(err => {
expect(err).to.not.exist;
- topology.selectServer('primary', (err, server) => {
+ topology.selectServer('primary', {}, (err, server) => {
expect(err).to.not.exist;
let serverDescription;
@@ -254,7 +254,7 @@ describe('Topology (unit)', function () {
let poolCleared = false;
topology.on('connectionPoolCleared', () => (poolCleared = true));
- server.command(ns('test.test'), { insert: { a: 42 } }, (err, result) => {
+ server.command(ns('test.test'), { insert: { a: 42 } }, {}, (err, result) => {
expect(result).to.not.exist;
expect(err).to.exist;
expect(err).to.eql(serverDescription.error);
@@ -281,13 +281,13 @@ describe('Topology (unit)', function () {
topology.connect(err => {
expect(err).to.not.exist;
- topology.selectServer('primary', (err, server) => {
+ topology.selectServer('primary', {}, (err, server) => {
expect(err).to.not.exist;
let serverDescription;
server.on('descriptionReceived', sd => (serverDescription = sd));
- server.command(ns('test.test'), { insert: { a: 42 } }, (err, result) => {
+ server.command(ns('test.test'), { insert: { a: 42 } }, {}, (err, result) => {
expect(result).to.not.exist;
expect(err).to.exist;
expect(err).to.eql(serverDescription.error);
@@ -550,11 +550,11 @@ describe('Topology (unit)', function () {
preventSelection = true;
for (let i = 0; i < toSelect - 1; ++i) {
- topology.selectServer(i % 5 === 0 ? failingSelector : anySelector, finish);
+ topology.selectServer(i % 5 === 0 ? failingSelector : anySelector, {}, finish);
}
preventSelection = false;
- topology.selectServer(anySelector, finish);
+ topology.selectServer(anySelector, {}, finish);
});
});
});