diff --git a/.mocharc.json b/.mocharc.json index 41b4e1feed..fad554261e 100644 --- a/.mocharc.json +++ b/.mocharc.json @@ -5,8 +5,8 @@ "ts" ], "require": [ - "ts-node/register", "source-map-support/register", + "ts-node/register", "test/tools/runner/chai-addons", "test/tools/runner/circular-dep-hack" ], diff --git a/package-lock.json b/package-lock.json index b244900f17..9a00832cc5 100644 --- a/package-lock.json +++ b/package-lock.json @@ -24,6 +24,7 @@ "@types/node": "^16.10.3", "@types/saslprep": "^1.0.1", "@types/semver": "^7.3.8", + "@types/sinon": "^10.0.6", "@types/whatwg-url": "^8.2.1", "@typescript-eslint/eslint-plugin": "^4.33.0", "@typescript-eslint/parser": "^4.33.0", @@ -46,7 +47,7 @@ "prettier": "^2.4.1", "rimraf": "^3.0.2", "semver": "^7.3.5", - "sinon": "^11.1.2", + "sinon": "^12.0.1", "sinon-chai": "^3.7.0", "source-map-support": "^0.5.20", "standard-version": "^9.3.1", @@ -1040,6 +1041,15 @@ "integrity": "sha512-D/2EJvAlCEtYFEYmmlGwbGXuK886HzyCc3nZX/tkFTQdEU8jZDAgiv08P162yB17y4ZXZoq7yFAnW4GDBb9Now==", "dev": true }, + "node_modules/@types/sinon": { + "version": "10.0.6", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.6.tgz", + "integrity": "sha512-6EF+wzMWvBNeGrfP3Nx60hhx+FfwSg1JJBLAAP/IdIUq0EYkqCYf70VT3PhuhPX9eLD+Dp+lNdpb/ZeHG8Yezg==", + "dev": true, + "dependencies": { + "@sinonjs/fake-timers": "^7.1.0" + } + }, "node_modules/@types/webidl-conversions": { "version": "6.1.1", "resolved": "https://registry.npmjs.org/@types/webidl-conversions/-/webidl-conversions-6.1.1.tgz", @@ -6020,13 +6030,13 @@ } }, "node_modules/sinon": { - "version": "11.1.2", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-11.1.2.tgz", - "integrity": "sha512-59237HChms4kg7/sXhiRcUzdSkKuydDeTiamT/jesUVHshBgL8XAmhgFo0GfK6RruMDM/iRSij1EybmMog9cJw==", + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/sinon/-/sinon-12.0.1.tgz", + "integrity": "sha512-iGu29Xhym33ydkAT+aNQFBINakjq69kKO6ByPvTsm3yyIACfyQttRTP03aBP/I8GfhFmLzrnKwNNkr0ORb1udg==", "dev": true, "dependencies": { "@sinonjs/commons": "^1.8.3", - "@sinonjs/fake-timers": "^7.1.2", + "@sinonjs/fake-timers": "^8.1.0", "@sinonjs/samsam": "^6.0.2", "diff": "^5.0.0", "nise": "^5.1.0", @@ -6047,6 +6057,15 @@ "sinon": ">=4.0.0" } }, + "node_modules/sinon/node_modules/@sinonjs/fake-timers": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-8.1.0.tgz", + "integrity": "sha512-OAPJUAtgeINhh/TAlUID4QTs53Njm7xzddaVlEs/SXwgtiD1tW22zAB/W1wdqfrpmikgaWQ9Fw6Ws+hsiRm5Vg==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^1.7.0" + } + }, "node_modules/slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", @@ -8334,6 +8353,15 @@ "integrity": "sha512-D/2EJvAlCEtYFEYmmlGwbGXuK886HzyCc3nZX/tkFTQdEU8jZDAgiv08P162yB17y4ZXZoq7yFAnW4GDBb9Now==", "dev": true }, + "@types/sinon": { + "version": "10.0.6", + "resolved": "https://registry.npmjs.org/@types/sinon/-/sinon-10.0.6.tgz", + "integrity": "sha512-6EF+wzMWvBNeGrfP3Nx60hhx+FfwSg1JJBLAAP/IdIUq0EYkqCYf70VT3PhuhPX9eLD+Dp+lNdpb/ZeHG8Yezg==", + "dev": true, + "requires": { + "@sinonjs/fake-timers": "^7.1.0" + } + }, "@types/webidl-conversions": { "version": "6.1.1", "resolved": "https://registry.npmjs.org/@types/webidl-conversions/-/webidl-conversions-6.1.1.tgz", @@ -12089,17 +12117,28 @@ } }, "sinon": { - "version": "11.1.2", - "resolved": "https://registry.npmjs.org/sinon/-/sinon-11.1.2.tgz", - "integrity": 
"sha512-59237HChms4kg7/sXhiRcUzdSkKuydDeTiamT/jesUVHshBgL8XAmhgFo0GfK6RruMDM/iRSij1EybmMog9cJw==", + "version": "12.0.1", + "resolved": "https://registry.npmjs.org/sinon/-/sinon-12.0.1.tgz", + "integrity": "sha512-iGu29Xhym33ydkAT+aNQFBINakjq69kKO6ByPvTsm3yyIACfyQttRTP03aBP/I8GfhFmLzrnKwNNkr0ORb1udg==", "dev": true, "requires": { "@sinonjs/commons": "^1.8.3", - "@sinonjs/fake-timers": "^7.1.2", + "@sinonjs/fake-timers": "^8.1.0", "@sinonjs/samsam": "^6.0.2", "diff": "^5.0.0", "nise": "^5.1.0", "supports-color": "^7.2.0" + }, + "dependencies": { + "@sinonjs/fake-timers": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-8.1.0.tgz", + "integrity": "sha512-OAPJUAtgeINhh/TAlUID4QTs53Njm7xzddaVlEs/SXwgtiD1tW22zAB/W1wdqfrpmikgaWQ9Fw6Ws+hsiRm5Vg==", + "dev": true, + "requires": { + "@sinonjs/commons": "^1.7.0" + } + } } }, "sinon-chai": { diff --git a/package.json b/package.json index 095fa01dd0..7c0c60aa6b 100644 --- a/package.json +++ b/package.json @@ -47,6 +47,7 @@ "@types/node": "^16.10.3", "@types/saslprep": "^1.0.1", "@types/semver": "^7.3.8", + "@types/sinon": "^10.0.6", "@types/whatwg-url": "^8.2.1", "@typescript-eslint/eslint-plugin": "^4.33.0", "@typescript-eslint/parser": "^4.33.0", @@ -69,7 +70,7 @@ "prettier": "^2.4.1", "rimraf": "^3.0.2", "semver": "^7.3.5", - "sinon": "^11.1.2", + "sinon": "^12.0.1", "sinon-chai": "^3.7.0", "source-map-support": "^0.5.20", "standard-version": "^9.3.1", diff --git a/src/connection_string.ts b/src/connection_string.ts index c188e2df85..ed897000dc 100644 --- a/src/connection_string.ts +++ b/src/connection_string.ts @@ -75,7 +75,7 @@ export function resolveSRVRecord(options: MongoOptions, callback: Callback { + dns.resolveSrv(`_${options.srvServiceName}._tcp.${lookupAddress}`, (err, addresses) => { if (err) return callback(err); if (addresses.length === 0) { @@ -92,7 +92,7 @@ export function resolveSRVRecord(options: MongoOptions, callback: Callback txtRecordOptions.get(option) === '')) { + return callback(new MongoParseError('Cannot have empty URI params in DNS TXT Record')); + } + const source = txtRecordOptions.get('authSource') ?? undefined; const replicaSet = txtRecordOptions.get('replicaSet') ?? undefined; const loadBalanced = txtRecordOptions.get('loadBalanced') ?? undefined; - if (source === '' || replicaSet === '') { - return callback(new MongoParseError('Cannot have empty URI params in DNS TXT Record')); - } - if (!options.userSpecifiedAuthSource && source) { options.credentials = MongoCredentials.merge(options.credentials, { source }); } @@ -136,7 +136,11 @@ export function resolveSRVRecord(options: MongoOptions, callback: Callback 0) { + return callback(new MongoParseError('Cannot combine replicaSet option with srvMaxHosts')); + } + + const lbError = validateLoadBalancedOptions(hostAddresses, options, true); if (lbError) { return callback(lbError); } @@ -251,13 +255,6 @@ export function parseOptions( const mongoOptions = Object.create(null); mongoOptions.hosts = isSRV ? 
[] : hosts.map(HostAddress.fromString); - if (isSRV) { - // SRV Record is resolved upon connecting - mongoOptions.srvHost = hosts[0]; - if (!url.searchParams.has('tls') && !url.searchParams.has('ssl')) { - options.tls = true; - } - } const urlOptions = new CaseInsensitiveMap(); @@ -289,30 +286,34 @@ export function parseOptions( throw new MongoAPIError('URI cannot contain options with no value'); } - if (key.toLowerCase() === 'serverapi') { - throw new MongoParseError( - 'URI cannot contain `serverApi`, it can only be passed to the client' - ); - } - - if (key.toLowerCase() === 'authsource' && urlOptions.has('authSource')) { - // If authSource is an explicit key in the urlOptions we need to remove the implicit dbName - urlOptions.delete('authSource'); - } - if (!urlOptions.has(key)) { urlOptions.set(key, values); } } + if (urlOptions.has('authSource')) { + // If authSource is an explicit key in the urlOptions we need to remove the dbName + urlOptions.delete('dbName'); + } + const objectOptions = new CaseInsensitiveMap( Object.entries(options).filter(([, v]) => v != null) ); + // Validate options that can only be provided by one of uri or object + + if (urlOptions.has('serverApi')) { + throw new MongoParseError( + 'URI cannot contain `serverApi`, it can only be passed to the client' + ); + } + if (objectOptions.has('loadBalanced')) { throw new MongoParseError('loadBalanced is only a valid option in the URI'); } + // All option collection + const allOptions = new CaseInsensitiveMap(); const allKeys = new Set([ @@ -360,6 +361,8 @@ export function parseOptions( ); } + // Option parsing and setting + for (const [key, descriptor] of Object.entries(OPTIONS)) { const values = allOptions.get(key); if (!values || values.length === 0) continue; @@ -401,25 +404,53 @@ export function parseOptions( if (options.promiseLibrary) PromiseProvider.set(options.promiseLibrary); - if (mongoOptions.directConnection && typeof mongoOptions.srvHost === 'string') { - throw new MongoAPIError('SRV URI does not support directConnection'); - } - - const lbError = validateLoadBalancedOptions(hosts, mongoOptions); + const lbError = validateLoadBalancedOptions(hosts, mongoOptions, isSRV); if (lbError) { throw lbError; } + if (mongoClient && mongoOptions.autoEncryption) { + Encrypter.checkForMongoCrypt(); + mongoOptions.encrypter = new Encrypter(mongoClient, uri, options); + mongoOptions.autoEncrypter = mongoOptions.encrypter.autoEncrypter; + } + + // Potential SRV Overrides and SRV connection string validations - // Potential SRV Overrides mongoOptions.userSpecifiedAuthSource = objectOptions.has('authSource') || urlOptions.has('authSource'); mongoOptions.userSpecifiedReplicaSet = objectOptions.has('replicaSet') || urlOptions.has('replicaSet'); - if (mongoClient && mongoOptions.autoEncryption) { - Encrypter.checkForMongoCrypt(); - mongoOptions.encrypter = new Encrypter(mongoClient, uri, options); - mongoOptions.autoEncrypter = mongoOptions.encrypter.autoEncrypter; + if (isSRV) { + // SRV Record is resolved upon connecting + mongoOptions.srvHost = hosts[0]; + + if (mongoOptions.directConnection) { + throw new MongoAPIError('SRV URI does not support directConnection'); + } + + if (mongoOptions.srvMaxHosts > 0 && typeof mongoOptions.replicaSet === 'string') { + throw new MongoParseError('Cannot use srvMaxHosts option with replicaSet'); + } + + // SRV turns on TLS by default, but users can override and turn it off + const noUserSpecifiedTLS = !objectOptions.has('tls') && !urlOptions.has('tls'); + const noUserSpecifiedSSL = 
!objectOptions.has('ssl') && !urlOptions.has('ssl'); + if (noUserSpecifiedTLS && noUserSpecifiedSSL) { + mongoOptions.tls = true; + } + } else { + const userSpecifiedSrvOptions = + urlOptions.has('srvMaxHosts') || + objectOptions.has('srvMaxHosts') || + urlOptions.has('srvServiceName') || + objectOptions.has('srvServiceName'); + + if (userSpecifiedSrvOptions) { + throw new MongoParseError( + 'Cannot use srvMaxHosts or srvServiceName with a non-srv connection string' + ); + } } return mongoOptions; @@ -427,7 +458,8 @@ export function parseOptions( function validateLoadBalancedOptions( hosts: HostAddress[] | string[], - mongoOptions: MongoOptions + mongoOptions: MongoOptions, + isSrv: boolean ): MongoParseError | undefined { if (mongoOptions.loadBalanced) { if (hosts.length > 1) { @@ -439,6 +471,10 @@ function validateLoadBalancedOptions( if (mongoOptions.directConnection) { return new MongoParseError(LB_DIRECT_CONNECTION_ERROR); } + + if (isSrv && mongoOptions.srvMaxHosts > 0) { + return new MongoParseError('Cannot limit srv hosts with loadBalanced enabled'); + } } } @@ -924,6 +960,14 @@ export const OPTIONS = { default: 0, type: 'uint' }, + srvMaxHosts: { + type: 'uint', + default: 0 + }, + srvServiceName: { + type: 'string', + default: 'mongodb' + }, ssl: { target: 'tls', type: 'boolean' diff --git a/src/mongo_client.ts b/src/mongo_client.ts index b57563f51d..7e4ccfef9b 100644 --- a/src/mongo_client.ts +++ b/src/mongo_client.ts @@ -132,6 +132,16 @@ export interface MongoClientOptions extends BSONSerializeOptions, SupportedNodeC compressors?: CompressorName[] | string; /** An integer that specifies the compression level if using zlib for network compression. */ zlibCompressionLevel?: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | undefined; + /** The maximum number of hosts to connect to when using an srv connection string, a setting of `0` means unlimited hosts */ + srvMaxHosts?: number; + /** + * Modifies the srv URI to look like: + * + * `_{srvServiceName}._tcp.{hostname}.{domainname}` + * + * Querying this DNS URI is expected to respond with SRV records + */ + srvServiceName?: string; /** The maximum number of connections in the connection pool. */ maxPoolSize?: number; /** The minimum number of connections in the connection pool. 
*/ @@ -643,6 +653,8 @@ export interface MongoOptions | 'retryWrites' | 'serverSelectionTimeoutMS' | 'socketTimeoutMS' + | 'srvMaxHosts' + | 'srvServiceName' | 'tlsAllowInvalidCertificates' | 'tlsAllowInvalidHostnames' | 'tlsInsecure' diff --git a/src/sdam/srv_polling.ts b/src/sdam/srv_polling.ts index 119bcc597b..aaa0f7fea2 100644 --- a/src/sdam/srv_polling.ts +++ b/src/sdam/srv_polling.ts @@ -29,18 +29,15 @@ export class SrvPollingEvent { this.srvRecords = srvRecords; } - addresses(): Map { - return new Map( - this.srvRecords.map(record => { - const host = new HostAddress(`${record.name}:${record.port}`); - return [host.toString(), host]; - }) - ); + hostnames(): Set { + return new Set(this.srvRecords.map(r => HostAddress.fromSrvRecord(r).toString())); } } /** @internal */ export interface SrvPollerOptions extends LoggerOptions { + srvServiceName: string; + srvMaxHosts: number; srvHost: string; heartbeatFrequencyMS: number; } @@ -58,6 +55,8 @@ export class SrvPoller extends TypedEventEmitter { logger: Logger; haMode: boolean; generation: number; + srvMaxHosts: number; + srvServiceName: string; _timeout?: NodeJS.Timeout; /** @event */ @@ -71,8 +70,10 @@ export class SrvPoller extends TypedEventEmitter { } this.srvHost = options.srvHost; + this.srvMaxHosts = options.srvMaxHosts ?? 0; + this.srvServiceName = options.srvServiceName ?? 'mongodb'; this.rescanSrvIntervalMS = 60000; - this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; + this.heartbeatFrequencyMS = options.heartbeatFrequencyMS ?? 10000; this.logger = new Logger('srvPoller', options); this.haMode = false; @@ -82,7 +83,7 @@ export class SrvPoller extends TypedEventEmitter { } get srvAddress(): string { - return `_mongodb._tcp.${this.srvHost}`; + return `_${this.srvServiceName}._tcp.${this.srvHost}`; } get intervalMS(): number { @@ -143,13 +144,13 @@ export class SrvPoller extends TypedEventEmitter { } const finalAddresses: dns.SrvRecord[] = []; - srvRecords.forEach((record: dns.SrvRecord) => { + for (const record of srvRecords) { if (matchesParentDomain(record.name, this.srvHost)) { finalAddresses.push(record); } else { this.parentDomainMismatch(record); } - }); + } if (!finalAddresses.length) { this.failure('No valid addresses found at host'); diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index dadb2d1dc6..1c9aeb26b7 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -27,7 +27,8 @@ import { HostAddress, ns, emitWarning, - EventEmitterWithState + EventEmitterWithState, + shuffle } from '../utils'; import { TopologyType, @@ -140,6 +141,8 @@ export interface TopologyPrivate { /** @public */ export interface TopologyOptions extends BSONSerializeOptions, ServerOptions { + srvMaxHosts: number; + srvServiceName: string; hosts: HostAddress[]; retryWrites: boolean; retryReads: boolean; @@ -290,8 +293,15 @@ export class Topology extends TypedEventEmitter { const topologyType = topologyTypeFromOptions(options); const topologyId = globalTopologyCounter++; + const selectedHosts = + options.srvMaxHosts == null || + options.srvMaxHosts === 0 || + options.srvMaxHosts >= seedlist.length + ? seedlist + : shuffle(seedlist, options.srvMaxHosts); + const serverDescriptions = new Map(); - for (const hostAddress of seedlist) { + for (const hostAddress of selectedHosts) { serverDescriptions.set(hostAddress.toString(), new ServerDescription(hostAddress)); } @@ -339,7 +349,9 @@ export class Topology extends TypedEventEmitter { options.srvPoller ?? 
new SrvPoller({ heartbeatFrequencyMS: this.s.heartbeatFrequencyMS, - srvHost: options.srvHost + srvHost: options.srvHost, + srvMaxHosts: options.srvMaxHosts, + srvServiceName: options.srvServiceName }); this.on(Topology.TOPOLOGY_DESCRIPTION_CHANGED, this.s.detectShardedTopology); @@ -363,7 +375,10 @@ export class Topology extends TypedEventEmitter { private detectSrvRecords(ev: SrvPollingEvent) { const previousTopologyDescription = this.s.description; - this.s.description = this.s.description.updateFromSrvPollingEvent(ev); + this.s.description = this.s.description.updateFromSrvPollingEvent( + ev, + this.s.options.srvMaxHosts + ); if (this.s.description === previousTopologyDescription) { // Nothing changed, so return return; diff --git a/src/sdam/topology_description.ts b/src/sdam/topology_description.ts index 60ad5e689f..4d48dfc451 100644 --- a/src/sdam/topology_description.ts +++ b/src/sdam/topology_description.ts @@ -4,6 +4,7 @@ import { TopologyType, ServerType } from './common'; import type { ObjectId, Document } from '../bson'; import type { SrvPollingEvent } from './srv_polling'; import { MongoError, MongoRuntimeError } from '../error'; +import { shuffle } from '../utils'; // constants related to compatibility checks const MIN_SUPPORTED_SERVER_VERSION = WIRE_CONSTANTS.MIN_SUPPORTED_SERVER_VERSION; @@ -139,23 +140,46 @@ export class TopologyDescription { * Returns a new TopologyDescription based on the SrvPollingEvent * @internal */ - updateFromSrvPollingEvent(ev: SrvPollingEvent): TopologyDescription { - const newAddresses = ev.addresses(); - const serverDescriptions = new Map(this.servers); - for (const address of this.servers.keys()) { - if (newAddresses.has(address)) { - newAddresses.delete(address); - } else { - serverDescriptions.delete(address); + updateFromSrvPollingEvent(ev: SrvPollingEvent, srvMaxHosts = 0): TopologyDescription { + /** The SRV addresses defines the set of addresses we should be using */ + const incomingHostnames = ev.hostnames(); + const currentHostnames = new Set(this.servers.keys()); + + const hostnamesToAdd = new Set(incomingHostnames); + const hostnamesToRemove = new Set(); + for (const hostname of currentHostnames) { + // filter hostnamesToAdd (made from incomingHostnames) down to what is *not* present in currentHostnames + hostnamesToAdd.delete(hostname); + if (!incomingHostnames.has(hostname)) { + // If the SRV Records no longer include this hostname + // we have to stop using it + hostnamesToRemove.add(hostname); } } - if (serverDescriptions.size === this.servers.size && newAddresses.size === 0) { + if (hostnamesToAdd.size === 0 && hostnamesToRemove.size === 0) { + // No new hosts to add and none to remove return this; } - for (const [address, host] of newAddresses) { - serverDescriptions.set(address, new ServerDescription(host)); + const serverDescriptions = new Map(this.servers); + for (const removedHost of hostnamesToRemove) { + serverDescriptions.delete(removedHost); + } + + if (hostnamesToAdd.size > 0) { + if (srvMaxHosts === 0) { + // Add all! 
+ for (const hostToAdd of hostnamesToAdd) { + serverDescriptions.set(hostToAdd, new ServerDescription(hostToAdd)); + } + } else if (serverDescriptions.size < srvMaxHosts) { + // Add only the amount needed to get us back to srvMaxHosts + const selectedHosts = shuffle(hostnamesToAdd, srvMaxHosts - serverDescriptions.size); + for (const selectedHostToAdd of selectedHosts) { + serverDescriptions.set(selectedHostToAdd, new ServerDescription(selectedHostToAdd)); + } + } } return new TopologyDescription( diff --git a/src/utils.ts b/src/utils.ts index eb275e862d..cb5de9b020 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -28,6 +28,7 @@ import type { CommandOperationOptions, OperationParent } from './operations/comm import { ReadPreference } from './read_preference'; import { URL } from 'url'; import { MAX_SUPPORTED_WIRE_VERSION } from './cmap/wire_protocol/constants'; +import type { SrvRecord } from 'dns'; /** * MongoDB Driver style callback @@ -1095,8 +1096,9 @@ export function isSuperset(set: Set | any[], subset: Set | any[]): boo return true; } -export function setDifference(setA: Iterable, setB: Iterable): Set { - const difference = new Set(setA); +/** Returns the items that are uniquely in setA */ +export function setDifference(setA: Iterable, setB: Iterable): Set { + const difference = new Set(setA); for (const elem of setB) { difference.delete(elem); } @@ -1319,6 +1321,14 @@ export class HostAddress { Object.freeze(this); } + [Symbol.for('nodejs.util.inspect.custom')](): string { + return this.inspect(); + } + + inspect(): string { + return `new HostAddress('${this.toString(true)}')`; + } + /** * @param ipv6Brackets - optionally request ipv6 bracket notation required for connection strings */ @@ -1335,6 +1345,10 @@ export class HostAddress { static fromString(s: string): HostAddress { return new HostAddress(s); } + + static fromSrvRecord({ name, port }: SrvRecord): HostAddress { + return HostAddress.fromString(`${name}:${port}`); + } } export const DEFAULT_PK_FACTORY = { @@ -1405,3 +1419,33 @@ export function parsePackageVersion({ version }: { version: string }): { const [major, minor, patch] = version.split('.').map((n: string) => Number.parseInt(n, 10)); return { major, minor, patch }; } + +/** + * Fisher–Yates Shuffle + * + * Reference: https://bost.ocks.org/mike/shuffle/ + * @param sequence - items to be shuffled + * @param limit - Defaults to `0`. If nonzero shuffle will slice the randomized array e.g, `.slice(0, limit)` otherwise will return the entire randomized array. + */ +export function shuffle(sequence: Iterable, limit = 0): Array { + const items = Array.from(sequence); // shallow copy in order to never shuffle the input + + if (limit > items.length) { + throw new MongoRuntimeError('Limit must be less than the number of items'); + } + + let remainingItemsToShuffle = items.length; + const lowerBound = limit % items.length === 0 ? 1 : items.length - limit; + while (remainingItemsToShuffle > lowerBound) { + // Pick a remaining element + const randomIndex = Math.floor(Math.random() * remainingItemsToShuffle); + remainingItemsToShuffle -= 1; + + // And swap it with the current element + const swapHold = items[remainingItemsToShuffle]; + items[remainingItemsToShuffle] = items[randomIndex]; + items[randomIndex] = swapHold; + } + + return limit % items.length === 0 ? 
items : items.slice(lowerBound); +} diff --git a/test/functional/connection_leak_detector.js b/test/functional/connection_leak_detector.js deleted file mode 100644 index 38b3fa44bf..0000000000 --- a/test/functional/connection_leak_detector.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict'; - -// Disabled for now b/c it conflicts with session leak tests - -// before(function() { -// this.client = this.configuration.newClient({}, { maxPoolSize: 1 }); - -// return Promise.resolve() -// .then(() => this.client.connect()) -// .then(() => { -// this.adminDb = this.client.db(this.configuration.db).admin(); -// return this.adminDb.serverStatus(); -// }) -// .then(serverStatus => { -// this._currentConnections = serverStatus.connections.current; -// this._connectionChangedTests = []; -// }); -// }); - -// beforeEach(function() { -// return Promise.resolve() -// .then(() => this.adminDb.serverStatus()) -// .then(serverStatus => { -// this._currentConnections = serverStatus.connections.current; -// }); -// }); - -// afterEach(function() { -// return Promise.resolve() -// .then(() => this.adminDb.serverStatus()) -// .then(serverStatus => { -// const currentConnections = serverStatus.connections.current; -// if (this._currentConnections !== currentConnections) { -// console.log('connections: ', this._currentConnections, '-->', currentConnections); -// this._connectionChangedTests.push({ -// test: this.currentTest, -// previous: this._currentConnections, -// current: currentConnections -// }); -// } - -// this._currentConnections = currentConnections; -// }); -// }); - -// after(function() { -// return this.client.close().then(() => { -// if (this._connectionChangedTests.length) { -// console.group('The following tests had unstable connection counts:'); -// console.log('| previous | current | name |'); -// console.log('| -------- | ---- | ---- |'); -// this._connectionChangedTests.forEach(({ test, previous, current }) => { -// const name = test.fullTitle(); -// previous = previous.toString(10).padStart(8); -// current = current.toString(10).padStart(4); -// console.log(`| ${previous} | ${current} | ${name} |`); -// }); -// console.groupEnd(); -// } -// }); -// }); diff --git a/test/functional/uri_options_spec.test.js b/test/functional/uri_options_spec.test.js index e9e8f135fc..dd34322331 100644 --- a/test/functional/uri_options_spec.test.js +++ b/test/functional/uri_options_spec.test.js @@ -1,16 +1,20 @@ 'use strict'; -const chai = require('chai'); -const expect = chai.expect; -chai.use(require('chai-subset')); +const { expect } = require('chai'); +const { promisify } = require('util'); +require('chai').use(require('chai-subset')); -const { parseOptions } = require('../../src/connection_string'); +const { parseOptions, resolveSRVRecord } = require('../../src/connection_string'); +const { MongoParseError } = require('../../src/error'); const { loadSpecTests } = require('../spec'); describe('URI Options (spec)', function () { - loadSpecTests('uri-options').forEach(suite => { + const uriSpecs = loadSpecTests('uri-options'); + + // FIXME(NODE-3738): URI tests do not correctly assert whether they error or not + for (const suite of uriSpecs) { describe(suite.name, () => { - suite.tests.forEach(test => { + for (const test of suite.tests) { const itFn = test.warning ? 
it.skip : it; itFn(`${test.description}`, function () { @@ -24,10 +28,46 @@ describe('URI Options (spec)', function () { expect(options).to.containSubset(test.options); } } catch (err) { - expect(err).to.be.an.instanceof(Error); + if (test.warning === false || test.valid === true) { + // This test is supposed to not throw an error, we skip here for now (NODE-3738) + this.skip(); + } + expect(err).to.be.an.instanceof(MongoParseError); } }); - }); + } }); + } + + describe('srvMaxHost manual testing', function () { + const srvMaxHostTests = uriSpecs.find(testFolder => testFolder.name === 'srv-options').tests; + + for (const test of srvMaxHostTests) { + it(test.description, async function () { + let thrownError; + let driverOptions; + let hosts; + try { + driverOptions = parseOptions(test.uri); + hosts = await promisify(resolveSRVRecord)(driverOptions); + } catch (error) { + thrownError = error; + } + + if (test.valid === false || test.warning === true) { + // We implement warnings as errors + expect(thrownError).to.be.instanceOf(MongoParseError); + expect(hosts).to.not.exist; + return; // Nothing more to test... + } + + expect(thrownError).to.not.exist; + expect(driverOptions).to.exist; + + for (const [testOptionKey, testOptionValue] of Object.entries(test.options)) { + expect(driverOptions).to.have.property(testOptionKey, testOptionValue); + } + }); + } }); }); diff --git a/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.spec.test.ts b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.spec.test.ts new file mode 100644 index 0000000000..7526727e4b --- /dev/null +++ b/test/integration/initial-dns-seedlist-discovery/initial_dns_seedlist_discovery.spec.test.ts @@ -0,0 +1,152 @@ +import * as fs from 'fs'; +import * as path from 'path'; +import * as dns from 'dns'; +import { expect } from 'chai'; +import { promisify } from 'util'; +import { HostAddress } from '../../../src/utils'; +import { MongoClient } from '../../../src'; + +function makeTest(test, topology) { + let client; + + afterEach(async () => { + if (client) { + await client.close(); + client = undefined; + } + }); + + it(test.comment, async function () { + return this.skip(); // FIXME(NODE-3757): These tests require specific environment setups, also the error cases need better assertions + if (topology === 'replica-set' && this.configuration.topologyType !== 'ReplicaSetWithPrimary') { + return this.skip(); + } + + if (topology === 'sharded' && this.configuration.topologyType !== 'sharded') { + return this.skip(); + } + + if (topology === 'load-balanced' && this.configuration.topologyType !== 'load-balanced') { + return this.skip(); + } + + let thrownError; + try { + client = new MongoClient(test.uri, { serverSelectionTimeoutMS: 2000, tls: false }); + await client.connect(); + } catch (error) { + thrownError = error; + } + + if (test.error) { + expect(thrownError).to.exist; + return; // Nothing more to test... + } + + const options = client.options; + const hosts = Array.from(client.topology.s.description.servers.keys()); + + expect(thrownError).to.not.exist; + expect(options).to.exist; + + // Implicit SRV options must be set. 
+ expect(options.directConnection).to.be.false; + const testOptions = test.options; + if (testOptions && 'tls' in testOptions) { + expect(options).to.have.property('tls', testOptions.tls); + } else if (testOptions && 'ssl' in testOptions) { + expect(options).to.have.property('tls', testOptions.ssl); + } else { + expect(options.tls).to.be.true; + } + if (testOptions && testOptions.replicaSet) { + expect(options).to.have.property('replicaSet', testOptions.replicaSet); + } + if (testOptions && testOptions.authSource) { + expect(options).to.have.property('credentials'); + expect(options.credentials.source).to.equal(testOptions.authSource); + } + if (testOptions && testOptions.loadBalanced) { + expect(options).to.have.property('loadBalanced', testOptions.loadBalanced); + } + if (test.parsed_options && test.parsed_options.user && test.parsed_options.password) { + expect(options.credentials.username).to.equal(test.parsed_options.user); + expect(options.credentials.password).to.equal(test.parsed_options.password); + } + + // srvMaxHost limiting happens in the topology constructor + if (options.srvHost && test.comment.includes('srvMaxHosts')) { + const initialSeedlist = hosts.map(h => h.toString()); + const selectedHosts = Array.from(topology.s.description.servers.keys()); + + if (typeof test.numSeeds === 'number') { + // numSeeds: the expected number of initial seeds discovered from the SRV record. + expect(initialSeedlist).to.have.lengthOf(test.numSeeds); + } + if (typeof test.numHosts === 'number') { + // numHosts: the expected number of hosts discovered once SDAM completes a scan. + // (In our case, its the Topology constructor, but not actual SDAM) + expect(selectedHosts).to.have.lengthOf(test.numHosts); + } + + if (Array.isArray(test.seeds)) { + // verify that the set of hosts in the client's initial seedlist + // matches the list in seeds + expect(initialSeedlist).to.deep.equal(test.seeds); + } + if (Array.isArray(test.hosts)) { + // verify that the set of ServerDescriptions in the client's TopologyDescription + // eventually matches the list in hosts + const actualAddresses = await Promise.all( + selectedHosts + .map(async hn => await promisify(dns.lookup)(HostAddress.fromString(hn).host)) + .map(async (addr, i) => { + let address = (await addr).address; + address = address === '127.0.0.1' ? 'localhost' : address; + return HostAddress.fromString( + `${address}:${HostAddress.fromString(selectedHosts[i]).port}` + ).toString(); + }) + ); + + expect(actualAddresses).to.deep.equal(test.hosts); + } + } + }); +} + +function readTestFilesFor(topology) { + const specPath = path.join(__dirname, '../../spec', 'initial-dns-seedlist-discovery', topology); + const testFiles = fs + .readdirSync(specPath) + .filter(x => x.indexOf('.json') !== -1) + .map(x => [x, fs.readFileSync(path.join(specPath, x), 'utf8')]) + .map(x => { + const test = JSON.parse(x[1]); + const fileName = path.basename(x[0], '.json'); + if (!test.comment) { + test.comment = fileName; + } + return [fileName, test]; + }); + return testFiles; +} + +/** + * The tests in the replica-set directory MUST be executed against a three-node replica set on localhost ports 27017, 27018, and 27019 with replica set name repl0. + * The tests in the load-balanced directory MUST be executed against a load-balanced sharded cluster with the mongos servers running on localhost ports 27017 and 27018 (corresponding to the script in drivers-evergreen-tools). + * The load balancers, shard servers, and config servers may run on any open ports. 
+ * The tests in the sharded directory MUST be executed against a sharded cluster with the mongos servers running on localhost ports 27017 and 27018. Shard servers and config servers may run on any open ports. + * In all cases, the clusters MUST be started with SSL enabled. + * To run the tests that accompany this spec, you need to configure the SRV and TXT records with a real name server. The following records are required for these tests: + */ +describe('Initial DNS Seedlist Discovery', () => { + for (const topology of ['replica-set', 'load-balanced', 'sharded']) { + describe(topology, function () { + const testFiles = readTestFilesFor(topology); + for (const [, test] of testFiles) { + makeTest(test, topology); + } + }); + } +}); diff --git a/test/tools/mock.js b/test/tools/mock.js index 72dede7826..c13ec860cc 100644 --- a/test/tools/mock.js +++ b/test/tools/mock.js @@ -49,9 +49,9 @@ const { HostAddress } = require('../../src/utils'); /** * Make a mock mongodb server. * - * @param {number} port - port number - * @param {string} host - address - * @param {object} options - options + * @param {number} [port] - port number + * @param {string} [host] - address + * @param {object} [options] - options * @returns {Promise} */ function createServer(port, host, options) { diff --git a/test/tools/utils.js b/test/tools/utils.js index b5101ed046..985e1c5d78 100644 --- a/test/tools/utils.js +++ b/test/tools/utils.js @@ -337,7 +337,17 @@ const runLater = (fn, ms) => { }); }; +const sleep = ms => new Promise(resolve => setTimeout(resolve, ms)); + +/** + * If you are using sinon fake timers, it can end up blocking queued IO from running + * awaiting a nextTick call will allow the event loop to process Networking/FS callbacks + */ +const processTick = () => new Promise(resolve => process.nextTick(resolve)); + module.exports = { + processTick, + sleep, runLater, ejson, EventCollector, diff --git a/test/unit/connection_string.test.js b/test/unit/connection_string.test.js index 647599c64e..e17276e9c2 100644 --- a/test/unit/connection_string.test.js +++ b/test/unit/connection_string.test.js @@ -1,10 +1,8 @@ 'use strict'; -const fs = require('fs'); -const path = require('path'); const { MongoParseError, MongoDriverError, MongoInvalidArgumentError } = require('../../src/error'); const { loadSpecTests } = require('../spec'); -const { parseOptions, resolveSRVRecord } = require('../../src/connection_string'); +const { parseOptions } = require('../../src/connection_string'); const { AuthMechanism } = require('../../src/cmap/auth/defaultAuthProviders'); const { expect } = require('chai'); @@ -25,8 +23,6 @@ const skipTests = [ 'Deprecated (or unknown) options are ignored if replacement exists' ]; -const SPEC_PATHS = ['load-balanced', 'replica-set', 'sharded']; - describe('Connection String', function () { it('should not support auth passed with user', function () { const optionsWithUser = { @@ -223,82 +219,5 @@ describe('Connection String', function () { expect(options.dbName).to.equal('somedb'); expect(options.srvHost).to.equal('test1.test.build.10gen.cc'); }); - - SPEC_PATHS.forEach(function (folder) { - describe('spec tests', function () { - const specPath = path.join(__dirname, '../spec', 'initial-dns-seedlist-discovery', folder); - const testFiles = fs - .readdirSync(specPath) - .filter(x => x.indexOf('.json') !== -1) - .map(x => [x, fs.readFileSync(path.join(specPath, x), 'utf8')]) - .map(x => [path.basename(x[0], '.json'), JSON.parse(x[1])]); - - testFiles.forEach(test => { - if (!test[1].comment) { - 
test[1].comment = test[0]; - } - - const comment = test[1].comment; - const skipTest = comment.search(/^(srvMaxHosts|srv-service-name)/) > -1; - const maybeIt = skipTest ? it.skip : it; - // TODO: NODE-3467: Implement maxSrvHosts work - maybeIt(comment, { - metadata: { requires: { topology: ['single'] } }, - test: function (done) { - try { - const options = parseOptions(test[1].uri); - resolveSRVRecord(options, (err, result) => { - if (test[1].error) { - expect(err).to.exist; - expect(result).to.not.exist; - } else { - expect(err).to.not.exist; - expect(result).to.exist; - // Implicit SRV options must be set. - expect(options.directConnection).to.be.false; - const testOptions = test[1].options; - if (testOptions && 'tls' in testOptions) { - expect(options).to.have.property('tls', testOptions.tls); - } else if (testOptions && 'ssl' in testOptions) { - expect(options).to.have.property('tls', testOptions.ssl); - } else { - expect(options.tls).to.be.true; - } - if (testOptions && testOptions.replicaSet) { - expect(options).to.have.property('replicaSet', testOptions.replicaSet); - } - if (testOptions && testOptions.authSource) { - expect(options).to.have.property('credentials'); - expect(options.credentials.source).to.equal(testOptions.authSource); - } - if (testOptions && testOptions.loadBalanced) { - expect(options).to.have.property('loadBalanced', testOptions.loadBalanced); - } - if ( - test[1].parsed_options && - test[1].parsed_options.user && - test[1].parsed_options.password - ) { - expect(options.credentials.username).to.equal(test[1].parsed_options.user); - expect(options.credentials.password).to.equal( - test[1].parsed_options.password - ); - } - } - done(); - }); - } catch (error) { - if (test[1].error) { - expect(error).to.exist; - done(); - } else { - throw error; - } - } - } - }); - }); - }); - }); }); }); diff --git a/test/unit/mongo_client.test.js b/test/unit/mongo_client.test.js index 47a7785c95..e5ffce996b 100644 --- a/test/unit/mongo_client.test.js +++ b/test/unit/mongo_client.test.js @@ -2,8 +2,9 @@ const os = require('os'); const fs = require('fs'); const { expect } = require('chai'); +const { promisify } = require('util'); const { getSymbolFrom } = require('../tools/utils'); -const { parseOptions } = require('../../src/connection_string'); +const { parseOptions, resolveSRVRecord } = require('../../src/connection_string'); const { ReadConcern } = require('../../src/read_concern'); const { WriteConcern } = require('../../src/write_concern'); const { ReadPreference } = require('../../src/read_preference'); @@ -719,4 +720,71 @@ describe('MongoOptions', function () { expect(parse).to.throw(/URI/); }); }); + + it('srvMaxHosts > 0 cannot be combined with LB or ReplicaSet', () => { + expect(() => { + new MongoClient('mongodb+srv://localhost?srvMaxHosts=2&replicaSet=repl'); + }).to.throw(MongoParseError, 'Cannot use srvMaxHosts option with replicaSet'); + expect(() => { + new MongoClient('mongodb+srv://localhost?srvMaxHosts=2&loadBalanced=true'); + }).to.throw(MongoParseError, 'Cannot limit srv hosts with loadBalanced enabled'); + expect(() => { + new MongoClient('mongodb+srv://localhost', { srvMaxHosts: 2, replicaSet: 'blah' }); + }).to.throw(MongoParseError, 'Cannot use srvMaxHosts option with replicaSet'); + expect(() => { + new MongoClient('mongodb+srv://localhost?loadBalanced=true', { srvMaxHosts: 2 }); + }).to.throw(MongoParseError, 'Cannot limit srv hosts with loadBalanced enabled'); + + // These should not throw. 
+ new MongoClient('mongodb+srv://localhost?srvMaxHosts=0&replicaSet=repl'); + new MongoClient('mongodb+srv://localhost', { srvMaxHosts: 0, replicaSet: 'blah' }); + new MongoClient('mongodb+srv://localhost?srvMaxHosts=0&loadBalanced=true'); + new MongoClient('mongodb+srv://localhost?loadBalanced=true', { srvMaxHosts: 0 }); + }); + + it('srvServiceName and srvMaxHosts cannot be used on a non-srv connection string', () => { + expect(() => { + new MongoClient('mongodb://localhost?srvMaxHosts=2'); + }).to.throw(MongoParseError); + expect(() => { + new MongoClient('mongodb://localhost?srvMaxHosts=0'); + }).to.throw(MongoParseError); + expect(() => { + new MongoClient('mongodb://localhost', { srvMaxHosts: 0 }); + }).to.throw(MongoParseError); + expect(() => { + new MongoClient('mongodb://localhost?srvServiceName=abc'); + }).to.throw(MongoParseError); + expect(() => { + new MongoClient('mongodb://localhost', { srvMaxHosts: 2 }); + }).to.throw(MongoParseError); + expect(() => { + new MongoClient('mongodb://localhost', { srvServiceName: 'abc' }); + }).to.throw(MongoParseError); + }); + + it('srvServiceName should error if it is too long', async () => { + let thrownError; + let options; + try { + options = parseOptions('mongodb+srv://localhost.a.com', { srvServiceName: 'a'.repeat(255) }); + await promisify(resolveSRVRecord)(options); + } catch (error) { + thrownError = error; + } + expect(thrownError).to.have.property('code', 'EBADNAME'); + }); + + it('srvServiceName should not error if it is greater than 15 characters as long as the DNS query limit is not surpassed', async () => { + let thrownError; + let options; + try { + options = parseOptions('mongodb+srv://localhost.a.com', { srvServiceName: 'a'.repeat(16) }); + await promisify(resolveSRVRecord)(options); + } catch (error) { + thrownError = error; + } + // Nothing wrong with the name, just DNE + expect(thrownError).to.have.property('code', 'ENOTFOUND'); + }); }); diff --git a/test/unit/polling_srv_records_for_mongos_discovery.prose.test.ts b/test/unit/polling_srv_records_for_mongos_discovery.prose.test.ts new file mode 100644 index 0000000000..5a2e7f3408 --- /dev/null +++ b/test/unit/polling_srv_records_for_mongos_discovery.prose.test.ts @@ -0,0 +1,267 @@ +import * as dns from 'dns'; +import * as sinon from 'sinon'; +import { expect } from 'chai'; +import { MongoClient } from '../../src'; +import { processTick } from '../tools/utils'; +import { it } from 'mocha'; +import * as mock from '../tools/mock'; + +/* + The SRV Prose Tests make use of the following REAL DNS records. + CURRENTLY, WE DO NOT USE THESE. We have stubbed the methods to build our own fake data for testing. + We use sinon to replace the results from resolveSrv to test hostname removals and insertions. + + The actual spec prose assumes you have a 4 node sharded cluster running on ports: + [27017, 27018, 27019, 27020] + + Record TTL Class Address + localhost.test.test.build.10gen.cc. 86400 IN A 127.0.0.1 + + Record TTL Class Port Target + _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. + _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. + _mongodb._tcp.test3.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. + _customname._tcp.test22.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. 
+*/ + +const srvRecord = (name, port) => ({ name, port, weight: 0, priority: 0 }); +interface ShardedClusterMocks { + mongoses: mock.MockServer[]; + readonly srvRecords: dns.SrvRecord[]; +} + +// TODO(): Make use of the shared driver's DNS records + +describe('Polling Srv Records for Mongos Discovery', () => { + const SRV_CONNECTION_STRING = 'mongodb+srv://test.mock.test.build.10gen.cc'; + let shardedCluster: ShardedClusterMocks; + let resolveSrvStub: sinon.SinonStub; + let lookupStub: sinon.SinonStub; + let client: MongoClient; + let clock: sinon.SinonFakeTimers; + const initialRecords = Object.freeze([ + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2017 }, + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2018 } + ]); + + beforeEach(() => { + clock = sinon.useFakeTimers(); + }); + + afterEach(() => { + if (clock) { + clock.restore(); + clock = undefined; + } + }); + + beforeEach(async () => { + const mongoses = [ + await mock.createServer(2017), + await mock.createServer(2018), + await mock.createServer(2019), + await mock.createServer(2020) + ]; + + const srvRecords = mongoses.map(s => + srvRecord('localhost.test.mock.test.build.10gen.cc', s.port) + ); + + shardedCluster = { mongoses, srvRecords }; + + for (const mongos of shardedCluster.mongoses) { + mongos.setMessageHandler(request => { + const document = request.document; + + if (document.ismaster || document.hello) { + request.reply({ ...mock.HELLO, msg: 'isdbgrid' }); + } + }); + } + }); + + afterEach(async () => { + await mock.cleanup(); + }); + + afterEach(async () => { + if (resolveSrvStub) { + resolveSrvStub.restore(); + resolveSrvStub = undefined; + } + if (lookupStub) { + lookupStub.restore(); + lookupStub = undefined; + } + if (client) { + await client.close(); + client = undefined; + } + }); + + function makeStubs({ + initialRecords = undefined, + replacementRecords = undefined, + srvServiceName = 'mongodb' + }) { + let initialDNSLookup = true; + const mockRecords = shardedCluster.srvRecords; + replacementRecords ??= mockRecords; + initialRecords ??= mockRecords; + // first call is for the driver initial connection + // second call will check the poller + resolveSrvStub = sinon.stub(dns, 'resolveSrv').callsFake((address, callback) => { + expect(address).to.equal(`_${srvServiceName}._tcp.test.mock.test.build.10gen.cc`); + if (initialDNSLookup) { + initialDNSLookup = false; + return process.nextTick(callback, null, initialRecords); + } + process.nextTick(callback, null, replacementRecords); + }); + + lookupStub = sinon.stub(dns, 'lookup').callsFake((...args) => { + const hostname = args[0]; + const options = typeof args[1] === 'object' ? 
args[1] : {}; + const callback = args[args.length - 1] as (err: null, address: string, family: 4) => void; + + if (hostname.includes('test.mock.test.build.10gen.cc')) { + return process.nextTick(callback, null, '127.0.0.1', 4); + } + + const { wrappedMethod: lookup } = lookupStub; + lookup(hostname, options, callback); + }); + } + + it('10 - All DNS records are selected (srvMaxHosts = 0)', async () => { + const replacementRecords = [ + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2017, weight: 0, priority: 0 }, + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2019, weight: 0, priority: 0 }, + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2020, weight: 0, priority: 0 } + ]; + + makeStubs({ initialRecords, replacementRecords }); + + client = new MongoClient(SRV_CONNECTION_STRING, { + tls: false, // Need to turn off the automatic TLS turn on with SRV connection strings + srvMaxHosts: 0, + serverSelectionTimeoutMS: 5000 // This is just to make the test fail in a nice amount of time + }); + await client.connect(); + + const selectedHosts = client.topology.s.seedlist; + expect(selectedHosts).to.have.lengthOf(initialRecords.length); + expect(selectedHosts.map(({ host }) => host)).to.deep.equal( + initialRecords.map(({ name }) => name) + ); + + clock.tick(2 * client.topology.s.srvPoller.rescanSrvIntervalMS); + await processTick(); + + const polledServerAddresses = Array.from(client.topology.description.servers.keys()); + polledServerAddresses.sort(); + expect(polledServerAddresses).to.deep.equal( + replacementRecords.map(({ name, port }) => `${name}:${port}`) + ); + }); + + it('11 - All DNS records are selected (srvMaxHosts >= records)', async () => { + const replacementRecords = [ + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2019, weight: 0, priority: 0 }, + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2020, weight: 0, priority: 0 } + ]; + + makeStubs({ initialRecords, replacementRecords }); + + client = new MongoClient(SRV_CONNECTION_STRING, { + tls: false, + srvMaxHosts: 2, + serverSelectionTimeoutMS: 5000 + }); + await client.connect(); + + const selectedHosts = client.topology.s.seedlist; + expect(selectedHosts).to.have.lengthOf(2); + expect(selectedHosts.map(({ host }) => host)).to.deep.equal( + initialRecords.map(({ name }) => name) + ); + + clock.tick(2 * client.topology.s.srvPoller.rescanSrvIntervalMS); + await processTick(); + + const polledServerAddresses = Array.from(client.topology.description.servers.keys()); + polledServerAddresses.sort(); + expect(polledServerAddresses).to.deep.equal( + replacementRecords.map(({ name, port }) => `${name}:${port}`) + ); + }); + + it('12 - New DNS records are randomly selected (srvMaxHosts > 0)', async () => { + const replacementRecords = [ + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2017, weight: 0, priority: 0 }, + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2019, weight: 0, priority: 0 }, + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2020, weight: 0, priority: 0 } + ]; + + makeStubs({ initialRecords, replacementRecords }); + + client = new MongoClient(SRV_CONNECTION_STRING, { + tls: false, + srvMaxHosts: 2, + serverSelectionTimeoutMS: 5000 + }); + await client.connect(); + + const selectedHosts = client.topology.s.seedlist; + expect(selectedHosts).to.have.lengthOf(2); + expect(selectedHosts.map(({ host }) => host)).to.deep.equal( + initialRecords.map(({ name }) => name) + ); + + clock.tick(2 * client.topology.s.srvPoller.rescanSrvIntervalMS); + 
await processTick(); + + const polledServerAddresses = Array.from(client.topology.description.servers.keys()); + polledServerAddresses.sort(); + // Only two addresses, one should remain the original 2017, + // while the other will be one of 2019 or 2020 + expect(polledServerAddresses).to.have.lengthOf(2); + expect(polledServerAddresses).to.include('localhost.test.mock.test.build.10gen.cc:2017'); + expect(polledServerAddresses).satisfies( + addresses => + // If you want proof, comment one of these conditions out, and run the test a few times + // you should see it pass and fail at random + addresses.includes('localhost.test.mock.test.build.10gen.cc:2019') || + addresses.includes('localhost.test.mock.test.build.10gen.cc:2020') + ); + }); + + it('13 - DNS record with custom service name can be found', async () => { + const replacementRecords = [ + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2019, weight: 0, priority: 0 }, + { name: 'localhost.test.mock.test.build.10gen.cc', port: 2020, weight: 0, priority: 0 } + ]; + + makeStubs({ + initialRecords: [initialRecords[0]], + replacementRecords, + srvServiceName: 'myFancySrvServiceName' + }); + + client = new MongoClient(SRV_CONNECTION_STRING, { + tls: false, + srvServiceName: 'myFancySrvServiceName', + serverSelectionTimeoutMS: 5000 + }); + + await client.connect(); + + clock.tick(2 * client.topology.s.srvPoller.rescanSrvIntervalMS); + // No need to await process tick, since we're not checking DNS lookups + + const resolveSrvCalls = resolveSrvStub.getCalls(); + expect(resolveSrvCalls).to.have.lengthOf(2); + expect(resolveSrvCalls[0].args[0]).includes('myFancySrvServiceName'); + expect(resolveSrvCalls[1].args[0]).include('myFancySrvServiceName'); + }); +}); diff --git a/test/unit/utils.test.js b/test/unit/utils.test.js index bee1f80e13..15ba77841e 100644 --- a/test/unit/utils.test.js +++ b/test/unit/utils.test.js @@ -3,13 +3,15 @@ const { eachAsync, executeLegacyOperation, makeInterruptibleAsyncInterval, - BufferPool + BufferPool, + shuffle } = require('../../src/utils'); const { expect } = require('chai'); const sinon = require('sinon'); +const { MongoRuntimeError } = require('../../src/error'); -describe('utils', function () { - context('eachAsync', function () { +describe('driver utils', function () { + context('eachAsync()', function () { it('should callback with an error', function (done) { eachAsync( [{ error: false }, { error: true }], @@ -327,7 +329,7 @@ describe('utils', function () { }); }); - context('BufferPool', function () { + context('new BufferPool()', function () { it('should report the correct length', function () { const buffer = new BufferPool(); buffer.append(Buffer.from([0, 1])); @@ -414,7 +416,7 @@ describe('utils', function () { }); }); - context('executeLegacyOperation', function () { + context('executeLegacyOperation()', function () { it('should call callback with errors on throw errors, and rethrow error', function () { const expectedError = new Error('THIS IS AN ERROR'); let callbackError, caughtError; @@ -456,4 +458,88 @@ describe('utils', function () { }); }); }); + + describe('shuffle()', () => { + it('should support iterables', function () { + // Kind of an implicit test, we should not throw/crash here. 
+ const input = new Set(['a', 'b', 'c']); + const output = shuffle(input); + expect(Array.isArray(output)).to.be.true; + }); + + it('should not mutate the original input', function () { + const input = Object.freeze(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']); + const output = shuffle(input); // This will throw if shuffle tries to edit the input + expect(output === input).to.be.false; + expect(output).to.not.deep.equal(input); + expect(output).to.have.lengthOf(input.length); + }); + + it(`should give a random subset of length equal to limit when limit is less than the input length`, function () { + const input = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']; + const output = shuffle(input, input.length - 1); + expect(output).to.not.deep.equal(input); + expect(output).to.have.lengthOf(input.length - 1); + }); + + it(`should give a random shuffling of the entire input when limit is equal to input length`, function () { + const input = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']; + const output = shuffle(input, input.length); + expect(output).to.not.deep.equal(input); + expect(output).to.have.lengthOf(input.length); + }); + + it(`should always return the same element when input is one item`, function () { + const input = ['a']; + for (let i = 0; i < 10; i++) { + const output = shuffle(input); + expect(output).to.deep.equal(input); + } + for (let i = 0; i < 10; i++) { + const output = shuffle(input, 1); // and with limit + expect(output).to.deep.equal(input); + } + }); + + it(`should return a random item on every call of limit 1`, function () { + const input = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']; + const outputs = new Set(); + for (let i = 0; i < 5; i++) { + const output = shuffle(input, 1); + expect(output).to.have.lengthOf(1); + outputs.add(output[0]); + } + // Of the 5 shuffles we got at least 2 unique random items, this is to avoid flakiness + expect(outputs.size).is.greaterThanOrEqual(2); + }); + + it('should give a random shuffling of the entire input when no limit provided', () => { + const input = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']; + const output = shuffle(input); + // Of course it is possible a shuffle returns exactly the same as the input + // but it is so improbable it is worth the flakiness in my opinion + expect(output).to.not.deep.equal(input); + expect(output).to.have.lengthOf(input.length); + }); + it('should give a random shuffling of the entire input when limit is explicitly set to 0', () => { + const input = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']; + const output = shuffle(input, 0); + expect(output).to.not.deep.equal(input); + expect(output).to.have.lengthOf(input.length); + }); + + it('should handle empty array if limit is unspecified or 0', function () { + expect(shuffle([])).to.deep.equal([]); + expect(shuffle([], 0)).to.deep.equal([]); + }); + + it('should throw if limit is greater than zero and empty array', function () { + expect(() => shuffle([], 2)).to.throw(MongoRuntimeError); + expect(() => shuffle([], 1)).to.throw(MongoRuntimeError); + }); + + it('should throw if limit is larger than input size', () => { + expect(() => shuffle(['a', 'b'], 3)).to.throw(MongoRuntimeError); + }); + }); });
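
The host-limiting behavior this patch introduces in `src/utils.ts` (and applies in the `Topology` constructor and `TopologyDescription.updateFromSrvPollingEvent`) amounts to a partial Fisher–Yates shuffle followed by a slice: shuffle only as many trailing slots as needed, then return either the whole randomized array (when the limit is `0` or equals the input length) or the randomized tail. The sketch below restates that logic as a standalone example for readers of the diff; the helper name `shuffleSketch` and the sample seedlist are illustrative only and not part of the patch.

```typescript
// Illustrative sketch of the partial Fisher–Yates shuffle used for srvMaxHosts.
// limit = 0 (or limit === items.length) returns the entire randomized array;
// otherwise only the last `limit` slots are randomized and returned.
function shuffleSketch<T>(sequence: Iterable<T>, limit = 0): T[] {
  const items = Array.from(sequence); // shallow copy; never mutate the caller's input

  if (limit > items.length) {
    throw new Error('Limit must be less than the number of items');
  }

  let remaining = items.length;
  const lowerBound = limit % items.length === 0 ? 1 : items.length - limit;
  while (remaining > lowerBound) {
    // Pick a remaining element and swap it into the current tail position
    const randomIndex = Math.floor(Math.random() * remaining);
    remaining -= 1;
    [items[remaining], items[randomIndex]] = [items[randomIndex], items[remaining]];
  }

  return limit % items.length === 0 ? items : items.slice(lowerBound);
}

// e.g. capping a hypothetical SRV-discovered seedlist to srvMaxHosts = 2,
// mirroring the guard in the Topology constructor (no shuffle when the
// limit is 0 or already covers the whole seedlist):
const seedlist = ['host1:27017', 'host2:27017', 'host3:27017', 'host4:27017'];
const srvMaxHosts = 2;
const selected =
  srvMaxHosts === 0 || srvMaxHosts >= seedlist.length
    ? seedlist
    : shuffleSketch(seedlist, srvMaxHosts);
console.log(selected); // two randomly chosen seeds
```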