From 07d5dd245345670412738b740edb46db188b486f Mon Sep 17 00:00:00 2001
From: Darshan Sen
Date: Sat, 3 Apr 2021 21:14:32 +0530
Subject: [PATCH 1/2] fs: move constants to internal/fs/utils.js

Refs: https://github.com/nodejs/node/pull/38004#discussion_r604951958

PR-URL: https://github.com/nodejs/node/pull/38061
Reviewed-By: Antoine du Hamel
Reviewed-By: Nitzan Uziely
Reviewed-By: James M Snell
---
 lib/fs.js                            | 10 ++++------
 lib/internal/fs/promises.js          | 16 ++++++----------
 lib/internal/fs/read_file_context.js | 16 +++++++---------
 lib/internal/fs/utils.js             | 21 +++++++++++++++++++++
 4 files changed, 38 insertions(+), 25 deletions(-)

diff --git a/lib/fs.js b/lib/fs.js
index 935d6a1aed96b3..06a398e5c29d8c 100644
--- a/lib/fs.js
+++ b/lib/fs.js
@@ -24,10 +24,6 @@
 
 'use strict';
 
-// Most platforms don't allow reads or writes >= 2 GB.
-// See https://github.com/libuv/libuv/pull/1501.
-const kIoMaxLength = 2 ** 31 - 1;
-
 // When using FSReqCallback, make sure to create the object only *after* all
 // parameter validation has happened, so that the objects are not kept in memory
 // in case they are created but never used due to an exception.
@@ -79,6 +75,10 @@ const { FSReqCallback, statValues } = binding;
 const { toPathIfFileURL } = require('internal/url');
 const internalUtil = require('internal/util');
 const {
+  constants: {
+    kIoMaxLength,
+    kMaxUserId,
+  },
   copyObject,
   Dirent,
   getDirents,
@@ -121,8 +121,6 @@ const {
   validateInteger,
   validateInt32
 } = require('internal/validators');
-// 2 ** 32 - 1
-const kMaxUserId = 4294967295;
 
 let truncateWarn = true;
 let fs;
diff --git a/lib/internal/fs/promises.js b/lib/internal/fs/promises.js
index 39418446bd9c15..733d3f2c99285d 100644
--- a/lib/internal/fs/promises.js
+++ b/lib/internal/fs/promises.js
@@ -1,17 +1,7 @@
 'use strict';
 
-// Most platforms don't allow reads or writes >= 2 GB.
-// See https://github.com/libuv/libuv/pull/1501.
-const kIoMaxLength = 2 ** 31 - 1;
-
-// Note: This is different from kReadFileBufferLength used for non-promisified
-// fs.readFile.
-const kReadFileMaxChunkSize = 2 ** 14;
 const kWriteFileMaxChunkSize = 2 ** 14;
 
-// 2 ** 32 - 1
-const kMaxUserId = 4294967295;
-
 const {
   Error,
   MathMax,
@@ -44,6 +34,12 @@ const {
 const { isArrayBufferView } = require('internal/util/types');
 const { rimrafPromises } = require('internal/fs/rimraf');
 const {
+  constants: {
+    kIoMaxLength,
+    kMaxUserId,
+    kReadFileBufferLength,
+    kReadFileUnknownBufferLength,
+  },
   copyObject,
   getDirents,
   getOptions,
diff --git a/lib/internal/fs/read_file_context.js b/lib/internal/fs/read_file_context.js
index faca0e0c331e39..4976a7a1deb8d8 100644
--- a/lib/internal/fs/read_file_context.js
+++ b/lib/internal/fs/read_file_context.js
@@ -4,6 +4,13 @@ const {
   MathMin,
 } = primordials;
 
+const {
+  constants: {
+    kReadFileBufferLength,
+    kReadFileUnknownBufferLength,
+  }
+} = require('internal/fs/utils');
+
 const { Buffer } = require('buffer');
 
 const { FSReqCallback, close, read } = internalBinding('fs');
@@ -18,15 +25,6 @@ const lazyDOMException = hideStackFrames((message, name) => {
   return new DOMException(message, name);
 });
 
-// Use 64kb in case the file type is not a regular file and thus do not know the
-// actual file size. Increasing the value further results in more frequent over
-// allocation for small files and consumes CPU time and memory that should be
-// used else wise.
-// Use up to 512kb per read otherwise to partition reading big files to prevent
-// blocking other threads in case the available threads are all in use.
-const kReadFileUnknownBufferLength = 64 * 1024;
-const kReadFileBufferLength = 512 * 1024;
-
 function readFileAfterRead(err, bytesRead) {
   const context = this.context;
 
diff --git a/lib/internal/fs/utils.js b/lib/internal/fs/utils.js
index d3f4c621e4dd56..1d4edc0e011e4c 100644
--- a/lib/internal/fs/utils.js
+++ b/lib/internal/fs/utils.js
@@ -113,6 +113,21 @@ const kMaximumCopyMode = COPYFILE_EXCL |
                          COPYFILE_FICLONE |
                          COPYFILE_FICLONE_FORCE;
 
+// Most platforms don't allow reads or writes >= 2 GB.
+// See https://github.com/libuv/libuv/pull/1501.
+const kIoMaxLength = 2 ** 31 - 1;
+
+// Use 64kb in case the file type is not a regular file and thus do not know the
+// actual file size. Increasing the value further results in more frequent over
+// allocation for small files and consumes CPU time and memory that should be
+// used else wise.
+// Use up to 512kb per read otherwise to partition reading big files to prevent
+// blocking other threads in case the available threads are all in use.
+const kReadFileUnknownBufferLength = 64 * 1024;
+const kReadFileBufferLength = 512 * 1024;
+
+const kMaxUserId = 2 ** 32 - 1;
+
 const isWindows = process.platform === 'win32';
 
 let fs;
@@ -815,6 +830,12 @@ const validatePosition = hideStackFrames((position, name) => {
 });
 
 module.exports = {
+  constants: {
+    kIoMaxLength,
+    kMaxUserId,
+    kReadFileBufferLength,
+    kReadFileUnknownBufferLength,
+  },
   assertEncoding,
   BigIntStats,  // for testing
   copyObject,

From 2ffa9c59371b408b52c930a0ad59df43c146809e Mon Sep 17 00:00:00 2001
From: Nitzan Uziely
Date: Fri, 5 Mar 2021 13:27:56 +0200
Subject: [PATCH 2/2] fs: improve fsPromises readFile performance

Improve fsPromises readFile performance by allocating only one buffer
when the size is known, increasing the size of the read buffer chunks,
and not reading more data once size bytes have been read.

Refs: https://github.com/nodejs/node/issues/37583

PR-URL: https://github.com/nodejs/node/pull/37608
Reviewed-By: Benjamin Gruenbaum
Reviewed-By: James M Snell
Reviewed-By: Anna Henningsen
---
 lib/internal/fs/promises.js                  | 45 ++++++++++++++-----
 .../test-fs-promises-file-handle-readFile.js | 15 ++++---
 2 files changed, 44 insertions(+), 16 deletions(-)

diff --git a/lib/internal/fs/promises.js b/lib/internal/fs/promises.js
index 733d3f2c99285d..a3de33c45c52d0 100644
--- a/lib/internal/fs/promises.js
+++ b/lib/internal/fs/promises.js
@@ -3,6 +3,7 @@
 const kWriteFileMaxChunkSize = 2 ** 14;
 
 const {
+  ArrayPrototypePush,
   Error,
   MathMax,
   MathMin,
@@ -292,24 +293,46 @@ async function readFileHandle(filehandle, options) {
   if (size > kIoMaxLength)
     throw new ERR_FS_FILE_TOO_LARGE(size);
 
-  const chunks = [];
-  const chunkSize = size === 0 ?
-    kReadFileMaxChunkSize :
-    MathMin(size, kReadFileMaxChunkSize);
   let endOfFile = false;
+  let totalRead = 0;
+  const noSize = size === 0;
+  const buffers = [];
+  const fullBuffer = noSize ? undefined : Buffer.allocUnsafeSlow(size);
   do {
     if (signal && signal.aborted) {
       throw lazyDOMException('The operation was aborted', 'AbortError');
     }
-    const buf = Buffer.alloc(chunkSize);
-    const { bytesRead, buffer } =
-      await read(filehandle, buf, 0, chunkSize, -1);
-    endOfFile = bytesRead === 0;
-    if (bytesRead > 0)
-      chunks.push(buffer.slice(0, bytesRead));
+    let buffer;
+    let offset;
+    let length;
+    if (noSize) {
+      buffer = Buffer.allocUnsafeSlow(kReadFileUnknownBufferLength);
+      offset = 0;
+      length = kReadFileUnknownBufferLength;
+    } else {
+      buffer = fullBuffer;
+      offset = totalRead;
+      length = MathMin(size - totalRead, kReadFileBufferLength);
+    }
+
+    const bytesRead = (await binding.read(filehandle.fd, buffer, offset,
+                                          length, -1, kUsePromises)) || 0;
+    totalRead += bytesRead;
+    endOfFile = bytesRead === 0 || totalRead === size;
+    if (noSize && bytesRead > 0) {
+      const isBufferFull = bytesRead === kReadFileUnknownBufferLength;
+      const chunkBuffer = isBufferFull ? buffer : buffer.slice(0, bytesRead);
+      ArrayPrototypePush(buffers, chunkBuffer);
+    }
   } while (!endOfFile);
 
-  const result = chunks.length === 1 ? chunks[0] : Buffer.concat(chunks);
+  let result;
+  if (size > 0) {
+    result = totalRead === size ? fullBuffer : fullBuffer.slice(0, totalRead);
+  } else {
+    result = buffers.length === 1 ? buffers[0] : Buffer.concat(buffers,
+                                                               totalRead);
+  }
 
   return options.encoding ? result.toString(options.encoding) : result;
 }
diff --git a/test/parallel/test-fs-promises-file-handle-readFile.js b/test/parallel/test-fs-promises-file-handle-readFile.js
index 367391244eace9..3a178f7f37ff25 100644
--- a/test/parallel/test-fs-promises-file-handle-readFile.js
+++ b/test/parallel/test-fs-promises-file-handle-readFile.js
@@ -11,7 +11,7 @@ const {
   open,
   readFile,
   writeFile,
-  truncate
+  truncate,
 } = fs.promises;
 const path = require('path');
 const tmpdir = require('../common/tmpdir');
@@ -65,6 +65,7 @@ async function doReadAndCancel() {
     await assert.rejects(readFile(fileHandle, { signal }), {
       name: 'AbortError'
     });
+    await fileHandle.close();
   }
 
   // Signal aborted on first tick
@@ -75,10 +76,11 @@ async function doReadAndCancel() {
     fs.writeFileSync(filePathForHandle, buffer);
     const controller = new AbortController();
     const { signal } = controller;
-    tick(1, () => controller.abort());
+    process.nextTick(() => controller.abort());
     await assert.rejects(readFile(fileHandle, { signal }), {
       name: 'AbortError'
-    });
+    }, 'tick-0');
+    await fileHandle.close();
   }
 
   // Signal aborted right before buffer read
@@ -91,10 +93,12 @@ async function doReadAndCancel() {
 
     const controller = new AbortController();
     const { signal } = controller;
-    tick(2, () => controller.abort());
+    tick(1, () => controller.abort());
     await assert.rejects(fileHandle.readFile({ signal, encoding: 'utf8' }), {
       name: 'AbortError'
-    });
+    }, 'tick-1');
+
+    await fileHandle.close();
   }
 
   // Validate file size is within range for reading
@@ -112,6 +116,7 @@ async function doReadAndCancel() {
       name: 'RangeError',
       code: 'ERR_FS_FILE_TOO_LARGE'
     });
+    await fileHandle.close();
   }
 }
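
Note for reviewers: the readFileHandle change in PATCH 2/2 is easier to follow
outside the diff. Below is a minimal standalone sketch of the same allocation
strategy, written against the public fs.promises API (filehandle.stat(),
filehandle.read()) rather than the internal binding.read/kUsePromises and
primordials used in lib/internal/fs/promises.js. readWholeFile and the constant
names here are illustrative only, not actual Node.js exports.

'use strict';
const fs = require('fs');

// Mirror kReadFileUnknownBufferLength / kReadFileBufferLength from the patch.
const kUnknownChunk = 64 * 1024;  // stat() reported size 0 (e.g. /proc files)
const kKnownChunk = 512 * 1024;   // cap per read when the size is known

async function readWholeFile(path) {
  const handle = await fs.promises.open(path, 'r');
  try {
    const { size } = await handle.stat();
    const noSize = size === 0;
    // Known size: one up-front allocation that every read fills in place,
    // instead of one Buffer.alloc() per chunk plus a final Buffer.concat().
    // allocUnsafeSlow skips the shared pool; every byte is overwritten below.
    const fullBuffer = noSize ? undefined : Buffer.allocUnsafeSlow(size);
    const buffers = [];
    let totalRead = 0;
    let endOfFile = false;
    do {
      const buffer = noSize ?
        Buffer.allocUnsafeSlow(kUnknownChunk) : fullBuffer;
      const offset = noSize ? 0 : totalRead;
      const length = noSize ?
        kUnknownChunk : Math.min(size - totalRead, kKnownChunk);
      // position null: read from the current file position.
      const { bytesRead } = await handle.read(buffer, offset, length, null);
      totalRead += bytesRead;
      // Stop at EOF, or as soon as size bytes arrived: no extra syscall.
      endOfFile = bytesRead === 0 || totalRead === size;
      if (noSize && bytesRead > 0)
        buffers.push(bytesRead === length ? buffer :
          buffer.slice(0, bytesRead));
    } while (!endOfFile);
    if (!noSize)
      return totalRead === size ? fullBuffer : fullBuffer.slice(0, totalRead);
    return buffers.length === 1 ? buffers[0] :
      Buffer.concat(buffers, totalRead);
  } finally {
    await handle.close();
  }
}

// Example use (hypothetical file name):
// readWholeFile('/tmp/example.txt').then((buf) => console.log(buf.length));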