diff --git a/lib/internal/fs/promises.js b/lib/internal/fs/promises.js index 39418446bd9c15..696c904be35c7f 100644 --- a/lib/internal/fs/promises.js +++ b/lib/internal/fs/promises.js @@ -1,18 +1,7 @@ 'use strict'; -// Most platforms don't allow reads or writes >= 2 GB. -// See https://github.com/libuv/libuv/pull/1501. -const kIoMaxLength = 2 ** 31 - 1; - -// Note: This is different from kReadFileBufferLength used for non-promisified -// fs.readFile. -const kReadFileMaxChunkSize = 2 ** 14; -const kWriteFileMaxChunkSize = 2 ** 14; - -// 2 ** 32 - 1 -const kMaxUserId = 4294967295; - const { + ArrayPrototypePush, Error, MathMax, MathMin, @@ -44,6 +33,13 @@ const { const { isArrayBufferView } = require('internal/util/types'); const { rimrafPromises } = require('internal/fs/rimraf'); const { + constants: { + kIoMaxLength, + kMaxUserId, + kReadFileBufferLength, + kReadFileUnknownBufferLength, + kWriteFileMaxChunkSize, + }, copyObject, getDirents, getOptions, @@ -296,24 +292,46 @@ async function readFileHandle(filehandle, options) { if (size > kIoMaxLength) throw new ERR_FS_FILE_TOO_LARGE(size); - const chunks = []; - const chunkSize = size === 0 ? - kReadFileMaxChunkSize : - MathMin(size, kReadFileMaxChunkSize); let endOfFile = false; + let totalRead = 0; + const noSize = size === 0; + const buffers = []; + const fullBuffer = noSize ? 
undefined : Buffer.allocUnsafeSlow(size); do { if (signal && signal.aborted) { throw lazyDOMException('The operation was aborted', 'AbortError'); } - const buf = Buffer.alloc(chunkSize); - const { bytesRead, buffer } = - await read(filehandle, buf, 0, chunkSize, -1); - endOfFile = bytesRead === 0; - if (bytesRead > 0) - chunks.push(buffer.slice(0, bytesRead)); + let buffer; + let offset; + let length; + if (noSize) { + buffer = Buffer.allocUnsafeSlow(kReadFileUnknownBufferLength); + offset = 0; + length = kReadFileUnknownBufferLength; + } else { + buffer = fullBuffer; + offset = totalRead; + length = MathMin(size - totalRead, kReadFileBufferLength); + } + + const bytesRead = (await binding.read(filehandle.fd, buffer, offset, + length, -1, kUsePromises)) || 0; + totalRead += bytesRead; + endOfFile = bytesRead === 0 || totalRead === size; + if (noSize && bytesRead > 0) { + const isBufferFull = bytesRead === kReadFileUnknownBufferLength; + const chunkBuffer = isBufferFull ? buffer : buffer.slice(0, bytesRead); + ArrayPrototypePush(buffers, chunkBuffer); + } } while (!endOfFile); - const result = chunks.length === 1 ? chunks[0] : Buffer.concat(chunks); + let result; + if (size > 0) { + result = totalRead === size ? fullBuffer : fullBuffer.slice(0, totalRead); + } else { + result = buffers.length === 1 ? buffers[0] : Buffer.concat(buffers, + totalRead); + } return options.encoding ? result.toString(options.encoding) : result; } diff --git a/lib/internal/fs/utils.js b/lib/internal/fs/utils.js index d3f4c621e4dd56..6af79033f452b6 100644 --- a/lib/internal/fs/utils.js +++ b/lib/internal/fs/utils.js @@ -113,6 +113,23 @@ const kMaximumCopyMode = COPYFILE_EXCL | COPYFILE_FICLONE | COPYFILE_FICLONE_FORCE; +// Most platforms don't allow reads or writes >= 2 GB. +// See https://github.com/libuv/libuv/pull/1501. +const kIoMaxLength = 2 ** 31 - 1; + +// Use 64kb in case the file type is not a regular file and thus do not know the +// actual file size. 
Increasing the value further results in more frequent over +// allocation for small files and consumes CPU time and memory that should be +// used elsewhere. +// Use up to 512kb per read otherwise to partition reading big files to prevent +// blocking other threads in case the available threads are all in use. +const kReadFileUnknownBufferLength = 64 * 1024; +const kReadFileBufferLength = 512 * 1024; + +const kWriteFileMaxChunkSize = 512 * 1024; + +const kMaxUserId = 2 ** 32 - 1; + const isWindows = process.platform === 'win32'; let fs; @@ -815,6 +832,13 @@ const validatePosition = hideStackFrames((position, name) => { }); module.exports = { + constants: { + kIoMaxLength, + kMaxUserId, + kReadFileBufferLength, + kReadFileUnknownBufferLength, + kWriteFileMaxChunkSize, + }, assertEncoding, BigIntStats, // for testing copyObject,