fs: improve fsPromises readFile performance
Improve fsPromises readFile performance by allocating only one
buffer when the size is known, increasing the size of the read
buffer chunks, and not reading more data once size bytes have
been read.

Refs: nodejs#37583
PR-URL: nodejs#37608
Reviewed-By: Benjamin Gruenbaum <benjamingr@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Linkgoron authored and MoritzLoewenstein committed Sep 1, 2021
1 parent 07d5dd2 commit 2ffa9c5
Showing 2 changed files with 44 additions and 16 deletions.
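For context, the strategy described in the commit message can be sketched in
user-land code roughly as follows. This is a minimal illustration only, not
the patch itself; the helper name readWholeFile and the chunk size are
invented for the example.

  'use strict';
  // Read a whole file through a FileHandle by allocating a single destination
  // buffer up front when the size is known, reading in large chunks, and
  // stopping once `size` bytes have been read.
  const { open } = require('fs').promises;

  const kChunkSize = 512 * 1024; // illustrative chunk size

  async function readWholeFile(path) {
    const handle = await open(path, 'r');
    try {
      const { size } = await handle.stat();
      const result = Buffer.allocUnsafeSlow(size); // one allocation, not one per chunk
      let totalRead = 0;
      while (totalRead < size) {
        const length = Math.min(size - totalRead, kChunkSize);
        const { bytesRead } = await handle.read(result, totalRead, length, null);
        if (bytesRead === 0) break; // file shrank since stat(); stop early
        totalRead += bytesRead;
      }
      return totalRead === size ? result : result.slice(0, totalRead);
    } finally {
      await handle.close();
    }
  }

  // Usage: readWholeFile('./some-file.bin').then((buf) => console.log(buf.length));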
45 changes: 34 additions & 11 deletions lib/internal/fs/promises.js
@@ -3,6 +3,7 @@
 const kWriteFileMaxChunkSize = 2 ** 14;
 
 const {
+  ArrayPrototypePush,
   Error,
   MathMax,
   MathMin,
@@ -292,24 +293,46 @@ async function readFileHandle(filehandle, options) {
   if (size > kIoMaxLength)
     throw new ERR_FS_FILE_TOO_LARGE(size);
 
-  const chunks = [];
-  const chunkSize = size === 0 ?
-    kReadFileMaxChunkSize :
-    MathMin(size, kReadFileMaxChunkSize);
   let endOfFile = false;
+  let totalRead = 0;
+  const noSize = size === 0;
+  const buffers = [];
+  const fullBuffer = noSize ? undefined : Buffer.allocUnsafeSlow(size);
   do {
     if (signal && signal.aborted) {
       throw lazyDOMException('The operation was aborted', 'AbortError');
     }
-    const buf = Buffer.alloc(chunkSize);
-    const { bytesRead, buffer } =
-      await read(filehandle, buf, 0, chunkSize, -1);
-    endOfFile = bytesRead === 0;
-    if (bytesRead > 0)
-      chunks.push(buffer.slice(0, bytesRead));
+    let buffer;
+    let offset;
+    let length;
+    if (noSize) {
+      buffer = Buffer.allocUnsafeSlow(kReadFileUnknownBufferLength);
+      offset = 0;
+      length = kReadFileUnknownBufferLength;
+    } else {
+      buffer = fullBuffer;
+      offset = totalRead;
+      length = MathMin(size - totalRead, kReadFileBufferLength);
+    }
+
+    const bytesRead = (await binding.read(filehandle.fd, buffer, offset,
+                                          length, -1, kUsePromises)) || 0;
+    totalRead += bytesRead;
+    endOfFile = bytesRead === 0 || totalRead === size;
+    if (noSize && bytesRead > 0) {
+      const isBufferFull = bytesRead === kReadFileUnknownBufferLength;
+      const chunkBuffer = isBufferFull ? buffer : buffer.slice(0, bytesRead);
+      ArrayPrototypePush(buffers, chunkBuffer);
+    }
   } while (!endOfFile);
 
-  const result = chunks.length === 1 ? chunks[0] : Buffer.concat(chunks);
+  let result;
+  if (size > 0) {
+    result = totalRead === size ? fullBuffer : fullBuffer.slice(0, totalRead);
+  } else {
+    result = buffers.length === 1 ? buffers[0] : Buffer.concat(buffers,
+                                                               totalRead);
+  }
 
   return options.encoding ? result.toString(options.encoding) : result;
 }
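A note on the allocation calls in the new code path, for readers comparing it
with the old per-chunk Buffer.alloc. The semantics below follow the Node.js
Buffer documentation; the sizes are arbitrary.

  // Zero-filled allocation: safe by construction, but pays for zeroing memory
  // that the read loop is about to overwrite anyway (the old code did this
  // once per chunk).
  const zeroed = Buffer.alloc(64 * 1024);

  // Uninitialized allocation outside Buffer's internal pool: no zero-fill
  // cost, and the memory is not shared with pooled slices, which suits a
  // large buffer that is retained and handed back to the caller, as
  // fullBuffer is above.
  const unpooled = Buffer.allocUnsafeSlow(64 * 1024);

  // Handing back uninitialized memory is only safe because the read loop
  // fills the buffer, and only the filled prefix (up to totalRead) is ever
  // returned.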
15 changes: 10 additions & 5 deletions test/parallel/test-fs-promises-file-handle-readFile.js
@@ -11,7 +11,7 @@ const {
   open,
   readFile,
   writeFile,
-  truncate
+  truncate,
 } = fs.promises;
 const path = require('path');
 const tmpdir = require('../common/tmpdir');
@@ -65,6 +65,7 @@ async function doReadAndCancel() {
     await assert.rejects(readFile(fileHandle, { signal }), {
       name: 'AbortError'
     });
+    await fileHandle.close();
   }
 
   // Signal aborted on first tick
@@ -75,10 +76,11 @@ async function doReadAndCancel() {
     fs.writeFileSync(filePathForHandle, buffer);
     const controller = new AbortController();
     const { signal } = controller;
-    tick(1, () => controller.abort());
+    process.nextTick(() => controller.abort());
     await assert.rejects(readFile(fileHandle, { signal }), {
       name: 'AbortError'
-    });
+    }, 'tick-0');
+    await fileHandle.close();
   }
 
   // Signal aborted right before buffer read
@@ -91,10 +93,12 @@ async function doReadAndCancel() {
 
     const controller = new AbortController();
     const { signal } = controller;
-    tick(2, () => controller.abort());
+    tick(1, () => controller.abort());
     await assert.rejects(fileHandle.readFile({ signal, encoding: 'utf8' }), {
       name: 'AbortError'
-    });
+    }, 'tick-1');
+
+    await fileHandle.close();
   }
 
   // Validate file size is within range for reading
@@ -112,6 +116,7 @@ async function doReadAndCancel() {
       name: 'RangeError',
       code: 'ERR_FS_FILE_TOO_LARGE'
     });
+    await fileHandle.close();
   }
 }

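For reference, the cancellation pattern the updated tests exercise looks
roughly like this in user code. A minimal sketch only; readUnlessAborted is a
hypothetical helper, not part of the test suite.

  'use strict';
  const { open } = require('fs').promises;

  // Abort an in-flight FileHandle.readFile() via an AbortController,
  // mirroring the "signal aborted on first tick" case above.
  async function readUnlessAborted(path) {
    const controller = new AbortController();
    const fileHandle = await open(path, 'r');
    try {
      process.nextTick(() => controller.abort()); // cancel before the read can finish
      return await fileHandle.readFile({ signal: controller.signal });
    } catch (err) {
      if (err.name === 'AbortError') return null; // read was cancelled
      throw err;
    } finally {
      await fileHandle.close();
    }
  }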
