fs: reduce memory retention when streaming small files #21968

Closed · wants to merge 2 commits
19 changes: 18 additions & 1 deletion lib/internal/fs/streams.js
@@ -21,9 +21,18 @@ const util = require('util');
 const kMinPoolSpace = 128;

 let pool;
+// It can happen that we expect to read a large chunk of data, and reserve
+// a large chunk of the pool accordingly, but the read() call only filled
+// a portion of it. If a concurrently executing read() then uses the same pool,
+// the "reserved" portion cannot be used, so we allow it to be re-used as a
+// new pool later.
+const poolFragments = [];

 function allocNewPool(poolSize) {
-  pool = Buffer.allocUnsafe(poolSize);
+  if (poolFragments.length > 0)
+    pool = poolFragments.pop();
+  else
+    pool = Buffer.allocUnsafe(poolSize);
   pool.used = 0;
 }

@@ -171,6 +180,14 @@ ReadStream.prototype._read = function(n) {
       this.emit('error', er);
     } else {
       let b = null;
+      // Now that we know how much data we have actually read, re-wind the
+      // 'used' field if we can, and otherwise allow the remainder of our
+      // reservation to be used as a new pool later.
+      if (start + toRead === thisPool.used && thisPool === pool)
+        thisPool.used += bytesRead - toRead;
+      else if (toRead - bytesRead > kMinPoolSpace)
+        poolFragments.push(thisPool.slice(start + bytesRead, start + toRead));
+
       if (bytesRead > 0) {
         this.bytesRead += bytesRead;
         b = thisPool.slice(start, start + bytesRead);
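For reference, here is a minimal standalone sketch of the pooling strategy the two hunks above implement (illustrative only, not part of the patch: readChunk and its simulateRead callback are hypothetical stand-ins for ReadStream.prototype._read() and fs.read(), and the 64 KiB pool size is an assumption matching the stream's default highWaterMark).

'use strict';
// Standalone sketch (not the actual lib/internal/fs/streams.js code).
const kMinPoolSpace = 128;
const poolFragments = [];
let pool;

function allocNewPool(poolSize) {
  // Reuse a leftover fragment as the new pool when one is available.
  if (poolFragments.length > 0)
    pool = poolFragments.pop();
  else
    pool = Buffer.allocUnsafe(poolSize);
  pool.used = 0;
}

function readChunk(toRead, simulateRead) {
  if (!pool || pool.length - pool.used < kMinPoolSpace)
    allocNewPool(64 * 1024);

  const thisPool = pool;
  const start = thisPool.used;
  thisPool.used += toRead;  // Reserve `toRead` bytes of the pool up front.

  // `simulateRead` fills some (possibly smaller) number of bytes, like fs.read().
  const bytesRead = simulateRead(thisPool, start, toRead);

  // Re-wind the reservation if nothing else touched the pool in the meantime;
  // otherwise hand the unused tail back as a fragment for a later pool.
  if (start + toRead === thisPool.used && thisPool === pool)
    thisPool.used += bytesRead - toRead;
  else if (toRead - bytesRead > kMinPoolSpace)
    poolFragments.push(thisPool.slice(start + bytesRead, start + toRead));

  return thisPool.slice(start, start + bytesRead);
}

// Example: reserve 1000 bytes but only fill 12 of them.
const chunk = readChunk(1000, (buf, start) => buf.write('hello world!', start));
console.log(chunk.toString(), pool.used);  // 'hello world! 12' – the reservation was rewound.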
47 changes: 47 additions & 0 deletions test/parallel/test-fs-read-stream-concurrent-reads.js
@@ -0,0 +1,47 @@
'use strict';
const common = require('../common');
const fixtures = require('../common/fixtures');
const assert = require('assert');
const fs = require('fs');

// Test that concurrent file read streams don’t interfere with each other’s
// contents, and that the chunks generated by the reads only retain a
// 'reasonable' amount of memory.

// Refs: https://github.com/nodejs/node/issues/21967

const filename = fixtures.path('loop.js'); // Some small non-homogeneous file.
const content = fs.readFileSync(filename);

const N = 1000;
let started = 0;
let done = 0;

const arrayBuffers = new Set();

function startRead() {
  ++started;
  const chunks = [];
  fs.createReadStream(filename)
    .on('data', (chunk) => {
      chunks.push(chunk);
      arrayBuffers.add(chunk.buffer);
      if (started < N)
        startRead();
    })
    .on('end', common.mustCall(() => {
      assert.deepStrictEqual(Buffer.concat(chunks), content);
      if (++done === N) {
        const retainedMemory =
          [...arrayBuffers].map((ab) => ab.byteLength).reduce((a, b) => a + b);
        assert(retainedMemory / (N * content.length) <= 3,
               `Retaining ${retainedMemory} bytes in ABs for ${N} ` +
               `chunks of size ${content.length}`);
      }
    }));
}

// Don’t start the reads all at once – that way we would have to allocate
// a large amount of memory upfront.
for (let i = 0; i < 4; ++i)
  startRead();
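As context for the threshold in the test: each 'data' chunk is a Buffer view onto a shared pool, so chunk.buffer is the pool's backing ArrayBuffer, and summing the distinct ArrayBuffers (as the test does) measures how much memory the small chunks keep alive. A minimal sketch of the same measurement for a single stream (illustrative only; '/etc/hosts' is just an arbitrary small file):

'use strict';
// Sketch: measure the backing-store memory retained by one small file's chunks.
const fs = require('fs');

const arrayBuffers = new Set();
let dataBytes = 0;

fs.createReadStream('/etc/hosts')
  .on('data', (chunk) => {
    dataBytes += chunk.length;
    arrayBuffers.add(chunk.buffer);  // Backing ArrayBuffer of the pool slice.
  })
  .on('end', () => {
    const retained =
      [...arrayBuffers].reduce((sum, ab) => sum + ab.byteLength, 0);
    console.log(`${dataBytes} bytes of data retain ${retained} bytes of ArrayBuffers`);
  });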