stream: pipeline should only destroy un-finished streams
This PR logically reverts #31940
which has caused lots of unnecessary breakage in the ecosystem.

This PR also aligns better with the actual documented behavior
(a short sketch follows the quoted rule):

`stream.pipeline()` will call `stream.destroy(err)` on all streams
except:
  * `Readable` streams which have emitted `'end'` or `'close'`.
  * `Writable` streams which have emitted `'finish'` or `'close'`.
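
For illustration, a minimal sketch of the destructive half of that
rule (illustrative only, not part of this patch; it uses only the
public core `stream` API):

  const { pipeline, PassThrough, Writable } = require('stream');

  const src = new PassThrough(); // never ended, so it never emits 'end'
  const dst = new Writable({
    write(chunk, enc, cb) { cb(new Error('boom')); }
  });

  pipeline(src, dst, (err) => {
    console.log(err.message);   // 'boom'
    console.log(src.destroyed); // true: src had not finished, so it is destroyed
  });

  src.write('hello');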

The behavior introduced in #31940
was much more aggressive in terms of destroying streams. This was
good for avoiding potential resource leaks; however, it breaks some
common assumptions in legacy streams.
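
As a concrete illustration of the difference (a sketch that mirrors
the updated test in this commit, not code from the patch itself): a
legacy-style source created with `autoDestroy: false` now survives a
successful pipeline, whereas under #31940 it ended up destroyed.

  const { pipeline, PassThrough } = require('stream');

  const src = new PassThrough({ autoDestroy: false });
  const dst = new PassThrough({ autoDestroy: false });

  pipeline(src, dst, (err) => {
    if (err) throw err;
    // src emitted 'end' and dst emitted 'finish', so neither stream
    // is destroyed; with #31940, src.destroyed was true at this point.
    console.log(src.destroyed, dst.destroyed); // false false
  });

  src.end();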

Furthermore, it makes the code simpler and removes some hacks.

Fixes: #32954
Fixes: #32955

PR-URL: #32968
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Mathias Buus <mathiasbuus@gmail.com>
ronag authored and BethGriggs committed Apr 27, 2020
1 parent 4bf7fc2 commit eb801d0
Showing 2 changed files with 112 additions and 39 deletions.
56 changes: 18 additions & 38 deletions lib/internal/streams/pipeline.js
@@ -25,43 +25,18 @@ let EE;
 let PassThrough;
 let createReadableStreamAsyncIterator;

-function isIncoming(stream) {
-  return (
-    stream.socket &&
-    typeof stream.complete === 'boolean' &&
-    ArrayIsArray(stream.rawTrailers) &&
-    ArrayIsArray(stream.rawHeaders)
-  );
-}
-
-function isOutgoing(stream) {
-  return (
-    stream.socket &&
-    typeof stream.setHeader === 'function'
-  );
-}
-
-function destroyer(stream, reading, writing, final, callback) {
-  const _destroy = once((err) => {
-    if (!err && (isIncoming(stream) || isOutgoing(stream))) {
-      // http/1 request objects have a coupling to their response and should
-      // not be prematurely destroyed. Assume they will handle their own
-      // lifecycle.
-      return callback();
-    }
+function destroyer(stream, reading, writing, callback) {
+  callback = once(callback);

-    if (!err && reading && !writing && stream.writable) {
-      return callback();
-    }
-
-    if (err || !final || !stream.readable) {
-      destroyImpl.destroyer(stream, err);
-    }
-    callback(err);
+  let finished = false;
+  stream.on('close', () => {
+    finished = true;
   });

   if (eos === undefined) eos = require('internal/streams/end-of-stream');
   eos(stream, { readable: reading, writable: writing }, (err) => {
+    finished = !err;
+
     const rState = stream._readableState;
     if (
       err &&
@@ -78,14 +53,19 @@ function destroyer(stream, reading, writing, final, callback) {
       // eos will only fail with premature close on the reading side for
       // duplex streams.
       stream
-        .once('end', _destroy)
-        .once('error', _destroy);
+        .once('end', callback)
+        .once('error', callback);
     } else {
-      _destroy(err);
+      callback(err);
     }
   });

-  return (err) => _destroy(err || new ERR_STREAM_DESTROYED('pipe'));
+  return (err) => {
+    if (finished) return;
+    finished = true;
+    destroyImpl.destroyer(stream, err);
+    callback(err || new ERR_STREAM_DESTROYED('pipe'));
+  };
 }

 function popCallback(streams) {
@@ -204,7 +184,7 @@ function pipeline(...streams) {

     if (isStream(stream)) {
       finishCount++;
-      destroys.push(destroyer(stream, reading, writing, !reading, finish));
+      destroys.push(destroyer(stream, reading, writing, finish));
     }

     if (i === 0) {
@@ -262,7 +242,7 @@ function pipeline(...streams) {
         ret = pt;

         finishCount++;
-        destroys.push(destroyer(ret, false, true, true, finish));
+        destroys.push(destroyer(ret, false, true, finish));
       }
     } else if (isStream(stream)) {
       if (isReadable(ret)) {
95 changes: 94 additions & 1 deletion test/parallel/test-stream-pipeline.js
@@ -13,6 +13,7 @@ const {
 const assert = require('assert');
 const http = require('http');
 const { promisify } = require('util');
+const net = require('net');

 {
   let finished = false;
@@ -916,7 +917,7 @@ const { promisify } = require('util');
   const src = new PassThrough({ autoDestroy: false });
   const dst = new PassThrough({ autoDestroy: false });
   pipeline(src, dst, common.mustCall(() => {
-    assert.strictEqual(src.destroyed, true);
+    assert.strictEqual(src.destroyed, false);
     assert.strictEqual(dst.destroyed, false);
   }));
   src.end();
@@ -1118,3 +1119,95 @@ const { promisify } = require('util');
     assert.strictEqual(closed, true);
   }));
 }
+
+{
+  const server = net.createServer(common.mustCall((socket) => {
+    // echo server
+    pipeline(socket, socket, common.mustCall());
+    // 13 force destroys the socket before it has a chance to emit finish
+    socket.on('finish', common.mustCall(() => {
+      server.close();
+    }));
+  })).listen(0, common.mustCall(() => {
+    const socket = net.connect(server.address().port);
+    socket.end();
+  }));
+}
+
+{
+  const d = new Duplex({
+    autoDestroy: false,
+    write: common.mustCall((data, enc, cb) => {
+      d.push(data);
+      cb();
+    }),
+    read: common.mustCall(() => {
+      d.push(null);
+    }),
+    final: common.mustCall((cb) => {
+      setTimeout(() => {
+        assert.strictEqual(d.destroyed, false);
+        cb();
+      }, 1000);
+    }),
+    destroy: common.mustNotCall()
+  });
+
+  const sink = new Writable({
+    write: common.mustCall((data, enc, cb) => {
+      cb();
+    })
+  });
+
+  pipeline(d, sink, common.mustCall());
+
+  d.write('test');
+  d.end();
+}
+
+{
+  const server = net.createServer(common.mustCall((socket) => {
+    // echo server
+    pipeline(socket, socket, common.mustCall());
+    socket.on('finish', common.mustCall(() => {
+      server.close();
+    }));
+  })).listen(0, common.mustCall(() => {
+    const socket = net.connect(server.address().port);
+    socket.end();
+  }));
+}
+
+{
+  const d = new Duplex({
+    autoDestroy: false,
+    write: common.mustCall((data, enc, cb) => {
+      d.push(data);
+      cb();
+    }),
+    read: common.mustCall(() => {
+      d.push(null);
+    }),
+    final: common.mustCall((cb) => {
+      setTimeout(() => {
+        assert.strictEqual(d.destroyed, false);
+        cb();
+      }, 1000);
+    }),
+    // `destroy()` won't be invoked by pipeline since
+    // the writable side has not completed when
+    // the pipeline has completed.
+    destroy: common.mustNotCall()
+  });
+
+  const sink = new Writable({
+    write: common.mustCall((data, enc, cb) => {
+      cb();
+    })
+  });
+
+  pipeline(d, sink, common.mustCall());
+
+  d.write('test');
+  d.end();
+}
