// nodejs/lib/_stream_readable.js

'use strict';
module.exports = Readable;
Readable.ReadableState = ReadableState;
const EE = require('events');
const Stream = require('stream');
const Buffer = require('buffer').Buffer;
const util = require('util');
const debug = util.debuglog('stream');
var StringDecoder;
util.inherits(Readable, Stream);
function ReadableState(options, stream) {
options = options || {};
// object stream flag. Used to make read(n) ignore n and to
// make all the buffer merging and length checks go away
this.objectMode = !!options.objectMode;
if (stream instanceof Stream.Duplex)
this.objectMode = this.objectMode || !!options.readableObjectMode;
// the point at which it stops calling _read() to fill the buffer
// Note: 0 is a valid value, means "don't call _read preemptively ever"
var hwm = options.highWaterMark;
var defaultHwm = this.objectMode ? 16 : 16 * 1024;
this.highWaterMark = (hwm || hwm === 0) ? hwm : defaultHwm;
// cast to ints.
this.highWaterMark = ~~this.highWaterMark;
this.buffer = [];
this.length = 0;
this.pipes = null;
this.pipesCount = 0;
this.flowing = null;
this.ended = false;
this.endEmitted = false;
this.reading = false;
// a flag to tell whether a push() happened while a _read() call was still
// in progress (synchronously), or on a later tick. We set this to true at
// first, because any actions that shouldn't happen until "later" should
// generally also not happen before the first _read() call.
this.sync = true;
// whenever we return null, then we set a flag to say
// that we're awaiting a 'readable' event emission.
this.needReadable = false;
this.emittedReadable = false;
this.readableListening = false;
this.resumeScheduled = false;
// Crypto is kind of old and crusty. Historically, its default string
// encoding is 'binary' so we have to make this configurable.
// Everything else in the universe uses 'utf8', though.
this.defaultEncoding = options.defaultEncoding || 'utf8';
// when piping, we only care about 'readable' events that happen
// after read()ing all the bytes and not getting any pushback.
this.ranOut = false;
// the number of writers that are awaiting a drain event in .pipe()s
this.awaitDrain = 0;
// if true, a maybeReadMore has been scheduled
this.readingMore = false;
this.decoder = null;
this.encoding = null;
if (options.encoding) {
if (!StringDecoder)
StringDecoder = require('string_decoder').StringDecoder;
this.decoder = new StringDecoder(options.encoding);
this.encoding = options.encoding;
}
}
function Readable(options) {
if (!(this instanceof Readable))
return new Readable(options);
this._readableState = new ReadableState(options, this);
// legacy
this.readable = true;
if (options && typeof options.read === 'function')
this._read = options.read;
Stream.call(this);
}
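// Illustrative sketch (not part of this module): how the options above map to
// ReadableState. The stream names and values below are made up.
//
//   const Readable = require('stream').Readable;
//
//   // Binary mode: the default highWaterMark is 16 * 1024 bytes.
//   const bytes = new Readable({ read() {} });
//
//   // Object mode: lengths count objects, not bytes, and the default
//   // highWaterMark drops to 16 objects.
//   const objects = new Readable({ objectMode: true, read() {} });
//
//   // The `read` option is installed as this._read by the constructor,
//   // so simple streams don't have to subclass Readable.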
// Manually shove something into the read() buffer.
// This returns true if the highWaterMark has not been hit yet,
// similar to how Writable.write() returns true if you should
// write() some more.
Readable.prototype.push = function(chunk, encoding) {
var state = this._readableState;
if (!state.objectMode && typeof chunk === 'string') {
encoding = encoding || state.defaultEncoding;
if (encoding !== state.encoding) {
chunk = Buffer.from(chunk, encoding);
encoding = '';
}
}
return readableAddChunk(this, state, chunk, encoding, false);
};
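// Illustrative sketch (not part of this module): a producer honoring push()'s
// boolean return value. The chunk data here is made up.
//
//   const Readable = require('stream').Readable;
//   const chunks = ['alpha', 'beta', 'gamma'];
//   const src = new Readable({
//     read(size) {
//       // Keep pushing until the internal buffer reaches the highWaterMark
//       // (push() returns false) or we run out of data; _read() will be
//       // called again once the consumer drains the buffer.
//       while (chunks.length) {
//         if (!this.push(chunks.shift()))
//           return;
//       }
//       this.push(null); // EOF
//     }
//   });
//   src.on('data', (c) => console.log(c.toString()));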
// Unshift should *always* be something directly out of read()
Readable.prototype.unshift = function(chunk) {
var state = this._readableState;
return readableAddChunk(this, state, chunk, '', true);
};
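// Illustrative sketch (not part of this module): unshift() used by a consumer
// to put back bytes it over-read, e.g. a header parser handing the rest of the
// stream to another party. `HEADER_LEN` and `parseHeader` are hypothetical,
// and the sketch assumes the first chunk contains the whole header.
//
//   function readHeader(src, callback) {
//     src.on('readable', function onReadable() {
//       const chunk = src.read();
//       if (chunk === null)
//         return;
//       const header = chunk.slice(0, HEADER_LEN);
//       const rest = chunk.slice(HEADER_LEN);
//       if (rest.length > 0)
//         src.unshift(rest); // put back what we did not consume
//       src.removeListener('readable', onReadable);
//       callback(parseHeader(header), src);
//     });
//   }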
Readable.prototype.isPaused = function() {
return this._readableState.flowing === false;
};
function readableAddChunk(stream, state, chunk, encoding, addToFront) {
var er = chunkInvalid(state, chunk);
if (er) {
stream.emit('error', er);
} else if (chunk === null) {
state.reading = false;
onEofChunk(stream, state);
} else if (state.objectMode || chunk && chunk.length > 0) {
if (state.ended && !addToFront) {
const e = new Error('stream.push() after EOF');
stream.emit('error', e);
} else if (state.endEmitted && addToFront) {
const e = new Error('stream.unshift() after end event');
stream.emit('error', e);
} else {
var skipAdd;
if (state.decoder && !addToFront && !encoding) {
chunk = state.decoder.write(chunk);
skipAdd = (!state.objectMode && chunk.length === 0);
}
if (!addToFront)
state.reading = false;
// Don't add to the buffer if we've decoded to an empty string chunk and
// we're not in object mode
if (!skipAdd) {
// if we want the data now, just emit it.
if (state.flowing && state.length === 0 && !state.sync) {
stream.emit('data', chunk);
stream.read(0);
} else {
// update the buffer info.
state.length += state.objectMode ? 1 : chunk.length;
if (addToFront)
state.buffer.unshift(chunk);
else
state.buffer.push(chunk);
if (state.needReadable)
emitReadable(stream);
}
}
maybeReadMore(stream, state);
}
} else if (!addToFront) {
state.reading = false;
}
return needMoreData(state);
}
// if we're below the high water mark, we can push in some more.
// Also, if we have no data yet, we can stand some
// more bytes. This is to work around cases where hwm=0,
// such as the repl. Also, if the push() triggered a
// readable event, and the user called read(largeNumber) such that
// needReadable was set, then we ought to push more, so that another
// 'readable' event will be triggered.
function needMoreData(state) {
return !state.ended &&
(state.needReadable ||
state.length < state.highWaterMark ||
state.length === 0);
}
// backwards compatibility.
Readable.prototype.setEncoding = function(enc) {
if (!StringDecoder)
StringDecoder = require('string_decoder').StringDecoder;
this._readableState.decoder = new StringDecoder(enc);
this._readableState.encoding = enc;
return this;
};
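// Illustrative sketch (not part of this module): after setEncoding(), read()
// and 'data' produce strings decoded by StringDecoder, and multi-byte
// characters split across chunks are reassembled rather than broken.
//
//   const Readable = require('stream').Readable;
//   const r = new Readable({ read() {} });
//   r.setEncoding('utf8');
//   r.push(Buffer.from([0xe2, 0x82])); // first two bytes of '€'
//   r.push(Buffer.from([0xac]));       // final byte
//   r.push(null);
//   r.on('data', (s) => console.log(typeof s, s)); // logs: string €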
// Don't raise the hwm > 8MB
const MAX_HWM = 0x800000;
function computeNewHighWaterMark(n) {
if (n >= MAX_HWM) {
n = MAX_HWM;
} else {
// Get the next highest power of 2
n--;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
n++;
}
return n;
}
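// Worked example (illustrative): the bit-twiddling above rounds n up to the
// next power of two, capped at MAX_HWM.
//
//   computeNewHighWaterMark(1)         === 1
//   computeNewHighWaterMark(1000)      === 1024
//   computeNewHighWaterMark(16384)     === 16384     // already a power of two
//   computeNewHighWaterMark(0x1000000) === 0x800000  // clamped to 8MB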
function howMuchToRead(n, state) {
if (state.length === 0 && state.ended)
return 0;
if (state.objectMode)
return n === 0 ? 0 : 1;
if (n === null || isNaN(n)) {
// only flow one buffer at a time
if (state.flowing && state.buffer.length)
return state.buffer[0].length;
else
return state.length;
}
if (n <= 0)
return 0;
// If we're asking for more than the target buffer level,
// then raise the water mark. Bump up to the next highest
// power of 2, to prevent increasing it excessively in tiny
// amounts.
if (n > state.highWaterMark)
state.highWaterMark = computeNewHighWaterMark(n);
// don't have that much. return null, unless we've ended.
if (n > state.length) {
if (!state.ended) {
state.needReadable = true;
return 0;
} else {
return state.length;
}
}
return n;
}
// you can override either this method, or the async _read(n) below.
Readable.prototype.read = function(n) {
debug('read', n);
var state = this._readableState;
var nOrig = n;
if (typeof n !== 'number' || n > 0)
state.emittedReadable = false;
// if we're doing read(0) to trigger a readable event, but we
// already have a bunch of data in the buffer, then just trigger
// the 'readable' event and move on.
if (n === 0 &&
state.needReadable &&
(state.length >= state.highWaterMark || state.ended)) {
debug('read: emitReadable', state.length, state.ended);
if (state.length === 0 && state.ended)
endReadable(this);
else
emitReadable(this);
return null;
}
n = howMuchToRead(n, state);
// if we've ended, and we're now clear, then finish it up.
if (n === 0 && state.ended) {
if (state.length === 0)
endReadable(this);
return null;
}
// All the actual chunk generation logic needs to be
// *below* the call to _read. The reason is that in certain
// synthetic stream cases, such as passthrough streams, _read
// may be a completely synchronous operation which may change
// the state of the read buffer, providing enough data when
// before there was *not* enough.
//
// So, the steps are:
// 1. Figure out what the state of things will be after we do
// a read from the buffer.
//
// 2. If that resulting state will trigger a _read, then call _read.
// Note that this may be asynchronous, or synchronous. Yes, it is
// deeply ugly to write APIs this way, but that still doesn't mean
// that the Readable class should behave improperly, as streams are
// designed to be sync/async agnostic.
// Take note if the _read call is sync or async (ie, if the read call
// has returned yet), so that we know whether or not it's safe to emit
// 'readable' etc.
//
// 3. Actually pull the requested chunks out of the buffer and return.
// if we need a readable event, then we need to do some reading.
var doRead = state.needReadable;
debug('need readable', doRead);
// if we currently have less than the highWaterMark, then also read some
if (state.length === 0 || state.length - n < state.highWaterMark) {
doRead = true;
debug('length less than watermark', doRead);
}
// however, if we've ended, then there's no point, and if we're already
// reading, then it's unnecessary.
if (state.ended || state.reading) {
doRead = false;
debug('reading or ended', doRead);
}
if (doRead) {
debug('do read');
state.reading = true;
state.sync = true;
// if the length is currently zero, then we *need* a readable event.
if (state.length === 0)
state.needReadable = true;
// call internal read method
this._read(state.highWaterMark);
state.sync = false;
}
// If _read pushed data synchronously, then `reading` will be false,
// and we need to re-evaluate how much data we can return to the user.
if (doRead && !state.reading)
n = howMuchToRead(nOrig, state);
var ret;
if (n > 0)
ret = fromList(n, state);
else
ret = null;
if (ret === null) {
state.needReadable = true;
n = 0;
}
state.length -= n;
// If we have nothing in the buffer, then we want to know
// as soon as we *do* get something into the buffer.
if (state.length === 0 && !state.ended)
state.needReadable = true;
// If we tried to read() past the EOF, then emit end on the next tick.
if (nOrig !== n && state.ended && state.length === 0)
endReadable(this);
if (ret !== null)
this.emit('data', ret);
return ret;
};
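// Illustrative sketch (not part of this module): the non-flowing ("paused")
// consumption pattern read() is designed for. Each 'readable' event means
// data is buffered (or EOF was reached); read() returns null once the buffer
// is drained. The file name is made up.
//
//   const fs = require('fs');
//   const src = fs.createReadStream('example.txt');
//   src.on('readable', () => {
//     let chunk;
//     while ((chunk = src.read()) !== null) {
//       // process `chunk`; a sized call such as src.read(64) returns null
//       // until at least 64 bytes are buffered or the stream has ended.
//     }
//   });
//   src.on('end', () => console.log('done'));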
function chunkInvalid(state, chunk) {
var er = null;
if (!(chunk instanceof Buffer) &&
typeof chunk !== 'string' &&
chunk !== null &&
chunk !== undefined &&
!state.objectMode) {
er = new TypeError('Invalid non-string/buffer chunk');
}
return er;
}
function onEofChunk(stream, state) {
if (state.ended) return;
if (state.decoder) {
var chunk = state.decoder.end();
if (chunk && chunk.length) {
state.buffer.push(chunk);
state.length += state.objectMode ? 1 : chunk.length;
}
}
state.ended = true;
// emit 'readable' now to make sure it gets picked up.
emitReadable(stream);
}
// Don't emit readable right away in sync mode, because this can trigger
// another read() call => stack overflow. This way, it might trigger
// a nextTick recursion warning, but that's not so bad.
function emitReadable(stream) {
var state = stream._readableState;
state.needReadable = false;
if (!state.emittedReadable) {
debug('emitReadable', state.flowing);
state.emittedReadable = true;
if (state.sync)
process.nextTick(emitReadable_, stream);
else
emitReadable_(stream);
}
}
function emitReadable_(stream) {
debug('emit readable');
stream.emit('readable');
flow(stream);
}
// at this point, the user has presumably seen the 'readable' event,
// and called read() to consume some data. that may have triggered
// in turn another _read(n) call, in which case reading = true if
// it's in progress.
// However, if we're not ended, or reading, and the length < hwm,
// then go ahead and try to read some more preemptively.
function maybeReadMore(stream, state) {
if (!state.readingMore) {
state.readingMore = true;
process.nextTick(maybeReadMore_, stream, state);
}
}
function maybeReadMore_(stream, state) {
var len = state.length;
while (!state.reading && !state.flowing && !state.ended &&
state.length < state.highWaterMark) {
debug('maybeReadMore read 0');
stream.read(0);
if (len === state.length)
// didn't get any data, stop spinning.
break;
else
len = state.length;
}
state.readingMore = false;
}
// abstract method. to be overridden in specific implementation classes.
// call this.push(chunk) with chunks totalling <= n in length, and
// this.push(null) to signal EOF.
// for virtual (non-string, non-buffer) streams, "length" is somewhat
// arbitrary, and perhaps not very meaningful.
Readable.prototype._read = function(n) {
this.emit('error', new Error('not implemented'));
};
Readable.prototype.pipe = function(dest, pipeOpts) {
var src = this;
var state = this._readableState;
switch (state.pipesCount) {
case 0:
state.pipes = dest;
break;
case 1:
state.pipes = [state.pipes, dest];
break;
default:
state.pipes.push(dest);
break;
}
state.pipesCount += 1;
debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts);
var doEnd = (!pipeOpts || pipeOpts.end !== false) &&
dest !== process.stdout &&
dest !== process.stderr;
var endFn = doEnd ? onend : cleanup;
if (state.endEmitted)
process.nextTick(endFn);
else
src.once('end', endFn);
dest.on('unpipe', onunpipe);
function onunpipe(readable) {
debug('onunpipe');
if (readable === src) {
cleanup();
}
}
function onend() {
debug('onend');
dest.end();
}
// when the dest drains, it reduces the awaitDrain counter
// on the source. This would be more elegant with a .once()
// handler in flow(), but adding and removing repeatedly is
// too slow.
var ondrain = pipeOnDrain(src);
dest.on('drain', ondrain);
var cleanedUp = false;
function cleanup() {
debug('cleanup');
// cleanup event handlers once the pipe is broken
dest.removeListener('close', onclose);
dest.removeListener('finish', onfinish);
dest.removeListener('drain', ondrain);
dest.removeListener('error', onerror);
dest.removeListener('unpipe', onunpipe);
src.removeListener('end', onend);
src.removeListener('end', cleanup);
src.removeListener('data', ondata);
cleanedUp = true;
// if the reader is waiting for a drain event from this
// specific writer, then it would cause it to never start
// flowing again.
// So, if this is awaiting a drain, then we just call it now.
// If we don't know, then assume that we are waiting for one.
if (state.awaitDrain &&
(!dest._writableState || dest._writableState.needDrain))
ondrain();
}
src.on('data', ondata);
function ondata(chunk) {
debug('ondata');
var ret = dest.write(chunk);
if (false === ret) {
// If the user unpiped during `dest.write()`, it is possible
// to get stuck in a permanently paused state if that write
// also returned false.
// => Check whether `dest` is still a piping destination.
if (((state.pipesCount === 1 && state.pipes === dest) ||
(state.pipesCount > 1 && state.pipes.indexOf(dest) !== -1)) &&
!cleanedUp) {
debug('false write response, pause', src._readableState.awaitDrain);
src._readableState.awaitDrain++;
}
src.pause();
}
}
// if the dest has an error, then stop piping into it.
// however, don't suppress the throwing behavior for this.
function onerror(er) {
debug('onerror', er);
unpipe();
dest.removeListener('error', onerror);
if (EE.listenerCount(dest, 'error') === 0)
dest.emit('error', er);
}
// This is a brutally ugly hack to make sure that our error handler
// is attached before any userland ones. NEVER DO THIS.
if (!dest._events || !dest._events.error)
dest.on('error', onerror);
else if (Array.isArray(dest._events.error))
dest._events.error.unshift(onerror);
else
dest._events.error = [onerror, dest._events.error];
// Both close and finish should trigger unpipe, but only once.
function onclose() {
dest.removeListener('finish', onfinish);
unpipe();
}
dest.once('close', onclose);
function onfinish() {
debug('onfinish');
dest.removeListener('close', onclose);
unpipe();
}
dest.once('finish', onfinish);
function unpipe() {
debug('unpipe');
src.unpipe(dest);
}
// tell the dest that it's being piped to
dest.emit('pipe', src);
// start the flow if it hasn't been started already.
if (!state.flowing) {
debug('pipe resume');
src.resume();
}
return dest;
};
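// Illustrative sketch (not part of this module): pipe() and its `end` option.
// Backpressure is handled by the ondata/ondrain pair above: when dest.write()
// returns false the source pauses, and the destination's 'drain' event resumes
// it via pipeOnDrain(). The file names are made up.
//
//   const fs = require('fs');
//   const src = fs.createReadStream('in.txt');
//   const dst = fs.createWriteStream('out.txt');
//
//   src.pipe(dst);                     // calls dst.end() when src ends
//   // src.pipe(dst, { end: false });  // keep dst open for further writes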
function pipeOnDrain(src) {
return function() {
var state = src._readableState;
debug('pipeOnDrain', state.awaitDrain);
if (state.awaitDrain)
state.awaitDrain--;
if (state.awaitDrain === 0 && EE.listenerCount(src, 'data')) {
state.flowing = true;
flow(src);
}
};
}
Readable.prototype.unpipe = function(dest) {
var state = this._readableState;
// if we're not piping anywhere, then do nothing.
if (state.pipesCount === 0)
return this;
// just one destination. most common case.
if (state.pipesCount === 1) {
// passed in one, but it's not the right one.
if (dest && dest !== state.pipes)
return this;
if (!dest)
dest = state.pipes;
// got a match.
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
if (dest)
dest.emit('unpipe', this);
return this;
}
// slow case. multiple pipe destinations.
if (!dest) {
// remove all.
var dests = state.pipes;
var len = state.pipesCount;
state.pipes = null;
state.pipesCount = 0;
state.flowing = false;
for (let i = 0; i < len; i++)
dests[i].emit('unpipe', this);
return this;
}
// try to find the right one.
const i = state.pipes.indexOf(dest);
if (i === -1)
return this;
state.pipes.splice(i, 1);
state.pipesCount -= 1;
if (state.pipesCount === 1)
state.pipes = state.pipes[0];
dest.emit('unpipe', this);
return this;
};
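// Illustrative sketch (not part of this module): detaching destinations.
// With no argument, unpipe() removes every destination; with a stream, only
// that one. `slow` and `fast` are hypothetical writable streams.
//
//   src.pipe(slow);
//   src.pipe(fast);
//   src.unpipe(slow); // `fast` keeps receiving data; `slow` gets 'unpipe'
//   src.unpipe();     // no destinations left, so the source stops flowing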
// set up data events if they are asked for
// Ensure readable listeners eventually get something
Readable.prototype.on = function(ev, fn) {
var res = Stream.prototype.on.call(this, ev, fn);
// If listening to data, and it has not explicitly been paused,
// then call resume to start the flow of data on the next tick.
if (ev === 'data' && false !== this._readableState.flowing) {
this.resume();
}
if (ev === 'readable' && !this._readableState.endEmitted) {
var state = this._readableState;
if (!state.readableListening) {
state.readableListening = true;
state.emittedReadable = false;
state.needReadable = true;
if (!state.reading) {
process.nextTick(nReadingNextTick, this);
} else if (state.length) {
emitReadable(this, state);
}
}
}
return res;
};
Readable.prototype.addListener = Readable.prototype.on;
function nReadingNextTick(self) {
debug('readable nexttick read 0');
self.read(0);
}
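// Illustrative sketch (not part of this module): adding a 'data' listener
// switches the stream into flowing mode (unless it was explicitly paused),
// while a 'readable' listener arms needReadable and schedules a read(0) so
// the listener is guaranteed to fire eventually.
//
//   const Readable = require('stream').Readable;
//   const r = new Readable({ read() {} });
//   console.log(r.isPaused());  // false -- flowing is still null
//   r.on('data', (c) => console.log('chunk', c.toString()));
//   r.push('hello');            // emitted as a 'data' event once flowing
//   r.push(null);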
// pause() and resume() are remnants of the legacy readable stream API
// If the user uses them, then switch into old mode.
Readable.prototype.resume = function() {
var state = this._readableState;
if (!state.flowing) {
debug('resume');
state.flowing = true;
resume(this, state);
}
return this;
};
function resume(stream, state) {
if (!state.resumeScheduled) {
state.resumeScheduled = true;
process.nextTick(resume_, stream, state);
}
}
function resume_(stream, state) {
if (!state.reading) {
debug('resume read 0');
stream.read(0);
}
state.resumeScheduled = false;
stream.emit('resume');
flow(stream);
if (state.flowing && !state.reading)
stream.read(0);
}
Readable.prototype.pause = function() {
debug('call pause flowing=%j', this._readableState.flowing);
if (false !== this._readableState.flowing) {
debug('pause');
this._readableState.flowing = false;
this.emit('pause');
}
return this;
};
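// Illustrative sketch (not part of this module): toggling flowing mode.
// pause() only has an effect on a stream that is (or is becoming) flowing,
// and resume() schedules the actual restart on the next tick via resume_().
// `src` and `onData` are hypothetical.
//
//   src.on('data', onData);                // flowing
//   src.pause();                           // 'data' stops; isPaused() === true
//   setTimeout(() => src.resume(), 1000);  // flowing again after one second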
function flow(stream) {
var state = stream._readableState;
debug('flow', state.flowing);
if (state.flowing) {
do {
var chunk = stream.read();
} while (null !== chunk && state.flowing);
}
}
// wrap an old-style stream as the async data source.
// This is *not* part of the readable stream interface.
// It is an ugly unfortunate mess of history.
Readable.prototype.wrap = function(stream) {
var state = this._readableState;
var paused = false;
var self = this;
stream.on('end', function() {
debug('wrapped end');
if (state.decoder && !state.ended) {
var chunk = state.decoder.end();
if (chunk && chunk.length)
self.push(chunk);
}
self.push(null);
});
stream.on('data', function(chunk) {
debug('wrapped data');
if (state.decoder)
chunk = state.decoder.write(chunk);
// don't skip over falsy values in objectMode
if (state.objectMode && (chunk === null || chunk === undefined))
return;
else if (!state.objectMode && (!chunk || !chunk.length))
return;
var ret = self.push(chunk);
if (!ret) {
paused = true;
stream.pause();
}
});
// proxy all the other methods.
// important when wrapping filters and duplexes.
for (var i in stream) {
if (this[i] === undefined && typeof stream[i] === 'function') {
this[i] = function(method) { return function() {
return stream[method].apply(stream, arguments);
}; }(i);
}
}
// proxy certain important events.
const events = ['error', 'close', 'destroy', 'pause', 'resume'];
events.forEach(function(ev) {
stream.on(ev, self.emit.bind(self, ev));
});
// when we try to consume some more bytes, simply unpause the
// underlying stream.
self._read = function(n) {
debug('wrapped _read', n);
if (paused) {
paused = false;
stream.resume();
}
};
return self;
};
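// Illustrative sketch (not part of this module): wrap() adapts an old-style
// ('data'/'pause'/'resume') stream so it can be consumed through the streams2
// API. `OldStyleStream` is hypothetical.
//
//   const Readable = require('stream').Readable;
//   const old = new OldStyleStream();
//   const modern = new Readable().wrap(old);
//   modern.on('readable', () => {
//     let chunk;
//     while ((chunk = modern.read()) !== null) {
//       // consume `chunk`
//     }
//   });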
// exposed for testing purposes only.
Readable._fromList = fromList;
// Pluck off n bytes from an array of buffers.
// Length is the combined lengths of all the buffers in the list.
function fromList(n, state) {
var list = state.buffer;
var length = state.length;
var stringMode = !!state.decoder;
var objectMode = !!state.objectMode;
var ret;
// nothing in the list, definitely empty.
if (list.length === 0)
return null;
if (length === 0)
ret = null;
else if (objectMode)
ret = list.shift();
else if (!n || n >= length) {
// read it all, truncate the array.
if (stringMode)
ret = list.join('');
else if (list.length === 1)
ret = list[0];
else
ret = Buffer.concat(list, length);
list.length = 0;
} else {
// read just some of it.
if (n < list[0].length) {
// just take a part of the first list item.
// slice is the same for buffers and strings.
const buf = list[0];
ret = buf.slice(0, n);
list[0] = buf.slice(n);
} else if (n === list[0].length) {
// first list is a perfect match
ret = list.shift();
} else {
// complex case.
// we have enough to cover it, but it spans past the first buffer.
if (stringMode)
ret = '';
else
ret = Buffer.allocUnsafe(n);
var c = 0;
for (var i = 0, l = list.length; i < l && c < n; i++) {
const buf = list[0];
var cpy = Math.min(n - c, buf.length);
if (stringMode)
ret += buf.slice(0, cpy);
else
buf.copy(ret, c, 0, cpy);
if (cpy < buf.length)
list[0] = buf.slice(cpy);
else
list.shift();
c += cpy;
}
}
}
return ret;
}
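// Worked example (illustrative): a partial read through fromList(). Suppose
// the buffer holds two 4-byte Buffers (state.length === 8) and fromList(6, ..)
// is called: the first Buffer is consumed whole, 2 bytes are copied out of the
// second, and the remaining 2-byte slice is left at the head of the list.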
function endReadable(stream) {
var state = stream._readableState;
// If we get here before consuming all the bytes, then that is a
// bug in node. Should never happen.
if (state.length > 0)
throw new Error('"endReadable()" called on non-empty stream');
if (!state.endEmitted) {
state.ended = true;
process.nextTick(endReadableNT, state, stream);
}
}
function endReadableNT(state, stream) {
// Check that we didn't get one last unshift.
if (!state.endEmitted && state.length === 0) {
state.endEmitted = true;
stream.readable = false;
stream.emit('end');
}
}