// Copyright Joyent, Inc. and other Node contributors.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to permit
// persons to whom the Software is furnished to do so, subject to the
// following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
// USE OR OTHER DEALINGS IN THE SOFTWARE.

module.exports = Readable;
Readable.ReadableState = ReadableState;

var EE = require('events').EventEmitter;
var Stream = require('stream');
var util = require('util');
var StringDecoder;

util.inherits(Readable, Stream);

function ReadableState(options, stream) {
  options = options || {};

  // the point at which it stops calling _read() to fill the buffer
  // Note: 0 is a valid value, means "don't call _read preemptively ever"
  var hwm = options.highWaterMark;
  this.highWaterMark = (hwm || hwm === 0) ? hwm : 16 * 1024;

  // cast to ints.
  this.highWaterMark = ~~this.highWaterMark;

  this.buffer = [];
  this.length = 0;
  this.pipes = null;
  this.pipesCount = 0;
  this.flowing = false;
  this.ended = false;
  this.endEmitted = false;
  this.reading = false;

  // a flag to be able to tell if the onwrite cb is called immediately,
  // or on a later tick. We set this to true at first, because any
  // actions that shouldn't happen until "later" should generally also
  // not happen before the first write call.
  this.sync = true;

  // whenever we return null, then we set a flag to say
  // that we're awaiting a 'readable' event emission.
  this.needReadable = false;
  this.emittedReadable = false;

  // object stream flag. Used to make read(n) ignore n and to
  // make all the buffer merging and length checks go away
  this.objectMode = !!options.objectMode;

  // when piping, we only care about 'readable' events that happen
  // after read()ing all the bytes and not getting any pushback.
  this.ranOut = false;

  // the number of writers that are awaiting a drain event in .pipe()s
  this.awaitDrain = 0;

  // if true, a maybeReadMore has been scheduled
  this.readingMore = false;

  this.decoder = null;
  if (options.encoding) {
    if (!StringDecoder)
      StringDecoder = require('string_decoder').StringDecoder;
    this.decoder = new StringDecoder(options.encoding);
  }
}
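
// Illustrative example (sketch only, not used by this module): the
// options consumed above can be combined when constructing a stream.
//
//   var r = new Readable({
//     highWaterMark: 0,    // 0 is valid: never call _read() preemptively
//     encoding: 'utf8',    // decode pushed Buffers to strings
//     objectMode: true     // read(n) ignores n; length counts chunks
//   });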

function Readable(options) {
  if (!(this instanceof Readable))
    return new Readable(options);

  this._readableState = new ReadableState(options, this);

  // legacy
  this.readable = true;

  Stream.call(this);
}

// Manually shove something into the read() buffer.
// This returns true if the highWaterMark has not been hit yet,
// similar to how Writable.write() returns true if you should
// write() some more.
Readable.prototype.push = function(chunk) {
  var state = this._readableState;
  if (typeof chunk === 'string' && !state.objectMode)
    chunk = new Buffer(chunk, arguments[1]);
  return readableAddChunk(this, state, chunk, false);
};

Readable.prototype.unshift = function(chunk) {
  var state = this._readableState;
  if (typeof chunk === 'string' && !state.objectMode)
    chunk = new Buffer(chunk, arguments[1]);
  return readableAddChunk(this, state, chunk, true);
};
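
// Illustrative sketch (not part of this module): a minimal subclass
// whose _read() pushes a few chunks and then signals EOF with
// push(null). The `Counter` name and chunk contents are invented for
// the example; a real implementation would fetch data from somewhere.
//
//   function Counter(options) {
//     Readable.call(this, options);
//     this._n = 0;
//   }
//   util.inherits(Counter, Readable);
//   Counter.prototype._read = function(size) {
//     if (this._n < 3)
//       this.push(new Buffer('chunk ' + (this._n++)));
//     else
//       this.push(null); // EOF: nothing more will be pushed
//   };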

function readableAddChunk(stream, state, chunk, addToFront) {
  state.reading = false;

  var er = chunkInvalid(state, chunk);
  if (er) {
    stream.emit('error', er);
  } else if (chunk === null || chunk === undefined) {
    onEofChunk(stream, state);
  } else if (state.objectMode || chunk && chunk.length > 0) {
    if (state.decoder)
      chunk = state.decoder.write(chunk);

    // update the buffer info.
    state.length += state.objectMode ? 1 : chunk.length;
    if (addToFront)
      state.buffer.unshift(chunk);
    else
      state.buffer.push(chunk);

    if (state.needReadable)
      emitReadable(stream);

    maybeReadMore(stream, state);
  }

  return needMoreData(state);
}

// if it's below the high water mark, we can push in some more.
// Also, if we have no data yet, we can stand some
// more bytes. This is to work around cases where hwm=0,
// such as the repl. Also, if the push() triggered a
// readable event, and the user called read(largeNumber) such that
// needReadable was set, then we ought to push more, so that another
// 'readable' event will be triggered.
function needMoreData(state) {
  return !state.ended &&
         (state.needReadable ||
          state.length < state.highWaterMark ||
          state.length === 0);
}

// backwards compatibility.
Readable.prototype.setEncoding = function(enc) {
  if (!StringDecoder)
    StringDecoder = require('string_decoder').StringDecoder;
  this._readableState.decoder = new StringDecoder(enc);
};
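
// Illustrative usage (sketch; `r` is assumed to be any Readable): after
// r.setEncoding('utf8'), read() returns decoded strings rather than
// Buffers, and the StringDecoder keeps multi-byte characters from being
// split across chunks.
//
//   r.setEncoding('utf8');
//   r.on('readable', function() {
//     var s = r.read(); // string or null
//   });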

// Don't raise the hwm > 8MB
var MAX_HWM = 0x800000;
function roundUpToNextPowerOf2(n) {
  if (n >= MAX_HWM) {
    n = MAX_HWM;
  } else {
    // Get the next highest power of 2
    n--;
    for (var p = 1; p < 32; p <<= 1) n |= n >> p;
    n++;
  }
  return n;
}
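
// Worked example (illustrative): roundUpToNextPowerOf2(1000) decrements
// to 999, the or-shift loop fills in every bit below the top set bit
// (999 -> 1023), and the final increment yields 1024. So a read(1000)
// against a smaller highWaterMark bumps the mark up to 1024.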

function howMuchToRead(n, state) {
  if (state.length === 0 && state.ended)
    return 0;

  if (state.objectMode)
    return n === 0 ? 0 : 1;

  if (isNaN(n) || n === null) {
    // only flow one buffer at a time
    if (state.flowing && state.buffer.length)
      return state.buffer[0].length;
    else
      return state.length;
  }

  if (n <= 0)
    return 0;

  // If we're asking for more than the target buffer level,
  // then raise the water mark. Bump up to the next highest
  // power of 2, to prevent increasing it excessively in tiny
  // amounts.
  if (n > state.highWaterMark)
    state.highWaterMark = roundUpToNextPowerOf2(n);

  // don't have that much. return null, unless we've ended.
  if (n > state.length) {
    if (!state.ended) {
      state.needReadable = true;
      return 0;
    } else
      return state.length;
  }

  return n;
}

// you can override either this method, or the async _read(n) below.
Readable.prototype.read = function(n) {
  var state = this._readableState;
  var nOrig = n;

  if (typeof n !== 'number' || n > 0)
    state.emittedReadable = false;

  // if we're doing read(0) to trigger a readable event, but we
  // already have a bunch of data in the buffer, then just trigger
  // the 'readable' event and move on.
  if (n === 0 &&
      state.needReadable &&
      state.length >= state.highWaterMark) {
    emitReadable(this);
    return null;
  }

  n = howMuchToRead(n, state);

  // if we've ended, and we're now clear, then finish it up.
  if (n === 0 && state.ended) {
    if (state.length === 0)
      endReadable(this);
    return null;
  }

  // All the actual chunk generation logic needs to be
  // *below* the call to _read. The reason is that in certain
  // synthetic stream cases, such as passthrough streams, _read
  // may be a completely synchronous operation which may change
  // the state of the read buffer, providing enough data when
  // before there was *not* enough.
  //
  // So, the steps are:
  // 1. Figure out what the state of things will be after we do
  // a read from the buffer.
  //
  // 2. If that resulting state will trigger a _read, then call _read.
  // Note that this may be asynchronous, or synchronous. Yes, it is
  // deeply ugly to write APIs this way, but that still doesn't mean
  // that the Readable class should behave improperly, as streams are
  // designed to be sync/async agnostic.
  // Take note if the _read call is sync or async (ie, if the read call
  // has returned yet), so that we know whether or not it's safe to emit
  // 'readable' etc.
  //
  // 3. Actually pull the requested chunks out of the buffer and return.

  // if we need a readable event, then we need to do some reading.
  var doRead = state.needReadable;

  // if we currently have less than the highWaterMark, then also read some
  if (state.length - n <= state.highWaterMark)
    doRead = true;

  // however, if we've ended, then there's no point, and if we're already
  // reading, then it's unnecessary.
  if (state.ended || state.reading)
    doRead = false;

  if (doRead) {
    state.reading = true;
    state.sync = true;
    // if the length is currently zero, then we *need* a readable event.
    if (state.length === 0)
      state.needReadable = true;
    // call internal read method
    this._read(state.highWaterMark);
    state.sync = false;
  }

  // If _read called its callback synchronously, then `reading`
  // will be false, and we need to re-evaluate how much data we
  // can return to the user.
  if (doRead && !state.reading)
    n = howMuchToRead(nOrig, state);

  var ret;
  if (n > 0)
    ret = fromList(n, state);
  else
    ret = null;

  if (ret === null) {
    state.needReadable = true;
    n = 0;
  }

  state.length -= n;

  // If we have nothing in the buffer, then we want to know
  // as soon as we *do* get something into the buffer.
  if (state.length === 0 && !state.ended)
    state.needReadable = true;

  // If we happened to read() exactly the remaining amount in the
  // buffer, and the EOF has been seen at this point, then make sure
  // that we emit 'end' on the very next tick.
  if (state.ended && !state.endEmitted && state.length === 0)
    endReadable(this);

  return ret;
};
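
// Illustrative consumption pattern (sketch only; `r` is assumed to be
// any Readable): drain whatever is buffered each time 'readable' fires,
// stopping when read() returns null.
//
//   r.on('readable', function() {
//     var chunk;
//     while (null !== (chunk = r.read()))
//       console.log('got %d bytes', chunk.length);
//   });
//   r.on('end', function() {
//     console.log('no more data');
//   });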

function chunkInvalid(state, chunk) {
  var er = null;
  if (!Buffer.isBuffer(chunk) &&
      'string' !== typeof chunk &&
      chunk !== null &&
      chunk !== undefined &&
      !state.objectMode &&
      !er) {
    er = new TypeError('Invalid non-string/buffer chunk');
  }
  return er;
}

function onEofChunk(stream, state) {
  state.ended = true;
  if (state.decoder) {
    var chunk = state.decoder.end();
    if (chunk && chunk.length) {
      state.buffer.push(chunk);
      state.length += state.objectMode ? 1 : chunk.length;
    }
  }

  // if we've ended and we have some data left, then emit
  // 'readable' now to make sure it gets picked up.
  if (state.length > 0)
    emitReadable(stream);
  else
    endReadable(stream);
}

// Don't emit readable right away in sync mode, because this can trigger
// another read() call => stack overflow. This way, it might trigger
// a nextTick recursion warning, but that's not so bad.
function emitReadable(stream) {
  var state = stream._readableState;
  state.needReadable = false;
  if (state.emittedReadable)
    return;

  state.emittedReadable = true;
  if (state.sync)
    process.nextTick(function() {
      emitReadable_(stream);
    });
  else
    emitReadable_(stream);
}

function emitReadable_(stream) {
  var state = stream._readableState;
  stream.emit('readable');
}

// at this point, the user has presumably seen the 'readable' event,
// and called read() to consume some data. that may have triggered
// in turn another _read(n) call, in which case reading = true if
// it's in progress.
// However, if we're not ended, or reading, and the length < hwm,
// then go ahead and try to read some more preemptively.
function maybeReadMore(stream, state) {
  if (!state.readingMore) {
    state.readingMore = true;
    process.nextTick(function() {
      state.readingMore = false;
      maybeReadMore_(stream, state);
    });
  }
}

function maybeReadMore_(stream, state) {
  if (!state.reading && !state.flowing && !state.ended &&
      state.length < state.highWaterMark) {
    stream.read(0);
  }
}

// abstract method. to be overridden in specific implementation classes.
// Implementations should fetch up to n bytes of data and deliver it via
// this.push(data) (and this.push(null) to signal EOF); there is no
// callback argument.
// for virtual (non-string, non-buffer) streams, "length" is somewhat
// arbitrary, and perhaps not very meaningful.
Readable.prototype._read = function(n) {
  this.emit('error', new Error('not implemented'));
};

Readable.prototype.pipe = function(dest, pipeOpts) {
  var src = this;
  var state = this._readableState;

  switch (state.pipesCount) {
    case 0:
      state.pipes = dest;
      break;
    case 1:
      state.pipes = [state.pipes, dest];
      break;
    default:
      state.pipes.push(dest);
      break;
  }
  state.pipesCount += 1;

  if ((!pipeOpts || pipeOpts.end !== false) &&
      dest !== process.stdout &&
      dest !== process.stderr) {
    src.once('end', onend);
  } else {
    src.once('end', cleanup);
  }

  dest.on('unpipe', onunpipe);
  function onunpipe(readable) {
    if (readable !== src) return;
    cleanup();
  }

  function onend() {
    dest.end();
  }

  // when the dest drains, it reduces the awaitDrain counter
  // on the source. This would be more elegant with a .once()
  // handler in flow(), but adding and removing repeatedly is
  // too slow.
  var ondrain = pipeOnDrain(src);
  dest.on('drain', ondrain);

  function cleanup() {
    // cleanup event handlers once the pipe is broken
    dest.removeListener('close', onclose);
    dest.removeListener('finish', onfinish);
    dest.removeListener('drain', ondrain);
    dest.removeListener('error', onerror);
    dest.removeListener('unpipe', onunpipe);
    src.removeListener('end', onend);
    src.removeListener('end', cleanup);

    // if the reader is waiting for a drain event from this
    // specific writer, then it would cause it to never start
    // flowing again.
    // So, if this is awaiting a drain, then we just call it now.
    // If we don't know, then assume that we are waiting for one.
    if (!dest._writableState || dest._writableState.needDrain)
      ondrain();
  }

  // if the dest has an error, then stop piping into it.
  // however, don't suppress the throwing behavior for this.
  function onerror(er) {
    unpipe();
    if (EE.listenerCount(dest, 'error') === 0)
      dest.emit('error', er);
  }
  dest.once('error', onerror);

  // Both close and finish should trigger unpipe, but only once.
  function onclose() {
    dest.removeListener('finish', onfinish);
    unpipe();
  }
  dest.once('close', onclose);
  function onfinish() {
    dest.removeListener('close', onclose);
    unpipe();
  }
  dest.once('finish', onfinish);

  function unpipe() {
    src.unpipe(dest);
  }

  // tell the dest that it's being piped to
  dest.emit('pipe', src);

  // start the flow if it hasn't been started already.
  if (!state.flowing) {
    // the handler that waits for readable events after all
    // the data gets sucked out in flow.
    // This would be easier to follow with a .once() handler
    // in flow(), but that is too slow.
    this.on('readable', pipeOnReadable);

    state.flowing = true;
    process.nextTick(function() {
      flow(src);
    });
  }

  return dest;
};
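
// Illustrative usage (sketch; `src` and `dest` are any readable and
// writable streams): pipe() handles backpressure via 'drain', and by
// default calls dest.end() when the source ends. Pass { end: false }
// to keep the destination open.
//
//   src.pipe(dest);                  // dest.end() on source 'end'
//   src.pipe(dest, { end: false });  // caller ends dest explicitly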

function pipeOnDrain(src) {
  return function() {
    var dest = this;
    var state = src._readableState;
    state.awaitDrain--;
    if (state.awaitDrain === 0)
      flow(src);
  };
}

function flow(src) {
  var state = src._readableState;
  var chunk;
  state.awaitDrain = 0;

  function write(dest, i, list) {
    var written = dest.write(chunk);
    if (false === written) {
      state.awaitDrain++;
    }
  }

  while (state.pipesCount && null !== (chunk = src.read())) {

    if (state.pipesCount === 1)
      write(state.pipes, 0, null);
    else
      state.pipes.forEach(write);

    src.emit('data', chunk);

    // if anyone needs a drain, then we have to wait for that.
    if (state.awaitDrain > 0)
      return;
  }

  // if every destination was unpiped, either before entering this
  // function, or in the while loop, then stop flowing.
  //
  // NB: This is a pretty rare edge case.
  if (state.pipesCount === 0) {
    state.flowing = false;

    // if there were data event listeners added, then switch to old mode.
    if (EE.listenerCount(src, 'data') > 0)
      emitDataEvents(src);
    return;
  }

  // at this point, no one needed a drain, so we just ran out of data.
  // on the next readable event, start it over again.
  state.ranOut = true;
}

function pipeOnReadable() {
  if (this._readableState.ranOut) {
    this._readableState.ranOut = false;
    flow(this);
  }
}

Readable.prototype.unpipe = function(dest) {
  var state = this._readableState;

  // if we're not piping anywhere, then do nothing.
  if (state.pipesCount === 0)
    return this;

  // just one destination. most common case.
  if (state.pipesCount === 1) {
    // passed in one, but it's not the right one.
    if (dest && dest !== state.pipes)
      return this;

    if (!dest)
      dest = state.pipes;

    // got a match.
    state.pipes = null;
    state.pipesCount = 0;
    this.removeListener('readable', pipeOnReadable);
    state.flowing = false;
    if (dest)
      dest.emit('unpipe', this);
    return this;
  }

  // slow case. multiple pipe destinations.

  if (!dest) {
    // remove all.
    var dests = state.pipes;
    var len = state.pipesCount;
    state.pipes = null;
    state.pipesCount = 0;
    this.removeListener('readable', pipeOnReadable);
    state.flowing = false;

    for (var i = 0; i < len; i++)
      dests[i].emit('unpipe', this);
    return this;
  }

  // try to find the right one.
  var i = state.pipes.indexOf(dest);
  if (i === -1)
    return this;

  state.pipes.splice(i, 1);
  state.pipesCount -= 1;
  if (state.pipesCount === 1)
    state.pipes = state.pipes[0];

  dest.emit('unpipe', this);

  return this;
};

// set up data events if they are asked for
// Ensure readable listeners eventually get something
Readable.prototype.on = function(ev, fn) {
  var res = Stream.prototype.on.call(this, ev, fn);

  if (ev === 'data' && !this._readableState.flowing)
    emitDataEvents(this);

  if (ev === 'readable' && !this._readableState.reading)
    this.read(0);

  return res;
};
Readable.prototype.addListener = Readable.prototype.on;
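
// Illustrative note (sketch only; `r` is any Readable): attaching a
// 'data' listener while the stream is not flowing switches it into the
// old, push-style mode via emitDataEvents(); a 'readable' listener
// instead triggers a read(0) so it is eventually satisfied.
//
//   r.on('data', function(chunk) { /* old-mode consumption */ });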

// pause() and resume() are remnants of the legacy readable stream API
// If the user uses them, then switch into old mode.
Readable.prototype.resume = function() {
  emitDataEvents(this);
  this.read(0);
  this.emit('resume');
};

Readable.prototype.pause = function() {
  emitDataEvents(this, true);
  this.emit('pause');
};

function emitDataEvents(stream, startPaused) {
  var state = stream._readableState;

  if (state.flowing) {
    // https://github.com/isaacs/readable-stream/issues/16
    throw new Error('Cannot switch to old mode now.');
  }

  var paused = startPaused || false;
  var readable = false;

  // convert to an old-style stream.
  stream.readable = true;
  stream.pipe = Stream.prototype.pipe;
  stream.on = stream.addListener = Stream.prototype.on;

  stream.on('readable', function() {
    readable = true;

    var c;
    while (!paused && (null !== (c = stream.read())))
      stream.emit('data', c);

    if (c === null) {
      readable = false;
      stream._readableState.needReadable = true;
    }
  });

  stream.pause = function() {
    paused = true;
    this.emit('pause');
  };

  stream.resume = function() {
    paused = false;
    if (readable)
      process.nextTick(function() {
        stream.emit('readable');
      });
    else
      this.read(0);
    this.emit('resume');
  };

  // now make it start, just in case it hadn't already.
  stream.emit('readable');
}

// wrap an old-style stream as the async data source.
// This is *not* part of the readable stream interface.
// It is an ugly unfortunate mess of history.
Readable.prototype.wrap = function(stream) {
  var state = this._readableState;
  var paused = false;

  var self = this;
  stream.on('end', function() {
    state.ended = true;
    if (state.decoder) {
      var chunk = state.decoder.end();
      if (chunk && chunk.length)
        self.push(chunk);
    }

    self.push(null);
  });

  stream.on('data', function(chunk) {
    if (state.decoder)
      chunk = state.decoder.write(chunk);
    if (!chunk || !chunk.length)
      return;

    var ret = self.push(chunk);
    if (!ret) {
      paused = true;
      stream.pause();
    }
  });

  // proxy all the other methods.
  // important when wrapping filters and duplexes.
  for (var i in stream) {
    if (typeof stream[i] === 'function' &&
        typeof this[i] === 'undefined') {
      this[i] = function(method) { return function() {
        return stream[method].apply(stream, arguments);
      }}(i);
    }
  }

  // proxy certain important events.
  var events = ['error', 'close', 'destroy', 'pause', 'resume'];
  events.forEach(function(ev) {
    stream.on(ev, self.emit.bind(self, ev));
  });

  // when we try to consume some more bytes, simply unpause the
  // underlying stream.
  self._read = function(n) {
    if (paused) {
      stream.resume();
      paused = false;
    }
  };
};
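
// Illustrative usage (sketch; `oldStream` stands for any old-style
// stream that emits 'data'/'end', e.g. one produced by an older
// library):
//
//   var r = new Readable();
//   r.wrap(oldStream);
//   r.on('readable', function() {
//     var chunk = r.read();
//   });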


// exposed for testing purposes only.
Readable._fromList = fromList;

// Pluck off n bytes from an array of buffers.
// Length is the combined lengths of all the buffers in the list.
function fromList(n, state) {
  var list = state.buffer;
  var length = state.length;
  var stringMode = !!state.decoder;
  var objectMode = !!state.objectMode;
  var ret;

  // nothing in the list, definitely empty.
  if (list.length === 0)
    return null;

  if (length === 0)
    ret = null;
  else if (objectMode)
    ret = list.shift();
  else if (!n || n >= length) {
    // read it all, truncate the array.
    if (stringMode)
      ret = list.join('');
    else
      ret = Buffer.concat(list, length);
    list.length = 0;
  } else {
    // read just some of it.
    if (n < list[0].length) {
      // just take a part of the first list item.
      // slice is the same for buffers and strings.
      var buf = list[0];
      ret = buf.slice(0, n);
      list[0] = buf.slice(n);
    } else if (n === list[0].length) {
      // first list is a perfect match
      ret = list.shift();
    } else {
      // complex case.
      // we have enough to cover it, but it spans past the first buffer.
      if (stringMode)
        ret = '';
      else
        ret = new Buffer(n);

      var c = 0;
      for (var i = 0, l = list.length; i < l && c < n; i++) {
        var buf = list[0];
        var cpy = Math.min(n - c, buf.length);

        if (stringMode)
          ret += buf.slice(0, cpy);
        else
          buf.copy(ret, c, 0, cpy);

        if (cpy < buf.length)
          list[0] = buf.slice(cpy);
        else
          list.shift();

        c += cpy;
      }
    }
  }

  return ret;
}
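
// Worked example (illustrative): with state.buffer holding
// [new Buffer('abc'), new Buffer('defgh')] and no decoder,
// fromList(4, state) returns a new 4-byte Buffer 'abcd' and leaves
// ['efgh'] in the list, while fromList(3, state) would instead shift
// the first buffer off whole ('abc'). The caller (read) adjusts
// state.length afterwards.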

function endReadable(stream) {
  var state = stream._readableState;

  // If we get here before consuming all the bytes, then that is a
  // bug in node. Should never happen.
  if (state.length > 0)
    throw new Error('endReadable called on non-empty stream');

  if (state.endEmitted)
    return;
  state.ended = true;
  state.endEmitted = true;
  process.nextTick(function() {
    stream.readable = false;
    stream.emit('end');
  });
}