0
0
mirror of https://github.com/nodejs/node.git synced 2024-12-01 16:10:02 +01:00
nodejs/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js
Rich Trott 1ee4703974
test: move net bytes-per-chunk test to sequential
The test is timing out on FreeBSD 10 in CI. It takes less than half as
long to run when it is in sequential on that platform instead of
parallel.

Refs: https://github.com/nodejs/node/pull/21322#issuecomment-399311718

PR-URL: https://github.com/nodejs/node/pull/21457
Reviewed-By: Trivikram Kamat <trivikr.dev@gmail.com>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
2018-06-24 15:55:27 +02:00

42 lines
1.1 KiB
JavaScript

// Flags: --expose-gc
'use strict';
const common = require('../common');
const assert = require('assert');
const net = require('net');
// Tests that, when receiving small chunks, we do not keep the full length
// of the original allocation for the libuv read call in memory.
// State shared between the server's 'data' handler below and the
// process 'exit' check at the bottom of the file.
let client;
let baseRSS;
const receivedChunks = [];
const N = 250000;

// Ping-pong: each 1-byte chunk the server receives triggers one more
// 1-byte write from the client, until N chunks have been collected.
// Retaining every chunk lets the exit handler measure per-chunk overhead.
const server = net.createServer(common.mustCall((socket) => {
  baseRSS = process.memoryUsage().rss;
  socket.setNoDelay(true);
  socket.on('data', (data) => {
    receivedChunks.push(data);
    if (receivedChunks.length >= N) {
      client.end();
      server.close();
    } else {
      client.write('a');
    }
  });
})).listen(0, common.mustCall(() => {
  client = net.connect(server.address().port);
  client.setNoDelay(true);
  client.write('hello!');
}));
// Once the event loop has drained, force a GC and verify that keeping
// all received 1-byte chunks alive did not also keep the much larger
// original libuv read buffers alive.
process.on('exit', () => {
  global.gc();
  const rssGrowth = process.memoryUsage().rss - baseRSS;
  const bytesPerChunk = rssGrowth / receivedChunks.length;
  // We should always have less than one page (usually ~ 4 kB) per chunk.
  assert(bytesPerChunk < 512, `measured ${bytesPerChunk} bytes per chunk`);
});