
SERVER-33068 Fix run_check_repl_dbhash.js hook to actually run dbhash.

Consolidates the logic in the run_check_repl_dbhash.js and
run_validate_collections.js hooks for discovering all of the mongod
processes in a MongoDB deployment into a new discover_topology.js library.

Also adds a test that relies on mongod logging to verify that the
run_check_repl_dbhash.js and run_validate_collections.js hooks execute
on all of the expected servers.
Author: Max Hirschhorn
Date: 2018-02-02 21:54:43 -05:00
parent 331d67a0d8
commit 64bed81733

4 changed files with 499 additions and 244 deletions
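For orientation before the diffs: the new library exposes a Topology enum and a DiscoverTopology.findConnectedNodes() entry point, and the hooks branch on the returned type. A minimal sketch of that usage pattern, assuming a shell already connected via the global 'db' (the topology shapes below match the ones documented in discover_topology.js):

load('jstests/libs/discover_topology.js');  // For Topology and DiscoverTopology.

const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
if (topology.type === Topology.kStandalone) {
    print('stand-alone mongod: ' + tojsononeline(topology));
} else if (topology.type === Topology.kReplicaSet) {
    print('replica set members: ' + tojson(topology.nodes));
} else if (topology.type === Topology.kShardedCluster) {
    print('CSRS members: ' + tojson(topology.configsvr.nodes));
    for (let shardName of Object.keys(topology.shards)) {
        print('shard ' + shardName + ': ' + tojsononeline(topology.shards[shardName]));
    }
}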

jstests/hooks/run_check_repl_dbhash.js

@@ -3,197 +3,200 @@
'use strict';
(function() {
load('jstests/libs/parallelTester.js');
load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
load('jstests/libs/parallelTester.js'); // For ScopedThread.
function isMasterSlave(uri) {
const mongo = new Mongo(uri);
jsTest.authenticate(mongo);
const cmdLineOpts = mongo.getDB('admin').adminCommand('getCmdLineOpts');
assert.commandWorked(cmdLineOpts);
// A thin wrapper around master/slave nodes that provides methods necessary for checking data
// consistency between the master and slave nodes.
//
// DEPRECATED: This wrapper is only intended to be used for the master-slave deployment started
// by resmoke.py as part of the master_slave_jscore_passthrough.yml test suite, and it shouldn't
// be used for any other master/slave tests.
//
// TODO SERVER-32143: Remove this wrapper.
function MasterSlaveDBHashTest(primaryHost) {
const master = new Mongo(primaryHost);
const masterPort = master.host.split(':')[1];
const slave = new Mongo('localhost:' + String(parseInt(masterPort) + 1));
this.nodeList = function nodeList() {
return [master.host, slave.host];
};
this.getHashes = function getHashes(db) {
const combinedRes = {};
let res = master.getDB(db).runCommand('dbhash');
assert.commandWorked(res);
combinedRes.master = res;
res = slave.getDB(db).runCommand('dbhash');
assert.commandWorked(res);
combinedRes.slaves = [res];
return combinedRes;
};
this.getPrimary = function getPrimary() {
slave.setSlaveOk();
this.liveNodes = {master: master, slaves: [slave]};
return master;
};
this.getSecondaries = function getSecondaries() {
return [slave];
};
this.awaitReplication = function awaitReplication() {
assert.commandWorked(master.adminCommand({fsyncUnlock: 1}),
'failed to unlock the primary');
print('Starting fsync on master to flush all pending writes');
assert.commandWorked(master.adminCommand({fsync: 1}));
print('fsync on master completed');
const kTimeout = 5 * 60 * 1000; // 5 minute timeout
const dbNames = master.getDBNames();
for (let dbName of dbNames) {
if (dbName === 'local') {
continue;
}
print('Awaiting replication of inserts into ' + dbName);
assert.writeOK(master.getDB(dbName).await_repl.insert(
{awaiting: 'repl'}, {writeConcern: {w: 2, wtimeout: kTimeout}}),
'Awaiting replication failed');
}
print('Finished awaiting replication');
assert.commandWorked(master.adminCommand({fsync: 1, lock: 1}),
'failed to re-lock the primary');
};
this.checkReplicatedDataHashes = function checkReplicatedDataHashes() {
const msgPrefix = 'checkReplicatedDataHashes for master-slave deployment';
const excludedDBs = jsTest.options().excludedDBsFromDBHash || [];
// Since UUIDs aren't explicitly replicated in master-slave deployments, we ignore the
// UUID in the output of the 'listCollections' command to avoid reporting a known data
// inconsistency issue from checkReplicatedDataHashes().
const ignoreUUIDs = true;
new ReplSetTest({
nodes: 0
}).checkReplicatedDataHashes.call(this, msgPrefix, excludedDBs, ignoreUUIDs);
};
this.checkReplicaSet = function checkReplicaSet() {
new ReplSetTest({nodes: 0}).checkReplicaSet.apply(this, arguments);
};
this.dumpOplog = function dumpOplog() {
print('Not dumping oplog for master-slave deployment');
};
}
function isMasterSlaveDeployment(conn) {
const cmdLineOpts = assert.commandWorked(conn.adminCommand({getCmdLineOpts: 1}));
return cmdLineOpts.parsed.master === true;
}
function isMultiNodeReplSet(uri) {
const mongo = new Mongo(uri);
let hosts = [];
const isMaster = mongo.adminCommand({isMaster: 1});
if (isMaster.hasOwnProperty('setName')) {
let hosts = isMaster.hosts;
if (isMaster.hasOwnProperty('passives')) {
hosts = hosts.concat(isMaster.passives);
}
}
return hosts.length > 1;
}
// Adds the server's URI and type ('replset' or 'master-slave') to 'out' if the server needs a
// dbhash check.
function checkAndAddServerDesc(uri, out) {
// No need to check the dbhash of single node replsets.
if (isMultiNodeReplSet(uri)) {
out.push({type: 'replset', uri: uri});
} else if (isMasterSlave(uri)) {
out.push({type: 'master-slave', uri: uri});
function checkReplicatedDataHashesThread(hosts, testData) {
try {
TestData = testData;
new ReplSetTest(hosts[0]).checkReplicatedDataHashes();
return {ok: 1};
} catch (e) {
return {ok: 0, hosts: hosts, error: e.toString(), stack: e.stack};
}
}
function checkReplDataHashThread(serverDesc, testData, excludedDBs) {
// A thin wrapper around master/slave nodes that provides the getHashes(), getPrimary(),
// awaitReplication(), and nodeList() methods.
// DEPRECATED: this wrapper only supports nodes started through resmoke's masterslave.py
// fixture. Please do not use it with other master/slave clusters.
function MasterSlaveDBHashTest(primaryHost) {
const master = new Mongo(primaryHost);
const masterPort = master.host.split(':')[1];
const slave = new Mongo('localhost:' + String(parseInt(masterPort) + 1));
this.nodeList = function() {
return [master.host, slave.host];
};
this.getHashes = function(db) {
const combinedRes = {};
let res = master.getDB(db).runCommand('dbhash');
assert.commandWorked(res);
combinedRes.master = res;
res = slave.getDB(db).runCommand('dbhash');
assert.commandWorked(res);
combinedRes.slaves = [res];
return combinedRes;
};
this.getPrimary = function() {
slave.setSlaveOk();
this.liveNodes = {master: master, slaves: [slave]};
return master;
};
this.getSecondaries = function() {
return [slave];
};
this.awaitReplication = function() {
assert.commandWorked(master.adminCommand({fsyncUnlock: 1}),
'failed to unlock the primary');
print('Starting fsync on master to flush all pending writes');
assert.commandWorked(master.adminCommand({fsync: 1}));
print('fsync on master completed');
const kTimeout = 60 * 1000 * 5; // 5min timeout
const dbNames = master.getDBNames();
print('Awaiting replication of inserts into ' + dbNames);
for (let dbName of dbNames) {
if (dbName === 'local')
continue;
assert.writeOK(
master.getDB(dbName).await_repl.insert(
{awaiting: 'repl'}, {writeConcern: {w: 2, wtimeout: kTimeout}}),
'Awaiting replication failed');
}
print('Finished awaiting replication');
assert.commandWorked(master.adminCommand({fsync: 1, lock: 1}),
'failed to re-lock the primary');
};
this.checkReplicatedDataHashes = function() {
ReplSetTest({nodes: 0}).checkReplicatedDataHashes.apply(this, ['test', [], true]);
};
this.checkReplicaSet = function() {
ReplSetTest({nodes: 0}).checkReplicaSet.apply(this, arguments);
};
this.dumpOplog = function() {
print('master-slave cannot dump oplog');
};
}
TestData = testData;
// Since UUIDs aren't explicitly replicated in master-slave deployments, we ignore the UUID
// in the output of the 'listCollections' command to avoid reporting a known data
// inconsistency issue from checkReplicatedDataHashes().
const ignoreUUIDs = serverDesc.type === 'master-slave';
let fixture = null;
if (serverDesc.type === 'replset') {
fixture = new ReplSetTest(serverDesc.uri);
} else if (serverDesc.type === 'master-slave') {
fixture = new MasterSlaveDBHashTest(serverDesc.uri);
} else {
throw 'unrecognized server type ' + serverDesc.type;
}
fixture.checkReplicatedDataHashes(undefined, excludedDBs, ignoreUUIDs);
}
let startTime = Date.now();
const startTime = Date.now();
assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a mongod?');
// Stores the type (master/slave or replset) and URI of each server needing a dbhash check.
const serversNeedingReplDataHashCheck = [];
const primaryInfo = db.isMaster();
const isMongos = primaryInfo.msg === 'isdbgrid';
const isReplSet = primaryInfo.hasOwnProperty('setName');
const uri = db.getMongo().host;
let skipped = false;
try {
const conn = db.getMongo();
assert(primaryInfo.ismaster,
'shell is not connected to the primary or master node: ' + tojson(primaryInfo));
assert(isMongos || isReplSet || isMasterSlave(uri),
'not replset, master/slave, or sharded cluster');
if (isMongos) {
// Add shards and config server if they are replica sets.
let res = db.adminCommand('getShardMap');
assert.commandWorked(res);
const csURI = res.map.config;
res = db.adminCommand('listShards');
assert.commandWorked(res);
const shardURIs = res.shards.map((shard) => shard.host);
checkAndAddServerDesc(csURI, serversNeedingReplDataHashCheck);
shardURIs.forEach((shardURI) => {
checkAndAddServerDesc(shardURI, serversNeedingReplDataHashCheck);
});
} else {
checkAndAddServerDesc(uri, serversNeedingReplDataHashCheck);
}
const threads = [];
const excludedDBs = jsTest.options().excludedDBsFromDBHash || [];
serversNeedingReplDataHashCheck.forEach((serverDesc) => {
const thread = new ScopedThread(checkReplDataHashThread, serverDesc, TestData, excludedDBs);
threads.push({serverDesc: serverDesc, handle: thread});
thread.start();
});
if (serversNeedingReplDataHashCheck.length === 0) {
let skipReason = 'No multi-node replication detected in ';
if (isMongos) {
skipReason += 'sharded cluster';
} else if (isReplSet) {
skipReason += 'replica set';
} else {
skipReason += 'master-slave set';
if (isMasterSlaveDeployment(conn)) {
new MasterSlaveDBHashTest(conn.host).checkReplicatedDataHashes();
return;
}
print('Skipping consistency checks for cluster because ' + skipReason);
return;
}
const topology = DiscoverTopology.findConnectedNodes(conn);
const failedChecks = [];
threads.forEach(thread => {
thread.handle.join();
if (thread.handle.hasFailed()) {
failedChecks.push(thread.serverDesc.uri + ' (' + thread.serverDesc.type + ')');
if (topology.type === Topology.kStandalone) {
print('Skipping data consistency checks for cluster because we are connected to a' +
' stand-alone mongod: ' + tojsononeline(topology));
skipped = true;
return;
}
});
assert.eq(failedChecks.length,
0,
'dbhash check failed for the following hosts: ' + failedChecks.join(','));
if (topology.type === Topology.kReplicaSet) {
if (topology.nodes.length === 1) {
print('Skipping data consistency checks for cluster because we are connected to a' +
' 1-node replica set: ' + tojsononeline(topology));
skipped = true;
return;
}
const totalTime = Date.now() - startTime;
print('Finished consistency checks of cluster in ' + totalTime + ' ms.');
new ReplSetTest(topology.nodes[0]).checkReplicatedDataHashes();
return;
}
if (topology.type !== Topology.kShardedCluster) {
throw new Error('Unrecognized topology format: ' + tojson(topology));
}
const threads = [];
try {
if (topology.configsvr.nodes.length > 1) {
const thread = new ScopedThread(
checkReplicatedDataHashesThread, topology.configsvr.nodes, TestData);
threads.push(thread);
thread.start();
} else {
print('Skipping data consistency checks for 1-node CSRS: ' +
tojsononeline(topology));
}
for (let shardName of Object.keys(topology.shards)) {
const shard = topology.shards[shardName];
if (shard.type === Topology.kStandalone) {
print('Skipping data consistency checks for stand-alone shard: ' +
tojsononeline(topology));
continue;
}
if (shard.type !== Topology.kReplicaSet) {
throw new Error('Unrecognized topology format: ' + tojson(topology));
}
if (shard.nodes.length > 1) {
const thread =
new ScopedThread(checkReplicatedDataHashesThread, shard.nodes, TestData);
threads.push(thread);
thread.start();
} else {
print('Skipping data consistency checks for 1-node replica set shard: ' +
tojsononeline(topology));
}
}
} finally {
// Wait for each thread to finish. Throw an error if any thread fails.
const returnData = threads.map(thread => {
thread.join();
return thread.returnData();
});
returnData.forEach(res => {
assert.commandWorked(res, 'data consistency checks failed');
});
}
} finally {
if (!skipped) {
const totalTime = Date.now() - startTime;
print('Finished data consistency checks for cluster in ' + totalTime + ' ms.');
}
}
})();
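For context on what this hook ultimately checks: the "dbHash" command returns a per-collection hash map and an aggregate md5 for each database, and the hook asserts these agree across nodes. A simplified sketch of that comparison, not the hook's actual implementation; the host ports are hypothetical and the field names (md5, collections) follow the server's dbHash response:

const primary = new Mongo('localhost:20000');    // hypothetical hosts for illustration.
const secondary = new Mongo('localhost:20001');
secondary.setSlaveOk();  // permit reads against the secondary.

for (let dbName of primary.getDBNames()) {
    if (dbName === 'local') {
        continue;  // the 'local' database isn't replicated, so its hashes needn't match.
    }
    const primaryRes = assert.commandWorked(primary.getDB(dbName).runCommand({dbHash: 1}));
    const secondaryRes = assert.commandWorked(secondary.getDB(dbName).runCommand({dbHash: 1}));
    assert.eq(primaryRes.md5,
              secondaryRes.md5,
              'dbhash mismatch for ' + dbName + ': ' + tojson(primaryRes.collections) +
                  ' vs. ' + tojson(secondaryRes.collections));
}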

jstests/hooks/run_validate_collections.js

@@ -3,73 +3,36 @@
'use strict';
(function() {
assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
function getConnectionStrings(conn) {
// If conn does not point to a repl set, then this function returns [conn.host].
const res = conn.adminCommand({isMaster: 1});
let hostList = [];
if (res.hasOwnProperty('setName')) {
hostList = res.hosts;
if (res.hasOwnProperty('passives')) {
hostList = hostList.concat(res.passives);
}
return hostList;
} else {
return [conn.host];
}
}
function getConfigConnStr() {
const shardMap = db.adminCommand({getShardMap: 1});
if (!shardMap.hasOwnProperty('map')) {
throw new Error('Expected getShardMap() to return an object with a "map" field: ' +
tojson(shardMap));
}
const map = shardMap.map;
if (!map.hasOwnProperty('config')) {
throw new Error('Expected getShardMap().map to have a "config" field: ' + tojson(map));
}
return map.config;
}
function isMongos() {
return db.isMaster().msg === 'isdbgrid';
}
function getHostList() {
let hostList = [];
if (isMongos()) {
// We're connected to a sharded cluster through a mongos.
// 1) Add all the config servers to the server list.
const configConnStr = getConfigConnStr();
const configServerReplSetConn = new Mongo(configConnStr);
hostList = getConnectionStrings(configServerReplSetConn);
// 2) Add shard members to the server list.
const configDB = db.getSiblingDB('config');
const cursor = configDB.shards.find();
while (cursor.hasNext()) {
const shard = cursor.next();
const shardReplSetConn = new Mongo(shard.host);
hostList.push(...getConnectionStrings(shardReplSetConn));
}
} else {
// We're connected to a mongod.
hostList = getConnectionStrings(db.getMongo());
}
return hostList;
}
load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology.
load('jstests/hooks/validate_collections.js'); // For CollectionValidator.
new CollectionValidator().validateNodes(getHostList());
assert.eq(typeof db, 'object', 'Invalid `db` object, is the shell connected to a mongod?');
const topology = DiscoverTopology.findConnectedNodes(db.getMongo());
const hostList = [];
if (topology.type === Topology.kStandalone) {
hostList.push(topology.mongod);
} else if (topology.type === Topology.kReplicaSet) {
hostList.push(...topology.nodes);
} else if (topology.type === Topology.kShardedCluster) {
hostList.push(...topology.configsvr.nodes);
for (let shardName of Object.keys(topology.shards)) {
const shard = topology.shards[shardName];
if (shard.type === Topology.kStandalone) {
hostList.push(shard.mongod);
} else if (shard.type === Topology.kReplicaSet) {
hostList.push(...shard.nodes);
} else {
throw new Error('Unrecognized topology format: ' + tojson(topology));
}
}
} else {
throw new Error('Unrecognized topology format: ' + tojson(topology));
}
new CollectionValidator().validateNodes(hostList);
})();
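The CollectionValidator in validate_collections.js does the per-node work. Roughly, it amounts to running the "validate" command on every collection of every database on every host in the list; a simplified sketch under that assumption (validateAllCollections is a hypothetical helper name, and the real hook is more thorough about views, authentication, and error reporting):

function validateAllCollections(hostList) {
    for (let host of hostList) {
        const conn = new Mongo(host);
        conn.setSlaveOk();  // allow reads when 'host' is a secondary.
        for (let dbName of conn.getDBNames()) {
            const testDB = conn.getDB(dbName);
            // Filter to actual collections; "validate" isn't meaningful for views.
            for (let collInfo of testDB.getCollectionInfos({type: 'collection'})) {
                const res = assert.commandWorked(
                    testDB.runCommand({validate: collInfo.name, full: true}));
                assert(res.valid,
                       host + ': ' + dbName + '.' + collInfo.name +
                           ' failed validation: ' + tojson(res));
            }
        }
    }
}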

jstests/libs/discover_topology.js

@@ -0,0 +1,102 @@
'use strict';
// The tojson() function that is commonly used to build up assertion messages doesn't support the
// Symbol type, so we just use unique string values instead.
var Topology = {
kStandalone: 'stand-alone',
kReplicaSet: 'replica set',
kShardedCluster: 'sharded cluster',
};
var DiscoverTopology = (function() {
const kDefaultConnectFn = (host) => new Mongo(host);
function getDataMemberConnectionStrings(conn) {
const res = conn.adminCommand({isMaster: 1});
if (!res.hasOwnProperty('setName')) {
// 'conn' represents a connection to a stand-alone mongod.
return {type: Topology.kStandalone, mongod: conn.host};
}
// The "passives" field contains the list of unelectable (priority=0) secondaries
// and is omitted from the server's response when there are none.
res.passives = res.passives || [];
return {type: Topology.kReplicaSet, nodes: [...res.hosts, ...res.passives]};
}
function findConnectedNodesViaMongos(conn, options) {
function getConfigServerConnectionString() {
const shardMap = conn.adminCommand({getShardMap: 1});
if (!shardMap.hasOwnProperty('map')) {
throw new Error(
'Expected "getShardMap" command to return an object with a "map" field: ' +
tojson(shardMap));
}
if (!shardMap.map.hasOwnProperty('config')) {
throw new Error(
'Expected "getShardMap" command to return an object with a "map.config"' +
' field: ' + tojson(shardMap));
}
return shardMap.map.config;
}
const connectFn =
options.hasOwnProperty('connectFn') ? options.connectFn : kDefaultConnectFn;
const configsvrConn = connectFn(getConfigServerConnectionString());
const configsvrHosts = getDataMemberConnectionStrings(configsvrConn);
const shards = assert.commandWorked(conn.adminCommand({listShards: 1})).shards;
const shardHosts = {};
for (let shardInfo of shards) {
const shardConn = connectFn(shardInfo.host);
shardHosts[shardInfo._id] = getDataMemberConnectionStrings(shardConn);
}
return {type: Topology.kShardedCluster, configsvr: configsvrHosts, shards: shardHosts};
}
return {
/**
* Returns an object describing the topology of the mongod processes reachable from 'conn'.
* The "connectFn" property can be optionally specified to support custom retry logic when
* making connection attempts without overriding the Mongo constructor itself.
*
* For a stand-alone mongod, an object of the form
* {type: Topology.kStandalone, mongod: <conn-string>}
* is returned.
*
* For a replica set, an object of the form
* {type: Topology.kReplicaSet, nodes: [<conn-string1>, <conn-string2>, ...]}
* is returned.
*
* For a sharded cluster, an object of the form
* {
* type: Topology.kShardedCluster,
* configsvr: {nodes: [...]},
* shards: {
* <shard-name1>: {type: Topology.kStandalone, mongod: ...},
* <shard-name2>: {type: Topology.kReplicaSet, nodes: [...]},
* ...
* }
* }
* is returned, where the description for each shard depends on whether it is a stand-alone
* shard or a replica set shard.
*/
findConnectedNodes: function findConnectedNodes(conn,
options = {connectFn: kDefaultConnectFn}) {
const isMongod = conn.adminCommand({isMaster: 1}).msg !== 'isdbgrid';
if (isMongod) {
return getDataMemberConnectionStrings(conn);
}
return findConnectedNodesViaMongos(conn, options);
},
};
})();
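The "connectFn" option called out in the comment above exists so callers can add retry behavior without monkey-patching the Mongo constructor. A sketch of one such caller-supplied function (connectWithRetry is a hypothetical helper; the retry count and backoff are arbitrary choices for illustration):

function connectWithRetry(host) {
    let lastError;
    for (let attempt = 0; attempt < 5; ++attempt) {
        try {
            return new Mongo(host);
        } catch (e) {
            lastError = e;  // remember the failure and back off before retrying.
            sleep(1000);
        }
    }
    throw lastError;
}

const topology =
    DiscoverTopology.findConnectedNodes(db.getMongo(), {connectFn: connectWithRetry});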

New test: data consistency checks against each cluster type

@@ -0,0 +1,187 @@
/**
* Verifies that the data consistency checks work against the variety of cluster types we use in our
* testing.
*
* @tags: [requires_replication, requires_sharding]
*/
// The global 'db' variable is used by the data consistency hooks.
var db;
(function() {
"use strict";
// We skip the data consistency checks while terminating the cluster because running them again
// would skew the counts of how many times the "dbhash" and "validate" commands are run.
TestData.skipCollectionAndIndexValidation = true;
TestData.skipCheckDBHashes = true;
function makePatternForDBHash(dbName) {
return new RegExp("COMMAND.*command " + dbName +
"\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
"g");
}
function makePatternForValidate(dbName, collName) {
return new RegExp(
"COMMAND.*command " + dbName +
"\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" + collName +
"\"",
"g");
}
function countMatches(pattern, output) {
assert(pattern.global, "the 'g' flag must be used to find all matches");
let numMatches = 0;
while (pattern.exec(output) !== null) {
++numMatches;
}
return numMatches;
}
function runDataConsistencyChecks(testCase) {
db = testCase.conn.getDB("test");
try {
clearRawMongoProgramOutput();
load("jstests/hooks/run_check_repl_dbhash.js");
load("jstests/hooks/run_validate_collections.js");
// We terminate the processes to ensure that the next call to rawMongoProgramOutput()
// will return all of their output.
testCase.teardown();
return rawMongoProgramOutput();
} finally {
db = undefined;
}
}
(function testReplicaSetWithVotingSecondaries() {
const numNodes = 2;
const rst = new ReplSetTest({
nodes: numNodes,
nodeOptions: {
setParameter: {logComponentVerbosity: tojson({command: 1})},
}
});
rst.startSet();
rst.initiateWithNodeZeroAsPrimary();
// Insert a document so the "dbhash" and "validate" commands have some actual work to do.
assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
const output =
runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
let pattern = makePatternForDBHash("test");
assert.eq(numNodes,
countMatches(pattern, output),
"expected to find " + tojson(pattern) + " from each node in the log output");
pattern = makePatternForValidate("test", "mycoll");
assert.eq(numNodes,
countMatches(pattern, output),
"expected to find " + tojson(pattern) + " from each node in the log output");
})();
(function testReplicaSetWithNonVotingSecondaries() {
const numNodes = 2;
const rst = new ReplSetTest({
nodes: numNodes,
nodeOptions: {
setParameter: {logComponentVerbosity: tojson({command: 1})},
}
});
rst.startSet();
const replSetConfig = rst.getReplSetConfig();
for (let i = 1; i < numNodes; ++i) {
replSetConfig.members[i].priority = 0;
replSetConfig.members[i].votes = 0;
}
rst.initiate(replSetConfig);
// Insert a document so the "dbhash" and "validate" commands have some actual work to do.
assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
const output =
runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
let pattern = makePatternForDBHash("test");
assert.eq(numNodes,
countMatches(pattern, output),
"expected to find " + tojson(pattern) + " from each node in the log output");
pattern = makePatternForValidate("test", "mycoll");
assert.eq(numNodes,
countMatches(pattern, output),
"expected to find " + tojson(pattern) + " from each node in the log output");
})();
(function testShardedClusterWithOneNodeCSRS() {
const st = new ShardingTest({
mongos: 1,
config: 1,
configOptions: {
setParameter: {logComponentVerbosity: tojson({command: 1})},
},
shards: 0
});
const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
let pattern = makePatternForDBHash("config");
assert.eq(0,
countMatches(pattern, output),
"expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
pattern = makePatternForValidate("config", "mongos");
assert.eq(1,
countMatches(pattern, output),
"expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
})();
(function testShardedCluster() {
const st = new ShardingTest({
mongos: 1,
config: 3,
configOptions: {
setParameter: {logComponentVerbosity: tojson({command: 1})},
},
shards: 1,
rs: {nodes: 2},
rsOptions: {
setParameter: {logComponentVerbosity: tojson({command: 1})},
}
});
// Insert a document so the "dbhash" and "validate" commands have some actual work to do on
// the replica set shard.
assert.commandWorked(st.s.getDB("test").mycoll.insert({}));
const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
// The "config" database exists on both the CSRS and the replica set shards due to the
// "config.transactions" collection.
let pattern = makePatternForDBHash("config");
assert.eq(5,
countMatches(pattern, output),
"expected to find " + tojson(pattern) +
" from each CSRS node and each replica set shard node in the log output");
pattern = makePatternForValidate("config", "mongos");
assert.eq(3,
countMatches(pattern, output),
"expected to find " + tojson(pattern) + " from each CSRS node in the log output");
pattern = makePatternForDBHash("test");
assert.eq(2,
countMatches(pattern, output),
"expected to find " + tojson(pattern) +
" from each replica set shard node in the log output");
pattern = makePatternForValidate("test", "mycoll");
assert.eq(2,
countMatches(pattern, output),
"expected to find " + tojson(pattern) +
" from each replica set shard node in the log output");
})();
})();
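A side note on countMatches() in the test above: the assertion that the pattern carries the 'g' flag is load-bearing. A global RegExp advances its lastIndex property on every exec() call and eventually returns null, while a non-global one restarts from position zero each time and would loop forever. A tiny self-contained illustration:

const output = 'dbhash dbhash dbhash';
const pattern = /dbhash/g;  // without 'g', the while-loop below never terminates.
let numMatches = 0;
while (pattern.exec(output) !== null) {
    ++numMatches;  // exec() resumes from pattern.lastIndex on each iteration.
}
assert.eq(3, numMatches);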