
SERVER-30347 Fail startup when running wiredTiger with --nojournal as part of a replica set

This reverts commit 06b9e9ebf9.
Vesselina Ratcheva 2018-01-19 10:10:31 -05:00
parent 4189db4c92
commit 82994d2409
10 changed files with 297 additions and 365 deletions
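Taken together, the hunks below turn what was previously just a startup warning into a hard failure: a mongod started with the wiredTiger engine, --nojournal, and a replica set name now exits with EXIT_BADOPTIONS. As a minimal sketch of how a jstest could verify that behavior (hypothetical, not part of this commit, and assuming the usual MongoRunner convention that runMongod returns null when the process fails to start):

(function() {
    'use strict';
    // Hypothetical check: wiredTiger + --nojournal + a replica set name must
    // now abort startup instead of only logging a deprecation warning.
    var conn = MongoRunner.runMongod(
        {replSet: "wt_nojournal", storageEngine: "wiredTiger", nojournal: ""});
    assert.eq(null, conn, "mongod should fail to start with --nojournal in a replica set");
})();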


@@ -4710,6 +4710,8 @@ buildvariants:
push_arch: x86_64
compile_flags: -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_gcc.vars
num_jobs_available: $(grep -c ^processor /proc/cpuinfo)
# Running WiredTiger with --nojournal in a replica set is no longer supported,
# so this variant does not include replica set tests.
test_flags: --nojournal --excludeWithAnyTags=requires_journaling,requires_replication
use_scons_cache: true
build_mongoreplay: true
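The --excludeWithAnyTags flag tells the resmoke test runner to skip any test carrying one of the listed tags, which is why the jstest hunks below add or normalize their requires_replication annotations. A hypothetical test file header showing how a test opts out of this build variant:

/**
 * Hypothetical test file header: resmoke reads the @tags annotation, so with
 * --excludeWithAnyTags=requires_journaling,requires_replication this test is skipped.
 * @tags: [requires_replication]
 */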


@@ -1,7 +1,7 @@
/**
* Tests aggregate command against mongos with slaveOk. For more tests on read preference,
* please refer to jstests/sharding/read_pref_cmd.js.
* @tags: [requires_sharding,requires_replication]
* @tags: [requires_sharding, requires_replication]
*/
(function() {
load('jstests/replsets/rslib.js');


@@ -1,6 +1,6 @@
// Test copyDatabase command inside a sharded cluster with and without auth. Tests with auth are
// currently disabled due to SERVER-13080.
// @tags: [requires_sharding,requires_replication]
// @tags: [requires_sharding, requires_replication]
var baseName = "jstests_clone_copyauth_between_shards";


@@ -11,7 +11,15 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckDBHashes = true;
(function() {
'use strict';
"use strict";
// Skip this test if running with the "wiredTiger" storage engine, since it requires
// using 'nojournal' in a replica set, which is not supported when using WT.
if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
// WT is currently the default engine so it is used when 'storageEngine' is not set.
jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
return;
}
// Options for a cluster with two replica set shards, the first with two nodes and the
// second with one.
@@ -137,5 +145,4 @@ TestData.skipCheckDBHashes = true;
assert.eq(coll.count({_id: 1}), 1);
st.stop();
})();


@@ -1,87 +0,0 @@
/**
* This test is only for the WiredTiger storageEngine
* Test the nojournal option with a wiredTiger replica set. This test would fail with mmap
* because unclean shutdowns are not safe there. When running without a journal, WT starts
* up from the last valid checkpoint, so it should recover.
*
* Start a set.
* Insert data into collection foo
* fsync secondary member 1
* Kill -9 secondary member 1
* Add some more data in a new collection.
* Restart member 1.
* Check that it syncs from the last checkpoint and data is there.
*/
// Helper used to search the in-memory (RAM) server log returned by getLog.
var contains = function(logLines, func) {
var i = logLines.length;
while (i--) {
printjson(logLines[i]);
if (func(logLines[i])) {
return true;
}
}
return false;
};
// This test can only be run if the storageEngine is wiredTiger
if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredTiger") {
jsTestLog("Skipping test because storageEngine is not wiredTiger");
} else {
var name = "wt_nojournal_repl";
var replTest = new ReplSetTest({
name: name,
nodes: 3,
oplogSize: 2,
nodeOptions: {
nojournal: "",
storageEngine: "wiredTiger",
}
});
var nodes = replTest.startSet();
// make sure node 0 becomes primary initially
var config = replTest.getReplSetConfig();
config.members[1].priority = 0;
config.members[2].priority = 0;
replTest.initiate(config);
var masterDB = replTest.getPrimary().getDB("test");
var secondary1 = replTest.liveNodes.slaves[0];
jsTestLog("add some data to collection foo");
for (var i = 0; i < 100; i++) {
masterDB.foo.insert({x: i});
}
replTest.awaitReplication();
assert.eq(secondary1.getDB("test").foo.count(), 100);
jsTestLog("run fsync on the secondary to ensure it remains after restart");
assert.commandWorked(secondary1.getDB("admin").runCommand({fsync: 1}));
jsTestLog("kill -9 secondary 1");
MongoRunner.stopMongod(secondary1, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
jsTestLog("add some data to a new collection bar");
for (var i = 0; i < 100; i++) {
masterDB.bar.insert({x: i});
}
jsTestLog("restart secondary 1 and let it catch up");
secondary1 = replTest.restart(1);
replTest.awaitReplication();
// Test that the restarted secondary did NOT do an initial sync by checking the log
var res = secondary1.adminCommand({getLog: "global"});
assert(!contains(res.log, function(v) {
return v.indexOf("initial sync") != -1;
}));
jsTestLog("check data is in both collections");
assert.eq(secondary1.getDB("test").foo.count(), 100);
assert.eq(secondary1.getDB("test").bar.count(), 100);
jsTestLog("Success!");
replTest.stopSet();
}


@@ -4,165 +4,153 @@
// so cannot be run on the ephemeralForTest storage engine, as it accepts all journaled writes.
// @tags: [SERVER-21420]
var request;
var result;
(function() {
// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
// Skip this test if running with the "wiredTiger" storage engine, since it requires
// using 'nojournal' in a replica set, which is not supported when using WT.
if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
// WT is currently the default engine so it is used when 'storageEngine' is not set.
jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
return;
}
jsTest.log("Starting no journal/repl set tests...");
var request;
var result;
// Start a two-node replica set with no journal
// Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({nodes: 2});
rst.startSet({nojournal: ""});
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.batch_write_command_wc");
// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
//
// Basic insert, default WC
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}]
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.find().itcount());
jsTest.log("Starting no journal/repl set tests...");
//
// Basic insert, majority WC
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}],
writeConcern: {w: 'majority'}
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.find().itcount());
// Start a two-node replica set with no journal
// Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({nodes: 2});
rst.startSet({nojournal: ""});
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.batch_write_command_wc");
//
// Basic insert, w:2 WC
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}],
writeConcern: {w: 2}
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.find().itcount());
//
// Basic insert, default WC
coll.remove({});
printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.find().itcount());
//
// Basic insert, immediate nojournal error
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}],
writeConcern: {j: true}
});
printjson(result = coll.runCommand(request));
assert(!result.ok);
assert.eq(0, coll.find().itcount());
//
// Basic insert, majority WC
coll.remove({});
printjson(
request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.find().itcount());
//
// Basic insert, timeout wc error
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}],
writeConcern: {w: 3, wtimeout: 1}
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert(result.writeConcernError);
assert.eq(100, result.writeConcernError.code);
assert.eq(1, coll.find().itcount());
//
// Basic insert, w:2 WC
coll.remove({});
printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2}});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.find().itcount());
//
// Basic insert, wmode wc error
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}],
writeConcern: {w: 'invalid'}
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert(result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Basic insert, immediate nojournal error
coll.remove({});
printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {j: true}});
printjson(result = coll.runCommand(request));
assert(!result.ok);
assert.eq(0, coll.find().itcount());
//
// Two ordered inserts, write error and wc error both reported
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}, {$invalid: 'doc'}],
writeConcern: {w: 'invalid'}
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert(result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Basic insert, timeout wc error
coll.remove({});
printjson(
request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1}});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert(result.writeConcernError);
assert.eq(100, result.writeConcernError.code);
assert.eq(1, coll.find().itcount());
//
// Two unordered inserts, write error and wc error reported
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}, {$invalid: 'doc'}],
writeConcern: {w: 'invalid'},
ordered: false
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert(result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Basic insert, wmode wc error
coll.remove({});
printjson(
request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert(result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Write error with empty writeConcern object.
coll.remove({});
request = {
insert: coll.getName(),
documents: [{_id: 1}, {_id: 1}],
writeConcern: {},
ordered: false
};
result = coll.runCommand(request);
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert.eq(null, result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Two ordered inserts, write error and wc error both reported
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}, {$invalid: 'doc'}],
writeConcern: {w: 'invalid'}
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert(result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Write error with unspecified w.
coll.remove({});
request = {
insert: coll.getName(),
documents: [{_id: 1}, {_id: 1}],
writeConcern: {wtimeout: 1},
ordered: false
};
result = assert.commandWorkedIgnoringWriteErrors(coll.runCommand(request));
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert.eq(null, result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Two unordered inserts, write error and wc error reported
coll.remove({});
printjson(request = {
insert: coll.getName(),
documents: [{a: 1}, {$invalid: 'doc'}],
writeConcern: {w: 'invalid'},
ordered: false
});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert(result.writeConcernError);
assert.eq(1, coll.find().itcount());
jsTest.log("DONE no journal/repl tests");
rst.stopSet();
//
// Write error with empty writeConcern object.
coll.remove({});
request =
{insert: coll.getName(), documents: [{_id: 1}, {_id: 1}], writeConcern: {}, ordered: false};
result = coll.runCommand(request);
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert.eq(null, result.writeConcernError);
assert.eq(1, coll.find().itcount());
//
// Write error with unspecified w.
coll.remove({});
request = {
insert: coll.getName(),
documents: [{_id: 1}, {_id: 1}],
writeConcern: {wtimeout: 1},
ordered: false
};
result = assert.commandWorkedIgnoringWriteErrors(coll.runCommand(request));
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
assert.eq(result.writeErrors[0].index, 1);
assert.eq(null, result.writeConcernError);
assert.eq(1, coll.find().itcount());
jsTest.log("DONE no journal/repl tests");
rst.stopSet();
})();


@@ -4,132 +4,148 @@
// so cannot be run on the ephemeralForTest storage engine, as it accepts all journaled writes.
// @tags: [SERVER-21420]
jsTest.log("Starting bulk api write concern tests...");
(function() {
// Start a 2-node replica set with no journal
// Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({nodes: 2});
rst.startSet({nojournal: ""});
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.bulk_api_wc");
jsTest.log("Starting bulk api write concern tests...");
var executeTests = function() {
// Skip this test if running with the "wiredTiger" storage engine, since it requires
// using 'nojournal' in a replica set, which is not supported when using WT.
if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
// WT is currently the default engine so it is used when 'storageEngine' is not set.
jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
return;
}
// Create a unique index; legacy writes validate too early to use invalid documents for
// write error testing.
coll.ensureIndex({a: 1}, {unique: true});
// Start a 2-node replica set with no journal
// Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({nodes: 2});
rst.startSet({nojournal: ""});
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.bulk_api_wc");
//
// Ordered
//
var executeTests = function() {
//
// Fail due to nojournal
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
assert.throws(function() {
bulk.execute({j: true});
});
// Create a unique index; legacy writes validate too early to use invalid documents
// for write error testing.
coll.ensureIndex({a: 1}, {unique: true});
//
// Fail due to unrecognized write concern field.
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
var result = assert.throws(function() {
bulk.execute({x: 1});
});
assert.eq(ErrorCodes.FailedToParse, result.code, 'unexpected error code: ' + tojson(result));
assert.eq('unrecognized write concern field: x',
result.errmsg,
'unexpected error message: ' + tojson(result));
//
// Ordered
//
//
// Fail with write error, no write concern error even though it would fail on apply for ordered
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.insert({a: 2});
result = assert.throws(function() {
bulk.execute({w: 'invalid'});
});
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert(!result.getWriteConcernError());
assert.eq(coll.find().itcount(), 2);
//
// Fail due to nojournal
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
assert.throws(function() {
bulk.execute({j: true});
});
//
// Unordered
//
//
// Fail due to unrecognized write concern field.
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
var result = assert.throws(function() {
bulk.execute({x: 1});
});
assert.eq(
ErrorCodes.FailedToParse, result.code, 'unexpected error code: ' + tojson(result));
assert.eq('unrecognized write concern field: x',
result.errmsg,
'unexpected error message: ' + tojson(result));
//
// Fail with write error, write concern error reported when unordered
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.insert({a: 2});
var result = assert.throws(function() {
bulk.execute({w: 'invalid'});
});
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert(result.getWriteConcernError());
assert.eq(coll.find().itcount(), 2);
//
// Fail with write error, no write concern error even though it would fail on apply for
// ordered
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.insert({a: 2});
result = assert.throws(function() {
bulk.execute({w: 'invalid'});
});
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert(!result.getWriteConcernError());
assert.eq(coll.find().itcount(), 2);
//
// Fail with write error, write concern timeout reported when unordered
// Note that wtimeout:true can only be reported when the batch is all the same, so there
// are not multiple wc errors.
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.insert({a: 2});
var result = assert.throws(function() {
bulk.execute({w: 3, wtimeout: 1});
});
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert.eq(100, result.getWriteConcernError().code);
assert.eq(coll.find().itcount(), 2);
//
// Unordered
//
//
// Fail with write error and upserted, write concern error reported when unordered
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.find({a: 3}).upsert().updateOne({a: 3});
bulk.insert({a: 3});
var result = assert.throws(function() {
bulk.execute({w: 'invalid'});
});
assert.eq(result.nInserted, 2);
assert.eq(result.nUpserted, 1);
assert.eq(result.getUpsertedIds()[0].index, 2);
assert.eq(result.getWriteErrors()[0].index, 3);
assert(result.getWriteConcernError());
assert.eq(coll.find().itcount(), 3);
};
//
// Fail with write error, write concern error reported when unordered
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.insert({a: 2});
var result = assert.throws(function() {
bulk.execute({w: 'invalid'});
});
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert(result.getWriteConcernError());
assert.eq(coll.find().itcount(), 2);
// Use write commands
coll.getMongo().useWriteCommands = function() {
return true;
};
executeTests();
//
// Fail with write error, write concern timeout reported when unordered
// Note that wtimeout:true can only be reported when the batch is all the same, so there
// are not multiple wc errors.
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.insert({a: 2});
var result = assert.throws(function() {
bulk.execute({w: 3, wtimeout: 1});
});
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert.eq(100, result.getWriteConcernError().code);
assert.eq(coll.find().itcount(), 2);
// FAILING currently due to incorrect batch api reading of GLE
// Use legacy opcodes
coll.getMongo().useWriteCommands = function() {
return false;
};
executeTests();
//
// Fail with write error and upserted, write concern error reported when unordered
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 2});
bulk.find({a: 3}).upsert().updateOne({a: 3});
bulk.insert({a: 3});
var result = assert.throws(function() {
bulk.execute({w: 'invalid'});
});
assert.eq(result.nInserted, 2);
assert.eq(result.nUpserted, 1);
assert.eq(result.getUpsertedIds()[0].index, 2);
assert.eq(result.getWriteErrors()[0].index, 3);
assert(result.getWriteConcernError());
assert.eq(coll.find().itcount(), 3);
};
jsTest.log("DONE bulk api wc tests");
rst.stopSet();
// Use write commands
coll.getMongo().useWriteCommands = function() {
return true;
};
executeTests();
// FAILING currently due to incorrect batch api reading of GLE
// Use legacy opcodes
coll.getMongo().useWriteCommands = function() {
return false;
};
executeTests();
jsTest.log("DONE bulk api wc tests");
rst.stopSet();
})();


@@ -4,6 +4,14 @@
(function() {
'use strict';
// Skip this test if running with the "wiredTiger" storage engine, since it requires
// using 'nojournal' in a replica set, which is not supported when using WT.
if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
// WT is currently the default engine so it is used when 'storageEngine' is not set.
jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
return;
}
var nodeCount = 3;
var rst = new ReplSetTest({nodes: nodeCount});
rst.startSet({nojournal: ""});


@@ -769,6 +769,15 @@ ExitCode _initAndListen(int listenPort) {
}
}
// Disallow running WiredTiger with --nojournal in a replica set
if (storageGlobalParams.engine == "wiredTiger" && !storageGlobalParams.dur &&
replSettings.usingReplSets()) {
log() << "Runnning wiredTiger without journaling in a replica set is not "
<< "supported. Make sure you are not using --nojournal and that "
<< "storage.journal.enabled is not set to 'false'.";
exitCleanly(EXIT_BADOPTIONS);
}
logMongodStartupWarnings(storageGlobalParams, serverGlobalParams, serviceContext);
{
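Note that the new guard fires only when replSettings.usingReplSets() is true, so a standalone wiredTiger node may still run without a journal. A hypothetical companion check, under the same MongoRunner assumptions as the sketch above:

(function() {
    'use strict';
    // Hypothetical check: outside of a replica set, --nojournal with
    // wiredTiger remains a valid (if unsafe) configuration.
    var conn = MongoRunner.runMongod({storageEngine: "wiredTiger", nojournal: ""});
    assert.neq(null, conn, "standalone mongod should still start with --nojournal");
    MongoRunner.stopMongod(conn);
})();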


@@ -408,17 +408,6 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
warned = true;
}
// Check if --nojournal
bool isReplSet = replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet;
if (isReplSet && storageParams.engine == "wiredTiger" && !storageParams.dur) {
log() << startupWarningsLog;
log() << "** WARNING: Running wiredTiger with the --nojournal option in a replica set"
<< startupWarningsLog;
log() << "** is deprecated and subject to be removed in a future version."
<< startupWarningsLog;
warned = true;
}
if (warned) {
log() << startupWarningsLog;
}