diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index 1b8bd562bc4..683642a83d3 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -110,6 +110,9 @@ MONGO_FAIL_POINT_DEFINE(failAndHangInitialSync);
 // Failpoint which fails initial sync before it applies the next batch of oplog entries.
 MONGO_FAIL_POINT_DEFINE(failInitialSyncBeforeApplyingBatch);
 
+// Failpoint which fasserts if applying a batch fails.
+MONGO_FAIL_POINT_DEFINE(initialSyncFassertIfApplyingBatchFails);
+
 namespace {
 using namespace executor;
 using CallbackArgs = executor::TaskExecutor::CallbackArgs;
@@ -1264,6 +1267,14 @@ void InitialSyncer::_multiApplierCallback(const Status& multiApplierStatus,
     stdx::lock_guard lock(_mutex);
     auto status =
         _checkForShutdownAndConvertStatus_inlock(multiApplierStatus, "error applying batch");
+
+    // Set to cause initial sync to fassert instead of restart if applying a batch fails, so that
+    // tests can be robust to network errors but not oplog idempotency errors.
+    if (MONGO_FAIL_POINT(initialSyncFassertIfApplyingBatchFails)) {
+        log() << "initialSyncFassertIfApplyingBatchFails fail point enabled.";
+        fassert(31210, status);
+    }
+
     if (!status.isOK()) {
         error() << "Failed to apply batch due to '" << redact(status) << "'";
         onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, status);