diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js b/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js
index cc8b6af7d35..48d74d9c7ad 100644
--- a/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js
+++ b/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js
@@ -9,7 +9,7 @@
  * # This complicates aggregation extraction.
  * do_not_wrap_aggregations_in_facets,
  * # Refusing to run a test that issues an aggregation command with explain because it may
- * # return incomplete results if interrupted by a stepdown/tenant migration.
+ * # return incomplete results if interrupted by a stepdown.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
  * requires_timeseries,
diff --git a/jstests/core/timeseries/timeseries_bucket_limit_time_range.js b/jstests/core/timeseries/timeseries_bucket_limit_time_range.js
index a5344660211..5c0a6b4e79a 100644
--- a/jstests/core/timeseries/timeseries_bucket_limit_time_range.js
+++ b/jstests/core/timeseries/timeseries_bucket_limit_time_range.js
@@ -1,7 +1,7 @@
 /**
  * Tests maximum time-range of measurements held in each bucket in a time-series buckets collection.
  * @tags: [
- * # This test depends on certain writes ending up in the same bucket. Stepdowns/tenant migration
+ * # This test depends on certain writes ending up in the same bucket. Stepdowns
  * # may result in writes splitting between two primaries, and thus different buckets.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
diff --git a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js
index 4dddeb74efb..3ec34345d70 100644
--- a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js
+++ b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js
@@ -8,7 +8,7 @@
  * requires_non_retryable_writes,
  * requires_fcv_70,
  * # This test depends on certain writes ending up in the same bucket to trigger compression.
- * # Stepdowns and tenant migrations may result in writes splitting between two primaries, and
+ * # Stepdowns may result in writes splitting between two primaries, and
  * # thus different buckets.
  * does_not_support_stepdowns,
  * ]
diff --git a/jstests/core/timeseries/timeseries_resume_after.js b/jstests/core/timeseries/timeseries_resume_after.js
index 88826b22072..a329f2c288f 100644
--- a/jstests/core/timeseries/timeseries_resume_after.js
+++ b/jstests/core/timeseries/timeseries_resume_after.js
@@ -7,7 +7,7 @@
  * not_allowed_with_signed_security_token,
  * # Queries on mongoS may not request or provide a resume token.
  * assumes_against_mongod_not_mongos,
- * # Resuming may not work properly with stepdowns/tenant migration.
+ * # Resuming may not work properly with stepdowns.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
  * requires_timeseries,
diff --git a/jstests/core/timeseries/timeseries_simple.js b/jstests/core/timeseries/timeseries_simple.js
index 6ab025884c5..d795cf9ec7b 100644
--- a/jstests/core/timeseries/timeseries_simple.js
+++ b/jstests/core/timeseries/timeseries_simple.js
@@ -2,7 +2,7 @@
  * Tests inserting sample data into the time-series buckets collection.
  * This test is for the simple case of only one measurement per bucket.
  * @tags: [
- * # This test depends on certain writes ending up in the same bucket. Stepdowns/tenant migration
+ * # This test depends on certain writes ending up in the same bucket. Stepdowns
  * # may result in writes splitting between two primaries, and thus different buckets.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
diff --git a/jstests/core/timeseries/timeseries_update_compressed_buckets.js b/jstests/core/timeseries/timeseries_update_compressed_buckets.js
index a677a3d2f5b..408cad160ee 100644
--- a/jstests/core/timeseries/timeseries_update_compressed_buckets.js
+++ b/jstests/core/timeseries/timeseries_update_compressed_buckets.js
@@ -9,7 +9,7 @@
  * requires_non_retryable_writes,
  * featureFlagTimeseriesUpdatesSupport,
  * # This test depends on certain writes ending up in the same bucket to trigger compression.
- * # Stepdowns and tenant migrations may result in writes splitting between two primaries, and
+ * # Stepdowns may result in writes splitting between two primaries, and
  * # thus different buckets.
  * does_not_support_stepdowns,
  * ]
diff --git a/jstests/core/txns/bulk_write_getMore.js b/jstests/core/txns/bulk_write_getMore.js
index 938efcb703a..3df516c74b5 100644
--- a/jstests/core/txns/bulk_write_getMore.js
+++ b/jstests/core/txns/bulk_write_getMore.js
@@ -78,8 +78,9 @@ function runTest(retryableWrite) {
     coll.drop();
     coll1.drop();
 
+    // TODO SERVER-97170 Check if the following is still necessary.
     // Want to test ns is properly applied to a cursor that does not need a getMore. This test
-    // is in this file so it does not run in tenant migration suites since that would change the ns
+    // is in this file so it does not run in suites that would change the ns
     // name.
     res = assert.commandWorked(db.adminCommand({
         bulkWrite: 1,
diff --git a/jstests/core/write/delete/batched_multi_deletes_a.js b/jstests/core/write/delete/batched_multi_deletes_a.js
index a9d2db4b1c3..a6e4dd4fb14 100644
--- a/jstests/core/write/delete/batched_multi_deletes_a.js
+++ b/jstests/core/write/delete/batched_multi_deletes_a.js
@@ -1,7 +1,7 @@
 /**
  * Tests batch-deleting a large range of data using predicate on the 'a' field.
- * This test does not rely on getMores on purpose, as this is a requirement for running on
- * tenant migration passthroughs.
+ * TODO SERVER-97166 Re-evaluate whether this test still needs to avoid getMores.
+ * This test does not rely on getMores on purpose.
  *
  * @tags: [
  *   does_not_support_retryable_writes,
diff --git a/jstests/core/write/delete/batched_multi_deletes_id.js b/jstests/core/write/delete/batched_multi_deletes_id.js
index 37f8cfb18c4..c08ddb5d238 100644
--- a/jstests/core/write/delete/batched_multi_deletes_id.js
+++ b/jstests/core/write/delete/batched_multi_deletes_id.js
@@ -1,7 +1,7 @@
 /**
  * Tests batch-deleting a large range of data using predicate on the _id field.
- * This test does not rely on getMores on purpose, as this is a requirement for running on
- * tenant migration passthroughs.
+ * TODO SERVER-97166 Re-evaluate whether this test still needs to avoid getMores.
+ * This test does not rely on getMores on purpose.
  *
  * @tags: [
  *   does_not_support_retryable_writes,
diff --git a/jstests/core/write/delete/libs/batched_multi_deletes.js b/jstests/core/write/delete/libs/batched_multi_deletes.js
index 4b8a8e2764f..8539f2e67d8 100644
--- a/jstests/core/write/delete/libs/batched_multi_deletes.js
+++ b/jstests/core/write/delete/libs/batched_multi_deletes.js
@@ -1,7 +1,7 @@
 /**
  * Tests batch-deleting a large range of data using a given predicate.
- * This test does not rely on getMores on purpose, as this is a requirement for running on
- * tenant migration passthroughs.
+ * TODO SERVER-97166 Re-evaluate whether this test still needs to avoid getMores.
+ * This test does not rely on getMores on purpose.
  */
 
 export function runBatchedMultiDeletesTest(coll, queryPredicate) {
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index aaf86d2728d..d0967913fdf 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -885,9 +885,8 @@ createRst = function(rstArgs, retryOnRetryableErrors) {
             return new ReplSetTest({rstArgs: rstArgs});
         } catch (e) {
             if (retryOnRetryableErrors && isNetworkError(e)) {
-                jsTest.log(`Failed to create ReplSetTest for ${
-                    rstArgs.name} inside tenant migration thread: ${tojson(e)}. Retrying in ${
-                    kCreateRstRetryIntervalMS}ms.`);
+                jsTest.log(`Failed to create ReplSetTest for ${rstArgs.name} with error: ${
+                    tojson(e)}. Retrying in ${kCreateRstRetryIntervalMS}ms.`);
                 sleep(kCreateRstRetryIntervalMS);
                 continue;
             }
diff --git a/jstests/ssl/x509/certs.yml b/jstests/ssl/x509/certs.yml
index a5c9e422360..bdcd2cecfbd 100644
--- a/jstests/ssl/x509/certs.yml
+++ b/jstests/ssl/x509/certs.yml
@@ -442,7 +442,7 @@ certs:
       DNS: localhost
       IP: 127.0.0.1
 
-# For tenant migration testing.
+# TODO SERVER-97176 Determine whether this cert is still needed after the removal of the tenant migration code.
 - name: "rs0.pem"
   description: General purpose server certificate file.
   Subject:
diff --git a/src/mongo/db/catalog/coll_mod_index.cpp b/src/mongo/db/catalog/coll_mod_index.cpp
index 59c6a6b028d..a546da8f6cb 100644
--- a/src/mongo/db/catalog/coll_mod_index.cpp
+++ b/src/mongo/db/catalog/coll_mod_index.cpp
@@ -180,9 +180,7 @@ void _processCollModIndexRequestUnique(OperationContext* opCtx,
                                        boost::optional<bool>* newUnique) {
     invariant(!idx->unique(), str::stream() << "Index is already unique: " << idx->infoObj());
 
-    // Checks for duplicates for the 'applyOps' command. In the tenant migration case, assumes
-    // similarly to initial sync that we don't need to perform this check in the destination
-    // cluster.
+    // Checks for duplicates for the 'applyOps' command.
     if (mode && *mode == repl::OplogApplication::Mode::kApplyOpsCmd) {
         auto duplicateRecords = scanIndexForDuplicates(opCtx, idx);
         if (!duplicateRecords.empty()) {
diff --git a/src/mongo/db/collection_crud/collection_write_path.cpp b/src/mongo/db/collection_crud/collection_write_path.cpp
index abcc7f58cf2..cf077b04828 100644
--- a/src/mongo/db/collection_crud/collection_write_path.cpp
+++ b/src/mongo/db/collection_crud/collection_write_path.cpp
@@ -142,7 +142,7 @@ std::vector<OplogSlot> reserveOplogSlotsForRetryableFindAndModify(OperationConte
     }
 
     // We reserve oplog slots here, expecting the slot with the greatest timestmap (say TS) to be
-    // used as the oplog timestamp. Tenant migrations and resharding will forge no-op image oplog
+    // used as the oplog timestamp. Resharding will forge no-op image oplog
    // entries and set the timestamp for these synthetic entries to be TS - 1.
     auto oplogInfo = LocalOplogInfo::get(opCtx);
     auto slots = oplogInfo->getNextOpTimes(opCtx, 2);
@@ -695,13 +695,13 @@ void updateDocument(OperationContext* opCtx,
             // post-image in a side collection, then we must reserve oplog slots in advance. We
             // expect to use the reserved oplog slots as follows, where TS is the greatest
             // timestamp of 'oplogSlots':
-            // TS - 1: Tenant migrations and resharding will forge no-op image oplog entries and set
+            // TS - 1: Resharding will forge no-op image oplog entries and set
             //         the entry timestamps to TS - 1.
             // TS:     The timestamp given to the update oplog entry.
             args->oplogSlots = reserveOplogSlotsForRetryableFindAndModify(opCtx);
         } else {
             // Retryable findAndModify commands should not reserve oplog slots before entering this
-            // function since tenant migrations and resharding rely on always being able to set
+            // function since resharding relies on always being able to set
             // timestamps of forged pre- and post- image entries to timestamp of findAndModify - 1.
             invariant(!(args->retryableWrite && setNeedsRetryImageOplogField));
         }
@@ -775,13 +775,13 @@ StatusWith<RecordData> updateDocumentWithDamages(OperationContext* opCtx,
             // post-image in a side collection, then we must reserve oplog slots in advance. We
             // expect to use the reserved oplog slots as follows, where TS is the greatest
             // timestamp of 'oplogSlots':
-            // TS - 1: Tenant migrations and resharding will forge no-op image oplog entries and set
+            // TS - 1: Resharding will forge no-op image oplog entries and set
             //         the entry timestamps to TS - 1.
             // TS:     The timestamp given to the update oplog entry.
             args->oplogSlots = reserveOplogSlotsForRetryableFindAndModify(opCtx);
         } else {
             // Retryable findAndModify commands should not reserve oplog slots before entering this
-            // function since tenant migrations and resharding rely on always being able to set
+            // function since resharding relies on always being able to set
             // timestamps of forged pre- and post- image entries to timestamp of findAndModify - 1.
             invariant(!(args->retryableWrite && setNeedsRetryImageOplogField));
         }
diff --git a/src/mongo/db/repl/oplog_applier_utils.h b/src/mongo/db/repl/oplog_applier_utils.h
index 8781f25b43e..e4c5356bfe1 100644
--- a/src/mongo/db/repl/oplog_applier_utils.h
+++ b/src/mongo/db/repl/oplog_applier_utils.h
@@ -71,8 +71,7 @@ private:
 };
 
 /**
- * This class contains some static methods common to ordinary oplog application and oplog
- * application as part of tenant migration.
+ * This class contains some static helper methods for oplog application.
  */
 class OplogApplierUtils {
 public:
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index c3648ade880..16844647384 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -686,8 +686,7 @@ StatusWith<OplogFetcher::Documents> OplogFetcher::_getNextBatch() {
     Timer timer;
     if (!_cursor) {
         // An error occurred and we should recreate the cursor.
-        // The OplogFetcher uses an aggregation command in tenant migrations, which does not
-        // support tailable cursors. When recreating the cursor, use the longer initial max time
+        // When recreating the cursor, use the longer initial max time
         // to avoid timing out.
         auto status = _createNewCursor(false);
         if (!status.isOK()) {