Mirror of https://github.com/mongodb/mongo.git

SERVER-97089 Remove irrelevant comments related to tenant migration (#29201)

GitOrigin-RevId: 9d281651f245f0002d0755bb555e80149a5b44b1

Parent: e9cbff0ea2
Commit: dfd009b9c6
@@ -9,7 +9,7 @@
  * # This complicates aggregation extraction.
  * do_not_wrap_aggregations_in_facets,
  * # Refusing to run a test that issues an aggregation command with explain because it may
- * # return incomplete results if interrupted by a stepdown/tenant migration.
+ * # return incomplete results if interrupted by a stepdown.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
  * requires_timeseries,
@@ -1,7 +1,7 @@
  /**
  * Tests maximum time-range of measurements held in each bucket in a time-series buckets collection.
  * @tags: [
- * # This test depends on certain writes ending up in the same bucket. Stepdowns/tenant migration
+ * # This test depends on certain writes ending up in the same bucket. Stepdowns
  * # may result in writes splitting between two primaries, and thus different buckets.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
@@ -8,7 +8,7 @@
  * requires_non_retryable_writes,
  * requires_fcv_70,
  * # This test depends on certain writes ending up in the same bucket to trigger compression.
- * # Stepdowns and tenant migrations may result in writes splitting between two primaries, and
+ * # Stepdowns may result in writes splitting between two primaries, and
  * # thus different buckets.
  * does_not_support_stepdowns,
  * ]
@@ -7,7 +7,7 @@
  * not_allowed_with_signed_security_token,
  * # Queries on mongoS may not request or provide a resume token.
  * assumes_against_mongod_not_mongos,
- * # Resuming may not work properly with stepdowns/tenant migration.
+ * # Resuming may not work properly with stepdowns.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
  * requires_timeseries,
@@ -2,7 +2,7 @@
  * Tests inserting sample data into the time-series buckets collection.
  * This test is for the simple case of only one measurement per bucket.
  * @tags: [
- * # This test depends on certain writes ending up in the same bucket. Stepdowns/tenant migration
+ * # This test depends on certain writes ending up in the same bucket. Stepdowns
  * # may result in writes splitting between two primaries, and thus different buckets.
  * does_not_support_stepdowns,
  * # We need a timeseries collection.
@@ -9,7 +9,7 @@
  * requires_non_retryable_writes,
  * featureFlagTimeseriesUpdatesSupport,
  * # This test depends on certain writes ending up in the same bucket to trigger compression.
- * # Stepdowns and tenant migrations may result in writes splitting between two primaries, and
+ * # Stepdowns may result in writes splitting between two primaries, and
  * # thus different buckets.
  * does_not_support_stepdowns,
  * ]
@@ -78,8 +78,9 @@ function runTest(retryableWrite) {
  coll.drop();
  coll1.drop();
 
+ // TODO SERVER-97170 check if the following is still necessary
  // Want to test ns is properly applied to a cursor that does not need a getMore. This test
- // is in this file so it does not run in tenant migration suites since that would change the ns
+ // is in this file so it does not run in suites since that would change the ns
  // name.
  res = assert.commandWorked(db.adminCommand({
  bulkWrite: 1,
@@ -1,7 +1,7 @@
  /**
  * Tests batch-deleting a large range of data using predicate on the 'a' field.
- * This test does not rely on getMores on purpose, as this is a requirement for running on
- * tenant migration passthroughs.
+ * TODO SERVER-97166 Re-evaluate if this test does not need to rely on getMores
+ * This test does not rely on getMores on purpose.
  *
  * @tags: [
  * does_not_support_retryable_writes,
@@ -1,7 +1,7 @@
  /**
  * Tests batch-deleting a large range of data using predicate on the _id field.
- * This test does not rely on getMores on purpose, as this is a requirement for running on
- * tenant migration passthroughs.
+ * TODO SERVER-97166 Re-evaluate if this test does not need to rely on getMores
+ * This test does not rely on getMores on purpose.
  *
  * @tags: [
  * does_not_support_retryable_writes,
@@ -1,7 +1,7 @@
  /**
  * Tests batch-deleting a large range of data using a given predicate.
- * This test does not rely on getMores on purpose, as this is a requirement for running on
- * tenant migration passthroughs.
+ * TODO SERVER-97166 Re-evaluate if this test does not need to rely on getMores
+ * This test does not rely on getMores on purpose.
  */
 
  export function runBatchedMultiDeletesTest(coll, queryPredicate) {
@@ -885,9 +885,8 @@ createRst = function(rstArgs, retryOnRetryableErrors) {
  return new ReplSetTest({rstArgs: rstArgs});
  } catch (e) {
  if (retryOnRetryableErrors && isNetworkError(e)) {
- jsTest.log(`Failed to create ReplSetTest for ${
- rstArgs.name} inside tenant migration thread: ${tojson(e)}. Retrying in ${
- kCreateRstRetryIntervalMS}ms.`);
+ jsTest.log(`Failed to create ReplSetTest for ${rstArgs.name} with error: ${
+ tojson(e)}. Retrying in ${kCreateRstRetryIntervalMS}ms.`);
  sleep(kCreateRstRetryIntervalMS);
  continue;
  }
@@ -442,7 +442,7 @@ certs:
  DNS: localhost
  IP: 127.0.0.1
 
- # For tenant migration testing.
+ # TODO SERVER-97176 do we need this cert after removal of tenant migration code?
  - name: "rs0.pem"
  description: General purpose server certificate file.
  Subject:
@@ -180,9 +180,7 @@ void _processCollModIndexRequestUnique(OperationContext* opCtx,
  boost::optional<bool>* newUnique) {
  invariant(!idx->unique(), str::stream() << "Index is already unique: " << idx->infoObj());
 
- // Checks for duplicates for the 'applyOps' command. In the tenant migration case, assumes
- // similarly to initial sync that we don't need to perform this check in the destination
- // cluster.
+ // Checks for duplicates for the 'applyOps' command.
  if (mode && *mode == repl::OplogApplication::Mode::kApplyOpsCmd) {
  auto duplicateRecords = scanIndexForDuplicates(opCtx, idx);
  if (!duplicateRecords.empty()) {
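For context on the hunk above: the retained comment still describes an applyOps-only duplicate scan before an index is converted to unique. A rough, hypothetical C++ sketch of that control flow follows; the stand-in types and the stubbed scanIndexForDuplicates below are illustrative only, not the server's actual signatures.

// Hypothetical sketch of the control flow kept by the hunk above; the real code
// lives in MongoDB's coll_mod_index.cpp with different types and error handling.
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

struct OperationContext {};  // stand-in for the real OperationContext

enum class OplogApplicationMode { kApplyOpsCmd, kSecondary };

// Stub for the real index scan: would return every key that appears more than once.
std::vector<std::string> scanIndexForDuplicates(OperationContext*, const std::string&) {
    return {};  // pretend the index holds no duplicate keys
}

// Converting an index to unique through an 'applyOps' collMod still verifies that
// no duplicate keys exist; other application modes trust the primary's validation.
void processCollModUnique(OperationContext* opCtx,
                          const std::string& indexName,
                          OplogApplicationMode mode) {
    if (mode == OplogApplicationMode::kApplyOpsCmd) {
        auto duplicates = scanIndexForDuplicates(opCtx, indexName);
        if (!duplicates.empty()) {
            throw std::runtime_error("cannot convert '" + indexName +
                                     "' to unique: duplicate keys exist");
        }
    }
    std::cout << "index '" << indexName << "' marked unique\n";
}

int main() {
    OperationContext opCtx;
    processCollModUnique(&opCtx, "a_1", OplogApplicationMode::kApplyOpsCmd);
    return 0;
}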
@@ -142,7 +142,7 @@ std::vector<OplogSlot> reserveOplogSlotsForRetryableFindAndModify(OperationConte
  }
 
  // We reserve oplog slots here, expecting the slot with the greatest timestmap (say TS) to be
- // used as the oplog timestamp. Tenant migrations and resharding will forge no-op image oplog
+ // used as the oplog timestamp. Resharding will forge no-op image oplog
  // entries and set the timestamp for these synthetic entries to be TS - 1.
  auto oplogInfo = LocalOplogInfo::get(opCtx);
  auto slots = oplogInfo->getNextOpTimes(opCtx, 2);
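The reasoning this hunk (and the two updateDocument hunks below) keeps is the two-slot scheme: two consecutive oplog slots are reserved, the greater timestamp TS goes to the real update entry, and TS - 1 is left free so a forged no-op pre-/post-image entry can be timestamped immediately before it. A minimal, hypothetical sketch of that arithmetic follows; the Timestamp and FakeOplogInfo types are simplified stand-ins, not LocalOplogInfo.

// Hypothetical illustration of the TS / (TS - 1) split described in the comment
// above; real reservation goes through LocalOplogInfo::getNextOpTimes(opCtx, 2).
#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

struct Timestamp {
    uint64_t secs;
    uint32_t inc;  // increment within the same second
};

// Pretend oplog: hands out strictly increasing timestamps.
struct FakeOplogInfo {
    uint32_t nextInc = 1;
    std::vector<Timestamp> getNextOpTimes(size_t count) {
        std::vector<Timestamp> slots;
        for (size_t i = 0; i < count; ++i) {
            slots.push_back(Timestamp{1700000000, nextInc++});
        }
        return slots;
    }
};

int main() {
    FakeOplogInfo oplogInfo;

    // Reserve two slots for a retryable findAndModify that stores its image in a
    // side collection, mirroring getNextOpTimes(opCtx, 2) in the hunk above.
    auto slots = oplogInfo.getNextOpTimes(2);
    assert(slots.size() == 2);

    const Timestamp forgedImageTs = slots[0];  // TS - 1: forged no-op image entry
    const Timestamp updateTs = slots[1];       // TS: the update oplog entry itself

    // The forged image entry must sort immediately before the update entry so a
    // consumer such as resharding can pair the two back up.
    assert(forgedImageTs.inc + 1 == updateTs.inc);

    std::cout << "image no-op at (" << forgedImageTs.secs << ", " << forgedImageTs.inc
              << "), update at (" << updateTs.secs << ", " << updateTs.inc << ")\n";
    return 0;
}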
@@ -695,13 +695,13 @@ void updateDocument(OperationContext* opCtx,
  // post-image in a side collection, then we must reserve oplog slots in advance. We
  // expect to use the reserved oplog slots as follows, where TS is the greatest
  // timestamp of 'oplogSlots':
- // TS - 1: Tenant migrations and resharding will forge no-op image oplog entries and set
+ // TS - 1: Resharding will forge no-op image oplog entries and set
  // the entry timestamps to TS - 1.
  // TS: The timestamp given to the update oplog entry.
  args->oplogSlots = reserveOplogSlotsForRetryableFindAndModify(opCtx);
  } else {
  // Retryable findAndModify commands should not reserve oplog slots before entering this
- // function since tenant migrations and resharding rely on always being able to set
+ // function since resharding rely on always being able to set
  // timestamps of forged pre- and post- image entries to timestamp of findAndModify - 1.
  invariant(!(args->retryableWrite && setNeedsRetryImageOplogField));
  }
@@ -775,13 +775,13 @@ StatusWith<BSONObj> updateDocumentWithDamages(OperationContext* opCtx,
  // post-image in a side collection, then we must reserve oplog slots in advance. We
  // expect to use the reserved oplog slots as follows, where TS is the greatest
  // timestamp of 'oplogSlots':
- // TS - 1: Tenant migrations and resharding will forge no-op image oplog entries and set
+ // TS - 1: Resharding will forge no-op image oplog entries and set
  // the entry timestamps to TS - 1.
  // TS: The timestamp given to the update oplog entry.
  args->oplogSlots = reserveOplogSlotsForRetryableFindAndModify(opCtx);
  } else {
  // Retryable findAndModify commands should not reserve oplog slots before entering this
- // function since tenant migrations and resharding rely on always being able to set
+ // function since resharding rely on always being able to set
  // timestamps of forged pre- and post- image entries to timestamp of findAndModify - 1.
  invariant(!(args->retryableWrite && setNeedsRetryImageOplogField));
  }
@@ -71,8 +71,7 @@ private:
  };
 
  /**
- * This class contains some static methods common to ordinary oplog application and oplog
- * application as part of tenant migration.
+ * This class contains some static methods common to ordinary oplog application.
  */
  class OplogApplierUtils {
  public:
@@ -686,8 +686,7 @@ StatusWith<OplogFetcher::Documents> OplogFetcher::_getNextBatch() {
  Timer timer;
  if (!_cursor) {
  // An error occurred and we should recreate the cursor.
- // The OplogFetcher uses an aggregation command in tenant migrations, which does not
- // support tailable cursors. When recreating the cursor, use the longer initial max time
+ // When recreating the cursor, use the longer initial max time
  // to avoid timing out.
  auto status = _createNewCursor(false);
  if (!status.isOK()) {
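The surviving comment above is about giving a freshly recreated cursor the longer initial max time rather than the shorter steady-state one. A generic, hypothetical sketch of that retry pattern follows; the Fetcher/Cursor types and the timeout values are invented for illustration and are not the OplogFetcher's real interface.

// Hypothetical sketch of "use the longer initial max time when recreating the
// cursor"; the real logic lives in OplogFetcher::_createNewCursor/_getNextBatch.
#include <chrono>
#include <iostream>
#include <optional>
#include <string>

using namespace std::chrono_literals;

struct Batch {
    std::string data;
};

// Stand-in for a remote tailable cursor; establishing it can be slow after a failure.
struct Cursor {
    std::optional<Batch> more(std::chrono::milliseconds /*maxTime*/) {
        return Batch{"oplog entries..."};
    }
};

class Fetcher {
public:
    std::optional<Batch> getNextBatch() {
        if (!_cursor) {
            // An error dropped the cursor: recreate it and allow the longer
            // initial deadline so the first request does not time out.
            _cursor = Cursor{};
            return _cursor->more(_initialMaxTime);
        }
        // Steady-state getMores use the shorter deadline.
        return _cursor->more(_getMoreMaxTime);
    }

private:
    std::optional<Cursor> _cursor;
    std::chrono::milliseconds _initialMaxTime = 60s;  // illustrative values only
    std::chrono::milliseconds _getMoreMaxTime = 5s;
};

int main() {
    Fetcher fetcher;
    if (auto batch = fetcher.getNextBatch()) {
        std::cout << "got batch: " << batch->data << "\n";
    }
    return 0;
}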