Mirror of https://github.com/mongodb/mongo.git (synced 2024-12-01 09:32:32 +01:00)
SERVER-53066 Remove shardingFullDDLSupport feature guard
commit b8daadc929
parent def8791009
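The hunks below drop the featureFlagShardingFullDDLSupport tag from the affected jstests, delete the flag's IDL definition, and remove the legacy code paths that were only reachable while the flag was disabled. For reference, this is the shape of the gate being removed — a minimal sketch based on the jstest code shown further down in this diff (it assumes a ShardingTest instance named st and runs inside the usual (function() { ... })() test wrapper):

// Minimal sketch of the removed gate (assumes `st` is a running ShardingTest and
// this code sits inside the test's (function() { ... })() wrapper). Tests queried
// the config server primary for the flag's value and bailed out when the flag was
// not in the state the test expected.
const flagParam = assert.commandWorked(st.configRS.getPrimary().adminCommand(
    {getParameter: 1, featureFlagShardingFullDDLSupport: 1}));
if (!flagParam.featureFlagShardingFullDDLSupport.value) {
    jsTest.log("Skipping test: featureFlagShardingFullDDLSupport is disabled");
    st.stop();
    return;
}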
@@ -5,7 +5,6 @@
 *
 * @tags: [
 *   requires_sharding,
 *   featureFlagShardingFullDDLSupport,
 * ]
 */
'use strict';

@@ -14,7 +14,6 @@
 *   does_not_support_stepdowns,
 *   # Can be removed once PM-1965-Milestone-1 is completed.
 *   does_not_support_transactions,
 *   featureFlagShardingFullDDLSupport
 * ]
 */

@@ -14,7 +14,6 @@
 *   does_not_support_stepdowns,
 *   # Can be removed once PM-1965-Milestone-1 is completed.
 *   does_not_support_transactions,
 *   featureFlagShardingFullDDLSupport,
 *   # Requires all nodes to be running the latest binary.
 *   multiversion_incompatible
 * ]

@@ -7,7 +7,6 @@
 *   requires_sharding,
 *   # TODO (SERVER-56879): Support add/remove shards in new DDL paths
 *   does_not_support_add_remove_shards,
 *   featureFlagShardingFullDDLSupport,
 * ]
 */

@@ -10,7 +10,6 @@
 *   does_not_support_add_remove_shards,
 *   # Requires all nodes to be running the latest binary.
 *   multiversion_incompatible,
 *   featureFlagShardingFullDDLSupport,
 * ]
 */

@@ -17,7 +17,6 @@
 *   # This test just performs rename operations that can't be executed in transactions
 *   does_not_support_transactions,
 *   # Can be removed once PM-1965-Milestone-1 is completed.
 *   featureFlagShardingFullDDLSupport
 * ]
 */

@@ -3,8 +3,7 @@
 * currentOp command.
 *
 * @tags: [
 *   featureFlagShardingFullDDLSupport,
 *   disabled_due_to_server_58295
 *   disabled_due_to_server_58295,
 * ]
 */
(function() {
@@ -1,200 +0,0 @@
/*
 * Test that the index commands are correctly propagated if they are executed
 * either before, during, or after the initial split critical section.
 */
(function() {
"use strict";

load("jstests/libs/fail_point_util.js");
load("jstests/libs/parallelTester.js");
load("jstests/sharding/libs/sharded_index_util.js");

// Test intentionally inserts orphans outside of migration.
TestData.skipCheckOrphans = true;

/*
 * Shards the given collection.
 */
function runShardCollection(host, ns, shardKey) {
    const mongos = new Mongo(host);
    return mongos.adminCommand({shardCollection: ns, key: shardKey});
}

/*
 * Defines zones for the given collection, then runs shardCollection and the given command after
 * the given shardCollection fail point is hit. If isBlocked is true, asserts that the command is
 * blocked (behind the initial split critical section). Otherwise, asserts that the command
 * succeeds.
 */
function runCommandDuringShardCollection(st, ns, shardKey, zones, failpointName, cmd, isBlocked) {
    const dbName = ns.split(".")[0];

    // Predefine zones for the collection.
    for (const zone of zones) {
        assert.commandWorked(st.s.adminCommand(
            {updateZoneKeyRange: ns, min: zone.min, max: zone.max, zone: zone.name}));
    }

    // Turn on the fail point on the primary shard and wait for shardCollection to hit the
    // fail point.
    let failPoint = configureFailPoint(st.shard0, failpointName);
    let shardCollThread = new Thread(runShardCollection, st.s.host, ns, shardKey);
    shardCollThread.start();
    failPoint.wait();

    if (isBlocked) {
        // Assert that the command eventually times out.
        assert.commandFailedWithCode(
            st.s.getDB(dbName).runCommand(Object.assign(cmd, {maxTimeMS: 500})),
            ErrorCodes.MaxTimeMSExpired);
    } else {
        assert.commandWorked(st.s.getDB(dbName).runCommand(cmd));
    }

    // Turn off the fail point and wait for shardCollection to complete.
    failPoint.off();
    shardCollThread.join();
    assert.commandWorked(shardCollThread.returnData());
}

const numShards = 4;
const st = new ShardingTest({shards: numShards});

const featureFlagParam = assert.commandWorked(
    st.configRS.getPrimary().adminCommand({getParameter: 1, featureFlagShardingFullDDLSupport: 1}));

if (featureFlagParam.featureFlagShardingFullDDLSupport.value) {
    jsTest.log(
        'Skipping test because the featureFlagShardingFullDDLSupport feature flag is enabled and this test expects the exact legacy steps, so it is incompatible with the new path.');
    st.stop();
    return;
}

const allShards = [];
for (let i = 0; i < numShards; i++) {
    allShards.push(st["shard" + i]);
}

const dbName = "test";
const testDB = st.s.getDB(dbName);
const shardKey = {
    _id: 1
};
const index = {
    key: {x: 1},
    name: "x_1"
};

const shardToZone = {
    [st.shard1.shardName]: {name: "zone1", min: {_id: MinKey}, max: {_id: 0}},
    [st.shard2.shardName]: {name: "zone2", min: {_id: 0}, max: {_id: MaxKey}}
};
const zones = Object.values(shardToZone);

const failPoints = [
    {
        name: "pauseShardCollectionBeforeCriticalSection",
        expectedAffectedShards: new Set([st.shard0, st.shard1, st.shard2]),
        criticalSectionInProgress: false
    },
    {
        name: "pauseShardCollectionReadOnlyCriticalSection",
        expectedAffectedShards: new Set([st.shard0, st.shard1, st.shard2]),
        criticalSectionInProgress: true
    },
    {
        name: "pauseShardCollectionCommitPhase",
        expectedAffectedShards: new Set([st.shard0, st.shard1, st.shard2]),
        criticalSectionInProgress: true
    },
    {
        name: "pauseShardCollectionAfterCriticalSection",
        expectedAffectedShards: new Set([st.shard1, st.shard2]),
        criticalSectionInProgress: false
    }
];

assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
for (const [shardName, zone] of Object.entries(shardToZone)) {
    assert.commandWorked(st.s.adminCommand({addShardToZone: shardName, zone: zone.name}));
}

failPoints.forEach(failPoint => {
    jsTest.log(`Testing createIndexes in step ${failPoint.name}...`);
    const collName = "testCreateIndexes" + failPoint.name;
    const ns = dbName + "." + collName;
    const cmd = {createIndexes: collName, indexes: [index]};
    const isBlocked = failPoint.criticalSectionInProgress;

    assert.commandWorked(st.s.getDB(dbName).createCollection(collName));
    runCommandDuringShardCollection(st, ns, shardKey, zones, failPoint.name, cmd, isBlocked);

    if (isBlocked) {
        return;
    }

    // Assert that the index only exists on the targeted shards.
    allShards.forEach(shard => {
        if (failPoint.expectedAffectedShards.has(shard)) {
            ShardedIndexUtil.assertIndexExistsOnShard(shard, dbName, collName, index.key);
        } else {
            ShardedIndexUtil.assertIndexDoesNotExistOnShard(shard, dbName, collName, index.key);
        }
    });
});

failPoints.forEach(failPoint => {
    jsTest.log(`Testing dropIndexes in step ${failPoint.name}...`);
    const collName = "testDropIndexes" + failPoint.name;
    const ns = dbName + "." + collName;
    const cmd = {dropIndexes: collName, index: index.name};
    const isBlocked = failPoint.criticalSectionInProgress;

    assert.commandWorked(st.s.getDB(dbName).createCollection(collName));
    assert.commandWorked(
        st.s.getDB(dbName).runCommand({createIndexes: collName, indexes: [index]}));
    runCommandDuringShardCollection(st, ns, shardKey, zones, failPoint.name, cmd, isBlocked);

    if (isBlocked) {
        return;
    }

    // Assert that the index does not exist on any shards.
    allShards.forEach(shard => {
        const usedToOwnChunks = (shard.shardName === st.shard0.shardName);
        if (failPoint.expectedAffectedShards.has(shard) || !usedToOwnChunks) {
            ShardedIndexUtil.assertIndexDoesNotExistOnShard(shard, dbName, collName, index.key);
        } else {
            ShardedIndexUtil.assertIndexExistsOnShard(st.shard0, dbName, collName, index.key);
        }
    });
});

failPoints.forEach(failPoint => {
    jsTest.log(`Testing collMod in step ${failPoint.name}...`);
    const collName = "testCollMod" + failPoint.name;
    const ns = dbName + "." + collName;
    const cmd = {collMod: collName, validator: {x: {$type: "string"}}};
    const isBlocked = failPoint.criticalSectionInProgress;

    assert.commandWorked(st.s.getDB(dbName).createCollection(collName));
    runCommandDuringShardCollection(st, ns, shardKey, zones, failPoint.name, cmd, isBlocked);

    if (isBlocked) {
        return;
    }

    // Assert that only the targeted shards do document validation.
    allShards.forEach(shard => {
        if (failPoint.expectedAffectedShards.has(shard)) {
            assert.commandFailedWithCode(shard.getCollection(ns).insert({x: 1}),
                                         ErrorCodes.DocumentValidationFailure);
        } else {
            assert.commandWorked(shard.getCollection(ns).insert({x: 1}));
        }
    });
});

st.stop();
})();
@@ -33,19 +33,6 @@ jsTest.log('Testing renaming sharded collections');
assert.commandWorked(
    s.s0.adminCommand({shardCollection: 'test.shardedColl', key: {_id: 'hashed'}}));

const DDLFeatureFlagParam = assert.commandWorked(
    s.configRS.getPrimary().adminCommand({getParameter: 1, featureFlagShardingFullDDLSupport: 1}));
const isDDLFeatureFlagEnabled = DDLFeatureFlagParam.featureFlagShardingFullDDLSupport.value;
// Ensure renaming to or from a sharded collection fails in the legacy path.
if (!isDDLFeatureFlagEnabled) {
    // Renaming from a sharded collection
    assert.commandFailed(db.shardedColl.renameCollection('somethingElse'));

    // Renaming to a sharded collection with dropTarget=true
    const dropTarget = true;
    assert.commandFailed(db.bar.renameCollection('shardedColl', dropTarget));
}

// Renaming to a sharded collection without dropTarget=true
assert.commandFailed(db.bar.renameCollection('shardedColl'));

@@ -75,174 +75,168 @@ function testRename(st, dbName, toNs, dropTarget, mustFail) {
// - Locally unknown target collection to drop
const st = new ShardingTest({shards: 3, mongos: 1, other: {enableBalancer: false}});

// Test just if DDL feature flag enabled
const DDLFeatureFlagParam = assert.commandWorked(
    st.configRS.getPrimary().adminCommand({getParameter: 1, featureFlagShardingFullDDLSupport: 1}));
const isDDLFeatureFlagEnabled = DDLFeatureFlagParam.featureFlagShardingFullDDLSupport.value;
if (isDDLFeatureFlagEnabled) {
const mongos = st.s0;
const mongos = st.s0;

// Rename to non-existing target collection must succeed
{
const dbName = 'testRenameToNewCollection';
const toNs = dbName + '.to';
testRename(st, dbName, toNs, false /* dropTarget */, false /* mustFail */);
// Rename to non-existing target collection must succeed
{
const dbName = 'testRenameToNewCollection';
const toNs = dbName + '.to';
testRename(st, dbName, toNs, false /* dropTarget */, false /* mustFail */);
}

// Rename to existing sharded target collection with dropTarget=true must succeed
{
const dbName = 'testRenameToExistingShardedCollection';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({shardCollection: toNs, key: {a: 1}}));

const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});
toColl.insert({a: 2});
assert.commandWorked(mongos.adminCommand({split: toNs, middle: {a: 1}}));

const toUUID = getUUIDFromConfigCollections(mongos, toNs);
const aChunk = mongos.getDB('config').chunks.findOne({uuid: toUUID});
assert.commandWorked(mongos.adminCommand({
    moveChunk: toNs,
    bounds: [aChunk.min, aChunk.max],
    to: st.shard1.shardName,
}));

testRename(st, dbName, toNs, true /* dropTarget */, false /* mustFail */);
}

// Rename to existing unsharded target collection with dropTarget=true must succeed
{
const dbName = 'testRenameToExistingUnshardedCollection';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});

testRename(st, dbName, toNs, true /* dropTarget */, false /* mustFail */);
}

// Rename to existing unsharded target collection with dropTarget=false must fail
{
const dbName = 'testRenameToUnshardedCollectionWithoutDropTarget';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});

testRename(st, dbName, toNs, false /* dropTarget */, true /* mustFail */);
}

// Rename to existing sharded target collection with dropTarget=false must fail
{
const dbName = 'testRenameToShardedCollectionWithoutDropTarget';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({shardCollection: toNs, key: {a: 1}}));
const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});

testRename(st, dbName, toNs, false /* dropTarget */, true /* mustFail */);
}

// Rename unsharded collection to sharded target collection with dropTarget=true must succeed
{
const dbName = 'testRenameUnshardedToShardedTargetCollection';
const fromNs = dbName + '.from';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({shardCollection: toNs, key: {a: 1}}));

const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});
toColl.insert({a: 2});
assert.commandWorked(mongos.adminCommand({split: toNs, middle: {a: 1}}));
const toUUID = getUUIDFromConfigCollections(mongos, toNs);
const aChunk = mongos.getDB('config').chunks.findOne({uuid: toUUID});
assert.commandWorked(mongos.adminCommand(
    {moveChunk: toNs, bounds: [aChunk.min, aChunk.max], to: st.shard1.shardName}));

const fromColl = mongos.getCollection(fromNs);
fromColl.insert({x: 0});

assert.commandWorked(fromColl.renameCollection(toNs.split('.')[1], true /* dropTarget */));

// Source collection just has documents with field `x`
assert.eq(toColl.find({x: {$exists: true}}).itcount(), 1, 'Expected one source document');
// Source collection just has documents with field `a`
assert.eq(toColl.find({a: {$exists: true}}).itcount(), 0, 'Expected no target documents');
}

// Successful rename must pass tags from source to the target collection
{
const dbName = 'testRenameFromTaggedCollection';
const db = st.getDB(dbName);
const fromNs = dbName + '.from';
const toNs = dbName + '.to';

assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({addShardToZone: st.shard0.shardName, zone: 'x'}));
assert.commandWorked(mongos.adminCommand({addShardToZone: st.shard1.shardName, zone: 'y'}));
assert.commandWorked(
    mongos.adminCommand({updateZoneKeyRange: fromNs, min: {x: 0}, max: {x: 2}, zone: 'x'}));
assert.commandWorked(
    mongos.adminCommand({updateZoneKeyRange: fromNs, min: {x: 2}, max: {x: 4}, zone: 'y'}));
assert.commandWorked(mongos.adminCommand({shardCollection: fromNs, key: {x: 1}}));

var fromTags = mongos.getDB('config').tags.find({ns: fromNs}).toArray();

const fromColl = mongos.getCollection(fromNs);
fromColl.insert({x: 1});

assert.commandWorked(fromColl.renameCollection(toNs.split('.')[1], false /* dropTarget */));

const toTags = mongos.getDB('config').tags.find({ns: toNs}).toArray();
assert.eq(toTags.length, 2, "Expected 2 tags associated to the target collection");

function deleteDifferentTagFields(tag, index, array) {
    delete tag['_id'];
    delete tag['ns'];
}
fromTags.forEach(deleteDifferentTagFields);
toTags.forEach(deleteDifferentTagFields);

// Compare field by field because keys can potentially be in different order
for (field in Object.keys(fromTags[0])) {
    assert.eq(fromTags[0][field],
              toTags[0][field],
              "Expected source tags to be passed to target collection");
    assert.eq(fromTags[1][field],
              toTags[1][field],
              "Expected source tags to be passed to target collection");
}

// Rename to existing sharded target collection with dropTarget=true must succeed
{
const dbName = 'testRenameToExistingShardedCollection';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({shardCollection: toNs, key: {a: 1}}));
fromTags = mongos.getDB('config').tags.find({ns: fromNs}).toArray();
assert.eq(fromTags.length, 0, "Expected no tags associated to the source collection");
}

const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});
toColl.insert({a: 2});
assert.commandWorked(mongos.adminCommand({split: toNs, middle: {a: 1}}));
// Rename to target collection with tags must fail
{
const dbName = 'testRenameToTaggedCollection';
const fromNs = dbName + '.from';
const toNs = dbName + '.to';
assert.commandWorked(mongos.adminCommand({addShardToZone: st.shard0.shardName, zone: 'x'}));
assert.commandWorked(
    mongos.adminCommand({updateZoneKeyRange: toNs, min: {x: 0}, max: {x: 10}, zone: 'x'}));

const toUUID = getUUIDFromConfigCollections(mongos, toNs);
const aChunk = mongos.getDB('config').chunks.findOne({uuid: toUUID});
assert.commandWorked(mongos.adminCommand({
    moveChunk: toNs,
    bounds: [aChunk.min, aChunk.max],
    to: st.shard1.shardName,
}));
assert.commandWorked(mongos.adminCommand({enablesharding: dbName}));
assert.commandWorked(mongos.adminCommand({shardCollection: fromNs, key: {x: 1}}));

testRename(st, dbName, toNs, true /* dropTarget */, false /* mustFail */);
}

// Rename to existing unsharded target collection with dropTarget=true must succeed
{
const dbName = 'testRenameToExistingUnshardedCollection';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});

testRename(st, dbName, toNs, true /* dropTarget */, false /* mustFail */);
}

// Rename to existing unsharded target collection with dropTarget=false must fail
{
const dbName = 'testRenameToUnshardedCollectionWithoutDropTarget';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});

testRename(st, dbName, toNs, false /* dropTarget */, true /* mustFail */);
}

// Rename to existing sharded target collection with dropTarget=false must fail
{
const dbName = 'testRenameToShardedCollectionWithoutDropTarget';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({shardCollection: toNs, key: {a: 1}}));
const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});

testRename(st, dbName, toNs, false /* dropTarget */, true /* mustFail */);
}

// Rename unsharded collection to sharded target collection with dropTarget=true must succeed
{
const dbName = 'testRenameUnshardedToShardedTargetCollection';
const fromNs = dbName + '.from';
const toNs = dbName + '.to';
assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({shardCollection: toNs, key: {a: 1}}));

const toColl = mongos.getCollection(toNs);
toColl.insert({a: 0});
toColl.insert({a: 2});
assert.commandWorked(mongos.adminCommand({split: toNs, middle: {a: 1}}));
const toUUID = getUUIDFromConfigCollections(mongos, toNs);
const aChunk = mongos.getDB('config').chunks.findOne({uuid: toUUID});
assert.commandWorked(mongos.adminCommand(
    {moveChunk: toNs, bounds: [aChunk.min, aChunk.max], to: st.shard1.shardName}));

const fromColl = mongos.getCollection(fromNs);
fromColl.insert({x: 0});

assert.commandWorked(fromColl.renameCollection(toNs.split('.')[1], true /* dropTarget */));

// Source collection just has documents with field `x`
assert.eq(toColl.find({x: {$exists: true}}).itcount(), 1, 'Expected one source document');
// Source collection just has documents with field `a`
assert.eq(toColl.find({a: {$exists: true}}).itcount(), 0, 'Expected no target documents');
}

// Successful rename must pass tags from source to the target collection
{
const dbName = 'testRenameFromTaggedCollection';
const db = st.getDB(dbName);
const fromNs = dbName + '.from';
const toNs = dbName + '.to';

assert.commandWorked(
    mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName}));
assert.commandWorked(mongos.adminCommand({addShardToZone: st.shard0.shardName, zone: 'x'}));
assert.commandWorked(mongos.adminCommand({addShardToZone: st.shard1.shardName, zone: 'y'}));
assert.commandWorked(
    mongos.adminCommand({updateZoneKeyRange: fromNs, min: {x: 0}, max: {x: 2}, zone: 'x'}));
assert.commandWorked(
    mongos.adminCommand({updateZoneKeyRange: fromNs, min: {x: 2}, max: {x: 4}, zone: 'y'}));
assert.commandWorked(mongos.adminCommand({shardCollection: fromNs, key: {x: 1}}));

var fromTags = mongos.getDB('config').tags.find({ns: fromNs}).toArray();

const fromColl = mongos.getCollection(fromNs);
fromColl.insert({x: 1});

assert.commandWorked(fromColl.renameCollection(toNs.split('.')[1], false /* dropTarget */));

const toTags = mongos.getDB('config').tags.find({ns: toNs}).toArray();
assert.eq(toTags.length, 2, "Expected 2 tags associated to the target collection");

function deleteDifferentTagFields(tag, index, array) {
    delete tag['_id'];
    delete tag['ns'];
}
fromTags.forEach(deleteDifferentTagFields);
toTags.forEach(deleteDifferentTagFields);

// Compare field by field because keys can potentially be in different order
for (field in Object.keys(fromTags[0])) {
    assert.eq(fromTags[0][field],
              toTags[0][field],
              "Expected source tags to be passed to target collection");
    assert.eq(fromTags[1][field],
              toTags[1][field],
              "Expected source tags to be passed to target collection");
}

fromTags = mongos.getDB('config').tags.find({ns: fromNs}).toArray();
assert.eq(fromTags.length, 0, "Expected no tags associated to the source collection");
}

// Rename to target collection with tags must fail
{
const dbName = 'testRenameToTaggedCollection';
const fromNs = dbName + '.from';
const toNs = dbName + '.to';
assert.commandWorked(mongos.adminCommand({addShardToZone: st.shard0.shardName, zone: 'x'}));
assert.commandWorked(
    mongos.adminCommand({updateZoneKeyRange: toNs, min: {x: 0}, max: {x: 10}, zone: 'x'}));

assert.commandWorked(mongos.adminCommand({enablesharding: dbName}));
assert.commandWorked(mongos.adminCommand({shardCollection: fromNs, key: {x: 1}}));

const fromColl = mongos.getCollection(fromNs);
fromColl.insert({x: 1});
assert.commandFailed(fromColl.renameCollection(toNs.split('.')[1], false /* dropTarget*/));
}
const fromColl = mongos.getCollection(fromNs);
fromColl.insert({x: 1});
assert.commandFailed(fromColl.renameCollection(toNs.split('.')[1], false /* dropTarget*/));
}

st.stop();
@@ -686,7 +686,7 @@ protected:
    NamespaceString _originalNss = NamespaceString("db.foo");
    UUID _originalUUID = UUID::gen();
    OID _originalEpoch = OID::gen();
    // TODO: SERVER-53066 Initialize it with a Timestamp.
    // TODO: SERVER-58990 Initialize it with a Timestamp.
    boost::optional<Timestamp> _originalTimestamp;

    NamespaceString _tempNss = NamespaceString("db.system.resharding." + _originalUUID.toString());

@@ -695,7 +695,7 @@ protected:

    OID _finalEpoch = OID::gen();
    boost::optional<Timestamp>
        _finalTimestamp;  // TODO: SERVER-53066 Initialize it with a Timestamp.
        _finalTimestamp;  // TODO: SERVER-58990 Initialize it with a Timestamp.

    ShardKeyPattern _oldShardKey = ShardKeyPattern(BSON("oldSK" << 1));
    ShardKeyPattern _newShardKey = ShardKeyPattern(BSON("newSK" << 1));
@@ -30,7 +30,6 @@
#pragma once

#include "mongo/db/commands/feature_compatibility_version.h"
#include "mongo/s/sharding_ddl_50_upgrade_downgrade_gen.h"

namespace mongo {

@@ -29,74 +29,19 @@

#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand

#include "mongo/platform/basic.h"

#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/dist_lock_manager.h"
#include "mongo/db/s/rename_collection_coordinator.h"
#include "mongo/db/s/sharded_rename_collection_gen.h"
#include "mongo/db/s/sharding_ddl_50_upgrade_downgrade.h"
#include "mongo/db/s/sharding_ddl_coordinator_service.h"
#include "mongo/db/s/sharding_ddl_util.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/logv2/log.h"
#include "mongo/s/cluster_commands_helpers.h"
#include "mongo/s/grid.h"
#include "mongo/s/request_types/sharded_ddl_commands_gen.h"

namespace mongo {
namespace {

bool isCollectionSharded(OperationContext* opCtx, const NamespaceString& nss) {
    try {
        Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss);
        return true;
    } catch (ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
        // The collection is unsharded or doesn't exist
        return false;
    }
}  // namespace

bool renameIsAllowedOnNS(const NamespaceString& nss) {
    if (nss.isSystem()) {
        return nss.isLegalClientSystemNS(serverGlobalParams.featureCompatibility);
    }

    return !nss.isOnInternalDb();
}

RenameCollectionResponse renameCollectionLegacy(OperationContext* opCtx,
                                                const ShardsvrRenameCollection& request,
                                                const NamespaceString& fromNss) {
    const auto& toNss = request.getTo();

    auto fromDbDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
        opCtx, fromNss.db(), "renameCollection", DistLockManager::kDefaultLockTimeout));

    auto fromCollDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
        opCtx, fromNss.ns(), "renameCollection", DistLockManager::kDefaultLockTimeout));

    auto toCollDistLock = uassertStatusOK(DistLockManager::get(opCtx)->lock(
        opCtx, toNss.ns(), "renameCollection", DistLockManager::kDefaultLockTimeout));

    // Make sure that source and target collection are not sharded
    uassert(ErrorCodes::IllegalOperation,
            str::stream() << "source namespace '" << fromNss << "' must not be sharded",
            !isCollectionSharded(opCtx, fromNss));
    uassert(ErrorCodes::IllegalOperation,
            str::stream() << "cannot rename to sharded collection '" << toNss << "'",
            !isCollectionSharded(opCtx, toNss));

    RenameCollectionOptions options{request.getDropTarget(), request.getStayTemp()};
    validateAndRunRenameCollection(opCtx, fromNss, toNss, options);

    return RenameCollectionResponse(ChunkVersion::UNSHARDED());
}

class ShardsvrRenameCollectionCommand final : public TypedCommand<ShardsvrRenameCollectionCommand> {
public:
    using Request = ShardsvrRenameCollection;

@@ -132,19 +77,6 @@ public:

            opCtx->setAlwaysInterruptAtStepDownOrUp();

            FixedFCVRegion fixedFCVRegion(opCtx);

            const bool useNewPath =
                feature_flags::gShardingFullDDLSupport.isEnabled(*fixedFCVRegion);

            if (fromNss.db() != toNss.db()) {
                sharding_ddl_util::checkDbPrimariesOnTheSameShard(opCtx, fromNss, toNss);
            }

            if (!useNewPath) {
                return renameCollectionLegacy(opCtx, req, fromNss);
            }

            uassert(ErrorCodes::InvalidOptions,
                    str::stream() << Request::kCommandName
                                  << " must be called with majority writeConcern, got "
@@ -188,7 +188,6 @@ env.Library(
        'shard_cannot_refresh_due_to_locks_held_exception.cpp',
        'shard_id.cpp',
        'shard_invalidated_for_targeting_exception.cpp',
        'sharding_ddl_50_upgrade_downgrade.idl',
        'stale_exception.cpp',
        'type_collection_timeseries_fields.idl',
        'would_change_owning_shard_exception.cpp',
@@ -1,38 +0,0 @@
# Copyright (C) 2020-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#

global:
    cpp_namespace: "mongo::feature_flags"

feature_flags:
    featureFlagShardingFullDDLSupport:
        description: "Ensures extra guarantees on DDL operations under a sharded cluster."
        cpp_varname: gShardingFullDDLSupport
        default: true
        version: 5.0