
SERVER-44165 Remove the config.rangeDeletions collection on downgrade

Alex Taskov 2019-12-03 21:15:06 +00:00 committed by evergreen
parent bc4cc10370
commit 9da2f6624c
4 changed files with 63 additions and 0 deletions
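For context, config.rangeDeletions is the shard-local store of pending range deletion tasks introduced by the resumable chunk migration work in development at the time; the last-stable binary does not use it, so the downgrade path added here drops it. A minimal sketch of a task document, mirroring the fields the new test builds by hand (values are illustrative placeholders):

// Illustrative shape of a config.rangeDeletions task document (placeholder values;
// compare with the deletionTask the test below constructs).
const exampleRangeDeletionTask = {
    _id: UUID(),                              // unique task id
    nss: "test.foo",                          // namespace the orphaned range belongs to
    collectionUuid: UUID(),                   // UUID of that collection
    donorShardId: "shard0000",                // shard that donated the chunk
    range: {min: {x: 50}, max: {x: MaxKey}},  // key range whose documents should be deleted
    whenToClean: "now"                        // when the range deleter may act on the task
};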

@@ -0,0 +1,51 @@
(function() {
"use strict";
load("jstests/libs/uuid_util.js");
const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
const rangeDeletionNs = "config.rangeDeletions";
// Create 2 shards with 3 replicas each.
let st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});
// Create a sharded collection with two chunks: [-inf, 50), [50, inf)
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 50}}));
// Pause range deletion.
let originalShard0Primary = st.rs0.getPrimary();
assert.commandWorked(originalShard0Primary.adminCommand(
    {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'}));
// Build a range deletion task covering the [50, MaxKey) chunk.
let deletionTask = {
_id: UUID(),
nss: ns,
collectionUuid: UUID(),
donorShardId: "unused",
range: {min: {x: 50}, max: {x: MaxKey}},
whenToClean: "now"
};
let deletionsColl = st.shard0.getCollection(rangeDeletionNs);
// Write the task directly to the shard's range deletion collection.
assert.commandWorked(deletionsColl.insert(deletionTask));
// Verify the task document was written.
assert.eq(deletionsColl.find().itcount(), 1);
print("setting fcv: " + lastStableFCV);
assert.commandWorked(
st.s.getDB("admin").runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
checkFCV(st.shard0.getDB("admin"), lastStableFCV);
// The downgrade should have dropped config.rangeDeletions, so no documents remain.
assert.eq(deletionsColl.find().itcount(), 0);
st.stop();
})();
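checkFCV and lastStableFCV come from jstests/libs/feature_compatibility_version.js (loaded above). Outside the test harness, the same check can be made by hand from the mongo shell; a hedged sketch, assuming a running sharded cluster, illustrative host names, and that last-stable was 4.2 when this commit landed:

// Hedged manual equivalent of the test's final assertions (hosts are illustrative).
const mongosConn = new Mongo("localhost:27017");        // mongos
const shardPrimaryConn = new Mongo("localhost:27018");  // a shard's primary

// Downgrade the feature compatibility version through mongos.
assert.commandWorked(
    mongosConn.getDB("admin").runCommand({setFeatureCompatibilityVersion: "4.2"}));

// The downgrade drops the task store on every shard, so it should be gone.
const shardConfigDB = shardPrimaryConn.getDB("config");
assert(!shardConfigDB.getCollectionNames().includes("rangeDeletions"));
assert.eq(shardConfigDB.rangeDeletions.find().itcount(), 0);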

@@ -45,6 +45,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/s/active_shard_collection_registry.h"
#include "mongo/db/s/config/sharding_catalog_manager.h"
#include "mongo/db/s/migration_util.h"
#include "mongo/db/server_options.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/type_collection.h"
@@ -234,6 +235,9 @@ public:
        }

        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
            LOG(0) << "Downgrade: dropping config.rangeDeletions collection";
            migrationutil::dropRangeDeletionsCollection(opCtx);

            // The primary shard sharding a collection will write the initial chunks for a
            // collection directly to the config server, so wait for all shard collections to
            // complete to guarantee no chunks are missed by the update on the config server.

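The added branch logs before dropping the collection. In a jstest, the effect of this hunk can also be checked against the shard primary's log with the standard checkLog shell helper; a minimal sketch, reusing the ShardingTest handle st from the test above:

// Hedged sketch: after the setFeatureCompatibilityVersion downgrade, the shard
// primary should have emitted the log line added in this hunk.
checkLog.contains(st.rs0.getPrimary(),
                  "Downgrade: dropping config.rangeDeletions collection");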
@@ -198,6 +198,12 @@ void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) {
    });
}

void dropRangeDeletionsCollection(OperationContext* opCtx) {
    DBDirectClient client(opCtx);
    client.dropCollection(NamespaceString::kRangeDeletionNamespace.toString(),
                          WriteConcerns::kMajorityWriteConcern);
}

void persistMigrationCoordinatorLocally(OperationContext* opCtx,
                                        const MigrationCoordinatorDocument& migrationDoc) {
    PersistentTaskStore<MigrationCoordinatorDocument> store(

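The dropRangeDeletionsCollection helper above issues the drop locally through DBDirectClient and waits for majority write concern, so the drop is durable across a failover during the downgrade. A rough shell-level equivalent of what it does on each shard (illustrative only; the server performs this internally):

// Rough shell-level equivalent of the helper, run against a shard primary
// (assumes config.rangeDeletions currently exists; 'drop' errors otherwise).
const configDB = db.getSiblingDB("config");
assert.commandWorked(
    configDB.runCommand({drop: "rangeDeletions", writeConcern: {w: "majority"}}));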
@@ -84,6 +84,8 @@ void submitPendingDeletions(OperationContext* opCtx);
// Asynchronously calls submitPendingDeletions using the fixed executor pool.
void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext);

void dropRangeDeletionsCollection(OperationContext* opCtx);

/**
 * Writes the migration coordinator document to config.migrationCoordinators and waits for majority
 * write concern.