mirror of https://github.com/mongodb/mongo.git synced 2024-12-01 09:32:32 +01:00

SERVER-73028 Add index supporting automerger query on config.chunks

This commit is contained in:
Pierlauro Sciarelli 2023-03-07 11:10:28 +00:00 committed by Evergreen Agent
parent 2ff107f947
commit 7208ec2715
4 changed files with 41 additions and 38 deletions

View File

@@ -115,29 +115,18 @@ var testListConfigChunksIndexes = function(st) {
     // This test depends on all the indexes in the configChunksIndexes being the exact indexes
     // in the config chunks collection.
     var configDB = st.s.getDB("config");
-    var configChunksIndexes = (function() {
-        if (configDB.collections.findOne({_id: "config.system.sessions"}).timestamp) {
-            return ["_id_", "uuid_1_lastmod_1", "uuid_1_min_1", "uuid_1_shard_1_min_1"];
-        } else {
-            return ["_id_", "ns_1_lastmod_1", "ns_1_min_1", "ns_1_shard_1_min_1"];
-        }
-    }());
-    var cursor;
-    var cursorArray = [];
+    var expectedConfigChunksIndexes =
+        ["_id_", "uuid_1_lastmod_1", "uuid_1_min_1", "uuid_1_shard_1_min_1"];
+    const foundIndexesArray = getListIndexesCursor(configDB.chunks).toArray();
+    // TODO SERVER-74573 always consider new index once 7.0 branches out
+    if (foundIndexesArray.length == expectedConfigChunksIndexes.length + 1) {
+        // CSRS nodes in v7.0 create a new index on config.chunks. Since the creation is not
+        // FCV-gated, this code is handling mixed binaries scenarios
+        expectedConfigChunksIndexes.push("uuid_1_shard_1_onCurrentShardSince_1");
+    }
 
-    cursor = getListIndexesCursor(configDB.chunks);
-    assert.eq(cursorGetIndexNames(cursor), configChunksIndexes);
-
-    cursor = getListIndexesCursor(configDB.chunks, {cursor: {batchSize: 2}}, 2);
-    assert.eq(cursor.objsLeftInBatch(), 2);
-    cursorArray.push(cursor.next());
-    cursorArray.push(cursor.next());
-    assert(cursor.hasNext());
-    assert.eq(cursor.objsLeftInBatch(), 2);
-    cursorArray.push(cursor.next());
-    cursorArray.push(cursor.next());
-    assert(!cursor.hasNext());
-    assert.eq(arrayGetNames(sortArrayByName(cursorArray)), configChunksIndexes);
+    assert.eq(foundIndexesArray.length, expectedConfigChunksIndexes.length);
+    assert.eq(arrayGetNames(sortArrayByName(foundIndexesArray)), expectedConfigChunksIndexes);
 };
/**
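As a side note (not part of this change), the index-name check performed by the rewritten test can be reproduced from a mongosh session; this is a minimal sketch assuming a v7.0 sharded cluster where the new index has already been built:

// Minimal mongosh sketch (assumption: connected to a v7.0 sharded cluster).
// Lists the config.chunks index names, mirroring what the test's
// getListIndexesCursor / sortArrayByName / arrayGetNames helpers compute.
const names = db.getSiblingDB("config").chunks.getIndexes()
                  .map(ix => ix.name)
                  .sort();
// Expected: ["_id_", "uuid_1_lastmod_1", "uuid_1_min_1",
//            "uuid_1_shard_1_min_1", "uuid_1_shard_1_onCurrentShardSince_1"]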

View File

@@ -217,7 +217,7 @@ Status createIndexesForConfigChunks(OperationContext* opCtx) {
         BSON(ChunkType::collectionUUID() << 1 << ChunkType::min() << 1),
         unique);
     if (!result.isOK()) {
-        return result.withContext("couldn't create uuid_1_min_1 index on config db");
+        return result.withContext("couldn't create uuid_1_min_1 index on config.chunks");
     }
 
     result = createIndexOnConfigCollection(
@@ -226,7 +226,7 @@ Status createIndexesForConfigChunks(OperationContext* opCtx) {
         BSON(ChunkType::collectionUUID() << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1),
         unique);
     if (!result.isOK()) {
-        return result.withContext("couldn't create uuid_1_shard_1_min_1 index on config db");
+        return result.withContext("couldn't create uuid_1_shard_1_min_1 index on config.chunks");
     }
 
     result = createIndexOnConfigCollection(
@@ -235,7 +235,18 @@ Status createIndexesForConfigChunks(OperationContext* opCtx) {
         BSON(ChunkType::collectionUUID() << 1 << ChunkType::lastmod() << 1),
         unique);
     if (!result.isOK()) {
-        return result.withContext("couldn't create uuid_1_lastmod_1 index on config db");
+        return result.withContext("couldn't create uuid_1_lastmod_1 index on config.chunks");
     }
 
+    result = createIndexOnConfigCollection(opCtx,
+                                           ChunkType::ConfigNS,
+                                           BSON(ChunkType::collectionUUID()
+                                                << 1 << ChunkType::shard() << 1
+                                                << ChunkType::onCurrentShardSince() << 1),
+                                           false /* unique */);
+    if (!result.isOK()) {
+        return result.withContext(
+            "couldn't create uuid_1_shard_1_onCurrentShardSince_1 index on config.chunks");
+    }
+
     return Status::OK();
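For readers following along in the shell, the new createIndexOnConfigCollection call above is roughly equivalent to the following mongosh sketch run against the config server (an illustration, not code from the commit):

// Rough mongosh equivalent of the index added above (assumption: direct
// access to the config server). The index is non-unique and keyed so the
// automerger's query on (uuid, shard, onCurrentShardSince) can use it.
db.getSiblingDB("config").chunks.createIndex(
    {uuid: 1, shard: 1, onCurrentShardSince: 1},
    {name: "uuid_1_shard_1_onCurrentShardSince_1", unique: false});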

View File

@@ -1080,18 +1080,18 @@ ShardingCatalogManager::commitMergeAllChunksOnShard(OperationContext* opCtx,
     const auto oldestTimestampSupportedForHistory =
         getOldestTimestampSupportedForSnapshotHistory(opCtx);
     const auto chunksBelongingToShard =
-        uassertStatusOK(
-            _localConfigShard->exhaustiveFindOnConfig(
-                opCtx,
-                ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-                repl::ReadConcernLevel::kLocalReadConcern,
-                ChunkType::ConfigNS,
-                BSON(ChunkType::collectionUUID
-                     << collUuid << ChunkType::shard(shardId.toString()) << ChunkType::jumbo
-                     << BSON("$ne" << true) << ChunkType::onCurrentShardSince
-                     << BSON("$lt" << oldestTimestampSupportedForHistory)),
-                BSON(ChunkType::min << 1) /* sort */,
-                boost::none /* limit */))
+        uassertStatusOK(_localConfigShard->exhaustiveFindOnConfig(
+                            opCtx,
+                            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
+                            repl::ReadConcernLevel::kLocalReadConcern,
+                            ChunkType::ConfigNS,
+                            BSON(ChunkType::collectionUUID
+                                 << collUuid << ChunkType::shard(shardId.toString())
+                                 << ChunkType::onCurrentShardSince
+                                 << BSON("$lt" << oldestTimestampSupportedForHistory)
+                                 << ChunkType::jumbo << BSON("$ne" << true)),
+                            BSON(ChunkType::min << 1) /* sort */,
+                            boost::none /* limit */))
             .docs;
 
     // 3. Prepare the data for the merge.
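The predicate is rewritten so that uuid, shard and onCurrentShardSince appear first, mirroring the key of the new uuid_1_shard_1_onCurrentShardSince_1 index. In mongosh terms the automerger query is roughly the following sketch, where collUuid, shardId and oldestTimestampSupportedForHistory are placeholders for the runtime values:

// Approximate shell form of the automerger's find on config.chunks
// (collUuid, shardId and oldestTimestampSupportedForHistory are placeholders).
db.getSiblingDB("config").chunks.find({
    uuid: collUuid,
    shard: shardId,
    onCurrentShardSince: {$lt: oldestTimestampSupportedForHistory},
    jumbo: {$ne: true}
}).sort({min: 1});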

View File

@@ -275,7 +275,10 @@ TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) {
             << "unique" << true),
        BSON("v" << 2 << "key" << BSON("uuid" << 1 << "lastmod" << 1) << "name"
             << "uuid_1_lastmod_1"
-            << "unique" << true)};
+            << "unique" << true),
+        BSON("v" << 2 << "key" << BSON("uuid" << 1 << "shard" << 1 << "onCurrentShardSince" << 1)
+             << "name"
+             << "uuid_1_shard_1_onCurrentShardSince_1")};
 
     auto expectedShardsIndexes = std::vector<BSONObj>{
         BSON("v" << 2 << "key" << BSON("_id" << 1) << "name"