SERVER-44818 Change the balancerCollectionStatus command response format
parent fe1ab54ab2
commit 2b70f6f0d7
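This change replaces the single status string in the balancerCollectionStatus reply with a balancerCompliant boolean plus an optional firstComplianceViolation string (one of draining, zoneViolation or chunksImbalance). A rough before/after sketch of the reply shape, using hypothetical values rather than output taken from this commit:

// Hypothetical mongo shell session against a mongos; 'db.col' is a placeholder namespace.
// Old reply shape for an imbalanced collection:
//     { status: "chunksImbalance", ok: 1 }
// New reply shape for the same situation:
//     { balancerCompliant: false, firstComplianceViolation: "chunksImbalance", ok: 1 }
// New reply shape for a balanced collection (the violation field is simply omitted):
//     { balancerCompliant: true, ok: 1 }
var res = assert.commandWorked(db.adminCommand({balancerCollectionStatus: 'db.col'}));
if (!res.balancerCompliant) {
    print('next violation to address: ' + res.firstComplianceViolation);
}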
@@ -44,7 +44,7 @@ assert.commandFailedWithCode(st.s0.adminCommand({balancerCollectionStatus: 'db.c
 var result = assert.commandWorked(st.s0.adminCommand({balancerCollectionStatus: 'db.col'}));
 
 // new collections must be balanced
-assert.eq(result.status, 'balanced');
+assert.eq(result.balancerCompliant, true);
 
 // get shardIds
 var shards = st.s0.getDB('config').shards.find().toArray();
@@ -60,7 +60,8 @@ assert.commandWorked(st.s0.adminCommand({moveChunk: 'db.col', find: {key: 20}, t
 result = assert.commandWorked(st.s0.adminCommand({balancerCollectionStatus: 'db.col'}));
 
 // chunksImbalanced expected
-assert.eq(result.status, 'chunksImbalance');
+assert.eq(result.balancerCompliant, false);
+assert.eq(result.firstComplianceViolation, 'chunksImbalance');
 
 // run balancer with 3 rounds
 runBalancer(3);
@@ -68,7 +69,7 @@ runBalancer(3);
 // the chunks must be balanced now
 result = assert.commandWorked(st.s0.adminCommand({balancerCollectionStatus: 'db.col'}));
 
-assert.eq(result.status, 'balanced');
+assert.eq(result.balancerCompliant, true);
 
 // manually move a chunk to a shard before creating zones (this will help
 // testing the zone violation)
@@ -86,7 +87,8 @@ assert.commandWorked(st.s0.adminCommand(
 result = assert.commandWorked(st.s0.adminCommand({balancerCollectionStatus: 'db.col'}));
 
 // having a chunk on a different zone will cause a zone violation
-assert.eq(result.status, 'zoneViolation');
+assert.eq(result.balancerCompliant, false);
+assert.eq(result.firstComplianceViolation, 'zoneViolation');
 
 // run balancer, we don't know exactly where the first run moved the chunks
 // so lets run 3 rounds just in case
@@ -95,8 +97,8 @@ runBalancer(3);
 // the chunks must be balanced now
 result = assert.commandWorked(st.s0.adminCommand({balancerCollectionStatus: 'db.col'}));
 
-// final check: all chunks are balanced and in the correct zone
-assert.eq(result.status, 'balanced');
+// All chunks are balanced and in the correct zone
+assert.eq(result.balancerCompliant, true);
 
 st.stop();
 })();
@@ -83,7 +83,6 @@ const auto getBalancer = ServiceContext::declareDecoration<std::unique_ptr<Balan
 /**
  * Balancer status response
  */
-static constexpr StringData kBalancerPolicyStatusBalanced = "balanced"_sd;
 static constexpr StringData kBalancerPolicyStatusDraining = "draining"_sd;
 static constexpr StringData kBalancerPolicyStatusZoneViolation = "zoneViolation"_sd;
 static constexpr StringData kBalancerPolicyStatusChunksImbalance = "chunksImbalance"_sd;
@@ -700,27 +699,28 @@ void Balancer::notifyPersistedBalancerSettingsChanged() {
     _condVar.notify_all();
 }
 
-StringData Balancer::getBalancerStatusForNs(OperationContext* opCtx, const NamespaceString& ns) {
+Balancer::BalancerStatus Balancer::getBalancerStatusForNs(OperationContext* opCtx,
+                                                          const NamespaceString& ns) {
     auto splitChunks = uassertStatusOK(_chunkSelectionPolicy->selectChunksToSplit(opCtx, ns));
     if (!splitChunks.empty()) {
-        return kBalancerPolicyStatusZoneViolation;
+        return {false, kBalancerPolicyStatusZoneViolation.toString()};
     }
     auto chunksToMove = uassertStatusOK(_chunkSelectionPolicy->selectChunksToMove(opCtx, ns));
     if (chunksToMove.empty()) {
-        return kBalancerPolicyStatusBalanced;
+        return {true, boost::none};
     }
     const auto& migrationInfo = chunksToMove.front();
 
     switch (migrationInfo.reason) {
         case MigrateInfo::drain:
-            return kBalancerPolicyStatusDraining;
+            return {false, kBalancerPolicyStatusDraining.toString()};
         case MigrateInfo::zoneViolation:
-            return kBalancerPolicyStatusZoneViolation;
+            return {false, kBalancerPolicyStatusZoneViolation.toString()};
         case MigrateInfo::chunksImbalance:
-            return kBalancerPolicyStatusChunksImbalance;
+            return {false, kBalancerPolicyStatusChunksImbalance.toString()};
     }
 
-    return kBalancerPolicyStatusBalanced;
+    return {true, boost::none};
 }
 
 }  // namespace mongo
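Balancer::getBalancerStatusForNs now returns the small BalancerStatus aggregate declared in the Balancer class below instead of a bare StringData, which is why each early return above becomes a brace-initialized pair such as {false, kBalancerPolicyStatusDraining.toString()}. The draining reason is mapped here but not exercised by the jstest hunks in this commit; the sketch below shows how such a check could look in the same test style. It is a hypothetical addition, not part of the commit, and it assumes the chosen shard still holds chunks of 'db.col' and that the balancer has not yet drained them.

// Hypothetical extra check for the 'draining' violation, in the style of the test above.
// Assumes 'st' is the ShardingTest fixture and the balancer is stopped.
var drainingShard = st.s0.getDB('config').shards.find().toArray()[1]._id;
assert.commandWorked(st.s0.adminCommand({removeShard: drainingShard}));

result = assert.commandWorked(st.s0.adminCommand({balancerCollectionStatus: 'db.col'}));
assert.eq(result.balancerCompliant, false);
assert.eq(result.firstComplianceViolation, 'draining');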
@@ -149,11 +149,15 @@ public:
      */
     void notifyPersistedBalancerSettingsChanged();
 
+    struct BalancerStatus {
+        bool balancerCompliant;
+        boost::optional<std::string> firstComplianceViolation;
+    };
     /**
      * Returns if a given collection is draining due to a removed shard, has chunks on an invalid
      * zone or the number of chunks is imbalanced across the cluster
      */
-    StringData getBalancerStatusForNs(OperationContext* opCtx, const NamespaceString& nss);
+    BalancerStatus getBalancerStatusForNs(OperationContext* opCtx, const NamespaceString& nss);
 
 private:
     /**
@@ -75,8 +75,14 @@ public:
         uassert(ErrorCodes::InvalidNamespace,
                 str::stream() << "Invalid namespace specified '" << nss.ns() << "'",
                 nss.isValid());
 
-        return Response(Balancer::get(opCtx)->getBalancerStatusForNs(opCtx, nss).toString());
+        const auto& balancerStatus = Balancer::get(opCtx)->getBalancerStatusForNs(opCtx, nss);
+        Response response(balancerStatus.balancerCompliant);
+        response.setFirstComplianceViolation(
+            balancerStatus.firstComplianceViolation.is_initialized()
+                ? boost::optional<StringData>(StringData(*balancerStatus.firstComplianceViolation))
+                : boost::optional<StringData>(boost::none));
+        return response;
     }
 
 private:
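The command handler now builds the Response from the BalancerStatus struct and only sets firstComplianceViolation when the boost::optional is engaged; combined with optional: true in the IDL change below, a compliant collection's reply should carry no violation field at all. A small, assumed shell-level check of that behaviour:

// Assumed behaviour: for a compliant collection the optional field is absent from the
// reply rather than present with an empty value.
var res = assert.commandWorked(st.s0.adminCommand({balancerCollectionStatus: 'db.col'}));
if (res.balancerCompliant) {
    assert(!res.hasOwnProperty('firstComplianceViolation'));
}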
@@ -39,9 +39,13 @@ structs:
         description: "Response of the config server command"
         strict: false
         fields:
-            status:
+            balancerCompliant:
+                type: bool
+                description: "true if there are no actions needed, if false, then firstComplianceViolation will contain the violation with the highest priority which will be addressed next"
+            firstComplianceViolation:
                 type: string
-                description: "One of the following: balanced, draining, zoneViolation or chunksImbalance"
+                optional: true
+                description: "One of the following: draining, zoneViolation or chunksImbalance"
 
 commands:
     balancerCollectionStatus: