Mirror of https://github.com/mongodb/mongo.git, synced 2024-12-01 09:32:32 +01:00
SERVER-58942 Remove sharding test code referring v4.4 starting from v5.1
parent 90052f7d16
commit 4900921ca8
@@ -127,10 +127,6 @@ function waitForFailpoint(hitFailpointStr, numTimes, timeout) {
  * TODO (SERVER-48114): Remove this function.
  */
 function enableCoordinateCommitReturnImmediatelyAfterPersistingDecision(st) {
-    if (jsTest.options().shardMixedBinVersions ||
-        jsTest.options().useRandomBinVersionsWithinReplicaSet)
-        return;
-
     st._rs.forEach(rs => {
         rs.nodes.forEach(node => {
             assert.commandWorked(node.getDB('admin').runCommand({
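The truncated runCommand above presumably turns on the failpoint of the same name on every node of every replica set in the cluster; a minimal sketch, assuming the standard configureFailPoint command and that the failpoint name matches the helper name:

function enableCoordinateCommitReturnImmediatelyAfterPersistingDecision(st) {
    st._rs.forEach(rs => {
        rs.nodes.forEach(node => {
            // Assumed failpoint name: makes the transaction coordinator return from
            // coordinateCommitTransaction as soon as the commit decision is persisted,
            // instead of waiting for it to be majority-acknowledged by all participants.
            assert.commandWorked(node.getDB('admin').runCommand({
                configureFailPoint: 'coordinateCommitReturnImmediatelyAfterPersistingDecision',
                mode: 'alwaysOn'
            }));
        });
    });
}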
@@ -37,17 +37,12 @@ runMoveChunkMakeDonorStepDownAfterFailpoint(st,
                                             false /* shouldMakeMigrationFailToCommitOnConfig */,
                                             ErrorCodes.OperationFailed);
 
-// After SERVER-47982 newer versions will fail with StaleEpoch instead of OperationFailed, which
-// might cause this test to fail on multiversion suite.
-//
-// TODO (SERVER-47265): moveChunk should only fail with StaleEpoch once SERVER-32198 is backported
-// to 4.4.
 runMoveChunkMakeDonorStepDownAfterFailpoint(
     st,
     dbName,
     "hangInEnsureChunkVersionIsGreaterThanThenSimulateErrorUninterruptible",
     true /* shouldMakeMigrationFailToCommitOnConfig */,
-    [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]);
+    ErrorCodes.StaleEpoch);
 
 runMoveChunkMakeDonorStepDownAfterFailpoint(
     st,
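The deleted comment explains why the last argument used to accept either error code: pre-SERVER-47982 binaries fail this moveChunk with OperationFailed, newer binaries with StaleEpoch. Assuming the helper forwards that argument to a command assertion, the shell's assert.commandFailedWithCode accepts either a single code or an array of acceptable codes, for example:

// res is assumed to be the result of the moveChunk command that is expected to fail.
assert.commandFailedWithCode(res, [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]);  // multiversion form
assert.commandFailedWithCode(res, ErrorCodes.StaleEpoch);                                // v5.1+ only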
@@ -93,24 +93,15 @@ function testKillOpAfterFailPoint(failPointName, opToKillThreadName) {
     assert.eq(st.s0.getDB(dbName).getCollection(collName).countDocuments({}), 1000);
 }
 
-// After SERVER-47982 all the failpoints are hit on the migration recovery, which is performed on
-// another thread which operation context is RecoverRefreshThread. To run this test on a
-// multiversion suite we have to also search for the previous name.
-//
-// TODO (SERVER-47265): operation context name should be RecoverRefreshThread once SERVER-32198 is
-// backported to 4.4
 testKillOpAfterFailPoint("hangInEnsureChunkVersionIsGreaterThanInterruptible",
-                         "(ensureChunkVersionIsGreaterThan)|(RecoverRefreshThread)");
+                         "RecoverRefreshThread");
 testKillOpAfterFailPoint("hangInRefreshFilteringMetadataUntilSuccessInterruptible",
-                         "(refreshFilteringMetadataUntilSuccess)|(RecoverRefreshThread)");
-testKillOpAfterFailPoint("hangInPersistMigrateCommitDecisionInterruptible",
-                         "(persist migrate commit decision)|(RecoverRefreshThread)");
+                         "RecoverRefreshThread");
+testKillOpAfterFailPoint("hangInPersistMigrateCommitDecisionInterruptible", "RecoverRefreshThread");
 testKillOpAfterFailPoint("hangInDeleteRangeDeletionOnRecipientInterruptible",
-                         "(cancel range deletion on recipient)|(RecoverRefreshThread)");
-testKillOpAfterFailPoint("hangInReadyRangeDeletionLocallyInterruptible",
-                         "(ready local range deletion)|(RecoverRefreshThread)");
-testKillOpAfterFailPoint("hangInAdvanceTxnNumInterruptible",
-                         "(advance migration txn number)|(RecoverRefreshThread)");
+                         "RecoverRefreshThread");
+testKillOpAfterFailPoint("hangInReadyRangeDeletionLocallyInterruptible", "RecoverRefreshThread");
+testKillOpAfterFailPoint("hangInAdvanceTxnNumInterruptible", "RecoverRefreshThread");
 
 st.stop();
 })();
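testKillOpAfterFailPoint's body is outside this hunk; its second argument is evidently matched against the description of the operation to kill, and the old regex alternations covered both the pre-SERVER-47982 thread name and RecoverRefreshThread. A hypothetical sketch of how such an operation is typically located and killed with the $currentOp aggregation stage and killOp (the lookup-by-desc details are an assumption, not the test's actual code):

function killOpMatchingDesc(conn, descRegex) {
    const adminDB = conn.getDB("admin");
    // Find the single in-progress operation whose description matches the expected thread name.
    const ops = adminDB
                    .aggregate([
                        {$currentOp: {allUsers: true, idleConnections: true}},
                        {$match: {desc: {$regex: descRegex}}}
                    ])
                    .toArray();
    assert.eq(1, ops.length, tojson(ops));
    assert.commandWorked(adminDB.killOp(ops[0].opid));
}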
@@ -47,7 +47,6 @@ assert.eq(res.version.i, 0);
 assert.eq(undefined, res.chunks);
 
 // When fullMetadata set to true, chunks should be included in the response
-// if the mongos version is v4.4.
 res = st.s.adminCommand({getShardVersion: ns, fullMetadata: true});
 assert.commandWorked(res);
 assert.eq(res.version.t, 1);
@@ -1,6 +1,5 @@
 /**
- * The failpoints used here are not defined in the previous release (4.4).
- * @tags: [multiversion_incompatible, does_not_support_stepdowns]
+ * @tags: [does_not_support_stepdowns]
  */
 (function() {
 'use strict';
@@ -3,8 +3,6 @@
 (function() {
 'use strict';
 
-const clusterInFCV44 = jsTestOptions().mongosBinVersion != 'last-lts';
-
 // start up a new sharded cluster
 var st = new ShardingTest({shards: 2, mongos: 1});
 
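The removed clusterInFCV44 flag gated assertions that only hold once shards persist range deletion state (FCV 4.4 and later). A hedged sketch of the kind of binary-version guard these suites use, built only from jsTestOptions() fields that appear elsewhere in this diff (the helper name itself is hypothetical):

// Hypothetical helper: true when some node in the suite may still run last-lts (4.4) binaries.
function suiteMayIncludeLastLTSBinaries() {
    const opts = jsTestOptions();
    return opts.mongosBinVersion == 'last-lts' || opts.shardMixedBinVersions ||
        opts.useRandomBinVersionsWithinReplicaSet;
}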
@@ -47,15 +45,12 @@ for (var i = 0; i < numChunks; i++) {
         mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: st.shard1.shardName}));
 }
 
-// Shards don't persist range deletion state in FCV < 4.4.
-if (clusterInFCV44) {
-    jsTest.log("Verifying that the donor still has the range deletion task docs...");
+jsTest.log("Verifying that the donor still has the range deletion task docs...");
 
-    // Range deletions are queued async of migrate thread.
-    let rangeDelDocs =
-        st.shard0.getDB("config").getCollection("rangeDeletions").find({nss: coll + ""}).toArray();
-    assert.eq(numChunks, rangeDelDocs.length, `rangeDelDocs: ${tojson(rangeDelDocs.length)}`);
-}
+// Range deletions are queued async of migrate thread.
+let rangeDelDocs =
+    st.shard0.getDB("config").getCollection("rangeDeletions").find({nss: coll + ""}).toArray();
+assert.eq(numChunks, rangeDelDocs.length, `rangeDelDocs: ${tojson(rangeDelDocs.length)}`);
 
 jsTest.log("Dropping and re-creating collection...");
 
@@ -70,15 +65,9 @@ assert.commandWorked(bulk.execute());
 jsTest.log("Allowing the range deletion tasks to be processed by closing the cursor...");
 cursor.close();
 
-// Shards don't persist range deletion state in FCV < 4.4.
-if (clusterInFCV44) {
-    assert.soon(() => {
-        return 0 ===
-            st.shard0.getDB("config").getCollection("rangeDeletions").count({nss: coll + ""});
-    });
-} else {
-    sleep(10 * 1000);
-}
+assert.soon(() => {
+    return 0 === st.shard0.getDB("config").getCollection("rangeDeletions").count({nss: coll + ""});
+});
 
 jsTest.log("Checking that the new collection's documents were not cleaned up...");
 
@@ -341,20 +341,6 @@ const failureModes = {
 
 for (const failureModeName in failureModes) {
     for (const type in transactionTypes) {
-        if (failureModeName.includes("participantCannotMajorityCommitWrites") &&
-            type.includes("ExpectTwoPhaseCommit") &&
-            (jsTestOptions().useRandomBinVersionsWithinReplicaSet ||
-             jsTestOptions().shardMixedBinVersions)) {
-            // In v4.4, the coordinator will also make an abort decision after timing out waiting
-            // for votes in these cases. However, coordinateCommitTransaction will not return as
-            // soon as the decision is made durable on the coordinator, instead it will wait for the
-            // decision to be majority-ack'd by all participants, which can't happen while one of
-            // the participants can't majority commit writes.
-            jsTest.log(
-                `${failureModeName} with ${type} is skipped since we're running v4.4 binaries`);
-            continue;
-        }
-
         txnNumber++;
         assert.lt(txnNumber,
                   MAX_TRANSACTIONS,