0
0
mirror of https://github.com/mongodb/mongo.git synced 2024-12-01 09:32:32 +01:00

SERVER-43892 ShardedClusterFixture always starts shards as replicasets

This commit is contained in:
Tommaso Tocci 2021-02-15 15:24:32 +01:00 committed by Evergreen Agent
parent 9f6cffd942
commit 40f2c1e8e9
10 changed files with 23 additions and 57 deletions

View File

@@ -42,7 +42,5 @@ executor:
transactionLifetimeLimitSeconds: 1
writePeriodicNoops: 1
verbose: ''
# TODO: SERVER-43927 Make jstestfuzz_sharded and jstestfuzz_sharded_session suite start
# shards as replica sets.
num_rs_nodes_per_shard: null
num_rs_nodes_per_shard: 1
num_shards: 2

View File

@@ -39,7 +39,5 @@ executor:
transactionLifetimeLimitSeconds: 1
writePeriodicNoops: 1
verbose: ''
# TODO: SERVER-43927 Make jstestfuzz_sharded and jstestfuzz_sharded_session suite start
# shards as replica sets.
num_rs_nodes_per_shard: null
num_rs_nodes_per_shard: 1
num_shards: 2

View File

@@ -79,8 +79,6 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
# TODO: SERVER-43892 Make sharding_jscore_op_query_passthrough and sharding_jscore_passthrough start
# shards as replica sets by default.
num_rs_nodes_per_shard: null
num_rs_nodes_per_shard: 1
enable_sharding:
- test

View File

@@ -79,8 +79,6 @@ executor:
mongod_options:
set_parameters:
enableTestCommands: 1
# TODO: SERVER-43892 Make sharding_jscore_op_query_passthrough and sharding_jscore_passthrough start
# shards as replica sets by default.
num_rs_nodes_per_shard: null
num_rs_nodes_per_shard: 1
enable_sharding:
- test

View File

@@ -79,8 +79,6 @@ executor:
set_parameters:
enableTestCommands: 1
internalQueryEnableLoggingV2OplogEntries: false
# TODO: SERVER-43892 Make sharding_jscore_op_query_passthrough and sharding_jscore_passthrough start
# shards as replica sets by default.
num_rs_nodes_per_shard: null
num_rs_nodes_per_shard: 1
enable_sharding:
- test

View File

@@ -59,7 +59,14 @@ class ShardedClusterFixture(interface.Fixture):  # pylint: disable=too-many-inst
self.shard_options = utils.default_if_none(shard_options, {})
self.mixed_bin_versions = utils.default_if_none(mixed_bin_versions,
config.MIXED_BIN_VERSIONS)
if self.mixed_bin_versions is not None and num_rs_nodes_per_shard is not None:
if self.num_rs_nodes_per_shard is None:
raise TypeError("num_rs_nodes_per_shard must be an integer but found None")
elif isinstance(self.num_rs_nodes_per_shard, int):
if self.num_rs_nodes_per_shard <= 0:
raise ValueError("num_rs_nodes_per_shard must be a positive integer")
if self.mixed_bin_versions is not None:
num_mongods = self.num_shards * self.num_rs_nodes_per_shard
if len(self.mixed_bin_versions) != num_mongods:
msg = (("The number of binary versions specified: {} do not match the number of"\
@@ -96,14 +103,7 @@ class ShardedClusterFixture(interface.Fixture):  # pylint: disable=too-many-inst
if not self.shards:
for i in range(self.num_shards):
if self.num_rs_nodes_per_shard is None:
shard = self._new_standalone_shard(i)
elif isinstance(self.num_rs_nodes_per_shard, int):
if self.num_rs_nodes_per_shard <= 0:
raise ValueError("num_rs_nodes_per_shard must be a positive integer")
shard = self._new_rs_shard(i, self.num_rs_nodes_per_shard)
else:
raise TypeError("num_rs_nodes_per_shard must be an integer or None")
shard = self._new_rs_shard(i, self.num_rs_nodes_per_shard)
self.shards.append(shard)
# Start up each of the shards
@@ -169,8 +169,7 @@ class ShardedClusterFixture(interface.Fixture):  # pylint: disable=too-many-inst
primary.admin.command({"refreshLogicalSessionCacheNow": 1})
for shard in self.shards:
primary = (shard.mongo_client() if self.num_rs_nodes_per_shard is None else
shard.get_primary().mongo_client())
primary = shard.get_primary().mongo_client()
primary.admin.command({"refreshLogicalSessionCacheNow": 1})
def _auth_to_db(self, client):
@@ -317,25 +316,6 @@ class ShardedClusterFixture(interface.Fixture):  # pylint: disable=too-many-inst
replset_config_options=replset_config_options, mixed_bin_versions=mixed_bin_versions,
shard_logging_prefix=shard_logging_prefix, **shard_options)
def _new_standalone_shard(self, index):
"""Return a standalone.MongoDFixture configured as a shard in a sharded cluster."""
mongod_logger = logging.loggers.new_fixture_node_logger(
self.__class__.__name__, self.job_num, "shard{}".format(index))
shard_options = self.shard_options.copy()
preserve_dbpath = shard_options.pop("preserve_dbpath", self.preserve_dbpath)
mongod_options = self.mongod_options.copy()
mongod_options.update(shard_options.pop("mongod_options", {}))
mongod_options["shardsvr"] = ""
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard{}".format(index))
return standalone.MongoDFixture(mongod_logger, self.job_num, mongod_options=mongod_options,
mongod_executable=self.mongod_executable,
preserve_dbpath=preserve_dbpath, **shard_options)
def _new_mongos(self, index, total):
"""
Return a _MongoSFixture configured to be used as the mongos for a sharded cluster.

View File

@@ -4559,7 +4559,7 @@ tasks:
## jstestfuzz concurrent sharded cluster ##
- <<: *jstestfuzz_template
name: jstestfuzz_concurrent_sharded_gen
tags: ["jstestfuzz", "common"]
tags: ["jstestfuzz", "common", "sharding"]
commands:
- func: "generate fuzzer tasks"
vars:
@@ -4789,7 +4789,7 @@ tasks:
## jstestfuzz sharded cluster ##
- <<: *jstestfuzz_template
name: jstestfuzz_sharded_gen
tags: ["jstestfuzz", "common"]
tags: ["jstestfuzz", "common", "sharding"]
commands:
- func: "generate fuzzer tasks"
vars:
@@ -8594,9 +8594,8 @@ buildvariants:
- name: concurrency_simultaneous
- name: disk_wiredtiger
- name: failpoints_auth
- name: .jscore .common !.decimal !.txns
- name: .jstestfuzz .common !.repl
- name: sharding_jscore_passthrough
- name: .jscore .common !.sharding !.decimal !.txns
- name: .jstestfuzz .common !.sharding !.repl
- name: ubuntu1804
display_name: Ubuntu 18.04

View File

@@ -179,9 +179,9 @@ explain = t.find().maxTimeMS(200).explain();
assert.commandWorked(explain);
// .readPref()
explain = t.explain().find().readPref("secondary").finish();
explain = t.explain().find().readPref("secondaryPreferred").finish();
assert.commandWorked(explain);
explain = t.find().readPref("secondary").explain();
explain = t.find().readPref("secondaryPreferred").explain();
assert.commandWorked(explain);
// .comment()

View File

@@ -1,7 +1,7 @@
// Test that attempting to read after optime fails if replication is not enabled.
// @tags: [
// multiversion_incompatible,
// assumes_standalone_mongod
// assumes_standalone_mongod,
// ]
(function() {

View File

@@ -7,9 +7,6 @@
// assumes_write_concern_unchanged,
// requires_non_retryable_writes,
// requires_fastcount,
// # TODO (SERVER-43892): We can enable this test in the multiversion passthrough, which starts
// # shards as replica sets, once the test can be run against replica set shards.
// multiversion_incompatible,
// ]
//
@@ -186,7 +183,7 @@ coll.unsetWriteConcern();
coll.remove({});
var wRes = assert.writeError(coll.insert({foo: "bar"}, {writeConcern: {w: "invalid"}}));
var res = assert.commandWorked(db.hello());
var replSet = res.hasOwnProperty("setName");
var replSet = res.hasOwnProperty("$clusterTime");
if (!replSet && coll.getMongo().writeMode() == "commands")
assert.eq(coll.count(), 0, "not-replset || command mode");
else // compatibility,