Mirror of https://github.com/mongodb/mongo.git

SERVER-38165 Enable transactions testing with the inMemory storage engine

Author: Dianna, 2019-04-17 11:44:14 -04:00
parent b10765d357
commit 96bf04156c
9 changed files with 52 additions and 23 deletions
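
The net effect of the change: transaction test suites now run whenever the storage engine supports snapshot reads; persistence is no longer required, which is what lets the inMemory engine participate. A minimal mongo shell sketch of the gate that remains after this commit (the field names come from the diffs below; conn is an assumed connection, so treat this as illustrative only):

const storageEngine = conn.getDB("admin").serverStatus().storageEngine;
if (!storageEngine.supportsSnapshotReadConcern) {
    // Engines without snapshot read concern still skip transaction testing.
    print("Skipping transaction testing on '" + storageEngine.name + "'");
} else {
    // Before this commit, a second check also skipped non-persistent engines
    // such as inMemory; that check is removed in the hunks below.
}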

View File

@ -34,11 +34,6 @@ class CheckReplDBHashInBackground(jsfile.JSHook):
"Not enabling the background thread because '%s' storage engine"
" doesn't support snapshot reads.", server_status["storageEngine"]["name"])
return
if not server_status["storageEngine"].get("persistent", False):
self.logger.info(
"Not enabling the background thread because '%s' storage engine"
" is not persistent.", server_status["storageEngine"]["name"])
return
self._background_job = _BackgroundJob()
self.logger.info("Starting the background thread.")

View File

@ -9814,7 +9814,7 @@ buildvariants:
# spawning a large number of linker processes.
num_scons_link_jobs_available: $(( $(grep -c ^processor /proc/cpuinfo) / 4 ))
python: '/cygdrive/c/python/python36/python.exe'
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling,uses_transactions
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling
ext: zip
use_scons_cache: true
multiversion_platform: windows
@ -9859,6 +9859,7 @@ buildvariants:
- name: read_concern_linearizable_passthrough
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: replica_sets_multi_stmt_txn_jscore_passthrough
- name: sasl
- name: sharded_causally_consistent_jscore_txns_passthrough_gen
- name: sharded_collections_causally_consistent_jscore_txns_passthrough
@ -13016,7 +13017,7 @@ buildvariants:
- rhel62-small
batchtime: 1440 # 1 day
expansions: &enterprise-rhel-62-64-bit-inmem-expansions
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling,uses_transactions
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling
compile_flags: --ssl MONGO_DISTMOD=rhel62 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars
multiversion_platform: rhel62
multiversion_edition: enterprise
@ -13063,6 +13064,7 @@ buildvariants:
- name: concurrency_replication_causal_consistency
distros:
- rhel62-large # Some workloads require a lot of memory, use a bigger machine for this suite.
- name: concurrency_replication_multi_stmt_txn
- name: concurrency_sharded_replication
distros:
- rhel62-large # Some workloads require a lot of memory, use a bigger machine for this suite.
@ -13117,6 +13119,7 @@ buildvariants:
- name: jsCore_compatibility
- name: jsCore_op_query
- name: jsCore_txns
- name: jsCore_txns_multi_oplog_entries
- name: causally_consistent_jscore_txns_passthrough
- name: aggregation_multiversion_fuzzer_gen
- name: aggregation_wildcard_fuzzer_gen
@ -13149,6 +13152,10 @@ buildvariants:
- name: logical_session_cache_standalone_10sec_refresh_jscore_passthrough_gen
- name: logical_session_cache_standalone_default_refresh_jscore_passthrough_gen
- name: mongosTest
- name: multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough_gen
- name: multi_shard_multi_stmt_txn_jscore_passthrough_gen
- name: multi_stmt_txn_jscore_passthrough_with_migration_gen
- name: multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough_gen
- name: noPassthrough_gen
- name: noPassthroughWithMongod_gen
- name: parallel_gen
@ -13158,6 +13165,18 @@ buildvariants:
- name: replica_sets
- name: replica_sets_auth_gen
- name: replica_sets_jscore_passthrough
- name: replica_sets_multi_oplog_txns_gen
- name: replica_sets_multi_oplog_txns_jscore_passthrough
- name: replica_sets_multi_stmt_txn_jscore_passthrough
- name: replica_sets_multi_stmt_txn_stepdown_jscore_passthrough_gen
distros:
- rhel62-large
- name: replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough
distros:
- rhel62-large
- name: replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough
distros:
- rhel62-large
- name: retryable_writes_jscore_passthrough_gen
- name: retryable_writes_jscore_stepdown_passthrough
- name: rollback_fuzzer_gen
@ -13168,6 +13187,9 @@ buildvariants:
- name: sharded_jscore_txns
- name: sharded_jscore_txns_sharded_collections
- name: sharded_collections_jscore_passthrough
- name: sharded_multi_stmt_txn_jscore_passthrough
distros:
- rhel62-large
- name: sharding_gen
- name: sharding_auth_gen
- name: sharding_auth_audit_gen
@ -13541,7 +13563,7 @@ buildvariants:
# We need to compensate for SMT8 setting the cpu count very high and lower the amount of parallelism down
compile_flags: --dbg=on --opt=on --ssl MONGO_DISTMOD=rhel71 -j$(echo "$(grep -c processor /proc/cpuinfo)/2" | bc) CCFLAGS="-mcpu=power8 -mtune=power8 -mcmodel=medium" --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars
resmoke_jobs_factor: 0.25
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling,uses_transactions
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling
tooltags: "ssl sasl"
build_mongoreplay: true
display_tasks:
@ -13635,7 +13657,7 @@ buildvariants:
expansions:
compile_flags: --dbg=on --opt=on --ssl MONGO_DISTMOD=rhel72 -j3 CCFLAGS="-march=z196 -mtune=zEC12" --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars
resmoke_jobs_max: 2
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling,uses_transactions
test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling
tooltags: "ssl sasl"
build_mongoreplay: true
display_tasks:
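
With uses_transactions dropped from --excludeWithAnyTags on the inMemory build variants above, transaction jstests run against the inMemory storage engine by default. A test that genuinely needs durable storage must now exclude itself through the requires_persistence tag, which is exactly what the jstest hunks below do. A hypothetical test header showing that convention (the wording and body are illustrative, not part of this commit):

/**
 * Hypothetical example: a test that restarts a node and re-reads on-disk state
 * must opt out of non-persistent engines such as inMemory.
 *
 * @tags: [requires_persistence, uses_transactions, uses_prepare_transaction]
 */
(function() {
    "use strict";
    // ... test body ...
}());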

View File

@ -17,8 +17,7 @@
let session =
rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
let sessionDb = session.getDatabase(dbName);
if (!sessionDb.serverStatus().storageEngine.supportsSnapshotReadConcern ||
!sessionDb.serverStatus().storageEngine.persistent) {
if (!sessionDb.serverStatus().storageEngine.supportsSnapshotReadConcern) {
// Transactions with readConcern snapshot fail.
session.startTransaction({readConcern: {level: "snapshot"}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),

View File

@ -3,7 +3,7 @@
* replaying the commitTransaction oplog entry. We hold back the snapshot so that we make sure that
* the operations from the transaction are not reflected in the data when recovery starts.
*
* @tags: [uses_transactions, uses_prepare_transaction]
* @tags: [requires_persistence, uses_transactions, uses_prepare_transaction]
*/
(function() {

View File

@ -22,6 +22,14 @@
return;
}
function findPrepareEntry(oplogColl) {
if (TestData.setParameters.useMultipleOplogEntryFormatForTransactions) {
return oplogColl.findOne({op: "c", o: {"prepareTransaction": 1}});
} else {
return oplogColl.findOne({prepare: true});
}
}
// A new replica set for both the commit and abort tests to ensure the same clean state.
function doTest(commitOrAbort) {
const replSet = new ReplSetTest({
@ -60,20 +68,25 @@
jsTestLog("Get transaction entry from config.transactions");
const txnEntry = primary.getDB("config").transactions.findOne();
assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
if (TestData.setParameters.useMultipleOplogEntryFormatForTransactions) {
assert.lt(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
} else {
assert.eq(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
}
assert.soonNoExcept(() => {
const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
assert(secondaryTxnEntry);
assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
return true;
});
jsTestLog("Find prepare oplog entry");
const oplogEntry = primaryOplog.findOne({prepare: true});
const oplogEntry = findPrepareEntry(primaryOplog);
assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
// Must already be written on secondary, since the config.transactions entry is.
const secondaryOplogEntry = secondaryOplog.findOne({prepare: true});
const secondaryOplogEntry = findPrepareEntry(secondaryOplog);
assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
jsTestLog("Insert documents until oplog exceeds oplogSize");
@ -84,11 +97,11 @@
jsTestLog(
`Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
assert.eq(oplogEntry, primaryOplog.findOne({prepare: true}));
assert.eq(oplogEntry, findPrepareEntry(primaryOplog));
assert.soon(() => {
return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
});
assert.eq(oplogEntry, secondaryOplog.findOne({prepare: true}));
assert.eq(oplogEntry, findPrepareEntry(secondaryOplog));
if (commitOrAbort === "commit") {
jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
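
The findPrepareEntry helper added above exists because the prepare entry looks different under the two oplog formats: with the useMultipleOplogEntryFormatForTransactions server parameter set, the transaction ends in a prepareTransaction command entry, while the older format tags the entry with a prepare: true field, as the helper's two queries show. A hedged usage sketch, assuming a connected primary as in the test (the oplog handle and variable names here are assumptions):

const oplog = primary.getDB("local").oplog.rs;
// Locate the prepare entry in whichever format this suite is running with.
const prepareEntry = TestData.setParameters.useMultipleOplogEntryFormatForTransactions
    ? oplog.findOne({op: "c", o: {"prepareTransaction": 1}})
    : oplog.findOne({prepare: true});
assert(prepareEntry, "expected to find a prepare oplog entry");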

View File

@ -2,7 +2,7 @@
* Test that startup recovery successfully recovers multiple prepared transactions and that we can
* commit or abort the transaction afterwards.
*
* @tags: [uses_transactions, uses_prepare_transaction]
* @tags: [requires_persistence, uses_transactions, uses_prepare_transaction]
*/
(function() {
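
The test above prepares transactions, restarts the node so that startup recovery has to reconstruct them, and then commits or aborts them. For orientation, here is a hedged sketch of the prepare-then-commit sequence these suites typically use; it assumes the PrepareHelpers library (jstests/core/txns/libs/prepare_helpers.js, the same helper whose oplogSizeBytes constant appears in an earlier hunk), and the helper function names are recalled rather than taken from this diff, so treat them as assumptions:

load("jstests/core/txns/libs/prepare_helpers.js");

const coll = primary.getDB("test").getCollection("prepared_txn_example");
assert.writeOK(coll.insert({_id: 0}));  // the collection must exist before the transaction
const session = primary.startSession({causalConsistency: false});
const sessionColl = session.getDatabase("test").getCollection("prepared_txn_example");

session.startTransaction();
assert.writeOK(sessionColl.insert({_id: 1}));
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
// A restart here would force startup recovery to reconstruct the prepared
// transaction; afterwards the test can commit at the prepare timestamp or abort.
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));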

View File

@ -7,7 +7,7 @@
* This tests the oldestActiveTransactionTimestamp, which is calculated from the "startOpTime"
* field of documents in the config.transactions collection.
*
* @tags: [uses_transactions, uses_prepare_transaction]
* @tags: [requires_persistence, uses_transactions, uses_prepare_transaction]
*/
(function() {
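
The comment above says oldestActiveTransactionTimestamp is computed from the startOpTime field of documents in config.transactions. That is the same field the prepare-oplog test earlier in this commit asserts on; a hedged snippet for inspecting it from the shell (assuming a connected primary, as in the other tests):

const txnEntry = primary.getDB("config").transactions.findOne();
printjson(txnEntry.startOpTime);  // an OpTime such as {ts: Timestamp(...), t: NumberLong(...)}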

View File

@ -6,7 +6,7 @@
* reflects the transaction. If the operations are replayed, this will cause a BSONTooLarge
* exception.
*
* @tags: [uses_transactions, uses_prepare_transaction]
* @tags: [requires_persistence, uses_transactions, uses_prepare_transaction]
*/
(function() {
@ -90,4 +90,4 @@
assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
replTest.stopSet();
}());
}());

View File

@ -2,7 +2,7 @@
* Test that we can successfully reconstruct a prepared transaction that was prepared before the
* stable timestamp at the end of startup recovery.
*
* @tags: [uses_transactions, uses_prepare_transaction]
* @tags: [requires_persistence, uses_transactions, uses_prepare_transaction]
*/
(function() {
@ -112,4 +112,4 @@
assert.eq(testColl.count(), 3);
replTest.stopSet();
}());
}());