diff --git a/buildscripts/resmokelib/config.py b/buildscripts/resmokelib/config.py index 6fa0c2ad3ce..cd4e6978b8f 100644 --- a/buildscripts/resmokelib/config.py +++ b/buildscripts/resmokelib/config.py @@ -113,6 +113,7 @@ DEFAULTS = { "shell_seed": None, "storage_engine": "wiredTiger", "storage_engine_cache_size_gb": None, + "mozjs_js_gc_zeal": None, "suite_files": "with_server", "tag_files": [], "test_files": [], @@ -722,3 +723,8 @@ REQUIRES_WORKLOAD_CONTAINER_SETUP = False # Config fuzzer encryption options, this is only set when the fuzzer is run CONFIG_FUZZER_ENCRYPTION_OPTS = None + +# If resmoke is running on a build variant that specifies a mongo_mozjs_opts, +# we need a way to provide the JS_GC_ZEAL setting provided as part of the mongo_mozjs_opts +# exclusively to mongod/mongos. +MOZJS_JS_GC_ZEAL = None diff --git a/buildscripts/resmokelib/configure_resmoke.py b/buildscripts/resmokelib/configure_resmoke.py index 2dc7414fc94..2a898289fcd 100644 --- a/buildscripts/resmokelib/configure_resmoke.py +++ b/buildscripts/resmokelib/configure_resmoke.py @@ -524,6 +524,7 @@ or explicitly pass --installDir to the run subcommand of buildscripts/resmoke.py _config.SHELL_SEED = config.pop("shell_seed") _config.STAGGER_JOBS = config.pop("stagger_jobs") == "on" _config.STORAGE_ENGINE_CACHE_SIZE = config.pop("storage_engine_cache_size_gb") + _config.MOZJS_JS_GC_ZEAL = config.pop("mozjs_js_gc_zeal") _config.SUITE_FILES = config.pop("suite_files") if _config.SUITE_FILES is not None: _config.SUITE_FILES = _config.SUITE_FILES.split(",") diff --git a/buildscripts/resmokelib/run/__init__.py b/buildscripts/resmokelib/run/__init__.py index ca757565e79..216e7369fa9 100644 --- a/buildscripts/resmokelib/run/__init__.py +++ b/buildscripts/resmokelib/run/__init__.py @@ -1658,6 +1658,13 @@ class RunPlugin(PluginInterface): ), ) + mongodb_server_options.add_argument( + "--mozjsJsGcZeal", + dest="mozjs_js_gc_zeal", + action="store", + help="sets JS_GC_ZEAL for mozjs.", + ) + 
mongodb_server_options.add_argument( "--majorityReadConcern", action="store", diff --git a/buildscripts/resmokelib/testing/fixtures/fixturelib.py b/buildscripts/resmokelib/testing/fixtures/fixturelib.py index 3fbe0458125..4221ff7338d 100644 --- a/buildscripts/resmokelib/testing/fixtures/fixturelib.py +++ b/buildscripts/resmokelib/testing/fixtures/fixturelib.py @@ -167,6 +167,7 @@ class _FixtureConfig(object): self.NO_JOURNAL = config.NO_JOURNAL self.STORAGE_ENGINE = config.STORAGE_ENGINE self.STORAGE_ENGINE_CACHE_SIZE = config.STORAGE_ENGINE_CACHE_SIZE + self.MOZJS_JS_GC_ZEAL = config.MOZJS_JS_GC_ZEAL self.WT_COLL_CONFIG = config.WT_COLL_CONFIG self.WT_ENGINE_CONFIG = config.WT_ENGINE_CONFIG self.WT_INDEX_CONFIG = config.WT_INDEX_CONFIG diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py index 85e1c045d43..d5447f45ec3 100644 --- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py +++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py @@ -1009,6 +1009,15 @@ class MongosLauncher(object): DEFAULT_MONGOS_SHUTDOWN_TIMEOUT_MILLIS ) + # If a JS_GC_ZEAL value has been provided in the configuration under MOZJS_JS_GC_ZEAL, + # we inject this value directly as an environment variable to be passed to the spawned + # mongos process. 
+ if self.config.MOZJS_JS_GC_ZEAL: + process_kwargs = self.fixturelib.default_if_none(process_kwargs, {}).copy() + env_vars = process_kwargs.setdefault("env_vars", {}).copy() + env_vars.setdefault("JS_GC_ZEAL", self.config.MOZJS_JS_GC_ZEAL) + process_kwargs["env_vars"] = env_vars + _add_testing_set_parameters(suite_set_parameters) return self.fixturelib.mongos_program( diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py index 903bfce1565..273819c5309 100644 --- a/buildscripts/resmokelib/testing/fixtures/standalone.py +++ b/buildscripts/resmokelib/testing/fixtures/standalone.py @@ -475,6 +475,15 @@ class MongodLauncher(object): elif self.config.STORAGE_ENGINE == "wiredTiger" or self.config.STORAGE_ENGINE is None: shortcut_opts["wiredTigerCacheSizeGB"] = self.config.STORAGE_ENGINE_CACHE_SIZE + # If a JS_GC_ZEAL value has been provided in the configuration under MOZJS_JS_GC_ZEAL, + # we inject this value directly as an environment variable to be passed to the spawned + # mongod process. + if self.config.MOZJS_JS_GC_ZEAL: + process_kwargs = self.fixturelib.default_if_none(process_kwargs, {}).copy() + env_vars = process_kwargs.setdefault("env_vars", {}).copy() + env_vars.setdefault("JS_GC_ZEAL", self.config.MOZJS_JS_GC_ZEAL) + process_kwargs["env_vars"] = env_vars + # These options are just flags, so they should not take a value. 
opts_without_vals = "logappend" diff --git a/buildscripts/resmokelib/testing/testcases/jstest.py b/buildscripts/resmokelib/testing/testcases/jstest.py index 524e7645054..afe9a25ccd9 100644 --- a/buildscripts/resmokelib/testing/testcases/jstest.py +++ b/buildscripts/resmokelib/testing/testcases/jstest.py @@ -73,6 +73,9 @@ class _SingleJSTestCase(interface.ProcessTestCase): test_data["ignoreUnterminatedProcesses"] = False test_data["ignoreChildProcessErrorCode"] = False + if config.MOZJS_JS_GC_ZEAL: + test_data["mozJSGCZeal"] = config.MOZJS_JS_GC_ZEAL + # The tests in 'timeseries' directory need to use a different logic for implicity sharding # the collection. Make sure that we consider both unix and windows directory structures. # Check if any test being run is a timeseries test diff --git a/etc/evergreen_yml_components/variants/sanitizer/test_dev.yml b/etc/evergreen_yml_components/variants/sanitizer/test_dev.yml index c67d17e0bbb..a069eef94dd 100644 --- a/etc/evergreen_yml_components/variants/sanitizer/test_dev.yml +++ b/etc/evergreen_yml_components/variants/sanitizer/test_dev.yml @@ -610,8 +610,9 @@ buildvariants: expansions: <<: *enterprise-rhel-8-64-bit-dynamic-expansions # JS_GC_ZEAL modes can be found at https://github.com/mongodb/mongo/blob/r8.0.0-rc9/src/third_party/mozjs/extract/js/src/gc/GC.cpp#L563-L612. - # These modes correspond to collecting the nursery (GenerationalGC) every 50 allocations. - mozjs_options: JS_GC_ZEAL='7,50' + # These modes correspond to a GC policy of generationalGC (mode 7) every 75 allocations, and a + # consistency check of the heap after every GC cycle (mode 15). 
+ mongo_mozjs_options: "7;15,75" compile_flags: >- --ssl MONGO_DISTMOD=rhel88 @@ -623,6 +624,8 @@ buildvariants: compile_variant: enterprise-rhel-8-64-bit-dynamic-spider-monkey-dbg exec_timeout_secs: 32400 # 9 hour timeout timeout_secs: 18000 # 5 hour idle timeout + test_flags: >- + --includeWithAnyTags=requires_scripting depends_on: [] tasks: - name: compile_test_parallel_core_stream_TG @@ -630,5 +633,8 @@ buildvariants: - rhel8.8-xlarge - name: aggregation - name: aggregation_mongos_passthrough + - name: auth_gen - name: concurrency_simultaneous_gen - name: jsCore + - name: noPassthrough_gen + - name: sharding_gen diff --git a/evergreen/resmoke_tests_execute.sh b/evergreen/resmoke_tests_execute.sh old mode 100644 new mode 100755 index fd0483cca9d..f24a9913a93 --- a/evergreen/resmoke_tests_execute.sh +++ b/evergreen/resmoke_tests_execute.sh @@ -117,6 +117,11 @@ if [[ ${disable_unit_tests} = "false" && ! -f ${skip_tests} ]]; then extra_args="$extra_args --runNoFeatureFlagTests" fi + # Introduce JS_GC_ZEAL to be used specifically under mongod/mongos. + if [[ "${build_variant}" = "enterprise-rhel-8-64-bit-dynamic-spider-monkey-dbg" && ! -z "${mongo_mozjs_options}" ]]; then + extra_args="$extra_args --mozjsJsGcZeal='${mongo_mozjs_options}'" + fi + path_value="$PATH:/data/multiversion" # Set the suite name to be the task name by default; unless overridden with the `suite` expansion. 
diff --git a/jstests/auth/bypass_default_max_time_ms.js b/jstests/auth/bypass_default_max_time_ms.js index bed7faa3580..a12140178d7 100644 --- a/jstests/auth/bypass_default_max_time_ms.js +++ b/jstests/auth/bypass_default_max_time_ms.js @@ -9,6 +9,8 @@ * requires_auth, * requires_replication, * requires_sharding, + * # Uses $function + * requires_scripting, * uses_transactions, * featureFlagSecurityToken, * ] diff --git a/jstests/auth/commands_builtin_roles_sharded.js b/jstests/auth/commands_builtin_roles_sharded.js index 21284e707dc..cb8a1d3ac7b 100644 --- a/jstests/auth/commands_builtin_roles_sharded.js +++ b/jstests/auth/commands_builtin_roles_sharded.js @@ -6,7 +6,7 @@ * The test logic implemented here operates on the test cases defined * in jstests/auth/lib/commands_lib.js * - * @tags: [requires_sharding] + * @tags: [requires_sharding, requires_scripting] */ import {runAllCommandsBuiltinRoles} from "jstests/auth/lib/commands_builtin_roles.js"; diff --git a/jstests/auth/commands_builtin_roles_standalone.js b/jstests/auth/commands_builtin_roles_standalone.js index 4f8442189d5..0b4f7ef984c 100644 --- a/jstests/auth/commands_builtin_roles_standalone.js +++ b/jstests/auth/commands_builtin_roles_standalone.js @@ -5,6 +5,9 @@ * * The test logic implemented here operates on the test cases defined * in jstests/auth/lib/commands_lib.js + * @tags: [ + * requires_scripting + * ] */ import {runAllCommandsBuiltinRoles} from "jstests/auth/lib/commands_builtin_roles.js"; diff --git a/jstests/auth/commands_user_defined_roles.js b/jstests/auth/commands_user_defined_roles.js index 366669b4df7..7a4b6b51aed 100644 --- a/jstests/auth/commands_user_defined_roles.js +++ b/jstests/auth/commands_user_defined_roles.js @@ -5,7 +5,7 @@ Exhaustive test for authorization of commands with user-defined roles. The test logic implemented here operates on the test cases defined in jstests/auth/lib/commands_lib.js. 
-@tags: [requires_sharding] +@tags: [requires_sharding, requires_scripting] */ diff --git a/jstests/auth/default_max_time_ms_aggregate.js b/jstests/auth/default_max_time_ms_aggregate.js index f4f18d2ce5f..e388ae8d3dd 100644 --- a/jstests/auth/default_max_time_ms_aggregate.js +++ b/jstests/auth/default_max_time_ms_aggregate.js @@ -8,6 +8,7 @@ * # Transactions aborted upon fcv upgrade or downgrade; cluster parameters use internal txns. * uses_transactions, * requires_fcv_80, + * requires_scripting, * ] */ diff --git a/jstests/auth/default_max_time_ms_metrics.js b/jstests/auth/default_max_time_ms_metrics.js index 1d3cad58ea4..02d53df661b 100644 --- a/jstests/auth/default_max_time_ms_metrics.js +++ b/jstests/auth/default_max_time_ms_metrics.js @@ -9,6 +9,7 @@ * # Transactions aborted upon fcv upgrade or downgrade; cluster parameters use internal txns. * uses_transactions, * requires_fcv_80, + * requires_scripting, * ] */ diff --git a/jstests/auth/default_max_time_ms_sharded.js b/jstests/auth/default_max_time_ms_sharded.js index afffe62db1e..c2044e4f185 100644 --- a/jstests/auth/default_max_time_ms_sharded.js +++ b/jstests/auth/default_max_time_ms_sharded.js @@ -9,6 +9,7 @@ * required_auth, * requires_sharding, * uses_transactions, + * requires_scripting, * ] */ diff --git a/jstests/auth/default_max_time_ms_sharded_with_hedged_reads.js b/jstests/auth/default_max_time_ms_sharded_with_hedged_reads.js index 0eff37f6be7..ce4eb324e9e 100644 --- a/jstests/auth/default_max_time_ms_sharded_with_hedged_reads.js +++ b/jstests/auth/default_max_time_ms_sharded_with_hedged_reads.js @@ -8,6 +8,7 @@ * required_auth, * requires_sharding, * uses_transactions, + * requires_scripting, * ] */ import {configureFailPoint} from "jstests/libs/fail_point_util.js"; diff --git a/jstests/auth/js_scope_leak.js b/jstests/auth/js_scope_leak.js index 1e4396aed06..56320319426 100644 --- a/jstests/auth/js_scope_leak.js +++ b/jstests/auth/js_scope_leak.js @@ -1,3 +1,8 @@ +/** + * @tags: [ + * 
requires_scripting + * ] + */ // Test for SERVER-9129 // Verify global scope data does not persist past logout or auth. // NOTE: Each test case covers 3 state transitions: diff --git a/jstests/auth/mr_auth.js b/jstests/auth/mr_auth.js index 35c8ad1215f..751bb0259af 100644 --- a/jstests/auth/mr_auth.js +++ b/jstests/auth/mr_auth.js @@ -2,7 +2,7 @@ // mode. Other modes require writing to an output collection which is not allowed. SERVER-3345 // // This test requires users to persist across a restart. -// @tags: [requires_persistence] +// @tags: [requires_persistence, requires_scripting] let baseName = "jstests_mr_auth"; let dbName = "test"; diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js index 5744c15c23b..59bc8e27367 100644 --- a/jstests/disk/killall.js +++ b/jstests/disk/killall.js @@ -1,3 +1,10 @@ +/** + * @tags: [ + * # Uses $where operator + * requires_scripting, + * ] + */ + /** * Verify that killing an instance of mongod while it is in a long running computation or infinite * loop still leads to clean shutdown, and that said shutdown is prompt. diff --git a/jstests/noPassthrough/client_metadata_slowlog.js b/jstests/noPassthrough/client_metadata_slowlog.js index 18e9a047fdb..b04f19dc5fb 100644 --- a/jstests/noPassthrough/client_metadata_slowlog.js +++ b/jstests/noPassthrough/client_metadata_slowlog.js @@ -1,5 +1,9 @@ /** * Test that verifies client metadata is logged as part of slow query logging in MongoD. 
+ * + * @tags: [ + * requires_scripting + * ] */ let conn = MongoRunner.runMongod({useLogFiles: true}); assert.neq(null, conn, 'mongod was unable to start up'); @@ -29,4 +33,4 @@ for (var a of log.split("\n")) { assert(predicate.test(log), "'Slow query' log line missing in mongod log file!\n" + "Log file contents: " + conn.fullOptions.logFile); -MongoRunner.stopMongod(conn); \ No newline at end of file +MongoRunner.stopMongod(conn); diff --git a/jstests/noPassthrough/client_metadata_slowlog_rs.js b/jstests/noPassthrough/client_metadata_slowlog_rs.js index 8221e133d97..1ad7fa805d6 100644 --- a/jstests/noPassthrough/client_metadata_slowlog_rs.js +++ b/jstests/noPassthrough/client_metadata_slowlog_rs.js @@ -3,6 +3,7 @@ * set. * @tags: [ * requires_replication, + * requires_scripting, * ] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; @@ -47,4 +48,4 @@ count = coll.find({ assert.eq(count.length, 1, "expected 1 document"); assert(checkLog.checkContainsOnce(rst.getSecondary(), predicate)); -rst.stopSet(); \ No newline at end of file +rst.stopSet(); diff --git a/jstests/noPassthrough/comment_field_passthrough.js b/jstests/noPassthrough/comment_field_passthrough.js index 5cfa14d1290..4fcfecd94ca 100644 --- a/jstests/noPassthrough/comment_field_passthrough.js +++ b/jstests/noPassthrough/comment_field_passthrough.js @@ -5,6 +5,7 @@ * requires_persistence, * requires_replication, * requires_sharding, + * requires_scripting, * ] */ diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js index 1c3c1a5ae48..5814c72d860 100644 --- a/jstests/noPassthrough/currentop_query.js +++ b/jstests/noPassthrough/currentop_query.js @@ -4,6 +4,7 @@ * @tags: [ * requires_replication, * requires_sharding, + * requires_scripting, * ] */ import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; diff --git a/jstests/noPassthrough/deprecated_serverside_js.js b/jstests/noPassthrough/deprecated_serverside_js.js index 
d555b78b866..b92a698e1ef 100644 --- a/jstests/noPassthrough/deprecated_serverside_js.js +++ b/jstests/noPassthrough/deprecated_serverside_js.js @@ -4,6 +4,7 @@ // We want to make sure that the deprecation warning message is only logged twice despite // the multiple invocations in an effort to not clutter the dev's console. // More specifically, we expect to only log 1 out of 128 events. +// @tags: [requires_scripting] import {iterateMatchingLogLines} from "jstests/libs/log.js"; import {ShardingTest} from "jstests/libs/shardingtest.js"; @@ -202,4 +203,4 @@ deprecationTest(shardedDB, accumulatorDeprecationMsg, accumulatorCmdObj, shards) deprecationTest(shardedDB, functionDeprecationMsg, functionAggCmdObj, shards); deprecationTest(shardedDB, functionDeprecationMsg, functionFindCmdObj, shards); -shards.stop(); \ No newline at end of file +shards.stop(); diff --git a/jstests/noPassthrough/explain_optimization_stats.js b/jstests/noPassthrough/explain_optimization_stats.js index a4663839d11..f04a64b9f47 100644 --- a/jstests/noPassthrough/explain_optimization_stats.js +++ b/jstests/noPassthrough/explain_optimization_stats.js @@ -1,5 +1,8 @@ /** * Tests for validating that optimization stats are included in explain output. + * @tags: [ + * requires_scripting + * ] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/noPassthrough/expression_function_kill.js b/jstests/noPassthrough/expression_function_kill.js index 0174a5e72e5..97e97e67e49 100644 --- a/jstests/noPassthrough/expression_function_kill.js +++ b/jstests/noPassthrough/expression_function_kill.js @@ -1,5 +1,8 @@ /** * Tests where/function can be interrupted through maxTimeMS and query knob. 
+ * @tags: [ + * requires_scripting + * ] */ const mongodOptions = {}; const conn = MongoRunner.runMongod(mongodOptions); @@ -75,4 +78,4 @@ tests.forEach(function(testCase) { testCase.err(cursor); }); -MongoRunner.stopMongod(conn); \ No newline at end of file +MongoRunner.stopMongod(conn); diff --git a/jstests/noPassthrough/index_partial_no_explain_cmds.js b/jstests/noPassthrough/index_partial_no_explain_cmds.js index b997cd49d34..6489c05fbdc 100644 --- a/jstests/noPassthrough/index_partial_no_explain_cmds.js +++ b/jstests/noPassthrough/index_partial_no_explain_cmds.js @@ -1,5 +1,6 @@ // Test partial indexes with commands that don't use explain. These commands are tested against // mongod with the --notablescan flag set, so that they fail if the index is not used. +// @tags: [requires_scripting] import {resultsEq} from "jstests/aggregation/extras/utils.js"; var runner = MongoRunner.runMongod({setParameter: "notablescan=1"}); diff --git a/jstests/noPassthrough/javascript_options.js b/jstests/noPassthrough/javascript_options.js index 6c8574f9682..34eae3269d2 100644 --- a/jstests/noPassthrough/javascript_options.js +++ b/jstests/noPassthrough/javascript_options.js @@ -1,3 +1,8 @@ +/* + * @tags: [ + * requires_scripting, + * ] + */ import { testGetCmdLineOptsMongod, testGetCmdLineOptsMongos diff --git a/jstests/noPassthrough/js_protection.js b/jstests/noPassthrough/js_protection.js index 9f5b6d2268d..196407da41c 100644 --- a/jstests/noPassthrough/js_protection.js +++ b/jstests/noPassthrough/js_protection.js @@ -8,6 +8,10 @@ * server. * 3. db.loadServerScripts performs as expected even with the flag is set in * the shell. 
+ * + * @tags: [ + * requires_scripting + * ] */ var testServer = MongoRunner.runMongod({setParameter: "javascriptProtection=true"}); diff --git a/jstests/noPassthrough/lock_free_ops_concurrent_with_exclusive_lock.js b/jstests/noPassthrough/lock_free_ops_concurrent_with_exclusive_lock.js index 94030c4398c..554619c3805 100644 --- a/jstests/noPassthrough/lock_free_ops_concurrent_with_exclusive_lock.js +++ b/jstests/noPassthrough/lock_free_ops_concurrent_with_exclusive_lock.js @@ -3,6 +3,7 @@ * listCollection and listIndexes commands can run while a MODE_X collection lock is held. * * @tags: [ + * requires_scripting, * ] */ @@ -95,4 +96,4 @@ jsTestLog("Waiting for unstalled collMod operation to finish."); awaitBlockingCollMod(); jsTestLog("Done."); -MongoRunner.stopMongod(conn); \ No newline at end of file +MongoRunner.stopMongod(conn); diff --git a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js index 130b1aa8b7e..2353988c845 100644 --- a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js +++ b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js @@ -4,6 +4,7 @@ * @tags: [ * requires_replication, * requires_sharding, + * requires_scripting, * ] */ diff --git a/jstests/noPassthrough/log_nReturned.js b/jstests/noPassthrough/log_nReturned.js index 5ca247a1b6d..efce87fff21 100644 --- a/jstests/noPassthrough/log_nReturned.js +++ b/jstests/noPassthrough/log_nReturned.js @@ -1,6 +1,6 @@ /** * This test verifies the correctness of the "nReturned" value output in the slow query logs. - * @tags: [] + * @tags: [requires_scripting] */ import {findMatchingLogLine} from "jstests/libs/log.js"; diff --git a/jstests/noPassthrough/max_time_ms.js b/jstests/noPassthrough/max_time_ms.js index cd6d580c0da..92ef5944c36 100644 --- a/jstests/noPassthrough/max_time_ms.js +++ b/jstests/noPassthrough/max_time_ms.js @@ -4,6 +4,7 @@ * Creates a sharded cluster. 
* @tags: [ * requires_sharding, + * requires_scripting, * ] */ import {FixtureHelpers} from "jstests/libs/fixture_helpers.js"; diff --git a/jstests/noPassthrough/max_time_ms_does_not_leak_shard_cursor.js b/jstests/noPassthrough/max_time_ms_does_not_leak_shard_cursor.js index 746f74ae8ef..7c0e2fa90b3 100644 --- a/jstests/noPassthrough/max_time_ms_does_not_leak_shard_cursor.js +++ b/jstests/noPassthrough/max_time_ms_does_not_leak_shard_cursor.js @@ -1,7 +1,7 @@ // Tests that if a mongoS cursor exceeds the maxTimeMs timeout, the cursors on the shards will be // cleaned up. Exercises the fix for the bug described in SERVER-62710. // -// @tags: [] +// @tags: [requires_scripting] import {configureFailPoint} from "jstests/libs/fail_point_util.js"; import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/noPassthrough/mr_disk_use.js b/jstests/noPassthrough/mr_disk_use.js index 33304a13dd7..b68dec5eb34 100644 --- a/jstests/noPassthrough/mr_disk_use.js +++ b/jstests/noPassthrough/mr_disk_use.js @@ -1,5 +1,9 @@ -// Test mapReduce use with different values of the allowDiskUseByDefault parameter. - +/** + * Test mapReduce use with different values of the allowDiskUseByDefault parameter. 
+ * @tags: [ + * requires_scripting, + * ] + */ const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod was unable to start up"); @@ -36,4 +40,4 @@ assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: tr const res = assert.commandWorked(db.runCommand(mapReduceCmd)); assert.eq(res.results[0], {_id: "a", value: 42}, res); -MongoRunner.stopMongod(conn); \ No newline at end of file +MongoRunner.stopMongod(conn); diff --git a/jstests/noPassthrough/mr_mutable_properties.js b/jstests/noPassthrough/mr_mutable_properties.js index 3e7a0294181..1e5a3507f69 100644 --- a/jstests/noPassthrough/mr_mutable_properties.js +++ b/jstests/noPassthrough/mr_mutable_properties.js @@ -1,6 +1,7 @@ // See SERVER-9448 // Test argument and receiver (aka 'this') objects and their children can be mutated // in Map, Reduce and Finalize functions +// @tags: [requires_scripting] import {assertArrayEq} from "jstests/aggregation/extras/utils.js"; import {ShardingTest} from "jstests/libs/shardingtest.js"; @@ -94,4 +95,4 @@ let st = new ShardingTest({shards: 2, setParameter: {mrEnableSingleReduceOptimiz assert.neq(null, st.s, "mongod was unable to start up"); st.s.adminCommand({shardCollection: "test.mrMutableReceiver"}); runTest(st.s.getDB("test").mrMutableReceiver); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/noPassthrough/mr_single_reduce_optimization.js b/jstests/noPassthrough/mr_single_reduce_optimization.js index 397a52e423f..4798b460ecb 100644 --- a/jstests/noPassthrough/mr_single_reduce_optimization.js +++ b/jstests/noPassthrough/mr_single_reduce_optimization.js @@ -1,5 +1,6 @@ // See SERVER-68766. Verify that the reduce function is not run on a single value if the relevant // flag is enabled. 
+// @tags: [requires_scripting] const conn = MongoRunner.runMongod({setParameter: {mrEnableSingleReduceOptimization: true}}); const testDB = conn.getDB('foo'); diff --git a/jstests/noPassthrough/operator_counters_match.js b/jstests/noPassthrough/operator_counters_match.js index 6a2c7db5ce6..5c789efe6aa 100644 --- a/jstests/noPassthrough/operator_counters_match.js +++ b/jstests/noPassthrough/operator_counters_match.js @@ -1,6 +1,6 @@ /** * Tests counters for match expressions. - * @tags: [requires_fcv_50] + * @tags: [requires_fcv_50, requires_scripting] */ const mongod = MongoRunner.runMongod(); @@ -283,4 +283,4 @@ checkCounters( .itcount()), "$geoIntersects"); -MongoRunner.stopMongod(mongod); \ No newline at end of file +MongoRunner.stopMongod(mongod); diff --git a/jstests/noPassthrough/plan_cache_replan_where.js b/jstests/noPassthrough/plan_cache_replan_where.js index fd057b1b2cc..9b0f0888297 100644 --- a/jstests/noPassthrough/plan_cache_replan_where.js +++ b/jstests/noPassthrough/plan_cache_replan_where.js @@ -3,6 +3,7 @@ * * @tags: [ * requires_profiling, + * requires_scripting, * ] */ import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; diff --git a/jstests/noPassthrough/profile_interrupted_op.js b/jstests/noPassthrough/profile_interrupted_op.js index ba266656eb4..fb6d26addd1 100644 --- a/jstests/noPassthrough/profile_interrupted_op.js +++ b/jstests/noPassthrough/profile_interrupted_op.js @@ -4,7 +4,7 @@ * state, the collection can still be successfully created on the fly. * * This test restarts the server and requires that data persists across restarts. 
- * @tags: [requires_persistence, requires_profiling] + * @tags: [requires_persistence, requires_profiling, requires_scripting] */ // // Start mongo with profiling disabled, create an empty database, and populate it with a @@ -53,4 +53,4 @@ const res = db.runCommand({listCollections: 1, filter: {name: "system.profile"}} assert.commandWorked(res); assert.eq(res.cursor.firstBatch.length, 1, res); -MongoRunner.stopMongod(standalone); \ No newline at end of file +MongoRunner.stopMongod(standalone); diff --git a/jstests/noPassthrough/read_only_allow_disk_use.js b/jstests/noPassthrough/read_only_allow_disk_use.js index d0ec2047a1c..a10c7435d21 100644 --- a/jstests/noPassthrough/read_only_allow_disk_use.js +++ b/jstests/noPassthrough/read_only_allow_disk_use.js @@ -6,7 +6,8 @@ * 'ephemeralForTest' storage engines that do not support queryable backup (read-only) mode. * @tags: [ * requires_persistence, - * requires_replication + * requires_replication, + * requires_scripting * ] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; diff --git a/jstests/noPassthrough/shell_cmd_assertions.js b/jstests/noPassthrough/shell_cmd_assertions.js index 6e3cb91d41f..f74401223b3 100644 --- a/jstests/noPassthrough/shell_cmd_assertions.js +++ b/jstests/noPassthrough/shell_cmd_assertions.js @@ -1,5 +1,8 @@ /** * Tests for the command assertion functions in mongo/shell/assert.js. 
+ * @tags: [ + * requires_scripting, + * ] */ const conn = MongoRunner.runMongod(); @@ -406,4 +409,4 @@ tests.forEach((test) => { }); /* cleanup */ -MongoRunner.stopMongod(conn); \ No newline at end of file +MongoRunner.stopMongod(conn); diff --git a/jstests/noPassthrough/socket_disconnect_kills.js b/jstests/noPassthrough/socket_disconnect_kills.js index f47ee11a884..d14f70c84fc 100644 --- a/jstests/noPassthrough/socket_disconnect_kills.js +++ b/jstests/noPassthrough/socket_disconnect_kills.js @@ -12,7 +12,8 @@ // due to client disconnect, and the number of completed operations that couldn't return data // due to client disconnect. // -// @tags: [requires_sharding] +// @tags: [requires_sharding, requires_scripting] + import {configureFailPoint} from "jstests/libs/fail_point_util.js"; import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/noPassthrough/update_now_clustertime_replset.js b/jstests/noPassthrough/update_now_clustertime_replset.js index f1961830d05..6c00ca5d070 100644 --- a/jstests/noPassthrough/update_now_clustertime_replset.js +++ b/jstests/noPassthrough/update_now_clustertime_replset.js @@ -6,6 +6,7 @@ * which cannot support a replica set. * @tags: [ * requires_replication, + * requires_scripting * ] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; @@ -284,4 +285,4 @@ for (let result of results) { assert.eq(result.mergeCT, result.aggCT); } -rst.stopSet(); \ No newline at end of file +rst.stopSet(); diff --git a/jstests/noPassthrough/update_now_clustertime_sharding.js b/jstests/noPassthrough/update_now_clustertime_sharding.js index f8db80423de..39f3f86639e 100644 --- a/jstests/noPassthrough/update_now_clustertime_sharding.js +++ b/jstests/noPassthrough/update_now_clustertime_sharding.js @@ -6,6 +6,7 @@ * cannot support a sharded cluster. 
* @tags: [ * requires_sharding, + * requires_scripting * ] */ import {ShardingTest} from "jstests/libs/shardingtest.js"; @@ -308,4 +309,4 @@ for (let result of results) { assert.eq(result.mergeCT, result.aggCT); } -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js index 46504e516c5..114278fb43c 100644 --- a/jstests/noPassthrough/update_server-5552.js +++ b/jstests/noPassthrough/update_server-5552.js @@ -1,3 +1,8 @@ +/** + * @tags: [ + * requires_scripting + * ] + */ var db; const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod failed to start."); diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js index 3263a00a2f4..b9a13830c77 100644 --- a/jstests/replsets/commands_that_write_accept_wc.js +++ b/jstests/replsets/commands_that_write_accept_wc.js @@ -4,6 +4,10 @@ * defines various database commands and what they expect to be true before and after the fact. * It then runs the commands with an invalid writeConcern and a valid writeConcern and * ensures that they succeed and fail appropriately. 
+ * + * @tags: [ + * requires_scripting, + * ] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; @@ -197,4 +201,4 @@ commands.forEach(function(cmd) { testInvalidWriteConcern(cmd); }); -replTest.stopSet(); \ No newline at end of file +replTest.stopSet(); diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js index 1bf1f4658f7..99142de1c5e 100644 --- a/jstests/replsets/groupAndMapReduce.js +++ b/jstests/replsets/groupAndMapReduce.js @@ -1,3 +1,8 @@ +/** + * @tags: [ + * requires_scripting + * ] + */ import {ReplSetTest} from "jstests/libs/replsettest.js"; import {waitForAllMembers} from "jstests/replsets/rslib.js"; diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js index e4896b3772d..7f5b8b24e6c 100644 --- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js +++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js @@ -6,6 +6,7 @@ // We verify this requirement by running a map-reduce, examining the logs to find the names of // all collections created, and checking the oplog for entries logging the creation of each of those // collections. +// @tags: [requires_scripting] import {ReplSetTest} from "jstests/libs/replsettest.js"; diff --git a/jstests/replsets/prepare_conflict_read_concern_behavior.js b/jstests/replsets/prepare_conflict_read_concern_behavior.js index ca71d435b66..ae7ffb3dc70 100644 --- a/jstests/replsets/prepare_conflict_read_concern_behavior.js +++ b/jstests/replsets/prepare_conflict_read_concern_behavior.js @@ -22,6 +22,7 @@ * uses_transactions, * # TODO (SERVER-80568): Re-enable this test in multiversion suites once it has been fixed. 
* DISABLED_TEMPORARILY_DUE_TO_FCV_UPGRADE, + * requires_scripting * ] */ diff --git a/jstests/replsets/txn_override_unittests.js b/jstests/replsets/txn_override_unittests.js index 9e569aa2018..2c01e46ca4e 100644 --- a/jstests/replsets/txn_override_unittests.js +++ b/jstests/replsets/txn_override_unittests.js @@ -25,7 +25,7 @@ * us to keep more tests running now. That said, these should ideally all throw so we do not rely on * the test itself calling assert.commandWorked. * - * @tags: [requires_replication, uses_transactions] + * @tags: [requires_replication, uses_transactions, requires_scripting] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; diff --git a/jstests/replsets/write_concern_update_where.js b/jstests/replsets/write_concern_update_where.js index 1864a03d17e..301da618d49 100644 --- a/jstests/replsets/write_concern_update_where.js +++ b/jstests/replsets/write_concern_update_where.js @@ -1,6 +1,10 @@ /** * Tests update with $where does not wait for write concern (which would trigger assertion while * holding global lock) when it iterates system.js collection using DBDirectClient. + * + * @tags: [ + * requires_scripting + * ] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; diff --git a/jstests/serial_run/allow_partial_results_with_maxTimeMS_failpoints.js b/jstests/serial_run/allow_partial_results_with_maxTimeMS_failpoints.js index 282063134be..abee2821387 100644 --- a/jstests/serial_run/allow_partial_results_with_maxTimeMS_failpoints.js +++ b/jstests/serial_run/allow_partial_results_with_maxTimeMS_failpoints.js @@ -11,6 +11,7 @@ * requires_replication, * requires_getmore, * requires_fcv_62, + * requires_scripting * ] */ import {configureFailPoint} from "jstests/libs/fail_point_util.js"; diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js index bae9635c863..8140aba3a4b 100644 --- a/jstests/sharding/auth.js +++ b/jstests/sharding/auth.js @@ -9,6 +9,7 @@ * # TODO (SERVER-88123): Re-enable this test. 
* # Test doesn't start enough mongods to have num_mongos routers * embedded_router_incompatible, + * requires_scripting * ] */ import {ReplSetTest} from "jstests/libs/replsettest.js"; diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js index 604041a67d5..4fc592c5817 100644 --- a/jstests/sharding/authCommands.js +++ b/jstests/sharding/authCommands.js @@ -1,6 +1,6 @@ /** * This tests using DB commands with authentication enabled when sharded. - * @tags: [multiversion_incompatible] + * @tags: [multiversion_incompatible, requires_scripting] */ // Multiple users cannot be authenticated on one connection within a session. TestData.disableImplicitSessions = true; diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js index a5b8cfd859c..ba2b9172840 100644 --- a/jstests/sharding/commands_that_write_accept_wc_shards.js +++ b/jstests/sharding/commands_that_write_accept_wc_shards.js @@ -9,6 +9,7 @@ * of 5MB across all sharding tests in wiredTiger. * @tags: [ * resource_intensive, + * requires_scripting * ] */ import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js b/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js index 28b83dc60fe..fda0f3ff95a 100644 --- a/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js +++ b/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js @@ -8,6 +8,7 @@ * # TODO (SERVER-88123): Re-enable this test. 
* # Test doesn't start enough mongods to have num_mongos routers * embedded_router_incompatible, + * requires_scripting * ] */ diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js index a977f79776b..ff89727b021 100644 --- a/jstests/sharding/features2.js +++ b/jstests/sharding/features2.js @@ -1,3 +1,4 @@ +// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; var s = new ShardingTest({shards: 2, mongos: 1}); diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js index 77d9956411a..1707952da88 100644 --- a/jstests/sharding/features3.js +++ b/jstests/sharding/features3.js @@ -7,6 +7,7 @@ // - Tests fsync and fsync+lock permissions on sharded db // @tags: [ // expects_explicit_underscore_id_index, +// requires_scripting // ] import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/hedged_reads.js b/jstests/sharding/hedged_reads.js index f3b896682aa..b43bc39b431 100644 --- a/jstests/sharding/hedged_reads.js +++ b/jstests/sharding/hedged_reads.js @@ -9,6 +9,7 @@ * # This test is known to be racey due to implementation of hedged reads (SERVER-65329). * # Disable windows testing as this feature is deprecated in v8.0. * incompatible_with_windows_tls, + * requires_scripting * ] */ import {configureFailPoint} from "jstests/libs/fail_point_util.js"; diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js index 782bd4a6cbe..82708bff530 100644 --- a/jstests/sharding/localhostAuthBypass.js +++ b/jstests/sharding/localhostAuthBypass.js @@ -9,6 +9,7 @@ import {ShardingTest} from "jstests/libs/shardingtest.js"; // The following checks, which occurs on ShardingTest.stop, involve using a mongos to read data on // the config server, but this test uses a special shutdown function which stops the mongoses before // calling ShardingTest.stop. 
+// @tags: [requires_scripting] TestData.skipCheckingUUIDsConsistentAcrossCluster = true; TestData.skipCheckingIndexesConsistentAcrossCluster = true; TestData.skipCheckOrphans = true; diff --git a/jstests/sharding/move_primary_with_writes.js b/jstests/sharding/move_primary_with_writes.js index d646eddca4b..4b846253c50 100644 --- a/jstests/sharding/move_primary_with_writes.js +++ b/jstests/sharding/move_primary_with_writes.js @@ -1,5 +1,5 @@ /** - * @tags: [does_not_support_stepdowns] + * @tags: [does_not_support_stepdowns, requires_scripting] */ import {configureFailPoint} from "jstests/libs/fail_point_util.js"; import {FixtureHelpers} from "jstests/libs/fixture_helpers.js"; diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js index 49aafd3bc65..5e2aacfe816 100644 --- a/jstests/sharding/mr_and_agg_versioning.js +++ b/jstests/sharding/mr_and_agg_versioning.js @@ -1,5 +1,6 @@ // Test that map reduce and aggregate properly handle shard versioning. // Test delibarately inserts orphaned data outside of migrations. +// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; TestData.skipCheckOrphans = true; @@ -57,4 +58,4 @@ res = staleMongos2.getCollection(nsString).aggregate( [{$group: {_id: "$key", value: {$sum: "$value"}}}, {$sort: {_id: 1}}]); validateOutput(res.toArray()); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/mr_single_reduce_split.js b/jstests/sharding/mr_single_reduce_split.js index 19083ef3279..1359f6bcf81 100644 --- a/jstests/sharding/mr_single_reduce_split.js +++ b/jstests/sharding/mr_single_reduce_split.js @@ -3,6 +3,7 @@ * cluster when there are documents on multiple chunks that need to be merged. 
* @tags: [ * backport_required_multiversion, + * requires_scripting * ] */ import {ShardingTest} from "jstests/libs/shardingtest.js"; @@ -52,4 +53,4 @@ res = assert.commandWorked(mongosDB.runCommand( {mapReduce: mongosColl.getName(), map: map, reduce: reduce, out: {inline: 1}})); assert.eq(res.results[0], {_id: 0, value: {val: "reduced value"}}); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/query/agg_js_on_mongos.js b/jstests/sharding/query/agg_js_on_mongos.js index 67b4cc6dc1c..e0a795faab2 100644 --- a/jstests/sharding/query/agg_js_on_mongos.js +++ b/jstests/sharding/query/agg_js_on_mongos.js @@ -1,5 +1,6 @@ // Performs an aggregation that will execute JavaScript on mongos. This is a sanity check to confirm // that JavaScript is available on mongos. +// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/query/collation_targeting.js b/jstests/sharding/query/collation_targeting.js index 311f30d2be2..c69cb156d78 100644 --- a/jstests/sharding/query/collation_targeting.js +++ b/jstests/sharding/query/collation_targeting.js @@ -1,4 +1,5 @@ // Test shard targeting for queries with collation. +// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; import { WriteWithoutShardKeyTestUtil @@ -527,4 +528,4 @@ explain = coll.explain().update( assert.commandWorked(explain); assert.eq(1, explain.queryPlanner.winningPlan.shards.length); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/query/collation_targeting_inherited.js b/jstests/sharding/query/collation_targeting_inherited.js index 100c82e9bb0..83f36d3c0e2 100644 --- a/jstests/sharding/query/collation_targeting_inherited.js +++ b/jstests/sharding/query/collation_targeting_inherited.js @@ -1,4 +1,5 @@ // Test shard targeting for queries on a collection with a default collation. 
+// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; import { WriteWithoutShardKeyTestUtil } from @@ -556,4 +557,4 @@ explain = assert.commandWorked(explain); assert.eq(1, explain.queryPlanner.winningPlan.shards.length); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/query/find_getmore_cmd.js b/jstests/sharding/query/find_getmore_cmd.js index 167f939fcc0..e302410c3cc 100644 --- a/jstests/sharding/query/find_getmore_cmd.js +++ b/jstests/sharding/query/find_getmore_cmd.js @@ -3,6 +3,9 @@ * * Always run on a fully upgraded cluster, so that {$meta: "sortKey"} projections use the newest * sort key format. + * @tags: [ + * requires_scripting, + * ] */ import {ShardingTest} from "jstests/libs/shardingtest.js"; @@ -158,4 +161,4 @@ assert.eq(cmdRes.cursor.firstBatch[3], {key: [1]}); assert.eq(cmdRes.cursor.firstBatch[4], {key: [5]}); assert.eq(cmdRes.cursor.firstBatch[5], {key: [9]}); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/query/javascript_heap_limit.js b/jstests/sharding/query/javascript_heap_limit.js index 42bd71e2218..c10140d73b1 100644 --- a/jstests/sharding/query/javascript_heap_limit.js +++ b/jstests/sharding/query/javascript_heap_limit.js @@ -1,3 +1,9 @@ +/** + * @tags: [ + * requires_scripting + * ] + */ + // Confirms that JavaScript heap limits are respected in aggregation. Includes testing for mapReduce // and $where which use aggregation for execution. import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/query/map_reduce_invalid_output_collection.js b/jstests/sharding/query/map_reduce_invalid_output_collection.js index eaf86f9a982..a8782e48ac4 100644 --- a/jstests/sharding/query/map_reduce_invalid_output_collection.js +++ b/jstests/sharding/query/map_reduce_invalid_output_collection.js @@ -1,5 +1,6 @@ // Test that mapReduce correctly fails if the target collection is not unsharded or sharded by just // _id. 
+// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; const st = new ShardingTest({shards: 2, mongos: 2}); @@ -124,4 +125,4 @@ testAgainstValidShardedOutput({_id: "hashed"}); 31313); })(); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/query/map_reduce_invalid_result_set.js b/jstests/sharding/query/map_reduce_invalid_result_set.js index 72e4e389269..fb83bde7c7a 100644 --- a/jstests/sharding/query/map_reduce_invalid_result_set.js +++ b/jstests/sharding/query/map_reduce_invalid_result_set.js @@ -4,6 +4,7 @@ // # TODO (SERVER-88127): Re-enable this test or add an explanation why it is incompatible. // embedded_router_incompatible, // uses_map_reduce_with_temp_collections, +// requires_scripting // ] import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/query/map_reduce_scope.js b/jstests/sharding/query/map_reduce_scope.js index 415db006146..2a675f1e0fd 100644 --- a/jstests/sharding/query/map_reduce_scope.js +++ b/jstests/sharding/query/map_reduce_scope.js @@ -1,6 +1,9 @@ /** * Test to verify 'scope' parameter of mapReduce command. This test verfies that 'map', 'reduce' and * 'finalize' functions can use 'scope' variable passed in the input. + * @tags: [ + * requires_scripting, + * ] */ import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/query/mrShardedOutput.js b/jstests/sharding/query/mrShardedOutput.js index 1098814415a..3229cae6f0a 100644 --- a/jstests/sharding/query/mrShardedOutput.js +++ b/jstests/sharding/query/mrShardedOutput.js @@ -2,6 +2,7 @@ // flag. // This test stresses behavior that is only true of the mapReduce implementation using aggregation, // so it cannot be run in mixed-version suites. 
+// @tags: [requires_scripting] import {FixtureHelpers} from "jstests/libs/fixture_helpers.js"; import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/query/mrShardedOutputAuth.js b/jstests/sharding/query/mrShardedOutputAuth.js index 4a7d9a32ae6..bb95016d0c3 100644 --- a/jstests/sharding/query/mrShardedOutputAuth.js +++ b/jstests/sharding/query/mrShardedOutputAuth.js @@ -1,6 +1,9 @@ /** * Test that a mapReduce job can write sharded output to a database * from a separate input database while authenticated to both. + * @tags: [ + * requires_scripting, + * ] */ import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/query/mr_output_options.js b/jstests/sharding/query/mr_output_options.js index 9f45fa8bfb5..57ede659ad3 100644 --- a/jstests/sharding/query/mr_output_options.js +++ b/jstests/sharding/query/mr_output_options.js @@ -1,5 +1,6 @@ // Tests that the mapReduce command works correctly under all combinations of the input and output // collections being sharded or unsharded. +// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; const st = new ShardingTest({shards: 2, other: {chunkSize: 1}}); @@ -102,4 +103,4 @@ output = inputColl.mapReduce( assert.commandWorked(output); assert.eq(output.results, [{_id: 0, value: 1}]); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/query/mr_replace_different_db_sharded.js b/jstests/sharding/query/mr_replace_different_db_sharded.js index 61ae5f635aa..54772590ea7 100644 --- a/jstests/sharding/query/mr_replace_different_db_sharded.js +++ b/jstests/sharding/query/mr_replace_different_db_sharded.js @@ -1,4 +1,5 @@ // Test MapReduce output option replace into different db in a sharded environment. 
+// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; const st = new ShardingTest({shards: 2, mongos: 1}); @@ -66,4 +67,4 @@ assert.eq(2, destColl.find().count(), result); const finalIndexes = assert.commandWorked(destDB.runCommand({"listIndexes": destColl.getName()})); const finalIndexesArray = new DBCommandCursor(destDB, finalIndexes).toArray(); assert.eq(2, finalIndexesArray.length); -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js index 0c6ab70e41b..d1d64947a2f 100644 --- a/jstests/sharding/query_config.js +++ b/jstests/sharding/query_config.js @@ -2,6 +2,7 @@ // @tags: [ // # TODO (SERVER-88122): Re-enable this test or add an explanation why it is incompatible. // embedded_router_incompatible, +// requires_scripting, // ] import {ShardingTest} from "jstests/libs/shardingtest.js"; diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js index f3fc8cc9147..f9209b139b9 100644 --- a/jstests/sharding/read_pref_cmd.js +++ b/jstests/sharding/read_pref_cmd.js @@ -5,6 +5,7 @@ * resource_intensive, * # TODO (SERVER-88127): Re-enable this test or add an explanation why it is incompatible. * embedded_router_incompatible, + * requires_scripting * ] */ import {configureFailPoint} from "jstests/libs/fail_point_util.js"; diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js index 84abdcb2f12..026e0224795 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js @@ -25,6 +25,7 @@ * @tags: [ * # TODO (SERVER-88125): Re-enable this test or add an explanation why it is incompatible. 
* embedded_router_incompatible, + * requires_scripting, * ] */ import { diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js index a0979ce469c..ce8756fe8f5 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js @@ -18,6 +18,7 @@ * @tags: [ * # TODO (SERVER-88125): Re-enable this test or add an explanation why it is incompatible. * embedded_router_incompatible, + * requires_scripting, * ] */ import { diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js index 5e52d226489..1a8167536f1 100644 --- a/jstests/sharding/shard_targeting.js +++ b/jstests/sharding/shard_targeting.js @@ -3,6 +3,7 @@ // If the optional query is not given, mongos will wrongly use the command // BSONObj itself as the query to target shards, which could return wrong // shards if the shard key happens to be one of the fields in the command object. 
+// @tags: [requires_scripting] import {ShardingTest} from "jstests/libs/shardingtest.js"; var s = new ShardingTest({shards: 2}); diff --git a/src/mongo/scripting/SConscript b/src/mongo/scripting/SConscript index 4a7a2e0680f..4d357656fdf 100644 --- a/src/mongo/scripting/SConscript +++ b/src/mongo/scripting/SConscript @@ -40,6 +40,13 @@ if jsEngine: ) scriptingEnv.InjectMozJS() + if get_option("spider-monkey-dbg") == "on": + scriptingEnv.Prepend( + CPPDEFINES=[ + "MONGO_SPIDERMONKEY_DBG", + ] + ) + scriptingEnv.Library( target="scripting", source=[ diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp index 53681f55890..b14c73d1cfc 100644 --- a/src/mongo/scripting/mozjs/implscope.cpp +++ b/src/mongo/scripting/mozjs/implscope.cpp @@ -556,6 +556,12 @@ MozJSImplScope::MozJSImplScope(MozJSScriptEngine* engine, boost::optional j _engine->getScopeInitCallback()(*this); } +#ifdef MONGO_SPIDERMONKEY_DBG + if (const auto* jsGcZealEnv = getenv("JS_GC_ZEAL"); jsGcZealEnv) { + LOGV2_INFO(9202400, "Initializing MozJSImplScope", "jsGcZeal"_attr = jsGcZealEnv); + } + +#endif currentJSScope = this; } @@ -1167,6 +1173,12 @@ bool MozJSImplScope::_checkErrorState(bool success, bool reportError, bool asser if (_status.isOK()) { JS::RootedValue excn(_context); if (JS_GetPendingException(_context, &excn)) { + // It's possible that we have an uncaught exception for OOM, which is reported on the + // exception status of the JSContext. We must check for this OOM exception before + // clearing the pending exception. This function checks both the status on the JSContext + // as well as the message string of the exception being provided. + const auto isThrowingOOM = JS_IsThrowingOutOfMemoryException(_context, excn); + // The pending JS exception needs to be cleared before we call ValueWriter below to // print the exception. 
ValueWriter::toStringData() may call back into the Interpret, // which asserts that we don't have an exception pending in DEBUG builds. @@ -1207,10 +1219,15 @@ bool MozJSImplScope::_checkErrorState(bool success, bool reportError, bool asser } else { str::stream ss; JSStringWrapper jsstr; - ss << "uncaught exception: " - << str::UTF8SafeTruncation(ValueWriter(_context, excn).toStringData(&jsstr), - kMaxErrorStringSize); - _status = Status(ErrorCodes::UnknownError, ss); + + if (isThrowingOOM) { + _status = Status(ErrorCodes::JSInterpreterFailure, "Out of memory"); + } else { + ss << "uncaught exception: " + << str::UTF8SafeTruncation(ValueWriter(_context, excn).toStringData(&jsstr), + kMaxErrorStringSize); + _status = Status(ErrorCodes::UnknownError, ss); + } } } else { _status = Status(ErrorCodes::UnknownError, "Unknown Failure from JSInterpreter"); @@ -1255,7 +1272,7 @@ MozJSImplScope* MozJSImplScope::getThreadScope() { void MozJSImplScope::setOOM() { _hasOutOfMemoryException = true; - JS_RequestInterruptCallback(_context); + JS_RequestInterruptCallbackCanWait(_context); } void MozJSImplScope::setParentStack(std::string parentStack) { diff --git a/src/mongo/scripting/mozjs/jscustomallocator.cpp b/src/mongo/scripting/mozjs/jscustomallocator.cpp index a4046927ba0..4eb0ab6fefc 100644 --- a/src/mongo/scripting/mozjs/jscustomallocator.cpp +++ b/src/mongo/scripting/mozjs/jscustomallocator.cpp @@ -115,11 +115,20 @@ void* wrap_alloc(T&& func, void* ptr, size_t bytes) { size_t mb = get_max_bytes(); size_t tb = get_total_bytes(); - if (mb && (tb + bytes > mb)) { + // During a GC cycle, GC::purgeRuntime() is called, which tries to free unused items in the + // SharedImmutableStringsCache while holding its corresponding mutex. Our js_free implementation + // calls wrap_alloc, with a value of 0 for 'bytes'. 
Previously, if we were already at the + // max_bytes limit when purging the runtime, the call to MozJSImplScope::setOOM() would request + // an urgent JS interrupt, which acquires a futex with order 500, while still holding the mutex + // for the SharedImmutableStringsCache (order 600). This triggered a failure of a MOZ_ASSERT + // which enforces correct lock ordering in the JS engine. For this reason, we avoid checking + // for an OOM here if we are requesting zero bytes (i.e freeing memory). + if (mb && bytes && (tb + bytes > mb)) { auto scope = mongo::mozjs::MozJSImplScope::getThreadScope(); - if (scope) + if (scope) { scope->setOOM(); - + return nullptr; + } // We fall through here because we want to let spidermonkey continue // with whatever it was doing. Calling setOOM will fail the top level // operation as soon as possible. diff --git a/src/mongo/shell/servers.js b/src/mongo/shell/servers.js index 53d9d039b0b..aa24b61cc50 100644 --- a/src/mongo/shell/servers.js +++ b/src/mongo/shell/servers.js @@ -1599,6 +1599,14 @@ MongoRunner._startWithArgs = function(argArray, env, waitForConnect) { argArray = appendSetParameterArgs(argArray); var port = MongoRunner.parsePort.apply(null, argArray); var pid = -1; + + if (jsTest.options().mozJSGCZeal) { + if (env === undefined) { + env = {}; + } + env["JS_GC_ZEAL"] = jsTest.options().mozJSGCZeal; + } + if (env === undefined) { pid = _startMongoProgram.apply(null, argArray); } else { diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js index 739c88f45df..bc7e45df873 100644 --- a/src/mongo/shell/utils.js +++ b/src/mongo/shell/utils.js @@ -526,6 +526,7 @@ jsTestOptions = function() { performTimeseriesCompressionIntermediateDataIntegrityCheckOnInsert: true, fuzzMongodConfigs: TestData.fuzzMongodConfigs || false, + mozJSGCZeal: TestData.mozJSGCZeal || "", }); } return _jsTestOptions; diff --git a/src/third_party/mozjs/extract/js/public/Exception.h b/src/third_party/mozjs/extract/js/public/Exception.h index 
649c40ac472..97fa73d6b14 100644 --- a/src/third_party/mozjs/extract/js/public/Exception.h +++ b/src/third_party/mozjs/extract/js/public/Exception.h @@ -42,6 +42,10 @@ extern JS_PUBLIC_API void JS_SetPendingException( extern JS_PUBLIC_API void JS_ClearPendingException(JSContext* cx); +// MONGODB MODIFICATION: Checks if we are currently throwing an OOM exception and the exception +// message matches the out of memory exception string. +extern JS_PUBLIC_API bool JS_IsThrowingOutOfMemoryException(JSContext* cx, const JS::Value& exc); + /** * If the given object is an exception object, the exception will have (or be * able to lazily create) an error report struct, and this function will return diff --git a/src/third_party/mozjs/extract/js/src/jsapi.cpp b/src/third_party/mozjs/extract/js/src/jsapi.cpp index 90ba9bcf085..f6c8aad77cb 100644 --- a/src/third_party/mozjs/extract/js/src/jsapi.cpp +++ b/src/third_party/mozjs/extract/js/src/jsapi.cpp @@ -3779,6 +3779,12 @@ JS_PUBLIC_API void JS_ClearPendingException(JSContext* cx) { cx->clearPendingException(); } +// MONGODB MODIFICATION: Checks if we are currently throwing an OOM exception and the exception +// message matches the out of memory exception string. 
+JS_PUBLIC_API bool JS_IsThrowingOutOfMemoryException(JSContext* cx, const JS::Value& exc) { + return cx->isThrowingOutOfMemoryException(exc); +} + JS::AutoSaveExceptionState::AutoSaveExceptionState(JSContext* cx) : context(cx), status(cx->status), exceptionValue(cx), exceptionStack(cx) { AssertHeapIsIdle(); diff --git a/src/third_party/mozjs/extract/js/src/vm/JSContext.cpp b/src/third_party/mozjs/extract/js/src/vm/JSContext.cpp index fd24c4d6736..011f49f2fb2 100644 --- a/src/third_party/mozjs/extract/js/src/vm/JSContext.cpp +++ b/src/third_party/mozjs/extract/js/src/vm/JSContext.cpp @@ -1114,11 +1114,23 @@ void JSContext::setRuntime(JSRuntime* rt) { runtime_ = rt; } -#if defined(NIGHTLY_BUILD) +// MONGODB MODIFICATION: This function is required to check for OOM exceptions which are thrown as a +// JSString instead of JSObject, which makes it difficult to compare against ErrorNumbers directly. +// Instead, we have to compare the message string obtained from the exception against the expected +// value. static bool IsOutOfMemoryException(JSContext* cx, const Value& v) { return v == StringValue(cx->names().outOfMemory); } -#endif + +// MONGODB MODIFICATION: +// When an OOM exception is thrown by SpiderMonkey (see https://github.com/10gen/mongo/blob/master/src/third_party/mozjs/extract/js/src/vm/JSContext.cpp#L270), +// the exception status is set to ExceptionStatus::OutOfMemory and the exception is generated as a +// JSString using the message in ErrorNumbers.msg. By checking for both these conditions, we can +// detect whether or not the exception we are handling is an out of memory exception thrown by +// SpiderMonkey. 
+bool JSContext::isThrowingOutOfMemoryException(const Value& exc) { + return isThrowingOutOfMemory() && IsOutOfMemoryException(this, exc); +} void JSContext::setPendingException(HandleValue v, Handle stack) { #if defined(NIGHTLY_BUILD) diff --git a/src/third_party/mozjs/extract/js/src/vm/JSContext.h b/src/third_party/mozjs/extract/js/src/vm/JSContext.h index 450b4ac2c42..f44f8e8b3a5 100644 --- a/src/third_party/mozjs/extract/js/src/vm/JSContext.h +++ b/src/third_party/mozjs/extract/js/src/vm/JSContext.h @@ -286,6 +286,10 @@ struct JS_PUBLIC_API JSContext : public JS::RootingContext, JS::NativeStackLimit stackLimitForJitCode(JS::StackKind kind); size_t gcSystemPageSize() { return js::gc::SystemPageSize(); } + // MONGODB MODIFICATION: Checks if we are currently throwing an OOM exception and the exception + // message matches the out of memory exception string. + bool isThrowingOutOfMemoryException(const JS::Value& exc); + /* * "Entering" a realm changes cx->realm (which changes cx->global). 
Note * that this does not push an Activation so it's possible for the caller's diff --git a/src/third_party/mozjs/get-sources.sh b/src/third_party/mozjs/get-sources.sh index c0234f51eab..cc2c29fecad 100755 --- a/src/third_party/mozjs/get-sources.sh +++ b/src/third_party/mozjs/get-sources.sh @@ -10,7 +10,7 @@ NAME=spidermonkey VERSION="115.7.0esr" LIB_GIT_BRANCH=spidermonkey-esr115.7-cpp-only -LIB_GIT_REVISION=e696addae6303fddfe1128f8d6090130bb68d61d +LIB_GIT_REVISION=bd739211fb34733b254407d78788a24b206ab99d LIB_GIT_REPO=git@github.com:mongodb-forks/spidermonkey.git DEST_DIR=$(git rev-parse --show-toplevel)/src/third_party/mozjs diff --git a/src/third_party/mozjs/include/js/Exception.h b/src/third_party/mozjs/include/js/Exception.h index 649c40ac472..97fa73d6b14 100644 --- a/src/third_party/mozjs/include/js/Exception.h +++ b/src/third_party/mozjs/include/js/Exception.h @@ -42,6 +42,10 @@ extern JS_PUBLIC_API void JS_SetPendingException( extern JS_PUBLIC_API void JS_ClearPendingException(JSContext* cx); +// MONGODB MODIFICATION: Checks if we are currently throwing an OOM exception and the exception +// message matches the out of memory exception string. +extern JS_PUBLIC_API bool JS_IsThrowingOutOfMemoryException(JSContext* cx, const JS::Value& exc); + /** * If the given object is an exception object, the exception will have (or be * able to lazily create) an error report struct, and this function will return