diff --git a/.eslintrc.yml b/.eslintrc.yml index 02ce8acdf41..49993a9d628 100644 --- a/.eslintrc.yml +++ b/.eslintrc.yml @@ -17,6 +17,7 @@ rules: no-redeclare: 0 no-constant-condition: 0 no-loss-of-precision: 0 + semi: 2 no-restricted-syntax: [ "error", diff --git a/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js b/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js index 474f838b151..df833ba2d32 100644 --- a/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js +++ b/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js @@ -330,7 +330,7 @@ assert.eq([2, 3, 4], embeddedResult[0].result); // Sort on array assert(coll.drop()); -const makeArray = (i) => [i, i + 1, i + 2] +const makeArray = (i) => [i, i + 1, i + 2]; assert.commandWorked(coll.insertMany([4, 2, 3, 1].map((i) => ({a: makeArray(i)})))); const nestedResult = coll.aggregate({$group: {_id: "", result: {$bottomN: {n: 3, output: "$a", sortBy: {"a": 1}}}}}) diff --git a/jstests/aggregation/documents_merge.js b/jstests/aggregation/documents_merge.js index 588ada7203a..4a9feb09b43 100644 --- a/jstests/aggregation/documents_merge.js +++ b/jstests/aggregation/documents_merge.js @@ -9,7 +9,7 @@ import { dropWithoutImplicitRecreate, withEachMergeMode -} from "jstests/aggregation/extras/merge_helpers.js" +} from "jstests/aggregation/extras/merge_helpers.js"; const outColl = db[`${jsTest.name()}_out`]; const outCollName = outColl.getName(); diff --git a/jstests/aggregation/expressions/expression_get_field.js b/jstests/aggregation/expressions/expression_get_field.js index 6ec3cf51040..68885838cb1 100644 --- a/jstests/aggregation/expressions/expression_get_field.js +++ b/jstests/aggregation/expressions/expression_get_field.js @@ -81,7 +81,7 @@ assertGetFieldFailedWithCode({field: {$const: []}, input: {"a": 1}}, [5654602, 3 // Test that $getField returns the correct value from the provided object. 
assertGetFieldResultsEq({field: "a", input: {a: "b"}}, [{_id: 0, test: "b"}, {_id: 1, test: "b"}]); assertGetFieldResultsEq({field: {$concat: ["a", "b"]}, input: {ab: "b"}}, - [{_id: 0, test: "b"}, {_id: 1, test: "b"}]) + [{_id: 0, test: "b"}, {_id: 1, test: "b"}]); assertGetFieldResultsEq({field: {$cond: [false, null, "x"]}, input: {x: "b"}}, [{_id: 0, test: "b"}, {_id: 1, test: "b"}]); assertGetFieldResultsEq({field: {$cond: [{$eq: ["$y", 9]}, null, "x"]}, input: {x: "b"}}, diff --git a/jstests/aggregation/expressions/in.js b/jstests/aggregation/expressions/in.js index 7b312a38713..2030a12deba 100644 --- a/jstests/aggregation/expressions/in.js +++ b/jstests/aggregation/expressions/in.js @@ -182,7 +182,7 @@ testExpressionWithIntersection({ array2: [2, 3, 4], elementIsIncluded: false, queryFormShouldBeEquivalent: false -}) +}); testExpressionWithIntersection({ element: 2, @@ -190,7 +190,7 @@ testExpressionWithIntersection({ array2: [2, 3, 4], elementIsIncluded: true, queryFormShouldBeEquivalent: false -}) +}); testExpressionWithIntersection({ element: 1, @@ -198,7 +198,7 @@ testExpressionWithIntersection({ array2: [4, 5, 6], elementIsIncluded: false, queryFormShouldBeEquivalent: false -}) +}); testExpressionWithIntersection({ element: 1, @@ -206,7 +206,7 @@ testExpressionWithIntersection({ array2: [], elementIsIncluded: false, queryFormShouldBeEquivalent: false -}) +}); testExpressionWithIntersection({ element: 1, @@ -214,7 +214,7 @@ testExpressionWithIntersection({ array2: [4, 5, 6], elementIsIncluded: false, queryFormShouldBeEquivalent: false -}) +}); /* ------------------------ Mismatched Types Tests ------------------------ */ diff --git a/jstests/aggregation/ifnull.js b/jstests/aggregation/ifnull.js index 530adc8f27f..3dffbbcdade 100644 --- a/jstests/aggregation/ifnull.js +++ b/jstests/aggregation/ifnull.js @@ -108,4 +108,4 @@ assert.commandWorked(t.insertOne({three: 3, my_list_of_docs: [{z: 1}, {z: 2}]})) assertQueryResult({"three": 3, "my_list_of_docs": {"b": 3}}, [ {$set: {my_list_of_docs: {$ifNull: [null, {b: "$three"}]}}}, {$project: {_id: 0, my_list_of_docs: 1, three: 1}} -]) +]); diff --git a/jstests/aggregation/match_swapping_renamed_fields.js b/jstests/aggregation/match_swapping_renamed_fields.js index 790bc0411ee..fa661b579bf 100644 --- a/jstests/aggregation/match_swapping_renamed_fields.js +++ b/jstests/aggregation/match_swapping_renamed_fields.js @@ -211,7 +211,7 @@ assert.eq(2, matchStages.length); // Test that we correctly match using the '$elemMatch' expression on renamed subfields. Designed to // reproduce HELP-59485. -coll.drop() +coll.drop(); assert.commandWorked(coll.insertMany([ { _id: 0, @@ -266,7 +266,7 @@ runElemMatchTest({ } ], expectedDocumentIds: [1] -}) +}); // Repeat the previous test case, but this time with a $project stage targeting a deeply nested // transform. @@ -289,7 +289,7 @@ runElemMatchTest({ } ], expectedDocumentIds: [1], -}) +}); // Similarly, ensure that we match on the correct documents when using $elemMatch expressions on // simple dot-syntax renamed fields. 
@@ -303,4 +303,4 @@ runElemMatchTest({ } ], expectedDocumentIds: [1] -}) +}); diff --git a/jstests/aggregation/sources/densify/doc_on_bound.js b/jstests/aggregation/sources/densify/doc_on_bound.js index 3d5b4dd1f3c..df560b98520 100644 --- a/jstests/aggregation/sources/densify/doc_on_bound.js +++ b/jstests/aggregation/sources/densify/doc_on_bound.js @@ -60,7 +60,7 @@ function testDocOnBoundsPartitioned() { {"time": ISODate("2023-09-15T06:00:00Z")}, {"time": ISODate("2023-09-15T12:00:00Z")}, {"time": ISODate("2023-09-15T18:00:00Z")} - ] + ]; assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected)); } @@ -110,7 +110,7 @@ function testDocOnBoundsNotPartitioned() { {"time": ISODate("2023-09-15T06:00:00Z")}, {"time": ISODate("2023-09-15T12:00:00Z")}, {"time": ISODate("2023-09-15T18:00:00Z")} - ] + ]; assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected)); } @@ -175,7 +175,7 @@ function testDocOnAndOffFullBound() { {"time": ISODate("2023-09-15T12:00:00Z")}, {"time": ISODate("2023-09-15T18:00:00Z")}, {"time": ISODate("2023-09-15T18:00:00Z"), "orig": true} - ] + ]; assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected)); } @@ -213,7 +213,7 @@ function testFullNoPartition() { {"time": ISODate("2023-09-14T12:00:00Z")}, {"time": ISODate("2023-09-14T18:00:00Z")}, {"time": ISODate("2023-09-15T00:00:00Z"), "orig": true}, - ] + ]; assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected)); } testDocOnBoundsPartitioned(); diff --git a/jstests/aggregation/sources/densify/libs/densify_in_js.js b/jstests/aggregation/sources/densify/libs/densify_in_js.js index 4e4b4e2dbfb..49db6c4f79a 100644 --- a/jstests/aggregation/sources/densify/libs/densify_in_js.js +++ b/jstests/aggregation/sources/densify/libs/densify_in_js.js @@ -103,14 +103,13 @@ export function densifyInJS(stage, docs) { // Explicit ranges always generate on-step relative to the lower-bound of the range, // this function encapsulates the logic to do that for dates (requires a loop since steps aren't // always constant sized). - const getNextStepFromBase = - (val, base, step) => { - let nextStep = base; - while (nextStep <= val) { - nextStep = add(nextStep, step); - } - return nextStep; + const getNextStepFromBase = (val, base, step) => { + let nextStep = base; + while (nextStep <= val) { + nextStep = add(nextStep, step); } + return nextStep; + }; if (bounds === "full") { if (docs.length == 0) { @@ -120,11 +119,9 @@ export function densifyInJS(stage, docs) { const maxValue = docsWithoutNulls[docsWithoutNulls.length - 1][field]; return densifyInJS({field: stage.field, range: {step, bounds: [minValue, maxValue], unit}}, docs); - } - else if (bounds === "partition") { + } else if (bounds === "partition") { throw new Error("Partitioning not supported by JS densify."); - } - else if (bounds.length == 2) { + } else if (bounds.length == 2) { const [lower, upper] = bounds; let currentVal = docsWithoutNulls.length > 0 ? 
Math.min(docsWithoutNulls[0], sub(lower, step)) diff --git a/jstests/aggregation/sources/documents.js b/jstests/aggregation/sources/documents.js index 5b023a3f082..65846339f91 100644 --- a/jstests/aggregation/sources/documents.js +++ b/jstests/aggregation/sources/documents.js @@ -141,12 +141,12 @@ assert.throwsWithCode(() => { }, ErrorCodes.InvalidNamespace); // $unionWith must fail because it requires a collection even when database does not exist -assert.throwsWithCode( - () => {nonExistingDB.aggregate([{ +assert.throwsWithCode(() => { + nonExistingDB.aggregate([{ $unionWith: {pipeline: [{$documents: {$map: {input: {$range: [0, 5]}, in : {x: "$$this"}}}}]} - }])}, - ErrorCodes.InvalidNamespace); + }]); +}, ErrorCodes.InvalidNamespace); // $unionWith must fail due to no $document assert.throwsWithCode(() => { diff --git a/jstests/aggregation/sources/setWindowFields/min_max.js b/jstests/aggregation/sources/setWindowFields/min_max.js index 0e6f110bf16..3d2a7d10989 100644 --- a/jstests/aggregation/sources/setWindowFields/min_max.js +++ b/jstests/aggregation/sources/setWindowFields/min_max.js @@ -37,7 +37,7 @@ let expectedResults = [ {maxStr: "transmit Ohio AI", minStr: "Louisiana system-worthy Borders"}, {maxStr: "transmit Ohio AI", minStr: "compressing Supervisor Synchronised"}, {maxStr: "fuchsia", minStr: "Inlet"}, -] +]; coll.drop(); for (let i = 0; i < documents.length; i++) { @@ -67,9 +67,9 @@ documents = [ {_id: 0, "num": 10, "str": "ABCDEFGHIJK"}, {_id: 1, "num": 3, "str": "ABCDE"}, {_id: 2, "num": 5, "str": "AB"}, -] +]; -expectedResults = [{minStr: "ABCDEFGHIJK"}, {minStr: "AB"}, {minStr: "AB"}] +expectedResults = [{minStr: "ABCDEFGHIJK"}, {minStr: "AB"}, {minStr: "AB"}]; coll.drop(); for (let i = 0; i < documents.length; i++) { diff --git a/jstests/aggregation/sources/setWindowFields/rank.js b/jstests/aggregation/sources/setWindowFields/rank.js index cd6ad2b3e98..58b40641825 100644 --- a/jstests/aggregation/sources/setWindowFields/rank.js +++ b/jstests/aggregation/sources/setWindowFields/rank.js @@ -68,7 +68,7 @@ let pipeline = [{ sortBy: {_id: 1}, output: {rank: {$rank: "$_id"}}, } -}] +}]; assertErrCodeAndErrMsgContains(coll, pipeline, 5371603, "$rank"); // Rank based accumulators must have a sortBy. diff --git a/jstests/auth/default_max_time_ms_metrics.js b/jstests/auth/default_max_time_ms_metrics.js index a24078ea51b..f788c40f1fb 100644 --- a/jstests/auth/default_max_time_ms_metrics.js +++ b/jstests/auth/default_max_time_ms_metrics.js @@ -68,7 +68,7 @@ function runTests(conn, directConn) { connectionsToCheck.forEach((db, i) => { const serverStatus = assert.commandWorked(db.runCommand({serverStatus: 1})); assert.gt(serverStatus.metrics.operation[[metricField]], beforeMetrics[i]); - }) + }); } // Times out due to the default value. 
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js index 22b20656b99..94f93dfaa9f 100644 --- a/jstests/auth/lib/commands_lib.js +++ b/jstests/auth/lib/commands_lib.js @@ -7395,7 +7395,7 @@ export const authCommandsLib = { apiParameters: {version: "1", strict: true} }, setup: function(db) { - const collName = "validate_db_metadata_command_specific_db" + const collName = "validate_db_metadata_command_specific_db"; assert.commandWorked(db.getSiblingDB(firstDbName).createCollection(collName)); assert.commandWorked(db.getSiblingDB(secondDbName).createCollection(collName)); assert.commandWorked(db.getSiblingDB("ThirdDB").createCollection(collName)); @@ -7429,7 +7429,7 @@ export const authCommandsLib = { testname: "validate_db_metadata_command_all_dbs", command: {validateDBMetadata: 1, apiParameters: {version: "1", strict: true}}, setup: function(db) { - const collName = "validate_db_metadata_command_all_dbs" + const collName = "validate_db_metadata_command_all_dbs"; assert.commandWorked(db.getSiblingDB(firstDbName).createCollection(collName)); assert.commandWorked(db.getSiblingDB(secondDbName).createCollection(collName)); }, diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js index 60090675c54..1ee8e513cc0 100644 --- a/jstests/change_streams/change_stream.js +++ b/jstests/change_streams/change_stream.js @@ -263,7 +263,7 @@ let resumeToken = cursor.postBatchResumeToken._data; assert.soon(() => { assert.commandWorked(db.t1.insert({a: 2})); cursor = cst.assertNoChange(cursor); - return resumeToken != cursor.postBatchResumeToken._data + return resumeToken != cursor.postBatchResumeToken._data; }); // With trivially false predicates @@ -273,7 +273,7 @@ resumeToken = cursor.postBatchResumeToken._data; assert.soon(() => { assert.commandWorked(db.t1.insert({a: 2})); cursor = cst.assertNoChange(cursor); - return resumeToken != cursor.postBatchResumeToken._data + return resumeToken != cursor.postBatchResumeToken._data; }); cst.cleanUp(); diff --git a/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js index d95c2eeee87..056d6d9fab2 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js @@ -205,20 +205,19 @@ assert.eq(stringValues.slice(0, 2), ["Value", "vAlue"]); // transaction, they effectively occur at exactly the same time. 
assert.sameMembers(stringValues.slice(2, 4), ["vaLue", "valUe"]); -const verifyOnChangeStream = - (matchExpression, hasEntriesReturned) => { - const string = JSON.stringify(matchExpression); - const changeStream = coll.aggregate([{$changeStream: {}}, {$match: matchExpression}]); - assert.commandWorked(coll.insert({string})); - if (hasEntriesReturned) { - assert.soon(() => changeStream.hasNext()); - const event = changeStream.next(); - assert.eq(event.fullDocument.string, string, event); - } else { - assert(!changeStream.hasNext()); - } - changeStream.close(); +const verifyOnChangeStream = (matchExpression, hasEntriesReturned) => { + const string = JSON.stringify(matchExpression); + const changeStream = coll.aggregate([{$changeStream: {}}, {$match: matchExpression}]); + assert.commandWorked(coll.insert({string})); + if (hasEntriesReturned) { + assert.soon(() => changeStream.hasNext()); + const event = changeStream.next(); + assert.eq(event.fullDocument.string, string, event); + } else { + assert(!changeStream.hasNext()); } + changeStream.close(); +}; // Run a change stream with empty field path match expression to match null. Expect to return all // the oplog entries as the field "" is not set in oplogs. diff --git a/jstests/change_streams/shard_collection_event.js b/jstests/change_streams/shard_collection_event.js index e752eb07f03..e6a09ff8bcc 100644 --- a/jstests/change_streams/shard_collection_event.js +++ b/jstests/change_streams/shard_collection_event.js @@ -194,7 +194,7 @@ function runTest(startChangeStream) { "capped": false, "collation": {"locale": "simple"} } - }) + }); } assert.commandWorked(db.adminCommand({enableSharding: dbName})); diff --git a/jstests/concurrency/fsm_workloads/agg_lookup_in_txn.js b/jstests/concurrency/fsm_workloads/agg_lookup_in_txn.js index 2c8c837552b..75e3e3bae64 100644 --- a/jstests/concurrency/fsm_workloads/agg_lookup_in_txn.js +++ b/jstests/concurrency/fsm_workloads/agg_lookup_in_txn.js @@ -183,7 +183,7 @@ export const $config = (function() { coll_aux.createIndex({_id: 1}); const mustShardForeignCollection = cluster.isSharded() && Random.rand() > 0.5; if (mustShardForeignCollection) { - jsTest.log("Sharding auxiliary collection") + jsTest.log("Sharding auxiliary collection"); cluster.shardCollection(coll_aux, this.shardKey, false); } else { jsTest.log("Auxiliary collection will be unsharded"); diff --git a/jstests/concurrency/fsm_workloads/exchange_producer.js b/jstests/concurrency/fsm_workloads/exchange_producer.js index b7bf4f35552..f5fa3dbc16d 100644 --- a/jstests/concurrency/fsm_workloads/exchange_producer.js +++ b/jstests/concurrency/fsm_workloads/exchange_producer.js @@ -50,15 +50,18 @@ export const $config = (function() { return function consumerCallback(db, collName) { return runGetMoreOnCursor( db, collName, consumerId, this.batchSize, this.cursorIds, this.sessionId); - } + }; } return { // A no-op starting state so the worker threads don't all start on the same cursors. 
- init: function init(db, collName) {}, consumer0: makeConsumerCallback(0), - consumer1: makeConsumerCallback(1), consumer2: makeConsumerCallback(2), - consumer3: makeConsumerCallback(3), consumer4: makeConsumerCallback(4), - } + init: function init(db, collName) {}, + consumer0: makeConsumerCallback(0), + consumer1: makeConsumerCallback(1), + consumer2: makeConsumerCallback(2), + consumer3: makeConsumerCallback(3), + consumer4: makeConsumerCallback(4), + }; }(); var allStatesEqual = @@ -124,7 +127,13 @@ export const $config = (function() { // threadCount must be equal to numConsumers. We need as many worker threads as consumers to // avoid a deadlock where all threads are waiting for one particular cursor to run a getMore. return { - threadCount: data.numConsumers, iterations: 20, startState: 'init', states: states, - transitions: transitions, setup: setup, teardown: teardown, data: data - } + threadCount: data.numConsumers, + iterations: 20, + startState: 'init', + states: states, + transitions: transitions, + setup: setup, + teardown: teardown, + data: data + }; })(); diff --git a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js index 323e34945cb..c8064a2c0bb 100644 --- a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js +++ b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js @@ -261,7 +261,7 @@ export const $config = (function() { // Check guarantees IF NO CONCURRENT DROP is running. // If a concurrent rename came in, then either the full operation succeded (meaning // there will be 0 documents left) or the insert came in first. - assert.contains(currentDocs, [0, numDocs], threadInfos) + assert.contains(currentDocs, [0, numDocs], threadInfos); jsTestLog('CRUD - Update ' + threadInfos); res = coll.update({generation: generation}, {$set: {updated: true}}, {multi: true}); diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_paused_migrations.js b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_paused_migrations.js index 8a22e875bbc..22066ca4eb4 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_paused_migrations.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_paused_migrations.js @@ -77,7 +77,9 @@ function setPauseMigrationsClusterParameter(db, cluster, enabled) { cluster.executeOnMongosNodes((db) => { // Ensure all mongoses have refreshed cluster parameter after being set. 
- assert.soon(() => {return getPauseMigrationsClusterParameter(db) === enabled}); + assert.soon(() => { + return getPauseMigrationsClusterParameter(db) === enabled; + }); }); } @@ -225,7 +227,7 @@ export const $config = extendWorkload($baseConfig, function($config, $super) { ignoreErrorsIfInNonTransactionalStepdownSuite(() => { const updates = this.createRandomUpdateBatch(collName); jsTestLog("Executing updates: " + tojson(updates)); - const result = db.runCommand({update: collName, updates}) + const result = db.runCommand({update: collName, updates}); jsTestLog("Result: " + tojson(result)); assert.commandWorked(result); let totalUpdates = 0; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_findAndModify_update.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_findAndModify_update.js index d3013db46f9..5813f31c14e 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_findAndModify_update.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_findAndModify_update.js @@ -14,9 +14,9 @@ import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; import { $config as $baseConfig -} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_arbitrary_updates.js' +} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_arbitrary_updates.js'; -const logCollection = "log_collection" +const logCollection = "log_collection"; export const $config = extendWorkload($baseConfig, function($config, $super) { // Perform arbitrary updates on metric fields of measurements. diff --git a/jstests/concurrency/fsm_workloads/timeseries_agg_out.js b/jstests/concurrency/fsm_workloads/timeseries_agg_out.js index a96f5e4a239..46eb70cb580 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_agg_out.js +++ b/jstests/concurrency/fsm_workloads/timeseries_agg_out.js @@ -72,7 +72,7 @@ export const $config = extendWorkload($baseConfig, function($config, $super) { // when the mongos is fetching data from the shard using getMore(). Remove // theinterruptedQueryErrors from allowedErrorCodes once this bug is being addressed if (TestData.runningWithBalancer) { - allowedErrorCodes = allowedErrorCodes.concat(interruptedQueryErrors) + allowedErrorCodes = allowedErrorCodes.concat(interruptedQueryErrors); } assert.commandWorkedOrFailedWithCode(res, allowedErrorCodes); diff --git a/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js b/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js index a1a8254fd48..cb17b9c120d 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js +++ b/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js @@ -26,7 +26,7 @@ function retryUntilWorked(query) { return query(); } catch (e) { if (e.code == ErrorCodes.QueryPlanKilled && TestData.runningWithBalancer) { - attempts++ + attempts++; } else { throw e; } @@ -134,13 +134,16 @@ export const $config = (function() { // Now validate the state of each reading. We will check all of the seed data and each // reading that we may have inserted. 
for (let readingNo = 0; readingNo < data.nTotalReadings; ++readingNo) { - const wasDeleted = retryUntilWorked( - () => {return logColl.count({readingNo: readingNo, deleted: true}) > 0}); - const wasInserted = retryUntilWorked( - () => {return logColl.count({readingNo: readingNo, inserted: true}) > 0}); + const wasDeleted = retryUntilWorked(() => { + return logColl.count({readingNo: readingNo, deleted: true}) > 0; + }); + const wasInserted = retryUntilWorked(() => { + return logColl.count({readingNo: readingNo, inserted: true}) > 0; + }); - const nReadings = - retryUntilWorked(() => {return db[collName].count({readingNo: readingNo})}); + const nReadings = retryUntilWorked(() => { + return db[collName].count({readingNo: readingNo}); + }); if (wasDeleted && !wasInserted) { // Easy case: this reading was deleted and never inserted - we expect 0 records. diff --git a/jstests/concurrency/fsm_workloads/timeseries_findAndModify_updates_and_inserts.js b/jstests/concurrency/fsm_workloads/timeseries_findAndModify_updates_and_inserts.js index 574cb4e5fe7..cf7529349f5 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_findAndModify_updates_and_inserts.js +++ b/jstests/concurrency/fsm_workloads/timeseries_findAndModify_updates_and_inserts.js @@ -44,7 +44,7 @@ export const $config = extendWorkload($baseConfig, function($config, $super) { init: {findAndUpdateMany: 0.25, insert: 0.75}, findAndUpdateMany: {findAndUpdateMany: 0.5, insert: 0.5}, insert: {findAndUpdateMany: 0.5, insert: 0.5} - } + }; return $config; }); diff --git a/jstests/concurrency/fsm_workloads/timeseries_mirrored_writes.js b/jstests/concurrency/fsm_workloads/timeseries_mirrored_writes.js index 3df4ea0403f..b0c123b29f0 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_mirrored_writes.js +++ b/jstests/concurrency/fsm_workloads/timeseries_mirrored_writes.js @@ -104,7 +104,7 @@ export const $config = (function() { }; function setup(db, collName, cluster) { - assert.commandWorked(db.createCollection(this.getRegularCollectionName())) + assert.commandWorked(db.createCollection(this.getRegularCollectionName())); assert.commandWorked(db.createCollection(this.getTimeseriesCollectionName(), { timeseries: { timeField: this.timeFieldName, diff --git a/jstests/concurrency/fsm_workloads/timeseries_reshard_with_inserts.js b/jstests/concurrency/fsm_workloads/timeseries_reshard_with_inserts.js index c6d50fa7097..d0822ee6143 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_reshard_with_inserts.js +++ b/jstests/concurrency/fsm_workloads/timeseries_reshard_with_inserts.js @@ -76,9 +76,9 @@ export const $config = (function() { if (this.tid === 0 && shouldContinueResharding) { let newShardKey; if (bsonWoCompare(this.shardKey, shardKeys[0]) === 0) { - newShardKey = shardKeys[1] + newShardKey = shardKeys[1]; } else { - newShardKey = shardKeys[0] + newShardKey = shardKeys[0]; } executeReshardTimeseries(db, collName, newShardKey); diff --git a/jstests/core/administrative/profile/profile_delete.js b/jstests/core/administrative/profile/profile_delete.js index fe88bef7e81..deedb3f5c01 100644 --- a/jstests/core/administrative/profile/profile_delete.js +++ b/jstests/core/administrative/profile/profile_delete.js @@ -45,7 +45,7 @@ for (let i = 0; i < 10; ++i) { assert.commandWorked(coll.insert(docs)); assert.commandWorked(coll.createIndex({a: 1})); -let testComment = "test1" +let testComment = "test1"; assert.commandWorked(testDB.runCommand({ delete: collName, deletes: [{q: {a: {$gte: 2}, b: {$gte: 2}}, limit: 1, collation: {locale: "fr"}}], diff 
--git a/jstests/core/doc_validation/doc_validation_options.js b/jstests/core/doc_validation/doc_validation_options.js index 077bdb77936..0c75c187f4d 100644 --- a/jstests/core/doc_validation/doc_validation_options.js +++ b/jstests/core/doc_validation/doc_validation_options.js @@ -61,7 +61,7 @@ if (FeatureFlagUtil.isPresentAndEnabled(db, "ErrorAndLogValidationAction")) { t.runCommand("collMod", {validationAction: "errorAndLog"}), ErrorCodes.InvalidOptions); if (res.ok) { assertFailsValidation(t.update({}, {$set: {a: 2}})); - checkLogsForFailedValidation(errorAndLogId) + checkLogsForFailedValidation(errorAndLogId); // make sure persisted const info = db.getCollectionInfos({name: t.getName()})[0]; assert.eq("errorAndLog", info.options.validationAction, tojson(info)); @@ -74,7 +74,7 @@ t.update({}, {$set: {a: 2}}); assert.eq(1, t.find({a: 2}).itcount()); // check log for message. In case of sharded deployments, look on all shards and expect the log to // be found on one of them. -checkLogsForFailedValidation(warnLogId) +checkLogsForFailedValidation(warnLogId); // make sure persisted const info = db.getCollectionInfos({name: t.getName()})[0]; assert.eq("warn", info.options.validationAction, tojson(info)); diff --git a/jstests/core/index/covered/covered_index_simple_id.js b/jstests/core/index/covered/covered_index_simple_id.js index 37a8176006b..0b7c72f2f3f 100644 --- a/jstests/core/index/covered/covered_index_simple_id.js +++ b/jstests/core/index/covered/covered_index_simple_id.js @@ -68,7 +68,7 @@ switch (getOptimizer(plan)) { // optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan. break; default: - break + break; } // Test in query @@ -86,6 +86,6 @@ switch (getOptimizer(plan)) { // optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan. break; default: - break + break; } print('all tests pass'); diff --git a/jstests/core/index/express.js b/jstests/core/index/express.js index 311a994bf3b..95caf316add 100644 --- a/jstests/core/index/express.js +++ b/jstests/core/index/express.js @@ -62,7 +62,7 @@ let isShardedColl = false; function recreateCollWith(documents) { coll.drop(); assert.commandWorked(coll.insert(documents)); - isShardedColl = FixtureHelpers.isSharded(coll) + isShardedColl = FixtureHelpers.isSharded(coll); } recreateCollWith(docs); @@ -110,7 +110,7 @@ for (let index of [{a: 1}, {a: -1}, {a: 1, b: 1}, {a: 1, b: -1}, {a: -1, b: 1}, // When the index is not dotted, queries against nested fields do not use express unless they look // for an exact match. 
-coll.dropIndexes() +coll.dropIndexes(); assert.commandWorked(coll.createIndex({a: 1})); runExpressTest({filter: {'a.b': 0}, limit: 1, result: [], usesExpress: false}); runExpressTest({ diff --git a/jstests/core/index/hidden_index.js b/jstests/core/index/hidden_index.js index de32ccd460e..468a5bf70b8 100644 --- a/jstests/core/index/hidden_index.js +++ b/jstests/core/index/hidden_index.js @@ -70,7 +70,7 @@ function validateHiddenIndexBehaviour( assert.eq(numOfUsedIndexes(explain), 0); break; default: - break + break; } assert.commandWorked(coll.hideIndex(index_name)); @@ -102,7 +102,7 @@ function validateHiddenIndexBehaviour( assert.eq(numOfUsedIndexes(explain), 0); break; default: - break + break; } assert.commandWorked(coll.dropIndex(index_name)); diff --git a/jstests/core/index/index_check2.js b/jstests/core/index/index_check2.js index ce4f4dc819e..1942caae2a4 100644 --- a/jstests/core/index/index_check2.js +++ b/jstests/core/index/index_check2.js @@ -53,5 +53,5 @@ switch (getOptimizer(t.find(q1).explain())) { // optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan. break; default: - break + break; } diff --git a/jstests/core/index/index_check7.js b/jstests/core/index/index_check7.js index 5b8e4c7370f..0a4abf767c2 100644 --- a/jstests/core/index/index_check7.js +++ b/jstests/core/index/index_check7.js @@ -42,5 +42,5 @@ switch (getOptimizer(explainResult)) { break; } default: - break + break; } diff --git a/jstests/core/index/index_filter_commands.js b/jstests/core/index/index_filter_commands.js index f6e4bffc488..f7c791ae822 100644 --- a/jstests/core/index/index_filter_commands.js +++ b/jstests/core/index/index_filter_commands.js @@ -292,7 +292,7 @@ if (!FixtureHelpers.isMongos(db)) { explain = getSingleNodeExplain(coll.find({z: 1}).explain(verbosity)); assert.eq(true, getQueryPlanner(explain).indexFilterSet, explain); explain = - getSingleNodeExplain(coll.find(queryA1, projectionA1).sort(sortA1).explain(verbosity)) + getSingleNodeExplain(coll.find(queryA1, projectionA1).sort(sortA1).explain(verbosity)); assert.eq(true, getQueryPlanner(explain).indexFilterSet, verbosity); }); } else { diff --git a/jstests/core/index/index_partial_read_ops.js b/jstests/core/index/index_partial_read_ops.js index 40584ef73cb..5586a905949 100644 --- a/jstests/core/index/index_partial_read_ops.js +++ b/jstests/core/index/index_partial_read_ops.js @@ -200,7 +200,8 @@ const coll = db.index_partial_read_ops; explain = getSingleNodeExplain(coll.explain('executionStats').find({x: 2, a: 5}).finish()); assert.eq(1, explain.executionStats.nReturned); assert(isCollscan(db, getWinningPlan(explain.queryPlanner))); - explain = getSingleNodeExplain(coll.explain('executionStats').find({x: 3, a: 5, b: 1}).finish()) + explain = + getSingleNodeExplain(coll.explain('executionStats').find({x: 3, a: 5, b: 1}).finish()); assert.eq(1, explain.executionStats.nReturned); assert(isCollscan(db, getWinningPlan(explain.queryPlanner))); })(); diff --git a/jstests/core/query/agg_hint.js b/jstests/core/query/agg_hint.js index 8b4287c350f..aa9f574efbb 100644 --- a/jstests/core/query/agg_hint.js +++ b/jstests/core/query/agg_hint.js @@ -14,9 +14,9 @@ const isHintsToQuerySettingsSuite = TestData.isHintsToQuerySettingsSuite || fals const testDB = db.getSiblingDB("agg_hint"); assert.commandWorked(testDB.dropDatabase()); -const collName = jsTestName() + "_col" +const collName = jsTestName() + "_col"; const coll = testDB.getCollection(collName); -const viewName = jsTestName() + "_view" +const viewName = jsTestName() + 
"_view"; const view = testDB.getCollection(viewName); function confirmWinningPlanUsesExpectedIndex( diff --git a/jstests/core/query/covered_multikey.js b/jstests/core/query/covered_multikey.js index 7c056941ab5..c90a5aa7409 100644 --- a/jstests/core/query/covered_multikey.js +++ b/jstests/core/query/covered_multikey.js @@ -38,7 +38,7 @@ switch (getOptimizer(explainRes)) { // TODO SERVER-77719: Ensure that the decision for using the scan lines up with CQF // optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan. assert(isCollscan(db, winningPlan)); - break + break; } } assert(!planHasStage(db, winningPlan, "FETCH")); @@ -77,7 +77,7 @@ switch (getOptimizer(explainRes)) { // TODO SERVER-77719: Ensure that the decision for using the scan lines up with CQF // optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan. assert(isCollscan(db, winningPlan)); - break + break; } } diff --git a/jstests/core/query/explain/explain_find.js b/jstests/core/query/explain/explain_find.js index eb52d01e188..7cb406b94ce 100644 --- a/jstests/core/query/explain/explain_find.js +++ b/jstests/core/query/explain/explain_find.js @@ -49,11 +49,11 @@ if ((serverVer[0] == 7 && serverVer[1] >= 3) || serverVer[0] > 7) { // Explain output differs slightly under SBE versus classic engine if (explain.queryPlanner.winningPlan.queryPlan) { - assert.eq("EOF", explain.queryPlanner.winningPlan.queryPlan.stage) + assert.eq("EOF", explain.queryPlanner.winningPlan.queryPlan.stage); } else { - assert.eq("EOF", explain.queryPlanner.winningPlan.stage) + assert.eq("EOF", explain.queryPlanner.winningPlan.stage); } - assert.eq("does_not_exist_hopefully.jstests_explain_find", explain.queryPlanner.namespace) - assert.eq({"a": {"$lte": 2}}, explain.queryPlanner.parsedQuery) + assert.eq("does_not_exist_hopefully.jstests_explain_find", explain.queryPlanner.namespace); + assert.eq({"a": {"$lte": 2}}, explain.queryPlanner.parsedQuery); } diff --git a/jstests/core/query/explain/explain_find_and_modify.js b/jstests/core/query/explain/explain_find_and_modify.js index 95fffa8af1a..70667a2653a 100644 --- a/jstests/core/query/explain/explain_find_and_modify.js +++ b/jstests/core/query/explain/explain_find_and_modify.js @@ -274,7 +274,7 @@ function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentP explainOut[key], expectedMatches[key], preMsg, totalFieldName); } else if (key == "stage" && expectedMatches[key] == "UPDATE") { // Express handles update-by-id post 8.0 - let want = [expectedMatches[key], "EXPRESS_UPDATE"] + let want = [expectedMatches[key], "EXPRESS_UPDATE"]; assert.contains(explainOut[key], want, preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" + @@ -282,7 +282,7 @@ function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentP } else if (key == "stage" && expectedMatches[key] == "DELETE") { // Express handles delete-by-id post 8.0 - let want = [expectedMatches[key], "EXPRESS_DELETE"] + let want = [expectedMatches[key], "EXPRESS_DELETE"]; assert.contains(explainOut[key], want, preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" + diff --git a/jstests/core/query/explain/explain_find_trivially_false_predicates.js b/jstests/core/query/explain/explain_find_trivially_false_predicates.js index 17af8e2f0e3..2c6915e0d1a 100644 --- a/jstests/core/query/explain/explain_find_trivially_false_predicates.js +++ b/jstests/core/query/explain/explain_find_trivially_false_predicates.js @@ -8,7 +8,7 @@ */ import 
{getWinningPlanFromExplain, isEofPlan} from "jstests/libs/analyze_plan.js"; -import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js" +import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; const collName = "jstests_explain_find_trivially_false_predicates"; diff --git a/jstests/core/query/explain/explain_find_trivially_false_predicates_in_agg_pipelines.js b/jstests/core/query/explain/explain_find_trivially_false_predicates_in_agg_pipelines.js index b2cb3bd63c8..18400751827 100644 --- a/jstests/core/query/explain/explain_find_trivially_false_predicates_in_agg_pipelines.js +++ b/jstests/core/query/explain/explain_find_trivially_false_predicates_in_agg_pipelines.js @@ -15,7 +15,7 @@ * ] */ -import {getExplainPipelineFromAggregationResult} from "jstests/aggregation/extras/utils.js" +import {getExplainPipelineFromAggregationResult} from "jstests/aggregation/extras/utils.js"; import { aggPlanHasStage, getAggPlanStages, @@ -23,7 +23,7 @@ import { isEofPlan, planHasStage } from "jstests/libs/analyze_plan.js"; -import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js" +import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; function assertPlanIsEOF(plan) { // Explain query output doesn't include planning for the foreign branch hence we use execution @@ -48,12 +48,12 @@ function assertUnionOfPlans(plan, firstPartStage, secondPartStage) { const collName = "explain_find_trivially_false_predicates_in_agg_pipelines"; -const localCollName = `${collName}-local` +const localCollName = `${collName}-local`; assertDropAndRecreateCollection(db, localCollName); const localColl = db[localCollName]; assert.commandWorked(localColl.insert(Array.from({length: 10}, (_, i) => ({a: i, side: "local"})))); -const foreignCollName = `${collName}-foreign` +const foreignCollName = `${collName}-foreign`; assertDropAndRecreateCollection(db, foreignCollName); const foreignColl = db[foreignCollName]; assert.commandWorked( diff --git a/jstests/core/query/explain/explain_find_trivially_false_predicates_in_tailables_over_capped_colls.js b/jstests/core/query/explain/explain_find_trivially_false_predicates_in_tailables_over_capped_colls.js index 3bb5c341486..fb8fd536aef 100644 --- a/jstests/core/query/explain/explain_find_trivially_false_predicates_in_tailables_over_capped_colls.js +++ b/jstests/core/query/explain/explain_find_trivially_false_predicates_in_tailables_over_capped_colls.js @@ -9,7 +9,7 @@ * ] */ import {getWinningPlanFromExplain, isEofPlan} from "jstests/libs/analyze_plan.js"; -import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js" +import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; const collName = "explain_find_trivially_false_predicates_in_tailables_over_capped_colls"; diff --git a/jstests/core/query/explain_classic_runtime_planner_for_sbe.js b/jstests/core/query/explain_classic_runtime_planner_for_sbe.js index 7e9f4fed720..92cd5b7f4d3 100644 --- a/jstests/core/query/explain_classic_runtime_planner_for_sbe.js +++ b/jstests/core/query/explain_classic_runtime_planner_for_sbe.js @@ -54,7 +54,7 @@ function assertExplainFormat(explain, expectedNumReturned) { // executionStats - SBE format: const stages = getExecutionStages(explain); assert.eq(stages.length, 1, explain); - const execStage = stages[0] + const execStage = stages[0]; assert(execStage.hasOwnProperty("opens"), explain); 
assert(execStage.hasOwnProperty("closes"), explain); assert(!execStage.hasOwnProperty("works"), explain); diff --git a/jstests/core/query/idhack.js b/jstests/core/query/idhack.js index 527cc81d296..b31e858b955 100644 --- a/jstests/core/query/idhack.js +++ b/jstests/core/query/idhack.js @@ -74,7 +74,7 @@ winningPlan = getWinningPlan(explain.queryPlanner); assert(!isIdhackOrExpress(db, winningPlan), winningPlan); // Express is an 8.0+ feature. -const hasExpress = isExpress(db, getWinningPlan(t.find({_id: 1}).explain().queryPlanner)) +const hasExpress = isExpress(db, getWinningPlan(t.find({_id: 1}).explain().queryPlanner)); if (hasExpress) { // Express is used for simple _id queries. explain = t.find({_id: 1}).explain(); diff --git a/jstests/core/query/plan_cache/plan_cache_sbe.js b/jstests/core/query/plan_cache/plan_cache_sbe.js index ebd47cfc8cf..c00da162477 100644 --- a/jstests/core/query/plan_cache/plan_cache_sbe.js +++ b/jstests/core/query/plan_cache/plan_cache_sbe.js @@ -99,7 +99,7 @@ runWithParamsAllNodes(db, [{key: "internalCascadesOptimizerDisableFastPath", val const serverStatusBefore = db.serverStatus(); for (let val = 0; val < 5; ++val) { - const pipeline = getPipeline(val) + const pipeline = getPipeline(val); assert.eq(coll.aggregate(pipeline).toArray().length, 5 - val); } const planCacheStats = coll.aggregate([{$planCacheStats: {}}]).toArray(); diff --git a/jstests/core/query/query_settings/query_settings_cmds_validation.js b/jstests/core/query/query_settings/query_settings_cmds_validation.js index d2dccf0c3bc..46c437194a0 100644 --- a/jstests/core/query/query_settings/query_settings_cmds_validation.js +++ b/jstests/core/query/query_settings/query_settings_cmds_validation.js @@ -6,7 +6,7 @@ // ] // -import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js" +import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; import {QuerySettingsUtils} from "jstests/libs/query_settings_utils.js"; const collName = jsTestName(); diff --git a/jstests/core/query/query_settings/query_settings_index_application_aggregate.js b/jstests/core/query/query_settings/query_settings_index_application_aggregate.js index d60f91d57e6..85c4cb94303 100644 --- a/jstests/core/query/query_settings/query_settings_index_application_aggregate.js +++ b/jstests/core/query/query_settings/query_settings_index_application_aggregate.js @@ -270,7 +270,7 @@ function instantiateTestCasesNoSecondaryView(...testCases) { if (FixtureHelpers.isSharded(coll) || FixtureHelpers.isSharded(secondaryColl)) { // TODO: SERVER-88883 Report 'indexesUsed' for $lookup over sharded collections. 
- instantiateTestCases(testAggregateQuerySettingsApplicationWithGraphLookup) + instantiateTestCases(testAggregateQuerySettingsApplicationWithGraphLookup); instantiateTestCasesNoSecondaryView( testAggregateQuerySettingsApplicationWithoutSecondaryCollections, diff --git a/jstests/core/query/query_settings/query_shape_hash_in_slow_query_logs.js b/jstests/core/query/query_settings/query_shape_hash_in_slow_query_logs.js index 11a649834e9..46f738966ca 100644 --- a/jstests/core/query/query_settings/query_shape_hash_in_slow_query_logs.js +++ b/jstests/core/query/query_settings/query_shape_hash_in_slow_query_logs.js @@ -83,7 +83,7 @@ function testQueryShapeHash(query) { assert.eq(slowLogQueryShapeHash, querySettingsQueryShapeHash, "Query shape hash from the logs doesn't match the one from query settings"); - }) + }); } { diff --git a/jstests/core/query/regex/regex_collations.js b/jstests/core/query/regex/regex_collations.js index 1f7fa6a8adf..8c86bac6b37 100644 --- a/jstests/core/query/regex/regex_collations.js +++ b/jstests/core/query/regex/regex_collations.js @@ -11,7 +11,7 @@ import { isCollscan, isIxscan } from "jstests/libs/analyze_plan.js"; -import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js" +import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; function assertIXScanTightBounds(explain) { const winningPlan = getWinningPlanFromExplain(explain); diff --git a/jstests/core/query/sort/sortj.js b/jstests/core/query/sort/sortj.js index c5953293c54..365bf968a7b 100644 --- a/jstests/core/query/sort/sortj.js +++ b/jstests/core/query/sort/sortj.js @@ -10,7 +10,7 @@ import {FixtureHelpers} from "jstests/libs/fixture_helpers.js"; const collection = db.jstests_sortj; collection.drop(); -assert.commandWorked(collection.createIndex({a: 1})) +assert.commandWorked(collection.createIndex({a: 1})); const numShards = FixtureHelpers.numberOfShardsForCollection(collection); diff --git a/jstests/core/rename_system_buckets_collections.js b/jstests/core/rename_system_buckets_collections.js index 1b9a9353750..810aae5ab46 100644 --- a/jstests/core/rename_system_buckets_collections.js +++ b/jstests/core/rename_system_buckets_collections.js @@ -28,7 +28,7 @@ const timeseriesOpts = { // TODO SERVER-89999: remove once the feature flag version becomes last LTS const simpleBucketCollectionsDisallowed = - FeatureFlagUtil.isEnabled(db, "DisallowBucketCollectionWithoutTimeseriesOptions") + FeatureFlagUtil.isEnabled(db, "DisallowBucketCollectionWithoutTimeseriesOptions"); function setupEnv() { db.dropDatabase(); @@ -123,7 +123,7 @@ function runTests(targetDbName) { } } -jsTest.log("Run test cases with rename within same database") +jsTest.log("Run test cases with rename within same database"); runTests(dbName); -jsTest.log("Run test cases with rename across different databases") +jsTest.log("Run test cases with rename across different databases"); runTests(otherDbName); diff --git a/jstests/core/timeseries/bucket_unpacking_with_compound_sort_on_point_queries.js b/jstests/core/timeseries/bucket_unpacking_with_compound_sort_on_point_queries.js index 75e7c1e43d3..bfa3d8d81ef 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_compound_sort_on_point_queries.js +++ b/jstests/core/timeseries/bucket_unpacking_with_compound_sort_on_point_queries.js @@ -23,7 +23,7 @@ import { runDoesntRewriteTest, runRewritesTest, setupColl -} from "jstests/core/timeseries/libs/timeseries_sort_util.js" +} from "jstests/core/timeseries/libs/timeseries_sort_util.js"; const 
metaCollSubFieldsName = "bucket_unpacking_with_compound_sort_with_meta_sub_on_point_queries"; const metaCollSubFields = db[metaCollSubFieldsName]; diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort.js b/jstests/core/timeseries/bucket_unpacking_with_sort.js index ab0f91962e0..932e4648837 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort.js @@ -24,7 +24,7 @@ import { forwardIxscan, runRewritesTest, setupColl -} from "jstests/core/timeseries/libs/timeseries_sort_util.js" +} from "jstests/core/timeseries/libs/timeseries_sort_util.js"; const collName = "bucket_unpacking_with_sort"; const coll = db[collName]; diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_negative.js b/jstests/core/timeseries/bucket_unpacking_with_sort_negative.js index bd627bac1d6..91aa47383c6 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort_negative.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort_negative.js @@ -22,7 +22,7 @@ import { runDoesntRewriteTest, runRewritesTest, setupColl -} from "jstests/core/timeseries/libs/timeseries_sort_util.js" +} from "jstests/core/timeseries/libs/timeseries_sort_util.js"; const collName = "bucket_unpacking_with_sort_negative"; const coll = db[collName]; diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_on_multiple_fields_point_queries.js b/jstests/core/timeseries/bucket_unpacking_with_sort_on_multiple_fields_point_queries.js index abd5a227b13..4bbcf81c27c 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort_on_multiple_fields_point_queries.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort_on_multiple_fields_point_queries.js @@ -23,7 +23,7 @@ import { forwardIxscan, runRewritesTest, setupColl -} from "jstests/core/timeseries/libs/timeseries_sort_util.js" +} from "jstests/core/timeseries/libs/timeseries_sort_util.js"; const metaCollSubFieldsName = "bucket_unpacking_with_sort_with_meta_sub_on_multiple_fields_point_queries"; diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_on_single_field_point_queries.js b/jstests/core/timeseries/bucket_unpacking_with_sort_on_single_field_point_queries.js index 2d82c3c7724..8eb1f1e28e4 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort_on_single_field_point_queries.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort_on_single_field_point_queries.js @@ -23,7 +23,7 @@ import { forwardIxscan, runRewritesTest, setupColl -} from "jstests/core/timeseries/libs/timeseries_sort_util.js" +} from "jstests/core/timeseries/libs/timeseries_sort_util.js"; const metaCollName = "bucket_unpacking_with_sort_with_meta_on_single_field_point_queries"; const metaColl = db[metaCollName]; diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_with_collation.js b/jstests/core/timeseries/bucket_unpacking_with_sort_with_collation.js index c794224863d..d56972a6dce 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort_with_collation.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort_with_collation.js @@ -21,7 +21,7 @@ import { forwardIxscan, runDoesntRewriteTest, runRewritesTest -} from "jstests/core/timeseries/libs/timeseries_sort_util.js" +} from "jstests/core/timeseries/libs/timeseries_sort_util.js"; const ciStringCollName = 'bucket_unpacking_with_sort_ci'; const ciStringColl = db[ciStringCollName]; diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_with_geo.js b/jstests/core/timeseries/bucket_unpacking_with_sort_with_geo.js index 
e3d1cb48402..f815b54cdd5 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort_with_geo.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort_with_geo.js @@ -17,7 +17,10 @@ * cannot_run_during_upgrade_downgrade, * ] */ -import {runDoesntRewriteTest, setupColl} from "jstests/core/timeseries/libs/timeseries_sort_util.js" +import { + runDoesntRewriteTest, + setupColl +} from "jstests/core/timeseries/libs/timeseries_sort_util.js"; const geoCollName = 'bucket_unpacking_with_sort_with_geo'; const geoColl = db[geoCollName]; diff --git a/jstests/core/timeseries/timeseries_block_explain.js b/jstests/core/timeseries/timeseries_block_explain.js index d18b909e5bd..143534412aa 100644 --- a/jstests/core/timeseries/timeseries_block_explain.js +++ b/jstests/core/timeseries/timeseries_block_explain.js @@ -12,7 +12,7 @@ * ] */ import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; -import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js"; import {checkSbeFullyEnabled} from "jstests/libs/sbe_util.js"; diff --git a/jstests/core/timeseries/timeseries_collmod.js b/jstests/core/timeseries/timeseries_collmod.js index e91e643253b..c4ee06954ac 100644 --- a/jstests/core/timeseries/timeseries_collmod.js +++ b/jstests/core/timeseries/timeseries_collmod.js @@ -167,7 +167,7 @@ assert.commandWorked(db.runCommand({ })); // Setting prepareUnique should return an error on a time-series collection index. -assert.commandWorked(coll.createIndex({"prepareUniqueIndex": 1})) +assert.commandWorked(coll.createIndex({"prepareUniqueIndex": 1})); assert.commandFailedWithCode(db.runCommand({ "collMod": collName, "index": {"keyPattern": {"prepareUniqueIndex": 1}, "prepareUnique": true} diff --git a/jstests/core/timeseries/timeseries_computed_field.js b/jstests/core/timeseries/timeseries_computed_field.js index 9b8e27418e5..c653f931371 100644 --- a/jstests/core/timeseries/timeseries_computed_field.js +++ b/jstests/core/timeseries/timeseries_computed_field.js @@ -61,7 +61,7 @@ TimeseriesTest.run((insert) => { [timeFieldName]: new Date(datePrefix + 300), [metaFieldName]: "gpu", length: -2, - }) + }); // Computing a field on a dotted path which is an array, then grouping on it. 
Note that the // semantics for setting a computed field on a dotted array path are particularly strange, but diff --git a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js index 969f4ad9ed8..996db3e3dac 100644 --- a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js +++ b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js @@ -36,9 +36,10 @@ function assertBucketsAreCompressed(db, bucketsColl) { } const bucketDocs = bucketsColl.find().toArray(); - bucketDocs.forEach( - bucketDoc => {assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version), - `Expected bucket to be compressed: ${tojson(bucketDoc)}`)}); + bucketDocs.forEach(bucketDoc => { + assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version), + `Expected bucket to be compressed: ${tojson(bucketDoc)}`); + }); } function prepareCompressedBucket() { diff --git a/jstests/core/timeseries/timeseries_geonear_lookup.js b/jstests/core/timeseries/timeseries_geonear_lookup.js index 1e4fc692ae1..53cd8b180fc 100644 --- a/jstests/core/timeseries/timeseries_geonear_lookup.js +++ b/jstests/core/timeseries/timeseries_geonear_lookup.js @@ -24,7 +24,7 @@ assert.commandWorked(testDB.createCollection( assert.commandWorked(tsColl.createIndex({'tags.loc': '2dsphere'})); -tsColl.insert({time: ISODate(), tags: {loc: [40, 40], descr: 0}, value: 0}) +tsColl.insert({time: ISODate(), tags: {loc: [40, 40], descr: 0}, value: 0}); const coll2 = db.getCollection("store_min_max_values"); coll2.drop(); diff --git a/jstests/core/timeseries/timeseries_geonear_mindistance_and_maxdistance.js b/jstests/core/timeseries/timeseries_geonear_mindistance_and_maxdistance.js index baf42d80fe1..e2451509eb1 100644 --- a/jstests/core/timeseries/timeseries_geonear_mindistance_and_maxdistance.js +++ b/jstests/core/timeseries/timeseries_geonear_mindistance_and_maxdistance.js @@ -14,7 +14,7 @@ const timeFieldName = "time"; const metaFieldName = "tags"; const tsColl = db.getCollection("ts_coll"); -const normColl = db.getCollection("normal_coll") +const normColl = db.getCollection("normal_coll"); function setUpCollection(coll, options) { coll.drop(); diff --git a/jstests/core/timeseries/timeseries_group.js b/jstests/core/timeseries/timeseries_group.js index 230a0566405..2659098ec7e 100644 --- a/jstests/core/timeseries/timeseries_group.js +++ b/jstests/core/timeseries/timeseries_group.js @@ -23,7 +23,7 @@ import { blockProcessingTestCases, generateMetaVals } from "jstests/libs/block_processing_test_cases.js"; -import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; import { checkSbeFullFeatureFlagEnabled, checkSbeStatus, diff --git a/jstests/core/timeseries/timeseries_homogeneous_top_bottom.js b/jstests/core/timeseries/timeseries_homogeneous_top_bottom.js index ed8a0c82b1b..732f311ea5c 100644 --- a/jstests/core/timeseries/timeseries_homogeneous_top_bottom.js +++ b/jstests/core/timeseries/timeseries_homogeneous_top_bottom.js @@ -124,7 +124,7 @@ TimeseriesTest.run((insert) => { acc: {[accumulator]: {n: nVal, sortBy: sortBy, output: "$_id"}} } } - ]) + ]); } } } diff --git a/jstests/core/timeseries/timeseries_lastpoint.js b/jstests/core/timeseries/timeseries_lastpoint.js index e31c7a0590e..06c41b84dd4 100644 --- a/jstests/core/timeseries/timeseries_lastpoint.js +++ b/jstests/core/timeseries/timeseries_lastpoint.js @@ -78,7 +78,7 @@ let lpx2 = undefined; // lastpoint value of x for m = 2 
coll.insert({t: timestamps.t3, m: 1, x: 3}); // add to bucket #1 // An event with a different meta goes into a separate bucket. - coll.insert({t: timestamps.t6, m: 2, x: 6}) + coll.insert({t: timestamps.t6, m: 2, x: 6}); lpx2 = 6; // If this assert fails it would mean that bucket creation logic have changed. The lastpoint @@ -404,7 +404,7 @@ const casesLastpointWithDistinctScan = [ // The lastpoint opt currently isn't lowered to SBE. assert(false, `Lastpoint opt isn't implemented in SBE for pipeline ${ - tojson(pipeline)} but got ${tojson(explainFull)}`) + tojson(pipeline)} but got ${tojson(explainFull)}`); } // Check that the result matches the expected by the test case. @@ -444,7 +444,7 @@ const casesLastpointWithDistinctScan = [ // The distinct scan opt currently isn't lowered to SBE. assert(false, `Lastpoint opt isn't implemented in SBE for pipeline ${ - tojson(pipeline)} but got ${tojson(explainFull)}`) + tojson(pipeline)} but got ${tojson(explainFull)}`); } // Check that the result matches the expected by the test case. diff --git a/jstests/core/timeseries/timeseries_lastpoint_common_sort_key.js b/jstests/core/timeseries/timeseries_lastpoint_common_sort_key.js index ad3cd46e0f2..a6f3d68972a 100644 --- a/jstests/core/timeseries/timeseries_lastpoint_common_sort_key.js +++ b/jstests/core/timeseries/timeseries_lastpoint_common_sort_key.js @@ -67,7 +67,7 @@ let lpa2 = undefined; // lastpoint value of a for m = 1 coll.insert({t: timestamps.t3, m: 1, x: 3, a: 13}); // add to bucket #1 // An event with a different meta goes into a separate bucket. - coll.insert({t: timestamps.t6, m: 2, x: 6, a: 16}) + coll.insert({t: timestamps.t6, m: 2, x: 6, a: 16}); lpx2 = 6; lpa2 = 16; @@ -131,7 +131,7 @@ const casesLastpointOptimization = [ // The lastpoint opt currently isn't lowered to SBE. assert(false, `Lastpoint opt isn't implemented in SBE for pipeline ${ - tojson(pipeline)} but got ${tojson(explainFull)}`) + tojson(pipeline)} but got ${tojson(explainFull)}`); } // Check that the result matches the expected by the test case. diff --git a/jstests/core/timeseries/timeseries_lookup.js b/jstests/core/timeseries/timeseries_lookup.js index a18fef9736f..2adc141d57c 100644 --- a/jstests/core/timeseries/timeseries_lookup.js +++ b/jstests/core/timeseries/timeseries_lookup.js @@ -282,7 +282,7 @@ TimeseriesTest.run((insert) => { ], as: "joined" }}; - const result = testDB.local.aggregate(lookupStage) + const result = testDB.local.aggregate(lookupStage); assertArrayEq({ actual: result.toArray(), expected: [ diff --git a/jstests/core/timeseries/timeseries_match.js b/jstests/core/timeseries/timeseries_match.js index 7c20aac38e9..b005245e7d4 100644 --- a/jstests/core/timeseries/timeseries_match.js +++ b/jstests/core/timeseries/timeseries_match.js @@ -17,7 +17,7 @@ import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; import {getEngine, getQueryPlanner, getSingleNodeExplain} from "jstests/libs/analyze_plan.js"; -import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; import {checkSbeFullyEnabled} from "jstests/libs/sbe_util.js"; TimeseriesTest.run((insert) => { @@ -389,7 +389,7 @@ TimeseriesTest.run((insert) => { // Check results. 
{ - const results = coll.aggregate(pipe).toArray().map((x) => x._id) + const results = coll.aggregate(pipe).toArray().map((x) => x._id); results.sort(); assert.eq(testCase.ids, results, () => "Test case " + tojson(testCase)); } @@ -427,7 +427,7 @@ TimeseriesTest.run((insert) => { {$match: {"measurement": "cpu"}}, {$project: {_id: 1}} ]; - const res = coll.aggregate(pipe).toArray() + const res = coll.aggregate(pipe).toArray(); assert.eq(res.length, coll.count(), res); } @@ -436,7 +436,7 @@ TimeseriesTest.run((insert) => { assert.commandWorked(db.createCollection(coll.getName(), { timeseries: {timeField: timeFieldName, metaField: metaFieldName}, })); - assert.contains(bucketsColl.getName(), db.getCollectionNames()) + assert.contains(bucketsColl.getName(), db.getCollectionNames()); insert( coll, diff --git a/jstests/core/timeseries/timeseries_predicates.js b/jstests/core/timeseries/timeseries_predicates.js index e5948c0b738..6916dad5c4b 100644 --- a/jstests/core/timeseries/timeseries_predicates.js +++ b/jstests/core/timeseries/timeseries_predicates.js @@ -11,11 +11,11 @@ * ] */ -import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js" +import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; const coll = assertDropAndRecreateCollection(db, "timeseries_predicates_normal"); const tsColl = assertDropAndRecreateCollection( - db, "timeseries_predicates_timeseries", {timeseries: {timeField: 'time', metaField: 'mt'}}) + db, "timeseries_predicates_timeseries", {timeseries: {timeField: 'time', metaField: 'mt'}}); const bucketsColl = db.getCollection('system.buckets.' + tsColl.getName()); // Tests that we produce the same results for a given 'predicate', with and without timeseries, and diff --git a/jstests/core/timeseries/timeseries_project_pushdown.js b/jstests/core/timeseries/timeseries_project_pushdown.js index eecadf275b1..1177f42826a 100644 --- a/jstests/core/timeseries/timeseries_project_pushdown.js +++ b/jstests/core/timeseries/timeseries_project_pushdown.js @@ -33,7 +33,7 @@ function runTest({docs, pipeline, expectedResults}) { docs: [{_id: 1, [timeField]: new Date(), [metaField]: 2}], pipeline: [{$project: {new: {$getField: metaField}, _id: 0}}], expectedResults: [{new: 2}] - }) + }); })(); // $getField does not traverse objects, and should not be rewritten when it relies on a mix of @@ -46,7 +46,7 @@ function runTest({docs, pipeline, expectedResults}) { ], pipeline: [{$project: {new: {$add: [`$${metaField}`, {$getField: "a.b"}]}}}], expectedResults: [{_id: 1, new: 5}, {_id: 2, new: null}] - }) + }); })(); // @@ -61,7 +61,7 @@ function runTest({docs, pipeline, expectedResults}) { ], pipeline: [{$project: {new: {$add: [`$${metaField}`, {$getField: {$literal: "a.$b"}}]}}}], expectedResults: [{_id: 1, new: 5}, {_id: 2, new: null}] - }) + }); }()); // There is a difference between the metaField "meta1", and "$meta1". 
Field paths are allowed to @@ -75,7 +75,7 @@ function runTest({docs, pipeline, expectedResults}) { ], pipeline: [{$project: {new: {$add: [`$${metaField}`, {$getField: {$literal: "$meta1"}}]}}}], expectedResults: [{_id: 1, new: null}, {_id: 2, new: 5}] - }) + }); })(); // @@ -93,7 +93,7 @@ function runTest({docs, pipeline, expectedResults}) { } }], expectedResults: [{_id: 1, new: 4}, {_id: 2, new: 8}] - }) + }); })(); // When we rely on both the metaField and a measurementField we should not perform the rewrite and @@ -109,7 +109,7 @@ function runTest({docs, pipeline, expectedResults}) { {new: {$add: [`$${metaField}`, {$getField: {$cond: [false, null, "a.b.c"]}}]}} }], expectedResults: [{_id: 1, new: null}, {_id: 2, new: 5}] - }) + }); })(); // @@ -124,7 +124,7 @@ function runTest({docs, pipeline, expectedResults}) { ], pipeline: [{$project: {new: {$getField: {input: `$${metaField}`, field: "b"}}}}], expectedResults: [{_id: 1, new: 4}, {_id: 2}] - }) + }); })(); // Validate the correct results are returned when there is a field with '$' inside the metaField. @@ -137,7 +137,7 @@ function runTest({docs, pipeline, expectedResults}) { ], pipeline: [{$project: {new: {$getField: {input: `$${metaField}`, field: "a.$b"}}}}], expectedResults: [{_id: 1, new: 4}, {_id: 2}] - }) + }); })(); // When we rely on both the metaField and a measurementField we should not perform the rewrite and @@ -157,7 +157,7 @@ function runTest({docs, pipeline, expectedResults}) { } }], expectedResults: [{_id: 1, new: 6}, {_id: 2, new: null}] - }) + }); })(); // same test as above but with $addFields and not $project. @@ -179,7 +179,7 @@ function runTest({docs, pipeline, expectedResults}) { {[timeField]: time, [metaField]: 2, a: {"$meta1": 4}, _id: 1, new: 6}, {[timeField]: time, [metaField]: 2, a: {c: 5}, _id: 2, new: null} ] - }) + }); })(); // @@ -203,7 +203,7 @@ function runTest({docs, pipeline, expectedResults}) { } }], expectedResults: [{_id: 1, new: 3}, {_id: 2}] - }) + }); })(); // When we rely on both the metaField and a measurementField we should not perform the rewrite and @@ -230,7 +230,7 @@ function runTest({docs, pipeline, expectedResults}) { } }], expectedResults: [{_id: 1, new: 5}, {_id: 2, new: null}] - }) + }); })(); // This test validates that $project with '$$ROOT' which requires the whole document returns the @@ -247,7 +247,7 @@ function runTest({docs, pipeline, expectedResults}) { {_id: 1, new: {_id: 1, [timeField]: time, [metaField]: 2, a: 2}}, {_id: 2, new: {_id: 2, [timeField]: time, [metaField]: 2, b: 3}} ] - }) + }); })(); (function testAddFields_WithROOT() { @@ -274,5 +274,5 @@ function runTest({docs, pipeline, expectedResults}) { new: {_id: 2, [timeField]: time, [metaField]: 2, b: 3} } ] - }) + }); })(); diff --git a/jstests/core/timeseries/timeseries_sbe.js b/jstests/core/timeseries/timeseries_sbe.js index ce8941b6164..041075a8874 100644 --- a/jstests/core/timeseries/timeseries_sbe.js +++ b/jstests/core/timeseries/timeseries_sbe.js @@ -64,7 +64,7 @@ function runTest({pipeline, shouldUseSbe, aggStages}) { runTest({ pipeline: [{$match: {m: 17}}], shouldUseSbe: false, -}) +}); // $project by itself is not lowered except in SBE full. jsTestLog("ian: SBE full " + sbeFullyEnabled); @@ -149,7 +149,7 @@ runTest({ runTest({ pipeline: [{$sort: {t: 1}}, {$project: {t: 1}}], shouldUseSbe: false, -}) +}); // $match -> $addFields -> $group is permitted only in SBE full. 
runTest({ @@ -159,7 +159,7 @@ runTest({ {$group: {_id: null, s: {$sum: "$x"}}} ], shouldUseSbe: sbeFullyEnabled -}) +}); // A stack of $project stages is permitted only in SBE full. runTest({ @@ -179,7 +179,7 @@ runTest({ {$group: {_id: "$a", n: {$sum: "$b"}}}, ], shouldUseSbe: sbeUnpackPushdownEnabled, -}) +}); // The full rewrite of a group might avoid unpacking. Let's check that these are fully lowered. runTest({ @@ -188,7 +188,7 @@ runTest({ {$group: {_id: "$m", min: {$min: "$a"}}}, ], shouldUseSbe: sbeUnpackPushdownEnabled, -}) +}); // Bucket unpacking should not be lowered when there is an eventFilter with a full match // expression that is not supported in SBE. This entire pipeline should run in classic. diff --git a/jstests/core/timeseries/timeseries_update_compressed_buckets.js b/jstests/core/timeseries/timeseries_update_compressed_buckets.js index 85a41dd6fba..2c4ab2bc217 100644 --- a/jstests/core/timeseries/timeseries_update_compressed_buckets.js +++ b/jstests/core/timeseries/timeseries_update_compressed_buckets.js @@ -39,9 +39,10 @@ function assertBucketsAreCompressed(db, bucketsColl) { } const bucketDocs = bucketsColl.find().toArray(); - bucketDocs.forEach( - bucketDoc => {assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version), - `Expected bucket to be compressed: ${tojson(bucketDoc)}`)}); + bucketDocs.forEach(bucketDoc => { + assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version), + `Expected bucket to be compressed: ${tojson(bucketDoc)}`); + }); } function prepareCompressedBucket() { diff --git a/jstests/core/timeseries/timeseries_user_system_buckets.js b/jstests/core/timeseries/timeseries_user_system_buckets.js index 89142f0ad46..db9d6e5e384 100644 --- a/jstests/core/timeseries/timeseries_user_system_buckets.js +++ b/jstests/core/timeseries/timeseries_user_system_buckets.js @@ -26,8 +26,8 @@ const tsOptions2 = { timeField: "timestamp", metaField: "metadata2" }; -const kColl = "coll" -const kBucket = "system.buckets.coll" +const kColl = "coll"; +const kBucket = "system.buckets.coll"; function createWorked(collName, tsOptions = {}) { if (Object.keys(tsOptions).length === 0) { @@ -70,12 +70,12 @@ function runTest(testCase, minRequiredVersion = null) { return; } } - testCase() + testCase(); db.dropDatabase(); } // Reset any previous run state. -db.dropDatabase() +db.dropDatabase(); // Case prexisting collection: standard. { @@ -153,14 +153,13 @@ db.dropDatabase() { jsTest.log("Case collection: bucket timeseries / collection: standard."); runTest(() => { - createWorked(kBucket, tsOptions) + createWorked(kBucket, tsOptions); if (FixtureHelpers.isMongos(db) || TestData.testingReplicaSetEndpoint) { // TODO SERVER-87189 Replace this with commandFailed. Now we always pass from the // coordinator to create a collection which will prevent from using the main namespace // if a bucket nss already exists. createWorkedOrFailedWithCode(kColl, {}, ErrorCodes.NamespaceExists); - } - else { + } else { // TODO SERVER-85855 creating a normal collection with an already created bucket // timeseries should fail. 
createWorked(kColl); @@ -170,7 +169,7 @@ db.dropDatabase() jsTest.log("Case collection: bucket timeseries / collection: timeseries."); runTest( () => { - createWorked(kBucket, tsOptions) + createWorked(kBucket, tsOptions); createWorked(kColl, tsOptions); }, // Creation of bucket namespace is not idempotent before 8.0 (SERVER-89827) @@ -180,14 +179,14 @@ db.dropDatabase() jsTest.log( "Case collection: bucket timeseries / collection: timeseries with different options."); runTest(() => { - createWorked(kBucket, tsOptions) + createWorked(kBucket, tsOptions); createFailed(kColl, tsOptions2, ErrorCodes.NamespaceExists); }); jsTest.log( "Case collection: bucket timeseries / collection: bucket timeseries with different options."); runTest(() => { - createWorked(kBucket, tsOptions) + createWorked(kBucket, tsOptions); createFailed(kBucket, tsOptions2, ErrorCodes.NamespaceExists); }); diff --git a/jstests/core/write/bulk/bulk_write_profile.js b/jstests/core/write/bulk/bulk_write_profile.js index c31133a960e..6321d968c53 100644 --- a/jstests/core/write/bulk/bulk_write_profile.js +++ b/jstests/core/write/bulk/bulk_write_profile.js @@ -22,8 +22,8 @@ import {getNLatestProfilerEntries} from "jstests/libs/profiler.js"; var testDB = db.getSiblingDB(jsTestName()); assert.commandWorked(testDB.dropDatabase()); -const collName1 = jsTestName() + "_1" -const collName2 = jsTestName() + "_2" +const collName1 = jsTestName() + "_1"; +const collName2 = jsTestName() + "_2"; const coll1 = testDB.getCollection(collName1); const coll2 = testDB.getCollection(collName2); diff --git a/jstests/core/write/bulk/bulk_write_timeseries_basic.js b/jstests/core/write/bulk/bulk_write_timeseries_basic.js index 50dbbbf2aba..a8072223025 100644 --- a/jstests/core/write/bulk/bulk_write_timeseries_basic.js +++ b/jstests/core/write/bulk/bulk_write_timeseries_basic.js @@ -59,7 +59,7 @@ res = db.adminCommand({ }); summaryFieldsValidator( res, {nErrors: 1, nInserted: 0, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0}); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0}) +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0}); cursorSizeValidator(res, 1); assert.eq(coll.countDocuments({}), 3); @@ -77,9 +77,9 @@ res = db.adminCommand({ }); summaryFieldsValidator( res, {nErrors: 1, nInserted: 2, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0}); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0}) -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}) -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1}) +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1}); assert.eq(coll.countDocuments({}), 5); // Test unordered inserts to 2 collections - 1 timeseries collection and 1 non-timeseries @@ -99,8 +99,8 @@ res = db.adminCommand({ }); summaryFieldsValidator( res, {nErrors: 2, nInserted: 4, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0}); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0}) -cursorEntryValidator(res.cursor.firstBatch[4], {ok: 0, idx: 4, code: 11000, n: 0}) +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0}); +cursorEntryValidator(res.cursor.firstBatch[4], {ok: 0, idx: 4, code: 11000, n: 0}); assert.eq(coll.countDocuments({}), 8); assert.eq(nonTSColl.countDocuments({}), 1); @@ -118,7 
+118,7 @@ res = db.adminCommand({ }); summaryFieldsValidator( res, {nErrors: 1, nInserted: 1, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 2, n: 0}) +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 2, n: 0}); cursorSizeValidator(res, 2); assert.eq(coll.countDocuments({}), 8); assert.eq(nonTSColl.countDocuments({}), 2); diff --git a/jstests/core_sharding/shard_collection_basic.js b/jstests/core_sharding/shard_collection_basic.js index e2a6de65054..14a5fe62465 100644 --- a/jstests/core_sharding/shard_collection_basic.js +++ b/jstests/core_sharding/shard_collection_basic.js @@ -4,7 +4,7 @@ import {findChunksUtil} from "jstests/sharding/libs/find_chunks_util.js"; -var kDbName = db.getName() +var kDbName = db.getName(); db.dropDatabase(); diff --git a/jstests/cqf/optimizer/over_approximation.js b/jstests/cqf/optimizer/over_approximation.js index b1146260486..9baf2e85cc6 100644 --- a/jstests/cqf/optimizer/over_approximation.js +++ b/jstests/cqf/optimizer/over_approximation.js @@ -21,7 +21,7 @@ assert.commandWorked(t.insert({ } } } -})) +})); const res = t.aggregate({ @@ -43,4 +43,4 @@ const res = } }).toArray(); -assert.eq([], res, "No documents should match") +assert.eq([], res, "No documents should match"); diff --git a/jstests/cqf/optimizer/parameterization.js b/jstests/cqf/optimizer/parameterization.js index 92f65cbf510..93c017f4a60 100644 --- a/jstests/cqf/optimizer/parameterization.js +++ b/jstests/cqf/optimizer/parameterization.js @@ -103,24 +103,36 @@ runWithParams( // Collection has no indexes except default _id index // Verify that queries are parameterized correctly for M2 Bonsai-eligible FIND queries - cmds.forEach( - cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, assertPhysicalScan)}); - cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], find, assertParamerized)}); + cmds.forEach(cmdEl => { + verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, assertPhysicalScan); + }); + cmds.forEach(cmdEl => { + verifyCommandParameterization(cmdEl[0], find, assertParamerized); + }); // Verify that queries are parameterized correctly for M2 Bonsai-eligible AGG queries - cmds.forEach( - cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, assertPhysicalScan)}); - cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], agg, assertParamerized)}); + cmds.forEach(cmdEl => { + verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, assertPhysicalScan); + }); + cmds.forEach(cmdEl => { + verifyCommandParameterization(cmdEl[0], agg, assertParamerized); + }); assert.commandWorked(coll.createIndex({'a.b': 1})); // Collection has indexes // Verify that queries are not parameterized for M2 Bonsai-ineligible FIND queries - cmds.forEach( - cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, !assertPhysicalScan)}); - cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], find, !assertParamerized)}); + cmds.forEach(cmdEl => { + verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, !assertPhysicalScan); + }); + cmds.forEach(cmdEl => { + verifyCommandParameterization(cmdEl[0], find, !assertParamerized); + }); // Verify that queries are not parameterized for M2 Bonsai-ineligible AGG queries - cmds.forEach( - cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, !assertPhysicalScan)}); - cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], agg, !assertParamerized)}); + cmds.forEach(cmdEl => { + verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, 
!assertPhysicalScan); + }); + cmds.forEach(cmdEl => { + verifyCommandParameterization(cmdEl[0], agg, !assertParamerized); + }); }); diff --git a/jstests/cqf/optimizer/projection.js b/jstests/cqf/optimizer/projection.js index aeffa8f54f7..53733d1716f 100644 --- a/jstests/cqf/optimizer/projection.js +++ b/jstests/cqf/optimizer/projection.js @@ -230,7 +230,7 @@ function testInputOutputPipeline({input, pipeline, expectedOutput, interestingIn expectedOutput: [{_id: 0, a: 2}, {_id: 1, a: 2}, {_id: 2, a: 2}, {_id: 3, a: 2}, {_id: 4, a: 2}], interestingIndexes: [] - }) + }); testInputOutputPipeline({ input: docs, @@ -243,5 +243,5 @@ function testInputOutputPipeline({input, pipeline, expectedOutput, interestingIn {_id: 4, a: {b: 2}} ], interestingIndexes: [] - }) + }); }()); diff --git a/jstests/cqf_parallel/optimizer/basic_exchange.js b/jstests/cqf_parallel/optimizer/basic_exchange.js index 6401a38ef37..b81d3649239 100644 --- a/jstests/cqf_parallel/optimizer/basic_exchange.js +++ b/jstests/cqf_parallel/optimizer/basic_exchange.js @@ -18,12 +18,11 @@ assert.commandWorked(t.insert({a: {b: 3}})); assert.commandWorked(t.insert({a: {b: 4}})); assert.commandWorked(t.insert({a: {b: 5}})); -const runTest = - () => { - const res = t.explain("executionStats").aggregate([{$match: {'a.b': 2}}]); - assert.eq(1, res.executionStats.nReturned); - assertValueOnPlanPath("Exchange", res, "child.nodeType"); - } +const runTest = () => { + const res = t.explain("executionStats").aggregate([{$match: {'a.b': 2}}]); + assert.eq(1, res.executionStats.nReturned); + assertValueOnPlanPath("Exchange", res, "child.nodeType"); +}; // Test exchange with both Sargable nodes & Filter nodes runWithParams([{key: "internalCascadesOptimizerDisableSargableWhenNoIndexes", value: false}], diff --git a/jstests/disk/wt_repair_corrupt_metadata.js b/jstests/disk/wt_repair_corrupt_metadata.js index 555cac9303b..c73fd51d97b 100644 --- a/jstests/disk/wt_repair_corrupt_metadata.js +++ b/jstests/disk/wt_repair_corrupt_metadata.js @@ -31,7 +31,7 @@ let runTest = function(mongodOptions) { let mongod = startMongodOnExistingPath(dbpath, mongodOptions); const buildInfo = assert.commandWorked(mongod.getDB(baseName).adminCommand({"buildInfo": 1})); - const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize') + const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize'); // Force a checkpoint and make a copy of the turtle file. assert.commandWorked(mongod.getDB(baseName).adminCommand({fsync: 1})); diff --git a/jstests/fle2/libs/encrypted_client_util.js b/jstests/fle2/libs/encrypted_client_util.js index 79944ad4e81..7e8765196f4 100644 --- a/jstests/fle2/libs/encrypted_client_util.js +++ b/jstests/fle2/libs/encrypted_client_util.js @@ -22,7 +22,7 @@ export function runWithEncryption(edb, func) { assert(!edb.getMongo().isAutoEncryptionEnabled(), "Cannot switch to encrypted connection on already encrypted connection. 
Do not " + - "nest calls to runWithEncryption.") + "nest calls to runWithEncryption."); edb.getMongo().toggleAutoEncryption(true); @@ -90,7 +90,9 @@ DB.prototype.eadminCommand = function(cmd, params) { }; DBCollection.prototype.ecount = function(filter) { - return runWithEncryption(this, () => {return this.find(filter).toArray().length}); + return runWithEncryption(this, () => { + return this.find(filter).toArray().length; + }); }; // Note that efind does not exist since find executes diff --git a/jstests/hooks/lag_secondary_application.js b/jstests/hooks/lag_secondary_application.js index eb0fa34acf7..5d36b508946 100644 --- a/jstests/hooks/lag_secondary_application.js +++ b/jstests/hooks/lag_secondary_application.js @@ -9,7 +9,7 @@ const MAX_MS = 1000; /* Pick a random millisecond value between 400 and 1000 for the lag value */ function randomMSFromInterval(minMS, maxMS) { // min and max included - return Math.floor(Math.random() * (maxMS - minMS + 1) + minMS) + return Math.floor(Math.random() * (maxMS - minMS + 1) + minMS); } /* Returns true if the error code indicates the node is currently shutting down. */ diff --git a/jstests/hooks/magic_restore.js b/jstests/hooks/magic_restore.js index 5ab48d5501e..ae20fb01c67 100644 --- a/jstests/hooks/magic_restore.js +++ b/jstests/hooks/magic_restore.js @@ -272,7 +272,7 @@ const topology = DiscoverTopology.findConnectedNodes(db); if (topology.type == Topology.kShardedCluster) { // Perform restore for the config server. - const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0' + const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0'; let configMongo = new Mongo(topology.configsvr.nodes[0]); performMagicRestore(configMongo, path, "configsvr", {"replSet": "config-rs", "configsvr": ''}); diff --git a/jstests/hooks/magic_restore_backup.js b/jstests/hooks/magic_restore_backup.js index f0964222b54..74495643a07 100644 --- a/jstests/hooks/magic_restore_backup.js +++ b/jstests/hooks/magic_restore_backup.js @@ -37,7 +37,7 @@ const topology = DiscoverTopology.findConnectedNodes(db); if (topology.type == Topology.kReplicaSet) { const conn = db.getMongo(); - const dbPathPrefix = MongoRunner.dataPath + '../magicRestore/node0' + const dbPathPrefix = MongoRunner.dataPath + '../magicRestore/node0'; let [cursor, metadata] = takeBackup(conn, dbPathPrefix); writeMetadataInfo(conn, metadata.checkpointTimestamp); cursor.close(); @@ -52,7 +52,7 @@ if (topology.type == Topology.kReplicaSet) { let maxCheckpointTimestamp = Timestamp(); // Take configsvr backup. 
- const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0' + const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0'; restorePaths.push(path); let nodeMongo = new Mongo(topology.configsvr.nodes[0]); diff --git a/jstests/hooks/run_check_repl_dbhash_background.js b/jstests/hooks/run_check_repl_dbhash_background.js index 3bed18b5378..82914aa0852 100644 --- a/jstests/hooks/run_check_repl_dbhash_background.js +++ b/jstests/hooks/run_check_repl_dbhash_background.js @@ -246,7 +246,7 @@ function checkReplDbhashBackgroundThread(hosts) { jsTestLog(`About to run setSecurity token on ${rst}`); rst.nodes.forEach(node => node._setSecurityToken(token)); - jsTestLog(`Running checkcollection for ${dbName} with token ${token}`) + jsTestLog(`Running checkcollection for ${dbName} with token ${token}`); return checkCollectionHashesForDB(dbName, clusterTime); } finally { rst.nodes.forEach(node => node._setSecurityToken(undefined)); diff --git a/jstests/libs/analyze_plan.js b/jstests/libs/analyze_plan.js index 689e820f1b8..1356aedc171 100644 --- a/jstests/libs/analyze_plan.js +++ b/jstests/libs/analyze_plan.js @@ -33,7 +33,7 @@ export function getQueryPlanner(explain) { assert(explain.hasOwnProperty("stages"), explain); const stage = explain.stages[0]; assert(stage.hasOwnProperty("$cursor"), explain); - const cursorStage = stage.$cursor + const cursorStage = stage.$cursor; assert(cursorStage.hasOwnProperty("queryPlanner"), explain); return cursorStage.queryPlanner; } @@ -112,7 +112,7 @@ export function getWinningPlan(queryPlanner) { } export function getWinningSBEPlan(queryPlanner) { - assert(queryPlanner.winningPlan.hasOwnProperty("slotBasedPlan"), queryPlanner) + assert(queryPlanner.winningPlan.hasOwnProperty("slotBasedPlan"), queryPlanner); return queryPlanner.winningPlan.slotBasedPlan; } @@ -298,7 +298,7 @@ export function getAllPlanStages(root) { * This helper function can be used for any optimizer. */ export function getPlanStage(root, stage) { - assert(stage, "Stage was not defined in getPlanStage.") + assert(stage, "Stage was not defined in getPlanStage."); var planStageList = getPlanStages(root, stage); if (planStageList.length === 0) { @@ -636,7 +636,7 @@ export function getAggPlanStages(root, stage, useQueryPlannerSection = false) { * This helper function can be used for any optimizer. */ export function getAggPlanStage(root, stage, useQueryPlannerSection = false) { - assert(stage, "Stage was not defined in getAggPlanStage.") + assert(stage, "Stage was not defined in getAggPlanStage."); let planStageList = getAggPlanStages(root, stage, useQueryPlannerSection); if (planStageList.length === 0) { @@ -667,7 +667,7 @@ export function aggPlanHasStage(root, stage) { * returns true if the plan has a stage called 'stage'. */ export function planHasStage(db, root, stage) { - assert(stage, "Stage was not defined in planHasStage.") + assert(stage, "Stage was not defined in planHasStage."); return getPlanStages(root, stage).length > 0; } @@ -930,7 +930,7 @@ export function assertCoveredQueryAndCount({collection, query, project, count}) "Winning plan was not covered: " + tojson(explain.queryPlanner.winningPlan)); break; default: - break + break; } // Same query as a count command should also be covered. 
@@ -944,7 +944,7 @@ export function assertCoveredQueryAndCount({collection, query, project, count}) assertExplainCount({explainResults: explain, expectedCount: count}); break; default: - break + break; } } diff --git a/jstests/libs/ftdc.js b/jstests/libs/ftdc.js index 66247ae2cb4..0d0b05b4076 100644 --- a/jstests/libs/ftdc.js +++ b/jstests/libs/ftdc.js @@ -52,7 +52,7 @@ export function verifyGetDiagnosticData(adminDb, logData = true, assumeMultiserv TestData.testingReplicaSetEndpoint) { const hasKnownData = (data.hasOwnProperty("shard") && data.shard.hasOwnProperty("serverStatus")) || - (data.hasOwnProperty("router") && data.router.hasOwnProperty("connPoolStats")) + (data.hasOwnProperty("router") && data.router.hasOwnProperty("connPoolStats")); assert(hasKnownData, "does not have 'shard.serverStatus' nor 'router.connPoolStats' in '" + tojson(data) + "'"); diff --git a/jstests/libs/hang_test_to_attach_gdb.js b/jstests/libs/hang_test_to_attach_gdb.js index 8fd47525c54..5560696e697 100644 --- a/jstests/libs/hang_test_to_attach_gdb.js +++ b/jstests/libs/hang_test_to_attach_gdb.js @@ -12,7 +12,7 @@ function isTimeToWake(waitFor) { return false; } } else { - jsTestLog("Was not given a termination condition. Will keep looping forever.") + jsTestLog("Was not given a termination condition. Will keep looping forever."); } return false; } @@ -47,7 +47,7 @@ export function hangTestToAttachGDB(st, opts) { jsTestLog("Here are the ports to connect to"); logClusterPorts(st); } - jsTestLog("Test is sleeping waiting for you to connect") + jsTestLog("Test is sleeping waiting for you to connect"); if (opts.waitFor && isTimeToWake(opts.waitFor)) { jsTestLog("Breaking sleep loop"); break; diff --git a/jstests/libs/optimizer_utils.js b/jstests/libs/optimizer_utils.js index 2983d5afae6..7735867bc62 100644 --- a/jstests/libs/optimizer_utils.js +++ b/jstests/libs/optimizer_utils.js @@ -53,7 +53,7 @@ export function usedBonsaiOptimizer(explain) { return false; } } - return true + return true; } // This section handles the explain output for unsharded queries. diff --git a/jstests/libs/override_methods/implicit_timeseries_collections.js b/jstests/libs/override_methods/implicit_timeseries_collections.js index ff23dc00f74..5f05badc55d 100644 --- a/jstests/libs/override_methods/implicit_timeseries_collections.js +++ b/jstests/libs/override_methods/implicit_timeseries_collections.js @@ -15,7 +15,7 @@ const originalAssertEq = assert.eq; // The name of the implicitly added timestamp field. 
const timeFieldName = "overrideTimeFieldName"; -const metaFieldName = "metaFieldName" +const metaFieldName = "metaFieldName"; const denylistedNamespaces = [ /^admin\./, @@ -188,7 +188,7 @@ function cleanUpResultCursor(result, batchName) { } result["cursor"][batchName].forEach(doc => { delete doc[timeFieldName]; - }) + }); } /** diff --git a/jstests/libs/override_methods/make_index_filters_into_query_settings.js b/jstests/libs/override_methods/make_index_filters_into_query_settings.js index 9a2e3b1941d..9a5855cb7ab 100644 --- a/jstests/libs/override_methods/make_index_filters_into_query_settings.js +++ b/jstests/libs/override_methods/make_index_filters_into_query_settings.js @@ -28,7 +28,7 @@ function populateIndexFilterSetIfQuerySettingsArePresent(response) { function processAggregateResponse(cmdObj, response) { if (cmdObj.pipeline.some(stage => stage.hasOwnProperty("$planCacheStats"))) { for (let cacheEntry of response.cursor.firstBatch) { - cacheEntry.indexFilterSet = cacheEntry.hasOwnProperty('querySettings') + cacheEntry.indexFilterSet = cacheEntry.hasOwnProperty('querySettings'); } } @@ -138,7 +138,7 @@ function runCommandOverride(conn, dbName, cmdName, cmdObj, clientFunction, makeF // Remove all query settings associated with that collection upon collection drop. This // is the semantics of index filters. planCacheClearFiltersToRemoveAllQuerySettings(conn, - {planCacheClearFilters: cmdObj.drop}) + {planCacheClearFilters: cmdObj.drop}); // Drop the collection. return clientFunction.apply(conn, makeFuncArgs(cmdObj)); diff --git a/jstests/libs/override_methods/noop_assertions.js b/jstests/libs/override_methods/noop_assertions.js index fbc6c3cf4a6..c14c7ce53b8 100644 --- a/jstests/libs/override_methods/noop_assertions.js +++ b/jstests/libs/override_methods/noop_assertions.js @@ -1,10 +1,9 @@ -assert.soon = - function(func) { +assert.soon = function(func) { if (typeof (func) == "string") { eval(func); } else { func(); } -} +}; -doassert = function() { /* noop */ } +doassert = function() { /* noop */ }; diff --git a/jstests/libs/override_methods/send_command_to_initial_sync_node_lib.js b/jstests/libs/override_methods/send_command_to_initial_sync_node_lib.js index 6e4d384b828..15d20190a0b 100644 --- a/jstests/libs/override_methods/send_command_to_initial_sync_node_lib.js +++ b/jstests/libs/override_methods/send_command_to_initial_sync_node_lib.js @@ -33,7 +33,7 @@ export function shouldSkipCommand(_commandName, commandObj) { // Ignore fsync to avoid locking the initial sync node without unlocking. 
"fsync": true, "fsyncUnlock": true, - } + }; if (_commandName in skippedCommands) { return true; diff --git a/jstests/libs/override_methods/send_command_to_initial_sync_node_sharded_cluster.js b/jstests/libs/override_methods/send_command_to_initial_sync_node_sharded_cluster.js index 1f48439b786..46454a80c47 100644 --- a/jstests/libs/override_methods/send_command_to_initial_sync_node_sharded_cluster.js +++ b/jstests/libs/override_methods/send_command_to_initial_sync_node_sharded_cluster.js @@ -55,7 +55,7 @@ function maybeSendCommandToInitialSyncNodesShardedCluster( const shardMap = conn.adminCommand({getShardMap: 1}); if (!shardMap.ok) { jsTestLog("Unable to run getShardMap: " + tojson(shardMap) + - ", skipping forwarding command " + _commandName + " to initial sync node") + ", skipping forwarding command " + _commandName + " to initial sync node"); return func.apply(conn, makeFuncArgs(commandObj)); } diff --git a/jstests/libs/override_methods/set_recordids_replicated.js b/jstests/libs/override_methods/set_recordids_replicated.js index 66a74ecff4c..365dbe2ba68 100644 --- a/jstests/libs/override_methods/set_recordids_replicated.js +++ b/jstests/libs/override_methods/set_recordids_replicated.js @@ -25,7 +25,7 @@ function runCommandWithRecordIdsReplicated( const collName = commandObj[commandName]; const ns = dbName + "." + collName; if (commandName === "drop") { - createdCollections.delete(ns) + createdCollections.delete(ns); return func.apply(conn, makeFuncArgs(commandObj)); } if (!commandsToOverride.has(commandName) || createdCollections.has(ns) || diff --git a/jstests/libs/override_methods/validate_collections_on_shutdown.js b/jstests/libs/override_methods/validate_collections_on_shutdown.js index ba3c705d264..bda34afe06d 100644 --- a/jstests/libs/override_methods/validate_collections_on_shutdown.js +++ b/jstests/libs/override_methods/validate_collections_on_shutdown.js @@ -114,9 +114,7 @@ MongoRunner.validateCollectionsCallback = function(port, options) { } assert.commandWorked(res); dbs = res.databases.map(dbInfo => { - return { - name: dbInfo.name, tenant: dbInfo.tenantId - } + return {name: dbInfo.name, tenant: dbInfo.tenantId}; }); }) .execute(); diff --git a/jstests/libs/query_stats_utils.js b/jstests/libs/query_stats_utils.js index b48899a0f63..e75876bc528 100644 --- a/jstests/libs/query_stats_utils.js +++ b/jstests/libs/query_stats_utils.js @@ -347,7 +347,7 @@ function hasValueAtPath(object, dottedPath) { let nestedFields = dottedPath.split("."); for (const nestedField of nestedFields) { if (!object.hasOwnProperty(nestedField)) { - return false + return false; } object = object[nestedField]; } @@ -363,7 +363,7 @@ export function getValueAtPath(object, dottedPath) { let nestedFields = dottedPath.split("."); for (const nestedField of nestedFields) { if (!object.hasOwnProperty(nestedField)) { - return false + return false; } object = object[nestedField]; } diff --git a/jstests/multiVersion/libs/dbcheck_old_format_keys_test.js b/jstests/multiVersion/libs/dbcheck_old_format_keys_test.js index a8ef267e4cb..698827f4056 100644 --- a/jstests/multiVersion/libs/dbcheck_old_format_keys_test.js +++ b/jstests/multiVersion/libs/dbcheck_old_format_keys_test.js @@ -30,7 +30,7 @@ const upgradeVersions = { "8.0": {"fcv": "8.1", "nextVersion": "latest"}, // TODO (SERVER-66611): Automate modifying this list. 
"latest": {} -} +}; export class DbCheckOldFormatKeysTest { constructor({ diff --git a/jstests/multiVersion/libs/multi_cluster.js b/jstests/multiVersion/libs/multi_cluster.js index ad052b9375e..0afe22b4b14 100644 --- a/jstests/multiVersion/libs/multi_cluster.js +++ b/jstests/multiVersion/libs/multi_cluster.js @@ -117,7 +117,7 @@ ShardingTest.prototype._restartBinariesForUpgrade = function( if (options.upgradeOneShard) { // Upgrade one shard. - let rs = options.upgradeOneShard + let rs = options.upgradeOneShard; rs.upgradeSet(upgradeOptions); } @@ -271,7 +271,7 @@ ShardingTest.prototype._restartBinariesForDowngrade = function( if (options.downgradeOneShard) { // Downgrade one shard. - let rs = options.downgradeOneShard + let rs = options.downgradeOneShard; rs.upgradeSet(downgradeOptions); } diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/mixed_cluster_roles.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/mixed_cluster_roles.js index 3277cd00b53..1a06e129e97 100644 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/mixed_cluster_roles.js +++ b/jstests/multiVersion/targetedTestsLastLtsFeatures/mixed_cluster_roles.js @@ -104,7 +104,7 @@ function ensureShardingCommandsFail(conn) { testCRUD(node0); ensureShardingCommandsFail(node0); - jsTestLog('Test with shard server primary before the shard identity document is inserted.') + jsTestLog('Test with shard server primary before the shard identity document is inserted.'); MongoRunner.stopMongod(node0, null, {noCleanData: true}); node0 = MongoRunner.runMongod({ noCleanData: true, diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/upgrade_during_vectored_insert.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/upgrade_during_vectored_insert.js index f1fd9cfd316..c87db8f4adb 100644 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/upgrade_during_vectored_insert.js +++ b/jstests/multiVersion/targetedTestsLastLtsFeatures/upgrade_during_vectored_insert.js @@ -48,7 +48,7 @@ let insertThread = new Thread(function(host, testCollFullName) { TestData.disableImplicitSessions = true; assert(jsTest.options().disableImplicitSessions); let conn = new Mongo(host); - let testColl = conn.getCollection(testCollFullName) + let testColl = conn.getCollection(testCollFullName); // Run a batch that will fail on the last document due to a DuplicateKeyError. 
jsTestLog("Inserting data"); let res = testColl.insert( diff --git a/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js b/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js index 5cc242c6ded..bc76800cebc 100644 --- a/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js +++ b/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js @@ -15,7 +15,7 @@ var st = const primary = st.s0; const buildInfo = assert.commandWorked(st.s0.adminCommand({"buildInfo": 1})); -const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize') +const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize'); if (!isSanitizerEnabled) { jsTestLog("Skipping " + jsTestName() + " because address sanitizer is not active."); diff --git a/jstests/noPassthrough/bulk_write_metrics.js b/jstests/noPassthrough/bulk_write_metrics.js index aafc1dc0cba..77512cd36bd 100644 --- a/jstests/noPassthrough/bulk_write_metrics.js +++ b/jstests/noPassthrough/bulk_write_metrics.js @@ -14,7 +14,7 @@ function runTest(isMongos, cluster, bulkWrite, retryCount, timeseries) { const dbName = "testDB"; const collName1 = "testColl1"; - const collName2 = "testColl2" + const collName2 = "testColl2"; const namespace1 = `${dbName}.${collName1}`; const namespace2 = `${dbName}.${collName2}`; const session = isMongos ? cluster.s.startSession() : cluster.getPrimary().startSession(); diff --git a/jstests/noPassthrough/cqf_explain.js b/jstests/noPassthrough/cqf_explain.js index 57dcf0ce8ea..5920b6e974d 100644 --- a/jstests/noPassthrough/cqf_explain.js +++ b/jstests/noPassthrough/cqf_explain.js @@ -10,7 +10,7 @@ import { getShardQueryPlans, getWinningPlanFromExplain, runOnAllTopLevelExplains -} from "jstests/libs/analyze_plan.js" +} from "jstests/libs/analyze_plan.js"; import {DiscoverTopology} from "jstests/libs/discover_topology.js"; import { leftmostLeafStage, @@ -476,18 +476,19 @@ function runTest(db, coll, isSharded) { // Test that the parsedQuery field is empty when the query is empty. explain = coll.find().explain(); analyzeTopLevelExplain( - explain, false /* expectedMaxPSRCountReached */, {"filter": {}} /* expectedParsedQuery */) + explain, false /* expectedMaxPSRCountReached */, {"filter": {}} /* expectedParsedQuery */); explain = coll.explain().aggregate(); - analyzeTopLevelExplain( - explain, false /* expectedMaxPSRCountReached */, {"pipeline": []} /* expectedParsedQuery */) + analyzeTopLevelExplain(explain, + false /* expectedMaxPSRCountReached */, + {"pipeline": []} /* expectedParsedQuery */); // Test that the parsedQuery field is correct for a more complex query. explain = coll.find({$or: [{a: 1}, {a: {$lt: 1}}]}, {a: 1}).explain(); analyzeTopLevelExplain(explain, false /* expectedMaxPSRCountReached */, { "filter": {"$or": [{"a": {"$eq": 1}}, {"a": {"$lt": 1}}]}, "projection": {"a": true, "_id": true} - }) + }); explain = coll.explain().aggregate([{$match: {$and: [{a: {$lt: 5}}, {a: 5}]}}]); analyzeTopLevelExplain(explain, diff --git a/jstests/noPassthrough/cqf_explain_planSummary.js b/jstests/noPassthrough/cqf_explain_planSummary.js index 4d0da48ac75..78a0054b692 100644 --- a/jstests/noPassthrough/cqf_explain_planSummary.js +++ b/jstests/noPassthrough/cqf_explain_planSummary.js @@ -2,7 +2,7 @@ * Tests the planSummary field present in CQF explain output. 
*/ -import {getPlanSummaries} from "jstests/libs/analyze_plan.js" +import {getPlanSummaries} from "jstests/libs/analyze_plan.js"; function checkSummaries(db, collName, summaryExpected, hint, isSharded) { const commands = [ diff --git a/jstests/noPassthrough/cqf_fallback.js b/jstests/noPassthrough/cqf_fallback.js index aa80b700158..faac48be0e3 100644 --- a/jstests/noPassthrough/cqf_fallback.js +++ b/jstests/noPassthrough/cqf_fallback.js @@ -135,7 +135,7 @@ assertSupportedByBonsaiFully({find: coll.getName(), filter: {'a': {$lte: null}}} assertSupportedByBonsaiFully({find: coll.getName(), filter: {'a': {$gt: null}}}); assertSupportedByBonsaiFully( {aggregate: coll.getName(), pipeline: [{$match: {a: {$eq: null}}}], cursor: {}}); -assertSupportedByBonsaiFully({find: coll.getName(), filter: {a: {$in: [1, 2, null, 3]}}}) +assertSupportedByBonsaiFully({find: coll.getName(), filter: {a: {$in: [1, 2, null, 3]}}}); assertSupportedByBonsaiFully({find: coll.getName(), filter: {a: {$elemMatch: {b: null}}}}); assertSupportedByBonsaiFully({find: coll.getName(), filter: {'a.c': {$elemMatch: {b: null}}}}); diff --git a/jstests/noPassthrough/dbcheck_coll_view.js b/jstests/noPassthrough/dbcheck_coll_view.js index 27729ecb2bb..52ed9a2183f 100644 --- a/jstests/noPassthrough/dbcheck_coll_view.js +++ b/jstests/noPassthrough/dbcheck_coll_view.js @@ -60,7 +60,7 @@ replSet.awaitReplication(); fp.off(); // Ensure that dbCheck completes successfully on all nodes. -awaitDbCheckCompletion(replSet, db, true /*waitForHealthLogDbCheckStop*/) +awaitDbCheckCompletion(replSet, db, true /*waitForHealthLogDbCheckStop*/); assertForDbCheckErrorsForAllNodes(replSet, true /*assertForErrors*/, true /*assertForWarnings*/); // Make sure the dbCheck has one batch with the one document we have inserted. 
diff --git a/jstests/noPassthrough/explain_optimization_stats.js b/jstests/noPassthrough/explain_optimization_stats.js index 3dabb3f55c8..9d273c884f3 100644 --- a/jstests/noPassthrough/explain_optimization_stats.js +++ b/jstests/noPassthrough/explain_optimization_stats.js @@ -3,7 +3,7 @@ */ const collName = "jstests_explain_optimization_stats"; -import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js" +import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; import {FixtureHelpers} from "jstests/libs/fixture_helpers.js"; import {configureFailPoint} from "jstests/libs/fail_point_util.js"; @@ -52,7 +52,7 @@ function runTest(db) { emit("val", 1); }, reduce: function(k, v) { - return 1 + return 1; }, out: "example" } diff --git a/jstests/noPassthrough/index_multikey_deduplication.js b/jstests/noPassthrough/index_multikey_deduplication.js index 821dee1c28b..d325ab43808 100644 --- a/jstests/noPassthrough/index_multikey_deduplication.js +++ b/jstests/noPassthrough/index_multikey_deduplication.js @@ -30,7 +30,9 @@ for (let i = 0; i < 300; ++i) { docs.push({i, multiKey: array}); } Array.shuffle(docs); -docs.forEach((doc, index) => {doc.stringIndex = `index${index + 1000}`}); +docs.forEach((doc, index) => { + doc.stringIndex = `index${index + 1000}`; +}); // This query should return 249 documents from {i: 51} to {i: 299} const query = { diff --git a/jstests/noPassthrough/max_time_ms_sharded.js b/jstests/noPassthrough/max_time_ms_sharded.js index f83853c6f10..6459a9eed57 100644 --- a/jstests/noPassthrough/max_time_ms_sharded.js +++ b/jstests/noPassthrough/max_time_ms_sharded.js @@ -130,16 +130,14 @@ configureMaxTimeNeverTimeOut("off"); // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time. // -let assertCommandFailedWithMaxTimeMSExpired = - (commandName, failMessage) => { - const maxTimeMSCounter = admin.serverStatus().metrics.operation.killedDueToMaxTimeMSExpired; - assert.commandFailedWithCode(coll.runCommand(commandName, {maxTimeMS: defaultMaxTimeMS}), - ErrorCodes.MaxTimeMSExpired, - failMessage); +let assertCommandFailedWithMaxTimeMSExpired = (commandName, failMessage) => { + const maxTimeMSCounter = admin.serverStatus().metrics.operation.killedDueToMaxTimeMSExpired; + assert.commandFailedWithCode(coll.runCommand(commandName, {maxTimeMS: defaultMaxTimeMS}), + ErrorCodes.MaxTimeMSExpired, + failMessage); - assert.gt(admin.serverStatus().metrics.operation.killedDueToMaxTimeMSExpired, - maxTimeMSCounter); - } + assert.gt(admin.serverStatus().metrics.operation.killedDueToMaxTimeMSExpired, maxTimeMSCounter); +}; // Positive test for "validate". 
configureMaxTimeAlwaysTimeOut("alwaysOn"); diff --git a/jstests/noPassthrough/mongos_unsharded_commands_interface_parity_with_replica_set.js b/jstests/noPassthrough/mongos_unsharded_commands_interface_parity_with_replica_set.js index abab3ff2dfa..ca8c77fb12a 100644 --- a/jstests/noPassthrough/mongos_unsharded_commands_interface_parity_with_replica_set.js +++ b/jstests/noPassthrough/mongos_unsharded_commands_interface_parity_with_replica_set.js @@ -321,19 +321,19 @@ function collModParity(db, collModCommand) { // Run collmod on a unsplitable collection db.runCommand({createUnsplittableCollection: "x"}); db.runCommand({createIndexes: "x", indexes: [{key: {"age": 1}, name: "ageIndex"}]}); - collModCommand.command.collMod = "x" + collModCommand.command.collMod = "x"; const unsplitableResultKeys = Object.keys(runAndAssertTestCase(collModCommand, db)); // Run collmod on an unsharded collection db.runCommand({create: "y"}); db.runCommand({createIndexes: "y", indexes: [{key: {"age": 1}, name: "ageIndex"}]}); - collModCommand.command.collMod = "y" + collModCommand.command.collMod = "y"; const unshardedResultKeys = Object.keys(runAndAssertTestCase(collModCommand, db)); // Run collmod on an sharded collection db.runCommand({create: "z"}); db.runCommand({createIndexes: "z", indexes: [{key: {"age": 1}, name: "ageIndex"}]}); - collModCommand.command.collMod = "z" + collModCommand.command.collMod = "z"; const shardedResultKeys = Object.keys(runAndAssertTestCase(collModCommand, db)); assert(isSubset(unsplitableResultKeys, unshardedResultKeys) && @@ -377,7 +377,7 @@ function collModParityTests(db) { } function runAndAssertTestCaseWithForcedWriteConcern(testCase, testFixture) { - testFixture.stopReplication(testFixture.mongoConfig) + testFixture.stopReplication(testFixture.mongoConfig); testCase.command.writeConcern = {w: "majority", wtimeout: 1}; const result = testFixture.db.runCommand(testCase.command); assertWriteConcernError(result); @@ -439,14 +439,14 @@ function assertMongosAndReplicaSetInterfaceParity(test, testCase, forceWriteConc mongoConfig: rst, stopReplication: stopReplicationOnSecondaries, restartReplication: restartReplicationOnSecondaries, - } + }; const mongosTestFixture = { db: mongosDb, mongoConfig: st, stopReplication: stopReplicationOnSecondariesOfAllShards, restartReplication: restartReplicationOnAllShards, - } + }; const replSetResultKeys = Object.keys(runTestCase( test, diff --git a/jstests/noPassthrough/move_collection_create_options.js b/jstests/noPassthrough/move_collection_create_options.js index 28eea044d8d..c4ad22c9bcc 100644 --- a/jstests/noPassthrough/move_collection_create_options.js +++ b/jstests/noPassthrough/move_collection_create_options.js @@ -49,7 +49,7 @@ function validateCollection(conn, jsTest.log("*** Checking expectedCollOpts " + tojson({listCollectionsDoc, expectedCollOpts})); for (let fieldName in expectedCollOpts) { const actual = getDottedField(listCollectionsDoc.options, fieldName); - const expected = expectedCollOpts[fieldName] + const expected = expectedCollOpts[fieldName]; assert.eq(bsonUnorderedFieldsCompare(actual, expected), 0, {fieldName, actual, expected}); } assert.eq(coll.countDocuments({}), maxCount); @@ -66,7 +66,7 @@ function validateCollection(conn, found = true; for (let fieldName in expectedIndex) { const actual = getDottedField(actualIndex, fieldName); - const expected = expectedIndex[fieldName] + const expected = expectedIndex[fieldName]; assert.eq(bsonUnorderedFieldsCompare(actual, expected), 0, {fieldName, actual, expected}); diff --git 
a/jstests/noPassthrough/network_timeout.js b/jstests/noPassthrough/network_timeout.js index d4ebd70ebd3..a79a1d4739b 100644 --- a/jstests/noPassthrough/network_timeout.js +++ b/jstests/noPassthrough/network_timeout.js @@ -13,12 +13,12 @@ TestData.skipCheckOrphans = true; TestData.skipCheckShardFilteringMetadata = true; let st = new ShardingTest({shards: 1, mongos: 1, config: 1, rs: {nodes: 2}}); -let testDB = "test" -let testColl = "testColl" +let testDB = "test"; +let testColl = "testColl"; let testNS = testDB + "." + testColl; let admin = st.s.getDB("admin"); -let conn = new Mongo(st.s.host) +let conn = new Mongo(st.s.host); // Shard the collection to test sharding APIs such as AsyncRequestsSender during cluster find. assert.commandWorked(admin.runCommand({enableSharding: testDB})); diff --git a/jstests/noPassthrough/notablescan.js b/jstests/noPassthrough/notablescan.js index 794ff2e9156..c493b06632e 100644 --- a/jstests/noPassthrough/notablescan.js +++ b/jstests/noPassthrough/notablescan.js @@ -67,7 +67,7 @@ assert.commandWorked(db.adminCommand({setParameter: 1, notablescan: true})); } { // Run the testcase with a clustered index. - assertDropAndRecreateCollection(db, colName, {clusteredIndex: {key: {_id: 1}, unique: true}}) + assertDropAndRecreateCollection(db, colName, {clusteredIndex: {key: {_id: 1}, unique: true}}); coll = db.getCollection(colName); assert.commandWorked(coll.insert({_id: 22})); assert.eq(1, coll.find({_id: 22}).itcount()); diff --git a/jstests/noPassthrough/out_drop_temp_collection.js b/jstests/noPassthrough/out_drop_temp_collection.js index af019aa1b83..c74bb5da7c9 100644 --- a/jstests/noPassthrough/out_drop_temp_collection.js +++ b/jstests/noPassthrough/out_drop_temp_collection.js @@ -68,7 +68,7 @@ const st = new ShardingTest({shards: 2, mongos: 1, config: 1}); import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (FeatureFlagUtil.isPresentAndEnabled(st.s, "TrackUnshardedCollectionsUponMoveCollection") || FeatureFlagUtil.isPresentAndEnabled(st.s, "TrackUnshardedCollectionsUponCreation")) { - st.stop() + st.stop(); quit(); } runTest(st, st.s.getDB("out_drop_temp"), st.s.port); diff --git a/jstests/noPassthrough/plan_cache_replan_unwind.js b/jstests/noPassthrough/plan_cache_replan_unwind.js index be584106b51..b7c466b05b6 100644 --- a/jstests/noPassthrough/plan_cache_replan_unwind.js +++ b/jstests/noPassthrough/plan_cache_replan_unwind.js @@ -48,7 +48,7 @@ assert.commandWorked(coll.createIndex({b: 1})); function getAssertCount(count) { return function assertCount(cursor) { assert.eq(count, cursor.itcount()); - } + }; } function testFn( diff --git a/jstests/noPassthrough/plan_cache_replan_where.js b/jstests/noPassthrough/plan_cache_replan_where.js index c222f74c840..fd057b1b2cc 100644 --- a/jstests/noPassthrough/plan_cache_replan_where.js +++ b/jstests/noPassthrough/plan_cache_replan_where.js @@ -29,7 +29,7 @@ assert.eq(1, {b: {$gte: 0}}, { $where: function() { - return true + return true; } } ] @@ -43,7 +43,7 @@ assert.eq(1, {b: {$gte: 0}}, { $where: function() { - return true + return true; } } ] @@ -64,7 +64,7 @@ assert.eq(20, {b: {$gte: 0}}, { $where: function() { - return true + return true; } } ] diff --git a/jstests/noPassthrough/profile_operation_metrics.js b/jstests/noPassthrough/profile_operation_metrics.js index ac995f90918..ba925685b78 100644 --- a/jstests/noPassthrough/profile_operation_metrics.js +++ b/jstests/noPassthrough/profile_operation_metrics.js @@ -1542,7 +1542,7 @@ const runTest = (db) => { print(`Caught ${assertions.length} test 
assertion failures:`); assertions.forEach((a) => { print(a); - }) + }); doassert(`Test failed with ${assertions.length} failures`); assertions = []; } diff --git a/jstests/noPassthrough/queryStats/query_stats_change_stream_per_shard_cursor.js b/jstests/noPassthrough/queryStats/query_stats_change_stream_per_shard_cursor.js index 8f096d11b71..28b30bb2864 100644 --- a/jstests/noPassthrough/queryStats/query_stats_change_stream_per_shard_cursor.js +++ b/jstests/noPassthrough/queryStats/query_stats_change_stream_per_shard_cursor.js @@ -55,7 +55,7 @@ function testCollectionChangeStream(sdb, shardId) { collectionName: "coll", numExecs: numExecs, numDocsReturned: numDocsReturned - }) + }); assert(queryStatsEntry.key.hasOwnProperty("$_passthroughToShard")); assert(queryStatsEntry.key.$_passthroughToShard.hasOwnProperty("shard")); @@ -86,7 +86,7 @@ function testCollectionChangeStream(sdb, shardId) { collectionName: "coll", numExecs: numExecs, numDocsReturned: numDocsReturned - }) + }); assert(queryStatsEntry.key.hasOwnProperty("$_passthroughToShard")); assert(queryStatsEntry.key.$_passthroughToShard.hasOwnProperty("shard")); @@ -120,7 +120,7 @@ function testDatabaseChangeStream(sdb, shardId) { collectionName: "$cmd.aggregate", numExecs: numExecs, numDocsReturned: numDocsReturned - }) + }); assert(queryStatsEntry.key.hasOwnProperty("$_passthroughToShard")); assert(queryStatsEntry.key.$_passthroughToShard.hasOwnProperty("shard")); diff --git a/jstests/noPassthrough/queryStats/query_stats_disk_usage_deep_pipelines.js b/jstests/noPassthrough/queryStats/query_stats_disk_usage_deep_pipelines.js index 74ac4fe4419..45271cfe3d8 100644 --- a/jstests/noPassthrough/queryStats/query_stats_disk_usage_deep_pipelines.js +++ b/jstests/noPassthrough/queryStats/query_stats_disk_usage_deep_pipelines.js @@ -150,8 +150,8 @@ function runDeepBranchingPipelineTest(conn, coll1) { as: "lookedUp", localField: "y", foreignField: "y", - }} - const lookupShape = { pipeline: [ lookup ]}; + }}; + const lookupShape = {pipeline: [lookup]}; // Collection scan over coll2 - 7 docs and 0 keys. 
const unionWithLookup = { @@ -223,7 +223,7 @@ function runDeepBranchingPipelineTest(conn, coll1) { }; { - const otherNssUnion = otherNss.concat({db: "test", coll: coll1.getName()}) + const otherNssUnion = otherNss.concat({db: "test", coll: coll1.getName()}); const expectedDocs = 17; for (let batchSize = 1; batchSize <= expectedDocs + 1; batchSize++) { clearPlanCacheAndQueryStatsStore(conn, coll1); diff --git a/jstests/noPassthrough/queryStats/query_stats_disk_usage_nested_pipelines.js b/jstests/noPassthrough/queryStats/query_stats_disk_usage_nested_pipelines.js index 5b4d88748f0..7136021be9a 100644 --- a/jstests/noPassthrough/queryStats/query_stats_disk_usage_nested_pipelines.js +++ b/jstests/noPassthrough/queryStats/query_stats_disk_usage_nested_pipelines.js @@ -64,8 +64,8 @@ function runUnindexedLookupPipelineTest(conn, localColl) { as: "lookedUp", localField: "v", foreignField: "v", - }} - const shape = { pipeline: [lookup] }; + }}; + const shape = {pipeline: [lookup]}; const queryStatsKey = getAggregateQueryStatsKey( conn, @@ -132,9 +132,9 @@ function runUnindexedUnoptimizedLookupPipelineTest(conn, localColl) { as: "lookedUp", localField: "v", foreignField: "v", - }} - const pipeline = [{$_internalInhibitOptimization: {}}, lookup] - const shape = { pipeline: pipeline }; + }}; + const pipeline = [{$_internalInhibitOptimization: {}}, lookup]; + const shape = {pipeline: pipeline}; const queryStatsKey = getAggregateQueryStatsKey( conn, diff --git a/jstests/noPassthrough/queryStats/query_stats_disk_usage_sharded.js b/jstests/noPassthrough/queryStats/query_stats_disk_usage_sharded.js index ba93b8697f5..0cbfc45b632 100644 --- a/jstests/noPassthrough/queryStats/query_stats_disk_usage_sharded.js +++ b/jstests/noPassthrough/queryStats/query_stats_disk_usage_sharded.js @@ -62,8 +62,8 @@ function runLookupForeignShardedPipelineTest(st) { as: "lookedUp", localField: "v", foreignField: "y", - }} - const shape = { pipeline: [ lookup ]}; + }}; + const shape = {pipeline: [lookup]}; const queryStatsKey = getAggregateQueryStatsKey( conn, diff --git a/jstests/noPassthrough/queryStats/query_stats_expressions.js b/jstests/noPassthrough/queryStats/query_stats_expressions.js index 1fec73ad943..48a4791db57 100644 --- a/jstests/noPassthrough/queryStats/query_stats_expressions.js +++ b/jstests/noPassthrough/queryStats/query_stats_expressions.js @@ -22,7 +22,7 @@ for (let i = 0; i < numDocs / 2; ++i) { bulk.insert({foo: i, bar: i, applyDiscount: false, word: "ghjk"}); } assert.commandWorked(bulk.execute()); -coll.createIndex({foo: 1}) +coll.createIndex({foo: 1}); // Tests that $meta is re-parsed correctly by ensuring the metaDataKeyword is not serialized as // string literal. diff --git a/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js index ee46c6aaaab..3cfc28b8d50 100644 --- a/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js +++ b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js @@ -276,7 +276,7 @@ function queryStatsAggregationStageTest(conn, testDB, coll) { // Test that you can change it at runtime and have it reflected. 
assert.commandWorked(conn.getDB("admin").runCommand( - {setParameter: 1, internalQueryStatsCacheSize: "8MB"})) + {setParameter: 1, internalQueryStatsCacheSize: "8MB"})); const metricsPostGrowth = testDB.serverStatus().metrics.queryStats; const debugInfo = {original: metrics, postGrowth: metricsPostGrowth}; assert.eq(8 * 1024 * 1024, metricsPostGrowth.maxSizeBytes, debugInfo); diff --git a/jstests/noPassthrough/query_settings_through_cluster_parameters.js b/jstests/noPassthrough/query_settings_through_cluster_parameters.js index 9e18706d3a3..39027925430 100644 --- a/jstests/noPassthrough/query_settings_through_cluster_parameters.js +++ b/jstests/noPassthrough/query_settings_through_cluster_parameters.js @@ -11,75 +11,74 @@ import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"; import {QuerySettingsUtils} from "jstests/libs/query_settings_utils.js"; -let test = - (db) => { - // Creating the collection, because some sharding passthrough suites are failing when - // explain command is issued on the nonexistent database and collection. - const coll = assertDropAndRecreateCollection(db, jsTestName()); - const qsutils = new QuerySettingsUtils(db, coll.getName()); +let test = (db) => { + // Creating the collection, because some sharding passthrough suites are failing when + // explain command is issued on the nonexistent database and collection. + const coll = assertDropAndRecreateCollection(db, jsTestName()); + const qsutils = new QuerySettingsUtils(db, coll.getName()); - let query = qsutils.makeAggregateQueryInstance({ - pipeline: [ - {$match: {matchKey: 15}}, - { - $group: { - _id: "groupID", - values: {$addToSet: "$value"}, - }, + let query = qsutils.makeAggregateQueryInstance({ + pipeline: [ + {$match: {matchKey: 15}}, + { + $group: { + _id: "groupID", + values: {$addToSet: "$value"}, }, + }, + ] + }); + const querySettings = { + indexHints: { + ns: {db: db.getName(), coll: coll.getName()}, + allowedIndexes: ["groupID_1", {$natural: 1}] + } + }; + + // Reset query settings. + qsutils.removeAllQuerySettings(); + + // Ensure 'setClusterParameter' doesn't accept query settings parameter directly. + assert.commandFailedWithCode(db.adminCommand({ + setClusterParameter: { + querySettings: [ + querySettings, ] - }); - const querySettings = { - indexHints: { - ns: {db: db.getName(), coll: coll.getName()}, - allowedIndexes: ["groupID_1", {$natural: 1}] - } - }; + } + }), + ErrorCodes.NoSuchKey); - // Reset query settings. - qsutils.removeAllQuerySettings(); + // Ensure that 'querySettings' cluster parameter hasn't changed after invoking + // 'setClusterParameter' command. + qsutils.assertQueryShapeConfiguration([]); - // Ensure 'setClusterParameter' doesn't accept query settings parameter directly. - assert.commandFailedWithCode(db.adminCommand({ - setClusterParameter: { - querySettings: [ - querySettings, - ] - } - }), - ErrorCodes.NoSuchKey); + // Ensure that query settings can be configured through setQuerySettings command. + assert.commandWorked(db.adminCommand({setQuerySettings: query, settings: querySettings})); - // Ensure that 'querySettings' cluster parameter hasn't changed after invoking - // 'setClusterParameter' command. - qsutils.assertQueryShapeConfiguration([]); + // Ensure that 'querySettings' cluster parameter contains QueryShapeConfiguration after + // invoking setQuerySettings command. 
+ qsutils.assertQueryShapeConfiguration( + [qsutils.makeQueryShapeConfiguration(querySettings, query)]); - // Ensure that query settings can be configured through setQuerySettings command. - assert.commandWorked(db.adminCommand({setQuerySettings: query, settings: querySettings})); + // Ensure 'getClusterParameter' doesn't accept query settings parameter directly. + assert.commandFailedWithCode(db.adminCommand({getClusterParameter: "querySettings"}), + ErrorCodes.NoSuchKey); + assert.commandFailedWithCode(db.adminCommand({ + getClusterParameter: + ["testIntClusterParameter", "querySettings", "testStrClusterParameter"] + }), + ErrorCodes.NoSuchKey); - // Ensure that 'querySettings' cluster parameter contains QueryShapeConfiguration after - // invoking setQuerySettings command. - qsutils.assertQueryShapeConfiguration( - [qsutils.makeQueryShapeConfiguration(querySettings, query)]); + // Ensure 'getClusterParameter' doesn't print query settings value with other cluster + // parameters. + const clusterParameters = + assert.commandWorked(db.adminCommand({getClusterParameter: "*"})).clusterParameters; + assert(!clusterParameters.some(parameter => parameter._id === "querySettings"), + "unexpected _id = 'querySettings' in " + tojson(clusterParameters)); - // Ensure 'getClusterParameter' doesn't accept query settings parameter directly. - assert.commandFailedWithCode(db.adminCommand({getClusterParameter: "querySettings"}), - ErrorCodes.NoSuchKey); - assert.commandFailedWithCode(db.adminCommand({ - getClusterParameter: - ["testIntClusterParameter", "querySettings", "testStrClusterParameter"] - }), - ErrorCodes.NoSuchKey); - - // Ensure 'getClusterParameter' doesn't print query settings value with other cluster - // parameters. - const clusterParameters = - assert.commandWorked(db.adminCommand({getClusterParameter: "*"})).clusterParameters; - assert(!clusterParameters.some(parameter => parameter._id === "querySettings"), - "unexpected _id = 'querySettings' in " + tojson(clusterParameters)); - - // Cleanup query settings. - qsutils.removeAllQuerySettings(); - } + // Cleanup query settings. 
+ qsutils.removeAllQuerySettings(); +}; { const rst = new ReplSetTest({nodes: 1}); @@ -92,5 +91,5 @@ let test = { const st = new ShardingTest({shards: 3, mongos: 1}); test(st.getDB("ShardingTestDB")); - st.stop() + st.stop(); } diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js index f09d7ea6e96..854153af675 100644 --- a/jstests/noPassthrough/read_majority.js +++ b/jstests/noPassthrough/read_majority.js @@ -182,7 +182,7 @@ function testReadConcernLevel(level) { assert.eq(getCursorForReadConcernLevel().itcount(), 10); assert.eq(getAggCursorForReadConcernLevel().itcount(), 10); - let explain = getExplainPlan({version: 1}) + let explain = getExplainPlan({version: 1}); let optimizer = getOptimizer(explain); switch (optimizer) { case "classic": diff --git a/jstests/noPassthrough/read_preference_metrics.js b/jstests/noPassthrough/read_preference_metrics.js index ec7d7f87318..1ca0a7a4bf0 100644 --- a/jstests/noPassthrough/read_preference_metrics.js +++ b/jstests/noPassthrough/read_preference_metrics.js @@ -35,14 +35,14 @@ function verifyMetricIncrement(conn, readPref, executedOn, tagged) { assert(expectedCount == count, `Actual count ${count} did not equal expected count ${ - expectedCount} for readPreference ${readPref}.`) + expectedCount} for readPreference ${readPref}.`); if (tagged) { const expectedTaggedCount = preMetrics[executedOn].tagged.external + 1; const taggedCount = postMetrics[executedOn].tagged.external; assert(expectedTaggedCount == taggedCount, `Actual tagged count ${taggedCount} did not equal to expected tagged count ${ - expectedTaggedCount} for read preference ${readPref}.`) + expectedTaggedCount} for read preference ${readPref}.`); } } @@ -59,15 +59,15 @@ function runTest(fixture) { ]; for (const readPref of preferences) { - verifyMetricIncrement(primary, readPref, "executedOnPrimary") + verifyMetricIncrement(primary, readPref, "executedOnPrimary"); if (readPref != "primary") { // For the tagged test on the primary and both tests on the secondary, we skip the // primary read preference case. This is because this read preference does not support // tag sets, and the command will fail on the secondary before we increment any // metrics. 
- verifyMetricIncrement(primary, readPref, "executedOnPrimary", true /* tagged */) - verifyMetricIncrement(secondary, readPref, "executedOnSecondary") - verifyMetricIncrement(secondary, readPref, "executedOnSecondary", true /* tagged */) + verifyMetricIncrement(primary, readPref, "executedOnPrimary", true /* tagged */); + verifyMetricIncrement(secondary, readPref, "executedOnSecondary"); + verifyMetricIncrement(secondary, readPref, "executedOnSecondary", true /* tagged */); } } } @@ -83,7 +83,7 @@ const rst = new ReplSetTest({nodes: 2}); rst.startSet(); rst.initiateWithHighElectionTimeout(); -jsTestLog("Testing against replica set") +jsTestLog("Testing against replica set"); runTest(rst); rst.stopSet(); @@ -95,7 +95,7 @@ serverStatus = assert.commandWorked(st.s.getDB("admin").runCommand({serverStatus assert(serverStatus.process.startsWith("mongos"), tojson(serverStatus)); assert(!serverStatus.hasOwnProperty("readPreferenceCounters"), tojson(serverStatus)); -jsTestLog("Testing against sharded cluster") -runTest(st.rs0) +jsTestLog("Testing against sharded cluster"); +runTest(st.rs0); st.stop(); diff --git a/jstests/noPassthrough/rename_collection_across_dbs.js b/jstests/noPassthrough/rename_collection_across_dbs.js index 8acfc4268c8..8893cc81f1a 100644 --- a/jstests/noPassthrough/rename_collection_across_dbs.js +++ b/jstests/noPassthrough/rename_collection_across_dbs.js @@ -210,7 +210,7 @@ sampleDocSize = Object.bsonsize(sampleDocFull); // Get default batched size byte limit to verify the current limit serverParam = assert.commandWorked( srcDb.adminCommand({getParameter: 1, maxSizeOfBatchedInsertsForRenameAcrossDatabasesBytes: 1})); -jsTestLog("Default batch size limit for renames across databases:" + tojson(serverParam)) +jsTestLog("Default batch size limit for renames across databases:" + tojson(serverParam)); defaultMaxBatchSize = serverParam["maxSizeOfBatchedInsertsForRenameAcrossDatabasesBytes"]; jsTestLog("defaultMaxBatchSize: " + defaultMaxBatchSize); diff --git a/jstests/noPassthrough/rename_collection_nonexistent_db.js b/jstests/noPassthrough/rename_collection_nonexistent_db.js index 820f2afaf35..1c85b432e7b 100644 --- a/jstests/noPassthrough/rename_collection_nonexistent_db.js +++ b/jstests/noPassthrough/rename_collection_nonexistent_db.js @@ -51,14 +51,14 @@ function runTestRenameCollectionOnEvent(st, eventFunction, expectedErrorCode) { const st = new ShardingTest({shards: 2, mongos: 1, config: 1}); // Tests that the rename command errors if the source database is dropped during execution. -runTestRenameCollectionOnEvent( - st, - (testDb) => {assert.commandWorked(testDb.runCommand({dropDatabase: 1}))}, - ErrorCodes.NamespaceNotFound); +runTestRenameCollectionOnEvent(st, (testDb) => { + assert.commandWorked(testDb.runCommand({dropDatabase: 1})); +}, ErrorCodes.NamespaceNotFound); // Tests that the rename command errors if the source collection is dropped during execution. -runTestRenameCollectionOnEvent( - st, (testDb) => {testDb.renameDifferentDb.drop()}, ErrorCodes.NamespaceNotFound); +runTestRenameCollectionOnEvent(st, (testDb) => { + testDb.renameDifferentDb.drop(); +}, ErrorCodes.NamespaceNotFound); // Tests that the rename command errors if the destination collection is created during execution. const createDestinationCollection = (testDb) => { @@ -73,7 +73,7 @@ runTestRenameCollectionOnEvent( // during execution. 
const dropDestinationDatabase = (testDb) => { const destDb = testDb.getSiblingDB("destDb"); - assert.commandWorked(destDb.runCommand({dropDatabase: 1})) + assert.commandWorked(destDb.runCommand({dropDatabase: 1})); }; runTestRenameCollectionOnEvent(st, dropDestinationDatabase, ErrorCodes.NamespaceNotFound); diff --git a/jstests/noPassthrough/replicate_record_ids_dbhash.js b/jstests/noPassthrough/replicate_record_ids_dbhash.js index becfb272309..055686ddd7c 100644 --- a/jstests/noPassthrough/replicate_record_ids_dbhash.js +++ b/jstests/noPassthrough/replicate_record_ids_dbhash.js @@ -29,8 +29,7 @@ const findRecordId = function(testDB, collName, doc) { return res["$recordId"]; }; -const insertDocWithInconsistentRids = - function(primaryDB, secondaryDB, docToInsertWithDifRid) { +const insertDocWithInconsistentRids = function(primaryDB, secondaryDB, docToInsertWithDifRid) { const explicitlySetRecordIdOnInsert = configureFailPoint( secondaryDB, "explicitlySetRecordIdOnInsert", @@ -43,7 +42,7 @@ const insertDocWithInconsistentRids = primaryDB.runCommand({insert: collName, documents: [docToInsertWithDifRid]})); rst.awaitReplication(); explicitlySetRecordIdOnInsert.off(); -} +}; const runTest = function(replicatedRecordIds) { const primaryDB = primary.getDB(dbName); diff --git a/jstests/noPassthrough/retryable_vectored_inserts.js b/jstests/noPassthrough/retryable_vectored_inserts.js index 07b596260eb..f5e373f7f78 100644 --- a/jstests/noPassthrough/retryable_vectored_inserts.js +++ b/jstests/noPassthrough/retryable_vectored_inserts.js @@ -110,23 +110,23 @@ function checkRetries() { jsTestLog("Retrying single batch insert."); assert.commandWorked(retryDB.runCommand(singleBatchCommand)); assert.docEq(secondaryDB[singleBatchCommand.insert].find({}).sort({_id: 1}).toArray(), - singleBatchCommand.documents) + singleBatchCommand.documents); assert.docEq(primaryDB[singleBatchCommand.insert].find({}).sort({_id: 1}).toArray(), - singleBatchCommand.documents) + singleBatchCommand.documents); // Retry multi batch command on new primary and make sure documents match after. jsTestLog("Retrying multi batch insert."); assert.commandWorked(retryDB.runCommand(multiBatchCommand)); assert.docEq(secondaryDB[multiBatchCommand.insert].find({}).sort({_id: 1}).toArray(), - multiBatchCommand.documents) + multiBatchCommand.documents); assert.docEq(primaryDB[multiBatchCommand.insert].find({}).sort({_id: 1}).toArray(), - multiBatchCommand.documents) + multiBatchCommand.documents); // Retry broken batch command on new primary and ensure the whole insert worked. jsTestLog("Retrying broken batch insert."); assert.commandWorked(retryDB.runCommand(brokenBatchCommand)); assert.docEq(secondaryDB[brokenBatchCommand.insert].find({}).sort({_id: 1}).toArray(), - brokenBatchCommand.documents) + brokenBatchCommand.documents); // Since this didn't work the first time, we need to await replication for it to work on the old // primary. 
if (retryNode == secondary) { diff --git a/jstests/noPassthrough/rs_endpoint/apply_ops.js b/jstests/noPassthrough/rs_endpoint/apply_ops.js index 53fdfffa6f9..de87bc4886a 100644 --- a/jstests/noPassthrough/rs_endpoint/apply_ops.js +++ b/jstests/noPassthrough/rs_endpoint/apply_ops.js @@ -70,9 +70,7 @@ function runTests(shard0Primary, tearDownFunc, isMultitenant) { const {router, mongos} = (() => { if (shard0Primary.routerHost) { const router = new Mongo(shard0Primary.routerHost); - return { - router - } + return {router}; } const shard0URL = getReplicaSetURL(shard0Primary); const mongos = MongoRunner.runMongos({configdb: shard0URL}); diff --git a/jstests/noPassthrough/rs_endpoint/crud_and_ddl.js b/jstests/noPassthrough/rs_endpoint/crud_and_ddl.js index 7f9ec4da6b2..cbb0042f464 100644 --- a/jstests/noPassthrough/rs_endpoint/crud_and_ddl.js +++ b/jstests/noPassthrough/rs_endpoint/crud_and_ddl.js @@ -71,9 +71,7 @@ function runTests(shard0Primary, tearDownFunc, isMultitenant) { const {router, mongos} = (() => { if (shard0Primary.routerHost) { const router = new Mongo(shard0Primary.routerHost); - return { - router - } + return {router}; } const shard0URL = getReplicaSetURL(shard0Primary); const mongos = MongoRunner.runMongos({configdb: shard0URL}); diff --git a/jstests/noPassthrough/rs_endpoint/current_op.js b/jstests/noPassthrough/rs_endpoint/current_op.js index b4521a2e1b8..efc0f2c7109 100644 --- a/jstests/noPassthrough/rs_endpoint/current_op.js +++ b/jstests/noPassthrough/rs_endpoint/current_op.js @@ -113,7 +113,7 @@ let clusterDropOpId, shardsvrDropOpId; assert.eq(ops0.length, 1, ops0); assert.eq(ops0[0].role, "ClusterRole{shard}", ops0); assert.eq(ops0[0].host, primary.host, ops0); - shardsvrDropOpId = ops0[0].opid + shardsvrDropOpId = ops0[0].opid; const ops1 = primary.getDB("admin") .aggregate([ @@ -268,7 +268,7 @@ assert.commandWorked(primaryTestDB.killOp(clusterDropOpId)); assert.eq(ops0.length, 1, ops0); assert.eq(ops0[0].role, "ClusterRole{shard}", ops0); assert.eq(ops0[0].host, primary.host, ops0); - shardsvrDropOpId = ops0[0].opid + shardsvrDropOpId = ops0[0].opid; const ops1 = primary.getDB("admin") .aggregate([ diff --git a/jstests/noPassthrough/rs_endpoint/implicit_database_collection_creation.js b/jstests/noPassthrough/rs_endpoint/implicit_database_collection_creation.js index 1efcf70dd16..c8e4289222c 100644 --- a/jstests/noPassthrough/rs_endpoint/implicit_database_collection_creation.js +++ b/jstests/noPassthrough/rs_endpoint/implicit_database_collection_creation.js @@ -131,7 +131,7 @@ function runTests(getShard0PrimaryFunc, // Currently, sharding isn't supported in serverless. const expectShardingMetadata0 = !isMultitenant && FeatureFlagUtil.isPresentAndEnabled(shard0Primary.getDB('admin'), - "TrackUnshardedCollectionsUponCreation") + "TrackUnshardedCollectionsUponCreation"); runTest(shard0Primary, execCtxTypes.kNoSession, expectShardingMetadata0); runTest(shard0Primary, execCtxTypes.kNonRetryableWrite, expectShardingMetadata0); runTest(shard0Primary, execCtxTypes.kRetryableWrite, expectShardingMetadata0); @@ -182,7 +182,7 @@ function runTests(getShard0PrimaryFunc, // Currently, sharding isn't supported in serverless. 
const expectShardingMetadata2 = !isMultitenant && FeatureFlagUtil.isPresentAndEnabled(getShard0PrimaryFunc().getDB('admin'), - "TrackUnshardedCollectionsUponCreation") + "TrackUnshardedCollectionsUponCreation"); runTest(shard0Primary, execCtxTypes.kNoSession, expectShardingMetadata2); runTest(shard0Primary, execCtxTypes.kNonRetryableWrite, expectShardingMetadata2); runTest(shard0Primary, execCtxTypes.kRetryableWrite, expectShardingMetadata2); @@ -228,9 +228,7 @@ function runTests(getShard0PrimaryFunc, const {router, mongos} = (() => { if (shard0Primary.routerHost) { const router = new Mongo(shard0Primary.routerHost); - return { - router - } + return {router}; } const shard0URL = getReplicaSetURL(shard0Primary); const mongos = MongoRunner.runMongos({configdb: shard0URL}); diff --git a/jstests/noPassthrough/rs_endpoint/lib/fixture.js b/jstests/noPassthrough/rs_endpoint/lib/fixture.js index 5677ca7aa73..d64d3965873 100644 --- a/jstests/noPassthrough/rs_endpoint/lib/fixture.js +++ b/jstests/noPassthrough/rs_endpoint/lib/fixture.js @@ -97,7 +97,8 @@ export var ReplicaSetEndpointTest = class { _authenticateShard0TestUser() { assert(this.shard0AuthDB.logout()); - assert(this.shard0AuthDB.auth(this._shard0TestUser.userName, this._shard0TestUser.password)) + assert( + this.shard0AuthDB.auth(this._shard0TestUser.userName, this._shard0TestUser.password)); } _authenticateShard1AdminUser() { @@ -194,4 +195,4 @@ export var ReplicaSetEndpointTest = class { this._shard0Rst.stopSet(); this._shard1Rst.stopSet(); } -} +}; diff --git a/jstests/noPassthrough/rs_endpoint/lib/validate_direct_secondary_reads.js b/jstests/noPassthrough/rs_endpoint/lib/validate_direct_secondary_reads.js index 6f4148081f1..09492d10a73 100644 --- a/jstests/noPassthrough/rs_endpoint/lib/validate_direct_secondary_reads.js +++ b/jstests/noPassthrough/rs_endpoint/lib/validate_direct_secondary_reads.js @@ -32,7 +32,7 @@ export function validateProfilerCollections(hostDoc, hostDocs, numProfilerDocsPe const conn = new Mongo(hostDoc.host); conn.setSecondaryOk(); jsTest.authenticate(conn); - numProfilerDocsPerHost[hostDoc.host] = 0 + numProfilerDocsPerHost[hostDoc.host] = 0; const dbNames = conn.getDBNames(); for (let dbName of dbNames) { diff --git a/jstests/noPassthrough/rs_endpoint/local_database.js b/jstests/noPassthrough/rs_endpoint/local_database.js index c1252ed683f..0bab01cd3ef 100644 --- a/jstests/noPassthrough/rs_endpoint/local_database.js +++ b/jstests/noPassthrough/rs_endpoint/local_database.js @@ -86,9 +86,7 @@ function runTests(shard0Primary, shard0Secondary, tearDownFunc, isMultitenant) { const {router, mongos} = (() => { if (shard0Primary.routerHost) { const router = new Mongo(shard0Primary.routerHost); - return { - router - } + return {router}; } const shard0URL = getReplicaSetURL(shard0Primary); const mongos = MongoRunner.runMongos({configdb: shard0URL}); diff --git a/jstests/noPassthrough/rs_endpoint/replset_commands.js b/jstests/noPassthrough/rs_endpoint/replset_commands.js index c31d99c4f74..b150435adef 100644 --- a/jstests/noPassthrough/rs_endpoint/replset_commands.js +++ b/jstests/noPassthrough/rs_endpoint/replset_commands.js @@ -59,9 +59,7 @@ function runTests(shard0Primary, tearDownFunc, isMultitenant) { const {router, mongos} = (() => { if (shard0Primary.routerHost) { const router = new Mongo(shard0Primary.routerHost); - return { - router - } + return {router}; } const shard0URL = getReplicaSetURL(shard0Primary); const mongos = MongoRunner.runMongos({configdb: shard0URL}); diff --git 
a/jstests/noPassthrough/rs_endpoint/shard_local_users.js b/jstests/noPassthrough/rs_endpoint/shard_local_users.js index 44344ecf25a..bc14df54620 100644 --- a/jstests/noPassthrough/rs_endpoint/shard_local_users.js +++ b/jstests/noPassthrough/rs_endpoint/shard_local_users.js @@ -79,9 +79,7 @@ function runTests(shard0Primary, tearDownFunc) { const {router, mongos} = (() => { if (shard0Primary.routerHost) { const router = new Mongo(shard0Primary.routerHost); - return { - router - } + return {router}; } const mongos = MongoRunner.runMongos({configdb: shard0URL, keyFile}); return {router: mongos, mongos}; diff --git a/jstests/noPassthrough/rs_endpoint/skip_read_preference_retargeting.js b/jstests/noPassthrough/rs_endpoint/skip_read_preference_retargeting.js index 3780e8a2ea4..25c8167f470 100644 --- a/jstests/noPassthrough/rs_endpoint/skip_read_preference_retargeting.js +++ b/jstests/noPassthrough/rs_endpoint/skip_read_preference_retargeting.js @@ -52,7 +52,7 @@ assert.commandWorked(primary.adminCommand({balancerStop: 1})); assert.soon(() => { const res = assert.commandWorked(primary.adminCommand({balancerStatus: 1})); return !res.inBalancerRound; -}) +}); const dbName = "testDb"; const collName = "testColl"; diff --git a/jstests/noPassthrough/rs_endpoint/txn_lifetime.js b/jstests/noPassthrough/rs_endpoint/txn_lifetime.js index 04f16f5483c..0e94d37875d 100644 --- a/jstests/noPassthrough/rs_endpoint/txn_lifetime.js +++ b/jstests/noPassthrough/rs_endpoint/txn_lifetime.js @@ -192,4 +192,4 @@ function runTest(hasDirectShardOperationPrivilege) { } runTest(true /* hasDirectShardOperationPrivilege */); -runTest(false /* hasDirectShardOperationPrivilege */) +runTest(false /* hasDirectShardOperationPrivilege */); diff --git a/jstests/noPassthrough/sbe_plan_cache_with_const_let_var.js b/jstests/noPassthrough/sbe_plan_cache_with_const_let_var.js index 5c39e028bd5..c27aab6fea5 100644 --- a/jstests/noPassthrough/sbe_plan_cache_with_const_let_var.js +++ b/jstests/noPassthrough/sbe_plan_cache_with_const_let_var.js @@ -42,11 +42,11 @@ result = assert.commandWorked(db.runCommand(findWithConstLet5)).cursor.firstBatc assert.eq(result, [{_id: 5}]); assert.gt(coll.getPlanCache().list().length, 0, coll.getPlanCache().list()); -let exp = assert.commandWorked(db.runCommand({explain: findWithConstLet5})) +let exp = assert.commandWorked(db.runCommand({explain: findWithConstLet5})); let planCacheKey = getPlanCacheKeyFromExplain(exp); result = assert.commandWorked(db.runCommand(findWithConstLet500)).cursor.firstBatch; -exp = assert.commandWorked(db.runCommand({explain: findWithConstLet500})) +exp = assert.commandWorked(db.runCommand({explain: findWithConstLet500})); // Test that two queries with different consts have the same planCacheKey and the returned results // are correct and different. Otherwise, the const let value may be incorrectly baked into SBE plan diff --git a/jstests/noPassthrough/sbe_subplanning.js b/jstests/noPassthrough/sbe_subplanning.js index 2fe1aaa2bb5..6e0b727acc9 100644 --- a/jstests/noPassthrough/sbe_subplanning.js +++ b/jstests/noPassthrough/sbe_subplanning.js @@ -98,7 +98,7 @@ function assertOneResult(cursor) { assert.eq(entry.isPinned, true); } else { // No version:2 entries should have been written. - assert.eq(planCacheEntries.filter((entry) => entry.version === "2").length, 0) + assert.eq(planCacheEntries.filter((entry) => entry.version === "2").length, 0); // We should have two cache entries. 
assert.eq(planCacheEntries.length, 2); @@ -149,7 +149,7 @@ function assertOneResult(cursor) { assert.eq(entry.isPinned, true); } else { // No version:2 entries should have been written. - assert.eq(planCacheEntries.filter((entry) => entry.version == 2).length, 0) + assert.eq(planCacheEntries.filter((entry) => entry.version == 2).length, 0); // We should still have two cache entries. assert.eq(planCacheEntries.length, 2); @@ -199,7 +199,7 @@ function assertOneResult(cursor) { // successfully used in the second run. // No version:2 entries should have been written. - assert.eq(planCacheEntries.filter((entry) => entry.version == 2).length, 0) + assert.eq(planCacheEntries.filter((entry) => entry.version == 2).length, 0); // We should still have two cache entries. assert.eq(planCacheEntries.length, 2); @@ -261,7 +261,7 @@ jsTestLog("Running test which forces SubPlanner to plan the entire query"); // Now we run a query where the planner attempts to use subplanning, but ends up planning the whole // query as one. { - coll.dropIndexes() + coll.dropIndexes(); assert.commandWorked(coll.createIndex({a: 1})); assert.commandWorked(coll.createIndex({b: 1})); assert.commandWorked(coll.createIndex({x: 1})); diff --git a/jstests/noPassthrough/server_status_aggregation_stage_counter.js b/jstests/noPassthrough/server_status_aggregation_stage_counter.js index 8ccd20a2de1..085cb503655 100644 --- a/jstests/noPassthrough/server_status_aggregation_stage_counter.js +++ b/jstests/noPassthrough/server_status_aggregation_stage_counter.js @@ -137,7 +137,7 @@ function runTests(db, coll) { pipeline: [{$project: {x: {$add: [6, "$item"]}}}], cursor: {} })); - }, ["$project"]) + }, ["$project"]); } // Standalone diff --git a/jstests/noPassthrough/shell_grpc_uri.js b/jstests/noPassthrough/shell_grpc_uri.js index d9ac9589740..8caa258e46e 100644 --- a/jstests/noPassthrough/shell_grpc_uri.js +++ b/jstests/noPassthrough/shell_grpc_uri.js @@ -3,7 +3,7 @@ import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; // Constructs a new Mongo instance with the provided URI and asserts it fails with the provided // error code. function assertConnectFailsWithErrorCode(uri, errorCode) { - jsTestLog(`Connecting to ${uri}`) + jsTestLog(`Connecting to ${uri}`); assert.throwsWithCode(() => new Mongo(uri), errorCode); } @@ -11,7 +11,7 @@ function assertConnectFailsWithErrorCode(uri, errorCode) { // (true for success). This is used over assertConnectFailsWithErrorCode when CLI-only arguments // need to be specified. function testShellConnect(ok, ...args) { - const cmd = 'assert.commandWorked(db.runCommand({hello: 1}));' + const cmd = 'assert.commandWorked(db.runCommand({hello: 1}));'; const exitCode = runMongoProgram('mongo', '--eval', cmd, ...args); if (ok) { assert.eq(exitCode, 0, "failed to connect with `" + args.join(' ') + "`"); diff --git a/jstests/noPassthrough/standalone_replication_recovery.js b/jstests/noPassthrough/standalone_replication_recovery.js index c9ac6f24c9a..8fc2c74b60d 100644 --- a/jstests/noPassthrough/standalone_replication_recovery.js +++ b/jstests/noPassthrough/standalone_replication_recovery.js @@ -128,7 +128,7 @@ if (node.getDB(dbName).getCollectionInfos({name: collName})[0].options.recordIds // This happens to be a recordId that doesn't clash. 
rid: NumberLong(6) }] - })) + })); } else { assert.commandWorked(getColl(node).insert({_id: 6})); } diff --git a/jstests/noPassthrough/temporarily_unavailable_on_secondary_transaction_application.js b/jstests/noPassthrough/temporarily_unavailable_on_secondary_transaction_application.js index c2fc9abc419..ac937cb6522 100644 --- a/jstests/noPassthrough/temporarily_unavailable_on_secondary_transaction_application.js +++ b/jstests/noPassthrough/temporarily_unavailable_on_secondary_transaction_application.js @@ -11,7 +11,7 @@ * ] */ -import {funWithArgs} from "jstests/libs/parallel_shell_helpers.js" +import {funWithArgs} from "jstests/libs/parallel_shell_helpers.js"; function checkTemporarilyUnavailableRetriedOnSecondary(rst, isPrepared) { jsTestLog("checkTemporarilyUnavailableRetriedOnSecondary: isPrepared=" + isPrepared); diff --git a/jstests/noPassthrough/timeseries/intermediate_data_consistency_check_arbitrary_update.js b/jstests/noPassthrough/timeseries/intermediate_data_consistency_check_arbitrary_update.js index a5571ef20fa..97bfd71ae30 100644 --- a/jstests/noPassthrough/timeseries/intermediate_data_consistency_check_arbitrary_update.js +++ b/jstests/noPassthrough/timeseries/intermediate_data_consistency_check_arbitrary_update.js @@ -95,7 +95,7 @@ function runIntermediateDataCheckTest(isOrdered) { assert.eq(stats.bucketCount, 3); assert.eq(buckets.length, 3); assert.eq(buckets[0].meta, 1); - assert.eq(buckets[0].control.count, 2) + assert.eq(buckets[0].control.count, 2); assert.eq(buckets[1].meta, 2); assert.eq(buckets[1].control.count, 2); assert.eq(buckets[2].meta, "A"); diff --git a/jstests/noPassthrough/timeseries/promoting_compressed_sorted_bucket_to_compressed_unsorted_bucket.js b/jstests/noPassthrough/timeseries/promoting_compressed_sorted_bucket_to_compressed_unsorted_bucket.js index 073073426db..9280e8e82c3 100644 --- a/jstests/noPassthrough/timeseries/promoting_compressed_sorted_bucket_to_compressed_unsorted_bucket.js +++ b/jstests/noPassthrough/timeseries/promoting_compressed_sorted_bucket_to_compressed_unsorted_bucket.js @@ -30,7 +30,7 @@ assert.commandWorked( const measurements = [ {_id: 0, [timeFieldName]: ISODate("2024-02-15T10:10:10.000Z"), a: 1}, {_id: 1, [timeFieldName]: ISODate("2024-02-15T10:10:20.000Z"), a: 2} -] +]; // Insert first measurement. 
assert.commandWorked(coll.insert(measurements[1])); diff --git a/jstests/noPassthrough/timeseries/timeseries_concurrent.js b/jstests/noPassthrough/timeseries/timeseries_concurrent.js index 2fdc980933f..890ceffccb0 100644 --- a/jstests/noPassthrough/timeseries/timeseries_concurrent.js +++ b/jstests/noPassthrough/timeseries/timeseries_concurrent.js @@ -11,14 +11,14 @@ import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; import {getEngine, getQueryPlanner, getSingleNodeExplain} from "jstests/libs/analyze_plan.js"; -import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; import {Thread} from "jstests/libs/parallelTester.js"; import {checkSbeFullyEnabled} from "jstests/libs/sbe_util.js"; function runAndAssertAggregation(coll, pred, ids) { const pipe = [{$match: pred}, {$project: {_id: 1}}]; - const results = coll.aggregate(pipe).toArray().map((x) => x._id) + const results = coll.aggregate(pipe).toArray().map((x) => x._id); results.sort(); assert.eq(ids, results, () => "Aggregate " + tojson(pipe)); } diff --git a/jstests/noPassthrough/timeseries/timeseries_group_aggregations.js b/jstests/noPassthrough/timeseries/timeseries_group_aggregations.js index 848e5599524..f2edfecd971 100644 --- a/jstests/noPassthrough/timeseries/timeseries_group_aggregations.js +++ b/jstests/noPassthrough/timeseries/timeseries_group_aggregations.js @@ -17,8 +17,8 @@ assert.neq(null, bpConn, "mongod was unable to start up"); // queries. function isSlowBuild(db) { const buildInfo = db.adminCommand("buildInfo"); - const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize') - const optimizationsEnabled = buildInfo.buildEnvironment.ccflags.includes('-O2') + const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize'); + const optimizationsEnabled = buildInfo.buildEnvironment.ccflags.includes('-O2'); const debugBuild = buildInfo.debug; return debugBuild || !optimizationsEnabled || isSanitizerEnabled; diff --git a/jstests/noPassthrough/timeseries/timeseries_group_bson_types.js b/jstests/noPassthrough/timeseries/timeseries_group_bson_types.js index 0d5fc5f055d..a292e0eb787 100644 --- a/jstests/noPassthrough/timeseries/timeseries_group_bson_types.js +++ b/jstests/noPassthrough/timeseries/timeseries_group_bson_types.js @@ -139,8 +139,7 @@ function compareScalarAndBlockProcessing(test, allowDiskUse) { return doc1Json < doc2Json ? -1 : (doc1Json > doc2Json ? 
1 : 0); }; - const normalizeNaN = - function(arg) { + const normalizeNaN = function(arg) { if (Number.isNaN(arg)) { return NumberDecimal("NaN"); } else if (arg !== null && (arg.constructor === Object || Array.isArray(arg))) { @@ -151,7 +150,7 @@ function compareScalarAndBlockProcessing(test, allowDiskUse) { return newArg; } return arg; - } + }; scalarResults = normalizeNaN(scalarResults); bpResults = normalizeNaN(bpResults); diff --git a/jstests/noPassthrough/timeseries/timeseries_query_knob_sbe.js b/jstests/noPassthrough/timeseries/timeseries_query_knob_sbe.js index 84d14392a53..5cecd0a982f 100644 --- a/jstests/noPassthrough/timeseries/timeseries_query_knob_sbe.js +++ b/jstests/noPassthrough/timeseries/timeseries_query_knob_sbe.js @@ -24,7 +24,7 @@ const db = conn.getDB(dbName); const sbeEnabled = checkSbeRestrictedOrFullyEnabled(db) && FeatureFlagUtil.isPresentAndEnabled(db.getMongo(), 'TimeSeriesInSbe'); -const coll = db.timeseries +const coll = db.timeseries; coll.drop(); assert.commandWorked( db.createCollection(coll.getName(), {timeseries: {timeField: "t", metaField: "m"}})); diff --git a/jstests/noPassthrough/timeseries/timeseries_retry_delete_and_update.js b/jstests/noPassthrough/timeseries/timeseries_retry_delete_and_update.js index ca40dd5bae2..e39b9f3f01e 100644 --- a/jstests/noPassthrough/timeseries/timeseries_retry_delete_and_update.js +++ b/jstests/noPassthrough/timeseries/timeseries_retry_delete_and_update.js @@ -29,7 +29,7 @@ rst.initiate(); import { runTimeseriesRetryDeleteAndUpdateTest -} from "jstests/libs/timeseries_retry_delete_and_update.js" +} from "jstests/libs/timeseries_retry_delete_and_update.js"; runTimeseriesRetryDeleteAndUpdateTest( rst.getPrimary(), diff --git a/jstests/noPassthrough/transaction_too_large_for_cache_on_secondary_transaction_application.js b/jstests/noPassthrough/transaction_too_large_for_cache_on_secondary_transaction_application.js index 2820f1f1989..4a2e8104a23 100644 --- a/jstests/noPassthrough/transaction_too_large_for_cache_on_secondary_transaction_application.js +++ b/jstests/noPassthrough/transaction_too_large_for_cache_on_secondary_transaction_application.js @@ -11,7 +11,7 @@ * ] */ -import {funWithArgs} from "jstests/libs/parallel_shell_helpers.js" +import {funWithArgs} from "jstests/libs/parallel_shell_helpers.js"; // jsTestName exceeds 64 characters. const shortName = "transaction_too_large_for_cache_on_secondary"; diff --git a/jstests/query_golden/ce_sampled_histogram.js b/jstests/query_golden/ce_sampled_histogram.js index c00f1b58522..fc9e2afd2e8 100644 --- a/jstests/query_golden/ce_sampled_histogram.js +++ b/jstests/query_golden/ce_sampled_histogram.js @@ -130,59 +130,58 @@ await runHistogramsTest(async function testSampleHistogram() { const totSampleErr = {absError: 0, relError: 0, selError: 0}; const totBaseErr = {absError: 0, relError: 0, selError: 0}; - const runTest = - () => { - let count = 0; - // Sort the values to ensure a stable test result. - const values = - baseColl.find({_id: {$in: [3, 123, 405]}}, projection).sort(sortFields).toArray(); - for (const field of fields) { - for (let i = 1; i < values.length; i++) { - const prev = values[i - 1][field]; - const cur = values[i][field]; + const runTest = () => { + let count = 0; + // Sort the values to ensure a stable test result. 
+ const values = + baseColl.find({_id: {$in: [3, 123, 405]}}, projection).sort(sortFields).toArray(); + for (const field of fields) { + for (let i = 1; i < values.length; i++) { + const prev = values[i - 1][field]; + const cur = values[i][field]; - const min = prev < cur ? prev : cur; - const max = prev > cur ? prev : cur; + const min = prev < cur ? prev : cur; + const max = prev > cur ? prev : cur; - // Test a variety of queries. - testMatchPredicate(baseColl, - sampleColl, - {[field]: {$gte: min, $lte: max}}, - collSize, - totSampleErr, - totBaseErr); - testMatchPredicate(baseColl, - sampleColl, - {[field]: {$lt: min}}, - collSize, - totSampleErr, - totBaseErr); - testMatchPredicate(baseColl, - sampleColl, - {[field]: {$eq: min}}, - collSize, - totSampleErr, - totBaseErr); - count += 3; - } + // Test a variety of queries. + testMatchPredicate(baseColl, + sampleColl, + {[field]: {$gte: min, $lte: max}}, + collSize, + totSampleErr, + totBaseErr); + testMatchPredicate(baseColl, + sampleColl, + {[field]: {$lt: min}}, + collSize, + totSampleErr, + totBaseErr); + testMatchPredicate(baseColl, + sampleColl, + {[field]: {$eq: min}}, + collSize, + totSampleErr, + totBaseErr); + count += 3; } - - const avgBaseErr = { - absError: round2(totBaseErr.absError / count), - relError: round2(totBaseErr.relError / count), - selError: round2(totBaseErr.selError / count) - }; - const avgSampleErr = { - absError: round2(totSampleErr.absError / count), - relError: round2(totSampleErr.relError / count), - selError: round2(totSampleErr.selError / count) - }; - - jsTestLog(`Average errors (${count} queries):`); - print(`Average base error: ${tojson(avgBaseErr)}\n`); - print(`Average sample error: ${tojson(avgSampleErr)}`); } + const avgBaseErr = { + absError: round2(totBaseErr.absError / count), + relError: round2(totBaseErr.relError / count), + selError: round2(totBaseErr.selError / count) + }; + const avgSampleErr = { + absError: round2(totSampleErr.absError / count), + relError: round2(totSampleErr.relError / count), + selError: round2(totSampleErr.selError / count) + }; + + jsTestLog(`Average errors (${count} queries):`); + print(`Average base error: ${tojson(avgBaseErr)}\n`); + print(`Average sample error: ${tojson(avgSampleErr)}`); + }; + forceCE("histogram"); // Sargable nodes and Filter nodes get different CEs. Repeat test with/without sargable rewrite. 
runWithParams([{key: "internalCascadesOptimizerDisableSargableWhenNoIndexes", value: false}], diff --git a/jstests/replsets/assert_on_prepare_conflict_with_hole.js b/jstests/replsets/assert_on_prepare_conflict_with_hole.js index e1915013abc..ff32634445f 100644 --- a/jstests/replsets/assert_on_prepare_conflict_with_hole.js +++ b/jstests/replsets/assert_on_prepare_conflict_with_hole.js @@ -83,10 +83,10 @@ const triggerPrepareConflictThread = new Thread(function(host, dbName, collName) const session = conn.startSession({retryWrites: true}); const collection = session.getDatabase(dbName).getCollection(collName); jsTestLog("Inserting a conflicting operation while keeping a hole open."); - assert.throwsWithCode( - () => {collection.findAndModify( - {query: {a: 3}, update: {a: 2, fromFindAndModify: true}, upsert: true})}, - ErrorCodes.DuplicateKey); + assert.throwsWithCode(() => { + collection.findAndModify( + {query: {a: 3}, update: {a: 2, fromFindAndModify: true}, upsert: true}); + }, ErrorCodes.DuplicateKey); }, primary.host, db.getName(), collName); triggerPrepareConflictThread.start(); diff --git a/jstests/replsets/clean_shutdown_oplog_state.js b/jstests/replsets/clean_shutdown_oplog_state.js index 01c1e2d6716..17a3c4fb1a7 100644 --- a/jstests/replsets/clean_shutdown_oplog_state.js +++ b/jstests/replsets/clean_shutdown_oplog_state.js @@ -102,7 +102,7 @@ let oplogDocId; if (oplogDoc.ns == 'test.coll') { oplogDocId = oplogDoc.o._id; } else { - const opArray = oplogDoc.o.applyOps + const opArray = oplogDoc.o.applyOps; oplogDocId = opArray[opArray.length - 1].o._id; } diff --git a/jstests/replsets/dbcheck_extra_index_keys.js b/jstests/replsets/dbcheck_extra_index_keys.js index 77fe9f110cf..37789e9c4f5 100644 --- a/jstests/replsets/dbcheck_extra_index_keys.js +++ b/jstests/replsets/dbcheck_extra_index_keys.js @@ -126,16 +126,16 @@ function noExtraIndexKeys( }; if (start != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, start: {a: start} } + dbCheckParameters = {...dbCheckParameters, start: {a: start}}; } } if (end != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, end: {a: end} } + dbCheckParameters = {...dbCheckParameters, end: {a: end}}; } } runDbCheck(replSet, primaryDB, collName, dbCheckParameters, true /* awaitCompletion */); @@ -193,16 +193,16 @@ function recordNotFound( }; if (start != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, start: {a: start} } + dbCheckParameters = {...dbCheckParameters, start: {a: start}}; } } if (end != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, end: {a: end} } + dbCheckParameters = {...dbCheckParameters, end: {a: end}}; } } runDbCheck(replSet, primaryDB, collName, dbCheckParameters, true /*awaitCompletion*/); @@ -274,16 +274,16 @@ function recordDoesNotMatch( 
}; if (start != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, start: {a: start} } + dbCheckParameters = {...dbCheckParameters, start: {a: start}}; } } if (end != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, end: {a: end} } + dbCheckParameters = {...dbCheckParameters, end: {a: end}}; } } runDbCheck(replSet, primaryDB, collName, dbCheckParameters, true /*awaitCompletion*/); @@ -351,16 +351,16 @@ function hashingInconsistentExtraKeyOnPrimary( }; if (start != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, start: {a: start} } + dbCheckParameters = {...dbCheckParameters, start: {a: start}}; } } if (end != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, end: {a: end} } + dbCheckParameters = {...dbCheckParameters, end: {a: end}}; } } runDbCheck(replSet, primaryDB, collName, dbCheckParameters, true /*awaitCompletion*/); @@ -435,16 +435,16 @@ batchSize: ${batchSize}, snapshotSize: ${snapshotSize} }; if (start != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, start: {a: start.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, start: {a: start} } + dbCheckParameters = {...dbCheckParameters, start: {a: start}}; } } if (end != null) { if (docSuffix) { - dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix} } + dbCheckParameters = {...dbCheckParameters, end: {a: end.toString() + docSuffix}}; } else { - dbCheckParameters = {...dbCheckParameters, end: {a: end} } + dbCheckParameters = {...dbCheckParameters, end: {a: end}}; } } runDbCheck(replSet, primaryDB, collName, dbCheckParameters, true /*awaitCompletion*/); diff --git a/jstests/replsets/dbcheck_extra_keys_rate_limits.js b/jstests/replsets/dbcheck_extra_keys_rate_limits.js index eb2d381fc9b..adb00d965f3 100644 --- a/jstests/replsets/dbcheck_extra_keys_rate_limits.js +++ b/jstests/replsets/dbcheck_extra_keys_rate_limits.js @@ -39,7 +39,7 @@ const recordDoesNotMatchQuery = { const infoBatchQuery = { "severity": "info", "operation": "dbCheckBatch" -} +}; const replSet = new ReplSetTest({ name: jsTestName(), @@ -167,7 +167,7 @@ function exceedMaxSize(nDocs, batchSize, maxSize, docSuffix) { // keystring is 4 or 5 (the first keystring has size 4 and the rest have size 5), which is used // to keep track of bytesSeen. 
// maximum number of documents that should be checked: - const maxCount = Math.ceil((maxSize - 4) / 5 + 1) + const maxCount = Math.ceil((maxSize - 4) / 5 + 1); jsTestLog("Testing that dbcheck terminates after seeing more than " + maxSize + " bytes: nDocs: " + nDocs + ", batchSize: " + batchSize + diff --git a/jstests/replsets/dbcheck_missing_index_keys.js b/jstests/replsets/dbcheck_missing_index_keys.js index f11b28f996c..df9726ddeab 100644 --- a/jstests/replsets/dbcheck_missing_index_keys.js +++ b/jstests/replsets/dbcheck_missing_index_keys.js @@ -85,7 +85,7 @@ function checkMissingIndexKeys(doc, numDocs = 1, maxDocsPerBatch = 10000) { missingIndexKeysQuery = { ...missingIndexKeysQuery, "data.context.missingIndexKeys.1.keyString.b": 1 - } + }; } forEachNonArbiterNode(replSet, function(node) { diff --git a/jstests/replsets/libs/dbcheck_utils.js b/jstests/replsets/libs/dbcheck_utils.js index 5788cbf662b..19a4e602dd6 100644 --- a/jstests/replsets/libs/dbcheck_utils.js +++ b/jstests/replsets/libs/dbcheck_utils.js @@ -105,13 +105,12 @@ export const clearHealthLog = (replSet) => { replSet.awaitReplication(); }; -export const logEveryBatch = - (replSet) => { - forEachNonArbiterNode(replSet, conn => { - assert.commandWorked( - conn.adminCommand({setParameter: 1, "dbCheckHealthLogEveryNBatches": 1})); - }) - } +export const logEveryBatch = (replSet) => { + forEachNonArbiterNode(replSet, conn => { + assert.commandWorked( + conn.adminCommand({setParameter: 1, "dbCheckHealthLogEveryNBatches": 1})); + }); +}; export const dbCheckCompleted = (db) => { const inprog = db.getSiblingDB("admin").currentOp().inprog; @@ -246,7 +245,7 @@ export const insertDocsWithMissingIndexKeys = assert.eq(Object.keys(doc).length + 1, node.getDB(dbName)[collName].getIndexes().length); }); - } + }; // Run dbCheck with given parameters and potentially wait for completion. 
export const runDbCheck = (replSet, @@ -519,15 +518,14 @@ export function assertCompleteCoverage( return; } - const truncateDocSuffix = - (batchBoundary, docSuffix) => { - const index = batchBoundary.indexOf(docSuffix); - jsTestLog("Index : " + index); - if (index < 1) { - return batchBoundary; - } - return batchBoundary.substring(0, batchBoundary.indexOf(docSuffix)); + const truncateDocSuffix = (batchBoundary, docSuffix) => { + const index = batchBoundary.indexOf(docSuffix); + jsTestLog("Index : " + index); + if (index < 1) { + return batchBoundary; } + return batchBoundary.substring(0, batchBoundary.indexOf(docSuffix)); + }; let query = logQueries.infoBatchQuery; if (inconsistentBatch) { diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js index 4d546ab63de..8b5f551b499 100644 --- a/jstests/replsets/libs/rollback_test.js +++ b/jstests/replsets/libs/rollback_test.js @@ -123,11 +123,11 @@ export function RollbackTest(name = "RollbackTest", replSet, nodeOptions) { dbConn.getMongo()._setSecurityToken(_createTenantToken({tenant: tenantId})); assert.commandWorked(dbConn.runCommand(cmdObj)); dbConn.getMongo()._setSecurityToken(undefined); - } + }; } else { return function(dbConn, cmdObj) { assert.commandWorked(dbConn.runCommand(cmdObj)); - } + }; } })(); diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js index 9f8c08e40f2..047c39d59ec 100644 --- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js +++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js @@ -42,7 +42,7 @@ assert.commandWorked(result); const logLines = checkLog.getGlobalLog(primaryDB); let createdCollections = []; logLines.forEach(function(line) { - const matchResult = line.match(/createCollection: (.+) with/) + const matchResult = line.match(/createCollection: (.+) with/); if (matchResult) { createdCollections.push(matchResult[1]); } diff --git a/jstests/replsets/preserve_record_ids_rename.js b/jstests/replsets/preserve_record_ids_rename.js index 887957ef411..4d889988f2f 100644 --- a/jstests/replsets/preserve_record_ids_rename.js +++ b/jstests/replsets/preserve_record_ids_rename.js @@ -122,7 +122,7 @@ function testRenameReplRidBehavior( `Expected $recordId fields to be reassigned after rename. Before rename: ${ tojson(docsBeforeWithRids)}, After: ${tojson(docsAfterWithRids)}`); } - assert(!src.exists()) + assert(!src.exists()); validateRidsAcrossNodes(dst); } diff --git a/jstests/replsets/replicate_record_ids.js b/jstests/replsets/replicate_record_ids.js index 9174c48a3ac..5d1dc291d37 100644 --- a/jstests/replsets/replicate_record_ids.js +++ b/jstests/replsets/replicate_record_ids.js @@ -103,7 +103,7 @@ validateRidInOplogs({ns: `${replRidNs}`, ...docARemoveOpTime}, docAReplRid); // Therefore to ensure that recordIdsReplicated:true actually works we need to make sure that // the appliers process oplog entries in parallel, and this is done by having a full batch of // entries for the appliers to process. We can achieve this by performing an insertMany. 
-jsTestLog("Test inserting multiple documents at a time.") +jsTestLog("Test inserting multiple documents at a time."); const docs = []; for (let i = 0; i < 500; i++) { diff --git a/jstests/replsets/replicate_record_ids_collmod.js b/jstests/replsets/replicate_record_ids_collmod.js index 9d7df83ebc6..056b2a391c5 100644 --- a/jstests/replsets/replicate_record_ids_collmod.js +++ b/jstests/replsets/replicate_record_ids_collmod.js @@ -41,7 +41,7 @@ jsTestLog('Result from successful collMod command: ' + tojson(result)); // Check for "Unsetting 'recordIdsReplicated' catalog entry flag" debug log message. checkLog.containsJson(primary, 8650601, {namespace: coll.getFullName()}); -assert.commandWorked(testDB.setLogLevel(originalStorageLogLevel, 'storage')) +assert.commandWorked(testDB.setLogLevel(originalStorageLogLevel, 'storage')); // Confirm that 'recordIdsReplicated' option has been removed from collection options. const collInfo = coll.exists(); diff --git a/jstests/selinux/core.js b/jstests/selinux/core.js index f19ecf3bcb5..4a423aee0b2 100644 --- a/jstests/selinux/core.js +++ b/jstests/selinux/core.js @@ -24,7 +24,7 @@ export class TestDefinition extends SelinuxBaseTest { const HAS_TAG = 0; const NO_TAG = 1; let checkTagRc = runNonMongoProgram( - python, "buildscripts/resmokelib/utils/check_has_tag.py", t, "^no_selinux$") + python, "buildscripts/resmokelib/utils/check_has_tag.py", t, "^no_selinux$"); if (HAS_TAG == checkTagRc) { jsTest.log("Skipping test due to no_selinux tag: " + t); continue; @@ -35,7 +35,7 @@ export class TestDefinition extends SelinuxBaseTest { // Tests relying on featureFlagXXX will not work checkTagRc = runNonMongoProgram( - python, "buildscripts/resmokelib/utils/check_has_tag.py", t, "^featureFlag.+$") + python, "buildscripts/resmokelib/utils/check_has_tag.py", t, "^featureFlag.+$"); if (HAS_TAG == checkTagRc) { jsTest.log("Skipping test due to feature flag tag: " + t); continue; diff --git a/jstests/serverless/change_stream_state_commands.js b/jstests/serverless/change_stream_state_commands.js index 1b0c89a4afb..1716b5499d2 100644 --- a/jstests/serverless/change_stream_state_commands.js +++ b/jstests/serverless/change_stream_state_commands.js @@ -28,7 +28,7 @@ replSetTest.initiate(); function setTokenOnEachNode(token) { replSetTest.nodes.forEach(node => { node._setSecurityToken(token); - }) + }); } function clearTokenOnEachNode(token) { diff --git a/jstests/serverless/invalid_tenant_requests.js b/jstests/serverless/invalid_tenant_requests.js index d9f2521ae6d..a1d7c20ac2f 100644 --- a/jstests/serverless/invalid_tenant_requests.js +++ b/jstests/serverless/invalid_tenant_requests.js @@ -14,7 +14,7 @@ function createnewReplSetTest(param) { } function setupNewReplSetWithParam(param) { - let rst = createnewReplSetTest(param) + let rst = createnewReplSetTest(param); let primary = rst.getPrimary(); let adminDb = primary.getDB('admin'); assert.commandWorked(adminDb.runCommand({createUser: 'admin', pwd: 'pwd', roles: ['root']})); diff --git a/jstests/serverless/list_database_for_tenant.js b/jstests/serverless/list_database_for_tenant.js index 6798d1cc231..02574451ce5 100644 --- a/jstests/serverless/list_database_for_tenant.js +++ b/jstests/serverless/list_database_for_tenant.js @@ -56,7 +56,7 @@ function runTests() { createAndSetSecurityToken(conn, tenant, true); insertDb(conn, tenant + "_firstRegDb"); checkDbNum(conn, 1); - resetSecurityToken(conn) + resetSecurityToken(conn); } createAndSetSecurityToken(primary, tenant2, false); @@ -64,13 +64,13 @@ function runTests() { 
insertDb(primary, "thirdRegDb"); checkDbNum(primary, 2); - resetSecurityToken(primary) + resetSecurityToken(primary); createAndSetSecurityToken(primary, tenant3, false); insertDb(primary, "fourthRegDb"); checkDbNum(primary, 1); - resetSecurityToken(primary) + resetSecurityToken(primary); createAndSetSecurityToken(primary, tenant2, false); insertDb(primary, "fifthRegDb"); @@ -90,7 +90,7 @@ function runTestExpectPrefixTrue() { createAndSetSecurityToken(primary, tenant, true); insertDb(primary, tenant + "_firstRegDb"); checkDbNum(primary, 1); - resetSecurityToken(primary) + resetSecurityToken(primary); createAndSetSecurityToken(primary, tenant2, true); insertDb(primary, tenant2 + "_secondRegDb"); diff --git a/jstests/serverless/multitenancy_with_atlas_proxy_basic_commands.js b/jstests/serverless/multitenancy_with_atlas_proxy_basic_commands.js index c49d2d8fbaf..e86b2a10c97 100644 --- a/jstests/serverless/multitenancy_with_atlas_proxy_basic_commands.js +++ b/jstests/serverless/multitenancy_with_atlas_proxy_basic_commands.js @@ -459,18 +459,18 @@ const otherSecurityToken = _createTenantToken({tenant: kOtherTenant, expectPrefi rst.awaitSecondaryNodes(); rst.awaitReplication(); assert.soon(function() { - return (healthlog.find({"operation": "dbCheckStop"}).itcount() == 1) + return (healthlog.find({"operation": "dbCheckStop"}).itcount() == 1); }); const tenantNss = kPrefixedDbName + "." + kCollName; if (FeatureFlagUtil.isPresentAndEnabled(rst.getPrimary(), "SecondaryIndexChecksInDbCheck")) { // dbCheckStart and dbCheckStop have tenantId as well assert.soon(function() { - return (healthlog.find({"namespace": tenantNss}).itcount() == 3) + return (healthlog.find({"namespace": tenantNss}).itcount() == 3); }); } else { // only dbCheckBatch has tenantId assert.soon(function() { - return (healthlog.find({"namespace": tenantNss}).itcount() == 1) + return (healthlog.find({"namespace": tenantNss}).itcount() == 1); }); } } diff --git a/jstests/serverless/tenant_migration_recipient_shard_merge_initial_sync.js b/jstests/serverless/tenant_migration_recipient_shard_merge_initial_sync.js index 6c1316bb079..782eff76c85 100644 --- a/jstests/serverless/tenant_migration_recipient_shard_merge_initial_sync.js +++ b/jstests/serverless/tenant_migration_recipient_shard_merge_initial_sync.js @@ -103,7 +103,7 @@ function runInitialSyncTest(recipientMergeStage, failpoint) { assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 1); checkLog.containsJson(initialSyncNode, 7219900); - fpinitialSyncHangBeforeFinish.off() + fpinitialSyncHangBeforeFinish.off(); // Get rid of the failed node so the fixture can stop properly. recipientRst.stop(initialSyncNode); @@ -111,7 +111,7 @@ function runInitialSyncTest(recipientMergeStage, failpoint) { recipientRst.reInitiate(); // Disable the failpoint to allow merge to continue. 
- mergeWaitInFailPoint.off() + mergeWaitInFailPoint.off(); if (migrationThread !== undefined && migrationThread !== null) { migrationThread.join(); diff --git a/jstests/sharding/add_participant_to_existing_transaction_from_shard.js b/jstests/sharding/add_participant_to_existing_transaction_from_shard.js index a5b0d20ad94..4b390cac1f1 100644 --- a/jstests/sharding/add_participant_to_existing_transaction_from_shard.js +++ b/jstests/sharding/add_participant_to_existing_transaction_from_shard.js @@ -67,7 +67,7 @@ assert.commandWorked(st.rs1.getPrimary().adminCommand({_flushRoutingTableCacheUp })); session.abortTransaction(); - jsTest.log("Exiting verifyStartOrContinueTransactionCanSpecifyReadConcern.") + jsTest.log("Exiting verifyStartOrContinueTransactionCanSpecifyReadConcern."); })(); st.stop(); diff --git a/jstests/sharding/additional_txn_participants_with_conflicting_ops.js b/jstests/sharding/additional_txn_participants_with_conflicting_ops.js index 5119134044c..ed10548703a 100644 --- a/jstests/sharding/additional_txn_participants_with_conflicting_ops.js +++ b/jstests/sharding/additional_txn_participants_with_conflicting_ops.js @@ -60,10 +60,10 @@ const sessionDB = session.getDatabase(dbName); // Run a $lookup which will add shard1 as an additional participant. This should throw // because shard1 had an incoming migration. - let err = assert.throwsWithCode( - () => {sessionDB.getCollection(localColl).aggregate( - [{$lookup: {from: foreignColl, localField: "x", foreignField: "_id", as: "result"}}])}, - ErrorCodes.MigrationConflict); + let err = assert.throwsWithCode(() => { + sessionDB.getCollection(localColl).aggregate( + [{$lookup: {from: foreignColl, localField: "x", foreignField: "_id", as: "result"}}]); + }, ErrorCodes.MigrationConflict); assert.contains("TransientTransactionError", err.errorLabels, tojson(err)); session.abortTransaction(); diff --git a/jstests/sharding/auto_split_vector_basic.js b/jstests/sharding/auto_split_vector_basic.js index ded5da5f60a..be90bfe9b38 100644 --- a/jstests/sharding/auto_split_vector_basic.js +++ b/jstests/sharding/auto_split_vector_basic.js @@ -13,18 +13,18 @@ var st = new ShardingTest({shards: 2, mongos: 2}); var mongos0 = st.s0; var mongos1 = st.s1; const kDbName = "test"; -var db = mongos0.getDB(kDbName) +var db = mongos0.getDB(kDbName); const kCollName = jsTestName(); const kNs = kDbName + "." + kCollName; -const kUnshardedCollName = jsTestName() + "_unsharded" -const kNonExistingCollName = jsTestName() + "_nonExisting" +const kUnshardedCollName = jsTestName() + "_unsharded"; +const kNonExistingCollName = jsTestName() + "_nonExisting"; assert.commandWorked(mongos0.adminCommand({enableSharding: kDbName, primaryShard: st.shard0.name})); assert.commandWorked(mongos0.adminCommand({shardCollection: kNs, key: {a: 1}})); var shardedColl = db.getCollection(kCollName); // Assert only 2 chunks exist. -assert.eq(2, st.config.chunks.count()) +assert.eq(2, st.config.chunks.count()); function insert10MbOfDummyData(coll) { // Insert some dummy data (10Mb). 
@@ -39,7 +39,7 @@ function insert10MbOfDummyData(coll) { assert.commandWorked(bulk.execute()); } -insert10MbOfDummyData(shardedColl) +insert10MbOfDummyData(shardedColl); jsTest.log( "Testing autoSplitVector can correctly suggest to split 10Mb of data given 1Mb of maxChunkSize"); @@ -53,7 +53,7 @@ jsTest.log( max: {a: MaxKey}, maxChunkSizeBytes: 1024 * 1024 // 1Mb })); - assert.eq(10, result.splitKeys.length) + assert.eq(10, result.splitKeys.length); } jsTest.log("Having the range over 2 shards should return InvalidOptions"); @@ -122,7 +122,7 @@ jsTest.log("Running on a stale mongos1 should correctly return InvalidOptions"); } let collUnsharded = mongos0.getDB(kDbName).getCollection(kUnshardedCollName); -insert10MbOfDummyData(collUnsharded) +insert10MbOfDummyData(collUnsharded); jsTest.log( "Running on an unsharded collection should fail if an index was not found for the queried shard key"); diff --git a/jstests/sharding/balancer_should_return_random_migrations_failpoint.js b/jstests/sharding/balancer_should_return_random_migrations_failpoint.js index c84fea58ade..6f358046540 100644 --- a/jstests/sharding/balancer_should_return_random_migrations_failpoint.js +++ b/jstests/sharding/balancer_should_return_random_migrations_failpoint.js @@ -6,7 +6,7 @@ * ] */ -import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; import {findChunksUtil} from "jstests/sharding/libs/find_chunks_util.js"; // TODO SERVER-89399: re-enable the hook once it properly serialize with resharding operations diff --git a/jstests/sharding/batched_writes_with_id_without_shard_key_basic.js b/jstests/sharding/batched_writes_with_id_without_shard_key_basic.js index 8ee0dd33730..95e982b5357 100644 --- a/jstests/sharding/batched_writes_with_id_without_shard_key_basic.js +++ b/jstests/sharding/batched_writes_with_id_without_shard_key_basic.js @@ -19,8 +19,7 @@ CreateShardedCollectionUtil.shardCollectionWithChunks(coll, {x: 1}, [ {min: {x: -100}, max: {x: 0}, shard: st.shard0.shardName}, {min: {x: 0}, max: {x: MaxKey}, shard: st.shard1.shardName}, ]); -const performOps = - function(ordered, numOps) { +const performOps = function(ordered, numOps) { jsTest.log("Perform write with ordered: " + ordered); // Write two documents. assert.commandWorked(coll.insert({x: -1, _id: -1})); @@ -61,7 +60,7 @@ const performOps = assert.eq(numOps, mongosServerStatus.metrics.query.updateOneNonTargetedShardedCount); assert.eq(numOps, mongosServerStatus.metrics.query.deleteOneWithoutShardKeyWithIdCount); session.endSession(); -} +}; // Test batched ops with ordered: true and ordered: false. performOps(true, 2); diff --git a/jstests/sharding/batched_writes_with_id_without_shard_key_stale_config.js b/jstests/sharding/batched_writes_with_id_without_shard_key_stale_config.js index eea7fe9ebf5..7eb9089c1b3 100644 --- a/jstests/sharding/batched_writes_with_id_without_shard_key_stale_config.js +++ b/jstests/sharding/batched_writes_with_id_without_shard_key_stale_config.js @@ -26,8 +26,7 @@ CreateShardedCollectionUtil.shardCollectionWithChunks(coll, {x: 1}, [ {min: {x: 0}, max: {x: MaxKey}, shard: st.shard1.shardName}, ]); -const performOps = - function(ordered, numRetryCount) { +const performOps = function(ordered, numRetryCount) { jsTest.log("Perform write with ordered: " + ordered); // Write two documents. 
assert.commandWorked(coll.insert({x: -1, _id: -1})); @@ -72,7 +71,7 @@ const performOps = assert.eq(numRetryCount, mongosServerStatus.metrics.query.deleteOneWithoutShardKeyWithIdRetryCount); session.endSession(); -} +}; // Test batched ops with ordered: true and ordered: false. performOps(false, 3); diff --git a/jstests/sharding/clear_jumbo.js b/jstests/sharding/clear_jumbo.js index 488093132ef..18d09fa504a 100644 --- a/jstests/sharding/clear_jumbo.js +++ b/jstests/sharding/clear_jumbo.js @@ -133,8 +133,9 @@ let chunk = findChunksUtil.findOneChunkByNs(configDB, testNs, {min: {x: 0}}); assert(chunk.jumbo, tojson(chunk)); assert.eq(st.shard0.shardName, chunk.shard); -st.forEachConfigServer((conn) => {assert.commandWorked(conn.adminCommand( - {setParameter: 1, balancerMigrationsThrottlingMs: 200}))}); +st.forEachConfigServer((conn) => { + assert.commandWorked(conn.adminCommand({setParameter: 1, balancerMigrationsThrottlingMs: 200})); +}); runBalancer(testColl); diff --git a/jstests/sharding/clustered_coll_scan.js b/jstests/sharding/clustered_coll_scan.js index 1d887ca7ade..38667b152f0 100644 --- a/jstests/sharding/clustered_coll_scan.js +++ b/jstests/sharding/clustered_coll_scan.js @@ -16,7 +16,7 @@ st.s.adminCommand({enableSharding: "test"}); const db = st.getDB("test"); // Create the collection as a clustered collection. const coll = assertDropAndRecreateCollection( - db, jsTestName(), {clusteredIndex: {key: {_id: 1}, unique: true}}) + db, jsTestName(), {clusteredIndex: {key: {_id: 1}, unique: true}}); st.shardColl(coll, {a: 1}); // First of all check that we can execute the query. assert.commandWorked(coll.insertMany([...Array(10).keys()].map(i => { diff --git a/jstests/sharding/collmod_unsplittable_collection.js b/jstests/sharding/collmod_unsplittable_collection.js index 9d0499869f3..2b88f1c8f09 100644 --- a/jstests/sharding/collmod_unsplittable_collection.js +++ b/jstests/sharding/collmod_unsplittable_collection.js @@ -30,13 +30,13 @@ function assertIndexExists(coll, indexKey, options, connections) { }); assert.eq(1, expectedIndex.length, "Index not found on " + conn.name); - }) + }); } function assertIndexDoesntExist(coll, indexKey, options, connections) { assert.throws(() => { assertIndexExists(coll, indexKey, options, connections); - }) + }); } let collId = 1; diff --git a/jstests/sharding/convert_to_and_from_config_shard.js b/jstests/sharding/convert_to_and_from_config_shard.js index f9c125cef97..f687bc7fefd 100644 --- a/jstests/sharding/convert_to_and_from_config_shard.js +++ b/jstests/sharding/convert_to_and_from_config_shard.js @@ -39,8 +39,7 @@ const _id = "randomId"; /** * Checks that basic CRUD operations work as expected. 
*/ -const checkBasicCRUD = - function(coll, _id) { +const checkBasicCRUD = function(coll, _id) { const sleepMs = 1; const numRetries = 99999; const NUM_NODES = 3; @@ -59,10 +58,9 @@ const checkBasicCRUD = assert.commandWorked(coll.insert({_id: _id}, {writeConcern: {w: NUM_NODES}})); assert.eq(_id, retryableFindOne(coll, {_id: _id})._id); -} +}; -const checkCRUDThread = - function(host, ns, _id, countdownLatch, checkBasicCRUD) { +const checkCRUDThread = function(host, ns, _id, countdownLatch, checkBasicCRUD) { const mongo = new Mongo(host); const session = mongo.startSession({retryWrites: true}); const [dbName, collName] = ns.split("."); @@ -71,7 +69,7 @@ const checkCRUDThread = checkBasicCRUD(db[collName], _id); sleep(1); // milliseconds } -} +}; let replSet = new ReplSetTest({nodes: NUM_NODES, name: "rs_to_config_shard"}); replSet.startSet({}); diff --git a/jstests/sharding/convert_to_and_from_sharded.js b/jstests/sharding/convert_to_and_from_sharded.js index 210cf91245a..50828860188 100644 --- a/jstests/sharding/convert_to_and_from_sharded.js +++ b/jstests/sharding/convert_to_and_from_sharded.js @@ -56,7 +56,7 @@ const checkBasicCRUD = function(withCollection, _id) { withCollection((coll) => { assert.commandWorked(coll.remove({_id: _id}, true /* justOne */)); assert.eq(null, retryableFindOne(coll, {_id: _id})); - }) + }); withCollection((coll) => { assert.commandWorked(coll.insert({_id: _id}, {writeConcern: {w: NUM_NODES}})); @@ -114,18 +114,18 @@ const checkDDLOps = function(withDbs) { jsTestLog("Running create."); runDDLCommandWithRetries(db, {create: DDLCollection}); assert.commandWorked(db[DDLCollection].insertOne({x: 1})); - }) + }); withDbs((db, _) => { jsTestLog("Running createIndexes."); let res = runDDLCommandWithRetries(db, {listIndexes: DDLCollection}); - assert.eq(res["cursor"]["firstBatch"].length, 1, res) + assert.eq(res["cursor"]["firstBatch"].length, 1, res); runDDLCommandWithRetries(db, { createIndexes: DDLCollection, indexes: [{name: "x_1", key: {x: 1}}, {name: "y_1", key: {y: 1}}] }); res = runDDLCommandWithRetries(db, {listIndexes: DDLCollection}); - assert.eq(res["cursor"]["firstBatch"].length, 3, res) + assert.eq(res["cursor"]["firstBatch"].length, 3, res); }); withDbs((db, _) => { @@ -173,11 +173,11 @@ const checkDDLOps = function(withDbs) { runDDLCommandWithRetries(db, {dropDatabase: 1}); let res = runDDLCommandWithRetries(adminDb, {listDatabases: 1}); assert(!res["databases"].some((database) => database["name"] == DDLDb), res); - }) + }); }; -const checkCRUDThread = - function(mongosHost, replSetHost, ns, _id, countdownLatch, stage, checkBasicCRUD) { +const checkCRUDThread = function( + mongosHost, replSetHost, ns, _id, countdownLatch, stage, checkBasicCRUD) { const [dbName, collName] = ns.split("."); const mongos = new Mongo(mongosHost); @@ -214,10 +214,9 @@ const checkCRUDThread = checkBasicCRUD(withCollection, _id); sleep(1); // milliseconds. } -} +}; -const checkDDLThread = - function(mongosHost, replSetHost, countdownLatch, stage, checkDDLOps) { +const checkDDLThread = function(mongosHost, replSetHost, countdownLatch, stage, checkDDLOps) { const mongos = new Mongo(mongosHost); const mongosSession = mongos.startSession({retryWrites: true}); const mongosDb = mongosSession.getDatabase("DDL"); @@ -258,7 +257,7 @@ const checkDDLThread = checkDDLOps(withDbs); sleep(1); // milliseconds. 
} -} +}; const nodeOptions = { setParameter: { diff --git a/jstests/sharding/deleteOne_with_id_without_shard_key_stale_config.js b/jstests/sharding/deleteOne_with_id_without_shard_key_stale_config.js index 576902546fc..fe80eeb9ecf 100644 --- a/jstests/sharding/deleteOne_with_id_without_shard_key_stale_config.js +++ b/jstests/sharding/deleteOne_with_id_without_shard_key_stale_config.js @@ -30,7 +30,7 @@ CreateShardedCollectionUtil.shardCollectionWithChunks(coll, {x: 1}, [ assert.commandWorked(coll.insert({x: -1, _id: -1})); assert.commandWorked(coll.insert({x: 1, _id: 1})); -assert.neq(st.s1.getDB(jsTestName()).coll.findOne({x: -1, _id: -1})) +assert.neq(st.s1.getDB(jsTestName()).coll.findOne({x: -1, _id: -1})); // Move chunk from shard0 to shard1. assert.commandWorked( diff --git a/jstests/sharding/delete_range_deletion_tasks_on_dropped_hashed_shard_key_index.js b/jstests/sharding/delete_range_deletion_tasks_on_dropped_hashed_shard_key_index.js index dcd75cf996e..d889b096618 100644 --- a/jstests/sharding/delete_range_deletion_tasks_on_dropped_hashed_shard_key_index.js +++ b/jstests/sharding/delete_range_deletion_tasks_on_dropped_hashed_shard_key_index.js @@ -17,7 +17,7 @@ const st = new ShardingTest({ enableBalancer: false, shardOptions: {setParameter: {rangeDeleterBatchSize: rangeDeleterBatchSize}}, } -}) +}); // Setup database and collection for test const dbName = 'db'; diff --git a/jstests/sharding/direct_shard_connection_auth_rs_commands.js b/jstests/sharding/direct_shard_connection_auth_rs_commands.js index bf4356eabb5..4fda19c6d00 100644 --- a/jstests/sharding/direct_shard_connection_auth_rs_commands.js +++ b/jstests/sharding/direct_shard_connection_auth_rs_commands.js @@ -112,7 +112,7 @@ jsTest.log("Testing replSetAbortPrimaryCatchUp"); }, "test") .n; - return count == 1 + return count == 1; }); const stopReplProducerFailPoint1 = configureFailPoint(secondary1Conn, 'stopReplProducer'); stopReplProducerFailPoint1.wait(); diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js index f47e05096a0..234de62e9d0 100644 --- a/jstests/sharding/dump_coll_metadata.js +++ b/jstests/sharding/dump_coll_metadata.js @@ -28,7 +28,7 @@ function getCollMetadataWithRefresh(node, collName) { shardVersionRes = res; return true; - }) + }); return shardVersionRes; } diff --git a/jstests/sharding/eof_plan.js b/jstests/sharding/eof_plan.js index 298d4b9c5dd..0ace7a73d51 100644 --- a/jstests/sharding/eof_plan.js +++ b/jstests/sharding/eof_plan.js @@ -8,7 +8,7 @@ * ] */ -import {getWinningPlanFromExplain} from 'jstests/libs/analyze_plan.js' +import {getWinningPlanFromExplain} from 'jstests/libs/analyze_plan.js'; const st = new ShardingTest({ shards: 2, mongos: 1, diff --git a/jstests/sharding/libs/cluster_cardinality_parameter_util.js b/jstests/sharding/libs/cluster_cardinality_parameter_util.js index d4001125f7f..c4ef7c09cbf 100644 --- a/jstests/sharding/libs/cluster_cardinality_parameter_util.js +++ b/jstests/sharding/libs/cluster_cardinality_parameter_util.js @@ -22,7 +22,9 @@ export function interruptAdminCommand(node, cmdNames) { .aggregate([{$currentOp: {}}, {$match: {$or: cmdNameFilter}}], {$readPreference: {mode: "primaryPreferred"}}) // specify secondary ok. 
.toArray(); - results.forEach(result => {adminDB.killOp(result.opid)}); + results.forEach(result => { + adminDB.killOp(result.opid); + }); } export function interruptConfigsvrAddShard(configPrimary) { diff --git a/jstests/sharding/merge_chunk_hashed.js b/jstests/sharding/merge_chunk_hashed.js index 357d04ce2ae..a59a2015694 100644 --- a/jstests/sharding/merge_chunk_hashed.js +++ b/jstests/sharding/merge_chunk_hashed.js @@ -32,9 +32,9 @@ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {x: 'hashed'}}) if (FeatureFlagUtil.isPresentAndEnabled(mongos.getDB(dbName), "OneChunkPerShardEmptyCollectionWithHashedShardKey")) { assert.commandWorked( - st.s.adminCommand({split: ns, middle: {x: NumberLong("-4611686018427387902")}})) + st.s.adminCommand({split: ns, middle: {x: NumberLong("-4611686018427387902")}})); assert.commandWorked( - st.s.adminCommand({split: ns, middle: {x: NumberLong("4611686018427387902")}})) + st.s.adminCommand({split: ns, middle: {x: NumberLong("4611686018427387902")}})); } assert.commandWorked(admin.runCommand({ diff --git a/jstests/sharding/merge_split_chunks_test.js b/jstests/sharding/merge_split_chunks_test.js index 16b2cdc01c1..6bf0b26ce24 100644 --- a/jstests/sharding/merge_split_chunks_test.js +++ b/jstests/sharding/merge_split_chunks_test.js @@ -18,13 +18,13 @@ var coll = mongos.getCollection(dbname + ".bar"); assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}})); // Make sure split is correctly disabled for unsharded collection -jsTest.log("Trying to split an unsharded collection ...") +jsTest.log("Trying to split an unsharded collection ..."); const collNameUnsplittable = "unsplittable_bar"; const nsUnsplittable = dbname + '.' + collNameUnsplittable; assert.commandWorked(mongos.getDB(dbname).runCommand({create: collNameUnsplittable})); assert.commandFailedWithCode(admin.runCommand({split: nsUnsplittable, middle: {_id: 0}}), ErrorCodes.NamespaceNotSharded); -jsTest.log("Trying to merge an unsharded collection ...") +jsTest.log("Trying to merge an unsharded collection ..."); assert.commandFailedWithCode( admin.runCommand({mergeChunks: nsUnsplittable, bounds: [{_id: 90}, {_id: MaxKey}]}), ErrorCodes.NamespaceNotSharded); diff --git a/jstests/sharding/migration_blocking_operation/coordinate_multi_update.js b/jstests/sharding/migration_blocking_operation/coordinate_multi_update.js index 4c24951a4a1..ea93e8ee1d3 100644 --- a/jstests/sharding/migration_blocking_operation/coordinate_multi_update.js +++ b/jstests/sharding/migration_blocking_operation/coordinate_multi_update.js @@ -43,7 +43,7 @@ function assertCoordinateMultiUpdateReturns(connection, code) { assert.eq(underlyingUpdateResult["n"], 2); assert.eq(underlyingUpdateResult["ok"], 1); } else { - assert.commandFailedWithCode(response, code) + assert.commandFailedWithCode(response, code); } } diff --git a/jstests/sharding/migration_blocking_operation/migration_blocking_operation_lifecycle.js b/jstests/sharding/migration_blocking_operation/migration_blocking_operation_lifecycle.js index f2faa7c7342..af2994ef450 100644 --- a/jstests/sharding/migration_blocking_operation/migration_blocking_operation_lifecycle.js +++ b/jstests/sharding/migration_blocking_operation/migration_blocking_operation_lifecycle.js @@ -33,7 +33,7 @@ function assertCommandReturns(connection, command, uuid, code) { if (code === ErrorCodes.OK) { assert.commandWorked(response); } else { - assert.commandFailedWithCode(response, code) + assert.commandFailedWithCode(response, code); } } diff --git 
a/jstests/sharding/move_chunk_remove_shard.js b/jstests/sharding/move_chunk_remove_shard.js index 9798889b9a9..1c6b4d2cdf0 100644 --- a/jstests/sharding/move_chunk_remove_shard.js +++ b/jstests/sharding/move_chunk_remove_shard.js @@ -36,8 +36,9 @@ moveOutSessionChunks(st, st.shard1.shardName, st.shard0.shardName); pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState); -st.forEachConfigServer((conn) => {assert.commandWorked(conn.adminCommand( - {setParameter: 1, balancerMigrationsThrottlingMs: 200}))}); +st.forEachConfigServer((conn) => { + assert.commandWorked(conn.adminCommand({setParameter: 1, balancerMigrationsThrottlingMs: 200})); +}); let joinMoveChunk = moveChunkParallel(staticMongod, st.s.host, diff --git a/jstests/sharding/move_collection_balancer_metrics.js b/jstests/sharding/move_collection_balancer_metrics.js index 87fd2a25fc8..52674b60904 100644 --- a/jstests/sharding/move_collection_balancer_metrics.js +++ b/jstests/sharding/move_collection_balancer_metrics.js @@ -52,7 +52,7 @@ assert.commandWorked(configsvr.adminCommand({ writeConcern: {w: "majority"} })); -const shardingMetrics = configsvr.getDB('admin').serverStatus({}).shardingStatistics +const shardingMetrics = configsvr.getDB('admin').serverStatus({}).shardingStatistics; assert.eq(shardingMetrics.moveCollection, undefined); const balancerMetrics = shardingMetrics.balancerMoveCollection; diff --git a/jstests/sharding/move_collection_basic.js b/jstests/sharding/move_collection_basic.js index 54bf187eb16..d7bb8c96c1f 100644 --- a/jstests/sharding/move_collection_basic.js +++ b/jstests/sharding/move_collection_basic.js @@ -34,7 +34,7 @@ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {oldKey: 1}} // Fail if collection is sharded. assert.commandFailedWithCode(mongos.adminCommand(cmdObj), ErrorCodes.NamespaceNotFound); -const unsplittableCollName = "foo_unsplittable" +const unsplittableCollName = "foo_unsplittable"; const unsplittableCollNs = dbName + '.' + unsplittableCollName; assert.commandWorked(st.s.getDB(dbName).runCommand({create: unsplittableCollName})); diff --git a/jstests/sharding/move_collection_to_current_shard_is_noop.js b/jstests/sharding/move_collection_to_current_shard_is_noop.js index cb7092c3c8a..14f94b0efb9 100644 --- a/jstests/sharding/move_collection_to_current_shard_is_noop.js +++ b/jstests/sharding/move_collection_to_current_shard_is_noop.js @@ -23,7 +23,7 @@ const st = new ShardingTest({ }); const dbName = 'db'; -const unsplittableCollName = "foo_unsplittable" +const unsplittableCollName = "foo_unsplittable"; const ns = dbName + '.' + unsplittableCollName; let shard0 = st.shard0.shardName; diff --git a/jstests/sharding/move_range_basic.js b/jstests/sharding/move_range_basic.js index f59dd975a96..79c9fa3c568 100644 --- a/jstests/sharding/move_range_basic.js +++ b/jstests/sharding/move_range_basic.js @@ -149,7 +149,7 @@ function test(collName, skPattern) { // Test running running moveRange on an unsplittable collection will fail if (FeatureFlagUtil.isPresentAndEnabled(mongos, "TrackUnshardedCollectionsUponCreation")) { - const collName = "unsplittable_collection" + const collName = "unsplittable_collection"; const ns = kDbName + '.' 
+ collName; jsTest.log("Testing on unsplittable namespace"); diff --git a/jstests/sharding/multi_collection_transaction_placement_conflict_workaround.js b/jstests/sharding/multi_collection_transaction_placement_conflict_workaround.js index c6e9a11bfe1..bb35d80ef96 100644 --- a/jstests/sharding/multi_collection_transaction_placement_conflict_workaround.js +++ b/jstests/sharding/multi_collection_transaction_placement_conflict_workaround.js @@ -397,7 +397,7 @@ const st = new ShardingTest({mongos: 1, shards: 2}); const coll = db['sharded']; assert.commandWorked(st.s.getDB(db.getName()).dropDatabase()); - assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {x: 1}})) + assert.commandWorked(st.s.adminCommand({shardCollection: coll.getFullName(), key: {x: 1}})); assert.commandWorked(st.s.adminCommand({split: coll.getFullName(), middle: {x: 0}})); assert.commandWorked(st.s.adminCommand({ moveChunk: coll.getFullName(), diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js index a756b57f47c..b7eb0977d80 100644 --- a/jstests/sharding/multi_write_target.js +++ b/jstests/sharding/multi_write_target.js @@ -53,7 +53,7 @@ assert.commandWorked(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {mu // Ensure _id update goes to at least one shard assert(st.shard0.getCollection(coll.toString()).findOne({updatedById: true}) != null || - st.shard2.getCollection(coll.toString()).findOne({updatedById: true}) != null) + st.shard2.getCollection(coll.toString()).findOne({updatedById: true}) != null); jsTest.log("Testing multi-delete..."); diff --git a/jstests/sharding/query/agg_mongos_merge.js b/jstests/sharding/query/agg_mongos_merge.js index cc08623c9b7..1f590d90281 100644 --- a/jstests/sharding/query/agg_mongos_merge.js +++ b/jstests/sharding/query/agg_mongos_merge.js @@ -110,21 +110,19 @@ function assertMergeBehaviour( "command.pipeline.$mergeCursors": {$exists: 1} }; const owningShardMergeCount = owningShardDB.system.profile.find(mergeFilter).itcount(); - const nonPrimaryShardMergeCount = nonOwningShardDB.system.profile.find(mergeFilter).itcount() + const nonPrimaryShardMergeCount = nonOwningShardDB.system.profile.find(mergeFilter).itcount(); - const foundMessage = - function() { + const foundMessage = function() { return "found " + owningShardMergeCount + " merges on the owning shard and " + nonPrimaryShardMergeCount + " on the other shard. Total merges on shards: " + (owningShardMergeCount + nonPrimaryShardMergeCount); - } + }; if (mergeType === "mongos") { assert.eq(owningShardMergeCount + nonPrimaryShardMergeCount, 0, "Expected merge on mongos, but " + foundMessage()); - } - else { + } else { assert(mergeType === "anyShard" || mergeType === "specificShard", "unknown merge type: " + mergeType); assert.eq(owningShardMergeCount + nonPrimaryShardMergeCount, diff --git a/jstests/sharding/query/lookup_targeting.js b/jstests/sharding/query/lookup_targeting.js index b6222d9c94e..dacb6b4f557 100644 --- a/jstests/sharding/query/lookup_targeting.js +++ b/jstests/sharding/query/lookup_targeting.js @@ -1032,11 +1032,10 @@ if (checkSbeRestrictedOrFullyEnabled(db)) { // Function which runs our $lookup and asserts the expected results. Used to gossip the updated // routing information of the inner collection. 
- const runAggregateToRefresh = - () => { - assert.eq(db[kUnsplittable1CollName].aggregate(sbeLookupPipeline).toArray(), - expectedResults); - } + const runAggregateToRefresh = () => { + assert.eq(db[kUnsplittable1CollName].aggregate(sbeLookupPipeline).toArray(), + expectedResults); + }; // Function which verifies that SBE $lookup fails with a 'QueryPlanKilled' error when a // collection is moved across getMore commands. @@ -1072,9 +1071,9 @@ if (checkSbeRestrictedOrFullyEnabled(db)) { funWithArgs(function(dbName, collName, pipeline) { // At some point during yielding, we expect a QueryPlanKilled error because the // underlying sharding state has changed - assert.throwsWithCode( - () => {db.getSiblingDB(dbName)[collName].aggregate(pipeline).toArray()}, - ErrorCodes.QueryPlanKilled); + assert.throwsWithCode(() => { + db.getSiblingDB(dbName)[collName].aggregate(pipeline).toArray(); + }, ErrorCodes.QueryPlanKilled); }, kDbName, kUnsplittable1CollName, sbeLookupPipeline), st.s.port); failpoint.wait(); diff --git a/jstests/sharding/query/merge_command_options.js b/jstests/sharding/query/merge_command_options.js index 89b416bcba3..2ed3bdeefc4 100644 --- a/jstests/sharding/query/merge_command_options.js +++ b/jstests/sharding/query/merge_command_options.js @@ -1,6 +1,6 @@ // Tests that aggregations with a $merge stage respect the options set on the command. import {profilerHasNumMatchingEntriesOrThrow} from "jstests/libs/profiler.js"; -import {reconfig} from "jstests/replsets/rslib.js" +import {reconfig} from "jstests/replsets/rslib.js"; const st = new ShardingTest({ shards: 2, diff --git a/jstests/sharding/query/view_on_shard_rewrite.js b/jstests/sharding/query/view_on_shard_rewrite.js index 116f6e7a4ed..2236a4941a0 100644 --- a/jstests/sharding/query/view_on_shard_rewrite.js +++ b/jstests/sharding/query/view_on_shard_rewrite.js @@ -127,8 +127,10 @@ if (FeatureFlagUtil.isPresentAndEnabled(st.s, 'TrackUnshardedCollectionsUponCrea // shard. session1.startTransaction({readConcern: {level: 'snapshot'}}); session2.startTransaction({readConcern: {level: 'majority'}}); - assertReadOnView(session1.getDatabase(dbName)[viewName], false /* expectKickBackToMongos */) - assertReadOnView(session1.getDatabase(dbName)[viewName], false /* expectKickBackToMongos */) + assertReadOnView(session1.getDatabase(dbName)[viewName], + false /* expectKickBackToMongos */); + assertReadOnView(session1.getDatabase(dbName)[viewName], + false /* expectKickBackToMongos */); session1.commitTransaction(); session2.commitTransaction(); } diff --git a/jstests/sharding/rename_unsplittable_collection.js b/jstests/sharding/rename_unsplittable_collection.js index d1227151c13..5dabd378aa5 100644 --- a/jstests/sharding/rename_unsplittable_collection.js +++ b/jstests/sharding/rename_unsplittable_collection.js @@ -51,11 +51,13 @@ function testRenameUnsplittableCollection(configDb, // Print descriptive test message let msg = "Running test: rename collection `" + nssFrom + "` located on shard `" + shardName + - "` to `" + nssTo + "` with dropTarget=`" + dropTarget + "`." + "` to `" + nssTo + "` with dropTarget=`" + dropTarget + "`."; if (collToShouldExist) { - msg += " Target collection exists on shard `" + collToShardName + "`." 
+ msg += " Target collection exists on shard `" + collToShardName + "`."; + } else { + msg += " Target collection doesn't exist."; } - else {msg += " Target collection doesn't exist."} jsTestLog(msg); + jsTestLog(msg); // Create collFrom collection assert.commandWorked( @@ -112,7 +114,7 @@ testRenameUnsplittableCollection(configDb, primaryShard); // 3. Rename collection test:not located on the primary shard -testRenameUnsplittableCollection(configDb, db, "collFrom3", db, "collTo3", nonPrimaryShard) +testRenameUnsplittableCollection(configDb, db, "collFrom3", db, "collTo3", nonPrimaryShard); // 4. Rename collection test:not located on the primary shard when target exists testRenameUnsplittableCollection(configDb, diff --git a/jstests/sharding/reshard_collection_resharding_improvements_shard_distribution.js b/jstests/sharding/reshard_collection_resharding_improvements_shard_distribution.js index 844c37a9198..63068777531 100644 --- a/jstests/sharding/reshard_collection_resharding_improvements_shard_distribution.js +++ b/jstests/sharding/reshard_collection_resharding_improvements_shard_distribution.js @@ -26,234 +26,186 @@ const coordinator = new Mongo(topology.configsvr.nodes[0]); assert.commandWorked(coordinator.getDB("admin").adminCommand( {setParameter: 1, reshardingCriticalSectionTimeoutMillis: criticalSectionTimeoutMS})); -const testCompoundShardKey = - (mongos) => { - if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { - jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled."); - return; - } - - assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {num: 1, str: 1}})); - let bulk = mongos.getDB(kDbName).getCollection(collName).initializeOrderedBulkOp(); - for (let x = 0; x < kNumInitialDocs; x++) { - bulk.insert({str: x.toString(), num: x, obj: {str: x.toString(), num: x}}); - } - assert.commandWorked(bulk.execute()); - - jsTestLog("shardDistribution missing second shardKey should error."); - const missSecondKeyCmd = { - reshardCollection: ns, - key: {num: 1, str: 1}, - forceRedistribution: true, - shardDistribution: [ - {shard: st.shard0.shardName, min: {num: MinKey}, max: {num: 1}}, - {shard: st.shard1.shardName, min: {num: 1}, max: {num: MaxKey}} - ] - }; - assert.commandFailedWithCode(mongos.adminCommand(missSecondKeyCmd), - ErrorCodes.InvalidOptions); - - jsTestLog("shardDistribution not continuous on second shardKey should error."); - const notContinuousCmd = { - reshardCollection: ns, - key: {num: 1, str: 1}, - forceRedistribution: true, - shardDistribution: [ - { - shard: st.shard0.shardName, - min: {num: MinKey, str: MinKey}, - max: {num: 1, str: '1'} - }, - { - shard: st.shard1.shardName, - min: {num: 2, str: '1'}, - max: {num: MaxKey, str: MaxKey} - } - ] - }; - assert.commandFailedWithCode(mongos.adminCommand(notContinuousCmd), - ErrorCodes.InvalidOptions); - - jsTestLog("shardDistribution overlap on second shardKey should error."); - const overlapCmd = { - reshardCollection: ns, - key: {num: 1, str: 1}, - forceRedistribution: true, - shardDistribution: [ - { - shard: st.shard0.shardName, - min: {num: MinKey, str: MinKey}, - max: {num: 1, str: '2'} - }, - { - shard: st.shard1.shardName, - min: {num: 1, str: '1'}, - max: {num: MaxKey, str: MaxKey} - } - ] - }; - assert.commandFailedWithCode(mongos.adminCommand(overlapCmd), ErrorCodes.InvalidOptions); - - jsTestLog("shardDistribution second shardKey not start from min should error."); - const missingMinCmd = { - reshardCollection: ns, - key: {num: 1, str: 1}, - 
forceRedistribution: true, - shardDistribution: [ - {shard: st.shard0.shardName, min: {num: MinKey, str: '1'}, max: {num: 1, str: '2'}}, - { - shard: st.shard1.shardName, - min: {num: 1, str: '1'}, - max: {num: MaxKey, str: MaxKey} - } - ] - }; - assert.commandFailedWithCode(mongos.adminCommand(missingMinCmd), ErrorCodes.InvalidOptions); - - jsTestLog("shardDistribution second shardKey not end at max should error."); - const missingMaxCmd = { - reshardCollection: ns, - key: {num: 1, str: 1}, - forceRedistribution: true, - shardDistribution: [ - { - shard: st.shard0.shardName, - min: {num: MinKey, str: MinKey}, - max: {num: 1, str: '2'} - }, - {shard: st.shard1.shardName, min: {num: 1, str: '1'}, max: {num: MaxKey, str: '2'}} - ] - }; - assert.commandFailedWithCode(mongos.adminCommand(missingMaxCmd), ErrorCodes.InvalidOptions); - - jsTestLog("This shardDistribution is a valid so the reshardCollection should succeed."); - const correctCmd = { - reshardCollection: ns, - key: {num: 1, str: 1}, - forceRedistribution: true, - shardDistribution: [ - { - shard: st.shard0.shardName, - min: {num: MinKey, str: MinKey}, - max: {num: 1, str: '1'} - }, - { - shard: st.shard1.shardName, - min: {num: 1, str: '1'}, - max: {num: MaxKey, str: MaxKey} - } - ] - }; - assert.commandWorked(mongos.adminCommand(correctCmd)); - mongos.getDB(kDbName)[collName].drop(); +const testCompoundShardKey = (mongos) => { + if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled."); + return; } -const testMoreShardsAndZones = - (mongos) => { - if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { - jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); - return; - } - - /** - * This test is to ensure we have correct behavior when we have more shards and zones - * The setup is following: - * - shard0 -> [z1, z2, z3] - * - shard1 -> [z2] - * - shard2 -> [z2, z3] - * - shard3 -> [z3] - * - shard4 -> [z3] - * - * The key ranges for zones are: - * - z1 -> [Min, -1000), [1000, Max) - * - z2 -> [-1000, -1) - * - z3 -> [-1, 1000) - */ - jsTestLog("ReshardCollection should succeed when shardDistribution and zones mix together"); - - const additionalSetup = function(test) { - const st = test._st; - const ns = test._ns; - const zoneName1 = 'z1'; - const zoneName2 = 'z2'; - const zoneName3 = 'z3'; - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName1})); - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName2})); - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName3})); - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: zoneName2})); - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard2.shardName, zone: zoneName2})); - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard2.shardName, zone: zoneName3})); - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard3.shardName, zone: zoneName3})); - assert.commandWorked( - st.s.adminCommand({addShardToZone: st.shard4.shardName, zone: zoneName3})); - assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {oldKey: 1}})); - assert.commandWorked(st.s.adminCommand({ - updateZoneKeyRange: ns, - min: {oldKey: MinKey}, - max: {oldKey: -1000}, - zone: zoneName1 - })); - assert.commandWorked(st.s.adminCommand({ - updateZoneKeyRange: ns, - min: {oldKey: 
1000}, - max: {oldKey: MaxKey}, - zone: zoneName1 - })); - assert.commandWorked(st.s.adminCommand({ - updateZoneKeyRange: ns, - min: {oldKey: -1000}, - max: {oldKey: -1}, - zone: zoneName2 - })); - assert.commandWorked(st.s.adminCommand( - {updateZoneKeyRange: ns, min: {oldKey: -1}, max: {oldKey: 1000}, zone: zoneName3})); - }; - - reshardCmdTest.assertReshardCollOk( - { - reshardCollection: ns, - key: {oldKey: 1}, - forceRedistribution: true, - shardDistribution: [ - {shard: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: -100}}, - {shard: st.shard1.shardName, min: {oldKey: -100}, max: {oldKey: -10}}, - {shard: st.shard2.shardName, min: {oldKey: -10}, max: {oldKey: 0}}, - {shard: st.shard3.shardName, min: {oldKey: 0}, max: {oldKey: 10}}, - {shard: st.shard4.shardName, min: {oldKey: 10}, max: {oldKey: 100}}, - {shard: st.shard0.shardName, min: {oldKey: 100}, max: {oldKey: MaxKey}}, - ] - }, - 9, - [ - { - recipientShardId: st.shard0.shardName, - min: {oldKey: MinKey}, - max: {oldKey: -1000} - }, - {recipientShardId: st.shard0.shardName, min: {oldKey: -1000}, max: {oldKey: -100}}, - {recipientShardId: st.shard1.shardName, min: {oldKey: -100}, max: {oldKey: -10}}, - {recipientShardId: st.shard2.shardName, min: {oldKey: -10}, max: {oldKey: -1}}, - {recipientShardId: st.shard2.shardName, min: {oldKey: -1}, max: {oldKey: 0}}, - {recipientShardId: st.shard3.shardName, min: {oldKey: 0}, max: {oldKey: 10}}, - {recipientShardId: st.shard4.shardName, min: {oldKey: 10}, max: {oldKey: 100}}, - {recipientShardId: st.shard0.shardName, min: {oldKey: 100}, max: {oldKey: 1000}}, - {recipientShardId: st.shard0.shardName, min: {oldKey: 1000}, max: {oldKey: MaxKey}}, - ], - [ - {zone: "z1", min: {oldKey: MinKey}, max: {oldKey: -1000}}, - {zone: "z1", min: {oldKey: 1000}, max: {oldKey: MaxKey}}, - {zone: "z2", min: {oldKey: -1000}, max: {oldKey: -1}}, - {zone: "z3", min: {oldKey: -1}, max: {oldKey: 1000}} - ], - additionalSetup); + assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {num: 1, str: 1}})); + let bulk = mongos.getDB(kDbName).getCollection(collName).initializeOrderedBulkOp(); + for (let x = 0; x < kNumInitialDocs; x++) { + bulk.insert({str: x.toString(), num: x, obj: {str: x.toString(), num: x}}); } + assert.commandWorked(bulk.execute()); + + jsTestLog("shardDistribution missing second shardKey should error."); + const missSecondKeyCmd = { + reshardCollection: ns, + key: {num: 1, str: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {num: MinKey}, max: {num: 1}}, + {shard: st.shard1.shardName, min: {num: 1}, max: {num: MaxKey}} + ] + }; + assert.commandFailedWithCode(mongos.adminCommand(missSecondKeyCmd), ErrorCodes.InvalidOptions); + + jsTestLog("shardDistribution not continuous on second shardKey should error."); + const notContinuousCmd = { + reshardCollection: ns, + key: {num: 1, str: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {num: MinKey, str: MinKey}, max: {num: 1, str: '1'}}, + {shard: st.shard1.shardName, min: {num: 2, str: '1'}, max: {num: MaxKey, str: MaxKey}} + ] + }; + assert.commandFailedWithCode(mongos.adminCommand(notContinuousCmd), ErrorCodes.InvalidOptions); + + jsTestLog("shardDistribution overlap on second shardKey should error."); + const overlapCmd = { + reshardCollection: ns, + key: {num: 1, str: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {num: MinKey, str: MinKey}, max: {num: 1, str: '2'}}, + {shard: 
st.shard1.shardName, min: {num: 1, str: '1'}, max: {num: MaxKey, str: MaxKey}} + ] + }; + assert.commandFailedWithCode(mongos.adminCommand(overlapCmd), ErrorCodes.InvalidOptions); + + jsTestLog("shardDistribution second shardKey not start from min should error."); + const missingMinCmd = { + reshardCollection: ns, + key: {num: 1, str: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {num: MinKey, str: '1'}, max: {num: 1, str: '2'}}, + {shard: st.shard1.shardName, min: {num: 1, str: '1'}, max: {num: MaxKey, str: MaxKey}} + ] + }; + assert.commandFailedWithCode(mongos.adminCommand(missingMinCmd), ErrorCodes.InvalidOptions); + + jsTestLog("shardDistribution second shardKey not end at max should error."); + const missingMaxCmd = { + reshardCollection: ns, + key: {num: 1, str: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {num: MinKey, str: MinKey}, max: {num: 1, str: '2'}}, + {shard: st.shard1.shardName, min: {num: 1, str: '1'}, max: {num: MaxKey, str: '2'}} + ] + }; + assert.commandFailedWithCode(mongos.adminCommand(missingMaxCmd), ErrorCodes.InvalidOptions); + + jsTestLog("This shardDistribution is a valid so the reshardCollection should succeed."); + const correctCmd = { + reshardCollection: ns, + key: {num: 1, str: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {num: MinKey, str: MinKey}, max: {num: 1, str: '1'}}, + {shard: st.shard1.shardName, min: {num: 1, str: '1'}, max: {num: MaxKey, str: MaxKey}} + ] + }; + assert.commandWorked(mongos.adminCommand(correctCmd)); + mongos.getDB(kDbName)[collName].drop(); +}; + +const testMoreShardsAndZones = (mongos) => { + if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + return; + } + + /** + * This test is to ensure we have correct behavior when we have more shards and zones + * The setup is following: + * - shard0 -> [z1, z2, z3] + * - shard1 -> [z2] + * - shard2 -> [z2, z3] + * - shard3 -> [z3] + * - shard4 -> [z3] + * + * The key ranges for zones are: + * - z1 -> [Min, -1000), [1000, Max) + * - z2 -> [-1000, -1) + * - z3 -> [-1, 1000) + */ + jsTestLog("ReshardCollection should succeed when shardDistribution and zones mix together"); + + const additionalSetup = function(test) { + const st = test._st; + const ns = test._ns; + const zoneName1 = 'z1'; + const zoneName2 = 'z2'; + const zoneName3 = 'z3'; + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName1})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName2})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName3})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: zoneName2})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard2.shardName, zone: zoneName2})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard2.shardName, zone: zoneName3})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard3.shardName, zone: zoneName3})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard4.shardName, zone: zoneName3})); + assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {oldKey: 1}})); + assert.commandWorked(st.s.adminCommand({ + updateZoneKeyRange: ns, + min: {oldKey: MinKey}, + max: {oldKey: -1000}, + zone: 
zoneName1 + })); + assert.commandWorked(st.s.adminCommand( + {updateZoneKeyRange: ns, min: {oldKey: 1000}, max: {oldKey: MaxKey}, zone: zoneName1})); + assert.commandWorked(st.s.adminCommand( + {updateZoneKeyRange: ns, min: {oldKey: -1000}, max: {oldKey: -1}, zone: zoneName2})); + assert.commandWorked(st.s.adminCommand( + {updateZoneKeyRange: ns, min: {oldKey: -1}, max: {oldKey: 1000}, zone: zoneName3})); + }; + + reshardCmdTest.assertReshardCollOk( + { + reshardCollection: ns, + key: {oldKey: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: -100}}, + {shard: st.shard1.shardName, min: {oldKey: -100}, max: {oldKey: -10}}, + {shard: st.shard2.shardName, min: {oldKey: -10}, max: {oldKey: 0}}, + {shard: st.shard3.shardName, min: {oldKey: 0}, max: {oldKey: 10}}, + {shard: st.shard4.shardName, min: {oldKey: 10}, max: {oldKey: 100}}, + {shard: st.shard0.shardName, min: {oldKey: 100}, max: {oldKey: MaxKey}}, + ] + }, + 9, + [ + {recipientShardId: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: -1000}}, + {recipientShardId: st.shard0.shardName, min: {oldKey: -1000}, max: {oldKey: -100}}, + {recipientShardId: st.shard1.shardName, min: {oldKey: -100}, max: {oldKey: -10}}, + {recipientShardId: st.shard2.shardName, min: {oldKey: -10}, max: {oldKey: -1}}, + {recipientShardId: st.shard2.shardName, min: {oldKey: -1}, max: {oldKey: 0}}, + {recipientShardId: st.shard3.shardName, min: {oldKey: 0}, max: {oldKey: 10}}, + {recipientShardId: st.shard4.shardName, min: {oldKey: 10}, max: {oldKey: 100}}, + {recipientShardId: st.shard0.shardName, min: {oldKey: 100}, max: {oldKey: 1000}}, + {recipientShardId: st.shard0.shardName, min: {oldKey: 1000}, max: {oldKey: MaxKey}}, + ], + [ + {zone: "z1", min: {oldKey: MinKey}, max: {oldKey: -1000}}, + {zone: "z1", min: {oldKey: 1000}, max: {oldKey: MaxKey}}, + {zone: "z2", min: {oldKey: -1000}, max: {oldKey: -1}}, + {zone: "z3", min: {oldKey: -1}, max: {oldKey: 1000}} + ], + additionalSetup); +}; testCompoundShardKey(mongos); testMoreShardsAndZones(mongos); diff --git a/jstests/sharding/resharding_collection_cloner_resuming_natural_order.js b/jstests/sharding/resharding_collection_cloner_resuming_natural_order.js index aa1483dacda..f089e62c4d8 100644 --- a/jstests/sharding/resharding_collection_cloner_resuming_natural_order.js +++ b/jstests/sharding/resharding_collection_cloner_resuming_natural_order.js @@ -107,25 +107,28 @@ shard0Primary.getDB(inputCollection.getDB().getName()) .advanceClusterTime(inputCollection.getDB().getSession().getClusterTime()); jsTestLog("About to start resharding, first attempt"); -const reshardShell = startParallelShell( - funWithArgs((inputCollectionFullName, - inputCollectionUUID, - shardName, - atClusterTime, - tempCollectionFullName) => {assert.commandWorked(db.adminCommand({ - testReshardCloneCollection: inputCollectionFullName, - shardKey: {newKey: 1}, - uuid: inputCollectionUUID, - shardId: shardName, - atClusterTime: atClusterTime, - outputNs: tempCollectionFullName, - }))}, - inputCollection.getFullName(), - inputCollectionUUID, - st.shard0.shardName, - originalInsertsTs, - temporaryReshardingCollection.getFullName()), - shard0Primary.port); +const reshardShell = + startParallelShell(funWithArgs( + (inputCollectionFullName, + inputCollectionUUID, + shardName, + atClusterTime, + tempCollectionFullName) => { + assert.commandWorked(db.adminCommand({ + testReshardCloneCollection: inputCollectionFullName, + shardKey: {newKey: 1}, + uuid: inputCollectionUUID, 
+ shardId: shardName, + atClusterTime: atClusterTime, + outputNs: tempCollectionFullName, + })); + }, + inputCollection.getFullName(), + inputCollectionUUID, + st.shard0.shardName, + originalInsertsTs, + temporaryReshardingCollection.getFullName()), + shard0Primary.port); // Wait for the first attempt to fail. attemptFp.wait(); diff --git a/jstests/sharding/resharding_default_collation.js b/jstests/sharding/resharding_default_collation.js index 35cb562c20a..feaabaae5b7 100644 --- a/jstests/sharding/resharding_default_collation.js +++ b/jstests/sharding/resharding_default_collation.js @@ -30,10 +30,10 @@ const collection = reshardingTest.createShardedCollection({ collOptions: {collation: {locale: "simple"}}, }); -const idxSimpleCollationName = "idxSimpleCollation" +const idxSimpleCollationName = "idxSimpleCollation"; assert.commandWorked( collection.createIndex({x: 1}, {name: idxSimpleCollationName, collation: {locale: "simple"}})); -const idx2Name = "idx2" +const idx2Name = "idx2"; assert.commandWorked(collection.createIndex({x: 1}, {name: idx2Name})); const preReshardingIndexes = collection.getIndexes(); diff --git a/jstests/sharding/resharding_failover_during_abort.js b/jstests/sharding/resharding_failover_during_abort.js index f7159124585..e08ea2e2913 100644 --- a/jstests/sharding/resharding_failover_during_abort.js +++ b/jstests/sharding/resharding_failover_during_abort.js @@ -50,7 +50,7 @@ reshardingTest.withReshardingInBackground( recipient.getCollection('config.localReshardingOperations.recipient').findOne({ ns: "reshardingDb.coll" }); - assert(recipientDoc != null) + assert(recipientDoc != null); assert(recipientDoc.mutableState.state === "done"); assert(recipientDoc.mutableState.abortReason != null); assert(recipientDoc.mutableState.abortReason.code === ErrorCodes.ReshardCollectionAborted); diff --git a/jstests/sharding/resharding_retryable_writes.js b/jstests/sharding/resharding_retryable_writes.js index 24b2245f8e9..31ba36e1059 100644 --- a/jstests/sharding/resharding_retryable_writes.js +++ b/jstests/sharding/resharding_retryable_writes.js @@ -35,8 +35,9 @@ function runTest(minimumOperationDurationMS, shouldReshardInPlace) { // Test batched insert with multiple batches on shard 0, let it be one batch on shard 1. const rst0 = reshardingTest.getReplSetForShard(donorShardNames[0]); - rst0.nodes.forEach(node => {assert.commandWorked( - node.adminCommand({setParameter: 1, internalInsertMaxBatchSize: 2}))}); + rst0.nodes.forEach(node => { + assert.commandWorked(node.adminCommand({setParameter: 1, internalInsertMaxBatchSize: 2})); + }); assert.commandWorked(sourceCollection.insert([ {_id: "stays on shard0", oldKey: -10, newKey: -10, counter: 0}, diff --git a/jstests/sharding/resharding_timeseries/move_timeseries.js b/jstests/sharding/resharding_timeseries/move_timeseries.js index 5fcd625f888..1700de34dc9 100644 --- a/jstests/sharding/resharding_timeseries/move_timeseries.js +++ b/jstests/sharding/resharding_timeseries/move_timeseries.js @@ -20,7 +20,7 @@ const st = reshardingTest._st; const timeseriesInfo = { timeField: 'ts', metaField: 'meta' -} +}; const timeseriesCollection = reshardingTest.createUnshardedCollection({ ns: ns, @@ -58,12 +58,12 @@ reshardingTest.withMoveCollectionInBackground({toShard: st.shard2.shardName}, () ])); }); -let timeseriesCollDocPostResharding = st.config.collections.findOne({_id: bucketNss}) +let timeseriesCollDocPostResharding = st.config.collections.findOne({_id: bucketNss}); // Resharding keeps timeseries fields. 
-assert.eq(timeseriesCollDocPostResharding.timeseriesFields.timeField, timeseriesInfo.timeField) -assert.eq(timeseriesCollDocPostResharding.timeseriesFields.metaField, timeseriesInfo.metaField) +assert.eq(timeseriesCollDocPostResharding.timeseriesFields.timeField, timeseriesInfo.timeField); +assert.eq(timeseriesCollDocPostResharding.timeseriesFields.metaField, timeseriesInfo.metaField); // Resharding has updated shard key. -assert.eq(timeseriesCollDocPostResharding.key, {"_id": 1}) +assert.eq(timeseriesCollDocPostResharding.key, {"_id": 1}); assert.eq(timeseriesCollDocPostResharding.unsplittable, true); assert.eq(5, st.rs2.getPrimary().getCollection(bucketNss).countDocuments({})); diff --git a/jstests/sharding/resharding_timeseries/reshard_timeseries.js b/jstests/sharding/resharding_timeseries/reshard_timeseries.js index 5bd9dcce2dd..91f6ff302a0 100644 --- a/jstests/sharding/resharding_timeseries/reshard_timeseries.js +++ b/jstests/sharding/resharding_timeseries/reshard_timeseries.js @@ -16,7 +16,7 @@ const recipientShardNames = reshardingTest.recipientShardNames; const timeseriesInfo = { timeField: 'ts', metaField: 'meta' -} +}; const timeseriesCollection = reshardingTest.createShardedCollection({ ns: ns, @@ -33,10 +33,10 @@ const timeseriesCollection = reshardingTest.createShardedCollection({ const bucketNss = "reshardingDb.system.buckets.coll"; const st = reshardingTest._st; -let timeseriesCollDoc = st.config.collections.findOne({_id: bucketNss}) -assert.eq(timeseriesCollDoc.timeseriesFields.timeField, timeseriesInfo.timeField) -assert.eq(timeseriesCollDoc.timeseriesFields.metaField, timeseriesInfo.metaField) -assert.eq(timeseriesCollDoc.key, {"meta.x": 1}) +let timeseriesCollDoc = st.config.collections.findOne({_id: bucketNss}); +assert.eq(timeseriesCollDoc.timeseriesFields.timeField, timeseriesInfo.timeField); +assert.eq(timeseriesCollDoc.timeseriesFields.metaField, timeseriesInfo.metaField); +assert.eq(timeseriesCollDoc.key, {"meta.x": 1}); // Insert some docs assert.commandWorked(timeseriesCollection.insert([ @@ -69,12 +69,12 @@ reshardingTest.withReshardingInBackground({ ])); }); -let timeseriesCollDocPostResharding = st.config.collections.findOne({_id: bucketNss}) +let timeseriesCollDocPostResharding = st.config.collections.findOne({_id: bucketNss}); // Resharding keeps timeseries fields. -assert.eq(timeseriesCollDocPostResharding.timeseriesFields.timeField, timeseriesInfo.timeField) -assert.eq(timeseriesCollDocPostResharding.timeseriesFields.metaField, timeseriesInfo.metaField) +assert.eq(timeseriesCollDocPostResharding.timeseriesFields.timeField, timeseriesInfo.timeField); +assert.eq(timeseriesCollDocPostResharding.timeseriesFields.metaField, timeseriesInfo.metaField); // Resharding has updated shard key. -assert.eq(timeseriesCollDocPostResharding.key, {"meta.y": 1}) +assert.eq(timeseriesCollDocPostResharding.key, {"meta.y": 1}); assert.eq(0, st.rs0.getPrimary().getCollection(bucketNss).countDocuments({})); assert.eq(0, st.rs1.getPrimary().getCollection(bucketNss).countDocuments({})); diff --git a/jstests/sharding/resharding_timeseries/reshard_timeseries_disallow_writes.js b/jstests/sharding/resharding_timeseries/reshard_timeseries_disallow_writes.js index 16340aa4cfc..1fceb6c5e3a 100644 --- a/jstests/sharding/resharding_timeseries/reshard_timeseries_disallow_writes.js +++ b/jstests/sharding/resharding_timeseries/reshard_timeseries_disallow_writes.js @@ -18,7 +18,7 @@ const ns = dbName + "." 
+ collName; const timeseriesInfo = { timeField: 'ts', metaField: 'meta' -} +}; const donorShardNames = reshardingTest.donorShardNames; diff --git a/jstests/sharding/resharding_timeseries/reshard_timeseries_nonempty_stash.js b/jstests/sharding/resharding_timeseries/reshard_timeseries_nonempty_stash.js index 8849ca56908..dc9e74794e3 100644 --- a/jstests/sharding/resharding_timeseries/reshard_timeseries_nonempty_stash.js +++ b/jstests/sharding/resharding_timeseries/reshard_timeseries_nonempty_stash.js @@ -17,7 +17,7 @@ const recipientShardNames = reshardingTest.recipientShardNames; const timeseriesInfo = { timeField: 'ts', metaField: 'meta' -} +}; const timeseriesCollection = reshardingTest.createShardedCollection({ ns: ns, diff --git a/jstests/sharding/resharding_timeseries/reshard_timeseries_stash_resolution.js b/jstests/sharding/resharding_timeseries/reshard_timeseries_stash_resolution.js index bb889eb17b0..cabbe28d2de 100644 --- a/jstests/sharding/resharding_timeseries/reshard_timeseries_stash_resolution.js +++ b/jstests/sharding/resharding_timeseries/reshard_timeseries_stash_resolution.js @@ -18,7 +18,7 @@ const recipientShardNames = reshardingTest.recipientShardNames; const timeseriesInfo = { timeField: 'ts', metaField: 'meta' -} +}; const timeseriesCollection = reshardingTest.createShardedCollection({ ns: ns, diff --git a/jstests/sharding/resharding_timeseries/reshard_timeseries_validation.js b/jstests/sharding/resharding_timeseries/reshard_timeseries_validation.js index 6e2ab08a985..9c579e32c70 100644 --- a/jstests/sharding/resharding_timeseries/reshard_timeseries_validation.js +++ b/jstests/sharding/resharding_timeseries/reshard_timeseries_validation.js @@ -27,13 +27,13 @@ assert.commandWorked(st.s.adminCommand({ timeseries: timeseriesOptions, })); -const kBucketCollName = "system.buckets.foo" +const kBucketCollName = "system.buckets.foo"; const kBucketNss = kDbName + "." 
+ kBucketCollName; -let timeseriesCollDoc = st.config.collections.findOne({_id: kBucketNss}) -assert.eq(timeseriesCollDoc.timeseriesFields.timeField, timeseriesOptions.timeField) -assert.eq(timeseriesCollDoc.timeseriesFields.metaField, timeseriesOptions.metaField) -assert.eq(timeseriesCollDoc.key, {meta: 1}) +let timeseriesCollDoc = st.config.collections.findOne({_id: kBucketNss}); +assert.eq(timeseriesCollDoc.timeseriesFields.timeField, timeseriesOptions.timeField); +assert.eq(timeseriesCollDoc.timeseriesFields.metaField, timeseriesOptions.metaField); +assert.eq(timeseriesCollDoc.key, {meta: 1}); const sDB = st.s.getDB(kDbName); @@ -58,20 +58,20 @@ assert.commandFailedWithCode( function reshardAndVerifyShardKeyAndIndexes( newKey, indexIdx, expectedViewIndexKey, expectedBucketIndexKey, expectedBucketShardKey) { jsTestLog("Resharding to new key:"); - printjson(newKey) + printjson(newKey); assert.commandWorked(mongos.adminCommand({reshardCollection: ns, key: newKey})); const viewIndexes = assert.commandWorked(sDB.getCollection(kCollName).runCommand({listIndexes: kCollName})); - assert.eq(viewIndexes.cursor.firstBatch[indexIdx]["key"], expectedViewIndexKey) + assert.eq(viewIndexes.cursor.firstBatch[indexIdx]["key"], expectedViewIndexKey); const bucketIndexes = assert.commandWorked( sDB.getCollection(kBucketCollName).runCommand({listIndexes: kBucketCollName})); - assert.eq(bucketIndexes.cursor.firstBatch[indexIdx]["key"], expectedBucketIndexKey) + assert.eq(bucketIndexes.cursor.firstBatch[indexIdx]["key"], expectedBucketIndexKey); - let configCollectionsBucketsEntry = st.config.collections.findOne({_id: kBucketNss}) - assert.eq(configCollectionsBucketsEntry["key"], expectedBucketShardKey) + let configCollectionsBucketsEntry = st.config.collections.findOne({_id: kBucketNss}); + assert.eq(configCollectionsBucketsEntry["key"], expectedBucketShardKey); } // Success scenarios. 
@@ -79,18 +79,18 @@ reshardAndVerifyShardKeyAndIndexes({[timeFieldName]: 1}, 1, {[timeFieldName]: 1}, {"control.min.time": 1, "control.max.time": 1}, - {"control.min.time": 1}) + {"control.min.time": 1}); reshardAndVerifyShardKeyAndIndexes( - {'hostId.x': "hashed"}, 2, {"hostId.x": "hashed"}, {"meta.x": "hashed"}, {"meta.x": "hashed"}) + {'hostId.x': "hashed"}, 2, {"hostId.x": "hashed"}, {"meta.x": "hashed"}, {"meta.x": "hashed"}); reshardAndVerifyShardKeyAndIndexes({[metaFieldName]: 1}, 0, {[metaFieldName]: 1, [timeFieldName]: 1}, {"meta": 1, "control.min.time": 1, "control.max.time": 1}, - {"meta": 1}) + {"meta": 1}); reshardAndVerifyShardKeyAndIndexes({'hostId.y': 1, [timeFieldName]: 1}, 3, {"hostId.y": 1, [timeFieldName]: 1}, {"meta.y": 1, "control.min.time": 1, "control.max.time": 1}, - {"meta.y": 1, "control.min.time": 1}) + {"meta.y": 1, "control.min.time": 1}); st.stop(); diff --git a/jstests/sharding/resharding_timeseries/resharding_ts_resume_agg_token.js b/jstests/sharding/resharding_timeseries/resharding_ts_resume_agg_token.js index 20af17268d3..be24d724774 100644 --- a/jstests/sharding/resharding_timeseries/resharding_ts_resume_agg_token.js +++ b/jstests/sharding/resharding_timeseries/resharding_ts_resume_agg_token.js @@ -30,7 +30,7 @@ assert.commandWorked(st.s.adminCommand({ timeseries: timeseriesOptions, })); -const kBucketCollName = "system.buckets.foo" +const kBucketCollName = "system.buckets.foo"; const doc1 = { data: 1, diff --git a/jstests/sharding/resharding_timeseries/resharding_ts_txn_cloner.js b/jstests/sharding/resharding_timeseries/resharding_ts_txn_cloner.js index 4992c67f3cd..ef9d59bb4f6 100644 --- a/jstests/sharding/resharding_timeseries/resharding_ts_txn_cloner.js +++ b/jstests/sharding/resharding_timeseries/resharding_ts_txn_cloner.js @@ -21,7 +21,7 @@ const ns = dbName + "." + collName; const timeseriesInfo = { timeField: 'ts', metaField: 'meta' -} +}; const donorShardNames = reshardingTest.donorShardNames; const inputCollection = reshardingTest.createShardedCollection({ diff --git a/jstests/sharding/resharding_timeseries/unshard_timeseries.js b/jstests/sharding/resharding_timeseries/unshard_timeseries.js index 84e8580de76..0e48dd99b3b 100644 --- a/jstests/sharding/resharding_timeseries/unshard_timeseries.js +++ b/jstests/sharding/resharding_timeseries/unshard_timeseries.js @@ -19,7 +19,7 @@ const donorShardNames = reshardingTest.donorShardNames; const timeseriesInfo = { timeField: 'ts', metaField: 'meta' -} +}; const timeseriesCollection = reshardingTest.createShardedCollection({ ns: ns, @@ -48,12 +48,12 @@ reshardingTest.withUnshardCollectionInBackground({ toShard: st.shard2.shardName, }); -let timeseriesCollDocPostResharding = st.config.collections.findOne({_id: bucketNss}) +let timeseriesCollDocPostResharding = st.config.collections.findOne({_id: bucketNss}); // Resharding keeps timeseries fields. -assert.eq(timeseriesCollDocPostResharding.timeseriesFields.timeField, timeseriesInfo.timeField) -assert.eq(timeseriesCollDocPostResharding.timeseriesFields.metaField, timeseriesInfo.metaField) +assert.eq(timeseriesCollDocPostResharding.timeseriesFields.timeField, timeseriesInfo.timeField); +assert.eq(timeseriesCollDocPostResharding.timeseriesFields.metaField, timeseriesInfo.metaField); // Resharding has updated shard key. 
-assert.eq(timeseriesCollDocPostResharding.key, {"_id": 1}) +assert.eq(timeseriesCollDocPostResharding.key, {"_id": 1}); assert.eq(timeseriesCollDocPostResharding.unsplittable, true); assert.eq(4, st.rs2.getPrimary().getCollection(bucketNss).countDocuments({})); diff --git a/jstests/sharding/retryable_update_one_by_id_chunk_migration.js b/jstests/sharding/retryable_update_one_by_id_chunk_migration.js index 98aec8774fa..73b0705afe1 100644 --- a/jstests/sharding/retryable_update_one_by_id_chunk_migration.js +++ b/jstests/sharding/retryable_update_one_by_id_chunk_migration.js @@ -48,7 +48,7 @@ const deleteCmd = { deletes: [{q: {_id: 0}, limit: 1}], ordered: true, txnNumber: NumberLong(1), -} +}; const updateCmdUnordered = { updates: [ diff --git a/jstests/sharding/snapshot_reads_subpipeline_targeting.js b/jstests/sharding/snapshot_reads_subpipeline_targeting.js index 99bd0ae1f62..80b42070e80 100644 --- a/jstests/sharding/snapshot_reads_subpipeline_targeting.js +++ b/jstests/sharding/snapshot_reads_subpipeline_targeting.js @@ -26,8 +26,8 @@ const st = new ShardingTest({ const dbName = jsTestName(); const db = st.s.getDB(dbName); -const local = db.local -const foreign = db.foreign +const local = db.local; +const foreign = db.foreign; assert.commandWorked( st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.shardName})); @@ -64,7 +64,7 @@ jsTestLog("Running operations with readConcert: " + tojson(readConcern)); const resBefore = local.aggregate(pipeline, {readConcern: readConcern}).toArray(); // Update collection and move chunk. -const session = db.getMongo().startSession({retryWrites: true}) +const session = db.getMongo().startSession({retryWrites: true}); assert.commandWorked( session.getDatabase(dbName).getCollection("foreign").updateOne({a: -5}, {$set: {a: 5}})); assert.commandWorked( diff --git a/jstests/sharding/timeseries_buckets_modification_with_id.js b/jstests/sharding/timeseries_buckets_modification_with_id.js index a3e46d8c79d..67fc6f3aa6c 100644 --- a/jstests/sharding/timeseries_buckets_modification_with_id.js +++ b/jstests/sharding/timeseries_buckets_modification_with_id.js @@ -95,6 +95,6 @@ runTest({ multi: false }] }, - updateValidateFn) + updateValidateFn); st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_multiple_mongos.js b/jstests/sharding/timeseries_multiple_mongos.js index caf73555675..1c3e7b5be81 100644 --- a/jstests/sharding/timeseries_multiple_mongos.js +++ b/jstests/sharding/timeseries_multiple_mongos.js @@ -60,7 +60,7 @@ function runTest({shardKey, cmdObj}) { _waitForDelete: true })); - assert.commandWorked(mongos1.runCommand(cmdObj)) + assert.commandWorked(mongos1.runCommand(cmdObj)); // Insert dummy data so that the 'mongos1' sees the collection as sharded. 
     assert.commandWorked(mongos1.getCollection(collName).insert({[timeField]: ISODate()}));
@@ -70,7 +70,7 @@ function runTest({shardKey, cmdObj}) {
     assert.commandWorked(mongos0.createCollection(
         collName, {timeseries: {timeField: timeField, metaField: metaField}}));
 
-    assert.commandWorked(mongos1.runCommand(cmdObj))
+    assert.commandWorked(mongos1.runCommand(cmdObj));
 }
 
 /**
diff --git a/jstests/sharding/timeseries_retry_delete_and_update_multi_shard.js b/jstests/sharding/timeseries_retry_delete_and_update_multi_shard.js
index 6dac0f84a5c..648be06a2d7 100644
--- a/jstests/sharding/timeseries_retry_delete_and_update_multi_shard.js
+++ b/jstests/sharding/timeseries_retry_delete_and_update_multi_shard.js
@@ -14,7 +14,7 @@ import {
     runTimeseriesRetryDeleteAndUpdateTest
-} from "jstests/libs/timeseries_retry_delete_and_update.js"
+} from "jstests/libs/timeseries_retry_delete_and_update.js";
 
 const st = new ShardingTest({
     shards: 2,
diff --git a/jstests/sharding/timeseries_retry_delete_and_update_single_shard.js b/jstests/sharding/timeseries_retry_delete_and_update_single_shard.js
index 4d23a0a3d13..58c861a76ff 100644
--- a/jstests/sharding/timeseries_retry_delete_and_update_single_shard.js
+++ b/jstests/sharding/timeseries_retry_delete_and_update_single_shard.js
@@ -14,7 +14,7 @@ import {
     runTimeseriesRetryDeleteAndUpdateTest
-} from "jstests/libs/timeseries_retry_delete_and_update.js"
+} from "jstests/libs/timeseries_retry_delete_and_update.js";
 
 const st = new ShardingTest({
     shards: 1,
diff --git a/jstests/sharding/timeseries_retry_delete_and_update_unsharded.js b/jstests/sharding/timeseries_retry_delete_and_update_unsharded.js
index 539effc2f16..b87b9836d29 100644
--- a/jstests/sharding/timeseries_retry_delete_and_update_unsharded.js
+++ b/jstests/sharding/timeseries_retry_delete_and_update_unsharded.js
@@ -14,7 +14,7 @@ import {
    runTimeseriesRetryDeleteAndUpdateTest
-} from "jstests/libs/timeseries_retry_delete_and_update.js"
+} from "jstests/libs/timeseries_retry_delete_and_update.js";
 
 const st = new ShardingTest({
     shards: 1,
diff --git a/jstests/sharding/timeseries_shard_collection.js b/jstests/sharding/timeseries_shard_collection.js
index 2374c449f3d..0082faf13a4 100644
--- a/jstests/sharding/timeseries_shard_collection.js
+++ b/jstests/sharding/timeseries_shard_collection.js
@@ -92,7 +92,7 @@ function metaShardKey(implicit) {
         coll: sDB['system.buckets.ts'],
         expectedKey: expectedKey,
         usingTimeseriesDefaultKey: implicit
-    })
+    });
 
     assert.commandWorked(st.s.adminCommand({split: 'test.system.buckets.ts', middle: {meta: 10}}));
 
@@ -141,7 +141,7 @@ function metaSubFieldShardKey(implicit) {
 
     validateViewCreated("ts");
 
-    validateIndexBackingShardKey({coll: sDB['system.buckets.ts'], expectedKey: {'meta.a': 1}})
+    validateIndexBackingShardKey({coll: sDB['system.buckets.ts'], expectedKey: {'meta.a': 1}});
 
     assert.commandWorked(
         st.s.adminCommand({split: 'test.system.buckets.ts', middle: {'meta.a': 10}}));
@@ -187,7 +187,7 @@ function metaAndTimeShardKey(implicit) {
         coll: sDB['system.buckets.ts'],
         expectedKey: {"meta": 1, "control.min.time": 1, "control.max.time": 1},
         usingTimeseriesDefaultKey: true
-    })
+    });
 
     validateBucketsCollectionSharded({
         collName: 'ts',
diff --git a/jstests/sharding/timeseries_user_system_buckets_sharding.js b/jstests/sharding/timeseries_user_system_buckets_sharding.js
index 2834cb67234..577af624800 100644
--- a/jstests/sharding/timeseries_user_system_buckets_sharding.js
+++ b/jstests/sharding/timeseries_user_system_buckets_sharding.js
@@ -35,9 +35,9 @@
     metaField: "metadata2"
 };
 
-const kDbName = "test"
-const kColl = "coll"
-const kBucket = "system.buckets.coll"
+const kDbName = "test";
+const kColl = "coll";
+const kBucket = "system.buckets.coll";
 
 var db = st.getDB(kDbName);
 
@@ -60,11 +60,10 @@ function createFailed(collName, tsOptions, errorCode) {
 }
 
 function shardCollectionWorked(collName, tsOptions = {}) {
-    let nss = kDbName + "." + collName
+    let nss = kDbName + "." + collName;
     if (Object.keys(tsOptions).length === 0) {
         assert.commandWorked(st.s.adminCommand({shardCollection: nss, key: {x: 1}}));
-    }
-    else {
+    } else {
         assert.commandWorked(
             st.s.adminCommand({shardCollection: nss, key: {timestamp: 1}, timeseries: tsOptions}));
     }
@@ -72,12 +71,11 @@ function shardCollectionWorked(collName, tsOptions = {}) {
 }
 
 function shardCollectionFailed(collName, tsOptions, errorCode) {
-    let nss = kDbName + "." + collName
+    let nss = kDbName + "." + collName;
     if (Object.keys(tsOptions).length === 0) {
         assert.commandFailedWithCode(st.s.adminCommand({shardCollection: nss, key: {x: 1}}),
                                      errorCode);
-    }
-    else {
+    } else {
         assert.commandFailedWithCode(
             st.s.adminCommand({shardCollection: nss, key: {timestamp: 1}, timeseries: tsOptions}),
             errorCode);
@@ -92,7 +90,7 @@ function runTest(testCase, minRequiredVersion = null) {
             return;
         }
     }
-    testCase()
+    testCase();
     db.dropDatabase();
 }
 
diff --git a/jstests/sharding/txn_participant_adds_additional_participants_with_aborts.js b/jstests/sharding/txn_participant_adds_additional_participants_with_aborts.js
index 700618304b7..136d84edcba 100644
--- a/jstests/sharding/txn_participant_adds_additional_participants_with_aborts.js
+++ b/jstests/sharding/txn_participant_adds_additional_participants_with_aborts.js
@@ -194,7 +194,7 @@ const allParticipants = [st.shard0, st.shard1, st.shard2];
         assert.commandWorked(st.s.adminCommand({serverStatus: 1})).transactions;
     const finalMongodTxnMetrics = allParticipants.map((shard) => {
         return assert.commandWorked(shard.adminCommand({serverStatus: 1})).transactions;
-    })
+    });
 
     verifyFinalAbortedTransactionMetrics(initialMongosTxnMetrics,
                                          initialMongodTxnMetrics,
                                          finalMongosTxnMetrics,
@@ -252,7 +252,7 @@ const allParticipants = [st.shard0, st.shard1, st.shard2];
         assert.commandWorked(st.s.adminCommand({serverStatus: 1})).transactions;
     const finalMongodTxnMetrics = allParticipants.map((shard) => {
         return assert.commandWorked(shard.adminCommand({serverStatus: 1})).transactions;
-    })
+    });
 
     verifyFinalAbortedTransactionMetrics(initialMongosTxnMetrics,
                                          initialMongodTxnMetrics,
                                          finalMongosTxnMetrics,
diff --git a/jstests/sharding/txn_with_added_participant_fail_to_unyield.js b/jstests/sharding/txn_with_added_participant_fail_to_unyield.js
index 358de2235cb..5e96c822dd2 100644
--- a/jstests/sharding/txn_with_added_participant_fail_to_unyield.js
+++ b/jstests/sharding/txn_with_added_participant_fail_to_unyield.js
@@ -50,10 +50,10 @@ let fp = configureFailPoint(st.shard0, "restoreLocksFail");
 // Run a $lookup where shard0 will add shard1 as an additional participant. The failpoint above
 // should cause shard0 to fail to unyield after getting a response from shard1, causing the request
 // to fail with a LockTimeout error.
-let err = assert.throwsWithCode(
-    () => {sessionDB.getCollection(localColl).aggregate(
-        [{$lookup: {from: foreignColl, localField: "x", foreignField: "_id", as: "result"}}])},
-    ErrorCodes.LockTimeout);
+let err = assert.throwsWithCode(() => {
+    sessionDB.getCollection(localColl).aggregate(
+        [{$lookup: {from: foreignColl, localField: "x", foreignField: "_id", as: "result"}}]);
+}, ErrorCodes.LockTimeout);
 assert.contains("TransientTransactionError", err.errorLabels, tojson(err));
 fp.off();
 
diff --git a/jstests/sharding/unshard_collection_basic.js b/jstests/sharding/unshard_collection_basic.js
index c44b137506f..e6a681ec3da 100644
--- a/jstests/sharding/unshard_collection_basic.js
+++ b/jstests/sharding/unshard_collection_basic.js
@@ -35,7 +35,7 @@ let coll = mongos.getDB(dbName)[collName];
 assert.commandWorked(coll.insert({oldKey: 50}));
 
 // Fail if unsharded collection.
-const unshardedCollName = "foo_unsharded"
+const unshardedCollName = "foo_unsharded";
 const unshardedCollNS = dbName + '.' + unshardedCollName;
 assert.commandWorked(st.s.getDB(dbName).runCommand({create: unshardedCollName}));
 assert.commandFailedWithCode(mongos.adminCommand({unshardCollection: unshardedCollNS}),
@@ -81,8 +81,8 @@ assert.eq(mongos.getDB(dbName).getCollectionInfos({name: coll.getName()})[0].inf
 assert.commandFailedWithCode(mongos.adminCommand({unshardCollection: ns, toShard: shard0}),
                              ErrorCodes.NamespaceNotSharded);
 
-const newCollName = "foo1"
-const newCollNs = dbName + '.' + newCollName
+const newCollName = "foo1";
+const newCollNs = dbName + '.' + newCollName;
 assert.commandWorked(mongos.adminCommand({shardCollection: newCollNs, key: {oldKey: 1}}));
 assert.commandWorked(mongos.adminCommand({split: newCollNs, middle: {oldKey: 0}}));
 
@@ -113,7 +113,7 @@ for (let i = -30; i < 30; ++i) {
     assert.commandWorked(coll.insert({_id: i}));
 }
 
-assert(st.rs0.getPrimary().getCollection(newCollNs).countDocuments({}) == 30)
+assert(st.rs0.getPrimary().getCollection(newCollNs).countDocuments({}) == 30);
 
 // Unshard collection should succeed when collection's original shard key is _id.
 assert.commandWorked(mongos.adminCommand({unshardCollection: newCollNs}));
diff --git a/jstests/sharding/untrack_unsplittable_collection.js b/jstests/sharding/untrack_unsplittable_collection.js
index ad095aeb838..331ad196fcc 100644
--- a/jstests/sharding/untrack_unsplittable_collection.js
+++ b/jstests/sharding/untrack_unsplittable_collection.js
@@ -51,7 +51,7 @@ assert.eq(0, st.s.getCollection('config.chunks').countDocuments({uuid: collUUID}
 
 // Make sure that persisted cached metadata was removed from the primary shard.
 const chunksCollName = 'cache.chunks.' + kNss;
-const configDb = st.shard0.getDB("config")
+const configDb = st.shard0.getDB("config");
 assert.eq(
     0,
     configDb.cache.collections.countDocuments({_id: kNss}),
diff --git a/jstests/sharding/updateOne_with_id_without_shard_key_stale_config.js b/jstests/sharding/updateOne_with_id_without_shard_key_stale_config.js
index 7aafbf21d75..d493d8c990c 100644
--- a/jstests/sharding/updateOne_with_id_without_shard_key_stale_config.js
+++ b/jstests/sharding/updateOne_with_id_without_shard_key_stale_config.js
@@ -30,7 +30,7 @@ CreateShardedCollectionUtil.shardCollectionWithChunks(coll, {x: 1}, [
 assert.commandWorked(coll.insert({x: -1, _id: -1}));
 assert.commandWorked(coll.insert({x: 1, _id: 1}));
 
-assert.neq(st.s1.getDB(jsTestName()).coll.findOne({x: -1, _id: -1}))
+assert.neq(st.s1.getDB(jsTestName()).coll.findOne({x: -1, _id: -1}));
 
 // Move chunk from shard0 to shard1.
 assert.commandWorked(
diff --git a/jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js b/jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js
index 12dbe52a8bc..d3dbb9fe886 100644
--- a/jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js
+++ b/jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js
@@ -72,7 +72,7 @@ export var WriteWithoutShardKeyTestUtil = (function() {
             // sort order was updated.
             if (updatedDocId) {
                 let idField = Object.keys(updatedDocId)[0];
-                let idValue = updatedDocId[idField]
+                let idValue = updatedDocId[idField];
                 assert.eq(doc[idField], idValue);
             }
         }
diff --git a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_single_shard_data_placement_change.js b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_single_shard_data_placement_change.js
index 66b8bc9309e..88f04d1251d 100644
--- a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_single_shard_data_placement_change.js
+++ b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_single_shard_data_placement_change.js
@@ -38,7 +38,8 @@ function runTest(testCase) {
     const session = st.s.startSession();
     session.startTransaction({readConcern: {level: "snapshot"}});
    session.getDatabase(dbName).getCollection(collName2).insert({x: 1});
-    let hangDonorAtStartOfRangeDel = configureFailPoint(st.rs1.getPrimary(), "suspendRangeDeletion")
+    let hangDonorAtStartOfRangeDel =
+        configureFailPoint(st.rs1.getPrimary(), "suspendRangeDeletion");
 
     // Move all chunks for testDb.testColl to shard0.
     const moveChunkShell = startParallelShell(
diff --git a/jstests/sharding/update_shard_key_bulk_write.js b/jstests/sharding/update_shard_key_bulk_write.js
index 06a9a2bd52d..c7f8ab4786d 100644
--- a/jstests/sharding/update_shard_key_bulk_write.js
+++ b/jstests/sharding/update_shard_key_bulk_write.js
@@ -65,7 +65,7 @@ const adminDB = st.s.getDB("admin");
         summaryFieldsValidator(
             res, {nErrors: 0, nInserted: 0, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 1});
     } else {
-        cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1})
+        cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1});
         summaryFieldsValidator(
             res, {nErrors: 0, nInserted: 0, nDeleted: 0, nMatched: 1, nModified: 1, nUpserted: 0});
     }
@@ -88,7 +88,7 @@ const adminDB = st.s.getDB("admin");
         summaryFieldsValidator(
             res, {nErrors: 0, nInserted: 0, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 1});
     } else {
-        cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1})
+        cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1});
         summaryFieldsValidator(
             res, {nErrors: 0, nInserted: 0, nDeleted: 0, nMatched: 1, nModified: 1, nUpserted: 0});
     }
diff --git a/jstests/sharding/verify_sessions_expiration_sharded.js b/jstests/sharding/verify_sessions_expiration_sharded.js
index 2175c8b769e..9a235a896c9 100644
--- a/jstests/sharding/verify_sessions_expiration_sharded.js
+++ b/jstests/sharding/verify_sessions_expiration_sharded.js
@@ -97,7 +97,7 @@ for (let i = 0; i < 10; i++) {
 // generate a new session during the commit phase of the create coordinator
 refreshSessionsAndVerifyExistence(mongosConfig, shardConfig, [], false /* expectToExist */);
 let openedSessionIDs = mongosConfig.system.sessions.find().toArray().map(s => s._id);
-assert.commandWorked(db.runCommand({endSessions: openedSessionIDs}))
+assert.commandWorked(db.runCommand({endSessions: openedSessionIDs}));
 
 let cursors = [];
 sessionIDs = [];
diff --git a/jstests/ssl_linear/mongo_uri_secondaries.js b/jstests/ssl_linear/mongo_uri_secondaries.js
index 40e793cefc7..9c802c40672 100644
--- a/jstests/ssl_linear/mongo_uri_secondaries.js
+++ b/jstests/ssl_linear/mongo_uri_secondaries.js
@@ -5,7 +5,7 @@
 // To install trusted-ca.pem for local testing on OSX, invoke the following at a console:
 // security add-trusted-cert -d jstests/libs/trusted-ca.pem
 
-import {getPython3Binary} from "jstests/libs/python.js"
+import {getPython3Binary} from "jstests/libs/python.js";
 
 const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
 jsTest.log("HOST_TYPE = " + HOST_TYPE);
diff --git a/jstests/ssl_linear/repl_ssl_noca.js b/jstests/ssl_linear/repl_ssl_noca.js
index 32316f8183a..882c854144f 100644
--- a/jstests/ssl_linear/repl_ssl_noca.js
+++ b/jstests/ssl_linear/repl_ssl_noca.js
@@ -5,7 +5,7 @@
 // To install trusted-ca.pem for local testing on OSX, invoke the following at a console:
 // security add-trusted-cert -d jstests/libs/trusted-ca.pem
 
-import {getPython3Binary} from "jstests/libs/python.js"
+import {getPython3Binary} from "jstests/libs/python.js";
 
 const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
 jsTest.log("HOST_TYPE = " + HOST_TYPE);
@@ -68,15 +68,15 @@ try {
         return ret;
     };
 
-    jsTest.log("Testing with no ssl specification...")
+    jsTest.log("Testing with no ssl specification...");
     var noMentionSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}`;
     assert.neq(checkShell(noMentionSSLURL), 0, "shell correctly failed to connect without SSL");
 
-    jsTest.log("Testing with ssl specified false...")
+    jsTest.log("Testing with ssl specified false...");
    var disableSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}&ssl=false`;
     assert.neq(checkShell(disableSSLURL), 0, "shell correctly failed to connect without SSL");
 
-    jsTest.log("Testing with ssl specified true...")
+    jsTest.log("Testing with ssl specified true...");
     var useSSLURL = `mongodb://${nodeList}/admin?replicaSet=${replTest.name}&ssl=true`;
     assert.eq(checkShell(useSSLURL), 0, "successfully connected with SSL");
 
diff --git a/jstests/ssl_linear/ssl_cert_selector.js b/jstests/ssl_linear/ssl_cert_selector.js
index 6c0660fc66b..a45efc4548c 100644
--- a/jstests/ssl_linear/ssl_cert_selector.js
+++ b/jstests/ssl_linear/ssl_cert_selector.js
@@ -3,7 +3,7 @@
  * server.
  */
 
-import {getPython3Binary} from "jstests/libs/python.js"
+import {getPython3Binary} from "jstests/libs/python.js";
 import {requireSSLProvider} from "jstests/ssl/libs/ssl_helpers.js";
 
 requireSSLProvider('windows', function() {
diff --git a/jstests/ssl_linear/ssl_with_system_ca.js b/jstests/ssl_linear/ssl_with_system_ca.js
index 3d387bd8ef7..1658c8ee0e7 100644
--- a/jstests/ssl_linear/ssl_with_system_ca.js
+++ b/jstests/ssl_linear/ssl_with_system_ca.js
@@ -5,7 +5,7 @@
 // To install trusted-ca.pem for local testing on OSX, invoke the following at a console:
 // security add-trusted-cert -d jstests/libs/trusted-ca.pem
 
-import {getPython3Binary} from "jstests/libs/python.js"
+import {getPython3Binary} from "jstests/libs/python.js";
 
 const HOST_TYPE = getBuildInfo().buildEnvironment.target_os;
 jsTest.log("HOST_TYPE = " + HOST_TYPE);
diff --git a/jstests/with_mongot/e2e/foo.js b/jstests/with_mongot/e2e/foo.js
index 0a1752ef848..50be76f9fc8 100644
--- a/jstests/with_mongot/e2e/foo.js
+++ b/jstests/with_mongot/e2e/foo.js
@@ -19,11 +19,11 @@ let paramTwo = assert.commandWorked(
 assert.eq(paramOne["mongotHost"], paramTwo["searchIndexManagementHostAndPort"]);
 
 // If a name is not specified during search index creation, mongot will name it default.
-coll.createSearchIndex({name: "foo-block", definition: {"mappings": {"dynamic": true}}})
+coll.createSearchIndex({name: "foo-block", definition: {"mappings": {"dynamic": true}}});
 // createSearchIndex shell command default behavior is to block returning until mongot lists the new
 // index as queryable eg blockUntilSearchIndexQueryable is true by default.
 coll.createSearchIndex({name: "foo-non-block", definition: {"mappings": {"dynamic": true}}},
-                       {blockUntilSearchIndexQueryable: false})
+                       {blockUntilSearchIndexQueryable: false});
 
 var searchIndexes = coll.aggregate([{"$listSearchIndexes": {}}]).toArray();
 assert.eq(searchIndexes.length, 2, searchIndexes);
diff --git a/jstests/with_mongot/e2e_infrastructure_tests/verify_coll_is_sharded.js b/jstests/with_mongot/e2e_infrastructure_tests/verify_coll_is_sharded.js
index 9fd8fbb61d2..ef37a2748dc 100644
--- a/jstests/with_mongot/e2e_infrastructure_tests/verify_coll_is_sharded.js
+++ b/jstests/with_mongot/e2e_infrastructure_tests/verify_coll_is_sharded.js
@@ -18,5 +18,6 @@ outputColl.insert({a: 1});
 
 // You cannot specify a sharded collection as the output collection, so this should throw if
 // collection is indeed sharded.
-assert.throws(() => {inputColl.aggregate(
-    [{$group: {_id: "$_id", sum: {$sum: "$a"}}}, {$out: outputColl.getName()}])})
+assert.throws(() => {
+    inputColl.aggregate([{$group: {_id: "$_id", sum: {$sum: "$a"}}}, {$out: outputColl.getName()}]);
+});
diff --git a/jstests/with_mongot/search_mocked/search_docsRequested.js b/jstests/with_mongot/search_mocked/search_docsRequested.js
index be009feb958..4e86208410f 100644
--- a/jstests/with_mongot/search_mocked/search_docsRequested.js
+++ b/jstests/with_mongot/search_mocked/search_docsRequested.js
@@ -23,7 +23,7 @@ import {
 const dbName = "test";
 const collName = "search_docsrequested";
 const chunkBoundary = 8;
-const protocolVersion = getDefaultProtocolVersionForPlanShardedSearch()
+const protocolVersion = getDefaultProtocolVersionForPlanShardedSearch();
 
 const docs = [
     {"_id": 1, "title": "cakes"},
diff --git a/jstests/with_mongot/search_mocked/search_initial_batchSize.js b/jstests/with_mongot/search_mocked/search_initial_batchSize.js
index 5ceae2371dd..4187a5377f3 100644
--- a/jstests/with_mongot/search_mocked/search_initial_batchSize.js
+++ b/jstests/with_mongot/search_mocked/search_initial_batchSize.js
@@ -108,7 +108,7 @@ for (let i = 0; i < docs.length; i++) {
         relevantDocsSortedByStreams.push(docs[i]);
         relevantSearchDocs.push({_id: docs[i]._id, $searchScore: searchScore});
         relevantStoredSourceDocs.push({storedSource: docs[i], $searchScore: searchScore});
-        relevantDocsOnlyTitle.push({title: docs[i].title})
+        relevantDocsOnlyTitle.push({title: docs[i].title});
     }
 
 // The documents with lower _id will have a higher search score.
diff --git a/jstests/with_mongot/search_mocked/search_sequence_token_in_lookup.js b/jstests/with_mongot/search_mocked/search_sequence_token_in_lookup.js
index 58ea7119268..db5b4db35a0 100644
--- a/jstests/with_mongot/search_mocked/search_sequence_token_in_lookup.js
+++ b/jstests/with_mongot/search_mocked/search_sequence_token_in_lookup.js
@@ -28,7 +28,7 @@ function insertDocs(coll) {
 insertDocs(collSBE);
 
 function testPaginationInLookup(mongotConn, db, coll) {
-    const searchQuery = {query: "cake", path: "titles"}
+    const searchQuery = {query: "cake", path: "titles"};
 
     let collUUID = getUUIDFromListCollections(db, coll.getName());
     const searchCmd = {
diff --git a/jstests/with_mongot/search_mocked/sharded_search_internal_unknown_fields.js b/jstests/with_mongot/search_mocked/sharded_search_internal_unknown_fields.js
index 17c82c41993..4aea650bf03 100644
--- a/jstests/with_mongot/search_mocked/sharded_search_internal_unknown_fields.js
+++ b/jstests/with_mongot/search_mocked/sharded_search_internal_unknown_fields.js
@@ -103,7 +103,7 @@ const shardPipelineWithUnknownFields = {
         metadataMergeProtocolVersion: protocolVersion,
         unknownField: "cake carrots apple kale",
     }
-}
+};
 
 const commandObj = {
     aggregate: collName,
diff --git a/jstests/with_mongot/search_mocked/sharded_sort_subpipeline.js b/jstests/with_mongot/search_mocked/sharded_sort_subpipeline.js
index 6a158bf46a4..c2c44ebf62c 100644
--- a/jstests/with_mongot/search_mocked/sharded_sort_subpipeline.js
+++ b/jstests/with_mongot/search_mocked/sharded_sort_subpipeline.js
@@ -32,7 +32,7 @@ const stWithMock = new ShardingTestWithMongotMock({
     }
 });
 
-const protocolVersion = getDefaultProtocolVersionForPlanShardedSearch()
+const protocolVersion = getDefaultProtocolVersionForPlanShardedSearch();
 
 stWithMock.start();
 stWithMock.assertEmptyMocks();
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
index 78e651f36c3..55dba4dd6bb 100644
--- a/src/mongo/shell/collection.js
+++ b/src/mongo/shell/collection.js
@@ -685,12 +685,12 @@ DBCollection.prototype.createSearchIndex = function(keys, blockUntilSearchIndexQ
             Object.keys(blockUntilSearchIndexQueryable).length != 1 ||
             !blockUntilSearchIndexQueryable.hasOwnProperty('blockUntilSearchIndexQueryable')) {
             throw new Error(
-                "createSearchIndex only accepts index definition object and blockUntilSearchIndexQueryable object")
+                "createSearchIndex only accepts index definition object and blockUntilSearchIndexQueryable object");
         }
 
         blockOnIndexQueryable = blockUntilSearchIndexQueryable["blockUntilSearchIndexQueryable"];
         if (typeof blockOnIndexQueryable != "boolean") {
-            throw new Error("'blockUntilSearchIndexQueryable' argument must be a boolean")
+            throw new Error("'blockUntilSearchIndexQueryable' argument must be a boolean");
         }
     }
 
@@ -721,7 +721,7 @@ DBCollection.prototype.createSearchIndex = function(keys, blockUntilSearchIndexQ
         let host = shardDoc.host;
         // This connects to primary of each shard.
         let sconn = new Mongo(host);
-        response = this._runCreateSearchIndexOnPrimary(keys, blockOnIndexQueryable, sconn)
+        response = this._runCreateSearchIndexOnPrimary(keys, blockOnIndexQueryable, sconn);
     }
     return response;
 }
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 35265d0a53e..7bcb5300134 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -2210,7 +2210,7 @@ var ReplSetTest = function ReplSetTest(opts) {
         if (targetNode !== undefined) {
             print(`ReplSetTest awaitReplication: wait against targetNode ${
-                targetNode.host} instead of primary.`)
+                targetNode.host} instead of primary.`);
         }
 
         timeout = timeout || this.kDefaultTimeoutMS;
 
@@ -3151,7 +3151,7 @@ var ReplSetTest = function ReplSetTest(opts) {
             dbs.forEach(db => {
                 if (db.tenantId) {
                     try {
-                        const token = _createTenantToken({tenant: db.tenantId})
+                        const token = _createTenantToken({tenant: db.tenantId});
                         rst.nodes.forEach(node => node._setSecurityToken(token));
                         checkTenantChangeCollection(rst, secondaries, db);
                     } finally {
@@ -3888,7 +3888,7 @@ var ReplSetTest = function ReplSetTest(opts) {
     if (rst._useBridge) {
         assert(
             !jsTestOptions().tlsMode,
-            'useBridge cannot be true when using TLS. Add the requires_mongobridge tag to the test to ensure it will be skipped on variants that use TLS.')
+            'useBridge cannot be true when using TLS. Add the requires_mongobridge tag to the test to ensure it will be skipped on variants that use TLS.');
     }
 
     rst._bridgeOptions = opts.bridgeOptions || {};
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 3109236fab6..17e3d889c77 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -1401,7 +1401,7 @@ var ShardingTest = function ShardingTest(params) {
     if (this._useBridge) {
         assert(
             !jsTestOptions().tlsMode,
-            'useBridge cannot be true when using TLS. Add the requires_mongobridge tag to the test to ensure it will be skipped on variants that use TLS.')
+            'useBridge cannot be true when using TLS. Add the requires_mongobridge tag to the test to ensure it will be skipped on variants that use TLS.');
     }
 
     this._unbridgedMongos = [];
@@ -2118,7 +2118,7 @@ var ShardingTest = function ShardingTest(params) {
         // replica set.
        assert.soonNoExcept(() => {
            function getConfigShardDoc() {
-                return csrsPrimary.getDB("config").shards.findOne({_id: "config"})
+                return csrsPrimary.getDB("config").shards.findOne({_id: "config"});
            }
            const configShardDoc = this.keyFile ?
                authutil.asCluster(csrsPrimary, this.keyFile, getConfigShardDoc)