Mirror of https://github.com/mongodb/mongo.git

SERVER-91379 Re-introduce eslint rule for missing semicolons (#23301)

GitOrigin-RevId: fd9cce5f9f201004e44ffdeabdb33cd93e79b451
Author: Catalin Sumanaru, 2024-06-11 14:23:50 +01:00 (committed by MongoDB Bot)
parent 3b483d36e8
commit de97aac8f6
257 changed files with 956 additions and 990 deletions

View File

@ -17,6 +17,7 @@ rules:
no-redeclare: 0
no-constant-condition: 0
no-loss-of-precision: 0
semi: 2
no-restricted-syntax:
[
"error",

View File

@ -330,7 +330,7 @@ assert.eq([2, 3, 4], embeddedResult[0].result);
// Sort on array
assert(coll.drop());
const makeArray = (i) => [i, i + 1, i + 2]
const makeArray = (i) => [i, i + 1, i + 2];
assert.commandWorked(coll.insertMany([4, 2, 3, 1].map((i) => ({a: makeArray(i)}))));
const nestedResult =
coll.aggregate({$group: {_id: "", result: {$bottomN: {n: 3, output: "$a", sortBy: {"a": 1}}}}})

View File

@ -9,7 +9,7 @@
import {
dropWithoutImplicitRecreate,
withEachMergeMode
} from "jstests/aggregation/extras/merge_helpers.js"
} from "jstests/aggregation/extras/merge_helpers.js";
const outColl = db[`${jsTest.name()}_out`];
const outCollName = outColl.getName();

View File

@ -81,7 +81,7 @@ assertGetFieldFailedWithCode({field: {$const: []}, input: {"a": 1}}, [5654602, 3
// Test that $getField returns the correct value from the provided object.
assertGetFieldResultsEq({field: "a", input: {a: "b"}}, [{_id: 0, test: "b"}, {_id: 1, test: "b"}]);
assertGetFieldResultsEq({field: {$concat: ["a", "b"]}, input: {ab: "b"}},
[{_id: 0, test: "b"}, {_id: 1, test: "b"}])
[{_id: 0, test: "b"}, {_id: 1, test: "b"}]);
assertGetFieldResultsEq({field: {$cond: [false, null, "x"]}, input: {x: "b"}},
[{_id: 0, test: "b"}, {_id: 1, test: "b"}]);
assertGetFieldResultsEq({field: {$cond: [{$eq: ["$y", 9]}, null, "x"]}, input: {x: "b"}},

View File

@ -182,7 +182,7 @@ testExpressionWithIntersection({
array2: [2, 3, 4],
elementIsIncluded: false,
queryFormShouldBeEquivalent: false
})
});
testExpressionWithIntersection({
element: 2,
@ -190,7 +190,7 @@ testExpressionWithIntersection({
array2: [2, 3, 4],
elementIsIncluded: true,
queryFormShouldBeEquivalent: false
})
});
testExpressionWithIntersection({
element: 1,
@ -198,7 +198,7 @@ testExpressionWithIntersection({
array2: [4, 5, 6],
elementIsIncluded: false,
queryFormShouldBeEquivalent: false
})
});
testExpressionWithIntersection({
element: 1,
@ -206,7 +206,7 @@ testExpressionWithIntersection({
array2: [],
elementIsIncluded: false,
queryFormShouldBeEquivalent: false
})
});
testExpressionWithIntersection({
element: 1,
@ -214,7 +214,7 @@ testExpressionWithIntersection({
array2: [4, 5, 6],
elementIsIncluded: false,
queryFormShouldBeEquivalent: false
})
});
/* ------------------------ Mismatched Types Tests ------------------------ */

View File

@ -108,4 +108,4 @@ assert.commandWorked(t.insertOne({three: 3, my_list_of_docs: [{z: 1}, {z: 2}]}))
assertQueryResult({"three": 3, "my_list_of_docs": {"b": 3}}, [
{$set: {my_list_of_docs: {$ifNull: [null, {b: "$three"}]}}},
{$project: {_id: 0, my_list_of_docs: 1, three: 1}}
])
]);

View File

@ -211,7 +211,7 @@ assert.eq(2, matchStages.length);
// Test that we correctly match using the '$elemMatch' expression on renamed subfields. Designed to
// reproduce HELP-59485.
coll.drop()
coll.drop();
assert.commandWorked(coll.insertMany([
{
_id: 0,
@ -266,7 +266,7 @@ runElemMatchTest({
}
],
expectedDocumentIds: [1]
})
});
// Repeat the previous test case, but this time with a $project stage targeting a deeply nested
// transform.
@ -289,7 +289,7 @@ runElemMatchTest({
}
],
expectedDocumentIds: [1],
})
});
// Similarly, ensure that we match on the correct documents when using $elemMatch expressions on
// simple dot-syntax renamed fields.
@ -303,4 +303,4 @@ runElemMatchTest({
}
],
expectedDocumentIds: [1]
})
});

View File

@ -60,7 +60,7 @@ function testDocOnBoundsPartitioned() {
{"time": ISODate("2023-09-15T06:00:00Z")},
{"time": ISODate("2023-09-15T12:00:00Z")},
{"time": ISODate("2023-09-15T18:00:00Z")}
]
];
assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected));
}
@ -110,7 +110,7 @@ function testDocOnBoundsNotPartitioned() {
{"time": ISODate("2023-09-15T06:00:00Z")},
{"time": ISODate("2023-09-15T12:00:00Z")},
{"time": ISODate("2023-09-15T18:00:00Z")}
]
];
assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected));
}
@ -175,7 +175,7 @@ function testDocOnAndOffFullBound() {
{"time": ISODate("2023-09-15T12:00:00Z")},
{"time": ISODate("2023-09-15T18:00:00Z")},
{"time": ISODate("2023-09-15T18:00:00Z"), "orig": true}
]
];
assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected));
}
@ -213,7 +213,7 @@ function testFullNoPartition() {
{"time": ISODate("2023-09-14T12:00:00Z")},
{"time": ISODate("2023-09-14T18:00:00Z")},
{"time": ISODate("2023-09-15T00:00:00Z"), "orig": true},
]
];
assert(arrayEq(resultArray, expected), buildErrorString(resultArray, expected));
}
testDocOnBoundsPartitioned();

View File

@ -103,14 +103,13 @@ export function densifyInJS(stage, docs) {
// Explicit ranges always generate on-step relative to the lower bound of the range;
// this function encapsulates the logic to do that for dates (it requires a loop since steps
// aren't always constant sized).
const getNextStepFromBase =
(val, base, step) => {
let nextStep = base;
while (nextStep <= val) {
nextStep = add(nextStep, step);
}
return nextStep;
const getNextStepFromBase = (val, base, step) => {
let nextStep = base;
while (nextStep <= val) {
nextStep = add(nextStep, step);
}
return nextStep;
};
if (bounds === "full") {
if (docs.length == 0) {
@ -120,11 +119,9 @@ export function densifyInJS(stage, docs) {
const maxValue = docsWithoutNulls[docsWithoutNulls.length - 1][field];
return densifyInJS({field: stage.field, range: {step, bounds: [minValue, maxValue], unit}},
docs);
}
else if (bounds === "partition") {
} else if (bounds === "partition") {
throw new Error("Partitioning not supported by JS densify.");
}
else if (bounds.length == 2) {
} else if (bounds.length == 2) {
const [lower, upper] = bounds;
let currentVal = docsWithoutNulls.length > 0
? Math.min(docsWithoutNulls[0], sub(lower, step))
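
For intuition about the stepping helper reformatted above: getNextStepFromBase walks forward from base in increments of step until it strictly exceeds val. A minimal numeric sketch (hypothetical; the real helper goes through a date-aware add()):

    const add = (a, b) => a + b;  // plain-number stand-in for the date-aware add()
    const getNextStepFromBase = (val, base, step) => {
        let nextStep = base;
        while (nextStep <= val) {
            nextStep = add(nextStep, step);
        }
        return nextStep;
    };
    assert.eq(9, getNextStepFromBase(7, 0, 3));  // 0 -> 3 -> 6 -> 9; first on-step value above 7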

View File

@ -141,12 +141,12 @@ assert.throwsWithCode(() => {
}, ErrorCodes.InvalidNamespace);
// $unionWith must fail because it requires a collection even when the database does not exist
assert.throwsWithCode(
() => {nonExistingDB.aggregate([{
assert.throwsWithCode(() => {
nonExistingDB.aggregate([{
$unionWith:
{pipeline: [{$documents: {$map: {input: {$range: [0, 5]}, in : {x: "$$this"}}}}]}
}])},
ErrorCodes.InvalidNamespace);
}]);
}, ErrorCodes.InvalidNamespace);
// $unionWith must fail due to a missing $documents stage
assert.throwsWithCode(() => {

View File

@ -37,7 +37,7 @@ let expectedResults = [
{maxStr: "transmit Ohio AI", minStr: "Louisiana system-worthy Borders"},
{maxStr: "transmit Ohio AI", minStr: "compressing Supervisor Synchronised"},
{maxStr: "fuchsia", minStr: "Inlet"},
]
];
coll.drop();
for (let i = 0; i < documents.length; i++) {
@ -67,9 +67,9 @@ documents = [
{_id: 0, "num": 10, "str": "ABCDEFGHIJK"},
{_id: 1, "num": 3, "str": "ABCDE"},
{_id: 2, "num": 5, "str": "AB"},
]
];
expectedResults = [{minStr: "ABCDEFGHIJK"}, {minStr: "AB"}, {minStr: "AB"}]
expectedResults = [{minStr: "ABCDEFGHIJK"}, {minStr: "AB"}, {minStr: "AB"}];
coll.drop();
for (let i = 0; i < documents.length; i++) {

View File

@ -68,7 +68,7 @@ let pipeline = [{
sortBy: {_id: 1},
output: {rank: {$rank: "$_id"}},
}
}]
}];
assertErrCodeAndErrMsgContains(coll, pipeline, 5371603, "$rank");
// Rank based accumulators must have a sortBy.

View File

@ -68,7 +68,7 @@ function runTests(conn, directConn) {
connectionsToCheck.forEach((db, i) => {
const serverStatus = assert.commandWorked(db.runCommand({serverStatus: 1}));
assert.gt(serverStatus.metrics.operation[[metricField]], beforeMetrics[i]);
})
});
}
// Times out due to the default value.

View File

@ -7395,7 +7395,7 @@ export const authCommandsLib = {
apiParameters: {version: "1", strict: true}
},
setup: function(db) {
const collName = "validate_db_metadata_command_specific_db"
const collName = "validate_db_metadata_command_specific_db";
assert.commandWorked(db.getSiblingDB(firstDbName).createCollection(collName));
assert.commandWorked(db.getSiblingDB(secondDbName).createCollection(collName));
assert.commandWorked(db.getSiblingDB("ThirdDB").createCollection(collName));
@ -7429,7 +7429,7 @@ export const authCommandsLib = {
testname: "validate_db_metadata_command_all_dbs",
command: {validateDBMetadata: 1, apiParameters: {version: "1", strict: true}},
setup: function(db) {
const collName = "validate_db_metadata_command_all_dbs"
const collName = "validate_db_metadata_command_all_dbs";
assert.commandWorked(db.getSiblingDB(firstDbName).createCollection(collName));
assert.commandWorked(db.getSiblingDB(secondDbName).createCollection(collName));
},

View File

@ -263,7 +263,7 @@ let resumeToken = cursor.postBatchResumeToken._data;
assert.soon(() => {
assert.commandWorked(db.t1.insert({a: 2}));
cursor = cst.assertNoChange(cursor);
return resumeToken != cursor.postBatchResumeToken._data
return resumeToken != cursor.postBatchResumeToken._data;
});
// With trivially false predicates
@ -273,7 +273,7 @@ resumeToken = cursor.postBatchResumeToken._data;
assert.soon(() => {
assert.commandWorked(db.t1.insert({a: 2}));
cursor = cst.assertNoChange(cursor);
return resumeToken != cursor.postBatchResumeToken._data
return resumeToken != cursor.postBatchResumeToken._data;
});
cst.cleanUp();

View File

@ -205,20 +205,19 @@ assert.eq(stringValues.slice(0, 2), ["Value", "vAlue"]);
// transaction, they effectively occur at exactly the same time.
assert.sameMembers(stringValues.slice(2, 4), ["vaLue", "valUe"]);
const verifyOnChangeStream =
(matchExpression, hasEntriesReturned) => {
const string = JSON.stringify(matchExpression);
const changeStream = coll.aggregate([{$changeStream: {}}, {$match: matchExpression}]);
assert.commandWorked(coll.insert({string}));
if (hasEntriesReturned) {
assert.soon(() => changeStream.hasNext());
const event = changeStream.next();
assert.eq(event.fullDocument.string, string, event);
} else {
assert(!changeStream.hasNext());
}
changeStream.close();
const verifyOnChangeStream = (matchExpression, hasEntriesReturned) => {
const string = JSON.stringify(matchExpression);
const changeStream = coll.aggregate([{$changeStream: {}}, {$match: matchExpression}]);
assert.commandWorked(coll.insert({string}));
if (hasEntriesReturned) {
assert.soon(() => changeStream.hasNext());
const event = changeStream.next();
assert.eq(event.fullDocument.string, string, event);
} else {
assert(!changeStream.hasNext());
}
changeStream.close();
};
// Run a change stream with empty field path match expression to match null. Expect to return all
// the oplog entries as the field "" is not set in oplogs.

View File

@ -194,7 +194,7 @@ function runTest(startChangeStream) {
"capped": false,
"collation": {"locale": "simple"}
}
})
});
}
assert.commandWorked(db.adminCommand({enableSharding: dbName}));

View File

@ -183,7 +183,7 @@ export const $config = (function() {
coll_aux.createIndex({_id: 1});
const mustShardForeignCollection = cluster.isSharded() && Random.rand() > 0.5;
if (mustShardForeignCollection) {
jsTest.log("Sharding auxiliary collection")
jsTest.log("Sharding auxiliary collection");
cluster.shardCollection(coll_aux, this.shardKey, false);
} else {
jsTest.log("Auxiliary collection will be unsharded");

View File

@ -50,15 +50,18 @@ export const $config = (function() {
return function consumerCallback(db, collName) {
return runGetMoreOnCursor(
db, collName, consumerId, this.batchSize, this.cursorIds, this.sessionId);
}
};
}
return {
// A no-op starting state so the worker threads don't all start on the same cursors.
init: function init(db, collName) {}, consumer0: makeConsumerCallback(0),
consumer1: makeConsumerCallback(1), consumer2: makeConsumerCallback(2),
consumer3: makeConsumerCallback(3), consumer4: makeConsumerCallback(4),
}
init: function init(db, collName) {},
consumer0: makeConsumerCallback(0),
consumer1: makeConsumerCallback(1),
consumer2: makeConsumerCallback(2),
consumer3: makeConsumerCallback(3),
consumer4: makeConsumerCallback(4),
};
}();
var allStatesEqual =
@ -124,7 +127,13 @@ export const $config = (function() {
// threadCount must be equal to numConsumers. We need as many worker threads as consumers to
// avoid a deadlock where all threads are waiting for one particular cursor to run a getMore.
return {
threadCount: data.numConsumers, iterations: 20, startState: 'init', states: states,
transitions: transitions, setup: setup, teardown: teardown, data: data
}
threadCount: data.numConsumers,
iterations: 20,
startState: 'init',
states: states,
transitions: transitions,
setup: setup,
teardown: teardown,
data: data
};
})();

View File

@ -261,7 +261,7 @@ export const $config = (function() {
// Check guarantees IF NO CONCURRENT DROP is running.
// If a concurrent rename came in, then either the full operation succeeded (meaning
// there will be 0 documents left) or the insert came in first.
assert.contains(currentDocs, [0, numDocs], threadInfos)
assert.contains(currentDocs, [0, numDocs], threadInfos);
jsTestLog('CRUD - Update ' + threadInfos);
res = coll.update({generation: generation}, {$set: {updated: true}}, {multi: true});

View File

@ -77,7 +77,9 @@ function setPauseMigrationsClusterParameter(db, cluster, enabled) {
cluster.executeOnMongosNodes((db) => {
// Ensure all mongoses have refreshed the cluster parameter after it was set.
assert.soon(() => {return getPauseMigrationsClusterParameter(db) === enabled});
assert.soon(() => {
return getPauseMigrationsClusterParameter(db) === enabled;
});
});
}
@ -225,7 +227,7 @@ export const $config = extendWorkload($baseConfig, function($config, $super) {
ignoreErrorsIfInNonTransactionalStepdownSuite(() => {
const updates = this.createRandomUpdateBatch(collName);
jsTestLog("Executing updates: " + tojson(updates));
const result = db.runCommand({update: collName, updates})
const result = db.runCommand({update: collName, updates});
jsTestLog("Result: " + tojson(result));
assert.commandWorked(result);
let totalUpdates = 0;

View File

@ -14,9 +14,9 @@
import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
import {
$config as $baseConfig
} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_arbitrary_updates.js'
} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_arbitrary_updates.js';
const logCollection = "log_collection"
const logCollection = "log_collection";
export const $config = extendWorkload($baseConfig, function($config, $super) {
// Perform arbitrary updates on metric fields of measurements.

View File

@ -72,7 +72,7 @@ export const $config = extendWorkload($baseConfig, function($config, $super) {
// when the mongos is fetching data from the shard using getMore(). Remove
// the interruptedQueryErrors from allowedErrorCodes once this bug is addressed
if (TestData.runningWithBalancer) {
allowedErrorCodes = allowedErrorCodes.concat(interruptedQueryErrors)
allowedErrorCodes = allowedErrorCodes.concat(interruptedQueryErrors);
}
assert.commandWorkedOrFailedWithCode(res, allowedErrorCodes);

View File

@ -26,7 +26,7 @@ function retryUntilWorked(query) {
return query();
} catch (e) {
if (e.code == ErrorCodes.QueryPlanKilled && TestData.runningWithBalancer) {
attempts++
attempts++;
} else {
throw e;
}
@ -134,13 +134,16 @@ export const $config = (function() {
// Now validate the state of each reading. We will check all of the seed data and each
// reading that we may have inserted.
for (let readingNo = 0; readingNo < data.nTotalReadings; ++readingNo) {
const wasDeleted = retryUntilWorked(
() => {return logColl.count({readingNo: readingNo, deleted: true}) > 0});
const wasInserted = retryUntilWorked(
() => {return logColl.count({readingNo: readingNo, inserted: true}) > 0});
const wasDeleted = retryUntilWorked(() => {
return logColl.count({readingNo: readingNo, deleted: true}) > 0;
});
const wasInserted = retryUntilWorked(() => {
return logColl.count({readingNo: readingNo, inserted: true}) > 0;
});
const nReadings =
retryUntilWorked(() => {return db[collName].count({readingNo: readingNo})});
const nReadings = retryUntilWorked(() => {
return db[collName].count({readingNo: readingNo});
});
if (wasDeleted && !wasInserted) {
// Easy case: this reading was deleted and never inserted - we expect 0 records.

View File

@ -44,7 +44,7 @@ export const $config = extendWorkload($baseConfig, function($config, $super) {
init: {findAndUpdateMany: 0.25, insert: 0.75},
findAndUpdateMany: {findAndUpdateMany: 0.5, insert: 0.5},
insert: {findAndUpdateMany: 0.5, insert: 0.5}
}
};
return $config;
});

View File

@ -104,7 +104,7 @@ export const $config = (function() {
};
function setup(db, collName, cluster) {
assert.commandWorked(db.createCollection(this.getRegularCollectionName()))
assert.commandWorked(db.createCollection(this.getRegularCollectionName()));
assert.commandWorked(db.createCollection(this.getTimeseriesCollectionName(), {
timeseries: {
timeField: this.timeFieldName,

View File

@ -76,9 +76,9 @@ export const $config = (function() {
if (this.tid === 0 && shouldContinueResharding) {
let newShardKey;
if (bsonWoCompare(this.shardKey, shardKeys[0]) === 0) {
newShardKey = shardKeys[1]
newShardKey = shardKeys[1];
} else {
newShardKey = shardKeys[0]
newShardKey = shardKeys[0];
}
executeReshardTimeseries(db, collName, newShardKey);

View File

@ -45,7 +45,7 @@ for (let i = 0; i < 10; ++i) {
assert.commandWorked(coll.insert(docs));
assert.commandWorked(coll.createIndex({a: 1}));
let testComment = "test1"
let testComment = "test1";
assert.commandWorked(testDB.runCommand({
delete: collName,
deletes: [{q: {a: {$gte: 2}, b: {$gte: 2}}, limit: 1, collation: {locale: "fr"}}],

View File

@ -61,7 +61,7 @@ if (FeatureFlagUtil.isPresentAndEnabled(db, "ErrorAndLogValidationAction")) {
t.runCommand("collMod", {validationAction: "errorAndLog"}), ErrorCodes.InvalidOptions);
if (res.ok) {
assertFailsValidation(t.update({}, {$set: {a: 2}}));
checkLogsForFailedValidation(errorAndLogId)
checkLogsForFailedValidation(errorAndLogId);
// make sure persisted
const info = db.getCollectionInfos({name: t.getName()})[0];
assert.eq("errorAndLog", info.options.validationAction, tojson(info));
@ -74,7 +74,7 @@ t.update({}, {$set: {a: 2}});
assert.eq(1, t.find({a: 2}).itcount());
// check log for message. In case of sharded deployments, look on all shards and expect the log to
// be found on one of them.
checkLogsForFailedValidation(warnLogId)
checkLogsForFailedValidation(warnLogId);
// make sure persisted
const info = db.getCollectionInfos({name: t.getName()})[0];
assert.eq("warn", info.options.validationAction, tojson(info));

View File

@ -68,7 +68,7 @@ switch (getOptimizer(plan)) {
// optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan.
break;
default:
break
break;
}
// Test in query
@ -86,6 +86,6 @@ switch (getOptimizer(plan)) {
// optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan.
break;
default:
break
break;
}
print('all tests pass');

View File

@ -62,7 +62,7 @@ let isShardedColl = false;
function recreateCollWith(documents) {
coll.drop();
assert.commandWorked(coll.insert(documents));
isShardedColl = FixtureHelpers.isSharded(coll)
isShardedColl = FixtureHelpers.isSharded(coll);
}
recreateCollWith(docs);
@ -110,7 +110,7 @@ for (let index of [{a: 1}, {a: -1}, {a: 1, b: 1}, {a: 1, b: -1}, {a: -1, b: 1},
// When the index is not dotted, queries against nested fields do not use express unless they look
// for an exact match.
coll.dropIndexes()
coll.dropIndexes();
assert.commandWorked(coll.createIndex({a: 1}));
runExpressTest({filter: {'a.b': 0}, limit: 1, result: [], usesExpress: false});
runExpressTest({

View File

@ -70,7 +70,7 @@ function validateHiddenIndexBehaviour(
assert.eq(numOfUsedIndexes(explain), 0);
break;
default:
break
break;
}
assert.commandWorked(coll.hideIndex(index_name));
@ -102,7 +102,7 @@ function validateHiddenIndexBehaviour(
assert.eq(numOfUsedIndexes(explain), 0);
break;
default:
break
break;
}
assert.commandWorked(coll.dropIndex(index_name));

View File

@ -53,5 +53,5 @@ switch (getOptimizer(t.find(q1).explain())) {
// optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan.
break;
default:
break
break;
}

View File

@ -42,5 +42,5 @@ switch (getOptimizer(explainResult)) {
break;
}
default:
break
break;
}

View File

@ -292,7 +292,7 @@ if (!FixtureHelpers.isMongos(db)) {
explain = getSingleNodeExplain(coll.find({z: 1}).explain(verbosity));
assert.eq(true, getQueryPlanner(explain).indexFilterSet, explain);
explain =
getSingleNodeExplain(coll.find(queryA1, projectionA1).sort(sortA1).explain(verbosity))
getSingleNodeExplain(coll.find(queryA1, projectionA1).sort(sortA1).explain(verbosity));
assert.eq(true, getQueryPlanner(explain).indexFilterSet, verbosity);
});
} else {

View File

@ -200,7 +200,8 @@ const coll = db.index_partial_read_ops;
explain = getSingleNodeExplain(coll.explain('executionStats').find({x: 2, a: 5}).finish());
assert.eq(1, explain.executionStats.nReturned);
assert(isCollscan(db, getWinningPlan(explain.queryPlanner)));
explain = getSingleNodeExplain(coll.explain('executionStats').find({x: 3, a: 5, b: 1}).finish())
explain =
getSingleNodeExplain(coll.explain('executionStats').find({x: 3, a: 5, b: 1}).finish());
assert.eq(1, explain.executionStats.nReturned);
assert(isCollscan(db, getWinningPlan(explain.queryPlanner)));
})();

View File

@ -14,9 +14,9 @@ const isHintsToQuerySettingsSuite = TestData.isHintsToQuerySettingsSuite || fals
const testDB = db.getSiblingDB("agg_hint");
assert.commandWorked(testDB.dropDatabase());
const collName = jsTestName() + "_col"
const collName = jsTestName() + "_col";
const coll = testDB.getCollection(collName);
const viewName = jsTestName() + "_view"
const viewName = jsTestName() + "_view";
const view = testDB.getCollection(viewName);
function confirmWinningPlanUsesExpectedIndex(

View File

@ -38,7 +38,7 @@ switch (getOptimizer(explainRes)) {
// TODO SERVER-77719: Ensure that the decision for using the scan lines up with CQF
// optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan.
assert(isCollscan(db, winningPlan));
break
break;
}
}
assert(!planHasStage(db, winningPlan, "FETCH"));
@ -77,7 +77,7 @@ switch (getOptimizer(explainRes)) {
// TODO SERVER-77719: Ensure that the decision for using the scan lines up with CQF
// optimizer. M2: allow only collscans, M4: check bonsai behavior for index scan.
assert(isCollscan(db, winningPlan));
break
break;
}
}

View File

@ -49,11 +49,11 @@ if ((serverVer[0] == 7 && serverVer[1] >= 3) || serverVer[0] > 7) {
// Explain output differs slightly under SBE versus classic engine
if (explain.queryPlanner.winningPlan.queryPlan) {
assert.eq("EOF", explain.queryPlanner.winningPlan.queryPlan.stage)
assert.eq("EOF", explain.queryPlanner.winningPlan.queryPlan.stage);
} else {
assert.eq("EOF", explain.queryPlanner.winningPlan.stage)
assert.eq("EOF", explain.queryPlanner.winningPlan.stage);
}
assert.eq("does_not_exist_hopefully.jstests_explain_find", explain.queryPlanner.namespace)
assert.eq({"a": {"$lte": 2}}, explain.queryPlanner.parsedQuery)
assert.eq("does_not_exist_hopefully.jstests_explain_find", explain.queryPlanner.namespace);
assert.eq({"a": {"$lte": 2}}, explain.queryPlanner.parsedQuery);
}

View File

@ -274,7 +274,7 @@ function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentP
explainOut[key], expectedMatches[key], preMsg, totalFieldName);
} else if (key == "stage" && expectedMatches[key] == "UPDATE") {
// Express handles update-by-id post 8.0
let want = [expectedMatches[key], "EXPRESS_UPDATE"]
let want = [expectedMatches[key], "EXPRESS_UPDATE"];
assert.contains(explainOut[key],
want,
preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" +
@ -282,7 +282,7 @@ function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentP
} else if (key == "stage" && expectedMatches[key] == "DELETE") {
// Express handles delete-by-id post 8.0
let want = [expectedMatches[key], "EXPRESS_DELETE"]
let want = [expectedMatches[key], "EXPRESS_DELETE"];
assert.contains(explainOut[key],
want,
preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" +

View File

@ -8,7 +8,7 @@
*/
import {getWinningPlanFromExplain, isEofPlan} from "jstests/libs/analyze_plan.js";
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js";
const collName = "jstests_explain_find_trivially_false_predicates";

View File

@ -15,7 +15,7 @@
* ]
*/
import {getExplainPipelineFromAggregationResult} from "jstests/aggregation/extras/utils.js"
import {getExplainPipelineFromAggregationResult} from "jstests/aggregation/extras/utils.js";
import {
aggPlanHasStage,
getAggPlanStages,
@ -23,7 +23,7 @@ import {
isEofPlan,
planHasStage
} from "jstests/libs/analyze_plan.js";
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js";
function assertPlanIsEOF(plan) {
// Explain query output doesn't include planning for the foreign branch hence we use execution
@ -48,12 +48,12 @@ function assertUnionOfPlans(plan, firstPartStage, secondPartStage) {
const collName = "explain_find_trivially_false_predicates_in_agg_pipelines";
const localCollName = `${collName}-local`
const localCollName = `${collName}-local`;
assertDropAndRecreateCollection(db, localCollName);
const localColl = db[localCollName];
assert.commandWorked(localColl.insert(Array.from({length: 10}, (_, i) => ({a: i, side: "local"}))));
const foreignCollName = `${collName}-foreign`
const foreignCollName = `${collName}-foreign`;
assertDropAndRecreateCollection(db, foreignCollName);
const foreignColl = db[foreignCollName];
assert.commandWorked(

View File

@ -9,7 +9,7 @@
* ]
*/
import {getWinningPlanFromExplain, isEofPlan} from "jstests/libs/analyze_plan.js";
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js";
const collName = "explain_find_trivially_false_predicates_in_tailables_over_capped_colls";

View File

@ -54,7 +54,7 @@ function assertExplainFormat(explain, expectedNumReturned) {
// executionStats - SBE format:
const stages = getExecutionStages(explain);
assert.eq(stages.length, 1, explain);
const execStage = stages[0]
const execStage = stages[0];
assert(execStage.hasOwnProperty("opens"), explain);
assert(execStage.hasOwnProperty("closes"), explain);
assert(!execStage.hasOwnProperty("works"), explain);

View File

@ -74,7 +74,7 @@ winningPlan = getWinningPlan(explain.queryPlanner);
assert(!isIdhackOrExpress(db, winningPlan), winningPlan);
// Express is an 8.0+ feature.
const hasExpress = isExpress(db, getWinningPlan(t.find({_id: 1}).explain().queryPlanner))
const hasExpress = isExpress(db, getWinningPlan(t.find({_id: 1}).explain().queryPlanner));
if (hasExpress) {
// Express is used for simple _id queries.
explain = t.find({_id: 1}).explain();

View File

@ -99,7 +99,7 @@ runWithParamsAllNodes(db, [{key: "internalCascadesOptimizerDisableFastPath", val
const serverStatusBefore = db.serverStatus();
for (let val = 0; val < 5; ++val) {
const pipeline = getPipeline(val)
const pipeline = getPipeline(val);
assert.eq(coll.aggregate(pipeline).toArray().length, 5 - val);
}
const planCacheStats = coll.aggregate([{$planCacheStats: {}}]).toArray();

View File

@ -6,7 +6,7 @@
// ]
//
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js";
import {QuerySettingsUtils} from "jstests/libs/query_settings_utils.js";
const collName = jsTestName();

View File

@ -270,7 +270,7 @@ function instantiateTestCasesNoSecondaryView(...testCases) {
if (FixtureHelpers.isSharded(coll) || FixtureHelpers.isSharded(secondaryColl)) {
// TODO: SERVER-88883 Report 'indexesUsed' for $lookup over sharded collections.
instantiateTestCases(testAggregateQuerySettingsApplicationWithGraphLookup)
instantiateTestCases(testAggregateQuerySettingsApplicationWithGraphLookup);
instantiateTestCasesNoSecondaryView(
testAggregateQuerySettingsApplicationWithoutSecondaryCollections,

View File

@ -83,7 +83,7 @@ function testQueryShapeHash(query) {
assert.eq(slowLogQueryShapeHash,
querySettingsQueryShapeHash,
"Query shape hash from the logs doesn't match the one from query settings");
})
});
}
{

View File

@ -11,7 +11,7 @@ import {
isCollscan,
isIxscan
} from "jstests/libs/analyze_plan.js";
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js";
function assertIXScanTightBounds(explain) {
const winningPlan = getWinningPlanFromExplain(explain);

View File

@ -10,7 +10,7 @@ import {FixtureHelpers} from "jstests/libs/fixture_helpers.js";
const collection = db.jstests_sortj;
collection.drop();
assert.commandWorked(collection.createIndex({a: 1}))
assert.commandWorked(collection.createIndex({a: 1}));
const numShards = FixtureHelpers.numberOfShardsForCollection(collection);

View File

@ -28,7 +28,7 @@ const timeseriesOpts = {
// TODO SERVER-89999: remove once the feature flag version becomes last LTS
const simpleBucketCollectionsDisallowed =
FeatureFlagUtil.isEnabled(db, "DisallowBucketCollectionWithoutTimeseriesOptions")
FeatureFlagUtil.isEnabled(db, "DisallowBucketCollectionWithoutTimeseriesOptions");
function setupEnv() {
db.dropDatabase();
@ -123,7 +123,7 @@ function runTests(targetDbName) {
}
}
jsTest.log("Run test cases with rename within same database")
jsTest.log("Run test cases with rename within same database");
runTests(dbName);
jsTest.log("Run test cases with rename across different databases")
jsTest.log("Run test cases with rename across different databases");
runTests(otherDbName);

View File

@ -23,7 +23,7 @@ import {
runDoesntRewriteTest,
runRewritesTest,
setupColl
} from "jstests/core/timeseries/libs/timeseries_sort_util.js"
} from "jstests/core/timeseries/libs/timeseries_sort_util.js";
const metaCollSubFieldsName = "bucket_unpacking_with_compound_sort_with_meta_sub_on_point_queries";
const metaCollSubFields = db[metaCollSubFieldsName];

View File

@ -24,7 +24,7 @@ import {
forwardIxscan,
runRewritesTest,
setupColl
} from "jstests/core/timeseries/libs/timeseries_sort_util.js"
} from "jstests/core/timeseries/libs/timeseries_sort_util.js";
const collName = "bucket_unpacking_with_sort";
const coll = db[collName];

View File

@ -22,7 +22,7 @@ import {
runDoesntRewriteTest,
runRewritesTest,
setupColl
} from "jstests/core/timeseries/libs/timeseries_sort_util.js"
} from "jstests/core/timeseries/libs/timeseries_sort_util.js";
const collName = "bucket_unpacking_with_sort_negative";
const coll = db[collName];

View File

@ -23,7 +23,7 @@ import {
forwardIxscan,
runRewritesTest,
setupColl
} from "jstests/core/timeseries/libs/timeseries_sort_util.js"
} from "jstests/core/timeseries/libs/timeseries_sort_util.js";
const metaCollSubFieldsName =
"bucket_unpacking_with_sort_with_meta_sub_on_multiple_fields_point_queries";

View File

@ -23,7 +23,7 @@ import {
forwardIxscan,
runRewritesTest,
setupColl
} from "jstests/core/timeseries/libs/timeseries_sort_util.js"
} from "jstests/core/timeseries/libs/timeseries_sort_util.js";
const metaCollName = "bucket_unpacking_with_sort_with_meta_on_single_field_point_queries";
const metaColl = db[metaCollName];

View File

@ -21,7 +21,7 @@ import {
forwardIxscan,
runDoesntRewriteTest,
runRewritesTest
} from "jstests/core/timeseries/libs/timeseries_sort_util.js"
} from "jstests/core/timeseries/libs/timeseries_sort_util.js";
const ciStringCollName = 'bucket_unpacking_with_sort_ci';
const ciStringColl = db[ciStringCollName];

View File

@ -17,7 +17,10 @@
* cannot_run_during_upgrade_downgrade,
* ]
*/
import {runDoesntRewriteTest, setupColl} from "jstests/core/timeseries/libs/timeseries_sort_util.js"
import {
runDoesntRewriteTest,
setupColl
} from "jstests/core/timeseries/libs/timeseries_sort_util.js";
const geoCollName = 'bucket_unpacking_with_sort_with_geo';
const geoColl = db[geoCollName];

View File

@ -12,7 +12,7 @@
* ]
*/
import {getAggPlanStage} from "jstests/libs/analyze_plan.js";
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js";
import {checkSbeFullyEnabled} from "jstests/libs/sbe_util.js";

View File

@ -167,7 +167,7 @@ assert.commandWorked(db.runCommand({
}));
// Setting prepareUnique should return an error on a time-series collection index.
assert.commandWorked(coll.createIndex({"prepareUniqueIndex": 1}))
assert.commandWorked(coll.createIndex({"prepareUniqueIndex": 1}));
assert.commandFailedWithCode(db.runCommand({
"collMod": collName,
"index": {"keyPattern": {"prepareUniqueIndex": 1}, "prepareUnique": true}

View File

@ -61,7 +61,7 @@ TimeseriesTest.run((insert) => {
[timeFieldName]: new Date(datePrefix + 300),
[metaFieldName]: "gpu",
length: -2,
})
});
// Computing a field on a dotted path which is an array, then grouping on it. Note that the
// semantics for setting a computed field on a dotted array path are particularly strange, but

View File

@ -36,9 +36,10 @@ function assertBucketsAreCompressed(db, bucketsColl) {
}
const bucketDocs = bucketsColl.find().toArray();
bucketDocs.forEach(
bucketDoc => {assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version),
`Expected bucket to be compressed: ${tojson(bucketDoc)}`)});
bucketDocs.forEach(bucketDoc => {
assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version),
`Expected bucket to be compressed: ${tojson(bucketDoc)}`);
});
}
function prepareCompressedBucket() {

View File

@ -24,7 +24,7 @@ assert.commandWorked(testDB.createCollection(
assert.commandWorked(tsColl.createIndex({'tags.loc': '2dsphere'}));
tsColl.insert({time: ISODate(), tags: {loc: [40, 40], descr: 0}, value: 0})
tsColl.insert({time: ISODate(), tags: {loc: [40, 40], descr: 0}, value: 0});
const coll2 = db.getCollection("store_min_max_values");
coll2.drop();

View File

@ -14,7 +14,7 @@ const timeFieldName = "time";
const metaFieldName = "tags";
const tsColl = db.getCollection("ts_coll");
const normColl = db.getCollection("normal_coll")
const normColl = db.getCollection("normal_coll");
function setUpCollection(coll, options) {
coll.drop();

View File

@ -23,7 +23,7 @@ import {
blockProcessingTestCases,
generateMetaVals
} from "jstests/libs/block_processing_test_cases.js";
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
import {
checkSbeFullFeatureFlagEnabled,
checkSbeStatus,

View File

@ -124,7 +124,7 @@ TimeseriesTest.run((insert) => {
acc: {[accumulator]: {n: nVal, sortBy: sortBy, output: "$_id"}}
}
}
])
]);
}
}
}

View File

@ -78,7 +78,7 @@ let lpx2 = undefined; // lastpoint value of x for m = 2
coll.insert({t: timestamps.t3, m: 1, x: 3}); // add to bucket #1
// An event with a different meta goes into a separate bucket.
coll.insert({t: timestamps.t6, m: 2, x: 6})
coll.insert({t: timestamps.t6, m: 2, x: 6});
lpx2 = 6;
// If this assert fails it would mean that bucket creation logic has changed. The lastpoint
@ -404,7 +404,7 @@ const casesLastpointWithDistinctScan = [
// The lastpoint opt currently isn't lowered to SBE.
assert(false,
`Lastpoint opt isn't implemented in SBE for pipeline ${
tojson(pipeline)} but got ${tojson(explainFull)}`)
tojson(pipeline)} but got ${tojson(explainFull)}`);
}
// Check that the result matches the one expected by the test case.
@ -444,7 +444,7 @@ const casesLastpointWithDistinctScan = [
// The distinct scan opt currently isn't lowered to SBE.
assert(false,
`Lastpoint opt isn't implemented in SBE for pipeline ${
tojson(pipeline)} but got ${tojson(explainFull)}`)
tojson(pipeline)} but got ${tojson(explainFull)}`);
}
// Check that the result matches the one expected by the test case.

View File

@ -67,7 +67,7 @@ let lpa2 = undefined; // lastpoint value of a for m = 2
coll.insert({t: timestamps.t3, m: 1, x: 3, a: 13}); // add to bucket #1
// An event with a different meta goes into a separate bucket.
coll.insert({t: timestamps.t6, m: 2, x: 6, a: 16})
coll.insert({t: timestamps.t6, m: 2, x: 6, a: 16});
lpx2 = 6;
lpa2 = 16;
@ -131,7 +131,7 @@ const casesLastpointOptimization = [
// The lastpoint opt currently isn't lowered to SBE.
assert(false,
`Lastpoint opt isn't implemented in SBE for pipeline ${
tojson(pipeline)} but got ${tojson(explainFull)}`)
tojson(pipeline)} but got ${tojson(explainFull)}`);
}
// Check that the result matches the one expected by the test case.

View File

@ -282,7 +282,7 @@ TimeseriesTest.run((insert) => {
],
as: "joined"
}};
const result = testDB.local.aggregate(lookupStage)
const result = testDB.local.aggregate(lookupStage);
assertArrayEq({
actual: result.toArray(),
expected: [

View File

@ -17,7 +17,7 @@
import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
import {getEngine, getQueryPlanner, getSingleNodeExplain} from "jstests/libs/analyze_plan.js";
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
import {checkSbeFullyEnabled} from "jstests/libs/sbe_util.js";
TimeseriesTest.run((insert) => {
@ -389,7 +389,7 @@ TimeseriesTest.run((insert) => {
// Check results.
{
const results = coll.aggregate(pipe).toArray().map((x) => x._id)
const results = coll.aggregate(pipe).toArray().map((x) => x._id);
results.sort();
assert.eq(testCase.ids, results, () => "Test case " + tojson(testCase));
}
@ -427,7 +427,7 @@ TimeseriesTest.run((insert) => {
{$match: {"measurement": "cpu"}},
{$project: {_id: 1}}
];
const res = coll.aggregate(pipe).toArray()
const res = coll.aggregate(pipe).toArray();
assert.eq(res.length, coll.count(), res);
}
@ -436,7 +436,7 @@ TimeseriesTest.run((insert) => {
assert.commandWorked(db.createCollection(coll.getName(), {
timeseries: {timeField: timeFieldName, metaField: metaFieldName},
}));
assert.contains(bucketsColl.getName(), db.getCollectionNames())
assert.contains(bucketsColl.getName(), db.getCollectionNames());
insert(
coll,

View File

@ -11,11 +11,11 @@
* ]
*/
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js"
import {assertDropAndRecreateCollection} from "jstests/libs/collection_drop_recreate.js";
const coll = assertDropAndRecreateCollection(db, "timeseries_predicates_normal");
const tsColl = assertDropAndRecreateCollection(
db, "timeseries_predicates_timeseries", {timeseries: {timeField: 'time', metaField: 'mt'}})
db, "timeseries_predicates_timeseries", {timeseries: {timeField: 'time', metaField: 'mt'}});
const bucketsColl = db.getCollection('system.buckets.' + tsColl.getName());
// Tests that we produce the same results for a given 'predicate', with and without timeseries, and

View File

@ -33,7 +33,7 @@ function runTest({docs, pipeline, expectedResults}) {
docs: [{_id: 1, [timeField]: new Date(), [metaField]: 2}],
pipeline: [{$project: {new: {$getField: metaField}, _id: 0}}],
expectedResults: [{new: 2}]
})
});
})();
// $getField does not traverse objects, and should not be rewritten when it relies on a mix of
@ -46,7 +46,7 @@ function runTest({docs, pipeline, expectedResults}) {
],
pipeline: [{$project: {new: {$add: [`$${metaField}`, {$getField: "a.b"}]}}}],
expectedResults: [{_id: 1, new: 5}, {_id: 2, new: null}]
})
});
})();
//
@ -61,7 +61,7 @@ function runTest({docs, pipeline, expectedResults}) {
],
pipeline: [{$project: {new: {$add: [`$${metaField}`, {$getField: {$literal: "a.$b"}}]}}}],
expectedResults: [{_id: 1, new: 5}, {_id: 2, new: null}]
})
});
}());
// There is a difference between the metaField "meta1", and "$meta1". Field paths are allowed to
@ -75,7 +75,7 @@ function runTest({docs, pipeline, expectedResults}) {
],
pipeline: [{$project: {new: {$add: [`$${metaField}`, {$getField: {$literal: "$meta1"}}]}}}],
expectedResults: [{_id: 1, new: null}, {_id: 2, new: 5}]
})
});
})();
//
@ -93,7 +93,7 @@ function runTest({docs, pipeline, expectedResults}) {
}
}],
expectedResults: [{_id: 1, new: 4}, {_id: 2, new: 8}]
})
});
})();
// When we rely on both the metaField and a measurementField we should not perform the rewrite and
@ -109,7 +109,7 @@ function runTest({docs, pipeline, expectedResults}) {
{new: {$add: [`$${metaField}`, {$getField: {$cond: [false, null, "a.b.c"]}}]}}
}],
expectedResults: [{_id: 1, new: null}, {_id: 2, new: 5}]
})
});
})();
//
@ -124,7 +124,7 @@ function runTest({docs, pipeline, expectedResults}) {
],
pipeline: [{$project: {new: {$getField: {input: `$${metaField}`, field: "b"}}}}],
expectedResults: [{_id: 1, new: 4}, {_id: 2}]
})
});
})();
// Validate the correct results are returned when there is a field with '$' inside the metaField.
@ -137,7 +137,7 @@ function runTest({docs, pipeline, expectedResults}) {
],
pipeline: [{$project: {new: {$getField: {input: `$${metaField}`, field: "a.$b"}}}}],
expectedResults: [{_id: 1, new: 4}, {_id: 2}]
})
});
})();
// When we rely on both the metaField and a measurementField we should not perform the rewrite and
@ -157,7 +157,7 @@ function runTest({docs, pipeline, expectedResults}) {
}
}],
expectedResults: [{_id: 1, new: 6}, {_id: 2, new: null}]
})
});
})();
// Same test as above, but with $addFields instead of $project.
@ -179,7 +179,7 @@ function runTest({docs, pipeline, expectedResults}) {
{[timeField]: time, [metaField]: 2, a: {"$meta1": 4}, _id: 1, new: 6},
{[timeField]: time, [metaField]: 2, a: {c: 5}, _id: 2, new: null}
]
})
});
})();
//
@ -203,7 +203,7 @@ function runTest({docs, pipeline, expectedResults}) {
}
}],
expectedResults: [{_id: 1, new: 3}, {_id: 2}]
})
});
})();
// When we rely on both the metaField and a measurementField we should not perform the rewrite and
@ -230,7 +230,7 @@ function runTest({docs, pipeline, expectedResults}) {
}
}],
expectedResults: [{_id: 1, new: 5}, {_id: 2, new: null}]
})
});
})();
// This test validates that $project with '$$ROOT' which requires the whole document returns the
@ -247,7 +247,7 @@ function runTest({docs, pipeline, expectedResults}) {
{_id: 1, new: {_id: 1, [timeField]: time, [metaField]: 2, a: 2}},
{_id: 2, new: {_id: 2, [timeField]: time, [metaField]: 2, b: 3}}
]
})
});
})();
(function testAddFields_WithROOT() {
@ -274,5 +274,5 @@ function runTest({docs, pipeline, expectedResults}) {
new: {_id: 2, [timeField]: time, [metaField]: 2, b: 3}
}
]
})
});
})();

View File

@ -64,7 +64,7 @@ function runTest({pipeline, shouldUseSbe, aggStages}) {
runTest({
pipeline: [{$match: {m: 17}}],
shouldUseSbe: false,
})
});
// $project by itself is not lowered except in SBE full.
jsTestLog("ian: SBE full " + sbeFullyEnabled);
@ -149,7 +149,7 @@ runTest({
runTest({
pipeline: [{$sort: {t: 1}}, {$project: {t: 1}}],
shouldUseSbe: false,
})
});
// $match -> $addFields -> $group is permitted only in SBE full.
runTest({
@ -159,7 +159,7 @@ runTest({
{$group: {_id: null, s: {$sum: "$x"}}}
],
shouldUseSbe: sbeFullyEnabled
})
});
// A stack of $project stages is permitted only in SBE full.
runTest({
@ -179,7 +179,7 @@ runTest({
{$group: {_id: "$a", n: {$sum: "$b"}}},
],
shouldUseSbe: sbeUnpackPushdownEnabled,
})
});
// The full rewrite of a group might avoid unpacking. Let's check that these are fully lowered.
runTest({
@ -188,7 +188,7 @@ runTest({
{$group: {_id: "$m", min: {$min: "$a"}}},
],
shouldUseSbe: sbeUnpackPushdownEnabled,
})
});
// Bucket unpacking should not be lowered when there is an eventFilter with a full match
// expression that is not supported in SBE. This entire pipeline should run in classic.

View File

@ -39,9 +39,10 @@ function assertBucketsAreCompressed(db, bucketsColl) {
}
const bucketDocs = bucketsColl.find().toArray();
bucketDocs.forEach(
bucketDoc => {assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version),
`Expected bucket to be compressed: ${tojson(bucketDoc)}`)});
bucketDocs.forEach(bucketDoc => {
assert(TimeseriesTest.isBucketCompressed(bucketDoc.control.version),
`Expected bucket to be compressed: ${tojson(bucketDoc)}`);
});
}
function prepareCompressedBucket() {

View File

@ -26,8 +26,8 @@ const tsOptions2 = {
timeField: "timestamp",
metaField: "metadata2"
};
const kColl = "coll"
const kBucket = "system.buckets.coll"
const kColl = "coll";
const kBucket = "system.buckets.coll";
function createWorked(collName, tsOptions = {}) {
if (Object.keys(tsOptions).length === 0) {
@ -70,12 +70,12 @@ function runTest(testCase, minRequiredVersion = null) {
return;
}
}
testCase()
testCase();
db.dropDatabase();
}
// Reset any previous run state.
db.dropDatabase()
db.dropDatabase();
// Case preexisting collection: standard.
{
@ -153,14 +153,13 @@ db.dropDatabase()
{
jsTest.log("Case collection: bucket timeseries / collection: standard.");
runTest(() => {
createWorked(kBucket, tsOptions)
createWorked(kBucket, tsOptions);
if (FixtureHelpers.isMongos(db) || TestData.testingReplicaSetEndpoint) {
// TODO SERVER-87189 Replace this with commandFailed. Now we always pass from the
// coordinator to create a collection, which will prevent using the main namespace
// if a bucket nss already exists.
createWorkedOrFailedWithCode(kColl, {}, ErrorCodes.NamespaceExists);
}
else {
} else {
// TODO SERVER-85855 creating a normal collection with an already created bucket
// timeseries should fail.
createWorked(kColl);
@ -170,7 +169,7 @@ db.dropDatabase()
jsTest.log("Case collection: bucket timeseries / collection: timeseries.");
runTest(
() => {
createWorked(kBucket, tsOptions)
createWorked(kBucket, tsOptions);
createWorked(kColl, tsOptions);
},
// Creation of bucket namespace is not idempotent before 8.0 (SERVER-89827)
@ -180,14 +179,14 @@ db.dropDatabase()
jsTest.log(
"Case collection: bucket timeseries / collection: timeseries with different options.");
runTest(() => {
createWorked(kBucket, tsOptions)
createWorked(kBucket, tsOptions);
createFailed(kColl, tsOptions2, ErrorCodes.NamespaceExists);
});
jsTest.log(
"Case collection: bucket timeseries / collection: bucket timeseries with different options.");
runTest(() => {
createWorked(kBucket, tsOptions)
createWorked(kBucket, tsOptions);
createFailed(kBucket, tsOptions2, ErrorCodes.NamespaceExists);
});

View File

@ -22,8 +22,8 @@ import {getNLatestProfilerEntries} from "jstests/libs/profiler.js";
var testDB = db.getSiblingDB(jsTestName());
assert.commandWorked(testDB.dropDatabase());
const collName1 = jsTestName() + "_1"
const collName2 = jsTestName() + "_2"
const collName1 = jsTestName() + "_1";
const collName2 = jsTestName() + "_2";
const coll1 = testDB.getCollection(collName1);
const coll2 = testDB.getCollection(collName2);

View File

@ -59,7 +59,7 @@ res = db.adminCommand({
});
summaryFieldsValidator(
res, {nErrors: 1, nInserted: 0, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0});
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0})
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0});
cursorSizeValidator(res, 1);
assert.eq(coll.countDocuments({}), 3);
@ -77,9 +77,9 @@ res = db.adminCommand({
});
summaryFieldsValidator(
res, {nErrors: 1, nInserted: 2, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0});
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0})
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1})
cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1})
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1});
cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1});
assert.eq(coll.countDocuments({}), 5);
// Test unordered inserts to 2 collections - 1 timeseries collection and 1 non-timeseries
@ -99,8 +99,8 @@ res = db.adminCommand({
});
summaryFieldsValidator(
res, {nErrors: 2, nInserted: 4, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0});
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0})
cursorEntryValidator(res.cursor.firstBatch[4], {ok: 0, idx: 4, code: 11000, n: 0})
cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: 2, n: 0});
cursorEntryValidator(res.cursor.firstBatch[4], {ok: 0, idx: 4, code: 11000, n: 0});
assert.eq(coll.countDocuments({}), 8);
assert.eq(nonTSColl.countDocuments({}), 1);
@ -118,7 +118,7 @@ res = db.adminCommand({
});
summaryFieldsValidator(
res, {nErrors: 1, nInserted: 1, nDeleted: 0, nMatched: 0, nModified: 0, nUpserted: 0});
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 2, n: 0})
cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 2, n: 0});
cursorSizeValidator(res, 2);
assert.eq(coll.countDocuments({}), 8);
assert.eq(nonTSColl.countDocuments({}), 2);

View File

@ -4,7 +4,7 @@
import {findChunksUtil} from "jstests/sharding/libs/find_chunks_util.js";
var kDbName = db.getName()
var kDbName = db.getName();
db.dropDatabase();

View File

@ -21,7 +21,7 @@ assert.commandWorked(t.insert({
}
}
}
}))
}));
const res =
t.aggregate({
@ -43,4 +43,4 @@ const res =
}
}).toArray();
assert.eq([], res, "No documents should match")
assert.eq([], res, "No documents should match");

View File

@ -103,24 +103,36 @@ runWithParams(
// Collection has no indexes except default _id index
// Verify that queries are parameterized correctly for M2 Bonsai-eligible FIND queries
cmds.forEach(
cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, assertPhysicalScan)});
cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], find, assertParamerized)});
cmds.forEach(cmdEl => {
verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, assertPhysicalScan);
});
cmds.forEach(cmdEl => {
verifyCommandParameterization(cmdEl[0], find, assertParamerized);
});
// Verify that queries are parameterized correctly for M2 Bonsai-eligible AGG queries
cmds.forEach(
cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, assertPhysicalScan)});
cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], agg, assertParamerized)});
cmds.forEach(cmdEl => {
verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, assertPhysicalScan);
});
cmds.forEach(cmdEl => {
verifyCommandParameterization(cmdEl[0], agg, assertParamerized);
});
assert.commandWorked(coll.createIndex({'a.b': 1}));
// Collection has indexes
// Verify that queries are not parameterized for M2 Bonsai-ineligible FIND queries
cmds.forEach(
cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, !assertPhysicalScan)});
cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], find, !assertParamerized)});
cmds.forEach(cmdEl => {
verifyCommandCorrectness(cmdEl[0], cmdEl[1], find, !assertPhysicalScan);
});
cmds.forEach(cmdEl => {
verifyCommandParameterization(cmdEl[0], find, !assertParamerized);
});
// Verify that queries are not parameterized for M2 Bonsai-ineligible AGG queries
cmds.forEach(
cmdEl => {verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, !assertPhysicalScan)});
cmds.forEach(cmdEl => {verifyCommandParameterization(cmdEl[0], agg, !assertParamerized)});
cmds.forEach(cmdEl => {
verifyCommandCorrectness(cmdEl[0], cmdEl[1], agg, !assertPhysicalScan);
});
cmds.forEach(cmdEl => {
verifyCommandParameterization(cmdEl[0], agg, !assertParamerized);
});
});

View File

@ -230,7 +230,7 @@ function testInputOutputPipeline({input, pipeline, expectedOutput, interestingIn
expectedOutput:
[{_id: 0, a: 2}, {_id: 1, a: 2}, {_id: 2, a: 2}, {_id: 3, a: 2}, {_id: 4, a: 2}],
interestingIndexes: []
})
});
testInputOutputPipeline({
input: docs,
@ -243,5 +243,5 @@ function testInputOutputPipeline({input, pipeline, expectedOutput, interestingIn
{_id: 4, a: {b: 2}}
],
interestingIndexes: []
})
});
}());

View File

@ -18,12 +18,11 @@ assert.commandWorked(t.insert({a: {b: 3}}));
assert.commandWorked(t.insert({a: {b: 4}}));
assert.commandWorked(t.insert({a: {b: 5}}));
const runTest =
() => {
const res = t.explain("executionStats").aggregate([{$match: {'a.b': 2}}]);
assert.eq(1, res.executionStats.nReturned);
assertValueOnPlanPath("Exchange", res, "child.nodeType");
}
const runTest = () => {
const res = t.explain("executionStats").aggregate([{$match: {'a.b': 2}}]);
assert.eq(1, res.executionStats.nReturned);
assertValueOnPlanPath("Exchange", res, "child.nodeType");
};
// Test exchange with both Sargable nodes & Filter nodes
runWithParams([{key: "internalCascadesOptimizerDisableSargableWhenNoIndexes", value: false}],

View File

@ -31,7 +31,7 @@ let runTest = function(mongodOptions) {
let mongod = startMongodOnExistingPath(dbpath, mongodOptions);
const buildInfo = assert.commandWorked(mongod.getDB(baseName).adminCommand({"buildInfo": 1}));
const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize')
const isSanitizerEnabled = buildInfo.buildEnvironment.ccflags.includes('-fsanitize');
// Force a checkpoint and make a copy of the turtle file.
assert.commandWorked(mongod.getDB(baseName).adminCommand({fsync: 1}));

View File

@ -22,7 +22,7 @@ export function runWithEncryption(edb, func) {
assert(!edb.getMongo().isAutoEncryptionEnabled(),
"Cannot switch to encrypted connection on already encrypted connection. Do not " +
"nest calls to runWithEncryption.")
"nest calls to runWithEncryption.");
edb.getMongo().toggleAutoEncryption(true);
@ -90,7 +90,9 @@ DB.prototype.eadminCommand = function(cmd, params) {
};
DBCollection.prototype.ecount = function(filter) {
return runWithEncryption(this, () => {return this.find(filter).toArray().length});
return runWithEncryption(this, () => {
return this.find(filter).toArray().length;
});
};
// Note that efind does not exist since find executes

View File

@ -9,7 +9,7 @@ const MAX_MS = 1000;
/* Pick a random millisecond value between 400 and 1000 for the lag value */
function randomMSFromInterval(minMS, maxMS) { // min and max included
return Math.floor(Math.random() * (maxMS - minMS + 1) + minMS)
return Math.floor(Math.random() * (maxMS - minMS + 1) + minMS);
}
/* Returns true if the error code indicates the node is currently shutting down. */

View File

@ -272,7 +272,7 @@ const topology = DiscoverTopology.findConnectedNodes(db);
if (topology.type == Topology.kShardedCluster) {
// Perform restore for the config server.
const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0'
const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0';
let configMongo = new Mongo(topology.configsvr.nodes[0]);
performMagicRestore(configMongo, path, "configsvr", {"replSet": "config-rs", "configsvr": ''});

View File

@ -37,7 +37,7 @@ const topology = DiscoverTopology.findConnectedNodes(db);
if (topology.type == Topology.kReplicaSet) {
const conn = db.getMongo();
const dbPathPrefix = MongoRunner.dataPath + '../magicRestore/node0'
const dbPathPrefix = MongoRunner.dataPath + '../magicRestore/node0';
let [cursor, metadata] = takeBackup(conn, dbPathPrefix);
writeMetadataInfo(conn, metadata.checkpointTimestamp);
cursor.close();
@ -52,7 +52,7 @@ if (topology.type == Topology.kReplicaSet) {
let maxCheckpointTimestamp = Timestamp();
// Take configsvr backup.
const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0'
const path = MongoRunner.dataPath + '../magicRestore/configsvr/node0';
restorePaths.push(path);
let nodeMongo = new Mongo(topology.configsvr.nodes[0]);

View File

@ -246,7 +246,7 @@ function checkReplDbhashBackgroundThread(hosts) {
jsTestLog(`About to run setSecurity token on ${rst}`);
rst.nodes.forEach(node => node._setSecurityToken(token));
jsTestLog(`Running checkcollection for ${dbName} with token ${token}`)
jsTestLog(`Running checkcollection for ${dbName} with token ${token}`);
return checkCollectionHashesForDB(dbName, clusterTime);
} finally {
rst.nodes.forEach(node => node._setSecurityToken(undefined));

View File

@ -33,7 +33,7 @@ export function getQueryPlanner(explain) {
assert(explain.hasOwnProperty("stages"), explain);
const stage = explain.stages[0];
assert(stage.hasOwnProperty("$cursor"), explain);
const cursorStage = stage.$cursor
const cursorStage = stage.$cursor;
assert(cursorStage.hasOwnProperty("queryPlanner"), explain);
return cursorStage.queryPlanner;
}
@ -112,7 +112,7 @@ export function getWinningPlan(queryPlanner) {
}
export function getWinningSBEPlan(queryPlanner) {
assert(queryPlanner.winningPlan.hasOwnProperty("slotBasedPlan"), queryPlanner)
assert(queryPlanner.winningPlan.hasOwnProperty("slotBasedPlan"), queryPlanner);
return queryPlanner.winningPlan.slotBasedPlan;
}
@ -298,7 +298,7 @@ export function getAllPlanStages(root) {
* This helper function can be used for any optimizer.
*/
export function getPlanStage(root, stage) {
assert(stage, "Stage was not defined in getPlanStage.")
assert(stage, "Stage was not defined in getPlanStage.");
var planStageList = getPlanStages(root, stage);
if (planStageList.length === 0) {
@ -636,7 +636,7 @@ export function getAggPlanStages(root, stage, useQueryPlannerSection = false) {
* This helper function can be used for any optimizer.
*/
export function getAggPlanStage(root, stage, useQueryPlannerSection = false) {
assert(stage, "Stage was not defined in getAggPlanStage.")
assert(stage, "Stage was not defined in getAggPlanStage.");
let planStageList = getAggPlanStages(root, stage, useQueryPlannerSection);
if (planStageList.length === 0) {
@ -667,7 +667,7 @@ export function aggPlanHasStage(root, stage) {
* returns true if the plan has a stage called 'stage'.
*/
export function planHasStage(db, root, stage) {
assert(stage, "Stage was not defined in planHasStage.")
assert(stage, "Stage was not defined in planHasStage.");
return getPlanStages(root, stage).length > 0;
}
@ -930,7 +930,7 @@ export function assertCoveredQueryAndCount({collection, query, project, count})
"Winning plan was not covered: " + tojson(explain.queryPlanner.winningPlan));
break;
default:
break
break;
}
// Same query as a count command should also be covered.
@ -944,7 +944,7 @@ export function assertCoveredQueryAndCount({collection, query, project, count})
assertExplainCount({explainResults: explain, expectedCount: count});
break;
default:
break
break;
}
}

View File

@ -52,7 +52,7 @@ export function verifyGetDiagnosticData(adminDb, logData = true, assumeMultiserv
TestData.testingReplicaSetEndpoint) {
const hasKnownData =
(data.hasOwnProperty("shard") && data.shard.hasOwnProperty("serverStatus")) ||
(data.hasOwnProperty("router") && data.router.hasOwnProperty("connPoolStats"))
(data.hasOwnProperty("router") && data.router.hasOwnProperty("connPoolStats"));
assert(hasKnownData,
"does not have 'shard.serverStatus' nor 'router.connPoolStats' in '" +
tojson(data) + "'");

View File

@ -12,7 +12,7 @@ function isTimeToWake(waitFor) {
return false;
}
} else {
jsTestLog("Was not given a termination condition. Will keep looping forever.")
jsTestLog("Was not given a termination condition. Will keep looping forever.");
}
return false;
}
@ -47,7 +47,7 @@ export function hangTestToAttachGDB(st, opts) {
jsTestLog("Here are the ports to connect to");
logClusterPorts(st);
}
jsTestLog("Test is sleeping waiting for you to connect")
jsTestLog("Test is sleeping waiting for you to connect");
if (opts.waitFor && isTimeToWake(opts.waitFor)) {
jsTestLog("Breaking sleep loop");
break;

View File

@ -53,7 +53,7 @@ export function usedBonsaiOptimizer(explain) {
return false;
}
}
return true
return true;
}
// This section handles the explain output for unsharded queries.

View File

@ -15,7 +15,7 @@ const originalAssertEq = assert.eq;
// The name of the implicitly added timestamp field.
const timeFieldName = "overrideTimeFieldName";
const metaFieldName = "metaFieldName"
const metaFieldName = "metaFieldName";
const denylistedNamespaces = [
/^admin\./,
@ -188,7 +188,7 @@ function cleanUpResultCursor(result, batchName) {
}
result["cursor"][batchName].forEach(doc => {
delete doc[timeFieldName];
})
});
}
/**

View File

@ -28,7 +28,7 @@ function populateIndexFilterSetIfQuerySettingsArePresent(response) {
function processAggregateResponse(cmdObj, response) {
if (cmdObj.pipeline.some(stage => stage.hasOwnProperty("$planCacheStats"))) {
for (let cacheEntry of response.cursor.firstBatch) {
cacheEntry.indexFilterSet = cacheEntry.hasOwnProperty('querySettings')
cacheEntry.indexFilterSet = cacheEntry.hasOwnProperty('querySettings');
}
}
@ -138,7 +138,7 @@ function runCommandOverride(conn, dbName, cmdName, cmdObj, clientFunction, makeF
// Remove all query settings associated with that collection upon collection drop. This
// is the semantics of index filters.
planCacheClearFiltersToRemoveAllQuerySettings(conn,
{planCacheClearFilters: cmdObj.drop})
{planCacheClearFilters: cmdObj.drop});
// Drop the collection.
return clientFunction.apply(conn, makeFuncArgs(cmdObj));

View File

@ -1,10 +1,9 @@
assert.soon =
function(func) {
assert.soon = function(func) {
if (typeof (func) == "string") {
eval(func);
} else {
func();
}
}
};
doassert = function() { /* noop */ }
doassert = function() { /* noop */ };

View File

@ -33,7 +33,7 @@ export function shouldSkipCommand(_commandName, commandObj) {
// Ignore fsync to avoid locking the initial sync node without unlocking.
"fsync": true,
"fsyncUnlock": true,
}
};
if (_commandName in skippedCommands) {
return true;

View File

@ -55,7 +55,7 @@ function maybeSendCommandToInitialSyncNodesShardedCluster(
const shardMap = conn.adminCommand({getShardMap: 1});
if (!shardMap.ok) {
jsTestLog("Unable to run getShardMap: " + tojson(shardMap) +
", skipping forwarding command " + _commandName + " to initial sync node")
", skipping forwarding command " + _commandName + " to initial sync node");
return func.apply(conn, makeFuncArgs(commandObj));
}

View File

@ -25,7 +25,7 @@ function runCommandWithRecordIdsReplicated(
const collName = commandObj[commandName];
const ns = dbName + "." + collName;
if (commandName === "drop") {
createdCollections.delete(ns)
createdCollections.delete(ns);
return func.apply(conn, makeFuncArgs(commandObj));
}
if (!commandsToOverride.has(commandName) || createdCollections.has(ns) ||

View File

@ -114,9 +114,7 @@ MongoRunner.validateCollectionsCallback = function(port, options) {
}
assert.commandWorked(res);
dbs = res.databases.map(dbInfo => {
return {
name: dbInfo.name, tenant: dbInfo.tenantId
}
return {name: dbInfo.name, tenant: dbInfo.tenantId};
});
})
.execute();

Some files were not shown because too many files have changed in this diff Show More