0
0
mirror of https://github.com/mongodb/mongo.git synced 2024-11-24 16:46:00 +01:00

SERVER-1424 Rewrite smoke.py.

Split out the passthrough tests into separate suites. The MongoDB
deployment is started up by resmoke.py so that we can record the
success/failure of each individual test in MCI.

Added support for parallel execution of tests by dispatching to
multiple MongoDB deployments.

Added support for grouping different kinds of tests (e.g. C++ unit
tests, dbtests, and jstests) so that they can be run together. This
allows for customizability in specifying what tests to execute when
changes are made to a particular part of the code.
This commit is contained in:
Max Hirschhorn 2015-05-08 14:20:43 -04:00
parent c7ce2e2c56
commit 424314f65e
119 changed files with 6207 additions and 3214 deletions

View File

@ -1,136 +0,0 @@
# -*- mode: python -*-
#
# This SConscript file describes the build rules for smoke tests (scons smoke,
# e.g.)
import os
from buildscripts import utils
Import( "has_option env" )
def add_exe( v ):
    """Return program name 'v' wrapped in the platform executable
    prefix/suffix SCons construction variables."""
    return "${{PROGPREFIX}}{0}${{PROGSUFFIX}}".format(v)
smokeEnv = env.Clone()
smokeEnv['ENV']['PATH']=os.environ['PATH']
# copy in any environment variables beginning with MONGO_; these
# are used by buildscripts/buildlogger.py
for name, value in os.environ.items():
if name.startswith('MONGO_'):
smokeEnv['ENV'][name] = value
smokeEnv.Alias( "dummySmokeSideEffect", [], [] )
smokeFlags = []
# Ugh. Frobbing the smokeFlags must precede using them to construct
# actions, I think.
if has_option( 'smokedbprefix'):
smokeFlags += ['--smoke-db-prefix', GetOption( 'smokedbprefix')]
if 'startMongodSmallOplog' in COMMAND_LINE_TARGETS:
smokeFlags += ["--small-oplog"]
if has_option('smokeauth'):
smokeFlags += ['--auth']
def addTest(name, deps, actions):
    """Register alias 'name' that builds 'deps' and then runs 'actions'.

    AlwaysBuild marks the alias permanently out-of-date so the test runs on
    every invocation.
    """
    smokeEnv.Alias( name, deps, actions )
    smokeEnv.AlwaysBuild( name )
    # Prevent smoke tests from running in parallel
    smokeEnv.SideEffect( "dummySmokeSideEffect", name )
def addSmoketest( name, deps, extraSmokeArgs=[] ):
    """Register a smoke.py-backed test alias.

    The alias name is mapped to a smoke.py suite name: "smoke" runs the
    dbtest binary, "smokeJs" becomes "js", and any other name is passed
    through unchanged.
    """
    # Convert from smoke to test, smokeJs to js, and foo to foo
    if name == "smoke":
        suite = File("dbtest").path
    elif name.startswith("smoke"):
        suite = name[5].lower() + name[6:]
    else:
        suite = name
    args = smokeFlags + [suite] + extraSmokeArgs
    addTest(name, deps, utils.run_smoke_command(*args))
def addSmokeSuite( name, suitefile, needMongod=False ):
    """Register alias 'name' that runs smoke.py in file mode on 'suitefile'.

    Unless 'needMongod' is set, smoke.py is told not to start a mongod.
    """
    # Resolve an initial '#' (top-of-tree marker) in the suitefile path.
    suitefile = str(env.File(suitefile))
    args = ['--mode', 'files', '--from-file', suitefile]
    if not needMongod:
        args.append('--dont-start-mongod')
    addTest(name, [suitefile], utils.run_smoke_command(*args))
addSmoketest( "smoke", [ add_exe( "dbtest" ), add_exe( "mongod" ), add_exe( "mongo" ) ] )
addSmoketest( "mongosTest", [ add_exe( 'mongos' ) ])
addSmokeSuite( "smokeCppUnittests", "$UNITTEST_LIST" )
# These tests require the mongo shell
if not has_option('noshell'):
addSmoketest( "smokeJs", [add_exe("mongo"), add_exe("mongod")] )
addSmoketest( "smokeJsCore", [add_exe("mongo"), add_exe("mongod")] )
addSmoketest( "smokeClone", [ add_exe("mongo"), add_exe("mongod") ] )
addSmoketest( "smokeRepl", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongobridge") ] )
addSmoketest( "smokeReplSets", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongobridge") ] )
addSmoketest( "smokeDur", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe('mongorestore') ] )
addSmoketest( "smokeDisk", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongodump" ), add_exe( "mongorestore" ) ] )
addSmoketest( "smokeAuth", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
addSmoketest( "smokeParallel", [ add_exe( "mongo" ), add_exe( "mongod" ) ] )
addSmoketest( "smokeSharding", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongos"), add_exe('mongofiles') ] )
addSmoketest( "smokeJsPerf", [ add_exe("mongo"), add_exe("mongod") ] )
addSmoketest( "smokeNoPassthroughWithMongod", [add_exe("mongo"), add_exe("mongod"), add_exe("mongos") ])
addSmoketest( "smokeNoPassthrough", [add_exe("mongo"), add_exe("mongod"), add_exe("mongos") ])
addSmoketest( "smokeSlow1", [add_exe("mongo"), add_exe("mongod"), add_exe("mongos") ])
addSmoketest( "smokeSlow2", [add_exe("mongo"), add_exe("mongod"), add_exe("mongos") ])
addSmoketest( "smokeQuota", [ add_exe("mongo"), add_exe("mongod") ] )
addSmoketest( "smokeTool", [ add_exe( "mongo" ), add_exe("mongod"), add_exe("mongos"), "tools" ] )
addSmoketest( "smokeAggregation", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongos" ) ] )
addSmoketest( "smokeMultiVersion", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongos" ) ] )
addSmoketest( "smokeFailPoint", [ add_exe( "mongo" ), add_exe( "mongod" ), add_exe( "mongos" ) ] )
addSmoketest( "smokeSsl", [ add_exe("mongo"), add_exe("mongod"), add_exe("mongos"), "tools" ],
['--use-ssl','--use-x509'] )
addSmoketest( "smokeFailingTests", [ add_exe( "mongo" ), add_exe( "mongod" ) ], ['--only-old-fails', '--continue-on-failure'] )
addSmoketest( "smokeResetFails", [ add_exe( "mongo" ), add_exe( "mongod" ) ], ['--reset-old-fails'] )
smokeEnv.Alias( "startMongodSmallOplog", [add_exe("mongod")], [] );
smokeEnv.AlwaysBuild( "startMongodSmallOplog" );
smokeEnv.SideEffect( "dummySmokeSideEffect", "startMongodSmallOplog" )
def addMongodReqTargets( env, target, source ):
    """SCons action: make each mongod-requiring smoke alias depend on
    startMongod and fold it into the smokeAll umbrella target."""
    for mongod_req_target in ( "smokeJs", ):
        smokeEnv.Depends( mongod_req_target, "startMongod" )
        smokeEnv.Depends( "smokeAll", mongod_req_target )
smokeEnv.Alias( "addMongodReqTargets", [], [addMongodReqTargets] )
smokeEnv.AlwaysBuild( "addMongodReqTargets" )
smokeEnv.Alias( "smokeAll", [ "smoke", "mongosTest", "smokeClone", "smokeRepl", "addMongodReqTargets", "smokeDisk", "smokeAuth", "smokeSharding", "smokeTool" ] )
smokeEnv.AlwaysBuild( "smokeAll" )
def addMongodReqNoJsTargets( env, target, source ):
    """SCons action wiring mongod-dependent, no-JS targets into smokeAllNoJs."""
    # NOTE(review): the list below is empty, so the loop body never executes;
    # apparently retained for symmetry with addMongodReqTargets.
    mongodReqTargets = []
    for target in mongodReqTargets:
        smokeEnv.Depends( target, "startMongod" )
        smokeEnv.Depends( "smokeAllNoJs", target )
smokeEnv.Alias( "addMongodReqNoJsTargets", [], [addMongodReqNoJsTargets] )
smokeEnv.AlwaysBuild( "addMongodReqNoJsTargets" )
smokeEnv.Alias( "smokeAllNoJs", [ "smoke", "mongosTest", "addMongodReqNoJsTargets" ] )
smokeEnv.AlwaysBuild( "smokeAllNoJs" )
def run_shell_tests(env, target, source):
    """SCons action: run the shell test harness in buildscripts/test_shell."""
    # Local import: test_shell is only needed when this alias is invoked.
    from buildscripts import test_shell
    # 'windows' is presumably a truthy platform flag defined elsewhere in the
    # build scripts (not visible in this chunk) -- TODO confirm. The and/or
    # idiom is safe here because "mongo.exe" is always truthy.
    test_shell.mongo_path = windows and "mongo.exe" or "mongo"
    test_shell.run_tests()
env.Alias("test_shell", [], [run_shell_tests])
env.AlwaysBuild("test_shell")

View File

@ -2079,6 +2079,5 @@ env.AddMethod(injectMongoIncludePaths, 'InjectMongoIncludePaths')
env.Alias("compiledb", env.CompilationDatabase('compile_commands.json'))
env.SConscript('src/SConscript', variant_dir='$BUILD_DIR', duplicate=False)
env.SConscript('SConscript.smoke')
env.Alias('all', ['core', 'tools', 'dbtest', 'unittests'])

View File

@ -1,226 +1,211 @@
#!/usr/bin/python
#!/usr/bin/env python
"""
Command line test utility for MongoDB tests of all kinds.
CURRENTLY IN ACTIVE DEVELOPMENT
If you are not a developer, you probably want to use smoke.py
Command line utility for executing MongoDB tests of all kinds.
"""
import logging
import logging.config
import optparse
import os
import re
import urllib
from __future__ import absolute_import
import smoke
import smoke_config
import json
import os.path
import random
import signal
import sys
import time
import traceback
USAGE = \
"""resmoke.py <YAML/JSON CONFIG>
All options are specified as YAML or JSON - the configuration can be loaded via a file, as a named
configuration in the "smoke_config" module, piped as stdin, or specified on the command line as
options via the --set, --unset, and --push operators.
NOTE: YAML can only be used if the PyYaml library is available on your system. Only JSON is
supported on the command line.
For example:
resmoke.py './jstests/disk/*.js'
results in:
Test Configuration:
---
tests:
roots:
- ./jstests/disk/*.js
suite:
...
executor:
fixtures:
...
testers:
...
logging:
...
Named sets of options are available in the "smoke_config" module, including:
--jscore
--sharding
--replicasets
--disk
For example:
resmoke.py --jscore
resmoke.py --sharding
""" + smoke.json_options.JSONOptionParser.DEFAULT_USAGE
DEFAULT_LOGGER_CONFIG = {}
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from buildscripts import resmokelib
# NOTE(review): scraped-diff artifact -- the lines of TWO functions are
# interleaved below: get_local_logger_filenames(logging_root), which collects
# file:// URLs for "filename" entries in the logging handlers config, and
# _execute_suite(suite, logging_config), which runs each test group of a
# suite. Per-line ownership is marked; consult the real file before editing.
def get_local_logger_filenames(logging_root):
"""Helper to extract filenames from the logging config for helpful reporting to the user."""
def _execute_suite(suite, logging_config):
"""
Executes each test group of 'suite', failing fast if requested.
"""
# -- get_local_logger_filenames body --
filenames = []
if "handlers" not in logging_root:
return filenames
# -- _execute_suite body --
logger = resmokelib.logging.loggers.EXECUTOR
# -- get_local_logger_filenames body (.iteritems() => Python 2) --
for handler_name, handler_info in logging_root["handlers"].iteritems():
if "filename" in handler_info:
logger_filename = handler_info["filename"]
filenames.append("file://%s" %
urllib.pathname2url(os.path.abspath(logger_filename)))
# -- _execute_suite body: optional deterministic shuffle of each group --
for group in suite.test_groups:
if resmokelib.config.SHUFFLE:
logger.info("Shuffling order of tests for %ss in suite %s. The seed is %d.",
group.test_kind, suite.get_name(), resmokelib.config.RANDOM_SEED)
random.seed(resmokelib.config.RANDOM_SEED)
random.shuffle(group.tests)
# -- end of get_local_logger_filenames --
return filenames
# -- _execute_suite continues: dry-run only lists what would be executed --
if resmokelib.config.DRY_RUN == "tests":
sb = []
sb.append("Tests that would be run for %ss in suite %s:"
% (group.test_kind, suite.get_name()))
if len(group.tests) > 0:
for test in group.tests:
sb.append(test)
else:
sb.append("(no tests)")
logger.info("\n".join(sb))
# Set a successful return code on the test group because we want to output the tests
# that would get run by any other suites the user specified.
group.return_code = 0
continue
if len(group.tests) == 0:
logger.info("Skipping %ss, no tests to run", group.test_kind)
continue
group_config = suite.get_executor_config().get(group.test_kind, {})
executor = resmokelib.testing.executor.TestGroupExecutor(logger,
group,
logging_config,
**group_config)
try:
executor.run()
# Fail fast: propagate the first failing group's code and stop the suite.
if resmokelib.config.FAIL_FAST and group.return_code != 0:
suite.return_code = group.return_code
return
except resmokelib.errors.StopExecution:
suite.return_code = 130 # Simulate SIGINT as exit code.
return
except:
logger.exception("Encountered an error when running %ss of suite %s.",
group.test_kind, suite.get_name())
suite.return_code = 2
return
def _log_summary(logger, suites, time_taken):
if len(suites) > 1:
sb = []
sb.append("Summary of all suites: %d suites ran in %0.2f seconds"
% (len(suites), time_taken))
for suite in suites:
suite_sb = []
suite.summarize(suite_sb)
sb.append(" %s: %s" % (suite.get_name(), "\n ".join(suite_sb)))
logger.info("=" * 80)
logger.info("\n".join(sb))
def _summarize_suite(suite):
sb = []
suite.summarize(sb)
return "\n".join(sb)
def _dump_suite_config(suite, logging_config):
    """
    Returns a string that represents the YAML configuration of a suite.

    TODO: include the "options" key in the result
    """
    dump_yaml = resmokelib.utils.dump_yaml
    sections = [
        "YAML configuration of suite %s" % (suite.get_name()),
        dump_yaml({"selector": suite.get_selector_config()}),
        "",
        dump_yaml({"executor": suite.get_executor_config()}),
        "",
        dump_yaml({"logging": logging_config}),
    ]
    return "\n".join(sections)
def _write_report_file(suites, pathname):
    """
    Writes the report.json file if requested.

    Combines the latest report of every test group across all suites into a
    single JSON document at 'pathname'.
    """
    latest_reports = (group.get_latest_report()
                      for suite in suites
                      for group in suite.test_groups)
    reports = [report for report in latest_reports if report is not None]
    combined = resmokelib.testing.report.TestReport.combine(*reports).as_dict()
    with open(pathname, "w") as fp:
        json.dump(combined, fp)
# NOTE(review): scraped-diff artifact -- this main() appears to mix lines from
# the OLD smoke/json_options-based resmoke.py and the NEW resmokelib-based
# rewrite on alternating lines (the smoke.* vs resmokelib.* prefixes mark the
# two sides). It is not runnable as shown; per-cluster ownership is marked
# below. Consult the actual commit for either coherent version.
def main():
start_time = time.time()
# OLD: named smoke_config configurations feed the JSON option parser.
named_configs = smoke_config.get_named_configs()
# NEW: resmokelib command-line parsing and logging bootstrap.
values, args = resmokelib.parser.parse_command_line()
parser = smoke.json_options.JSONOptionParser(usage=USAGE,
configfile_args=named_configs)
logging_config = resmokelib.parser.get_logging_config(values)
resmokelib.logging.config.apply_config(logging_config)
resmokelib.logging.flush.start_thread()
help = \
"""Just outputs the configured JSON options."""
resmokelib.parser.update_config_vars(values)
parser.add_option('--dump-options', default=False, dest='dump_options', action="store_true",
help=help)
exec_logger = resmokelib.logging.loggers.EXECUTOR
resmoke_logger = resmokelib.logging.loggers.new_logger("resmoke", parent=exec_logger)
help = \
"""Outputs all the tests found with metadata."""
parser.add_option('--dump-tests', default=False, dest='dump_tests', action="store_true",
help=help)
help = \
"""Outputs the tests in the suite."""
parser.add_option('--dump-suite', default=False, dest='dump_suite', action="store_true",
help=help)
# OLD: parse JSON/YAML options and fill in defaults for missing sections.
values, args, json_root = parser.parse_json_args()
# Assume remaining arguments are test roots
if args:
json_root = smoke.json_options.json_update_path(json_root, "tests.roots", args)
# Assume all files in suite if not specified
if "suite" not in json_root or json_root["suite"] is None:
json_root["suite"] = {}
# Assume default_logging if no other logging specified
if "logging" not in json_root or json_root["logging"] is None:
default_logging = \
smoke.json_options.json_file_load(named_configs["log_default"])
json_root["logging"] = default_logging["logging"]
if "executor" not in json_root or json_root["executor"] is None:
default_executor = \
smoke.json_options.json_file_load(named_configs["executor_default"])
json_root["executor"] = default_executor["executor"]
if not values.dump_options:
print "Test Configuration: \n---"
for key in ["tests", "suite", "executor", "logging"]:
if key in json_root:
print smoke.json_options.json_dump({key: json_root[key]}),
print
if values.dump_options:
return
# OLD: nested helpers validating the config and building the test list/suite.
def validate_config(tests=None, suite=None, executor=None, logging=None, **kwargs):
if len(kwargs) > 0:
raise optparse.OptionValueError(
"Unrecognized test options: %s" % kwargs)
if not all([tests is not None, executor is not None]):
raise optparse.OptionValueError(
"Test options must contain \"tests\" and \"executor\".")
validate_config(**json_root)
logging.config.dictConfig(json_root["logging"])
def re_compile_all(re_patterns):
if isinstance(re_patterns, basestring):
re_patterns = [re_patterns]
return [re.compile(pattern) for pattern in re_patterns]
def build_tests(roots=["./"],
include_files=[],
include_files_except=[],
exclude_files=[],
exclude_files_except=[],
extract_metadata=True,
**kwargs):
if len(kwargs) > 0:
raise optparse.OptionValueError(
"Unrecognized options for tests: %s" % kwargs)
file_regex_query = smoke.suites.RegexQuery(re_compile_all(include_files),
re_compile_all(
include_files_except),
re_compile_all(
exclude_files),
re_compile_all(exclude_files_except))
if isinstance(roots, basestring):
roots = [roots]
return smoke.tests.build_tests(roots, file_regex_query, extract_metadata)
tests = build_tests(**json_root["tests"])
if values.dump_tests:
print "Tests:\n%s" % tests
def build_suite(tests,
include_tags=[],
include_tags_except=[],
exclude_tags=[],
exclude_tags_except=[],
**kwargs):
if len(kwargs) > 0:
raise optparse.OptionValueError(
"Unrecognized options for suite: %s" % kwargs)
tag_regex_query = smoke.suites.RegexQuery(re_compile_all(include_tags),
re_compile_all(
include_tags_except),
re_compile_all(exclude_tags),
re_compile_all(exclude_tags_except))
return smoke.suites.build_suite(tests, tag_regex_query)
suite = build_suite(tests, **json_root["suite"])
suite.sort(key=lambda test: test.uri)
if values.dump_suite:
print "Suite:\n%s" % suite
print "Running %s tests in suite (out of %s tests found)..." % (len(tests), len(suite))
local_logger_filenames = get_local_logger_filenames(json_root["logging"])
if local_logger_filenames:
print "\nOutput from tests redirected to:\n\t%s\n" % \
"\n\t".join(local_logger_filenames)
# NEW: --list-suites handling and the execution loop over resmokelib suites.
if values.list_suites:
suite_names = resmokelib.parser.get_named_suites()
resmoke_logger.info("Suites available to execute:\n%s", "\n".join(suite_names))
sys.exit(0)
suites = resmokelib.parser.get_suites(values, args)
try:
# OLD execution entry point:
smoke.executor.exec_suite(suite, logging.getLogger("executor"), **json_root["executor"])
# NEW execution loop:
for suite in suites:
resmoke_logger.info(_dump_suite_config(suite, logging_config))
suite.record_start()
_execute_suite(suite, logging_config)
suite.record_end()
resmoke_logger.info("=" * 80)
resmoke_logger.info("Summary of %s suite: %s",
suite.get_name(), _summarize_suite(suite))
if resmokelib.config.FAIL_FAST and suite.return_code != 0:
time_taken = time.time() - start_time
_log_summary(resmoke_logger, suites, time_taken)
sys.exit(suite.return_code)
time_taken = time.time() - start_time
_log_summary(resmoke_logger, suites, time_taken)
# Exit with a nonzero code if any of the suites failed.
exit_code = max(suite.return_code for suite in suites)
sys.exit(exit_code)
finally:
if local_logger_filenames:
print "\nOutput from tests was redirected to:\n\t%s\n" % \
"\n\t".join(local_logger_filenames)
if resmokelib.config.REPORT_FILE is not None:
_write_report_file(suites, resmokelib.config.REPORT_FILE)
if __name__ == "__main__":

    def _dump_stacks(signum, frame):
        """
        Signal handler that will dump the stacks of all threads.
        """
        header_msg = "Dumping stacks due to SIGUSR1 signal"
        sb = []
        sb.append("=" * len(header_msg))
        sb.append(header_msg)
        sb.append("=" * len(header_msg))
        # sys._current_frames() maps each thread id to its topmost stack frame.
        frames = sys._current_frames()
        sb.append("Total threads: %d" % (len(frames)))
        sb.append("")
        for thread_id in frames:
            stack = frames[thread_id]
            sb.append("Thread %d:" % (thread_id))
            sb.append("".join(traceback.format_stack(stack)))
        sb.append("=" * len(header_msg))
        print "\n".join(sb)

    try:
        signal.signal(signal.SIGUSR1, _dump_stacks)
    except AttributeError:
        # signal.SIGUSR1 is not defined on Windows, so stack dumping is
        # unavailable there.
        print "Cannot catch signals on Windows"
    main()

View File

@ -1,103 +0,0 @@
#!/usr/bin/python
"""
Sample utility to build test metadata JSON (i.e. tags) from test files that contain them.
CURRENTLY IN ACTIVE DEVELOPMENT
If you are not a developer, you probably want to look at smoke.py
"""
import re
import smoke
import smoke_config
USAGE = \
"""resmoke_build_metadata.py <YAML/JSON CONFIG>
Generates test metadata based on information in test files themselves. All options are specified \
as YAML or JSON - the configuration is the "tests" subset of the configuration for a resmoke.py
test run.
NOTE: YAML can only be used if the PyYaml library is available on your system. Only JSON is
supported on the command line.
For example:
resmoke_build_metadata.py './jstests/disk/*.js'
results in:
Metadata extraction configuration:
---
tests:
roots:
- ./jstests/disk/*.js
...
Named sets of options are available in the "smoke_config" module, including:
--jscore
--sharding
--replicasets
--disk
For example:
resmoke.py --jscore
resmoke.py --sharding
""" + smoke.json_options.JSONOptionParser.DEFAULT_USAGE
def main():
    """
    Parse the JSON/YAML "tests" configuration and write metadata (tags) for
    every test file it matches.

    Raises Exception when no "tests" section is supplied.
    """
    # BUGFIX: this module never imported optparse, so the OptionValueError
    # raises below would have failed with NameError on the error path.
    import optparse

    parser = smoke.json_options.JSONOptionParser(usage=USAGE,
                                                 configfile_args=smoke_config.get_named_configs())
    values, args, json_root = parser.parse_json_args()
    # Only the "tests" subset of a full resmoke configuration is relevant here.
    if "tests" in json_root:
        json_root = {"tests": json_root["tests"]}
    # Assume remaining arguments are test roots
    if args:
        json_root = smoke.json_options.json_update_path(json_root, "tests.roots", args)
    print("Metadata extraction configuration:")
    print(smoke.json_options.json_dump(json_root))
    if not "tests" in json_root or json_root["tests"] is None:
        raise Exception("No tests specified.")

    def re_compile_all(re_patterns):
        # Accept either a single pattern or a list of patterns.
        if isinstance(re_patterns, basestring):
            re_patterns = [re_patterns]
        return [re.compile(pattern) for pattern in re_patterns]

    def build_test_metadata(roots=["./"],
                            include_files=[],
                            include_files_except=[],
                            exclude_files=[],
                            exclude_files_except=[],
                            **kwargs):
        # Reject unknown keys so configuration typos fail loudly.
        if len(kwargs) > 0:
            raise optparse.OptionValueError(
                "Unrecognized options for building test metadata: %s" % kwargs)
        file_regex_query = smoke.suites.RegexQuery(re_compile_all(include_files),
                                                   re_compile_all(include_files_except),
                                                   re_compile_all(exclude_files),
                                                   re_compile_all(exclude_files_except))
        tests = smoke.tests.build_tests(roots, file_regex_query, extract_metadata=True)
        print("Writing test metadata for %s tests..." % len(tests))
        smoke.tests.write_metadata(tests, json_only=True)
        print("Test metadata written.")

    build_test_metadata(**json_root["tests"])


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,4 @@
from __future__ import absolute_import
from .suites import NAMED_SUITES
from .loggers import NAMED_LOGGERS

View File

@ -0,0 +1,36 @@
"""
Defines a mapping of shortened names for logger configuration files to
their full path.
"""
from __future__ import absolute_import
import os
import os.path
def _get_named_loggers():
"""
Explores this directory for any YAML configuration files.
Returns a mapping of basenames without the file extension to their
full path.
"""
dirname = os.path.dirname(__file__)
named_loggers = {}
try:
(root, _dirs, files) = os.walk(dirname).next()
for filename in files:
(short_name, ext) = os.path.splitext(filename)
if ext in (".yml", ".yaml"):
pathname = os.path.join(root, filename)
named_loggers[short_name] = os.path.relpath(pathname)
except StopIteration:
# 'dirname' does not exist, which should be impossible because it contains __file__.
raise IOError("Directory '%s' does not exist" % (dirname))
return named_loggers
NAMED_LOGGERS = _get_named_loggers()

View File

@ -0,0 +1,13 @@
logging:
executor:
format: '[%(name)s] %(message)s'
handlers:
- class: logging.StreamHandler
tests:
format: '[%(name)s] %(message)s'
handlers:
- class: buildlogger
fixture:
format: '%(message)s'
handlers:
- class: buildlogger

View File

@ -0,0 +1,13 @@
logging:
executor:
format: '%(asctime)s [%(name)s] %(message)s'
handlers:
- class: logging.StreamHandler
fixture:
format: '[%(name)s] %(message)s'
handlers:
- class: logging.StreamHandler
tests:
format: '%(asctime)s [%(name)s] %(message)s'
handlers:
- class: logging.StreamHandler

View File

@ -0,0 +1,19 @@
logging:
executor:
format: '%(asctime)s [%(name)s] %(message)s'
handlers:
- class: logging.FileHandler
filename: executor.log
mode: w
fixture:
format: '[%(name)s] %(message)s'
handlers:
- class: logging.FileHandler
filename: fixture.log
mode: w
tests:
format: '%(asctime)s [%(name)s] %(message)s'
handlers:
- class: logging.FileHandler
filename: tests.log
mode: w

View File

@ -0,0 +1,10 @@
logging:
executor:
handlers:
- class: logging.NullHandler
fixture:
handlers:
- class: logging.NullHandler
tests:
handlers:
- class: logging.NullHandler

View File

@ -0,0 +1,36 @@
"""
Defines a mapping of shortened names for suite configuration files to
their full path.
"""
from __future__ import absolute_import
import os
import os.path
def _get_named_suites():
"""
Explores this directory for any YAML configuration files.
Returns a mapping of basenames without the file extension to their
full path.
"""
dirname = os.path.dirname(__file__)
named_suites = {}
try:
(root, _dirs, files) = os.walk(dirname).next()
for filename in files:
(short_name, ext) = os.path.splitext(filename)
if ext in (".yml", ".yaml"):
pathname = os.path.join(root, filename)
named_suites[short_name] = os.path.relpath(pathname)
except StopIteration:
# 'dirname' does not exist, which should be impossible because it contains __file__.
raise IOError("Directory '%s' does not exist" % (dirname))
return named_suites
NAMED_SUITES = _get_named_suites()

View File

@ -0,0 +1,17 @@
selector:
js_test:
roots:
- jstests/aggregation/*.js
executor:
js_test:
config: {}
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,40 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/aggregation/*.js
exclude_files:
# Skip any tests that run with auth explicitly.
- jstests/aggregation/*[aA]uth*.js
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
eval: jsTest.authenticate(db.getMongo())
authenticationDatabase: local
authenticationMechanism: SCRAM-SHA-1
password: *keyFileData
username: __system
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
auth: ''
keyFile: *keyFile
nopreallocj: ''
set_parameters:
enableTestCommands: 1
enableLocalhostAuthBypass: false

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- src/mongo/db/modules/*/jstests/audit/*.js
# Audit tests start their own mongod processes.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- jstests/auth/*.js
# Auth tests start their own mongod processes.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,20 @@
selector:
js_test:
roots:
- jstests/core/*.js
include_files:
- jstests/core/bulk*.js
executor:
js_test:
config:
shell_options:
writeMode: legacy
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,16 @@
selector:
js_test:
roots:
- jstests/concurrency/*.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,16 @@
selector:
js_test:
roots:
- jstests/core/*.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,42 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/core/*.js
exclude_files:
# Skip any tests that run with auth explicitly.
- jstests/core/*[aA]uth*.js
# Skip these additional tests when running with auth enabled.
- jstests/core/bench_test*.js
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
eval: jsTest.authenticate(db.getMongo())
authenticationDatabase: local
authenticationMechanism: SCRAM-SHA-1
password: *keyFileData
username: __system
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
auth: ''
keyFile: *keyFile
nopreallocj: ''
set_parameters:
enableTestCommands: 1
enableLocalhostAuthBypass: false

View File

@ -0,0 +1,28 @@
selector:
js_test:
roots:
- jstests/core/*.js
exclude_files:
# These tests are not expected to pass with master-slave:
- jstests/core/capped_convertToCapped1.js
- jstests/core/capped_max1.js
- jstests/core/dbadmin.js
- jstests/core/dropdb.js
- jstests/core/dropdb_race.js
- jstests/core/opcounters_write_cmd.js
- jstests/core/rename.js
executor:
js_test:
config:
shell_options:
eval: "testingReplication = true;"
hooks:
- class: CheckReplDBHash
fixture:
class: MasterSlaveFixture
mongod_options:
oplogSize: 511
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,29 @@
selector:
js_test:
roots:
- jstests/core/*.js
exclude_files:
# These tests are not expected to pass with replica-sets:
- jstests/core/capped_convertToCapped1.js
- jstests/core/capped_max1.js
- jstests/core/dbadmin.js
- jstests/core/dropdb.js
- jstests/core/dropdb_race.js
- jstests/core/opcounters_write_cmd.js
- jstests/core/rename.js
executor:
js_test:
config:
shell_options:
eval: "testingReplication = true;"
hooks:
- class: CheckReplDBHash
fixture:
class: ReplicaSetFixture
mongod_options:
oplogSize: 511
nopreallocj: ''
set_parameters:
enableTestCommands: 1
num_nodes: 2

View File

@ -0,0 +1,6 @@
selector:
db_test: {}
executor:
db_test:
config: {}

View File

@ -0,0 +1,18 @@
selector:
js_test:
roots:
- jstests/disk/*.js
exclude_files:
- jstests/disk/repair2.js # SERVER-18256
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,19 @@
selector:
js_test:
roots:
- jstests/core/*.js
exclude_files:
- jstests/core/repair*.js # Fails on recovery.
- jstests/core/shellkillop.js # Takes forever and doesn't test anything new.
executor:
js_test:
fixture:
class: MongoDFixture
mongod_options:
journal: ''
nopreallocj: ''
set_parameters:
enableTestCommands: 1
smallfiles: ''
durOptions: 8

View File

@ -0,0 +1,14 @@
selector:
js_test:
roots:
- jstests/dur/*.js
exclude_files:
# Skip the passthrough test because it is run separately.
- jstests/dur/dur_jscore_passthrough.js
# Durability tests start their own mongod processes.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- jstests/fail_point/*.js
# Failpoint tests start their own mongod processes.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,25 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/fail_point/*.js
exclude_files:
# Skip any tests that run with auth explicitly.
- jstests/fail_point/*[aA]uth*.js
# Failpoint tests start their own mongod processes.
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
nodb: ''

View File

@ -0,0 +1,43 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/gle/*.js
exclude_files:
# Skip any tests that run with auth explicitly.
- jstests/gle/*[aA]uth*.js
# Skip the passthrough tests because those are run separately.
- jstests/gle/0_gle_basics_passthrough.js
- jstests/gle/1_sharding_gle_basics_passthrough.js
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
eval: jsTest.authenticate(db.getMongo())
authenticationDatabase: local
authenticationMechanism: SCRAM-SHA-1
password: *keyFileData
username: __system
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
auth: ''
keyFile: *keyFile
nopreallocj: ''
set_parameters:
enableTestCommands: 1
enableLocalhostAuthBypass: false

View File

@ -0,0 +1,34 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/gle/core/*.js
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
eval: jsTest.authenticate(db.getMongo())
authenticationDatabase: local
authenticationMechanism: SCRAM-SHA-1
password: *keyFileData
username: __system
fixture:
class: MongoDFixture
mongod_options:
auth: ''
keyFile: *keyFile
nopreallocj: ''
set_parameters:
enableTestCommands: 1
enableLocalhostAuthBypass: false

View File

@ -0,0 +1,17 @@
selector:
js_test:
roots:
- jstests/mmap_v1/*.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1
storageEngine: mmapv1

View File

@ -0,0 +1,3 @@
selector:
mongos_test:
test: ''

View File

@ -0,0 +1,12 @@
selector:
js_test:
roots:
- jstests/multiVersion/*.js
# Multiversion tests start their own mongod processes.
executor:
js_test:
config:
shell_options:
nodb: ''
writeMode: legacy

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- jstests/noPassthrough/*.js
# noPassthrough tests start their own mongod processes.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,19 @@
selector:
js_test:
roots:
- jstests/noPassthroughWithMongod/*.js
exclude_files:
# Skip the passthrough test because it is run separately.
- jstests/noPassthroughWithMongod/fluent_gle_passthrough.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,16 @@
selector:
js_test:
roots:
- jstests/parallel/*.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,10 @@
selector:
js_test:
roots:
- jstests/replsets/*.js
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,24 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/replsets/*.js
exclude_files:
# Skip any tests that run with auth explicitly.
- jstests/replsets/*[aA]uth*.js
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
nodb: ''

View File

@ -0,0 +1,10 @@
selector:
js_test:
roots:
- jstests/repl/*.js
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,24 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/repl/*.js
exclude_files:
# Skip any tests that run with auth explicitly.
- jstests/repl/*[aA]uth*.js
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
nodb: ''

View File

@ -0,0 +1,18 @@
selector:
js_test:
roots:
- src/mongo/db/modules/*/jstests/rlp/*.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
basisTechRootDirectory: /opt/basis
set_parameters:
enableTestCommands: 1
rlpEnableExperimentalLanguagesForTesting: true

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- src/mongo/db/modules/*/jstests/sasl/*.js
# sasl tests start their own mongod's.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,10 @@
selector:
js_test:
roots:
- jstests/sharding/*.js
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,28 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/sharding/*.js
exclude_files:
# Skip any tests that run with auth explicitly.
- jstests/sharding/*[aA]uth*.js
# Skip these additional tests when running with auth enabled.
- jstests/sharding/sync[36].js
- jstests/sharding/parallel.js
- jstests/sharding/copydb_from_mongos.js # SERVER-13080
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
nodb: ''

View File

@ -0,0 +1,52 @@
# Section that is ignored by resmoke.py.
config_variables:
- &keyFile jstests/libs/authTestsKey
- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly
selector:
js_test:
roots:
- jstests/gle/core/*.js
exclude_files:
- jstests/gle/core/error1.js # The getPrevError command is not supported under sharding.
- jstests/gle/core/remove5.js
- jstests/gle/core/update4.js
executor:
js_test:
config:
shell_options:
global_vars:
TestData:
auth: true
authMechanism: SCRAM-SHA-1
keyFile: *keyFile
keyFileData: *keyFileData
eval: jsTest.authenticate(db.getMongo())
authenticationDatabase: admin
authenticationMechanism: SCRAM-SHA-1
password: *keyFileData
username: __system
fixture:
class: ShardedClusterFixture
mongos_options:
keyFile: *keyFile
set_parameters:
enableTestCommands: 1
enableLocalhostAuthBypass: false
mongod_options:
auth: ''
keyFile: *keyFile
nopreallocj: ''
set_parameters:
enableTestCommands: 1
enableLocalhostAuthBypass: false
num_shards: 2
enable_sharding:
- test
auth_options:
authenticationDatabase: admin
authenticationMechanism: SCRAM-SHA-1
password: *keyFileData
username: __system

View File

@ -0,0 +1,61 @@
selector:
js_test:
roots:
- jstests/core/*.js
exclude_files:
# These tests correctly fail under sharding:
- jstests/core/capped*.js
- jstests/core/apitest_db.js
- jstests/core/cursor6.js
- jstests/core/profile*.js
- jstests/core/dbhash.js
- jstests/core/dbhash2.js
- jstests/core/evalb.js
- jstests/core/evald.js
- jstests/core/eval_nolock.js
- jstests/core/explain_missing_database.js
- jstests/core/auth1.js
- jstests/core/auth2.js
- jstests/core/dropdb_race.js
# These tests might be fixed under sharding:
- jstests/core/apply_ops[12].js # SERVER-1439
- jstests/core/count5.js # SERVER-1444
- jstests/core/or4.js # SERVER-1444
- jstests/core/shellkillop.js # SERVER-1445
- jstests/core/update_setOnInsert.js # SERVER-8653
- jstests/core/max_time_ms.js # SERVER-2212
- jstests/core/fts_querylang.js # SERVER-9063
- jstests/core/fts_projection.js
    # These tests should not be run under sharding:
- jstests/core/dbadmin.js
- jstests/core/fsync.js # Isn't supported through mongos.
- jstests/core/geo*.js
- jstests/core/index_bigkeys_nofail.js
- jstests/core/loglong.js
- jstests/core/notablescan.js
- jstests/core/collection_truncate.js # Relies on the emptycapped test command, which isn't in mongos.
- jstests/core/compact*.js
- jstests/core/check_shard_index.js
- jstests/core/bench_test*.js
- jstests/core/mr_replaceIntoDB.js
- jstests/core/queryoptimizera.js
- jstests/core/indexStatsCommand.js
- jstests/core/storageDetailsCommand.js
- jstests/core/stages*.js
- jstests/core/top.js
- jstests/core/dbcase.js # SERVER-11735
- jstests/core/dbcase2.js # SERVER-11735
executor:
js_test:
fixture:
class: ShardedClusterFixture
mongos_options:
set_parameters:
enableTestCommands: 1
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1
enable_sharding:
- test

View File

@ -0,0 +1,16 @@
selector:
js_test:
roots:
- jstests/slow1/*.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 20
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,19 @@
selector:
js_test:
roots:
- jstests/slow2/*.js
exclude_files:
# Skip the passthrough test because it is run separately.
- jstests/slow2/sharding_jscore_passthrough.js
executor:
js_test:
hooks:
- class: CleanEveryN
n: 1
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- src/mongo/db/modules/*/jstests/snmp/*.js
# snmp tests start their own mongod's.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,15 @@
selector:
js_test:
roots:
- jstests/ssl/*.js
# ssl tests start their own mongod's.
executor:
js_test:
config:
shell_options:
nodb: ''
ssl: ''
sslAllowInvalidCertificates: ''
sslCAFile: jstests/libs/ca.pem
sslPEMKeyFile: jstests/libs/client.pem

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- jstests/sslSpecial/*.js
# ssl tests start their own mongod's.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,11 @@
selector:
js_test:
roots:
- jstests/tool/*.js
# Tool tests start their own mongod's.
executor:
js_test:
config:
shell_options:
nodb: ''

View File

@ -0,0 +1,7 @@
selector:
cpp_unit_test:
root: build/unittests.txt
executor:
cpp_unit_test:
config: {}

View File

@ -0,0 +1,8 @@
executor:
js_test:
fixture:
class: MongoDFixture
mongod_options:
nopreallocj: ''
set_parameters:
enableTestCommands: 1

View File

@ -0,0 +1,7 @@
from __future__ import absolute_import
from . import errors
from . import logging
from . import parser
from . import testing
from . import utils

View File

@ -0,0 +1,131 @@
"""
Configuration options for resmoke.py.
"""
from __future__ import absolute_import
import os
import os.path
import time
##
# Default values.
##
# Default path for where to look for executables.
DEFAULT_DBTEST_EXECUTABLE = os.path.join(os.curdir, "dbtest")
DEFAULT_MONGO_EXECUTABLE = os.path.join(os.curdir, "mongo")
DEFAULT_MONGOD_EXECUTABLE = os.path.join(os.curdir, "mongod")
DEFAULT_MONGOS_EXECUTABLE = os.path.join(os.curdir, "mongos")
# Default root directory for where resmoke.py puts directories containing data files of mongod's it
# starts, as well as those started by individual tests.
DEFAULT_DBPATH_PREFIX = os.path.normpath("/data/db")
# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
# by resmoke.py.
FIXTURE_SUBDIR = "resmoke"
# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
# by individual tests.
MONGO_RUNNER_SUBDIR = "mongorunner"
# Names below correspond to how they are specified via the command line or in the options YAML file.
DEFAULTS = {
    "buildloggerUrl": "https://logkeeper.mongodb.org",
    "continueOnFailure": False,
    "dbpathPrefix": None,
    "dbtest": None,
    "dryRun": None,
    "jobs": 1,
    "mongo": None,
    "mongod": None,
    "mongos": None,
    "nojournal": None,
    "nopreallocj": None,
    "repeat": 1,
    "reportFile": None,
    # NOTE(review): long() is Python 2-only; int() would behave identically here (Python 2 ints
    # auto-promote) if this file is ever run under Python 3 -- confirm before porting.
    "seed": long(time.time() * 256), # Taken from random.py code in Python 2.7.
    "shellWriteMode": None,
    "shuffle": False,
    "storageEngine": None,
    "wiredTigerCollectionConfigString": None,
    "wiredTigerEngineConfigString": None,
    "wiredTigerIndexConfigString": None
}
##
# Variables that are set by the user at the command line or with --options.
# Each is populated from DEFAULTS (or the user's values) by the options parser.
##
# The root url of the buildlogger server.
BUILDLOGGER_URL = None
# Root directory for where resmoke.py puts directories containing data files of mongod's it starts,
# as well as those started by individual tests.
DBPATH_PREFIX = None
# The path to the dbtest executable used by resmoke.py.
DBTEST_EXECUTABLE = None
# If set to "tests", then resmoke.py will output the tests that would be run by each suite (without
# actually running them).
DRY_RUN = None
# If true, then a test failure or error will cause resmoke.py to exit and not run any more tests.
FAIL_FAST = None
# If set, then resmoke.py starts the specified number of Job instances to run tests.
JOBS = None
# The path to the mongo executable used by resmoke.py.
MONGO_EXECUTABLE = None
# The path to the mongod executable used by resmoke.py.
MONGOD_EXECUTABLE = None
# The path to the mongos executable used by resmoke.py.
MONGOS_EXECUTABLE = None
# If true, then all mongod's started by resmoke.py and by the mongo shell will not have journaling
# enabled.
NO_JOURNAL = None
# If true, then all mongod's started by resmoke.py and by the mongo shell will not preallocate
# journal files.
NO_PREALLOC_JOURNAL = None
# If set, then the RNG is seeded with the specified value. Otherwise uses a seed based on the time
# this module was loaded.
RANDOM_SEED = None
# If set, then each suite is repeated the specified number of times.
REPEAT = None
# If set, then resmoke.py will write out a report file with the status of each test that ran.
REPORT_FILE = None
# If set, then mongo shells started by resmoke.py will use the specified write mode.
SHELL_WRITE_MODE = None
# If true, then the order the tests run in is randomized. Otherwise the tests will run in
# alphabetical (case-insensitive) order.
SHUFFLE = None
# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
# storage engine.
STORAGE_ENGINE = None
# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
# WiredTiger collection configuration settings.
WT_COLL_CONFIG = None
# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
# WiredTiger storage engine configuration settings.
WT_ENGINE_CONFIG = None
# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
# WiredTiger index configuration settings.
WT_INDEX_CONFIG = None

View File

@ -0,0 +1,5 @@
from __future__ import absolute_import
from . import process
from . import programs
from . import network

View File

@ -0,0 +1,29 @@
"""
Helper to reserve a network port.
"""
from __future__ import absolute_import
import socket
class UnusedPort(object):
    """
    Context manager that reserves an ephemeral TCP port.

    While inside the 'with' block, a bound socket holds on to the port
    and its number is exposed through the 'num' attribute; the socket is
    released again on exit.
    """

    def __init__(self):
        # Not known until the socket is bound in __enter__().
        self.num = None

    def __enter__(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Binding to port 0 asks the OS to pick any currently-free port.
        sock.bind(("0.0.0.0", 0))
        self.num = sock.getsockname()[1]
        self.__socket = sock
        return self

    def __exit__(self, *exc_info):
        # Closing the socket frees the port for the caller to use.
        self.__socket.close()

View File

@ -0,0 +1,82 @@
"""
Helper class to read output of a subprocess. Used to avoid deadlocks
from the pipe buffer filling up and blocking the subprocess while it's
being waited on.
"""
from __future__ import absolute_import
import threading
class LoggerPipe(threading.Thread):
    """
    Asynchronously reads the output of a subprocess and sends it to a
    logger.
    """
    # The start() and join() methods are not intended to be called directly on the LoggerPipe
    # instance. Since we override them for that effect, the super's version are preserved here.
    __start = threading.Thread.start
    __join = threading.Thread.join
    def __init__(self, logger, level, pipe_out):
        """
        Initializes the LoggerPipe with the specified logger, logging
        level to use, and pipe to read from.
        """
        threading.Thread.__init__(self)
        # Main thread should not call join() when exiting
        self.daemon = True
        self.__logger = logger
        self.__level = level
        self.__pipe_out = pipe_out
        self.__lock = threading.Lock()
        self.__condition = threading.Condition(self.__lock)
        # Both flags are protected by __lock and signaled through __condition.
        self.__started = False
        self.__finished = False
        # The thread starts itself; callers use wait_until_started()/wait_until_finished().
        LoggerPipe.__start(self)
    def start(self):
        """Not supported; the thread is started by __init__() instead."""
        raise NotImplementedError("start should not be called directly")
    def run(self):
        """
        Reads the output from 'pipe_out' and logs each line to 'logger'.
        """
        with self.__lock:
            self.__started = True
            self.__condition.notify_all()
        # Close the pipe when finished reading all of the output.
        with self.__pipe_out:
            # Avoid buffering the output from the pipe.
            for line in iter(self.__pipe_out.readline, b""):
                self.__logger.log(self.__level, line.rstrip())
        with self.__lock:
            self.__finished = True
            self.__condition.notify_all()
    def join(self, timeout=None):
        """Not supported; use wait_until_finished() instead."""
        raise NotImplementedError("join should not be called directly")
    def wait_until_started(self):
        """Blocks until run() has started consuming output from the pipe."""
        with self.__lock:
            while not self.__started:
                self.__condition.wait()
    def wait_until_finished(self):
        """Blocks until all output has been read and the pipe is closed."""
        with self.__lock:
            while not self.__finished:
                self.__condition.wait()
        # No need to pass a timeout to join() because the thread should already be done after
        # notifying us it has finished reading output from the pipe.
        LoggerPipe.__join(self) # Tidy up the started thread.

View File

@ -0,0 +1,185 @@
"""
A more reliable way to create and destroy processes.
Uses job objects when running on Windows to ensure that all created
processes are terminated.
"""
from __future__ import absolute_import
import logging
import os
import os.path
import subprocess
import sys
import threading
from . import pipe
from .. import utils
# Prevent race conditions when starting multiple subprocesses on the same thread.
# See https://bugs.python.org/issue2320 for more details.
_POPEN_LOCK = threading.Lock()
# Job objects are the only reliable way to ensure that processes are terminated on Windows.
if sys.platform == "win32":
    import win32con
    import win32job
    import win32process
    import winerror
    def _init_job_object():
        """
        Creates the job object that all processes started by resmoke.py
        are assigned to, so that they are terminated together when the
        last handle to the job object is closed.
        """
        job_object = win32job.CreateJobObject(None, "")
        # Get the limit and job state information of the newly-created job object.
        job_info = win32job.QueryInformationJobObject(job_object,
                                                      win32job.JobObjectExtendedLimitInformation)
        # Set up the job object so that closing the last handle to the job object
        # will terminate all associated processes and destroy the job object itself.
        job_info["BasicLimitInformation"]["LimitFlags"] |= \
            win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
        # Update the limits of the job object.
        win32job.SetInformationJobObject(job_object,
                                         win32job.JobObjectExtendedLimitInformation,
                                         job_info)
        # TODO: register an atexit handler to ensure that the job object handle gets closed
        return job_object
    # Created once at import time and shared by every Process instance.
    _JOB_OBJECT = _init_job_object()
class Process(object):
    """
    Wrapper around subprocess.Popen class.
    """
    def __init__(self, logger, args, env=None, env_vars=None):
        """
        Initializes the process with the specified logger, arguments,
        and environment.

        'env' replaces the inherited environment entirely when given;
        'env_vars' entries are merged on top of whichever environment is
        used.
        """
        # Ensure that executable files on Windows have a ".exe" extension.
        if sys.platform == "win32" and os.path.splitext(args[0])[1] != ".exe":
            args[0] += ".exe"
        self.logger = logger
        self.args = args
        self.env = utils.default_if_none(env, os.environ.copy())
        if env_vars is not None:
            self.env.update(env_vars)
        # Populated by start().
        self.pid = None
        self._process = None
        self._stdout_pipe = None
        self._stderr_pipe = None
    def start(self):
        """
        Starts the process and the logger pipes for its stdout and
        stderr.
        """
        creation_flags = 0
        if sys.platform == "win32":
            # Start the process outside any job object resmoke.py itself is running in, so it
            # can be assigned to _JOB_OBJECT below.
            creation_flags |= win32process.CREATE_BREAKAWAY_FROM_JOB
        with _POPEN_LOCK:
            self._process = subprocess.Popen(self.args,
                                             env=self.env,
                                             creationflags=creation_flags,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE)
            self.pid = self._process.pid
        # The child's stdout is logged at INFO level and its stderr at ERROR level.
        self._stdout_pipe = pipe.LoggerPipe(self.logger, logging.INFO, self._process.stdout)
        self._stderr_pipe = pipe.LoggerPipe(self.logger, logging.ERROR, self._process.stderr)
        self._stdout_pipe.wait_until_started()
        self._stderr_pipe.wait_until_started()
        if sys.platform == "win32":
            try:
                # NOTE: Popen._handle is a private attribute of the subprocess module.
                win32job.AssignProcessToJobObject(_JOB_OBJECT, self._process._handle)
            except win32job.error as err:
                # ERROR_ACCESS_DENIED (winerror=5) is received when the process has already died.
                if err.winerror != winerror.ERROR_ACCESS_DENIED:
                    raise
                return_code = win32process.GetExitCodeProcess(self._process._handle)
                if return_code == win32con.STILL_ACTIVE:
                    raise
    def stop(self):
        """
        Terminates the process.
        """
        if sys.platform == "win32":
            # Adapted from implementation of Popen.terminate() in subprocess.py of Python 2.7
            # because earlier versions do not catch exceptions.
            try:
                # Have the process exit with code 0 if it is terminated by us to simplify the
                # success-checking logic later on.
                win32process.TerminateProcess(self._process._handle, 0)
            except win32process.error as err:
                # ERROR_ACCESS_DENIED (winerror=5) is received when the process
                # has already died.
                if err.winerror != winerror.ERROR_ACCESS_DENIED:
                    raise
                return_code = win32process.GetExitCodeProcess(self._process._handle)
                if return_code == win32con.STILL_ACTIVE:
                    raise
        else:
            try:
                self._process.terminate()
            except OSError as err:
                # ESRCH (errno=3) is received when the process has already died.
                if err.errno != 3:
                    raise
    def poll(self):
        # Returns the exit code if the process has terminated, otherwise None.
        return self._process.poll()
    def wait(self):
        """
        Waits until the process has terminated and all output has been
        consumed by the logger pipes.
        """
        return_code = self._process.wait()
        if self._stdout_pipe:
            self._stdout_pipe.wait_until_finished()
        if self._stderr_pipe:
            self._stderr_pipe.wait_until_finished()
        return return_code
    def as_command(self):
        """
        Returns an equivalent command line invocation of the process.
        """
        default_env = os.environ
        env_diff = self.env.copy()
        # Remove environment variables that appear in both 'os.environ' and 'self.env'.
        for env_var in default_env:
            if env_var in env_diff and env_diff[env_var] == default_env[env_var]:
                del env_diff[env_var]
        sb = []  # String builder: VAR=value prefixes followed by the argument list.
        for env_var in env_diff:
            sb.append("%s=%s" % (env_var, env_diff[env_var]))
        sb.extend(self.args)
        return " ".join(sb)
    def __str__(self):
        # Includes the pid once the process has been started.
        if self.pid is None:
            return self.as_command()
        return "%s (%d)" % (self.as_command(), self.pid)

View File

@ -0,0 +1,231 @@
"""
Utility functions to create MongoDB processes.
Handles all the nitty-gritty parameter conversion.
"""
from __future__ import absolute_import
import json
import os
import os.path
import stat
from . import process as _process
from .. import utils
from .. import config
def mongod_program(logger, executable=None, process_kwargs=None, **kwargs):
    """
    Returns a Process instance that starts a mongod executable with
    arguments constructed from 'kwargs'.
    """
    args = [utils.default_if_none(executable, config.DEFAULT_MONGOD_EXECUTABLE)]

    # --setParameter arguments are handled separately from the other options.
    _apply_set_parameters(args, kwargs.pop("set_parameters", {}))

    # Values supplied on the resmoke.py command line that override the YAML configuration.
    shortcut_opts = {
        "nojournal": config.NO_JOURNAL,
        "nopreallocj": config.NO_PREALLOC_JOURNAL,
        "storageEngine": config.STORAGE_ENGINE,
        "wiredTigerCollectionConfigString": config.WT_COLL_CONFIG,
        "wiredTigerEngineConfigString": config.WT_ENGINE_CONFIG,
        "wiredTigerIndexConfigString": config.WT_INDEX_CONFIG,
    }

    # These options are just flags, so they should not take a value.
    flag_opts = ("nojournal", "nopreallocj")

    # Have the --nojournal command line argument to resmoke.py unset the journal option.
    if shortcut_opts["nojournal"] is not None and "journal" in kwargs:
        del kwargs["journal"]

    for opt_name, opt_value in shortcut_opts.items():
        if opt_value is None:
            continue
        # Command line options override the YAML configuration.
        kwargs[opt_name] = "" if opt_name in flag_opts else opt_value

    # Apply the rest of the command line arguments.
    _apply_kwargs(args, kwargs)

    # mongod refuses to start when the keyfile permissions are too open.
    if "keyFile" in kwargs:
        _set_keyfile_permissions(kwargs["keyFile"])

    return _process.Process(logger, args, **utils.default_if_none(process_kwargs, {}))
def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
    """
    Returns a Process instance that starts a mongos executable with
    arguments constructed from 'kwargs'.
    """
    args = [utils.default_if_none(executable, config.DEFAULT_MONGOS_EXECUTABLE)]

    # --setParameter arguments are handled separately from the other options.
    _apply_set_parameters(args, kwargs.pop("set_parameters", {}))

    # Apply the rest of the command line arguments.
    _apply_kwargs(args, kwargs)

    # mongos refuses to start when the keyfile permissions are too open.
    if "keyFile" in kwargs:
        _set_keyfile_permissions(kwargs["keyFile"])

    return _process.Process(logger, args, **utils.default_if_none(process_kwargs, {}))
def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None, **kwargs):
    """
    Returns a Process instance that starts a mongo shell with arguments
    constructed from 'kwargs'.

    'filename' is the path of the .js file for the shell to run; despite
    the None default it is expected to be supplied by the caller (it is
    used to derive TestData.testName below).
    """
    executable = utils.default_if_none(executable, config.DEFAULT_MONGO_EXECUTABLE)
    args = [executable]
    eval_sb = []
    global_vars = kwargs.pop("global_vars", {})
    # (value from resmoke.py command line, default) pairs for TestData properties.
    shortcut_opts = {
        "noJournal": (config.NO_JOURNAL, False),
        "noJournalPrealloc": (config.NO_PREALLOC_JOURNAL, False),
        "storageEngine": (config.STORAGE_ENGINE, ""),
        "testName": (os.path.splitext(os.path.basename(filename))[0], ""),
        "wiredTigerCollectionConfigString": (config.WT_COLL_CONFIG, ""),
        "wiredTigerEngineConfigString": (config.WT_ENGINE_CONFIG, ""),
        "wiredTigerIndexConfigString": (config.WT_INDEX_CONFIG, ""),
    }
    test_data = global_vars.get("TestData", {}).copy()
    for opt_name in shortcut_opts:
        (opt_value, opt_default) = shortcut_opts[opt_name]
        # Command line options override the YAML configuration.
        if opt_value is not None:
            test_data[opt_name] = opt_value
        elif opt_name not in test_data:
            # Only use 'opt_default' if the property wasn't set in the YAML configuration.
            test_data[opt_name] = opt_default
    global_vars["TestData"] = test_data
    # Build the --eval string from the global variables and any user-supplied "eval" snippet.
    for var_name in global_vars:
        _format_shell_vars(eval_sb, var_name, global_vars[var_name])
    if "eval" in kwargs:
        eval_sb.append(kwargs.pop("eval"))
    eval_str = "; ".join(eval_sb)
    args.append("--eval")
    args.append(eval_str)
    if config.SHELL_WRITE_MODE is not None:
        kwargs["writeMode"] = config.SHELL_WRITE_MODE
    # Apply the rest of the command line arguments.
    _apply_kwargs(args, kwargs)
    # Have the mongo shell run the specified file.
    args.append(filename)
    if "keyFile" in global_vars["TestData"]:
        _set_keyfile_permissions(global_vars["TestData"]["keyFile"])
    process_kwargs = utils.default_if_none(process_kwargs, {})
    return _process.Process(logger, args, **process_kwargs)
def _format_shell_vars(sb, path, value):
"""
Formats 'value' in a way that can be passed to --eval.
If 'value' is a dictionary, then it is unrolled into the creation of
a new JSON object with properties assigned for each key of the
dictionary.
"""
# Only need to do special handling for JSON objects.
if not isinstance(value, dict):
sb.append("%s = %s" % (path, json.dumps(value)))
return
# Avoid including curly braces and colons in output so that the command invocation can be
# copied and run through bash.
sb.append("%s = new Object()" % (path))
for subkey in value:
_format_shell_vars(sb, ".".join((path, subkey)), value[subkey])
def dbtest_program(logger, executable=None, suites=None, process_kwargs=None, **kwargs):
    """
    Returns a Process instance that starts a dbtest executable with
    arguments constructed from 'kwargs'.

    'suites' is an optional list of dbtest suite names passed through as
    positional arguments.
    """
    executable = utils.default_if_none(executable, config.DEFAULT_DBTEST_EXECUTABLE)
    args = [executable]
    if suites is not None:
        args.extend(suites)
    # The --storageEngine command line argument to resmoke.py overrides the YAML configuration.
    if config.STORAGE_ENGINE is not None:
        kwargs["storageEngine"] = config.STORAGE_ENGINE
    # Apply the rest of the command line arguments. (Shares _apply_kwargs() with mongod_program()
    # and mongos_program() instead of duplicating the same conversion loop inline.)
    _apply_kwargs(args, kwargs)
    process_kwargs = utils.default_if_none(process_kwargs, {})
    return _process.Process(logger, args, **process_kwargs)
def _apply_set_parameters(args, set_parameter):
"""
Converts key-value pairs from 'kwargs' into --setParameter key=value
arguments to an executable and appends them to 'args'.
"""
for param_name in set_parameter:
param_value = set_parameter[param_name]
# --setParameter takes boolean values as lowercase strings.
if isinstance(param_value, bool):
param_value = "true" if param_value else "false"
args.append("--setParameter")
args.append("%s=%s" % (param_name, param_value))
def _apply_kwargs(args, kwargs):
"""
Converts key-value pairs from 'kwargs' into --key value arguments
to an executable and appends them to 'args'.
A --flag without a value is represented with the empty string.
"""
for arg_name in kwargs:
arg_value = str(kwargs[arg_name])
args.append("--%s" % (arg_name))
if arg_value:
args.append(arg_value)
def _set_keyfile_permissions(keyfile_path):
"""
Change the permissions on 'keyfile_path' to 600, i.e. only the user
can read and write the file.
This necessary to avoid having the mongod/mongos fail to start up
because "permissions on 'keyfile_path' are too open".
"""
os.chmod(keyfile_path, stat.S_IRUSR | stat.S_IWUSR)

View File

@ -0,0 +1,35 @@
"""
Exceptions raised by resmoke.py.
"""
class ResmokeError(Exception):
    """Root of the resmoke.py exception hierarchy."""
class StopExecution(ResmokeError):
    """
    Raised when resmoke.py should stop executing tests because failing
    fast is enabled.
    """
class TestFailure(ResmokeError):
    """
    Raised by a hook from its after_test method when it determines that
    the previous test should be marked as a failure.
    """
class ServerFailure(TestFailure):
    """
    Raised by a hook from its after_test method when it detects that the
    fixture did not exit cleanly and should be marked as a failure.
    """

View File

@ -0,0 +1,14 @@
"""
Extension to the logging package to support buildlogger.
"""
from __future__ import absolute_import
# Alias the built-in logging.Logger class for type checking arguments. Those interested in
# constructing a new Logger instance should use the loggers.new_logger() function instead.
from logging import Logger
from . import config
from . import buildlogger
from . import flush
from . import loggers

View File

@ -0,0 +1,284 @@
"""
Defines handlers for communicating with a buildlogger server.
"""
from __future__ import absolute_import
import functools
import urllib2
from . import handlers
from . import loggers
from .. import config as _config
# URL endpoints of the buildlogger server; the %(...)s placeholders are filled in with the
# build and test ids returned by the server.
CREATE_BUILD_ENDPOINT = "/build"
APPEND_GLOBAL_LOGS_ENDPOINT = "/build/%(build_id)s"
CREATE_TEST_ENDPOINT = "/build/%(build_id)s/test"
APPEND_TEST_LOGS_ENDPOINT = "/build/%(build_id)s/test/%(test_id)s"
# HTTP authentication realm used when talking to the buildlogger server.
_BUILDLOGGER_REALM = "buildlogs"
# Path of the file the buildlogger credentials are read from (see get_config()).
_BUILDLOGGER_CONFIG = "mci.buildlogger"
# Buffered log lines are flushed to the server after this many lines or this many seconds,
# whichever comes first.
_SEND_AFTER_LINES = 2000
_SEND_AFTER_SECS = 10
def _log_on_error(func):
"""
A decorator that causes any exceptions to be logged by the
"buildlogger" Logger instance.
Returns the wrapped function's return value, or None if an error
was encountered.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except urllib2.HTTPError as err:
sb = []
sb.append("HTTP Error %s: %s" % (err.code, err.msg))
sb.append("POST %s" % (err.filename))
for name in err.hdrs:
value = err.hdrs[name]
sb.append(" %s: %s" % (name, value))
# Try to read the response back from the server.
if hasattr(err, "read"):
sb.append(err.read())
loggers._BUILDLOGGER_FALLBACK.exception("\n".join(sb))
except:
loggers._BUILDLOGGER_FALLBACK.exception("Encountered an error.")
return None
return wrapper
@_log_on_error
def get_config():
    """
    Returns the buildlogger configuration evaluated from the
    _BUILDLOGGER_CONFIG file, normalizing legacy key names.
    """
    scratch_globals = {}  # Keeps variables defined in the config file out of the result.
    config = {}
    execfile(_BUILDLOGGER_CONFIG, scratch_globals, config)
    # Older configuration files use "slavename"/"passwd"; expose those values under the modern
    # key names when the modern name is absent.
    for (old_name, new_name) in (("slavename", "username"), ("passwd", "password")):
        if old_name in config and new_name not in config:
            config[new_name] = config.pop(old_name)
    return config
@_log_on_error
def new_build_id(config):
    """
    Creates a new build on the buildlogger server for the global logs to
    be sent to, and returns its id (or None without a configuration).
    """
    if config is None:
        return None

    username = config["username"]
    password = config["password"]
    builder = config["builder"]
    build_num = int(config["build_num"])

    http = handlers.HTTPHandler(realm=_BUILDLOGGER_REALM,
                                url_root=_config.BUILDLOGGER_URL,
                                username=username,
                                password=password)

    payload = {
        "builder": builder,
        "buildnum": build_num,
    }
    response = http.post(CREATE_BUILD_ENDPOINT, data=payload)
    return response["id"]
@_log_on_error
def new_test_id(build_id, build_config, test_filename, test_command):
    """
    Creates a new test under 'build_id' on the buildlogger server for
    the test logs to be sent to, and returns its id (or None when the
    build id or its configuration is missing).
    """
    if build_id is None or build_config is None:
        return None

    http = handlers.HTTPHandler(realm=_BUILDLOGGER_REALM,
                                url_root=_config.BUILDLOGGER_URL,
                                username=build_config["username"],
                                password=build_config["password"])

    payload = {
        "test_filename": test_filename,
        "command": test_command,
        # The build phase is optional in the configuration file.
        "phase": build_config.get("build_phase", "unknown"),
    }
    response = http.post(CREATE_TEST_ENDPOINT % {"build_id": build_id}, data=payload)
    return response["id"]
class _BaseBuildloggerHandler(handlers.BufferedHandler):
    """
    Base class of the buildlogger handler for the global logs and the
    handler for the test logs.
    """
    def __init__(self,
                 build_id,
                 build_config,
                 capacity=_SEND_AFTER_LINES,
                 interval_secs=_SEND_AFTER_SECS):
        """
        Initializes the buildlogger handler with the build id and
        credentials.
        """
        handlers.BufferedHandler.__init__(self, capacity, interval_secs)
        username = build_config["username"]
        password = build_config["password"]
        self.http_handler = handlers.HTTPHandler(_BUILDLOGGER_REALM,
                                                 _config.BUILDLOGGER_URL,
                                                 username,
                                                 password)
        self.build_id = build_id
        # Holds the (created-time, message) tuples that failed to be sent; retried on the next
        # flush (see flush_with_lock()).
        self.retry_buffer = []
    def process_record(self, record):
        """
        Returns a tuple of the time the log record was created, and the
        message because the buildlogger expects the log messages
        formatted in JSON as:
            [ [ <log-time-1>, <log-message-1> ],
              [ <log-time-2>, <log-message-2> ],
              ... ]
        """
        msg = self.format(record)
        return (record.created, msg)
    def post(self, *args, **kwargs):
        """
        Convenience method for subclasses to use when making POST requests.
        """
        return self.http_handler.post(*args, **kwargs)
    def _append_logs(self, log_lines):
        # Subclasses send 'log_lines' to their endpoint and return whether the request succeeded.
        raise NotImplementedError("_append_logs must be implemented by _BaseBuildloggerHandler"
                                  " subclasses")
    def flush_with_lock(self, close_called):
        """
        Ensures all logging output has been flushed to the buildlogger
        server.
        If _append_logs() returns false, then the log messages are added
        to a separate buffer and retried the next time flush() is
        called.
        """
        # NOTE(review): 'self.buffer' is presumably populated by the BufferedHandler base class
        # (not visible here) -- confirm against handlers.py.
        self.retry_buffer.extend(self.buffer)
        if self._append_logs(self.retry_buffer):
            self.retry_buffer = []
        elif close_called:
            # Request to the buildlogger server returned an error, so use the fallback logger to
            # avoid losing the log messages entirely.
            for (_, message) in self.retry_buffer:
                # TODO: construct an LogRecord instance equivalent to the one passed to the
                # process_record() method if we ever decide to log the time when the
                # LogRecord was created, e.g. using %(asctime)s in
                # _fallback_buildlogger_handler().
                loggers._BUILDLOGGER_FALLBACK.info(message)
            self.retry_buffer = []
        self.buffer = []
class BuildloggerTestHandler(_BaseBuildloggerHandler):
    """
    Buildlogger handler for the logs of an individual test.
    """

    def __init__(self, build_id, build_config, test_id, **kwargs):
        """
        Initializes the buildlogger handler with the build id, test id,
        and credentials.
        """
        _BaseBuildloggerHandler.__init__(self, build_id, build_config, **kwargs)
        self.test_id = test_id

    @_log_on_error
    def _append_logs(self, log_lines):
        """
        Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
        logs that have been captured.

        Returns true if the server accepted them, and false otherwise.
        """
        endpoint = APPEND_TEST_LOGS_ENDPOINT % {
            "build_id": self.build_id,
            "test_id": self.test_id,
        }
        return self.post(endpoint, data=log_lines) is not None

    @_log_on_error
    def _finish_test(self, failed=False):
        """
        Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
        test status.
        """
        endpoint = APPEND_TEST_LOGS_ENDPOINT % {
            "build_id": self.build_id,
            "test_id": self.test_id,
        }
        status_headers = {
            "X-Sendlogs-Test-Done": "true",
            "X-Sendlogs-Test-Failed": "true" if failed else "false",
        }
        self.post(endpoint, headers=status_headers)

    def close(self):
        """
        Flushes any remaining log output, then marks the test finished
        on the buildlogger server.
        """
        _BaseBuildloggerHandler.close(self)
        # TODO: pass the test status (success/failure) to this method
        self._finish_test()
class BuildloggerGlobalHandler(_BaseBuildloggerHandler):
    """
    Buildlogger handler for the global (non test-specific) logs.
    """

    @_log_on_error
    def _append_logs(self, log_lines):
        """
        Sends a POST request to the APPEND_GLOBAL_LOGS_ENDPOINT with
        the logs that have been captured.

        Returns true if the server accepted them, and false otherwise.
        """
        endpoint = APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": self.build_id}
        return self.post(endpoint, data=log_lines) is not None

View File

@ -0,0 +1,160 @@
"""
Configuration functions for the logging package.
"""
from __future__ import absolute_import
import logging
import sys
from . import buildlogger
from . import loggers
_DEFAULT_FORMAT = "[%(name)s] %(message)s"
def using_buildlogger(logging_config):
    """
    Returns true if buildlogger is set as a handler on the "fixture" or
    "tests" loggers, and false otherwise.
    """
    component_names = (loggers.FIXTURE_LOGGER_NAME, loggers.TESTS_LOGGER_NAME)
    return any(_get_buildlogger_handler_info(logging_config[name]) is not None
               for name in component_names)
def apply_config(logging_config):
    """
    Adds all handlers specified by the configuration to the "executor",
    "fixture", and "tests" loggers.

    Raises a ValueError if any of the three components is missing from
    'logging_config'.
    """
    logging_components = (loggers.EXECUTOR_LOGGER_NAME,
                          loggers.FIXTURE_LOGGER_NAME,
                          loggers.TESTS_LOGGER_NAME)
    if any(component not in logging_config for component in logging_components):
        raise ValueError("Logging configuration should contain %s, %s, and %s components"
                         % logging_components)
    # Configure the executor, fixture, and tests loggers.
    for component in logging_components:
        _configure_logger(loggers.LOGGERS_BY_NAME[component], logging_config[component])
    # Configure the buildlogger fallback logger, which writes to stderr.
    loggers._BUILDLOGGER_FALLBACK.addHandler(_fallback_buildlogger_handler())
def apply_buildlogger_global_handler(logger, logging_config, build_id=None, build_config=None):
    """
    Adds a buildlogger.BuildloggerGlobalHandler to 'logger' if specified
    to do so by the configuration.
    """
    logger_info = logging_config[loggers.FIXTURE_LOGGER_NAME]
    handler_info = _get_buildlogger_handler_info(logger_info)
    if handler_info is None:
        # Not configured to use buildlogger.
        return
    if build_id is not None and build_config is not None:
        handler = buildlogger.BuildloggerGlobalHandler(build_id,
                                                       build_config,
                                                       **handler_info)
        formatter = logging.Formatter(fmt=logger_info.get("format", _DEFAULT_FORMAT))
        handler.setFormatter(formatter)
    else:
        # The fallback handler already has formatting configured.
        handler = _fallback_buildlogger_handler()
    logger.addHandler(handler)
def apply_buildlogger_test_handler(logger,
                                   logging_config,
                                   build_id=None,
                                   build_config=None,
                                   test_id=None):
    """
    Adds a buildlogger.BuildloggerTestHandler to 'logger' if specified
    to do so by the configuration.
    """
    logger_info = logging_config[loggers.TESTS_LOGGER_NAME]
    handler_info = _get_buildlogger_handler_info(logger_info)
    if handler_info is None:
        # Not configured to use buildlogger.
        return
    if build_id is not None and build_config is not None and test_id is not None:
        handler = buildlogger.BuildloggerTestHandler(build_id,
                                                     build_config,
                                                     test_id,
                                                     **handler_info)
        formatter = logging.Formatter(fmt=logger_info.get("format", _DEFAULT_FORMAT))
        handler.setFormatter(formatter)
    else:
        # The fallback handler already has formatting configured.
        handler = _fallback_buildlogger_handler()
    logger.addHandler(handler)
def _configure_logger(logger, logger_info):
    """
    Adds the handlers specified by the configuration to 'logger'.

    Raises a ValueError for an unrecognized handler class.
    """
    formatter = logging.Formatter(fmt=logger_info.get("format", _DEFAULT_FORMAT))
    for handler_info in logger_info.get("handlers", []):
        handler_class = handler_info["class"]
        if handler_class == "buildlogger":
            # Buildlogger handlers are applied when running tests.
            continue
        if handler_class == "logging.FileHandler":
            handler = logging.FileHandler(filename=handler_info["filename"],
                                          mode=handler_info.get("mode", "w"))
        elif handler_class == "logging.NullHandler":
            handler = logging.NullHandler()
        elif handler_class == "logging.StreamHandler":
            handler = logging.StreamHandler(sys.stdout)
        else:
            raise ValueError("Unknown handler class '%s'" % (handler_class))
        handler.setFormatter(formatter)
        logger.addHandler(handler)
def _fallback_buildlogger_handler():
"""
Returns a handler that writes to stderr.
"""
log_format = "[buildlogger:%(name)s] %(message)s"
formatter = logging.Formatter(fmt=log_format)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
return handler
def _get_buildlogger_handler_info(logger_info):
"""
Returns the buildlogger handler information if it exists, and None
otherwise.
"""
for handler_info in logger_info["handlers"]:
handler_info = handler_info.copy()
if handler_info.pop("class") == "buildlogger":
return handler_info
return None

View File

@ -0,0 +1,67 @@
"""
Workaround for having too many threads running on 32-bit systems when
logging to buildlogger that still allows periodically flushing messages
to the buildlogger server.
This is because a utils.timer.AlarmClock instance is used for each
buildlogger.BuildloggerTestHandler, but only dismiss()ed when the Python
process is about to exit.
"""
from __future__ import absolute_import
import threading
from ..utils import queue
# Queue of loggers waiting to be flushed and closed by the flush thread.
_LOGGER_QUEUE = queue.Queue()


def start_thread():
    """
    Starts the daemon thread that closes loggers placed on the queue.
    """
    flush_thread = _FlushThread()
    flush_thread.start()
def close_later(logger):
    """
    Adds 'logger' to the queue so that it is closed later by the flush
    thread.
    """
    _LOGGER_QUEUE.put(logger)
class _FlushThread(threading.Thread):
"""
Asynchronously flushes and closes logging handlers.
"""
def __init__(self):
"""
Initializes the flush thread.
"""
threading.Thread.__init__(self, name="FlushThread")
# atexit handler is already set up to flush any loggers still in the queue when exiting.
self.daemon = True
def run(self):
"""
Continuously shuts down loggers from the queue.
"""
while True:
logger = _LOGGER_QUEUE.get()
_FlushThread._shutdown_logger(logger)
@staticmethod
def _shutdown_logger(logger):
"""
Flushes and closes all handlers of 'logger'.
"""
for handler in logger.handlers:
handler.flush()
handler.close()

View File

@ -0,0 +1,178 @@
"""
Additional handlers that are used as the base classes of the buildlogger
handler.
"""
from __future__ import absolute_import
import json
import logging
import threading
import urllib2
from .. import utils
from ..utils import timer
# Default timeout, in seconds, for POST requests made by HTTPHandler.
_TIMEOUT_SECS = 10


class BufferedHandler(logging.Handler):
    """
    A handler class that buffers logging records in memory. Whenever
    each record is added to the buffer, a check is made to see if the
    buffer should be flushed. If it should, then flush() is expected to
    do what's needed.
    """

    def __init__(self, capacity, interval_secs):
        """
        Initializes the handler with the buffer size and timeout after
        which the buffer is flushed regardless.

        Raises a TypeError/ValueError if 'capacity' is not a positive
        integer or 'interval_secs' is not a positive number.
        """
        logging.Handler.__init__(self)
        if not isinstance(capacity, int):
            raise TypeError("capacity must be an integer")
        elif capacity <= 0:
            raise ValueError("capacity must be a positive integer")
        if not isinstance(interval_secs, (int, float)):
            raise TypeError("interval_secs must be a number")
        elif interval_secs <= 0.0:
            raise ValueError("interval_secs must be a positive number")
        self.capacity = capacity
        self.interval_secs = interval_secs
        self.buffer = []
        # Guards 'buffer'; emit() and the timer's flush() can run on different threads.
        self._lock = threading.Lock()
        self._timer = None  # Defer creation until actually begin to log messages.

    def _new_timer(self):
        """
        Returns a new timer.AlarmClock instance that will call the
        flush() method after 'interval_secs' seconds.
        """
        # NOTE(review): 'self' is passed as a positional argument to the bound method
        # self.flush, so the timer would call flush(close_called=self) with a truthy
        # value -- confirm against timer.AlarmClock's calling convention.
        return timer.AlarmClock(self.interval_secs, self.flush, args=[self])

    def process_record(self, record):
        """
        Applies a transformation to the record before it gets added to
        the buffer.

        The default implementation returns 'record' unmodified.
        """
        return record

    def emit(self, record):
        """
        Emits a record.

        Append the record to the buffer after it has been transformed by
        process_record(). If the length of the buffer is greater than or
        equal to its capacity, then flush() is called to process the
        buffer.

        After flushing the buffer, the timer is restarted so that it
        will expire after another 'interval_secs' seconds.
        """
        with self._lock:
            self.buffer.append(self.process_record(record))
            if len(self.buffer) >= self.capacity:
                # Pause the periodic flush while flushing synchronously here.
                if self._timer is not None:
                    self._timer.snooze()
                self.flush_with_lock(False)
                if self._timer is not None:
                    self._timer.reset()
        # Created lazily so handlers that never emit do not start a timer thread.
        if self._timer is None:
            self._timer = self._new_timer()
            self._timer.start()

    def flush(self, close_called=False):
        """
        Ensures all logging output has been flushed.
        """
        with self._lock:
            if self.buffer:
                self.flush_with_lock(close_called)

    def flush_with_lock(self, close_called):
        """
        Ensures all logging output has been flushed.

        This version resets the buffers back to an empty list and is
        intended to be overridden by subclasses. The caller must already
        hold self._lock.
        """
        self.buffer = []

    def close(self):
        """
        Tidies up any resources used by the handler.

        Stops the timer and flushes the buffer.
        """
        if self._timer is not None:
            self._timer.dismiss()
        self.flush(close_called=True)
        logging.Handler.close(self)
class HTTPHandler(object):
    """
    A class which sends data to a web server using POST requests.
    """

    def __init__(self, realm, url_root, username, password):
        """
        Initializes the handler with the necessary authenticaton
        credentials.
        """
        digest_handler = urllib2.HTTPDigestAuthHandler()
        digest_handler.add_password(realm=realm,
                                    uri=url_root,
                                    user=username,
                                    passwd=password)
        self.url_root = url_root
        self.url_opener = urllib2.build_opener(digest_handler, urllib2.HTTPErrorProcessor())

    def _make_url(self, endpoint):
        # Normalize the slashes so the result is always "<root>/<endpoint>/".
        return "%s/%s/" % (self.url_root.rstrip("/"), endpoint.strip("/"))

    def post(self, endpoint, data=None, headers=None, timeout_secs=_TIMEOUT_SECS):
        """
        Sends a POST request to the specified endpoint with the supplied
        data.

        Returns the response, either as a string or a JSON object based
        on the content type.
        """
        body = json.dumps(utils.default_if_none(data, []), encoding="utf-8")
        request_headers = utils.default_if_none(headers, {})
        request_headers["Content-Type"] = "application/json; charset=utf-8"
        request = urllib2.Request(url=self._make_url(endpoint),
                                  data=body,
                                  headers=request_headers)
        response = self.url_opener.open(request, timeout=timeout_secs)
        response_headers = response.info()
        if response_headers.gettype() == "application/json":
            encoding = response_headers.getparam("charset") or "utf-8"
            return json.load(response, encoding=encoding)
        return response.read()

View File

@ -0,0 +1,37 @@
"""
Module to hold the logger instances themselves.
"""
from __future__ import absolute_import
import logging
# Names of the top-level logging components configured by resmoke.py.
EXECUTOR_LOGGER_NAME = "executor"  # Logger name for the test execution framework.
FIXTURE_LOGGER_NAME = "fixture"  # Logger name for fixture output.
TESTS_LOGGER_NAME = "tests"  # Logger name for the output of the tests.
def new_logger(logger_name, parent=None):
    """
    Returns a new logging.Logger instance with the specified name.

    The logger handles all messages it receives (level DEBUG) and, when
    'parent' is given, propagates its records to that parent logger.
    """
    new_instance = logging.Logger(logger_name, level=logging.DEBUG)
    if parent is not None:
        new_instance.parent = parent
        new_instance.propagate = True
    return new_instance
# The top-level logger instances, one per logging component.
EXECUTOR = new_logger(EXECUTOR_LOGGER_NAME)
FIXTURE = new_logger(FIXTURE_LOGGER_NAME)
TESTS = new_logger(TESTS_LOGGER_NAME)
# Lookup table from component name to its logger instance.
LOGGERS_BY_NAME = {
    EXECUTOR_LOGGER_NAME: EXECUTOR,
    FIXTURE_LOGGER_NAME: FIXTURE,
    TESTS_LOGGER_NAME: TESTS,
}
# Used when log messages cannot be sent to the buildlogger server.
_BUILDLOGGER_FALLBACK = new_logger("fallback")

View File

@ -0,0 +1,281 @@
"""
Parser for command line arguments.
"""
from __future__ import absolute_import
import os
import os.path
import optparse
from . import config as _config
from . import testing
from . import utils
from .. import resmokeconfig
# Mapping of the attribute of the parsed arguments (dest) to its key as it appears in the options
# YAML configuration file. Most should only be converting from snake_case to camelCase; the
# executable paths and WiredTiger settings are the exceptions.
DEST_TO_CONFIG = {
    "buildlogger_url": "buildloggerUrl",
    "continue_on_failure": "continueOnFailure",
    "dbpath_prefix": "dbpathPrefix",
    "dbtest_executable": "dbtest",
    "dry_run": "dryRun",
    "jobs": "jobs",
    "mongo_executable": "mongo",
    "mongod_executable": "mongod",
    "mongos_executable": "mongos",
    "no_journal": "nojournal",
    "no_prealloc_journal": "nopreallocj",
    "repeat": "repeat",
    "report_file": "reportFile",
    "seed": "seed",
    "shell_write_mode": "shellWriteMode",
    "shuffle": "shuffle",
    "storage_engine": "storageEngine",
    "wt_coll_config": "wiredTigerCollectionConfigString",
    "wt_engine_config": "wiredTigerEngineConfigString",
    "wt_index_config": "wiredTigerIndexConfigString"
}
def parse_command_line():
    """
    Parses the command line arguments passed to resmoke.py.

    Returns the (values, args) pair produced by optparse, where 'values'
    holds the attribute named by each option's dest and 'args' is the
    list of positional arguments.
    """
    parser = optparse.OptionParser()
    parser.add_option("--suites", dest="suite_files", metavar="SUITE1,SUITE2",
                      help=("Comma separated list of YAML files that each specify the configuration"
                            " of a suite. If the file is located in the resmokeconfig/suites/"
                            " directory, then the basename without the .yml extension can be"
                            " specified, e.g. 'core'."))
    parser.add_option("--log", dest="logger_file", metavar="LOGGER",
                      help=("A YAML file that specifies the logging configuration. If the file is"
                            " located in the resmokeconfig/suites/ directory, then the basename"
                            " without the .yml extension can be specified, e.g. 'console'."))
    parser.add_option("--options", dest="options_file", metavar="OPTIONS",
                      help="A YAML file that specifies global options to resmoke.py.")
    parser.add_option("--buildloggerUrl", action="store", dest="buildlogger_url", metavar="URL",
                      help="The root url of the buildlogger server.")
    parser.add_option("--continueOnFailure", action="store_true", dest="continue_on_failure",
                      help="Executes all tests in all suites, even if some of them fail.")
    parser.add_option("--dbpathPrefix", dest="dbpath_prefix", metavar="PATH",
                      help=("The directory which will contain the dbpaths of any mongod's started "
                            " by resmoke.py or the tests themselves."))
    parser.add_option("--dbtest", dest="dbtest_executable", metavar="PATH",
                      help="The path to the dbtest executable for resmoke to use.")
    # "-n" is a shorthand for --dryRun=tests.
    parser.add_option("-n", action="store_const", const="tests", dest="dry_run",
                      help=("Output the tests that would be run."))
    # TODO: add support for --dryRun=commands
    parser.add_option("--dryRun", type="choice", action="store", dest="dry_run",
                      choices=("off", "tests"), metavar="MODE",
                      help=("Instead of running the tests, output the tests that would be run"
                            " (if MODE=tests). Defaults to MODE=%default."))
    parser.add_option("-j", "--jobs", type="int", dest="jobs", metavar="JOBS",
                      help=("The number of Job instances to use. Each instance will receive its own"
                            " MongoDB deployment to dispatch tests to."))
    parser.add_option("-l", "--listSuites", action="store_true", dest="list_suites",
                      help="List the names of the suites available to execute.")
    parser.add_option("--mongo", dest="mongo_executable", metavar="PATH",
                      help="The path to the mongo shell executable for resmoke.py to use.")
    parser.add_option("--mongod", dest="mongod_executable", metavar="PATH",
                      help="The path to the mongod executable for resmoke.py to use.")
    parser.add_option("--mongos", dest="mongos_executable", metavar="PATH",
                      help="The path to the mongos executable for resmoke.py to use.")
    parser.add_option("--nojournal", action="store_true", dest="no_journal",
                      help="Disable journaling for all mongod's.")
    parser.add_option("--nopreallocj", action="store_true", dest="no_prealloc_journal",
                      help="Disable preallocation of journal files for all mongod's.")
    parser.add_option("--repeat", type="int", dest="repeat", metavar="N",
                      help="Repeat the given suite(s) N times, or until one fails.")
    parser.add_option("--reportFile", dest="report_file", metavar="REPORT",
                      help="Write a JSON file with test status and timing information.")
    parser.add_option("--seed", type="int", dest="seed", metavar="SEED",
                      help=("Seed for the random number generator. Useful in combination with the"
                            " --shuffle option for producing a consistent test execution order."))
    parser.add_option("--shellWriteMode", type="choice", action="store", dest="shell_write_mode",
                      choices=("commands", "compatibility", "legacy"), metavar="WRITE_MODE",
                      help="The write mode used by the mongo shell.")
    parser.add_option("--shuffle", action="store_true", dest="shuffle",
                      help="Randomize the order in which tests are executed.")
    parser.add_option("--storageEngine", dest="storage_engine", metavar="ENGINE",
                      help="The storage engine used by dbtests and jstests.")
    parser.add_option("--wiredTigerCollectionConfigString", dest="wt_coll_config", metavar="CONFIG",
                      help="Set the WiredTiger collection configuration setting for all mongod's.")
    parser.add_option("--wiredTigerEngineConfigString", dest="wt_engine_config", metavar="CONFIG",
                      help="Set the WiredTiger engine configuration setting for all mongod's.")
    parser.add_option("--wiredTigerIndexConfigString", dest="wt_index_config", metavar="CONFIG",
                      help="Set the WiredTiger index configuration setting for all mongod's.")
    # Unspecified options are left as None so update_config_vars() can tell them apart
    # from explicitly provided values; these three always need concrete defaults.
    parser.set_defaults(logger_file="console", dry_run="off", list_suites=False)
    return parser.parse_args()
def get_logging_config(values):
    """
    Loads the logging configuration named by the parsed command line
    options.
    """
    return _get_logging_config(values.logger_file)
def update_config_vars(values):
    """
    Updates the module-level variables of the config module from the
    built-in defaults, the options YAML file, and the parsed command
    line options (in increasing order of precedence).

    Raises an OptionValueError if the resulting configuration contains
    a key that does not correspond to a known configuration variable.
    """
    options = _get_options_config(values.options_file)
    # Start from the defaults and layer the options file on top.
    config = _config.DEFAULTS.copy()
    config.update(options)
    # Command line options override both; unspecified options are None.
    values = vars(values)
    for dest in values:
        if dest not in DEST_TO_CONFIG:
            continue
        config_var = DEST_TO_CONFIG[dest]
        if values[dest] is not None:
            config[config_var] = values[dest]
    # pop() is used so that any leftover keys can be reported as unknown below.
    _config.BUILDLOGGER_URL = config.pop("buildloggerUrl")
    _config.DBPATH_PREFIX = _expand_user(config.pop("dbpathPrefix"))
    _config.DBTEST_EXECUTABLE = _expand_user(config.pop("dbtest"))
    _config.DRY_RUN = config.pop("dryRun")
    _config.FAIL_FAST = not config.pop("continueOnFailure")
    _config.JOBS = config.pop("jobs")
    _config.MONGO_EXECUTABLE = _expand_user(config.pop("mongo"))
    _config.MONGOD_EXECUTABLE = _expand_user(config.pop("mongod"))
    _config.MONGOS_EXECUTABLE = _expand_user(config.pop("mongos"))
    _config.NO_JOURNAL = config.pop("nojournal")
    _config.NO_PREALLOC_JOURNAL = config.pop("nopreallocj")
    _config.RANDOM_SEED = config.pop("seed")
    _config.REPEAT = config.pop("repeat")
    _config.REPORT_FILE = config.pop("reportFile")
    _config.SHELL_WRITE_MODE = config.pop("shellWriteMode")
    _config.SHUFFLE = config.pop("shuffle")
    _config.STORAGE_ENGINE = config.pop("storageEngine")
    _config.WT_COLL_CONFIG = config.pop("wiredTigerCollectionConfigString")
    _config.WT_ENGINE_CONFIG = config.pop("wiredTigerEngineConfigString")
    _config.WT_INDEX_CONFIG = config.pop("wiredTigerIndexConfigString")
    if config:
        raise optparse.OptionValueError("Unknown option(s): %s" % (config.keys()))
def get_suites(values, args):
    """
    Returns the list of suites to execute, built either from the
    --suites option or from a list of jstest filenames given as
    positional arguments (exactly one of the two must be present).
    """
    specified_suites = values.suite_files is not None
    if specified_suites == bool(args):
        raise optparse.OptionValueError("Must specify either --suites or a list of tests")
    # If there are no suites specified, but there are args, assume they are jstests.
    if args:
        # No specified config, just use the following, and default the logging and executor.
        jstests_config = _make_jstests_config(args)
        _ensure_executor(jstests_config)
        return [testing.suite.Suite("<jstests>", jstests_config)]
    suites = []
    for suite_filename in values.suite_files.split(","):
        suite_config = _get_suite_config(suite_filename)
        _ensure_executor(suite_config)
        suites.append(testing.suite.Suite(suite_filename, suite_config))
    return suites
def get_named_suites():
    """
    Returns the sorted list of suite names available to execute.
    """
    # Skip "with_server" because it does not define any test files to run.
    return sorted(name for name in resmokeconfig.NAMED_SUITES if name != "with_server")
def _get_logging_config(pathname):
    """
    Attempts to read a YAML configuration from 'pathname' that describes
    how resmoke.py should log the tests and fixtures.
    """
    # Named loggers are specified as the basename of the file, without the .yml extension.
    if not os.path.dirname(pathname) and not utils.is_yaml_file(pathname):
        if pathname not in resmokeconfig.NAMED_LOGGERS:
            raise optparse.OptionValueError("Unknown logger '%s'" % (pathname))
        # Expand 'pathname' to the full path of the named logger file.
        pathname = resmokeconfig.NAMED_LOGGERS[pathname]
    return utils.load_yaml_file(pathname).pop("logging")
def _get_options_config(pathname):
    """
    Attempts to read a YAML configuration from 'pathname' that describes
    any modifications to global options.

    Returns an empty dictionary when no options file was specified.
    """
    if pathname is None:
        return {}
    options = utils.load_yaml_file(pathname)
    return options.pop("options")
def _get_suite_config(pathname):
    """
    Attempts to read a YAML configuration from 'pathname' that describes
    what tests to run and how to run them.
    """
    # Named suites are specified as the basename of the file, without the .yml extension.
    if not os.path.dirname(pathname) and not utils.is_yaml_file(pathname):
        if pathname not in resmokeconfig.NAMED_SUITES:
            raise optparse.OptionValueError("Unknown suite '%s'" % (pathname))
        # Expand 'pathname' to the full path of the named suite file.
        pathname = resmokeconfig.NAMED_SUITES[pathname]
    return utils.load_yaml_file(pathname)
def _make_jstests_config(js_files):
    """
    Builds a minimal suite configuration that selects 'js_files' as the
    jstests to run.

    Raises an OptionValueError if any entry is not an existing JS file.
    """
    for pathname in js_files:
        if utils.is_js_file(pathname) and os.path.isfile(pathname):
            continue
        raise optparse.OptionValueError("Expected a list of JS files, but got '%s'"
                                        % (pathname))
    return {"selector": {"js_test": {"roots": js_files}}}
def _ensure_executor(suite_config):
    """
    Fills in the executor configuration from the "with_server" suite
    when 'suite_config' does not specify one of its own.
    """
    if "executor" in suite_config:
        return
    with_server_path = resmokeconfig.NAMED_SUITES["with_server"]
    suite_config["executor"] = utils.load_yaml_file(with_server_path).pop("executor")
def _expand_user(pathname):
"""
Wrapper around os.path.expanduser() to do nothing when given None.
"""
if pathname is None:
return None
return os.path.expanduser(pathname)

View File

@ -0,0 +1,261 @@
"""
Test selection utility.
Defines filtering rules for what tests to include in a suite depending
on whether they apply to C++ unit tests, dbtests, or JS tests.
"""
from __future__ import absolute_import
import fnmatch
import os.path
import subprocess
import sys
from . import config
from . import errors
from . import utils
from .utils import globstar
from .utils import jscomment
def filter_cpp_unit_tests(root="build/unittests.txt", include_files=None, exclude_files=None):
    """
    Filters out what C++ unit tests to run.

    'root' names a file listing one unit test binary per line.
    """
    include_files = utils.default_if_none(include_files, [])
    exclude_files = utils.default_if_none(exclude_files, [])
    with open(root, "r") as fp:
        unit_tests = [line.rstrip() for line in fp]
    (remaining, included, _) = _filter_by_filename("C++ unit test",
                                                   unit_tests,
                                                   include_files,
                                                   exclude_files)
    if include_files:
        return list(included)
    if exclude_files:
        return list(remaining)
    return unit_tests
def filter_dbtests(binary=None, include_suites=None):
    """
    Filters out what dbtests to run.

    The available suites are discovered by running the dbtest binary
    with --list; 'include_suites' may contain exact names or glob
    patterns.
    """
    # Command line option overrides the YAML configuration.
    binary = utils.default_if_none(config.DBTEST_EXECUTABLE, binary)
    # Use the default if nothing specified.
    binary = utils.default_if_none(binary, config.DEFAULT_DBTEST_EXECUTABLE)
    include_suites = utils.default_if_none(include_suites, [])
    if not utils.is_string_list(include_suites):
        raise TypeError("include_suites must be a list of strings")
    # Ensure that executable files on Windows have a ".exe" extension.
    if sys.platform == "win32" and os.path.splitext(binary)[1] != ".exe":
        binary += ".exe"
    program = subprocess.Popen([binary, "--list"], stdout=subprocess.PIPE)
    stdout = program.communicate()[0]
    if program.returncode != 0:
        raise errors.ResmokeError("Getting list of dbtest suites failed")
    dbtests = stdout.splitlines()
    if not include_suites:
        return dbtests
    dbtests = set(dbtests)
    (verbatim, globbed) = _partition(include_suites, normpath=False)
    # Exact names are validated against the discovered suites; patterns are not.
    included = _pop_all("dbtest suite", dbtests, verbatim)
    for suite_pattern in globbed:
        included.update(suite_name for suite_name in dbtests
                        if fnmatch.fnmatchcase(suite_name, suite_pattern))
    return list(included)
def filter_jstests(roots,
                   include_files=None,
                   include_with_all_tags=None,
                   include_with_any_tags=None,
                   exclude_files=None,
                   exclude_with_all_tags=None,
                   exclude_with_any_tags=None):
    """
    Filters out what jstests to run.

    'roots' is a list of glob patterns naming the candidate JS files.
    At most one of the four tag arguments may be non-empty; tags are
    matched against the tags parsed out of each test file's comments.

    Raises a TypeError/ValueError when the arguments are malformed.
    """
    include_files = utils.default_if_none(include_files, [])
    exclude_files = utils.default_if_none(exclude_files, [])
    include_with_all_tags = set(utils.default_if_none(include_with_all_tags, []))
    include_with_any_tags = set(utils.default_if_none(include_with_any_tags, []))
    exclude_with_all_tags = set(utils.default_if_none(exclude_with_all_tags, []))
    exclude_with_any_tags = set(utils.default_if_none(exclude_with_any_tags, []))
    using_tags = 0
    for (name, value) in (("include_with_all_tags", include_with_all_tags),
                          ("include_with_any_tags", include_with_any_tags),
                          ("exclude_with_all_tags", exclude_with_all_tags),
                          ("exclude_with_any_tags", exclude_with_any_tags)):
        if not utils.is_string_set(value):
            raise TypeError("%s must be a list of strings" % (name))
        if len(value) > 0:
            using_tags += 1
    if using_tags > 1:
        raise ValueError("Can only specify one of 'include_with_all_tags', 'include_with_any_tags',"
                         " 'exclude_with_all_tags', and 'exclude_with_any_tags'")
    jstests = []
    for root in roots:
        jstests.extend(globstar.iglob(root))
    (remaining, included, _) = _filter_by_filename("jstest",
                                                   jstests,
                                                   include_files,
                                                   exclude_files)
    # Skip parsing comments if not using tags
    if not using_tags:
        if include_files:
            return list(included)
        elif exclude_files:
            return list(remaining)
        return jstests
    jstests = set(remaining)
    excluded = set()
    for filename in jstests:
        file_tags = set(jscomment.get_tags(filename))
        # "all" requires the file's tags to be a superset of the requested tags;
        # "any" requires a non-empty intersection. Only one branch can trigger
        # since at most one tag argument is non-empty.
        if include_with_all_tags and not include_with_all_tags - file_tags:
            included.add(filename)
        elif include_with_any_tags and include_with_any_tags & file_tags:
            included.add(filename)
        elif exclude_with_all_tags and not exclude_with_all_tags - file_tags:
            excluded.add(filename)
        elif exclude_with_any_tags and exclude_with_any_tags & file_tags:
            excluded.add(filename)
    if include_with_all_tags or include_with_any_tags:
        if exclude_files:
            return list((included & jstests) - excluded)
        return list(included)
    else:
        if include_files:
            return list(included | (jstests - excluded))
        return list(jstests - excluded)
def _filter_by_filename(kind, universe, include_files, exclude_files):
    """
    Filters out what tests to run solely by filename.

    Returns the triplet (remaining, included, excluded), where
    'remaining' is 'universe' after 'included' and 'excluded' were
    removed from it.
    """
    if not utils.is_string_list(include_files):
        raise TypeError("include_files must be a list of strings")
    if not utils.is_string_list(exclude_files):
        raise TypeError("exclude_files must be a list of strings")
    if include_files and exclude_files:
        raise ValueError("Cannot specify both include_files and exclude_files")
    universe = set(universe)
    if not include_files and not exclude_files:
        return (universe, set(), set())
    # The include and exclude cases are symmetric; compute the matched set once.
    (verbatim, globbed) = _partition(include_files if include_files else exclude_files)
    # Remove all matching files of 'verbatim' from 'universe'.
    matched_verbatim = _pop_all(kind, universe, verbatim)
    matched_globbed = set()
    for file_pattern in globbed:
        matched_globbed.update(globstar.iglob(file_pattern))
    # Remove all matching files of 'matched_globbed' from 'universe' without checking whether
    # the same file is expanded to multiple times. This implicitly takes an intersection
    # between 'matched_globbed' and 'universe'.
    matched_globbed = _pop_all(kind, universe, matched_globbed, validate=False)
    matched = matched_verbatim | matched_globbed
    if include_files:
        return (universe, matched, set())
    return (universe, set(), matched)
def _partition(pathnames, normpath=True):
    """
    Splits 'pathnames' into two separate lists based on whether they
    use a glob pattern.

    Returns the pair (non-globbed pathnames, globbed pathnames).
    """
    verbatim = []
    globbed = []
    for pathname in pathnames:
        if globstar.is_glob_pattern(pathname):
            globbed.append(pathname)
        else:
            # Normalize 'pathname' so exact string comparison can be used later.
            verbatim.append(os.path.normpath(pathname) if normpath else pathname)
    return (verbatim, globbed)
def _pop_all(kind, universe, iterable, validate=True):
"""
Removes all elements of 'iterable' from 'universe' and returns them.
If 'validate' is true, then a ValueError is raised if a element
would be removed multiple times, or if an element of 'iterable' does
not appear in 'universe' at all.
"""
members = set()
for elem in iterable:
if validate and elem in members:
raise ValueError("%s '%s' specified multiple times" % (kind, elem))
if elem in universe:
universe.remove(elem)
members.add(elem)
elif validate:
raise ValueError("Unrecognized %s '%s'" % (kind, elem))
return members

View File

@ -0,0 +1,9 @@
"""
Extension to the unittest package to support buildlogger and parallel
test execution.
"""
from __future__ import absolute_import
from . import executor
from . import suite

View File

@ -0,0 +1,303 @@
"""
Driver of the test execution framework.
"""
from __future__ import absolute_import
import threading
from . import fixtures
from . import hooks as _hooks
from . import job as _job
from . import report as _report
from . import testcases
from .. import config as _config
from .. import errors
from .. import logging
from .. import utils
from ..utils import queue as _queue
class TestGroupExecutor(object):
    """
    Executes a test group.

    Responsible for setting up and tearing down the fixtures that the
    tests execute against.
    """

    # Timeout passed to Queue.join() so that KeyboardInterrupt exceptions can
    # still be delivered to the main thread while waiting for the jobs.
    _TIMEOUT = 24 * 60 * 60  # =1 day (a long time to have tests run)

    def __init__(self,
                 exec_logger,
                 test_group,
                 logging_config,
                 config=None,
                 fixture=None,
                 hooks=None):
        """
        Initializes the TestGroupExecutor with the test group to run.

        'config', 'fixture', and 'hooks' are the (optional) YAML
        configuration sections for the tests, the fixture to run them
        against, and the custom behaviors, respectively.
        """
        # Build a logger for executing this group of tests.
        logger_name = "%s:%s" % (exec_logger.name, test_group.test_kind)
        self.logger = logging.loggers.new_logger(logger_name, parent=exec_logger)

        self.logging_config = logging_config
        self.fixture_config = fixture
        self.hooks_config = utils.default_if_none(hooks, [])
        self.test_config = utils.default_if_none(config, {})

        self._test_group = test_group

        self._using_buildlogger = logging.config.using_buildlogger(logging_config)
        self._build_config = None

        if self._using_buildlogger:
            self._build_config = logging.buildlogger.get_config()

        # Must be done after getting buildlogger configuration.
        self._jobs = [self._make_job(job_num) for job_num in xrange(_config.JOBS)]

    def run(self):
        """
        Executes the test group.

        Any exceptions that occur during setting up or tearing down a
        fixture are propagated.

        Records the outcome in self._test_group.return_code: 0 on
        success, 1 if any test failed, and 2 if a fixture could not be
        set up or torn down.
        """
        self.logger.info("Starting execution of %ss...", self._test_group.test_kind)

        return_code = 0
        try:
            if not self._setup_fixtures():
                return_code = 2
                return

            num_repeats = _config.REPEAT
            while num_repeats > 0:
                test_queue = self._make_test_queue()
                self._test_group.record_start()
                (report, interrupted) = self._run_tests(test_queue)
                self._test_group.record_end(report)

                # If the user triggered a KeyboardInterrupt, then we should stop.
                if interrupted:
                    raise errors.StopExecution("Received interrupt from user")

                sb = []
                self._test_group.summarize(sb)
                self.logger.info("Summary: %s", "\n ".join(sb))

                if not report.wasSuccessful():
                    return_code = 1
                    if _config.FAIL_FAST:
                        break
                num_repeats -= 1
        finally:
            # Fixtures are torn down (and the return code recorded) even when a
            # StopExecution or fixture-setup exception is propagating.
            if not self._teardown_fixtures():
                return_code = 2
            self._test_group.return_code = return_code

    def _setup_fixtures(self):
        """
        Sets up a fixture for each job.

        Returns true if every fixture was started and became ready, and
        false otherwise.
        """
        # Start all fixtures first so their startup can overlap.
        for job in self._jobs:
            try:
                job.fixture.setup()
            except:
                self.logger.exception("Encountered an error while setting up %s.", job.fixture)
                return False

        # Once they have all been started, wait for them to become available.
        for job in self._jobs:
            try:
                job.fixture.await_ready()
            except:
                self.logger.exception("Encountered an error while waiting for %s to be ready",
                                      job.fixture)
                return False

        return True

    def _run_tests(self, test_queue):
        """
        Starts a thread for each Job instance and blocks until all of
        the tests are run.

        Returns a (combined report, user interrupted) pair, where the
        report contains the status and timing information of tests run
        by all of the threads.
        """
        threads = []
        interrupt_flag = threading.Event()
        user_interrupted = False
        try:
            # Run each Job instance in its own thread.
            for job in self._jobs:
                t = threading.Thread(target=job, args=(test_queue, interrupt_flag))
                # Do not wait for tests to finish executing if interrupted by the user.
                t.daemon = True
                t.start()
                threads.append(t)

            joined = False
            while not joined:
                # Need to pass a timeout to join() so that KeyboardInterrupt exceptions
                # are propagated.
                joined = test_queue.join(TestGroupExecutor._TIMEOUT)
        except (KeyboardInterrupt, SystemExit):
            interrupt_flag.set()
            user_interrupted = True
        else:
            # Only wait for all the Job instances if not interrupted by the user.
            for t in threads:
                t.join()

        reports = [job.report for job in self._jobs]
        combined_report = _report.TestReport.combine(*reports)

        # We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
        # instance if a test fails and it decides to drain the queue. We only want to raise a
        # StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt.
        return (combined_report, user_interrupted)

    def _teardown_fixtures(self):
        """
        Tears down all of the fixtures.

        Returns true if all fixtures were torn down successfully, and
        false otherwise.
        """
        success = True
        for job in self._jobs:
            try:
                if not job.fixture.teardown():
                    self.logger.warn("Teardown of %s was not successful.", job.fixture)
                    success = False
            except:
                self.logger.exception("Encountered an error while tearing down %s.", job.fixture)
                success = False

        return success

    def _get_build_id(self, job_num):
        """
        Returns a unique build id for a job.

        Returns the pair (build id, build config); the build id is None
        when buildlogger is not in use or could not be configured.
        """
        build_config = self._build_config

        if self._using_buildlogger:
            # Use a distinct "builder" for each job in order to separate their logs.
            if build_config is not None and "builder" in build_config:
                build_config = build_config.copy()
                build_config["builder"] = "%s_job%d" % (build_config["builder"], job_num)

            build_id = logging.buildlogger.new_build_id(build_config)

            if build_config is None or build_id is None:
                self.logger.info("Encountered an error configuring buildlogger for job #%d, falling"
                                 " back to stderr.", job_num)

            return build_id, build_config

        return None, build_config

    def _make_fixture(self, job_num, build_id, build_config):
        """
        Creates a fixture for a job.
        """
        fixture_config = {}
        fixture_class = fixtures.NOOP_FIXTURE_CLASS

        if self.fixture_config is not None:
            fixture_config = self.fixture_config.copy()
            fixture_class = fixture_config.pop("class")

        logger_name = "%s:job%d" % (fixture_class, job_num)
        logger = logging.loggers.new_logger(logger_name, parent=logging.loggers.FIXTURE)
        logging.config.apply_buildlogger_global_handler(logger,
                                                        self.logging_config,
                                                        build_id=build_id,
                                                        build_config=build_config)

        return fixtures.make_fixture(fixture_class, logger, job_num, **fixture_config)

    def _make_hooks(self, job_num, fixture):
        """
        Creates the custom behaviors for the job's fixture.
        """
        behaviors = []

        for behavior_config in self.hooks_config:
            behavior_config = behavior_config.copy()
            behavior_class = behavior_config.pop("class")

            logger_name = "%s:job%d" % (behavior_class, job_num)
            logger = logging.loggers.new_logger(logger_name, parent=self.logger)
            behavior = _hooks.make_custom_behavior(behavior_class,
                                                   logger,
                                                   fixture,
                                                   **behavior_config)
            behaviors.append(behavior)

        return behaviors

    def _make_job(self, job_num):
        """
        Returns a Job instance with its own fixture, hooks, and test
        report.
        """
        build_id, build_config = self._get_build_id(job_num)
        fixture = self._make_fixture(job_num, build_id, build_config)
        hooks = self._make_hooks(job_num, fixture)

        logger_name = "%s:job%d" % (self.logger.name, job_num)
        logger = logging.loggers.new_logger(logger_name, parent=self.logger)

        if build_id is not None:
            endpoint = logging.buildlogger.APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
            url = "%s/%s/" % (_config.BUILDLOGGER_URL.rstrip("/"), endpoint.strip("/"))
            logger.info("Writing output of job #%d to %s.", job_num, url)

        report = _report.TestReport(logger,
                                    self.logging_config,
                                    build_id=build_id,
                                    build_config=build_config)

        return _job.Job(logger, fixture, hooks, report)

    def _make_test_queue(self):
        """
        Returns a queue of TestCase instances.

        Use a multi-consumer queue instead of a unittest.TestSuite so
        that the test cases can be dispatched to multiple threads.
        """
        test_kind_logger = logging.loggers.new_logger(self._test_group.test_kind,
                                                      parent=logging.loggers.TESTS)

        # Put all the test cases in a queue.
        queue = _queue.Queue()
        for test_name in self._test_group.tests:
            test_case = testcases.make_test_case(self._test_group.test_kind,
                                                 test_kind_logger,
                                                 test_name,
                                                 **self.test_config)
            queue.put(test_case)

        # Add sentinel value for each job to indicate when there are no more items to process.
        for _ in xrange(_config.JOBS):
            queue.put(None)

        return queue

View File

@ -0,0 +1,32 @@
"""
Fixtures for executing JSTests against.
"""
from __future__ import absolute_import
from .interface import Fixture, ReplFixture
from .standalone import MongoDFixture
from .replicaset import ReplicaSetFixture
from .masterslave import MasterSlaveFixture
from .shardedcluster import ShardedClusterFixture
# Name of the fixture class that does not start any servers; used when a suite
# provides no fixture configuration.
NOOP_FIXTURE_CLASS = "Fixture"

# Mapping from the class names accepted in suite configurations to the fixture
# classes they instantiate.
_FIXTURES = {
    "Fixture": Fixture,
    "MongoDFixture": MongoDFixture,
    "ReplicaSetFixture": ReplicaSetFixture,
    "MasterSlaveFixture": MasterSlaveFixture,
    "ShardedClusterFixture": ShardedClusterFixture,
}
def make_fixture(class_name, *args, **kwargs):
    """
    Factory function for creating Fixture instances.

    Raises a ValueError when 'class_name' is not a known fixture class.
    """
    fixture_class = _FIXTURES.get(class_name)
    if fixture_class is None:
        raise ValueError("Unknown fixture class '%s'" % (class_name))
    return fixture_class(*args, **kwargs)

View File

@ -0,0 +1,89 @@
"""
Interface of the different fixtures for executing JSTests against.
"""
from __future__ import absolute_import
from ... import logging
class Fixture(object):
    """
    Base class for all fixtures.

    A fixture manages the deployment (if any) that tests execute
    against; this base class is a no-op fixture that starts nothing.
    """

    def __init__(self, logger, job_num):
        """
        Initializes the fixture with a logger instance and the number of
        the job it belongs to.

        Raises:
            TypeError: if 'logger' is not a Logger instance, or if
                'job_num' is not an integer.
            ValueError: if 'job_num' is negative.
        """
        if not isinstance(logger, logging.Logger):
            raise TypeError("logger must be a Logger instance")

        if not isinstance(job_num, int):
            raise TypeError("job_num must be an integer")
        elif job_num < 0:
            raise ValueError("job_num must be a nonnegative integer")

        self.logger = logger
        self.job_num = job_num

        self.port = None  # Port that the mongo shell should connect to.

    def setup(self):
        """
        Creates the fixture.
        """
        pass

    def await_ready(self):
        """
        Blocks until the fixture can be used for testing.
        """
        pass

    def teardown(self):
        """
        Destroys the fixture. Return true if was successful, and false otherwise.
        """
        return True

    def is_running(self):
        """
        Returns true if the fixture is still operating and more tests
        can be run, and false otherwise.
        """
        return True

    def __str__(self):
        return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)

    def __repr__(self):
        # Use %s for the class name: it is already a string, so %r would wrap
        # it in quotes and yield "'Fixture'(...)" instead of "Fixture(...)".
        return "%s(%r, %r)" % (self.__class__.__name__, self.logger, self.job_num)
class ReplFixture(Fixture):
    """
    Base class for fixtures that provide a replicated deployment, e.g.
    a replica set or a master-slave pair.

    Subclasses must implement all three methods below.
    """

    def get_primary(self):
        """
        Returns the node that accepts writes: the primary of a replica
        set, or the master of a master-slave deployment.
        """
        raise NotImplementedError("get_primary must be implemented by ReplFixture subclasses")

    def get_secondaries(self):
        """
        Returns, as a list, the nodes that replicate from the primary:
        the secondaries of a replica set, or the slave of a master-slave
        deployment.
        """
        raise NotImplementedError("get_secondaries must be implemented by ReplFixture subclasses")

    def await_repl(self):
        """
        Blocks until every operation performed on the primary/master has
        been replicated to all other nodes.
        """
        raise NotImplementedError("await_repl must be implemented by ReplFixture subclasses")

View File

@ -0,0 +1,156 @@
"""
Master/slave fixture for executing JSTests against.
"""
from __future__ import absolute_import
import os.path
from . import interface
from . import standalone
from ... import config
from ... import logging
from ... import utils
class MasterSlaveFixture(interface.ReplFixture):
    """
    Fixture which provides JSTests with a master/slave deployment to
    run against.
    """

    # How long await_repl() waits for a w=2 write to be acknowledged by the
    # slave before timing out.
    AWAIT_REPL_TIMEOUT_MINS = 5

    def __init__(self,
                 logger,
                 job_num,
                 mongod_executable=None,
                 mongod_options=None,
                 master_options=None,
                 slave_options=None,
                 dbpath_prefix=None,
                 preserve_dbpath=False):
        """
        Initializes the fixture with the options used to start the
        master and slave mongod processes.

        Raises:
            ValueError: if 'mongod_options.dbpath' is specified; the
                fixture assigns each member its own dbpath.
        """
        interface.ReplFixture.__init__(self, logger, job_num)

        # Apply the default before the membership test; checking
        # '"dbpath" in mongod_options' first raised a TypeError whenever
        # 'mongod_options' was omitted (None).
        mongod_options = utils.default_if_none(mongod_options, {})
        if "dbpath" in mongod_options:
            raise ValueError("Cannot specify mongod_options.dbpath")

        self.mongod_executable = mongod_executable
        self.mongod_options = mongod_options
        self.master_options = utils.default_if_none(master_options, {})
        self.slave_options = utils.default_if_none(slave_options, {})
        self.preserve_dbpath = preserve_dbpath

        # Command line options override the YAML configuration.
        dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
        dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
        self._dbpath_prefix = os.path.join(dbpath_prefix,
                                           "job%d" % (self.job_num),
                                           config.FIXTURE_SUBDIR)

        self.master = None
        self.slave = None

    def setup(self):
        """
        Starts the master first (so its port is known to the slave),
        then the slave.
        """
        self.master = self._new_mongod_master()
        self.master.setup()
        self.port = self.master.port

        self.slave = self._new_mongod_slave()
        self.slave.setup()

    def await_ready(self):
        """
        Blocks until both members are accepting connections.
        """
        self.master.await_ready()
        self.slave.await_ready()

    def teardown(self):
        """
        Stops the slave, then the master. Returns true if both members
        were stopped successfully, and false otherwise.
        """
        running_at_start = self.is_running()
        success = True  # Still a success if nothing is running.

        if not running_at_start:
            self.logger.info("Master-slave deployment was expected to be running in teardown(),"
                             " but wasn't.")

        if self.slave is not None:
            if running_at_start:
                self.logger.info("Stopping slave...")

            success = self.slave.teardown()

            if running_at_start:
                self.logger.info("Successfully stopped slave.")

        if self.master is not None:
            if running_at_start:
                self.logger.info("Stopping master...")

            success = self.master.teardown() and success

            if running_at_start:
                self.logger.info("Successfully stopped master.")

        return success

    def is_running(self):
        """
        Returns true only if both the master and the slave are running.
        """
        return (self.master is not None and self.master.is_running() and
                self.slave is not None and self.slave.is_running())

    def get_primary(self):
        return self.master

    def get_secondaries(self):
        return [self.slave]

    def await_repl(self):
        """
        Inserts a document with w=2 so the call blocks until the slave
        has acknowledged the write (or the timeout elapses).
        """
        self.logger.info("Awaiting replication of insert (w=2, wtimeout=%d min) to master on port"
                         " %d", MasterSlaveFixture.AWAIT_REPL_TIMEOUT_MINS, self.port)
        repl_timeout = MasterSlaveFixture.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000
        client = utils.new_mongo_client(self.port)

        # Use the same database as the jstests to ensure that the slave doesn't acknowledge the
        # write as having completed before it has synced the test database.
        client.test.resmoke_await_repl.insert({}, w=2, wtimeout=repl_timeout)
        self.logger.info("Replication of write operation completed.")

    def _new_mongod(self, mongod_logger, mongod_options):
        """
        Returns a standalone.MongoDFixture with the specified logger and
        options.
        """
        return standalone.MongoDFixture(mongod_logger,
                                        self.job_num,
                                        mongod_executable=self.mongod_executable,
                                        mongod_options=mongod_options,
                                        preserve_dbpath=self.preserve_dbpath)

    def _new_mongod_master(self):
        """
        Returns a standalone.MongoDFixture configured to be used as the
        master of a master-slave deployment.
        """
        logger_name = "%s:master" % (self.logger.name)
        mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)

        mongod_options = self.mongod_options.copy()
        mongod_options.update(self.master_options)
        mongod_options["master"] = ""
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "master")
        return self._new_mongod(mongod_logger, mongod_options)

    def _new_mongod_slave(self):
        """
        Returns a standalone.MongoDFixture configured to be used as the
        slave of a master-slave deployment.
        """
        logger_name = "%s:slave" % (self.logger.name)
        mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)

        mongod_options = self.mongod_options.copy()
        mongod_options.update(self.slave_options)
        mongod_options["slave"] = ""
        mongod_options["source"] = "localhost:%d" % (self.port)
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "slave")
        return self._new_mongod(mongod_logger, mongod_options)

View File

@ -0,0 +1,183 @@
"""
Replica set fixture for executing JSTests against.
"""
from __future__ import absolute_import
import os.path
import time
import pymongo
from . import interface
from . import standalone
from ... import config
from ... import logging
from ... import utils
class ReplicaSetFixture(interface.ReplFixture):
    """
    Fixture which provides JSTests with a replica set to run against.
    """

    # How long await_repl() waits for a write to be acknowledged by every
    # member before timing out.
    AWAIT_REPL_TIMEOUT_MINS = 5

    def __init__(self,
                 logger,
                 job_num,
                 mongod_executable=None,
                 mongod_options=None,
                 dbpath_prefix=None,
                 preserve_dbpath=False,
                 num_nodes=2,
                 auth_options=None):
        """
        Initializes the fixture with the options used to start each
        mongod process of the 'num_nodes'-member replica set.
        """
        interface.ReplFixture.__init__(self, logger, job_num)

        self.mongod_executable = mongod_executable
        self.mongod_options = utils.default_if_none(mongod_options, {})
        self.preserve_dbpath = preserve_dbpath
        self.num_nodes = num_nodes
        self.auth_options = auth_options

        # The dbpath in mongod_options is used as the dbpath prefix for replica set members and
        # takes precedence over other settings. The ShardedClusterFixture uses this parameter to
        # create replica sets and assign their dbpath structure explicitly.
        if "dbpath" in self.mongod_options:
            self._dbpath_prefix = self.mongod_options.pop("dbpath")
        else:
            # Command line options override the YAML configuration.
            dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
            dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
            self._dbpath_prefix = os.path.join(dbpath_prefix,
                                               "job%d" % (self.job_num),
                                               config.FIXTURE_SUBDIR)

        self.nodes = []

    def setup(self):
        """
        Starts each member of the replica set and issues replSetInitiate
        through the first member.
        """
        replset_name = self.mongod_options.get("replSet", "rs")

        for i in xrange(self.num_nodes):
            node = self._new_mongod(i, replset_name)
            node.setup()
            self.nodes.append(node)

        self.port = self.get_primary().port

        # Call await_ready() on each of the nodes here because we want to start the election as
        # soon as possible.
        for node in self.nodes:
            node.await_ready()

        # Initiate the replica set. All members other than the first get priority 0
        # so that the first member is always the one elected primary.
        members = []
        for (i, node) in enumerate(self.nodes):
            conn_str = "localhost:%d" % (node.port)
            member_info = {"_id": i, "host": conn_str}
            if i > 0:
                member_info["priority"] = 0
            members.append(member_info)
        initiate_cmd_obj = {"replSetInitiate": {"_id": replset_name, "members": members}}

        client = utils.new_mongo_client(port=self.port)
        if self.auth_options is not None:
            auth_db = client[self.auth_options["authenticationDatabase"]]
            auth_db.authenticate(self.auth_options["username"],
                                 password=self.auth_options["password"],
                                 mechanism=self.auth_options["authenticationMechanism"])

        self.logger.info("Issuing replSetInitiate command...")
        client.admin.command(initiate_cmd_obj)

    def await_ready(self):
        """
        Blocks until a primary has been elected and every secondary is
        available.
        """
        # Wait for the primary to be elected.
        client = utils.new_mongo_client(port=self.port)
        while True:
            is_master = client.admin.command("isMaster")["ismaster"]
            if is_master:
                break
            self.logger.info("Waiting for primary on port %d to be elected.", self.port)
            time.sleep(1)  # Wait a little bit before trying again.

        # Wait for the secondaries to become available.
        for secondary in self.get_secondaries():
            client = utils.new_mongo_client(port=secondary.port,
                                            read_preference=pymongo.ReadPreference.SECONDARY)
            while True:
                is_secondary = client.admin.command("isMaster")["secondary"]
                if is_secondary:
                    break
                self.logger.info("Waiting for secondary on port %d to become available.",
                                 secondary.port)
                time.sleep(1)  # Wait a little bit before trying again.

    def teardown(self):
        """
        Stops every member of the replica set. Returns true if all
        members were stopped successfully, and false otherwise.
        """
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.

        if not running_at_start:
            self.logger.info("Replica set was expected to be running in teardown(), but wasn't.")
        else:
            self.logger.info("Stopping all members of the replica set...")

        # Terminate the secondaries first to reduce noise in the logs.
        # (The primary is nodes[0], so iterate in reverse.)
        for node in self.nodes[::-1]:
            success = node.teardown() and success

        if running_at_start:
            self.logger.info("Successfully stopped all members of the replica set.")

        return success

    def is_running(self):
        """
        Returns true only if every member of the replica set is running.
        """
        return all(node.is_running() for node in self.nodes)

    def get_primary(self):
        # The primary is always the first element of the 'nodes' list because all other members of
        # the replica set are configured with priority=0.
        return self.nodes[0]

    def get_secondaries(self):
        return self.nodes[1:]

    def await_repl(self):
        """
        Inserts a document with w=num_nodes so the call blocks until the
        write has been acknowledged by every member.
        """
        self.logger.info("Awaiting replication of insert (w=%d, wtimeout=%d min) to primary on port"
                         " %d", self.num_nodes, ReplicaSetFixture.AWAIT_REPL_TIMEOUT_MINS,
                         self.port)
        repl_timeout = ReplicaSetFixture.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000
        client = utils.new_mongo_client(port=self.port)
        # NOTE(review): uses the legacy pymongo insert() API — presumably pymongo 2.x;
        # verify against the driver version this is pinned to.
        client.resmoke.await_repl.insert({}, w=self.num_nodes, wtimeout=repl_timeout)
        self.logger.info("Replication of write operation completed.")

    def _new_mongod(self, index, replset_name):
        """
        Returns a standalone.MongoDFixture configured to be used as a
        replica-set member of 'replset_name'.
        """
        mongod_logger = self._get_logger_for_mongod(index)
        mongod_options = self.mongod_options.copy()
        mongod_options["replSet"] = replset_name
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "node%d" % (index))

        return standalone.MongoDFixture(mongod_logger,
                                        self.job_num,
                                        mongod_executable=self.mongod_executable,
                                        mongod_options=mongod_options,
                                        preserve_dbpath=self.preserve_dbpath)

    def _get_logger_for_mongod(self, index):
        """
        Returns a new logging.Logger instance for use as the primary or
        secondary of a replica-set.
        """
        if index == 0:
            logger_name = "%s:primary" % (self.logger.name)
        else:
            # Only number the secondaries when there is more than one of them.
            suffix = str(index - 1) if self.num_nodes > 2 else ""
            logger_name = "%s:secondary%s" % (self.logger.name, suffix)

        return logging.loggers.new_logger(logger_name, parent=self.logger)

View File

@ -0,0 +1,330 @@
"""
Sharded cluster fixture for executing JSTests against.
"""
from __future__ import absolute_import
import os.path
import time
import pymongo
from . import interface
from . import standalone
from . import replicaset
from ... import config
from ... import core
from ... import errors
from ... import logging
from ... import utils
class ShardedClusterFixture(interface.Fixture):
    """
    Fixture which provides JSTests with a sharded cluster to run
    against.
    """

    # Replica-set name used for the config servers when 'separate_configsvr'
    # is true.
    _CONFIGSVR_REPLSET_NAME = "config-rs"

    def __init__(self,
                 logger,
                 job_num,
                 mongos_executable=None,
                 mongos_options=None,
                 mongod_executable=None,
                 mongod_options=None,
                 dbpath_prefix=None,
                 preserve_dbpath=False,
                 num_shards=1,
                 separate_configsvr=True,
                 enable_sharding=None,
                 auth_options=None):
        """
        Initializes ShardedClusterFixture with the different options to
        the mongod and mongos processes.
        """
        interface.Fixture.__init__(self, logger, job_num)

        # NOTE(review): this raises TypeError when 'mongod_options' is None (its
        # default), because the membership test runs before default_if_none below —
        # confirm all callers pass a dict.
        if "dbpath" in mongod_options:
            raise ValueError("Cannot specify mongod_options.dbpath")

        self.mongos_executable = mongos_executable
        self.mongos_options = utils.default_if_none(mongos_options, {})
        self.mongod_executable = mongod_executable
        self.mongod_options = utils.default_if_none(mongod_options, {})
        self.preserve_dbpath = preserve_dbpath
        self.num_shards = num_shards
        self.separate_configsvr = separate_configsvr
        self.enable_sharding = utils.default_if_none(enable_sharding, [])
        self.auth_options = auth_options

        # Command line options override the YAML configuration.
        dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
        dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
        self._dbpath_prefix = os.path.join(dbpath_prefix,
                                           "job%d" % (self.job_num),
                                           config.FIXTURE_SUBDIR)

        self.configsvr = None
        self.mongos = None
        self.shards = []

    def setup(self):
        """
        Starts the config server (when separate) and each of the shards.
        The mongos is started later, in await_ready().
        """
        if self.separate_configsvr:
            self.configsvr = self._new_configsvr()
            self.configsvr.setup()

        # Start up each of the shards
        for i in xrange(self.num_shards):
            shard = self._new_shard(i)
            shard.setup()
            self.shards.append(shard)

    def await_ready(self):
        """
        Blocks until the config server and shards are ready, then starts
        the mongos, registers the shards with it, and enables sharding
        on the configured databases.
        """
        # Wait for the config server
        if self.configsvr is not None:
            self.configsvr.await_ready()

        # Wait for each of the shards
        for shard in self.shards:
            shard.await_ready()

        # Start up the mongos
        self.mongos = self._new_mongos()
        self.mongos.setup()

        # Wait for the mongos
        self.mongos.await_ready()
        self.port = self.mongos.port

        client = utils.new_mongo_client(port=self.port)
        if self.auth_options is not None:
            auth_db = client[self.auth_options["authenticationDatabase"]]
            auth_db.authenticate(self.auth_options["username"],
                                 password=self.auth_options["password"],
                                 mechanism=self.auth_options["authenticationMechanism"])

        # Inform mongos about each of the shards
        for shard in self.shards:
            self._add_shard(client, shard)

        # Enable sharding on each of the specified databases
        for db_name in self.enable_sharding:
            self.logger.info("Enabling sharding for '%s' database...", db_name)
            client.admin.command({"enablesharding": db_name})

    def teardown(self):
        """
        Shuts down the sharded cluster.
        """
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.

        if not running_at_start:
            self.logger.info("Sharded cluster was expected to be running in teardown(), but"
                             " wasn't.")

        if self.configsvr is not None:
            if running_at_start:
                self.logger.info("Stopping config server...")

            success = self.configsvr.teardown() and success

            if running_at_start:
                self.logger.info("Successfully terminated the config server.")

        if self.mongos is not None:
            if running_at_start:
                self.logger.info("Stopping mongos...")

            success = self.mongos.teardown() and success

            if running_at_start:
                self.logger.info("Successfully terminated the mongos.")

        if running_at_start:
            self.logger.info("Stopping shards...")
        for shard in self.shards:
            success = shard.teardown() and success
        if running_at_start:
            self.logger.info("Successfully terminated all shards.")

        return success

    def is_running(self):
        """
        Returns true if the config server, all shards, and the mongos
        are all still operating, and false otherwise.
        """
        return (self.configsvr is not None and self.configsvr.is_running() and
                all(shard.is_running() for shard in self.shards) and
                self.mongos is not None and self.mongos.is_running())

    def _new_configsvr(self):
        """
        Returns a replicaset.ReplicaSetFixture configured to be used as
        the config server of a sharded cluster.
        """
        logger_name = "%s:configsvr" % (self.logger.name)
        mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)

        mongod_options = self.mongod_options.copy()
        mongod_options["configsvr"] = ""
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "config")
        mongod_options["replSet"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME

        return replicaset.ReplicaSetFixture(mongod_logger,
                                            self.job_num,
                                            mongod_executable=self.mongod_executable,
                                            mongod_options=mongod_options,
                                            preserve_dbpath=self.preserve_dbpath,
                                            num_nodes=1,
                                            auth_options=self.auth_options)

    def _new_shard(self, index):
        """
        Returns a standalone.MongoDFixture configured to be used as a
        shard in a sharded cluster.
        """
        logger_name = "%s:shard%d" % (self.logger.name, index)
        mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)

        mongod_options = self.mongod_options.copy()
        mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard%d" % (index))

        return standalone.MongoDFixture(mongod_logger,
                                        self.job_num,
                                        mongod_executable=self.mongod_executable,
                                        mongod_options=mongod_options,
                                        preserve_dbpath=self.preserve_dbpath)

    def _new_mongos(self):
        """
        Returns a _MongoSFixture configured to be used as the mongos for
        a sharded cluster.
        """
        logger_name = "%s:mongos" % (self.logger.name)
        mongos_logger = logging.loggers.new_logger(logger_name, parent=self.logger)

        mongos_options = self.mongos_options.copy()
        if self.separate_configsvr:
            configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
            configdb_port = self.configsvr.port
            mongos_options["configdb"] = "%s/localhost:%d" % (configdb_replset, configdb_port)
        else:
            # Without a separate config server, the first shard doubles as the configdb.
            mongos_options["configdb"] = "localhost:%d" % (self.shards[0].port)

        return _MongoSFixture(mongos_logger,
                              self.job_num,
                              mongos_executable=self.mongos_executable,
                              mongos_options=mongos_options)

    def _add_shard(self, client, shard):
        """
        Add the specified program as a shard by executing the addShard
        command.

        See https://docs.mongodb.org/manual/reference/command/addShard
        for more details.
        """
        self.logger.info("Adding localhost:%d as a shard...", shard.port)
        client.admin.command({"addShard": "localhost:%d" % (shard.port)})
class _MongoSFixture(interface.Fixture):
    """
    Fixture which provides JSTests with a mongos to connect to.
    """

    def __init__(self,
                 logger,
                 job_num,
                 mongos_executable=None,
                 mongos_options=None):
        """
        Initializes the fixture with the options used to start the
        mongos process.
        """
        interface.Fixture.__init__(self, logger, job_num)

        # Command line options override the YAML configuration.
        self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE, mongos_executable)

        self.mongos_options = utils.default_if_none(mongos_options, {}).copy()

        # Handle to the mongos process once it has been started.
        self.mongos = None

    def setup(self):
        """
        Starts the mongos process, allocating an unused port when none
        was specified in the options.
        """
        if "chunkSize" not in self.mongos_options:
            self.mongos_options["chunkSize"] = 50

        if "port" not in self.mongos_options:
            with core.network.UnusedPort() as port:
                self.mongos_options["port"] = port.num
        self.port = self.mongos_options["port"]

        mongos = core.programs.mongos_program(self.logger,
                                              executable=self.mongos_executable,
                                              **self.mongos_options)
        try:
            self.logger.info("Starting mongos on port %d...\n%s", self.port, mongos.as_command())
            mongos.start()
            self.logger.info("mongos started on port %d with pid %d.", self.port, mongos.pid)
        except:
            # Log with full traceback before propagating the startup failure.
            self.logger.exception("Failed to start mongos on port %d.", self.port)
            raise

        self.mongos = mongos

    def await_ready(self):
        """
        Blocks until the mongos is accepting connections, or raises a
        ServerFailure if the process exits or the timeout elapses.
        """
        deadline = time.time() + standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS

        # Wait until server is accepting connections.
        while True:
            # Check whether the mongos exited for some reason.
            if self.mongos.poll() is not None:
                raise errors.ServerFailure("Could not connect to mongos on port %d, process ended"
                                           " unexpectedly." % (self.port))

            try:
                # Use a "ping" command as a cheap liveness probe.
                utils.new_mongo_client(port=self.port).admin.command("ping")
                break
            except pymongo.errors.ConnectionFailure:
                remaining = deadline - time.time()
                if remaining <= 0.0:
                    raise errors.ServerFailure(
                        "Failed to connect to mongos on port %d after %d seconds"
                        % (self.port, standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS))

                self.logger.info("Waiting to connect to mongos on port %d.", self.port)
                time.sleep(1)  # Wait a little bit before trying again.

        self.logger.info("Successfully contacted the mongos on port %d.", self.port)

    def teardown(self):
        """
        Stops the mongos process. Returns true if it exited with status
        0, and false otherwise.
        """
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.

        if not running_at_start:
            self.logger.info("mongos on port %d was expected to be running in teardown(), but"
                             " wasn't." % (self.port))

        if self.mongos is not None:
            if running_at_start:
                self.logger.info("Stopping mongos on port %d with pid %d...",
                                 self.port,
                                 self.mongos.pid)
                self.mongos.stop()

            success = self.mongos.wait() == 0

            if running_at_start:
                self.logger.info("Successfully terminated the mongos on port %d.", self.port)

        return success

    def is_running(self):
        """
        Returns true if the mongos process is still alive.
        """
        return self.mongos is not None and self.mongos.poll() is None

View File

@ -0,0 +1,137 @@
"""
Standalone mongod fixture for executing JSTests against.
"""
from __future__ import absolute_import
import os
import os.path
import shutil
import time
import pymongo
from . import interface
from ... import config
from ... import core
from ... import errors
from ... import utils
class MongoDFixture(interface.Fixture):
    """
    Fixture which provides JSTests with a standalone mongod to run
    against.
    """

    # How long await_ready() waits for the mongod to start accepting
    # connections before giving up.
    AWAIT_READY_TIMEOUT_SECS = 30

    def __init__(self,
                 logger,
                 job_num,
                 mongod_executable=None,
                 mongod_options=None,
                 dbpath_prefix=None,
                 preserve_dbpath=False):
        """
        Initializes the fixture with the options used to start the
        mongod process.

        Raises:
            ValueError: if both 'mongod_options.dbpath' and
                'dbpath_prefix' are specified, since they would
                conflict.
        """
        interface.Fixture.__init__(self, logger, job_num)

        # Apply the default before the membership test; checking
        # '"dbpath" in mongod_options' first raised a TypeError whenever
        # 'mongod_options' was omitted (None).
        mongod_options = utils.default_if_none(mongod_options, {}).copy()
        if "dbpath" in mongod_options and dbpath_prefix is not None:
            raise ValueError("Cannot specify both mongod_options.dbpath and dbpath_prefix")

        # Command line options override the YAML configuration.
        self.mongod_executable = utils.default_if_none(config.MONGOD_EXECUTABLE, mongod_executable)

        self.mongod_options = mongod_options
        self.preserve_dbpath = preserve_dbpath

        # The dbpath in mongod_options takes precedence over other settings to make it easier for
        # users to specify a dbpath containing data to test against.
        if "dbpath" not in self.mongod_options:
            # Command line options override the YAML configuration.
            dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
            dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
            self.mongod_options["dbpath"] = os.path.join(dbpath_prefix,
                                                         "job%d" % (self.job_num),
                                                         config.FIXTURE_SUBDIR)
        self._dbpath = self.mongod_options["dbpath"]

        # Handle to the mongod process once it has been started.
        self.mongod = None

    def setup(self):
        """
        Clears the dbpath (unless 'preserve_dbpath' is set) and starts
        the mongod process, allocating an unused port when none was
        specified in the options.
        """
        if not self.preserve_dbpath:
            shutil.rmtree(self._dbpath, ignore_errors=True)

        try:
            os.makedirs(self._dbpath)
        except os.error:
            # Directory already exists.
            pass

        if "port" not in self.mongod_options:
            with core.network.UnusedPort() as port:
                self.mongod_options["port"] = port.num
        self.port = self.mongod_options["port"]

        mongod = core.programs.mongod_program(self.logger,
                                              executable=self.mongod_executable,
                                              **self.mongod_options)
        try:
            self.logger.info("Starting mongod on port %d...\n%s", self.port, mongod.as_command())
            mongod.start()
            self.logger.info("mongod started on port %d with pid %d.", self.port, mongod.pid)
        except:
            # Log with full traceback before propagating the startup failure.
            self.logger.exception("Failed to start mongod on port %d.", self.port)
            raise

        self.mongod = mongod

    def await_ready(self):
        """
        Blocks until the mongod is accepting connections, or raises a
        ServerFailure if the process exits or the timeout elapses.
        """
        deadline = time.time() + MongoDFixture.AWAIT_READY_TIMEOUT_SECS

        # Wait until server is accepting connections.
        while True:
            # Check whether the mongod exited for some reason.
            if self.mongod.poll() is not None:
                raise errors.ServerFailure("Could not connect to mongod on port %d, process ended"
                                           " unexpectedly." % (self.port))

            try:
                # Use a "ping" command as a cheap liveness probe.
                utils.new_mongo_client(self.port).admin.command("ping")
                break
            except pymongo.errors.ConnectionFailure:
                remaining = deadline - time.time()
                if remaining <= 0.0:
                    raise errors.ServerFailure(
                        "Failed to connect to mongod on port %d after %d seconds"
                        % (self.port, MongoDFixture.AWAIT_READY_TIMEOUT_SECS))

                self.logger.info("Waiting to connect to mongod on port %d.", self.port)
                time.sleep(1)  # Wait a little bit before trying again.

        self.logger.info("Successfully contacted the mongod on port %d.", self.port)

    def teardown(self):
        """
        Stops the mongod process. Returns true if it exited with status
        0, and false otherwise.
        """
        running_at_start = self.is_running()
        success = True  # Still a success even if nothing is running.

        if not running_at_start:
            # Use lazy %-args for consistency with the other log calls in this class.
            self.logger.info("mongod on port %d was expected to be running in teardown(), but"
                             " wasn't.", self.port)

        if self.mongod is not None:
            if running_at_start:
                self.logger.info("Stopping mongod on port %d with pid %d...",
                                 self.port,
                                 self.mongod.pid)
                self.mongod.stop()

            success = self.mongod.wait() == 0

            if running_at_start:
                self.logger.info("Successfully terminated the mongod on port %d.", self.port)

        return success

    def is_running(self):
        """
        Returns true if the mongod process is still alive.
        """
        return self.mongod is not None and self.mongod.poll() is None

View File

@ -0,0 +1,211 @@
"""
Customize the behavior of a fixture by allowing special code to be
executed before or after each test, and before or after each suite.
"""
from __future__ import absolute_import
import os
import sys
from . import fixtures
from . import testcases
from .. import errors
from .. import logging
from .. import utils
def make_custom_behavior(class_name, *args, **kwargs):
    """
    Factory function for creating CustomBehavior instances.
    """
    try:
        behavior_cls = _CUSTOM_BEHAVIORS[class_name]
    except KeyError:
        raise ValueError("Unknown custom behavior class '%s'" % (class_name))
    return behavior_cls(*args, **kwargs)
class CustomBehavior(object):
    """
    The common interface all CustomBehaviors will inherit from.
    """

    @staticmethod
    def start_dynamic_test(test_case, test_report):
        """
        Registers a hook-created test case with 'test_report'.

        Any CustomBehavior that wants its own test case to show up in the
        test report must add it through this method so that it is counted
        as a dynamic test and the summary statistics come out right.
        """
        test_report.startTest(test_case, dynamic=True)

    def __init__(self, logger, fixture):
        """
        Initializes the CustomBehavior with the specified fixture.
        """
        if not isinstance(logger, logging.Logger):
            raise TypeError("logger must be a Logger instance")
        self.logger = logger
        self.fixture = fixture

    def before_suite(self, test_report):
        """
        Called by the test runner exactly once before any test of the
        suite is run.
        """
        pass

    def after_suite(self, test_report):
        """
        Called by the test runner exactly once after all tests have
        finished executing. Implementations should restore any state
        they changed so the behavior can be run again.
        """
        pass

    def before_test(self, test_report):
        """
        Called before each test executes.

        May raise a TestFailure if the test should be marked as a
        failure, or a ServerFailure if the fixture exits uncleanly or
        unexpectedly.
        """
        pass

    def after_test(self, test_report):
        """
        Called after each test executes.

        May raise a TestFailure if the test should be marked as a
        failure, or a ServerFailure if the fixture exits uncleanly or
        unexpectedly.
        """
        pass
class CleanEveryN(CustomBehavior):
    """
    Restarts the fixture after it has ran 'n' tests.
    On mongod-related fixtures, this will clear the dbpath.
    """

    DEFAULT_N = 20

    def __init__(self, logger, fixture, n=DEFAULT_N):
        """
        Initializes the behavior with the restart interval 'n'.
        """
        CustomBehavior.__init__(self, logger, fixture)

        # Try to isolate what test triggers the leak by restarting the fixture each time.
        if "detect_leaks=1" in os.getenv("ASAN_OPTIONS", ""):
            self.logger.info("ASAN_OPTIONS environment variable set to detect leaks, so restarting"
                             " the fixture after each test instead of after every %d.", n)
            n = 1

        self.n = n
        self.tests_run = 0

    def after_test(self, test_report):
        """
        Counts the test; once 'n' tests have run, tears the fixture down
        and brings it back up.
        """
        self.tests_run += 1
        if self.tests_run < self.n:
            return

        self.logger.info("%d tests have been run against the fixture, stopping it...",
                         self.tests_run)
        self.tests_run = 0

        clean_exit = self.fixture.teardown()
        self.logger.info("Starting the fixture back up again...")
        self.fixture.setup()
        self.fixture.await_ready()

        # Raise this after calling setup in case --continueOnFailure was specified.
        if not clean_exit:
            raise errors.ServerFailure("%s did not exit cleanly" % (self.fixture))
class CheckReplDBHash(CustomBehavior):
    """
    Waits for replication after each test. Checks that the dbhashes of
    the "test" database on the primary and all of its secondaries match.

    Compatible only with ReplFixture subclasses.
    """

    def __init__(self, logger, fixture):
        """
        Initializes the behavior and the dynamic #dbhash# test case.

        Raises a TypeError if 'fixture' is not a ReplFixture.
        """
        if not isinstance(fixture, fixtures.ReplFixture):
            raise TypeError("%s does not support replication" % (fixture.__class__.__name__))

        CustomBehavior.__init__(self, logger, fixture)

        # A single dynamic test case accumulates the result of all per-test dbhash checks.
        self.test_case = testcases.TestCase(self.logger, "Hook", "#dbhash#")

        self.failed = False
        self.started = False

    def after_test(self, test_report):
        """
        After each test, check that the dbhash of the test database is
        the same on all nodes in the replica set or master/slave
        fixture.
        """
        if self.failed:
            # Already failed, so don't check that the dbhash matches anymore.
            return

        try:
            if not self.started:
                CustomBehavior.start_dynamic_test(self.test_case, test_report)
                self.started = True

            # Wait for all operations to have replicated.
            self.fixture.await_repl()

            db_name = "test"
            primary_dbhash = CheckReplDBHash._get_dbhash(self.fixture.get_primary().port, db_name)
            for secondary in self.fixture.get_secondaries():
                secondary_dbhash = CheckReplDBHash._get_dbhash(secondary.port, db_name)
                if primary_dbhash != secondary_dbhash:
                    # Adding failures to a TestReport requires traceback information, so we raise
                    # a 'self.test_case.failureException' that we will catch ourselves.
                    raise self.test_case.failureException(
                        "The primary's '%s' database does not match its secondary's '%s'"
                        " database: [ %s ] != [ %s ]"
                        % (db_name, db_name, primary_dbhash, secondary_dbhash))
        except self.test_case.failureException:
            self.test_case.logger.exception("The dbhashes did not match.")
            self.test_case.return_code = 1
            self.failed = True
            test_report.addFailure(self.test_case, sys.exc_info())
            test_report.stopTest(self.test_case)
            raise errors.TestFailure("The dbhashes did not match")

    def after_suite(self, test_report):
        """
        If we get to this point and haven't failed, the #dbhash# test
        is considered a success, so add it to the test report.
        """
        if not self.failed and self.started:
            # Use logger.info() here: logger.exception() is meant for except blocks and would log
            # a spurious traceback alongside this success message.
            self.test_case.logger.info("The dbhashes matched for all tests.")
            self.test_case.return_code = 0
            test_report.addSuccess(self.test_case)
            # TestReport.stopTest() has already been called if there was a failure.
            test_report.stopTest(self.test_case)

        self.failed = False
        self.started = False

    @staticmethod
    def _get_dbhash(port, db_name):
        """
        Returns the dbhash of 'db_name'.
        """
        return utils.new_mongo_client(port=port)[db_name].command("dbHash")["md5"]
# Registry of behavior classes, keyed by the name accepted by make_custom_behavior().
_CUSTOM_BEHAVIORS = {
    "CleanEveryN": CleanEveryN,
    "CheckReplDBHash": CheckReplDBHash,
}

View File

@ -0,0 +1,195 @@
"""
Enables supports for running tests simultaneously by processing them
from a multi-consumer queue.
"""
from __future__ import absolute_import
import sys
from .. import config
from .. import errors
from ..utils import queue as _queue
class Job(object):
    """
    Runs tests from a queue.
    """

    def __init__(self, logger, fixture, hooks, report):
        """
        Initializes the job with the specified fixture and custom
        behaviors.
        """
        self.logger = logger
        self.fixture = fixture
        self.hooks = hooks
        self.report = report

    def __call__(self, queue, interrupt_flag):
        """
        Continuously executes tests from 'queue' and records their
        details in 'report'.
        """
        should_stop = False
        try:
            self._run(queue, interrupt_flag)
        except errors.StopExecution as err:
            # Stop running tests immediately.
            self.logger.error("Received a StopExecution exception: %s.", err)
            should_stop = True
        except:
            # Unknown error, stop execution.
            self.logger.exception("Encountered an error during test execution.")
            should_stop = True

        if should_stop:
            # Set the interrupt flag so that other jobs do not start running more tests.
            interrupt_flag.set()
            # Drain the queue to unblock the main thread.
            Job._drain_queue(queue)

    def _run(self, queue, interrupt_flag):
        """
        Calls the before/after suite hooks and continuously executes
        tests from 'queue'.
        """
        for hook in self.hooks:
            hook.before_suite(self.report)

        while not interrupt_flag.is_set():
            test = queue.get_nowait()
            try:
                if test is None:
                    # Sentinel value received, so exit.
                    break
                self._execute_test(test)
            finally:
                queue.task_done()

        for hook in self.hooks:
            hook.after_suite(self.report)

    def _execute_test(self, test):
        """
        Calls the before/after test hooks and executes 'test'.
        """
        test.configure(self.fixture)
        self._run_hooks_before_tests(test)

        test(self.report)
        if config.FAIL_FAST and not self.report.wasSuccessful():
            test.logger.info("%s failed, so stopping..." % (test.shortDescription()))
            raise errors.StopExecution("%s failed" % (test.shortDescription()))

        if not self.fixture.is_running():
            self.logger.error("%s marked as a failure because the fixture crashed during the test.",
                              test.shortDescription())
            self.report.setFailure(test, return_code=2)
            # Always fail fast if the fixture fails.
            raise errors.StopExecution("%s not running after %s" %
                                       (self.fixture, test.shortDescription()))

        self._run_hooks_after_tests(test)

    def _run_hooks_before_tests(self, test):
        """
        Runs the before_test method on each of the hooks.

        Swallows any TestFailure exceptions if set to continue on
        failure, and reraises any other exceptions.
        """
        try:
            for hook in self.hooks:
                hook.before_test(self.report)
        except errors.StopExecution:
            raise
        except errors.ServerFailure:
            self.logger.error("%s marked as failure by a hook's before_test.",
                              test.shortDescription())
            self._fail_test(test, sys.exc_info(), return_code=2)
            if config.FAIL_FAST:
                raise errors.StopExecution("A hook's before_test failed")
        except errors.TestFailure:
            # This handler runs for before_test() hooks, so attribute the failure to before_test
            # (the original message incorrectly said "after_test").
            self.logger.error("%s marked as failure by a hook's before_test.",
                              test.shortDescription())
            self._fail_test(test, sys.exc_info(), return_code=2)
            if config.FAIL_FAST:
                raise errors.StopExecution("A hook's before_test failed")
        except:
            # Record the before_test() error in 'self.report'.
            self.report.startTest(test)
            self.report.addError(test, sys.exc_info())
            self.report.stopTest(test)
            raise

    def _run_hooks_after_tests(self, test):
        """
        Runs the after_test method on each of the hooks.

        Swallows any TestFailure exceptions if set to continue on
        failure, and reraises any other exceptions.
        """
        try:
            for hook in self.hooks:
                hook.after_test(self.report)
        except errors.StopExecution:
            raise
        except errors.ServerFailure:
            self.logger.error("%s marked as failure by a hook's after_test.",
                              test.shortDescription())
            self.report.setFailure(test, return_code=2)
            if config.FAIL_FAST:
                raise errors.StopExecution("A hook's after_test failed")
        except errors.TestFailure:
            self.logger.error("%s marked as failure by a hook's after_test.",
                              test.shortDescription())
            self.report.setFailure(test)
            if config.FAIL_FAST:
                raise errors.StopExecution("A hook's after_test failed")
        except:
            self.report.setError(test)
            raise

    def _fail_test(self, test, exc_info, return_code=1):
        """
        Helper to record a test as a failure with the provided return
        code.

        This method should not be used if 'test' has already been
        started, instead use TestReport.setFailure().
        """
        self.report.startTest(test)
        test.return_code = return_code
        self.report.addFailure(test, exc_info)
        self.report.stopTest(test)

    @staticmethod
    def _drain_queue(queue):
        """
        Removes all elements from 'queue' without actually doing
        anything to them. Necessary to unblock the main thread that is
        waiting for 'queue' to be empty.
        """
        try:
            while not queue.empty():
                queue.get_nowait()
                queue.task_done()
        except _queue.Empty:
            # Multiple threads may be draining the queue simultaneously, so just ignore the
            # exception from the race between queue.empty() being false and failing to get an item.
            pass

View File

@ -0,0 +1,291 @@
"""
Extension to the unittest.TestResult to support additional test status
and timing information for the report.json file.
"""
from __future__ import absolute_import
import time
import unittest
from .. import config
from .. import logging
class TestReport(unittest.TestResult):
    """
    Records test status and timing information.
    """

    def __init__(self, logger, logging_config, build_id=None, build_config=None):
        """
        Initializes the TestReport with the buildlogger configuration.

        'build_id' and 'build_config' identify the buildlogger build that per-test
        log output is appended to; both may be None when buildlogger is not in use.
        """
        unittest.TestResult.__init__(self)

        self.logger = logger
        self.logging_config = logging_config
        self.build_id = build_id
        self.build_config = build_config

        # Each of the following dictionaries is keyed by the test's id().
        self.start_times = {}
        self.end_times = {}
        self.statuses = {}  # Values are "pass", "fail", or "error".
        self.return_codes = {}
        self.urls = {}  # Buildlogger URLs where each test's output was written.

        self.num_succeeded = 0
        self.num_failed = 0
        self.num_errored = 0

        # Ids of tests that were registered via startTest(dynamic=True), e.g. the
        # #dbhash# test case added by a hook.
        self.__dynamic_tests = set()
        # Maps test id to the logger the test had before startTest() swapped in a
        # test-specific one; stopTest() restores it from here.
        self.__original_loggers = {}

    @classmethod
    def combine(cls, *reports):
        """
        Merges the results from multiple TestReport instances into one.

        If the same test is present in multiple reports, then one that
        failed or errored is more preferred over one that succeeded.
        This behavior is useful for when running multiple jobs that
        dynamically add a #dbhash# test case.
        """
        combined_report = cls(logging.loggers.EXECUTOR, {})
        combining_time = time.time()

        for report in reports:
            if not isinstance(report, TestReport):
                raise TypeError("reports must be a list of TestReport instances")

            for test_id in report.start_times:
                if combined_report.statuses.get(test_id, "pass") != "pass":
                    # 'combined_report' already has a failure recorded for this test, so just keep
                    # the information about that one.
                    continue

                combined_report.start_times[test_id] = report.start_times[test_id]
                combined_report.end_times[test_id] = report.end_times.get(test_id, combining_time)

                # If a StopExecution exception is triggered while running the tests, then it is
                # possible for dynamic tests not to have called TestReport.stopTest() yet.

                if test_id in report.__dynamic_tests:
                    # Mark a dynamic test as having failed if it was interrupted. It might have
                    # passed if the suite ran to completion, but we wouldn't know for sure.
                    combined_report.statuses[test_id] = report.statuses.get(test_id, "fail")
                    combined_report.return_codes[test_id] = report.return_codes.get(test_id, -2)
                else:
                    # A non-dynamic test should always have a status and return code, so it is a
                    # resmoke.py error if it does not.
                    combined_report.statuses[test_id] = report.statuses.get(test_id, "error")
                    combined_report.return_codes[test_id] = report.return_codes.get(test_id, 2)

                if test_id in report.urls:
                    combined_report.urls[test_id] = report.urls[test_id]

            combined_report.__dynamic_tests.update(report.__dynamic_tests)

        # Recompute number of success, failures, and errors.
        combined_report.num_succeeded = len(combined_report.get_successful())
        combined_report.num_failed = len(combined_report.get_failed())
        combined_report.num_errored = len(combined_report.get_errored())

        return combined_report

    def startTest(self, test, dynamic=False):
        """
        Called immediately before 'test' is run.
        """
        unittest.TestResult.startTest(self, test)

        self.start_times[test.id()] = time.time()

        basename = test.basename()
        if dynamic:
            command = "(dynamic test case)"
            self.__dynamic_tests.add(test.id())
        else:
            command = test.as_command()
        self.logger.info("Running %s...\n%s", basename, command)

        test_id = logging.buildlogger.new_test_id(self.build_id,
                                                  self.build_config,
                                                  basename,
                                                  command)

        if self.build_id is not None:
            endpoint = logging.buildlogger.APPEND_TEST_LOGS_ENDPOINT % {
                "build_id": self.build_id,
                "test_id": test_id,
            }
            self.urls[test.id()] = "%s/%s/" % (config.BUILDLOGGER_URL.rstrip("/"),
                                               endpoint.strip("/"))
            self.logger.info("Writing output of %s to %s.",
                             test.shortDescription(), self.urls[test.id()])

        # Set up the test-specific logger; the original is stashed so stopTest() can restore it.
        logger_name = "%s:%s" % (test.logger.name, test.short_name())
        logger = logging.loggers.new_logger(logger_name, parent=test.logger)
        logging.config.apply_buildlogger_test_handler(logger,
                                                      self.logging_config,
                                                      build_id=self.build_id,
                                                      build_config=self.build_config,
                                                      test_id=test_id)

        self.__original_loggers[test.id()] = test.logger
        test.logger = logger

    def stopTest(self, test):
        """
        Called immediately after 'test' has run.
        """
        unittest.TestResult.stopTest(self, test)

        self.end_times[test.id()] = time.time()
        time_taken = self.end_times[test.id()] - self.start_times[test.id()]
        self.logger.info("%s ran in %0.2f seconds.", test.basename(), time_taken)

        # Asynchronously closes the buildlogger test handler to avoid having too many threads open
        # on 32-bit systems.
        logging.flush.close_later(test.logger)

        # Restore the original logger for the test.
        test.logger = self.__original_loggers.pop(test.id())

    def addError(self, test, err):
        """
        Called when a non-failureException was raised during the
        execution of 'test'.
        """
        unittest.TestResult.addError(self, test, err)
        self.num_errored += 1
        self.statuses[test.id()] = "error"
        self.return_codes[test.id()] = test.return_code

    def setError(self, test):
        """
        Used to change the outcome of an existing test to an error.
        """
        if test.id() not in self.start_times or test.id() not in self.end_times:
            raise ValueError("setError called on a test that has not completed.")

        self.statuses[test.id()] = "error"
        self.return_codes[test.id()] = 2

        # Recompute number of success, failures, and errors.
        self.num_succeeded = len(self.get_successful())
        self.num_failed = len(self.get_failed())
        self.num_errored = len(self.get_errored())

    def addFailure(self, test, err):
        """
        Called when a failureException was raised during the execution
        of 'test'.
        """
        unittest.TestResult.addFailure(self, test, err)
        self.num_failed += 1
        self.statuses[test.id()] = "fail"
        self.return_codes[test.id()] = test.return_code

    def setFailure(self, test, return_code=1):
        """
        Used to change the outcome of an existing test to a failure.
        """
        if test.id() not in self.start_times or test.id() not in self.end_times:
            raise ValueError("setFailure called on a test that has not completed.")

        self.statuses[test.id()] = "fail"
        self.return_codes[test.id()] = return_code

        # Recompute number of success, failures, and errors.
        self.num_succeeded = len(self.get_successful())
        self.num_failed = len(self.get_failed())
        self.num_errored = len(self.get_errored())

    def addSuccess(self, test):
        """
        Called when 'test' executed successfully.
        """
        unittest.TestResult.addSuccess(self, test)
        self.num_succeeded += 1
        self.statuses[test.id()] = "pass"
        self.return_codes[test.id()] = test.return_code

    def wasSuccessful(self):
        """
        Returns true if all tests executed successfully.
        """
        return self.num_failed == self.num_errored == 0

    def num_dynamic(self):
        """
        Returns the number of tests for which startTest(dynamic=True)
        was called.
        """
        return len(self.__dynamic_tests)

    def get_successful(self):
        """
        Returns the ids of the tests that executed successfully.
        """
        return [test_id for test_id in self.statuses if self.statuses[test_id] == "pass"]

    def get_failed(self):
        """
        Returns the ids of the tests that raised a failureException
        during their execution.
        """
        return [test_id for test_id in self.statuses if self.statuses[test_id] == "fail"]

    def get_errored(self):
        """
        Returns the ids of the tests that raised a non-failureException
        during their execution.
        """
        return [test_id for test_id in self.statuses if self.statuses[test_id] == "error"]

    def as_dict(self):
        """
        Return the test result information as a dictionary.

        Used to create the report.json file.
        """
        results = []
        for test_id in self.start_times:
            # Don't distinguish between failures and errors.
            status = "pass" if self.statuses[test_id] == "pass" else "fail"
            start_time = self.start_times[test_id]
            end_time = self.end_times[test_id]

            result = {
                "test_file": test_id,
                "status": status,
                "start": start_time,
                "end": end_time,
                "elapsed": end_time - start_time,
            }

            return_code = self.return_codes[test_id]
            if return_code is not None:
                result["exit_code"] = return_code

            if test_id in self.urls:
                result["url"] = self.urls[test_id]

            results.append(result)

        return {
            "results": results,
            "failures": self.num_failed + self.num_errored,
        }

View File

@ -0,0 +1,139 @@
"""
Holder for a set of TestGroup instances.
"""
from __future__ import absolute_import
import time
from . import summary as _summary
from . import testgroup
from .. import selector as _selector
class Suite(object):
    """
    A suite of tests.
    """

    # Fixed order in which the different kinds of test groups are constructed and run.
    TESTS_ORDER = ("cpp_unit_test", "db_test", "js_test", "mongos_test")

    def __init__(self, suite_name, suite_config):
        """
        Initializes the suite with the specified name and configuration.

        Builds one TestGroup per test kind that appears in the "selector"
        section of 'suite_config', in TESTS_ORDER order.
        """
        self._suite_name = suite_name
        self._suite_config = suite_config

        self.test_groups = []
        for test_kind in Suite.TESTS_ORDER:
            if test_kind not in suite_config["selector"]:
                continue
            tests = self._get_tests_for_group(test_kind)
            test_group = testgroup.TestGroup(test_kind, tests)
            self.test_groups.append(test_group)

        # Set by record_end(), unless already set earlier (e.g. on an execution error).
        self.return_code = None

        self._start_time = None
        self._end_time = None

    def _get_tests_for_group(self, test_kind):
        """
        Returns the tests to run based on the 'test_kind'-specific
        filtering policy.
        """
        test_info = self.get_selector_config()[test_kind]

        # The mongos_test doesn't have to filter anything, the test_info is just the arguments to
        # the mongos program to be used as the test case.
        if test_kind == "mongos_test":
            mongos_options = test_info  # Just for easier reading.
            if not isinstance(mongos_options, dict):
                raise TypeError("Expected dictionary of arguments to mongos")
            return [mongos_options]

        if test_kind == "cpp_unit_test":
            tests = _selector.filter_cpp_unit_tests(**test_info)
        elif test_kind == "db_test":
            tests = _selector.filter_dbtests(**test_info)
        else:  # test_kind == "js_test":
            tests = _selector.filter_jstests(**test_info)

        # Sort case-insensitively for a stable, predictable execution order.
        return sorted(tests, key=str.lower)

    def get_name(self):
        """
        Returns the name of the test suite.
        """
        return self._suite_name

    def get_selector_config(self):
        """
        Returns the "selector" section of the YAML configuration.
        """
        return self._suite_config["selector"]

    def get_executor_config(self):
        """
        Returns the "executor" section of the YAML configuration.
        """
        return self._suite_config["executor"]

    def record_start(self):
        """
        Records the start time of the suite.
        """
        self._start_time = time.time()

    def record_end(self):
        """
        Records the end time of the suite.

        Sets the 'return_code' of the suite based on the record codes of
        each of the individual test groups.
        """
        self._end_time = time.time()

        # Only set 'return_code' if it hasn't been set already. It may have been set if there was
        # an exception that happened during the execution of the suite.
        if self.return_code is None:
            # The return code of the suite should be 2 if any test group has a return code of 2.
            # The return code of the suite should be 1 if any test group has a return code of 1,
            # and none have a return code of 2. Otherwise, the return code should be 0.
            self.return_code = max(test_group.return_code for test_group in self.test_groups)

    def summarize(self, sb):
        """
        Appends a summary of each individual test group onto the string
        builder 'sb'.
        """
        combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)

        summarized_groups = []
        for group in self.test_groups:
            group_sb = []
            summary = group.summarize(group_sb)
            summarized_groups.append("    %ss: %s" % (group.test_kind, "\n    ".join(group_sb)))
            combined_summary = _summary.combine(combined_summary, summary)

        if combined_summary.num_run == 0:
            sb.append("Suite did not run any tests.")
            return

        # Override the 'time_taken' attribute of the summary if we have more accurate timing
        # information available.
        if self._start_time is not None and self._end_time is not None:
            time_taken = self._end_time - self._start_time
            combined_summary = combined_summary._replace(time_taken=time_taken)

        sb.append("%d test(s) ran in %0.2f seconds"
                  " (%d succeeded, %d were skipped, %d failed, %d errored)" % combined_summary)

        for summary_text in summarized_groups:
            sb.append(summary_text)

View File

@ -0,0 +1,22 @@
"""
Holder for summary information about a test group or suite.
"""
from __future__ import absolute_import
import collections
# Immutable record of aggregate counts for a test group or suite.
Summary = collections.namedtuple("Summary", ["num_run", "time_taken", "num_succeeded",
                                             "num_skipped", "num_failed", "num_errored"])


def combine(summary1, summary2):
    """
    Returns a summary representing the sum of 'summary1' and 'summary2'.

    Each field of the result is the elementwise sum of the corresponding
    fields of the two inputs.
    """
    # Pair up corresponding fields with zip() instead of indexing over xrange(len(...));
    # this is clearer and works under both Python 2 and Python 3.
    return Summary._make(field1 + field2 for field1, field2 in zip(summary1, summary2))

View File

@ -0,0 +1,358 @@
"""
Subclasses of unittest.TestCase.
"""
from __future__ import absolute_import
import os
import os.path
import shutil
import unittest
from .. import config
from .. import core
from .. import logging
from .. import utils
def make_test_case(test_kind, *args, **kwargs):
    """
    Factory function for creating TestCase instances.
    """
    try:
        case_cls = _TEST_CASES[test_kind]
    except KeyError:
        raise ValueError("Unknown test kind '%s'" % (test_kind))
    return case_cls(*args, **kwargs)
class TestCase(unittest.TestCase):
    """
    A test case to execute.
    """

    def __init__(self, logger, test_kind, test_name):
        """
        Initializes the TestCase with the name of the test.
        """
        unittest.TestCase.__init__(self, methodName="run_test")

        if not isinstance(logger, logging.Logger):
            raise TypeError("logger must be a Logger instance")

        # Both identifying strings must be actual strings.
        for value, message in ((test_kind, "test_kind must be a string"),
                               (test_name, "test_name must be a string")):
            if not isinstance(value, basestring):
                raise TypeError(message)

        self.logger = logger
        self.test_kind = test_kind
        self.test_name = test_name

        self.fixture = None
        self.return_code = None

    def long_name(self):
        """
        Returns the path to the test, relative to the current working directory.
        """
        return os.path.relpath(self.test_name)

    def basename(self):
        """
        Returns the basename of the test.
        """
        return os.path.basename(self.test_name)

    def short_name(self):
        """
        Returns the basename of the test without the file extension.
        """
        root, _ = os.path.splitext(self.basename())
        return root

    def id(self):
        return self.test_name

    def shortDescription(self):
        return " ".join((self.test_kind, self.test_name))

    def configure(self, fixture):
        """
        Stores 'fixture' as an attribute for later use during execution.
        """
        self.fixture = fixture

    def run_test(self):
        """
        Runs the specified test.
        """
        raise NotImplementedError("run_test must be implemented by TestCase subclasses")

    def as_command(self):
        """
        Returns the command invocation used to run the test.
        """
        return self._make_process().as_command()

    def _execute(self, process):
        """
        Runs the specified process.
        """
        description = self.shortDescription()

        self.logger.info("Starting %s...\n%s", description, process.as_command())
        process.start()
        self.logger.info("%s started with pid %s.", description, process.pid)

        self.return_code = process.wait()
        if self.return_code != 0:
            raise self.failureException("%s failed" % (description,))

        self.logger.info("%s finished.", description)

    def _make_process(self):
        """
        Returns a new Process instance that could be used to run the
        test or log the command.
        """
        raise NotImplementedError("_make_process must be implemented by TestCase subclasses")
class CPPUnitTestCase(TestCase):
    """
    A C++ unit test to execute.
    """

    def __init__(self,
                 logger,
                 program_executable,
                 program_options=None):
        """
        Initializes the CPPUnitTestCase with the executable to run.
        """
        TestCase.__init__(self, logger, "Program", program_executable)

        self.program_executable = program_executable
        # Copy so later modifications do not leak back into the caller's dictionary.
        self.program_options = utils.default_if_none(program_options, {}).copy()

    def run_test(self):
        """
        Runs the unit test executable and raises failureException if it
        exits with a non-zero return code.
        """
        try:
            program = self._make_process()
            self._execute(program)
        except self.failureException:
            raise
        except:
            self.logger.exception("Encountered an error running C++ unit test %s.", self.basename())
            raise

    def _make_process(self):
        """
        Returns the Process that invokes the unit test executable.
        """
        return core.process.Process(self.logger,
                                    [self.program_executable],
                                    **self.program_options)
class DBTestCase(TestCase):
    """
    A dbtest to execute.
    """

    def __init__(self,
                 logger,
                 dbtest_suite,
                 dbtest_executable=None,
                 dbtest_options=None):
        """
        Initializes the DBTestCase with the dbtest suite to run.
        """
        TestCase.__init__(self, logger, "DBTest", dbtest_suite)

        # Command line options override the YAML configuration.
        self.dbtest_executable = utils.default_if_none(config.DBTEST_EXECUTABLE, dbtest_executable)

        self.dbtest_suite = dbtest_suite
        # Copy so later modifications do not leak back into the caller's dictionary.
        self.dbtest_options = utils.default_if_none(dbtest_options, {}).copy()

    def configure(self, fixture):
        """
        Computes the dbpath the dbtest executable should use and clears
        out any data left over from a previous run.
        """
        TestCase.configure(self, fixture)

        # If a dbpath was specified, then use it as a container for all other dbpaths.
        dbpath_prefix = self.dbtest_options.pop("dbpath", DBTestCase._get_dbpath_prefix())
        dbpath = os.path.join(dbpath_prefix, "job%d" % (self.fixture.job_num), "unittest")
        self.dbtest_options["dbpath"] = dbpath

        # Start from an empty directory.
        shutil.rmtree(dbpath, ignore_errors=True)

        try:
            os.makedirs(dbpath)
        except os.error:
            # Directory already exists.
            pass

    def run_test(self):
        """
        Runs the dbtest executable and raises failureException if it
        exits with a non-zero return code.
        """
        try:
            dbtest = self._make_process()
            self._execute(dbtest)
        except self.failureException:
            raise
        except:
            self.logger.exception("Encountered an error running dbtest suite %s.", self.basename())
            raise

    def _make_process(self):
        """
        Returns the Process that invokes the dbtest executable with this
        test's suite and options.
        """
        return core.programs.dbtest_program(self.logger,
                                            executable=self.dbtest_executable,
                                            suites=[self.dbtest_suite],
                                            **self.dbtest_options)

    @staticmethod
    def _get_dbpath_prefix():
        """
        Returns the prefix of the dbpath to use for the dbtest
        executable.

        Order of preference:
          1. The --dbpathPrefix specified at the command line.
          2. Value of the TMPDIR environment variable.
          3. Value of the TEMP environment variable.
          4. Value of the TMP environment variable.
          5. The /tmp directory.
        """
        if config.DBPATH_PREFIX is not None:
            return config.DBPATH_PREFIX

        for env_var in ("TMPDIR", "TEMP", "TMP"):
            if env_var in os.environ:
                return os.environ[env_var]
        return os.path.normpath("/tmp")
class JSTestCase(TestCase):
    """
    A jstest to execute.
    """

    def __init__(self,
                 logger,
                 js_filename,
                 shell_executable=None,
                 shell_options=None):
        """
        Initializes the JSTestCase with the JS file to run.
        """
        TestCase.__init__(self, logger, "JSTest", js_filename)

        # Command line options override the YAML configuration.
        self.shell_executable = utils.default_if_none(config.MONGO_EXECUTABLE, shell_executable)

        self.js_filename = js_filename
        # Copy so later modifications do not leak back into the caller's dictionary.
        self.shell_options = utils.default_if_none(shell_options, {}).copy()

    def configure(self, fixture):
        """
        Points the shell at the fixture and sets up the MongoRunner data
        directory for any servers the test itself spawns.
        """
        TestCase.configure(self, fixture)

        if self.fixture.port is not None:
            self.shell_options["port"] = self.fixture.port

        global_vars = self.shell_options.get("global_vars", {}).copy()
        data_dir = self._get_data_dir(global_vars)

        # Set MongoRunner.dataPath if overridden at command line or not specified in YAML.
        if config.DBPATH_PREFIX is not None or "MongoRunner.dataPath" not in global_vars:
            # dataPath property is the dataDir property with a trailing slash.
            data_path = os.path.join(data_dir, "")
        else:
            data_path = global_vars["MongoRunner.dataPath"]

        global_vars["MongoRunner.dataDir"] = data_dir
        global_vars["MongoRunner.dataPath"] = data_path
        self.shell_options["global_vars"] = global_vars

        try:
            os.makedirs(data_dir)
        except os.error:
            # Directory already exists.
            pass

    def _get_data_dir(self, global_vars):
        """
        Returns the value that the mongo shell should set for the
        MongoRunner.dataDir property.
        """
        # Command line options override the YAML configuration.
        data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
                                                global_vars.get("MongoRunner.dataDir"))
        data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
        return os.path.join(data_dir_prefix,
                            "job%d" % (self.fixture.job_num),
                            config.MONGO_RUNNER_SUBDIR)

    def run_test(self):
        """
        Runs the mongo shell on the JS file and raises failureException
        if the shell exits with a non-zero return code.
        """
        try:
            shell = self._make_process()
            self._execute(shell)
        except self.failureException:
            raise
        except:
            self.logger.exception("Encountered an error running jstest %s.", self.basename())
            raise

    def _make_process(self):
        """
        Returns the Process that invokes the mongo shell on this test's
        JS file.
        """
        return core.programs.mongo_shell_program(self.logger,
                                                 executable=self.shell_executable,
                                                 filename=self.js_filename,
                                                 **self.shell_options)
class MongosTestCase(TestCase):
    """
    A TestCase which runs a mongos binary with the given parameters.
    """

    def __init__(self, logger, mongos_options):
        """
        Initializes the mongos test and saves the options.
        """
        self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE,
                                                       config.DEFAULT_MONGOS_EXECUTABLE)
        # Use the executable as the test name.
        TestCase.__init__(self, logger, "mongos", self.mongos_executable)
        self.options = mongos_options.copy()

    def configure(self, fixture):
        """
        Ensures the --test option is present in the mongos options.
        """
        TestCase.configure(self, fixture)
        # Always specify test option to ensure the mongos will terminate.
        if "test" not in self.options:
            self.options["test"] = ""

    def run_test(self):
        """
        Starts the mongos and waits for it to exit, logging any failure.
        """
        mongos = None
        try:
            mongos = self._make_process()
            self._execute(mongos)
        except self.failureException:
            raise
        except:
            # 'mongos' is still None when _make_process() itself failed;
            # guard against an UnboundLocalError masking the real error.
            description = mongos.as_command() if mongos is not None else self.mongos_executable
            self.logger.exception("Encountered an error running %s.", description)
            raise

    def _make_process(self):
        """Builds the mongos process from the saved options."""
        return core.programs.mongos_program(self.logger,
                                            executable=self.mongos_executable,
                                            **self.options)
# Maps the test kind names that appear in suite configurations to the
# TestCase subclasses that know how to run them.
_TEST_CASES = {
    "cpp_unit_test": CPPUnitTestCase,
    "db_test": DBTestCase,
    "js_test": JSTestCase,
    "mongos_test": MongosTestCase,
}

View File

@ -0,0 +1,93 @@
"""
Holder for the (test kind, list of tests) pair with additional metadata
about when and how they execute.
"""
from __future__ import absolute_import
import time
from . import summary as _summary
class TestGroup(object):
    """
    Encapsulates the tests of one particular kind (e.g. C++ unit tests,
    dbtests, jstests) together with the timing and report history of each
    execution of the group.
    """

    def __init__(self, test_kind, tests):
        """
        Initializes the TestGroup with a list of tests.
        """
        self.test_kind = test_kind
        self.tests = tests

        self.return_code = None  # Set by the executor.

        # Parallel histories: one entry per execution of the group.
        self._start_times = []
        self._end_times = []
        self._reports = []

    def get_latest_report(self):
        """
        Returns the report of the most recent execution, and None if
        the test group has not been executed.
        """
        return self._reports[-1] if self._reports else None

    def record_start(self):
        """
        Records the start time of an execution.
        """
        self._start_times.append(time.time())

    def record_end(self, report):
        """
        Records the end time of an execution along with its report.
        """
        self._end_times.append(time.time())
        self._reports.append(report)

    def summarize(self, sb):
        """
        Appends a summary of the latest execution onto the string
        builder 'sb' and returns the matching Summary tuple.

        TODO: summarize more than just the most recent report
        """
        if not self._reports:
            sb.append("No tests ran.")
            return _summary.Summary(0, 0.0, 0, 0, 0, 0)

        latest = self._reports[-1]
        elapsed = self._end_times[-1] - self._start_times[-1]

        num_run = latest.num_succeeded + latest.num_errored + latest.num_failed
        # Dynamic tests are created during the run, so they are added on top
        # of the originally scheduled ones when computing skips.
        num_skipped = len(self.tests) + latest.num_dynamic() - num_run

        # Short message for a fully green run.
        if latest.num_succeeded == num_run and num_skipped == 0:
            sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, elapsed))
            return _summary.Summary(num_run, elapsed, num_run, 0, 0, 0)

        summary = _summary.Summary(num_run, elapsed, latest.num_succeeded, num_skipped,
                                   latest.num_failed, latest.num_errored)

        sb.append("%d test(s) ran in %0.2f seconds"
                  " (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)

        if latest.num_failed > 0:
            sb.append("The following tests failed (with exit code):")
            for test_id in latest.get_failed():
                sb.append("    %s (%d)" % (test_id, latest.return_codes[test_id]))

        if latest.num_errored > 0:
            sb.append("The following tests had errors:")
            for test_id in latest.get_errored():
                sb.append("    %s" % test_id)

        return summary

View File

@ -0,0 +1,77 @@
"""
Helper functions.
"""
from __future__ import absolute_import
import os.path
import pymongo
import yaml
def default_if_none(value, default):
    """Returns 'value' unless it is None, in which case 'default' is returned."""
    if value is None:
        return default
    return value
def is_string_list(lst):
    """
    Returns true if 'lst' is a list of strings, and false otherwise.
    """
    if not isinstance(lst, list):
        return False
    for elem in lst:
        if not isinstance(elem, basestring):
            return False
    return True
def is_string_set(value):
    """
    Returns true if 'value' is a set of strings, and false otherwise.
    """
    if not isinstance(value, set):
        return False
    for elem in value:
        if not isinstance(elem, basestring):
            return False
    return True
def is_js_file(filename):
    """
    Returns true if 'filename' ends in .js, and false otherwise.
    """
    (_root, ext) = os.path.splitext(filename)
    return ext == ".js"
def is_yaml_file(filename):
    """
    Returns true if 'filename' ends in .yml or .yaml, and false
    otherwise.
    """
    (_root, ext) = os.path.splitext(filename)
    return ext in (".yaml", ".yml")
def load_yaml_file(filename):
    """
    Attempts to read 'filename' as YAML.

    Raises a ValueError when the file contents are not valid YAML.
    """
    try:
        fp = open(filename, "r")
        try:
            return yaml.safe_load(fp)
        finally:
            fp.close()
    except yaml.YAMLError as err:
        raise ValueError("File '%s' contained invalid YAML: %s" % (filename, err))
def dump_yaml(value):
    """
    Returns 'value' formatted as YAML.
    """
    # Use block (indented) style for formatting YAML.
    formatted = yaml.safe_dump(value, default_flow_style=False)
    return formatted.rstrip()
def new_mongo_client(port, read_preference=pymongo.ReadPreference.PRIMARY):
    """
    Returns a pymongo.MongoClient connected on 'port' with a read
    preference of 'read_preference'.
    """
    timeout_millis = 30000
    kwargs = dict(connectTimeoutMS=timeout_millis)
    if pymongo.version_tuple[0] >= 3:
        # PyMongo 3 has a separate server-selection timeout and, unlike
        # PyMongo 2, defers connecting unless told otherwise.
        kwargs.update(serverSelectionTimeoutMS=timeout_millis, connect=True)
    return pymongo.MongoClient(port=port, read_preference=read_preference, **kwargs)

View File

@ -0,0 +1,199 @@
"""
Filename globbing utility.
"""
from __future__ import absolute_import
import glob as _glob
import os
import os.path
import re
# Token that expands to zero or more subdirectories when used as a whole
# path component (e.g. "a/**/b").
_GLOBSTAR = "**"
# Any of *, ?, or [ marks a string as a glob pattern (mirrors glob.has_magic()).
_CONTAINS_GLOB_PATTERN = re.compile("[*?[]")
def is_glob_pattern(s):
    """
    Returns true if 's' represents a glob pattern, and false otherwise.
    """
    # Copied from glob.has_magic().
    return bool(_CONTAINS_GLOB_PATTERN.search(s))
def glob(globbed_pathname):
    """
    Return a list of pathnames matching the 'globbed_pathname' pattern.

    In addition to containing simple shell-style wildcards a la fnmatch,
    the pattern may also contain globstars ("**"), which is recursively
    expanded to match zero or more subdirectories.
    """
    matched = []
    for pathname in iglob(globbed_pathname):
        matched.append(pathname)
    return matched
def iglob(globbed_pathname):
    """
    Emit a list of pathnames matching the 'globbed_pathname' pattern.

    In addition to containing simple shell-style wildcards a la fnmatch,
    the pattern may also contain globstars ("**"), which is recursively
    expanded to match zero or more subdirectories.
    """
    parts = _split_path(globbed_pathname)
    parts = _canonicalize(parts)

    index = _find_globstar(parts)
    if index == -1:
        # No globstar present; defer entirely to the standard glob module.
        for pathname in _glob.iglob(globbed_pathname):
            # Normalize 'pathname' so exact string comparison can be used later.
            yield os.path.normpath(pathname)
        return

    # **, **/, or **/a
    if index == 0:
        expand = _expand_curdir
    # a/** or a/**/ or a/**/b
    else:
        expand = _expand

    # Split the pattern around the first globstar; the prefix is walked
    # directly, and the suffix is re-globbed under every expanded directory.
    prefix_parts = parts[:index]
    suffix_parts = parts[index + 1:]

    prefix = os.path.join(*prefix_parts) if prefix_parts else os.curdir
    suffix = os.path.join(*suffix_parts) if suffix_parts else ""

    for (kind, path) in expand(prefix):
        if not suffix_parts:
            yield path
        # Avoid following symlinks to avoid an infinite loop
        elif suffix_parts and kind == "dir" and not os.path.islink(path):
            path = os.path.join(path, suffix)
            # Recurse: the suffix may itself contain further glob patterns.
            for pathname in iglob(path):
                yield pathname
def _split_path(pathname):
"""
Return 'pathname' as a list of path components.
"""
parts = []
while True:
(dirname, basename) = os.path.split(pathname)
parts.append(basename)
if not dirname:
break
pathname = dirname
parts.reverse()
return parts
def _canonicalize(parts):
    """
    Return a copy of 'parts' with consecutive "**"s coalesced.
    Raise a ValueError for unsupported uses of "**".
    """
    canonical = []
    in_globstar_run = False

    for part in parts:
        if part == _GLOBSTAR:
            # Skip consecutive **'s
            if not in_globstar_run:
                in_globstar_run = True
                canonical.append(part)
        elif _GLOBSTAR in part:  # a/b**/c or a/**b/c
            raise ValueError("Can only specify glob patterns of the form a/**/b")
        else:
            in_globstar_run = False
            canonical.append(part)

    return canonical
def _find_globstar(parts):
    """
    Return the index of the first occurrence of "**" in 'parts'.
    Return -1 if "**" is not found in the list.
    """
    try:
        return parts.index(_GLOBSTAR)
    except ValueError:
        return -1
def _list_dir(pathname):
"""
Return a pair of the subdirectory names and filenames immediately
contained within the 'pathname' directory.
If 'pathname' does not exist, then None is returned.
"""
try:
(_root, dirs, files) = os.walk(pathname).next()
return (dirs, files)
except StopIteration:
return None # 'pathname' directory does not exist
def _expand(pathname):
    """
    Emit tuples of the form ("dir", dirname) and ("file", filename)
    of all directories and files contained within the 'pathname' directory.
    """
    listing = _list_dir(pathname)
    if listing is None:
        return
    (dirs, files) = listing

    # Zero expansion
    if os.path.basename(pathname):
        yield ("dir", os.path.join(pathname, ""))

    for name in files:
        yield ("file", os.path.join(pathname, name))

    for name in dirs:
        for entry in _expand(os.path.join(pathname, name)):
            yield entry
def _expand_curdir(pathname):
    """
    Emit tuples of the form ("dir", dirname) and ("file", filename)
    of all directories and files contained within the 'pathname' directory.

    The returned pathnames omit a "./" prefix.
    """
    listing = _list_dir(pathname)
    if listing is None:
        return
    (dirs, files) = listing

    # Zero expansion
    yield ("dir", "")

    for name in files:
        yield ("file", name)

    for name in dirs:
        # Recurse through _expand() so nested entries keep their full
        # (prefix-free) relative paths.
        for entry in _expand(name):
            yield entry

View File

@ -0,0 +1,78 @@
"""
Utility for parsing JS comments.
"""
from __future__ import absolute_import
import re
import yaml
# TODO: use a more robust regular expression for matching tags
_JSTEST_TAGS_RE = re.compile(r".*@tags\s*:\s*(\[[^\]]*\])", re.DOTALL)
def get_tags(pathname):
    """
    Returns the list of tags found in the (JS-style) comments of
    'pathname'. The definition can span multiple lines, use unquoted,
    single-quoted, or double-quoted strings, and use the '#' character
    for inline commenting.

    e.g.

     /**
      * @tags: [ "tag1",  # double quoted
      *          'tag2'   # single quoted
      *                   # line with only a comment
      *        , tag3     # no quotes
      *          tag4,    # trailing comma
      *        ]
      */

    Raises a TypeError if the tags are not a list of strings, and a
    ValueError if the tag definition is not valid YAML.
    """
    with open(pathname) as fp:
        match = _JSTEST_TAGS_RE.match(fp.read())
        if match:
            try:
                # TODO: it might be worth supporting the block (indented) style of YAML lists in
                #       addition to the flow (bracketed) style
                tags = yaml.safe_load(_strip_jscomments(match.group(1)))
                # Reject anything that is not a list of strings. The original
                # "not isinstance(...) and all(...)" only fired when both
                # halves were true, so a list of non-strings slipped through.
                if not isinstance(tags, list) or not all(isinstance(tag, basestring)
                                                         for tag in tags):
                    raise TypeError("Expected a list of string tags, but got '%s'" % (tags))
                return tags
            except yaml.YAMLError as err:
                raise ValueError("File '%s' contained invalid tags (expected YAML): %s"
                                 % (pathname, err))
    return []
def _strip_jscomments(s):
"""
Given a string 's' that represents the contents after the "@tags:"
annotation in the JS file, this function returns a string that can
be converted to YAML.
e.g.
[ "tag1", # double quoted
* 'tag2' # single quoted
* # line with only a comment
* , tag3 # no quotes
* tag4, # trailing comma
* ]
If the //-style JS comments were used, then the example remains the,
same except with the '*' character is replaced by '//'.
"""
yaml_lines = []
for line in s.splitlines():
# Remove leading whitespace and symbols that commonly appear in JS comments.
line = line.lstrip("\t ").lstrip("*/")
yaml_lines.append(line)
return "\n".join(yaml_lines)

View File

@ -0,0 +1,52 @@
"""
Extension to the Queue.Queue class.
Added support for the join() method to take a timeout. This is necessary
in order for KeyboardInterrupt exceptions to get propagated.
See https://bugs.python.org/issue1167930 for more details.
"""
from __future__ import absolute_import
import Queue
import time
# Exception that is raised when get_nowait() is called on an empty Queue.
Empty = Queue.Empty
class Queue(Queue.Queue):
    """
    A multi-producer, multi-consumer queue.

    Identical to Queue.Queue except that join() accepts an optional
    timeout, so the main thread regains control periodically and can
    receive KeyboardInterrupt (see https://bugs.python.org/issue1167930).
    """

    def join(self, timeout=None):
        """
        Wait until all items in the queue have been retrieved and processed,
        or until 'timeout' seconds have passed.

        The count of unfinished tasks is incremented whenever an item is added
        to the queue. The count is decremented whenever task_done() is called
        to indicate that all work on the retrieved item was completed.

        When the number of unfinished tasks reaches zero, True is returned.
        If the number of unfinished tasks remains nonzero after 'timeout'
        seconds have passed, then False is returned.
        """
        # 'all_tasks_done' is the condition variable Queue.Queue notifies each
        # time 'unfinished_tasks' is decremented; entering it takes the mutex.
        with self.all_tasks_done:
            if timeout is None:
                # Original (blocking) behavior of Queue.Queue.join().
                while self.unfinished_tasks:
                    self.all_tasks_done.wait()
            elif timeout < 0:
                raise ValueError("timeout must be a nonnegative number")
            else:
                # Pass timeout down to lock acquisition
                deadline = time.time() + timeout
                while self.unfinished_tasks:
                    remaining = deadline - time.time()
                    if remaining <= 0.0:
                        return False
                    self.all_tasks_done.wait(remaining)
            return True

View File

@ -0,0 +1,119 @@
"""
Alternative to the threading.Timer class.
Enables a timer to be restarted without needing to construct a new thread
each time. This is necessary to execute periodic actions, e.g. flushing
log messages to buildlogger, while avoiding errors related to "can't start
new thread" that would otherwise occur on Windows.
"""
from __future__ import absolute_import
import threading
class AlarmClock(threading.Thread):
    """
    Calls a function after a specified number of seconds.

    Unlike threading.Timer, the same thread can be snoozed and reset
    repeatedly, which avoids creating a new thread for every period.
    """

    def __init__(self, interval, func, args=None, kwargs=None):
        """
        Initializes the timer with a function to periodically execute.
        """
        threading.Thread.__init__(self)

        # A non-dismissed timer should not prevent the program from exiting
        self.daemon = True

        self.interval = interval
        self.func = func
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}

        # 'lock' guards the three state flags below; 'cond' is notified on
        # every state transition so run() wakes up promptly.
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)

        self.snoozed = False  # canceled for one execution
        self.dismissed = False  # canceled for all time
        self.restarted = False

    def dismiss(self):
        """
        Disables the timer.
        """
        with self.lock:
            self.dismissed = True
            self.cond.notify_all()

        self.join()  # Tidy up the started thread.

    cancel = dismiss  # Expose API compatible with that of threading.Timer.

    def snooze(self):
        """
        Skips the next execution of 'func' if it has not already started.

        Raises a ValueError if the timer has already been dismissed.
        """
        with self.lock:
            if self.dismissed:
                raise ValueError("Timer cannot be snoozed if it has been dismissed")

            self.snoozed = True
            self.restarted = False
            self.cond.notify_all()

    def reset(self):
        """
        Restarts the timer, causing it to wait 'interval' seconds before calling
        'func' again.

        Raises a ValueError if the timer was dismissed or was never snoozed.
        """
        with self.lock:
            if self.dismissed:
                raise ValueError("Timer cannot be reset if it has been dismissed")

            if not self.snoozed:
                raise ValueError("Timer cannot be reset if it has not been snoozed")

            self.restarted = True
            self.cond.notify_all()

    def run(self):
        """
        Repeatedly calls 'func' with a delay of 'interval' seconds between executions.

        If the timer is snoozed before 'func' is called, then it waits to be reset.
        After it has been reset, the timer will again wait 'interval' seconds and
        then try to call 'func'.

        If the timer is dismissed, then no subsequent executions of 'func' are made.
        """
        with self.lock:
            while not self.dismissed:
                # Wait for the specified amount of time. cond.wait() releases
                # the lock while waiting and reacquires it before returning.
                self.cond.wait(self.interval)

                if self.dismissed:
                    return

                # If the timer was snoozed, then it should wait to be reset.
                if self.snoozed:
                    while not self.restarted:
                        self.cond.wait()

                        if self.dismissed:
                            return

                    self.restarted = False
                    self.snoozed = False
                    continue

                # Execute the function. NOTE: the lock is held here, so
                # snooze()/dismiss() calls block until 'func' returns.
                self.func(*self.args, **self.kwargs)

                # Ignore snoozes that took place while the function was being executed.
                self.snoozed = False

View File

@ -55,7 +55,6 @@ from pymongo.errors import OperationFailure
from pymongo import ReadPreference
import cleanbb
import smoke
import utils
try:
@ -1053,29 +1052,6 @@ def expand_suites(suites,expandUseDB=True):
return tests
def filter_tests_by_tag(tests, tag_query):
    """Selects tests from a list based on a query over the tags in the tests."""
    # Map the absolute path of each test file back to its original (tuple)
    # entry so that the smoke-module results can be translated back later.
    test_map = {}
    roots = []
    for test in tests:
        root = os.path.abspath(test[0])
        roots.append(root)
        test_map[root] = test

    # Build the new-style test database (extracting metadata from the files)
    # and filter it down to the tests matching 'tag_query'.
    new_style_tests = smoke.tests.build_tests(roots, extract_metadata=True)
    new_style_tests = smoke.suites.build_suite(new_style_tests, tag_query)

    print "\nTag query matches %s tests out of %s.\n" % (len(new_style_tests),
                                                         len(tests))

    # Translate the filtered results back into the caller's representation.
    tests = []
    for new_style_test in new_style_tests:
        tests.append(test_map[os.path.abspath(new_style_test.filename)])

    return tests
def add_exe(e):
if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
e += ".exe"
@ -1341,14 +1317,6 @@ def main():
parser.add_option('--basisTechRootDirectory', dest='rlp_path', default=None,
help='Basis Tech Rosette Linguistics Platform root directory')
parser.add_option('--include-tags', dest='include_tags', default="", action='store',
help='Filters jstests run by tag regex(es) - a tag in the test must match the regexes. ' +
'Specify single regex string or JSON array.')
parser.add_option('--exclude-tags', dest='exclude_tags', default="", action='store',
help='Filters jstests run by tag regex(es) - no tags in the test must match the regexes. ' +
'Specify single regex string or JSON array.')
global tests
(options, tests) = parser.parse_args()
@ -1403,22 +1371,6 @@ def main():
tests = filter( ignore_test, tests )
if options.include_tags or options.exclude_tags:
def to_regex_array(tags_option):
if not tags_option:
return []
tags_list = smoke.json_options.json_coerce(tags_option)
if isinstance(tags_list, basestring):
tags_list = [tags_list]
return map(re.compile, tags_list)
tests = filter_tests_by_tag(tests,
smoke.suites.RegexQuery(include_res=to_regex_array(options.include_tags),
exclude_res=to_regex_array(options.exclude_tags)))
if not tests:
print "warning: no tests specified"
return

View File

@ -1,139 +0,0 @@
new smoke module README
CURRENTLY IN ACTIVE DEVELOPMENT
This directory provides a POC implementation of a new test runner. Features include:
- Test metadata and test tagging
- Pluggable and isolated test APIs...
- ...for different test types
- Simple JSON/YAML (re)configuration
RUNNING:
For command line options invoke:
$ ./buildscripts/resmoke.py --help
The smoke test runner is completely configured by a JSON/YAML configuration - this configuration can either be loaded from file(s) or built at the command line using --set/--unset/--push manipulations to "nested.path.specifiers". For basic testing this isn't necessary however, configuration files have already been provided and are available using special command line options, as shown below.
Some simple examples:
Run a MongoDB test suite:
$ ./buildscripts/resmoke.py --jscore
$ ./buildscripts/resmoke.py --disk
$ ./buildscripts/resmoke.py --sharding
$ ./buildscripts/resmoke.py --replicasets
(more to come)
To run selected files inside a MongoDB test suite:
$ ./buildscripts/resmoke.py --jscore jstests/core/count.js
$ ./buildscripts/resmoke.py --disk jstests/disk/b*.js
$ ./buildscripts/resmoke.py --sharding jstests/sharding/addshard1.js jstests/sharding/addshard2.js
To run a suite with authentication:
$ ./buildscripts/resmoke.py --jscore --auth
$ ./buildscripts/resmoke.py --sharding --auth_shell
NOTE: You may need to change permissions for the jstests/libs/authTestsKey.
To run the core suite with master/slave replication (small oplog):
$ ./buildscripts/resmoke.py --jscore --master_slave
$ ./buildscripts/resmoke.py --jscore --master_slave --auth
By default, the output of smoke testing goes to files. This can be changed, however:
$ ./buildscripts/resmoke.py --disk --log_console
$ ./buildscripts/resmoke.py --jscore --log_suppress
Sometimes we may want to set custom options while running the standard suites:
$ ./buildscripts/resmoke.py --jscore \
--set "executor.fixtures.mongodb_server.mongod_options.noprealloc" ""
... or change the dbpath of the mongod fixture:
$ ./buildscripts/resmoke.py --jscore \
--set "executor.fixtures.mongodb_server.mongod_options.dbpath" "/data/db/mypath"
... or change the executables used by the mongod fixture and the shell:
$ ./buildscripts/resmoke.py --jscore \
--set "executor.fixtures.mongodb_server.mongod_executable" "mongod-2.6" \
--set "executor.testers.js_test.shell_executable" "mongo-2.6"
... or change verbosity of the mongod fixture:
$ ./buildscripts/resmoke.py --jscore \
--set "executor.fixtures.mongodb_server.mongod_options.verbose" 2
... or change the value of a server parameter:
$ ./buildscripts/resmoke.py --jscore \
--set "executor.fixtures.mongodb_server.mongod_options.set_parameters.enableLocalhostAuthBypass" "false"
... or set some auth parameters:
$ ./buildscripts/resmoke.py --jscore --auth \
--set "executor.fixtures.mongodb_server.mongod_options.keyFile" "myKey" \
--set "executor.fixtures.mongodb_server.mongod_options.setParameters.enableLocalhostAuthBypass" false \
--set "executor.fixtures.shell_globals.TestData.keyFile" "myKey"
This can quickly get wordy, with lots of parameters. However, if this is a configuration you plan on testing repeatedly:
$ mkdir -p ~/.smoke_config
$ ./buildscripts/resmoke.py [all your options and args here] --dump-options > ~/.smoke_config/my_auth.yaml
$ ./buildscripts/resmoke.py --my_auth
Note that you can also pipe config file data *into* resmoke.py if you'd like to toy with custom config processing.
As you can see, "special" options to resmoke.py are actually just .json/.yaml option files. The "smoke_config" module provides access to the default suite .json/.yaml files, and you can add/override to these option files in your local user ".smoke_config" directory. Equivalently you can use the '--config-file' option to load a file not in the special directories.
Also, consider running individual files in a suite:
$ ./buildscripts/resmoke.py --jscore jstests/core/count.js
This is just shorthand for overriding the "tests.roots" option with the specified files:
$ ./buildscripts/resmoke.py --jscore --set "tests.roots" "jstests/core/count.js"
TEST METADATA:
Test metadata comes from two sources - embedded in the test files themselves and in a special "test_metadata.json" sibling file on the same path as the test file (by default). For jstests, the "test_metadata.json" file isn't really necessary to manage - but for executable tests that are not inspectable (unittests, dbtests) an external metadata file is needed.
For jstests things are generally simpler. Tags can be added to jstests and will be parsed (by default) when resmoke.py builds the test database from the test roots. These tags have the following form, at the beginning of a jstest file:
/**
* @tags : [ mytagA, mytagB ]
*/
Note that the tags array must be well-formed YAML.
These tags are then available for test filtering:
$ ./buildscripts/resmoke.py --jscore --set suite.include_tags '^mytagA$'
$ ./buildscripts/resmoke.py --disk --set suite.exclude_tags '^assumes_memmapped$'
NOTE: smoke.py has also been instrumented to manage basic jstest tags, with the following syntax:
$ ./buildscripts/smoke.py jsCore --include-tags '^mytagA$'
TEST METADATA BUILDING:
For automated / repeated testing, sometimes it isn't desirable to scan every test file for changes to test metadata. The "tests.extract_metadata" option controls this behavior. An example script to extract metadata in one shot (which can then be used for many test runs without further extraction) is available at:
$ ./buildscripts/resmoke_build_metadata.py --jscore
Note that the example script uses the same kind of options as the resmoke.py script.
INTEGRATION WITH OTHER TOOLS:
To use test database, suite extraction, and suite execution functionality in other tools (like SConscript), import the "smoke" module. This provides:
- smoke.tests: test discovery, metadata load/save
- smoke.suite: test filtering by tags
- smoke.executor: test execution with custom fixtures and logging

View File

@ -1,9 +0,0 @@
import json_options
import tests
import suites
import executor
from fixtures import *
from testers import *

View File

@ -1,137 +0,0 @@
"""
Module which allows execution of a suite of tests with customizable fixtures and testers.
Fixtures are set up per-suite, and register APIs per-test. Generally this is custom setup code.
Testers encapsulate test code of different types in a standard, UnitTest object.
"""
import inspect
import logging
import traceback
import unittest
import fixtures
import testers
def exec_suite(suite, logger, **kwargs):
    """Main entry point, executes a suite of tests with the given logger and executor arguments."""
    runner = TestSuiteExecutor(logger, **kwargs)
    try:
        # Only run the tests when every fixture came up successfully.
        if runner.setup_suite(suite):
            runner.exec_suite()
    finally:
        # Tear down whatever fixtures did get set up, even on failure.
        runner.teardown_suite(suite)
def instantiate(class_name, *args, **kwargs):
    """Helper to dynamically instantiate a class from a name."""
    # "module.Class" -> import "module", then look up the remainder on it.
    (module_name, _sep, attr_name) = class_name.partition(".")
    module = __import__(module_name)
    cls = getattr(module, attr_name)
    return cls(*args, **kwargs)
class TestSuiteExecutor(object):
    """The state of execution of a suite of tests.

    The job of the TestSuiteExecutor is to convert the incoming fixtures and tester configuration
    into Fixture and TestCase objects, then execute them using the standard unittest framework.
    """

    def __init__(self, logger, testers=None, fixtures=None, fail_fast=False, **kwargs):
        """
        Initializes the executor and instantiates the configured fixtures.

        Raises optparse.OptionValueError for unrecognized options.
        """
        # Deferred import; the original referenced optparse without importing
        # it anywhere, so the raise below produced a NameError instead.
        import optparse

        self.logger = logger
        # Use fresh dicts when none are provided: a mutable default argument
        # would be shared (and mutated by the loop below) across instances.
        self.testers = testers if testers is not None else {}
        self.fixtures = fixtures if fixtures is not None else {}
        self.fail_fast = fail_fast

        if len(kwargs) > 0:
            raise optparse.OptionValueError("Unrecognized options for executor: %s" % kwargs)

        # Replace each fixture's configuration dict with the built Fixture.
        for fixture_name in self.fixtures:
            self.fixtures[fixture_name] = \
                self.build_fixture(fixture_name, **self.fixtures[fixture_name])

    def build_fixture(self, fixture_name, fixture_class=None, fixture_logger=None,
                      **fixture_kwargs):
        """Instantiates the named fixture from its configuration."""
        if not fixture_class:
            fixture_class = fixtures.DEFAULT_FIXTURE_CLASSES[fixture_name]
        if not fixture_logger:
            fixture_logger = self.logger.getChild("fixtures.%s" % fixture_name)
        else:
            fixture_logger = logging.getLogger(fixture_logger)

        return instantiate(fixture_class, fixture_logger, **fixture_kwargs)

    def build_tester(self, test):
        """Builds a tester TestCase for 'test', wiring in all fixture APIs."""
        tester_type = test.test_type

        def extract_tester_args(tester_class=None, tester_logger=None, **tester_kwargs):
            # Splits a tester configuration dict into its recognized parts.
            return tester_class, tester_logger, tester_kwargs

        tester_class, tester_logger, tester_kwargs = \
            extract_tester_args(
                **(self.testers[tester_type] if tester_type in self.testers else {}))

        if not tester_class:
            tester_class = testers.DEFAULT_TESTER_CLASSES[tester_type]
        if not tester_logger:
            tester_logger = self.logger.getChild("testers.%s.%s" % (tester_type, test.uri))
        else:
            tester_logger = logging.getLogger(tester_logger)

        # Give every fixture a chance to contribute a per-test API.
        test_apis = []
        for fixture_name, fixture in self.fixtures.items():
            test_api = fixture.build_api(tester_type, tester_logger)
            if test_api:
                test_apis.append(test_api)

        return instantiate(tester_class, test, test_apis, tester_logger, **tester_kwargs)

    def setup_suite(self, suite):
        """Sets up all fixtures; returns False (stopping early) on failure."""
        self.setup_fixtures = {}
        for fixture_name, fixture in self.fixtures.items():
            try:
                fixture.setup()
                self.setup_fixtures[fixture_name] = fixture
            except:
                # print() with a single argument behaves identically on
                # Python 2 and 3 (the original used a Py2 print statement).
                print("Suite setup failed: %s" % fixture_name)
                traceback.print_exc()
                return False

        self.unittest_suite = unittest.TestSuite()
        for test in suite:
            self.unittest_suite.addTest(self.build_tester(test))

        return True

    def exec_suite(self):
        """Runs the prepared unittest suite."""
        # TODO: More stuff here?
        unittest.TextTestRunner(
            verbosity=2, failfast=self.fail_fast).run(self.unittest_suite)

    def teardown_suite(self, suite):
        """Tears down every fixture that was successfully set up."""
        for fixture_name, fixture in self.setup_fixtures.items():
            try:
                fixture.teardown()
            except:
                print("Suite teardown failed: %s" % fixture_name)
                traceback.print_exc()
View File

@ -1,352 +0,0 @@
"""
Module for simple execution of external programs with keyword arguments.
Also supports piping output into standard logging utilities.
"""
import logging
import os
import threading
import sys
import subprocess
# Kinds of keyword arguments understood by apply_json_args(). IGNORE has no
# branch below, so such fields contribute no arguments.
KWARG_TYPE_IGNORE = -1
KWARG_TYPE_NORMAL = 0    # "--flag value" (value omitted when it stringifies empty)
KWARG_TYPE_EQUAL = 1     # "--flag=value"
KWARG_TYPE_MULTIPLE = 2  # "--flag v" repeated once per element
KWARG_TYPE_CALLBACK = 3  # arbitrary handling delegated to a callback


def apply_json_args(process, json_doc, custom_kwargs={}):
    """Translate keyword arguments (JSON) into an argument list for an external process.

    CALLBACK-type args can do arbitrary things to the process being started (set env vars, change
    the process name, etc.).
    """
    for field, value in json_doc.items():
        if field in custom_kwargs:
            (kwarg, kwarg_type) = custom_kwargs[field][0:2]
        else:
            (kwarg, kwarg_type) = ("--" + field, KWARG_TYPE_NORMAL)

        if kwarg_type == KWARG_TYPE_NORMAL:
            if value is not None:
                process.arguments.append(kwarg)
                # Empty-string values mean "flag only" -- no value token.
                if str(value):
                    process.arguments.append(str(value))
        elif kwarg_type == KWARG_TYPE_EQUAL:
            process.arguments.append(kwarg + "=" + str(value))
        elif kwarg_type == KWARG_TYPE_MULTIPLE:
            for single_value in value:
                process.arguments.append(kwarg)
                process.arguments.append(str(single_value))
        elif kwarg_type == KWARG_TYPE_CALLBACK:
            cl_arg_callback = custom_kwargs[field][2]
            cl_arg_callback(process, field, value)
class LoggerPipe(threading.Thread):
    """Monitors an external program's output and sends it to a logger."""

    def __init__(self, logger, level, pipe_out):
        """Creates the monitor and starts the thread immediately; it consumes
        'pipe_out' line by line, logging each line at 'level' on 'logger'."""
        threading.Thread.__init__(self)

        self.logger = logger
        self.level = level
        self.pipe_out = pipe_out

        # 'started'/'finished' are set by run() under 'lock'; waiters block
        # on 'condition' until the corresponding flag flips.
        self.lock = threading.Lock()
        self.condition = threading.Condition(self.lock)

        self.started = False
        self.finished = False

        # NOTE: the thread is launched from the constructor.
        self.start()

    def run(self):
        """Drains the pipe to EOF, forwarding each stripped line to the logger."""
        with self.lock:
            self.started = True
            self.condition.notify_all()

        for line in self.pipe_out:
            self.logger.log(self.level, line.strip())

        with self.lock:
            self.finished = True
            self.condition.notify_all()

    def wait_until_started(self):
        # Blocks until run() has signaled that it is consuming the pipe.
        with self.lock:
            while not self.started:
                self.condition.wait()

    def wait_until_finished(self):
        # Blocks until the pipe has been fully drained (EOF reached).
        with self.lock:
            while not self.finished:
                self.condition.wait()

    def flush(self):
        # Flushes the logger's handlers so buffered output reaches its sink.
        for handler in self.logger.handlers:
            handler.flush()
class ExternalContext(object):
    """Holds the environment, logger, and extra keyword arguments shared by
    executions of an external program."""

    def __init__(self, env=None, env_vars=None, logger=None, **kwargs):
        """
        Initializes the context.

        When 'logger' is not supplied, the root logger is used. When
        'env_vars' is not supplied, a fresh dict is used (a mutable default
        argument would be shared across instances).
        """
        self.env = env
        self.env_vars = env_vars if env_vars is not None else {}
        # The original did "return logging.getLogger("")" here, which made
        # __init__ raise TypeError (non-None return) and skipped the kwargs
        # assignment below whenever no logger was supplied.
        self.logger = logger if logger else logging.getLogger("")
        self.kwargs = dict(kwargs)

    def clone(self):
        """Returns a shallow copy of this context."""
        return ExternalContext(self.env, self.env_vars, self.logger, **self.kwargs)
class ExternalProgram(object):
    """Encapsulates an execution of an external program.

    Unlike subprocess, does not immediately execute the program but allows for further configuration
    and setup. Converts keyword arguments in JSON into an argument list and allows for easy
    execution with custom environment variables.
    """

    def __init__(self,
                 executable,
                 context=None, env=None, env_vars=None,
                 custom_kwargs={},
                 **kwargs):
        self.executable = executable
        # Either adopt the caller's context (merging in extra kwargs) or
        # build a fresh one from the env/env_vars arguments.
        if context:
            self.context = context
            self.context.kwargs.update(kwargs)
        else:
            self.context = ExternalContext(env, env_vars, **kwargs)
        self.custom_kwargs = custom_kwargs

        self.process = None

    def build_process(self, context=None):
        """Creates (but does not start) a _Process from this configuration."""
        ctx = context if context else self.context
        # NOTE(review): ctx.env is not forwarded to _Process here -- confirm
        # whether that is intentional.
        proc = _Process(self.executable,
                        env_vars=ctx.env_vars,
                        logger=ctx.logger)
        apply_json_args(proc, dict(ctx.kwargs), self.custom_kwargs)
        return proc

    def logger(self):
        return self.context.logger

    def start(self):
        """Builds and launches the underlying process."""
        self.process = self.build_process()
        self.process.start()

    def pid(self):
        return self.process.subprocess.pid

    def poll(self):
        return self.process.poll()

    def wait(self):
        """Waits for completion, forgets the process, and returns its exit code."""
        exit_code = self.process.wait()
        self.process = None
        return exit_code

    def stop(self):
        """Stops the program, forgets the process, and returns its exit code."""
        exit_code = self.process.stop()
        self.process = None
        return exit_code

    def flush(self):
        self.process.flush()

    def __str__(self):
        target = self.process if self.process else self.build_process()
        return target.__str__()

    def __repr__(self):
        return self.__str__()
class _Process(object):
"""The system-independent execution of an external program.
Handles finicky stuff once we have our environment, arguments, and logger sorted out.
"""
def __init__(self, executable, arguments=[], env=None, env_vars=None, logger=None):
    """Captures the launch configuration; nothing runs until start()."""
    self.executable = executable
    # Private copy so later appends do not mutate the caller's list.
    self.arguments = [] + arguments
    self.env = env
    self.env_vars = env_vars
    self.logger = logger

    # Populated by start().
    self.subprocess = None
    self.stdout_logger = None
    self.stderr_logger = None
    # Windows only
    self.subprocess_job_object = None
def start(self):
argv, env = [self.executable] + self.arguments, self.env
if self.env_vars:
if not env:
env = os.environ.copy()
env.update(self.env_vars)
creation_flags = 0
if os.sys.platform == "win32":
# Magic number needed to allow job reassignment in Windows 7
# see: MSDN - Process Creation Flags - ms684863
CREATE_BREAKAWAY_FROM_JOB = 0x01000000
creation_flags = CREATE_BREAKAWAY_FROM_JOB
stdout = sys.stdout if not self.logger else subprocess.PIPE
stderr = sys.stderr if not self.logger else subprocess.PIPE
self.subprocess = subprocess.Popen(argv, env=env, creationflags=creation_flags,
stdout=stdout, stderr=stderr)
if stdout == subprocess.PIPE:
self.stdout_logger = LoggerPipe(self.logger, logging.INFO, self.subprocess.stdout)
self.stdout_logger.wait_until_started()
if stderr == subprocess.PIPE:
self.stderr_logger = LoggerPipe(self.logger, logging.ERROR, self.subprocess.stderr)
self.stderr_logger.wait_until_started()
if os.sys.platform == "win32":
# Create a job object with the "kill on job close" flag
# This is inherited by child processes (i.e. the mongod started on our behalf by
# buildlogger) and lets us terminate the whole tree of processes rather than
# orphaning the mongod.
import win32job
job_object = win32job.CreateJobObject(None, '')
job_info = win32job.QueryInformationJobObject(
job_object,
win32job.JobObjectExtendedLimitInformation)
job_info['BasicLimitInformation']['LimitFlags'] |= \
win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
win32job.SetInformationJobObject(job_object,
win32job.JobObjectExtendedLimitInformation,
job_info)
win32job.AssignProcessToJobObject(job_object, proc._handle)
self.subprocess_job_object = job_object
def poll(self):
return self.subprocess.poll()
def wait(self):
return_code = self.subprocess.wait()
self.flush()
if self.stdout_logger:
self.stdout_logger.wait_until_finished()
self.stdout_logger = None
if self.stderr_logger:
self.stderr_logger.wait_until_finished()
self.stderr_logger = None
return return_code
def stop(self):
try:
if os.sys.platform == "win32":
import win32job
win32job.TerminateJobObject(self.subprocess_job_object, -1)
# Windows doesn't seem to kill the process immediately, so give
# it some time to die
time.sleep(5)
elif hasattr(self.subprocess, "terminate"):
# This method added in Python 2.6
self.subprocess.terminate()
else:
os.kill(self.subprocess.pid, 15)
except Exception as e:
print >> self.subprocess_outputs.stderr, "error shutting down process"
print >> self.subprocess_outputs.stderr, e
return self.wait()
def flush(self):
if self.subprocess:
if not self.stderr_logger:
# Going to the console
sys.stderr.flush()
else:
self.stderr_logger.flush()
if self.subprocess:
if not self.stdout_logger:
# Going to the console
sys.stdout.flush()
else:
self.stdout_logger.flush()
def __str__(self):
# We only want to show the *different* environment variables
def env_compare(env_orig, env_new):
diff = {}
for field, value in env_new.iteritems():
if not field in env_orig:
diff[field] = value
return diff
env_diff = env_compare(os.environ, self.env) if self.env else {}
if self.env_vars:
for field, value in self.env_vars.iteritems():
env_diff[field] = value
env_strs = []
for field, value in env_diff.iteritems():
env_strs.append("%s=%s" % (field, value))
cl = []
if env_strs:
cl.append(" ".join(env_strs))
cl.append(self.executable)
if self.arguments:
cl.append(" ".join(self.arguments))
if self.subprocess:
cl.append("(%s)" % self.subprocess.pid)
return " ".join(cl)
def __repr__(self):
return self.__str__()

View File

@ -1,314 +0,0 @@
"""
Fixtures for the execution of JSTests
"""
import os
import shutil
import time
from external_programs import *
from mongodb_programs import MongoD
from mongodb_programs import MONGOD_DEFAULT_DATA_PATH
from mongodb_programs import MONGOD_DEFAULT_EXEC
# Default dotted class paths used when a suite config does not name its own
# fixture implementations (presumably resolved by import elsewhere -- confirm).
DEFAULT_FIXTURE_CLASSES = {"mongodb_server": "smoke.SingleMongoDFixture",
                           "shell_globals": "smoke.GlobalShellFixture"}
class Fixture(object):
    """Abstract base for fixtures: suite-level setup/teardown plus a per-test API."""

    def __init__(self, logger):
        # Logger used by subclasses to report fixture lifecycle events.
        self.logger = logger

    def setup(self):
        """Bring up suite-level resources (no-op by default)."""
        pass

    def build_api(self, test_type, test_logger):
        """Return the per-test API object for this fixture (default: none)."""
        pass

    def teardown(self):
        """Tear down suite-level resources (no-op by default)."""
        pass
class SimpleFixture(Fixture):
    """A fixture that keeps no per-test state.

    The per-test API is the fixture object itself, so subclasses just
    implement the add_to_<type> hooks directly. Fixtures that need per-test
    logging cannot use this shortcut.
    """

    def __init__(self, logger):
        Fixture.__init__(self, logger)

    def build_api(self, test_type, test_logger):
        # No per-test state: the fixture itself is the API handed to the test.
        return self

    def add_to_shell(self, shell_context):
        """Hook: mutate the shell context before a shell test runs."""
        pass

    def add_to_process(self, external_context):
        """Hook: mutate the external program context before a test runs."""
        pass

    def teardown_api(self):
        """Hook: called after each test; default does nothing."""
        pass
def _get_mapped_size_MB(client):
    """Return the server's "mem.mapped" value (in MB) from serverStatus."""
    status = client.admin.command("serverStatus")
    mem_info = status["mem"] if "mem" in status else {}
    if "mapped" not in mem_info:
        raise Exception(
            "Could not get data size of MongoDB server, status was %s" % status)
    return mem_info["mapped"]
class SingleMongoDFixture(SimpleFixture):
    """Basic fixture which provides JSTests with a single-MongoD database to connect to.

    Can be restarted automatically after reaching a configurable "mapped" size.
    """

    def __init__(self, logger,
                 mongod_executable=MONGOD_DEFAULT_EXEC,
                 mongod_options=None,
                 default_data_path=MONGOD_DEFAULT_DATA_PATH,
                 preserve_dbpath=False,
                 max_mapped_size_MB=None):
        self.logger = logger
        self.mongod_executable = mongod_executable
        # Bug fix: was a shared mutable {} default argument.
        self.mongod_options = mongod_options if mongod_options is not None else {}
        self.default_data_path = default_data_path
        self.preserve_dbpath = preserve_dbpath
        # When set, teardown_api() restarts the server once it maps this many MB.
        self.max_mapped_size_MB = max_mapped_size_MB
        self.mongod = None

    def setup(self):
        """Start the (lazily-constructed) mongod and wait until it accepts clients."""
        if self.mongod is None:
            self.mongod = MongoD(executable=self.mongod_executable,
                                 default_data_path=self.default_data_path,
                                 preserve_dbpath=self.preserve_dbpath,
                                 context=ExternalContext(logger=self.logger),
                                 **self.mongod_options)
        try:
            self.logger.info("Starting MongoDB server...\n%s" % self.mongod)
            self.mongod.start()
            self.logger.info("MongoDB server started at %s:%s with pid %s." %
                             (self.mongod.host, self.mongod.port, self.mongod.pid()))
            self.mongod.wait_for_client()
            self.logger.info("MongoDB server at %s:%s successfully contacted." %
                             (self.mongod.host, self.mongod.port))
            self.mongod.flush()
        except:
            self.logger.error("MongoDB server failed to start.", exc_info=True)
            raise

    def add_to_shell(self, shell_context):
        # Point shell-based tests at this server.
        shell_context.db_address = \
            "%s:%s" % (self.mongod.host, self.mongod.port)

    def teardown_api(self):
        # Restart the server between tests once it maps too much memory.
        if self.max_mapped_size_MB is not None:
            if _get_mapped_size_MB(self.mongod.client()) > self.max_mapped_size_MB:
                self.logger.info(
                    "Maximum mapped size %sMB reached, restarting MongoDB..." %
                    self.max_mapped_size_MB)
                self.teardown()
                self.setup()

    def teardown(self):
        """Stop the mongod; failures are logged and re-raised."""
        try:
            self.logger.info("Stopping MongoDB server at %s:%s with pid %s..." %
                             (self.mongod.host, self.mongod.port, self.mongod.pid()))
            self.mongod.stop()
            self.logger.info("MongoDB server stopped.")
        except:
            self.logger.error("MongoDB server failed to stop.", exc_info=True)
            raise
class MasterSlaveFixture(SimpleFixture):
    """Fixture which provides JSTests with a master-MongoD database to connect to.
    A slave MongoD instance replicates the master in the background.
    """
    # NOTE(review): the {} defaults below are shared mutable objects; safe only
    # because __init__ copies them via update() and never mutates them.
    def __init__(self, logger,
                 mongod_executable=MONGOD_DEFAULT_EXEC,
                 mongod_options={},
                 master_options={},
                 slave_options={},
                 default_data_path=MONGOD_DEFAULT_DATA_PATH,
                 preserve_dbpath=False,
                 max_mapped_size_MB=None):
        self.logger = logger
        self.mongod_executable = mongod_executable
        # Per-node options: the shared mongod_options overlaid with role-specific ones.
        self.master_options = {}
        self.master_options.update(mongod_options)
        self.master_options.update(master_options)
        self.slave_options = {}
        self.slave_options.update(mongod_options)
        self.slave_options.update(slave_options)
        self.default_data_path = default_data_path
        self.preserve_dbpath = preserve_dbpath
        # When set, teardown_api() restarts both nodes once the *master's*
        # mapped memory exceeds this many MB.
        self.max_mapped_size_MB = max_mapped_size_MB
        self.master = None
        self.slave = None
    def setup(self):
        """Start the master, then a slave replicating from it; wait for both."""
        if self.master is None:
            self.master_options["master"] = ""
            self.master = MongoD(executable=self.mongod_executable,
                                 default_data_path=self.default_data_path,
                                 preserve_dbpath=self.preserve_dbpath,
                                 context=ExternalContext(logger=self.logger),
                                 **self.master_options)
        try:
            self.logger.info("Starting MongoDB master server...\n%s" % self.master)
            self.master.start()
            self.logger.info("MongoDB master server started at %s:%s with pid %s." %
                             (self.master.host, self.master.port, self.master.pid()))
            self.master.wait_for_client()
            self.logger.info("MongoDB master server at %s:%s successfully contacted." %
                             (self.master.host, self.master.port))
            self.master.flush()
        except:
            self.logger.error("MongoDB master server failed to start.", exc_info=True)
            raise
        if self.slave is None:
            self.slave_options["slave"] = ""
            self.slave_options["source"] = "%s:%s" % (self.master.host, self.master.port)
            # NOTE(review): unlike the master, the slave is not passed
            # preserve_dbpath -- confirm whether that is intentional.
            self.slave = MongoD(executable=self.mongod_executable,
                                default_data_path=self.default_data_path,
                                context=ExternalContext(logger=self.logger),
                                **self.slave_options)
        try:
            self.logger.info("Starting MongoDB slave server...\n%s" % self.slave)
            self.slave.start()
            self.logger.info("MongoDB slave server started at %s:%s with pid %s." %
                             (self.slave.host, self.slave.port, self.slave.pid()))
            self.slave.wait_for_client()
            self.logger.info("MongoDB slave server at %s:%s successfully contacted." %
                             (self.slave.host, self.slave.port))
            self.slave.flush()
        except:
            self.logger.error("MongoDB slave server failed to start.", exc_info=True)
            raise
    def add_to_shell(self, shell_context):
        # Tests connect to the master; the slave replicates in the background.
        shell_context.db_address = \
            "%s:%s" % (self.master.host, self.master.port)
    def teardown_api(self):
        # Restart both nodes between tests once the master maps too much memory.
        if self.max_mapped_size_MB is not None:
            if _get_mapped_size_MB(self.master.client()) > self.max_mapped_size_MB:
                self.logger.info(
                    "Maximum mapped size %sMB reached, restarting MongoDB..." %
                    self.max_mapped_size_MB)
                self.teardown()
                self.setup()
    def teardown(self):
        """Stop the slave first, then the master; failures are logged and re-raised."""
        try:
            self.logger.info("Stopping MongoDB slave server at %s:%s with pid %s..." %
                             (self.slave.host, self.slave.port, self.slave.pid()))
            self.slave.stop()
            self.logger.info("MongoDB slave server stopped.")
        except:
            self.logger.error("MongoDB slave server failed to stop.", exc_info=True)
            raise
        try:
            self.logger.info("Stopping MongoDB master server at %s:%s with pid %s..." %
                             (self.master.host, self.master.port, self.master.pid()))
            self.master.stop()
            self.logger.info("MongoDB master server stopped.")
        except:
            self.logger.error("MongoDB master server failed to stop.", exc_info=True)
            raise
class GlobalShellFixture(SimpleFixture):
    """Passthrough fixture exposing arbitrary JSON options as shell globals.

    Useful for handing options (for example auth settings) straight to
    jstests running in the shell.
    """

    def __init__(self, logger, **kwargs):
        self.logger = logger
        # Every keyword argument becomes a shell global variable.
        self.kwargs = kwargs

    def setup(self):
        pass

    def add_to_shell(self, shell_context):
        # Inject the stored options directly into the shell's global scope.
        shell_context.global_context.update(self.kwargs)

    def teardown_api(self):
        pass

    def teardown(self):
        pass

View File

@ -1,476 +0,0 @@
#!/usr/bin/python
"""
JSON/YAML option parsing library and command line manipulation.
Also the entry point for running tests based on JSON options files. See usage for more info.
"""
import json
import optparse
import os
import re
import sys
# Transparently handle YAML existing or not
try:
import yaml
except ImportError:
yaml = None
def json_underscore_fields(root):
    """Recursively rename dict keys in place, replacing '-' with '_'.

    Returns the (mutated) root so calls can be nested.
    """
    if isinstance(root, dict):
        # Bug fix: snapshot the items first -- deleting keys while iterating
        # the live items() view raises RuntimeError on Python 3 (it only
        # worked on Python 2 because items() returned a list).
        for field, value in list(root.items()):
            del root[field]
            root[field.replace("-", "_")] = json_underscore_fields(value)
    elif isinstance(root, list):
        for i in range(len(root)):
            root[i] = json_underscore_fields(root[i])
    return root
# Matches //-style line comments and /* */ block comments (naive: it does not
# recognize comment-like text inside JSON string literals).
# Bug fix: the pattern is now a raw string; the old plain string relied on
# invalid escape sequences ("\S"), which newer Pythons warn about and will
# eventually reject.
COMMENT_RE = \
    re.compile(
        r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?', re.DOTALL | re.MULTILINE)


def json_strip_comments(json_with_comments):
    """Strip comments from JSON strings, for easier input."""
    # Remove one comment match at a time until none remain.
    match = COMMENT_RE.search(json_with_comments)
    while match:
        # single line comment
        json_with_comments = json_with_comments[
            :match.start()] + json_with_comments[match.end():]
        match = COMMENT_RE.search(json_with_comments)
    return json_with_comments
def json_update(root, new_root):
    """Recursively merge new_root into root; non-dict values are replaced wholesale."""
    if not (isinstance(root, dict) and isinstance(new_root, dict)):
        # Anything other than dict-into-dict simply overwrites.
        return new_root
    for field, new_value in new_root.items():
        existing_value = root.get(field)
        root[field] = json_update(existing_value, new_value)
    return root
class Unset(object):
    """Sentinel marking a JSON field for removal during path updates."""

    def __init__(self):
        pass

    def __repr__(self):
        # Render like YAML's null marker.
        return "~"

    def __str__(self):
        return repr(self)
def json_update_path(root, path, value, **kwargs):
    """Update a JSON root based on a path. Special '.'-traversal, and '*' and '**' traversal.
    Paths like "x.*.y" resolve to any path starting with x, having a single intermediate subpath,
    and ending with y. Example: "x.a.y", "x.b.y"
    Paths like "x.**.y" resolve to any path starting with x, having zero or more intermediate
    subpaths, and ending with y. Example: "x.y", "x.a.y", "x.b.c.y"
    Recognized kwargs (threaded through the recursive calls):
      implicit_create -- create missing intermediate containers (default True)
      push -- append `value` to a list at the path instead of replacing it
      indent -- internal; grows per recursion level for the debug print below
    Returns the updated root; an Unset() result signals the caller to delete the field.
    """
    head_path, rest_path = split_json_path(path)
    implicit_create = kwargs[
        "implicit_create"] if "implicit_create" in kwargs else True
    push = kwargs["push"] if "push" in kwargs else False
    indent = kwargs["indent"] if "indent" in kwargs else ""
    kwargs["indent"] = indent + " "
    # print indent, root, head_path, rest_path, kwargs
    # Empty head: we've reached the target location in the document.
    if not head_path:
        if not push:
            return value
        else:
            # Implicitly create a root array if we need to push
            if isinstance(root, Unset):
                if not implicit_create:
                    return root
                else:
                    root = []
            if not isinstance(root, list):
                root = [root]
            root.append(value)
            return root
    # star-star-traverse all children recursively including the root itself
    if head_path == "**":
        # Don't create nonexistent child paths when star-traversing
        kwargs["implicit_create"] = False
        root_range = range(0, 0)
        if isinstance(root, dict):
            root_range = root.keys()
        elif isinstance(root, list):
            root_range = range(0, len(root))
        for field in root_range:
            # Update field children *and* field doc if ** - ** updates root
            # *and* children
            root[field] = json_update_path(
                root[field], "**." + rest_path, value, **kwargs)
            if isinstance(root[field], Unset):
                del root[field]
        # Update current root too if ** and we haven't already pushed to the
        # list
        root = json_update_path(root, rest_path, value, **kwargs)
        return root
    # don't traverse values
    if not isinstance(root, Unset) and not isinstance(root, list) and not isinstance(root, dict):
        return root
    # star-traverse docs
    if head_path == "*" and isinstance(root, dict):
        # Don't create nonexistent child paths when star-traversing
        kwargs["implicit_create"] = False
        for field in root:
            root[field] = json_update_path(
                root[field], rest_path, value, **kwargs)
            if isinstance(root[field], Unset):
                del root[field]
        return root
    # traverse lists
    if isinstance(root, list):
        root_range = None
        if head_path.isdigit():
            # numeric index arrays
            root_range = range(int(head_path), int(head_path) + 1)
        else:
            if head_path == "*":
                # Don't create nonexistent child paths when star-traversing
                kwargs["implicit_create"] = False
            # dot- or star-traverse arrays
            root_range = range(0, len(root))
            # don't consume head unless '*'
            rest_path = path if head_path != "*" else rest_path
        for i in root_range:
            root[i] = json_update_path(root[i], rest_path, value, **kwargs)
            if isinstance(root[i], Unset):
                del root[i]
        return root
    # Implicitly create a root doc if we need to keep traversing
    if isinstance(root, Unset):
        if not implicit_create:
            return root
        else:
            root = {}
    # Traverse into the dict object
    if not head_path in root:
        root[head_path] = Unset()
    root[head_path] = json_update_path(
        root[head_path], rest_path, value, **kwargs)
    if isinstance(root[head_path], Unset):
        del root[head_path]
    return root
def split_json_path(path):
    """Split 'head.rest' into (head, rest); a terminal field yields rest '.'."""
    head, separator, rest = path.partition(".")
    if not separator:
        # No dot at all: use "." so the next recursion sees an empty head.
        rest = "."
    return (head, rest)
def json_coerce(json_value):
    """Coerce a command-line string into a JSON value.

    Tries to parse the raw text as JSON first ('3' -> 3, 'true' -> True);
    anything unparseable is treated as a plain string ('abc' -> 'abc').
    """
    try:
        return json.loads('[' + json_value + ']')[0]
    except ValueError:
        # Bug fix: narrowed from a bare except so non-parse errors (e.g.
        # TypeError for a non-string argument, KeyboardInterrupt) propagate.
        return json.loads('["' + json_value + '"]')[0]
def json_string_load(json_str):
    """Loads JSON data from a JSON string or a YAML string."""
    try:
        return json.loads(json_strip_comments(json_str))
    except ValueError:
        # Bug fix: narrowed from a bare except -- only fall back to YAML when
        # the text is not valid JSON, not on arbitrary unrelated errors.
        if yaml:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary tags; prefer yaml.safe_load if input may be untrusted.
            return yaml.load(json_str)
        else:
            raise
def json_pipe_load(json_pipe):
    """Loads JSON (or YAML) data from an open file-like object."""
    contents = "".join(json_pipe.readlines())
    return json_string_load(contents)
def json_file_load(json_filename):
    """Loads JSON data from a JSON file or a YAML file.

    Raises a descriptive error when a .yaml file is given but PyYAML is not
    installed; every other failure now propagates.
    """
    try:
        with open(json_filename) as json_file:
            return json_pipe_load(json_file)
    except Exception as ex:
        filebase, ext = os.path.splitext(json_filename)
        if not yaml and ext == ".yaml":
            raise Exception(("YAML library not found, cannot load %s, " +
                             "install PyYAML to correct this.") % json_filename, ex)
        # Bug fix: previously this fell through and silently returned None on
        # any other error (bad path, malformed file, ...).
        raise
def json_dump(root, json_only=False):
    """Serialize root as YAML when available, else (or when forced) as JSON."""
    if not json_only and yaml:
        return yaml.safe_dump(root, default_flow_style=False)
    return json.dumps(root, sort_keys=True, indent=2)
class MultipleOption(optparse.Option):
    """Custom option class to allow parsing special JSON options by path."""
    # Register the JSON-manipulating actions alongside optparse's built-ins.
    # Each of them stores the raw value on `values` and (except "extend")
    # also mutates parser.json_root.
    ACTIONS = optparse.Option.ACTIONS + \
        ("extend", "json_file_update", "json_set", "json_unset", "json_push")
    STORE_ACTIONS = optparse.Option.STORE_ACTIONS + \
        ("extend", "json_file_update", "json_set", "json_unset", "json_push")
    TYPED_ACTIONS = optparse.Option.TYPED_ACTIONS + \
        ("extend", "json_file_update", "json_set", "json_unset", "json_push")
    ALWAYS_TYPED_ACTIONS = optparse.Option.ALWAYS_TYPED_ACTIONS + \
        ("extend", "json_file_update", "json_set", "json_unset", "json_push")
    def take_action(self, action, dest, opt, value, values, parser):
        """Dispatch on the custom actions; anything else defers to optparse."""
        if action == "extend":
            # Like "append", but a list value extends rather than nests.
            if isinstance(value, list):
                dest_values = values.ensure_value(dest, [])
                for item in value:
                    dest_values.append(item)
            else:
                values.ensure_value(dest, []).append(value)
        elif action == "json_set":
            values.ensure_value(dest, []).append(value)
            # value is a (path, json-text) pair from nargs=2.
            json_path, json_value = value
            if isinstance(json_value, str):
                # Coerce "3"/"true"/'{...}' strings into real JSON values.
                json_value = json_coerce(json_value)
            parser.json_root = json_update_path(
                parser.json_root, json_path, json_value)
        elif action == "json_unset":
            values.ensure_value(dest, []).append(value)
            json_path = value
            parser.json_root = json_update_path(
                parser.json_root, json_path, Unset())
            if isinstance(parser.json_root, Unset):
                # Unsetting the whole document leaves an empty object.
                parser.json_root = {}
        elif action == "json_push":
            values.ensure_value(dest, []).append(value)
            json_path, json_value = value
            if isinstance(json_value, str):
                json_value = json_coerce(json_value)
            parser.json_root = json_update_path(
                parser.json_root, json_path, json_value, push=True)
        elif action == "json_file_update":
            json_filename = None
            if not value:
                # Use default value as file
                json_filename = values.ensure_value(dest, [])
            else:
                # Use specified value as file
                values.ensure_value(dest, []).append(value)
                json_filename = value
            if not os.path.isfile(json_filename):
                raise optparse.OptionValueError(
                    "cannot load json/yaml config from %s" % json_filename)
            # Merge the loaded document on top of the accumulated JSON root.
            json_data = json_file_load(json_filename)
            parser.json_root = json_update(parser.json_root, json_data)
        else:
            optparse.Option.take_action(
                self, action, dest, opt, value, values, parser)
class JSONOptionParser(optparse.OptionParser):
    """Custom option parser for JSON options.
    In addition to parsing normal options, also maintains a JSON document which can be updated by
    special --set, --unset, and --push options.
    """
    DEFAULT_USAGE = \
        """Complex JSON updates are supported via nested paths with dot separators:
Ex: field-a.field-b.field-c
- The --set option implicitly creates any portion of the path that does not exist, as does the \
--push option.
- The --push option implicitly transforms the target of the push update into an array if not \
already an array, and adds the --push'd value to the end of the array.
- The --unset option removes options by path.
Arrays are traversed implicitly, or you can specify an array index as a field name to traverse a \
particular array element.
JSON specified at the command line is implicitly coerced into JSON types. To avoid ambiguity when \
specifying string arguments, you may explicitly wrap strings in double-quotes which will always \
transform into strings.
Ex: --set tests.foo 'abcdef' -> { "tests" : { "foo" : "abcdef" } }
Ex: --set tests.foo '{ "x" : 3 }' -> { "tests" : { "foo" : { "x" : 3 } }
Ex: --set tests.foo '"{ \"x\" : 3 }"' -> { "tests" : { "foo" : "{ \"x\" : 3 }" }
Ex: --set tests.foo 'true' -> { "tests" : { "foo" : true }
Ex: --set tests.foo '"true"' -> { "tests" : { "foo" : "true" }
The special star and star-star ('*' and '**') operators allow wildcard expansion of paths.
- '*' expands to any field at the current nesting in the path
- '**' expands to *all* fields at the current or child nestings of the path - this lets one \
easily set all fields with the same names from a particular root.
Ex: --set executor.**.mongod-options.nopreallocj ""
Wildcard-expanded paths are not implicitly created when they do not already exist - this also \
applies to wildcard --push operations.
- The --config-file option supports loading a full YAML or JSON document from file. Multiple \
config files can be specified, in which case the documents are merged recursively, in order of \
specification."""
    # NOTE(review): configfile_args={} is a shared mutable default; it is only
    # read here, so this is safe but fragile.
    def __init__(self, add_default_options=True, configfile_args={}, *args, **kwargs):
        kwargs["option_class"] = MultipleOption
        optparse.OptionParser.__init__(self, *args, **kwargs)
        # The JSON document accumulated from --set/--unset/--push/--config-file.
        self.json_root = {}
        self.configfile_args = configfile_args
        if add_default_options:
            self.build_default_options()
    def build_default_options(self):
        """Register the standard --config-file/--set/--unset/--push options."""
        help = \
            """Options specified as a JSON-formatted file, """ + \
            """applied on top of the current JSON options."""
        self.add_option('--config-file', dest='json_config_files',
                        action="json_file_update", default=[], help=help)
        help = \
            """Sets a JSON value or values along the specified path."""
        self.add_option(
            '--set', dest='json_set_values', action="json_set", nargs=2, help=help)
        help = \
            """Unsets a JSON value or values along the specified path."""
        self.add_option('--unset', dest='json_unset_values', action="json_unset", nargs=1,
                        help=help)
        help = \
            """Pushes a JSON value or values along the specified path."""
        # NOTE(review): --push reuses dest='json_unset_values' (same as
        # --unset); looks like a copy/paste slip -- confirm before renaming,
        # since callers may read this attribute off the parsed values.
        self.add_option('--push', dest='json_unset_values', action="json_push", nargs=2,
                        help=help)
        # One extra flag per pre-registered config file; with nargs=0 the
        # stored default filename itself is what gets loaded.
        # NOTE: iteritems() is Python-2-only.
        for configfile_arg, configfile_filename in self.configfile_args.iteritems():
            self.add_option("--" + configfile_arg, dest=configfile_arg, action="json_file_update",
                            default=configfile_filename, nargs=0)
    def parse_json_args(self):
        """Parse argv; returns (values, positional args, accumulated json_root)."""
        # When stdin is piped (not a TTY), seed json_root from it first.
        if not sys.stdin.isatty():
            self.json_root = json_pipe_load(sys.stdin)
        values, args = self.parse_args()
        return (values, args, self.json_root)
USAGE = \
"""smoke_json.py <JSON CONFIG>
All options are specified as JSON - the json configuration can be loaded via a file and/or \
specified as options via the --set, --unset, and --push operators.
For example:
smoke_json.py --push tests.roots "./jstests/disk/*.js" \\
--set suite '{}' --set executor.test-executors.jstest '{}'
results in:
...
Test Configuration:
{
"suite": {},
"tests": {
"roots": [
"./jstests/disk/*.js"
]
},
"executor": {
"test-executors": {
"jstest": {}
}
}
}
...
""" + JSONOptionParser.DEFAULT_USAGE

View File

@ -1,29 +0,0 @@
"""
Very basic network helpers to allow programs to easily reserve network ports and manage timeouts.
"""
import time
import socket
class Timer(object):
    """Measures wall-clock time elapsed since construction."""

    def __init__(self):
        # Capture the construction instant; elapsed_secs is relative to this.
        self.start_time_secs = time.time()

    def elapsed_secs(self):
        """Return seconds elapsed since this Timer was created."""
        return time.time() - self.start_time_secs
class UnusedPort(object):
    """Reserves a local TCP port by holding a bound socket until release()."""

    def __init__(self, port=0):
        # Bind on all interfaces; port 0 asks the OS for any free port.
        reserved = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        reserved.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        reserved.bind(("0.0.0.0", port))
        self.unused_socket = reserved
        self.addr, self.port = reserved.getsockname()

    def release(self):
        """Close the reserving socket and clear all state."""
        self.unused_socket.close()
        self.unused_socket, self.addr, self.port = None, None, None

View File

@ -1,317 +0,0 @@
"""
Basic utilities to start and stop mongo processes on the local machine.
Encapsulates all the nitty-gritty parameter conversion, database path setup, and custom arguments.
"""
import json
import os
import shutil
import time
from external_programs import *
from mongodb_network import *
#
# Callback functions defined for special kwargs to MongoD/MongoShell/DBTests
#
def apply_buildlogger_args(process, field, value):
    """Custom-kwarg callback: rewrite `process` to run under buildlogger.

    `value` is a JSON dict of buildlogger options; any remaining options are
    exported (uppercased) as environment variables for the buildlogger script.
    """
    def hookup_bl(python_executable="python",
                  buildlogger_script="buildlogger.py",
                  buildlogger_global=False,
                  **env_kwargs):
        buildlogger_arguments = [buildlogger_script]
        if buildlogger_global:
            buildlogger_arguments.append("-g")
        # The real executable becomes buildlogger's first argument.
        buildlogger_arguments.append(process.executable)
        process.executable = python_executable
        process.arguments = buildlogger_arguments + process.arguments
        # Renamed from `field`, which shadowed the outer parameter.
        for env_name in env_kwargs:
            process.env_vars[env_name.upper()] = env_kwargs[env_name]
    hookup_bl(**value)
# The "buildlogger" argument is a special command-line parameter, does crazy stuff
# (it rewrites the command line to run under buildlogger.py; see
# apply_buildlogger_args). Tuple layout presumably (default, kwarg-type,
# callback) per external_programs' KWARG_TYPE_CALLBACK convention -- confirm.
BUILDLOGGER_CUSTOM_KWARGS = \
    {"buildlogger": (None, KWARG_TYPE_CALLBACK, apply_buildlogger_args)}
def apply_verbose_arg(process, field, value):
    """Custom-kwarg callback: translate integer verbosity N into "-v"*N (e.g. 3 -> -vvv)."""
    flag = "v" * value
    if flag:
        # Zero verbosity produces no flag at all.
        process.arguments.append("-%s" % flag)
# The "verbose" argument is a special command-line parameter, converts to "v"s
# (an integer N becomes "-v"*N; see apply_verbose_arg above).
VERBOSE_CUSTOM_KWARGS = \
    {"verbose": (None, KWARG_TYPE_CALLBACK, apply_verbose_arg)}
def apply_setparam_args(process, field, value):
    """Custom-kwarg callback: expand a dict into --setParameter name=value args.

    Values are JSON-encoded so booleans/numbers serialize the way the server
    expects. Uses items() instead of the Python-2-only iteritems() so the
    helper also runs on Python 3 (identical behavior on Python 2).
    """
    for param_name, param_value in value.items():
        process.arguments.append("--setParameter")
        process.arguments.append("%s=%s" % (param_name, json.dumps(param_value)))
# The "set_parameters" arg is a special command line parameter, converts to "field=value"
SETPARAM_CUSTOM_KWARGS = \
    {"set_parameters": (None, KWARG_TYPE_CALLBACK, apply_setparam_args)}
#
# Default MongoD options
#
MONGOD_DEFAULT_EXEC = "./mongod"
MONGOD_DEFAULT_DATA_PATH = "/data/db"
# Combined special-kwarg handlers applied to every MongoD process.
# NOTE(review): concatenating dict.items() results is Python-2-only; on
# Python 3 items() returns views that do not support "+".
MONGOD_KWARGS = dict(
    BUILDLOGGER_CUSTOM_KWARGS.items() +
    VERBOSE_CUSTOM_KWARGS.items() +
    SETPARAM_CUSTOM_KWARGS.items())
class MongoD(ExternalProgram):
    """A locally-running MongoD process."""
    def __init__(self,
                 executable=MONGOD_DEFAULT_EXEC,
                 default_data_path=MONGOD_DEFAULT_DATA_PATH,
                 preserve_dbpath=False,
                 custom_kwargs=MONGOD_KWARGS,
                 **kwargs):
        # Remaining kwargs are forwarded to mongod as command-line options.
        mongod_kwargs = dict(kwargs.items())
        self.host = "localhost"
        # Reserve a port now; it is released just before startup (racy, see start()).
        if "port" in mongod_kwargs:
            self.unused_port = UnusedPort(mongod_kwargs["port"])
        else:
            self.unused_port = UnusedPort()
            mongod_kwargs["port"] = self.unused_port.port
        self.port = mongod_kwargs["port"]
        # Default dbpath is <default_data_path>/<host>-<port> unless overridden.
        if "dbpath" not in mongod_kwargs:
            mongod_kwargs["dbpath"] = \
                os.path.join(default_data_path, "%s-%s" % (self.host, self.port))
        self.dbpath = mongod_kwargs["dbpath"]
        self.preserve_dbpath = preserve_dbpath
        ExternalProgram.__init__(self, executable, custom_kwargs=custom_kwargs, **mongod_kwargs)
    def _cleanup(self):
        # Wipe the dbpath between runs unless asked to preserve it.
        if not self.preserve_dbpath and os.path.exists(self.dbpath):
            self.logger().info("Removing data in dbpath %s" % self.dbpath)
            shutil.rmtree(self.dbpath)
    def start(self):
        """Prepare the dbpath, release the reserved port, and launch mongod."""
        try:
            self._cleanup()
            if not os.path.exists(self.dbpath):
                self.logger().info("Creating dbpath at \"%s\"" % self.dbpath)
                os.makedirs(self.dbpath)
        except:
            self.logger().error("Failed to setup dbpath at \"%s\"" % self.dbpath, exc_info=True)
            raise
        # Slightly racy - fixing is tricky
        self.unused_port.release()
        self.unused_port = None
        ExternalProgram.start(self)
    def wait_for_client(self, timeout_secs=30.0):
        """Poll with "ismaster" until the server accepts connections or times out."""
        timer = Timer()
        while True:
            if self.poll() is not None:
                # MongoD exited for some reason
                raise Exception(
                    "Could not connect to MongoD server at %s:%s, process ended unexpectedly." %
                    (self.host, self.port))
            try:
                # Try to connect to the mongod with a pymongo client - 30s default socket timeout
                self.client().admin.command("ismaster")
                break
            except Exception as ex:
                if timer.elapsed_secs() > timeout_secs:
                    raise Exception(
                        "Failed to connect to MongoD server at %s:%s." %
                        (self.host, self.port), ex)
                else:
                    self.logger().info("Waiting to connect to MongoD server at %s:%s..." %
                                       (self.host, self.port))
                    time.sleep(0.5)
        self.logger().info("Connected to MongoD server at %s:%s." % (self.host, self.port))
    def client(self, **client_args):
        """Return a fresh pymongo client pointed at this server."""
        # Import pymongo here, only when needed
        import pymongo
        return pymongo.MongoClient(self.host, self.port, **client_args)
    def _wait_for_port(self, timeout_secs=10):
        # Re-reserve our port to confirm the OS has released it after shutdown.
        timer = Timer()
        while True:
            try:
                self.unused_port = UnusedPort(self.port)
                break
            except Exception as ex:
                if timer.elapsed_secs() > timeout_secs:
                    raise Exception("Failed to cleanup port from MongoD server at %s:%s" %
                                    (self.host, self.port), ex)
                self.logger().info("Waiting for MongoD server at %s:%s to relinquish port..." %
                                   (self.host, self.port))
                time.sleep(0.5)
    def wait(self):
        """Wait for mongod to exit, then reclaim the port and wipe the dbpath."""
        ExternalProgram.wait(self)
        # Slightly racy - fixing is tricky
        self._wait_for_port()
        self._cleanup()
    def stop(self):
        """Terminate mongod, then reclaim the port and wipe the dbpath."""
        ExternalProgram.stop(self)
        # Slightly racy - fixing is tricky
        self._wait_for_port()
        self._cleanup()
#
# Default MongoShell options
#
MONGOSHELL_DEFAULT_EXEC = "./mongo"
# Only the buildlogger special kwarg applies to shell invocations.
MONGOSHELL_KWARGS = dict(BUILDLOGGER_CUSTOM_KWARGS.items())
class MongoShellContext(object):
    """The context for a mongo shell execution.

    Tests using the shell can only receive APIs injected into the shell when
    it starts - generally as global variables. Shell options and those global
    variables are collected in this structure.
    """

    def __init__(self):
        # Address of the server the shell should connect to ("host:port"),
        # or None for --nodb.
        self.db_address = None
        # Mapping of global variable name -> JSON-serializable value.
        self.global_context = {}
class MongoShell(ExternalProgram):
    """A locally-running MongoDB shell process.

    Makes it easy to start with custom global variables, pointed at a custom
    database, etc.
    """

    def __init__(self,
                 executable=MONGOSHELL_DEFAULT_EXEC,
                 shell_context=None,
                 db_address=None,
                 global_context=None,
                 js_filenames=None,
                 custom_kwargs=MONGOSHELL_KWARGS,
                 **kwargs):
        # Bug fix: global_context/js_filenames used shared mutable {}/[]
        # defaults; None-sentinels preserve the old call semantics.
        ExternalProgram.__init__(self, executable, custom_kwargs=custom_kwargs, **kwargs)
        self.shell_context = shell_context
        if not shell_context:
            # Build a fresh context from the convenience arguments.
            self.shell_context = MongoShellContext()
            self.shell_context.db_address = db_address
            self.shell_context.global_context.update(global_context or {})
        # Copy so appends never alias the caller's list.
        self.js_filenames = list(js_filenames) if js_filenames else []

    def build_eval_context(self):
        """Serialize the shell globals into one --eval preamble string."""
        eval_strs = []
        # items() instead of the Python-2-only iteritems().
        for variable, variable_json in self.shell_context.global_context.items():
            eval_strs.append("%s=%s;" % (variable, json.dumps(variable_json)))
        return "".join(eval_strs)

    def build_process(self):
        """Extend the base process with eval globals, a db address, and JS files."""
        process_context = self.context.clone()
        if self.shell_context.global_context:
            eval_context_str = self.build_eval_context()
            # Append the injected globals after any user-supplied --eval snippet.
            if "eval" in process_context.kwargs:
                process_context.kwargs["eval"] = process_context.kwargs["eval"] + ";" + \
                    eval_context_str
            else:
                process_context.kwargs["eval"] = eval_context_str
        process = ExternalProgram.build_process(self, process_context)
        if self.shell_context.db_address:
            process.arguments.append(self.shell_context.db_address)
        else:
            process.arguments.append("--nodb")
        if self.js_filenames:
            for js_filename in self.js_filenames:
                process.arguments.append(js_filename)
        return process
#
# Default DBTest options
#
DBTEST_DEFAULT_EXEC = "./dbtest"
# NOTE(review): items()-list concatenation is Python-2-only (views on Python 3).
DBTEST_KWARGS = dict(BUILDLOGGER_CUSTOM_KWARGS.items() + VERBOSE_CUSTOM_KWARGS.items())
class DBTest(ExternalProgram):
    """A locally running MongoDB dbtest process.

    Makes it easy to start with custom named dbtests.
    """

    def __init__(self,
                 executable=DBTEST_DEFAULT_EXEC,
                 dbtest_names=None,
                 custom_kwargs=DBTEST_KWARGS,
                 **kwargs):
        ExternalProgram.__init__(self, executable, custom_kwargs=custom_kwargs, **kwargs)
        # Bug fix: was a shared mutable [] default aliased onto the instance;
        # copy so callers' lists are never aliased either.
        self.dbtest_names = list(dbtest_names) if dbtest_names else []

    def build_process(self):
        """Append each requested dbtest suite name to the base command line."""
        process = ExternalProgram.build_process(self)
        for dbtest_name in self.dbtest_names:
            process.arguments.append(dbtest_name)
        return process

View File

@ -1,105 +0,0 @@
"""
Utilities for searching a database of tests based on a query over tags provided by the tests.
The resulting search becomes a test suite.
"""
import re
class RegexQuery(object):
    """A query over string values built from regex include/exclude rules.

    Each rule is stored as a (compiled_pattern, inverted) pair; inverted
    rules come from the *_except_res arguments.
    TODO: Something more complicated, or link to actual MongoDB queries?
    """

    def __init__(self,
                 include_res=[],
                 include_except_res=[],
                 exclude_res=[],
                 exclude_except_res=[]):
        self.include_res = [(regex, False) for regex in include_res] + \
            [(regex, True) for regex in include_except_res]
        self.exclude_res = [(regex, False) for regex in exclude_res] + \
            [(regex, True) for regex in exclude_except_res]

    def matches(self, value):
        """Convenience wrapper: match a single value."""
        return self.matches_values([value])

    def matches_values(self, values):
        """Return True when the include rules accept values and no exclude rule rejects them."""
        # Include phase: at least one include rule must accept the values.
        # (A plain rule accepts when any value matches; an inverted rule
        # accepts when no value matches.)
        if self.include_res:
            accepted = False
            for pattern, inverted in self.include_res:
                hit = any(pattern.search(value) for value in values)
                accepted = (not hit) if inverted else hit
                if accepted:
                    break
            if not accepted:
                return False
        # Exclude phase: every exclude rule must allow the values.
        # (A plain rule allows when no value matches; an inverted rule allows
        # when some value matches.)
        for pattern, inverted in self.exclude_res:
            hit = any(pattern.search(value) for value in values)
            allowed = hit if inverted else (not hit)
            if not allowed:
                return False
        return True

    def combine(self, other):
        """Merge another query's rules into this one."""
        self.include_res.extend(other.include_res)
        self.exclude_res.extend(other.exclude_res)
def build_suite(tests, tag_query):
    """Return the list of tests whose tags satisfy tag_query.

    tests -- iterable of test objects, each exposing a 'tags' attribute.
    tag_query -- object with a matches_values(tags) predicate (e.g. RegexQuery).
    """
    # List comprehension instead of filter() so the result is a concrete
    # list on both Python 2 and Python 3 (filter() is lazy on Python 3).
    return [test for test in tests if tag_query.matches_values(test.tags)]

View File

@ -1,201 +0,0 @@
"""
Testers - TestCase wrappers for tests of different types
"""
import unittest
from external_programs import *
from mongodb_programs import DBTest
from mongodb_programs import MongoShell
from mongodb_programs import MongoShellContext
# Maps a test-type key to the dotted name of the TestCase wrapper class
# used for that type (the classes defined in this module).  Presumably the
# dotted strings are resolved dynamically by the suite loader -- verify
# against the caller.
DEFAULT_TESTER_CLASSES = {"js_test": "smoke.JSUnitTest",
                          "db_test": "smoke.DBTestUnitTest",
                          "exe_test": "smoke.ExeUnitTest"}
class JSUnitTest(unittest.TestCase):
    """Wraps a single MongoDB shell 'jstest' file as a unittest TestCase.

    Test APIs (fixtures) may inject global variables and database addresses
    into the shell context before the shell is launched.
    """

    def __init__(self, jstest, test_apis, logger, shell_executable="./mongo", shell_options={},
                 *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        self.jstest = jstest
        self.test_apis = test_apis
        self.logger = logger
        self.shell_executable = shell_executable
        # Private copy so later mutation cannot leak back to the caller.
        self.shell_options = dict(shell_options)
        self.shell_context = MongoShellContext()
        # Description shown when the test case is reported.
        self._testMethodDoc = "JSTest %s" % jstest.filename

    def setUp(self):
        # Give every fixture a chance to contribute to the shell context.
        try:
            for test_api in self.test_apis:
                test_api.add_to_shell(self.shell_context)
        except:
            self.logger.error("Setup failed for shell API.", exc_info=True)
            raise

    def runTest(self):
        # Launch the shell against the jstest file and fail on nonzero exit.
        shell = MongoShell(executable=self.shell_executable,
                           shell_context=self.shell_context,
                           js_filenames=[self.jstest.filename],
                           context=ExternalContext(logger=self.logger),
                           **self.shell_options)
        try:
            self.logger.info("Starting MongoDB shell...\n%s" % shell)
            shell.start()
            self.logger.info("MongoDB shell started with pid %s." % shell.pid())
            if shell.wait() != 0:
                raise Exception("JSTest %s failed." % self.jstest.filename)
            self.logger.info("MongoDB shell finished.")
        except:
            self.logger.error("MongoDB shell failed.", exc_info=True)
            raise

    def tearDown(self):
        # Let each fixture clean up whatever it installed in setUp.
        try:
            for test_api in self.test_apis:
                test_api.teardown_api()
        except:
            self.logger.error("Teardown failed for shell API.", exc_info=True)
            raise
class ExeUnitTest(unittest.TestCase):
"""An arbitrary executable file wrapped as a TestCase.
Meant for use with C++ unit tests, for example.
Allows fixtures to provide environment variables as API additions.
"""
def __init__(self, exetest, test_apis, logger,
program_options={},
*args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.exetest = exetest
self.test_apis = test_apis
self.logger = logger
# Setup the description for the unit test
self._testMethodDoc = "Program %s" % self.exetest.filename
self.process_context = ExternalContext(logger=self.logger)
if program_options:
self.process_context.kwargs.update(program_options)
def setUp(self):
try:
for api in self.test_apis:
api.add_to_process(self.process_context)
except:
self.logger.error("Setup failed for process API.", exc_info=True)
raise
def runTest(self):
program = ExternalProgram(executable=self.exetest.filename,
context=self.process_context)
try:
self.logger.info("Starting Program...\n%s" % program)
program.start()
self.logger.info("Program %s started with pid %s." %
(self.exetest.filename, program.pid()))
return_code = program.wait()
if return_code != 0:
raise Exception("Program %s failed." % self.exetest.filename)
self.logger.info("Program finished.")
except:
self.logger.error("Program failed.", exc_info=True)
raise
def tearDown(self):
try:
for api in self.test_apis:
api.teardown_api()
except:
self.log.error("Teardown failed for process API.", exc_info=True)
raise
class DBTestUnitTest(ExeUnitTest):
    """A executable MongoDB 'dbtest' wrapped as a TestCase.

    Individual dbtests can be specified optionally.
    Allows fixtures to provide environment variables as API additions.
    """

    def __init__(self, dbtest, test_apis, logger,
                 dbtest_executable=None,
                 dbtest_options={},
                 *args, **kwargs):
        ExeUnitTest.__init__(self, dbtest, test_apis, logger, dbtest_options,
                             *args, **kwargs)
        self.dbtest = dbtest
        self.dbtest_executable = dbtest_executable
        # Pull the optional list of individual suite names from the metadata.
        if "dbtest_names" in dbtest.metadata:
            self.dbtest_names = dbtest.metadata["dbtest_names"]
        else:
            self.dbtest_names = []
        # Describe the test case by the suites it runs.
        self._testMethodDoc = "DBTest %s" % (" ".join(self.dbtest_names))

    def runTest(self):
        # Hoist the joined suite names used in several log/error messages.
        joined_names = " ".join(self.dbtest_names)
        dbtest_program = DBTest(executable=self.dbtest_executable,
                                dbtest_names=self.dbtest_names,
                                context=self.process_context)
        try:
            self.logger.info("Starting DBTest...\n%s" % dbtest_program)
            dbtest_program.start()
            self.logger.info("DBTest %s started with pid %s." % (joined_names,
                                                                 dbtest_program.pid()))
            if dbtest_program.wait() != 0:
                raise Exception("DBTest %s failed." % (joined_names))
            self.logger.info("DBTest finished.")
        except:
            self.logger.error("DBTest failed.", exc_info=True)
            raise

Some files were not shown because too many files have changed in this diff Show More