Mirror of https://github.com/mongodb/mongo.git, synced 2024-12-01 09:32:32 +01:00.
SERVER-87815: Remove legacy perf report (#19878)
GitOrigin-RevId: bb2afaa4b4f5784b2ac6c75d7977b0b041f91631
Parent: 41c95e9d29
Commit: 17e7a4675f
@@ -92,7 +92,6 @@ DEFAULTS = {
    "num_clients_per_fixture": 1,
    "use_tenant_client": False,
    "origin_suite": None,
    "perf_report_file": None,
    "cedar_report_file": None,
    "repeat_suites": 1,
    "repeat_tests": 1,
@@ -491,9 +490,6 @@ USE_TENANT_CLIENT = False
# Indicates the name of the test suite prior to the suite being split up by suite generation
ORIGIN_SUITE = None

# Report file for the Evergreen performance plugin.
PERF_REPORT_FILE = None

# Report file for Cedar.
CEDAR_REPORT_FILE = None
@@ -435,7 +435,6 @@ or explicitly pass --installDir to the run subcommand of buildscripts/resmoke.py
        config.pop("config_shard"), _config.NUM_SHARDS)
    _config.EMBEDDED_ROUTER = config.pop("embedded_router")
    _config.ORIGIN_SUITE = config.pop("origin_suite")
    _config.PERF_REPORT_FILE = config.pop("perf_report_file")
    _config.CEDAR_REPORT_FILE = config.pop("cedar_report_file")
    _config.RANDOM_SEED = config.pop("seed")
    _config.REPEAT_SUITES = config.pop("repeat_suites")
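Note: the three hunks above remove the same setting at each layer of resmoke's configuration, the "perf_report_file" entry in DEFAULTS, the PERF_REPORT_FILE module constant, and the line that pops the parsed value onto _config. The sketch below illustrates that defaults-then-pop pattern in isolation; the names and the leftover check are illustrative, not resmoke's actual code.

# Minimal sketch of the DEFAULTS -> module-constant pattern shown above.
# Names are illustrative, not resmoke's real module layout.
DEFAULTS = {
    "num_clients_per_fixture": 1,
    "cedar_report_file": None,
    "repeat_suites": 1,
}


class _Config:
    """Stand-in for the module-level constants in resmokelib's config."""
    NUM_CLIENTS_PER_FIXTURE = 1
    CEDAR_REPORT_FILE = None
    REPEAT_SUITES = 1


def apply_config(parsed_args: dict, _config=_Config) -> None:
    """Overlay parsed CLI values on the defaults, then pop them onto constants."""
    config = dict(DEFAULTS)
    config.update({k: v for k, v in parsed_args.items() if v is not None})

    _config.NUM_CLIENTS_PER_FIXTURE = config.pop("num_clients_per_fixture")
    _config.CEDAR_REPORT_FILE = config.pop("cedar_report_file")
    _config.REPEAT_SUITES = config.pop("repeat_suites")

    # Anything left over was never consumed; this sketch just asserts the dict ends up empty.
    assert not config, f"unconsumed config values: {sorted(config)}"


apply_config({"cedar_report_file": "cedar_report.json"})
assert _Config.CEDAR_REPORT_FILE == "cedar_report.json"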
@@ -1239,10 +1239,6 @@ class RunPlugin(PluginInterface):
        internal_options.add_argument("--internalParam", action="append", dest="internal_params",
                                      help=argparse.SUPPRESS)

        internal_options.add_argument("--perfReportFile", dest="perf_report_file",
                                      metavar="PERF_REPORT",
                                      help="Writes a JSON file with performance test results.")

        internal_options.add_argument("--cedarReportFile", dest="cedar_report_file",
                                      metavar="CEDAR_REPORT",
                                      help="Writes a JSON file with performance test results.")
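Note: in the hunk above, --internalParam is hidden from --help output via help=argparse.SUPPRESS, while the two report-file flags carry visible help strings. A minimal, self-contained sketch of that suppression mechanism, using an illustrative parser rather than resmoke's real one:

import argparse

# Sketch of an "internal" option hidden from --help; names are illustrative.
parser = argparse.ArgumentParser(prog="resmoke-sketch")
internal_options = parser.add_argument_group("internal options")

# help=argparse.SUPPRESS keeps the flag out of --help output while the
# parser still accepts it on the command line.
internal_options.add_argument("--cedarReportFile", dest="cedar_report_file",
                              metavar="CEDAR_REPORT", help=argparse.SUPPRESS)

args = parser.parse_args(["--cedarReportFile", "cedar_report.json"])
assert args.cedar_report_file == "cedar_report.json"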
@@ -28,7 +28,6 @@ class CombineBenchmarkResults(interface.Hook):
    def __init__(self, hook_logger, fixture):
        """Initialize CombineBenchmarkResults."""
        interface.Hook.__init__(self, hook_logger, fixture, CombineBenchmarkResults.DESCRIPTION)
        self.legacy_report_file = _config.PERF_REPORT_FILE
        self.cedar_report_file = _config.CEDAR_REPORT_FILE

        # Reports grouped by name without thread.
@@ -43,9 +42,6 @@ class CombineBenchmarkResults(interface.Hook):

    def after_test(self, test, test_report):
        """Update test report."""
        if self.legacy_report_file is None:
            return

        bm_report_path = test.report_name()

        with open(bm_report_path, "r") as bm_report_file:
@@ -58,13 +54,8 @@ class CombineBenchmarkResults(interface.Hook):

    def after_suite(self, test_report, teardown_flag=None):
        """Update test report."""
        if self.legacy_report_file is None:
            return

        self.end_time = datetime.datetime.now()
        legacy_report = self._generate_perf_plugin_report()
        with open(self.legacy_report_file, "w") as fh:
            json.dump(legacy_report, fh)

        try:
            cedar_report = self._generate_cedar_report()
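Note: the after_test/after_suite methods above follow resmoke's hook lifecycle. Each benchmark test writes its own Google Benchmark JSON report, after_test collects it, and after_suite aggregates everything and writes the output file(s). A simplified sketch of that flow, with the resmoke base class and the per-thread bookkeeping left out; class and attribute names here are illustrative:

import datetime
import json

# Simplified lifecycle sketch of a combine-results hook; this is not the real
# CombineBenchmarkResults class (no interface.Hook base, no per-thread grouping).
class CombineResultsSketch:
    def __init__(self, report_file):
        self.report_file = report_file   # e.g. the value of _config.CEDAR_REPORT_FILE
        self.raw_reports = []            # one parsed Google Benchmark report per test
        self.end_time = None

    def after_test(self, bm_report_path):
        # Each benchmark test writes its own JSON report; collect it here.
        with open(bm_report_path, "r") as bm_report_file:
            self.raw_reports.append(json.load(bm_report_file))

    def after_suite(self):
        # Aggregate once the whole suite is done, then write a single combined file.
        self.end_time = datetime.datetime.now()
        combined = {"end": self.end_time.isoformat(), "benchmarks": self.raw_reports}
        if self.report_file is not None:
            with open(self.report_file, "w") as fh:
                json.dump(combined, fh)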
@@ -72,28 +63,10 @@ class CombineBenchmarkResults(interface.Hook):
                teardown_flag.set()
                raise
        else:
            if self.cedar_report_file is not None:
                with open(self.cedar_report_file, "w") as fh:
                    json.dump(cedar_report, fh)

    def _generate_perf_plugin_report(self):
        """Format the data to look like a perf plugin report."""
        perf_report = {
            "start": self._strftime(self.create_time),
            "end": self._strftime(self.end_time),
            "errors": [],  # There are no errors if we have gotten this far.
            "results": []
        }

        for name, report in list(self.benchmark_reports.items()):
            test_report = {
                "name": name, "context": report.context._asdict(),
                "results": report.generate_perf_plugin_dict()
            }

            perf_report["results"].append(test_report)

        return perf_report

    def _generate_cedar_report(self) -> List[dict]:
        """Format the data to look like a cedar report."""
        cedar_report = []
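Note: for reference, the _generate_perf_plugin_report method removed above produced a document of roughly the following shape. The values below are invented for illustration; only the top-level keys (start, end, errors, results) and the per-test structure come from the code in the hunk.

# Illustrative shape of the legacy perf plugin report being removed;
# numbers, names, and context fields are made up for this example.
legacy_perf_report = {
    "start": "2999-12-31T23:59:59Z",
    "end": "3000-01-01T00:00:00Z",
    "errors": [],
    "results": [
        {
            "name": "BM_example",
            "context": {"date": "2999-12-31T23:59:59Z", "num_cpus": 8},
            "results": {
                "1": {
                    "error_values": [0, 0],
                    "ops_per_sec": -303.0,
                    "ops_per_sec_values": [-300.0, -306.0],
                },
            },
        },
    ],
}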
@@ -225,45 +198,6 @@ class _BenchmarkThreadsReport(object):
        """Add to report."""
        self.thread_benchmark_map[bm_name_obj.thread_count].append(report)

    def generate_perf_plugin_dict(self):
        """Generate perf plugin data points of the following format.

        "1": {
            "error_values": [
                0,
                0,
                0
            ],
            "ops_per_sec": 9552.108279243452,
            "ops_per_sec_values": [
                9574.812658450564,
                9522.642340821469,
                9536.252775275878
            ]
        },
        """

        res = {}
        for thread_count, reports in list(self.thread_benchmark_map.items()):
            thread_report = {
                "error_values": [],
                "ops_per_sec_values": [],  # This is actually storing latency per op, not ops/s
            }

            for report in reports:
                # Don't show Benchmark's included statistics to prevent cluttering up the graph.
                if report.get("run_type") == "aggregate":
                    continue
                thread_report["error_values"].append(0)
                # Take the negative of the latency numbers to preserve the higher is better semantics.
                thread_report["ops_per_sec_values"].append(-1 * report["cpu_time"])
            thread_report["ops_per_sec"] = sum(thread_report["ops_per_sec_values"]) / len(
                thread_report["ops_per_sec_values"])

            res[thread_count] = thread_report

        return res

    def generate_cedar_metrics(self) -> Dict[int, List[CedarMetric]]:
        """
        Generate metrics for Cedar.
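Note: as the comments in the hunk above say, "ops_per_sec" is really a negated latency. Each run's cpu_time is multiplied by -1 so that higher still means better, and the per-thread value is the mean of those negated numbers. A tiny standalone check of that arithmetic with illustrative latencies:

# Standalone check of the negated-latency mean computed above.
# cpu_time values are illustrative, not taken from any real benchmark run.
cpu_times = [300.0, 306.0]                       # per-run latencies reported by Google Benchmark
ops_per_sec_values = [-1 * t for t in cpu_times]
ops_per_sec = sum(ops_per_sec_values) / len(ops_per_sec_values)
assert ops_per_sec_values == [-300.0, -306.0]
assert ops_per_sec == -303.0                     # smaller latency -> larger (less negative) score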
@@ -232,7 +232,6 @@ class TestLocalCommandLine(unittest.TestCase):
            "run",
            "--suites=my_suite",
            "--reportFile=report.json",
            "--perfReportFile=perf.json",
            "--storageEngine=my_storage_engine",
        ])
@@ -139,18 +139,6 @@ class CombineBenchmarkResultsFixture(unittest.TestCase):


class TestCombineBenchmarkResults(CombineBenchmarkResultsFixture):
    def test_generate_legacy_report(self):
        report = self.cbr_hook._generate_perf_plugin_report()

        self.assertEqual(len(list(report.keys())), 4)
        self.assertEqual(len(report["results"]), 2)

        self.assertDictEqual(report["results"][0]["context"], _BM_CONTEXT)
        self.assertEqual(report["results"][0]["results"]["1"]["ops_per_sec"], -1304.0)

        self.assertEqual(report["start"], "2999-12-31T23:59:59Z")
        self.assertEqual(report["end"], "3000-01-01T00:00:00Z")

    def test_generate_cedar_report(self):
        report = self.cbr_hook._generate_cedar_report()
@@ -214,38 +202,6 @@ class TestBenchmarkThreadsReport(CombineBenchmarkResultsFixture):
        self.assertEqual(name_obj.statistic_type, "mean")
        self.assertEqual(name_obj.base_name, "BM_baseline_match_simple/0")

    def test_generate_multithread_perf_plugin_dict(self):
        # Also test add_report() in the process.
        self.bm_threads_report.add_report(
            self.bm_threads_report.parse_bm_name(_BM_MULTITHREAD_REPORT), _BM_MULTITHREAD_REPORT)

        self.assertEqual(len(list(self.bm_threads_report.thread_benchmark_map.keys())), 1)

        report = self.bm_threads_report.generate_perf_plugin_dict()

        self.assertEqual(len(list(report.keys())), 1)
        self.assertIn("10", list(report.keys()))
        self.assertNotIn("10_median", list(report.keys()))

        self.assertEqual(len(report["10"]["error_values"]), 1)
        self.assertEqual(len(report["10"]["ops_per_sec_values"]), 1)
        self.assertEqual(report["10"]["ops_per_sec"], -303.0)

    def test_generate_single_thread_perf_plugin_dict(self):
        self.bm_threads_report.add_report(
            self.bm_threads_report.parse_bm_name(_BM_REPORT_1), _BM_REPORT_1)

        self.bm_threads_report.add_report(
            self.bm_threads_report.parse_bm_name(_BM_REPORT_2), _BM_REPORT_2)

        self.assertEqual(len(list(self.bm_threads_report.thread_benchmark_map.keys())), 1)

        report = self.bm_threads_report.generate_perf_plugin_dict()

        self.assertEqual(len(list(report.keys())), 1)
        self.assertIn("1", list(report.keys()))
        self.assertNotIn("1_mean", list(report.keys()))

    def test_generate_multithread_cedar_metrics(self):
        self.bm_threads_report.add_report(
            self.bm_threads_report.parse_bm_name(_BM_MULTITHREAD_REPORT), _BM_MULTITHREAD_REPORT)
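Note: the _BM_REPORT_* and _BM_MULTITHREAD_REPORT fixtures used in these tests are Google Benchmark result entries. The sketch below shows the general shape of such an entry; the field values are invented and the real fixtures in the test file may differ, but name, cpu_time, and run_type are among the fields the hook reads.

# Hedged sketch of a Google Benchmark result entry like the _BM_* fixtures above;
# values are invented for illustration only.
_BM_MULTITHREAD_REPORT_SKETCH = {
    "name": "BM_example/threads:10",   # Google Benchmark appends "/threads:N" for multi-threaded runs
    "run_type": "iteration",           # "aggregate" rows (mean/median/stddev) are skipped by the hook
    "iterations": 1000,
    "real_time": 350.0,
    "cpu_time": 303.0,
    "time_unit": "ns",
}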
@@ -159,7 +159,6 @@ if [[ ${disable_unit_tests} = "false" && ! -f ${skip_tests} ]]; then
        --taskWorkDir='${workdir}' \
        --projectConfigPath ${evergreen_config_file_path} \
        --reportFile=report.json \
        --perfReportFile=perf.json \
        --cedarReportFile=cedar_report.json
      resmoke_exit_code=$?
      set -o errexit