Commit eb31e66d4789 authored by Raphaël Gomès

Add migration script for old benchmarks

Some older benchmarks were not recognized by ASV because of a version
discrepancy. ASV generates a version hash from the code that is run in each
benchmark, which means that any change to the benchmark code renders old
results unusable by ASV.

While the reason for doing this is sensible (any change in the benchmark could
invalidate it), this safety measure does not concern us, and we need to bypass
it. Since ASV does not yet log a warning (pending another PR) when it ignores
results, nor does it allow us to accept those "invalid" results, this change
migrates the old results to a form ASV will accept.

Most of this changeset is unrelated to the version-compatibility issue: it
mostly catches the old file structure up with the new one. A few new
parameters were inserted, the main "repo" parameter was split into itself and
"sparse-revlog", etc.
#!/usr/bin/env python
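"""Migrate old ASV benchmark result files to the current parameter layout.

ASV keys results on a version hash of each benchmark's code, so results
recorded before the benchmark suite changed would otherwise be ignored.
This script rewrites those old result files: the single "repo" parameter is
split into "repo" plus a sparse-revlog flag, the new base parameters are
inserted, and results/stats are re-ordered to match the new cartesian-product
order of parameter combinations.

Example invocation (script name and paths are illustrative):

    python migrate.py -s results/some-machine -b benchmarks.json -o migrated/
"""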
from __future__ import unicode_literals
import argparse
import io
import os
import json
import sys
import logging
from collections import OrderedDict
from copy import copy
from itertools import product, repeat
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
logger.addHandler(console)
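
# Repository parameter values shared by all known result files. The embedded
# quotes are part of the values exactly as they appear in the ASV result
# files.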
USUAL_REPOS = [
"'mercurial-2018-08-01'",
"'mozilla-central-2018-08-01'",
"'netbeans-2018-08-01'",
"'pypy-2018-08-01'",
]
# Keys of this dict are `repo` values we expect to see
# Values are the corresponding new base parameters (that is, excluding
# any additional test parameters).
OLD_TO_NEW_REPO_PARAM_MATCHER = {
frozenset(USUAL_REPOS): USUAL_REPOS,
frozenset(
(
"'mercurial-2018-08-01'",
"'mercurial-2018-08-01-sparse-revlog'",
"'mozilla-central-2018-08-01'",
"'mozilla-central-2018-08-01-sparse-revlog'",
"'netbeans-2018-08-01'",
"'netbeans-2018-08-01-sparse-revlog'",
"'pypy-2018-08-01'",
"'pypy-2018-08-01-sparse-revlog'",
)
): USUAL_REPOS,
frozenset(
(
"'mercurial-2018-08-01'",
"'mercurial-2018-08-01-sparse-revlog'",
"'mozilla-central-2018-08-01'",
"'mozilla-central-2018-08-01-sparse-revlog'",
"'netbeans-2018-08-01'",
"'netbeans-2018-08-01-sparse-revlog'",
"'pypy-2018-08-01'",
"'pypy-2018-08-01-sparse-revlog'",
"'mozilla-try-2019-03-01-sparse-revlog'",
)
): USUAL_REPOS
+ ["'mozilla-try-2019-03-01'"],
}
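
# Accepted orderings of the new base parameter values that come right after
# the repo parameter in the new layout: the "'zlib'" value, four "True"
# values, and (last) the two values of the sparse-revlog flag. These fixed
# values mirror the ones written out in rewrite_params_results_and_stats().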
NEW_FORMAT_PARAMS_VARIANTS = [
(
["'zlib'"],
["True"],
["True"],
["True"],
["True"],
["False", "True"], # Found in some existing files
),
(
["'zlib'"],
["True"],
["True"],
["True"],
["True"],
["True", "False"], # Order that way to reduce diff noise
)
]
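
# Repo-parameter sets that already follow the new layout; used by
# is_new_format() to detect result files that have already been migrated.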
ACCEPTABLE_NEW_REPO_PARAMS = set(
frozenset(l) for l in OLD_TO_NEW_REPO_PARAM_MATCHER.values()
)


def extract_benchmarks_file_data(benchmark_file_path):
with io.open(benchmark_file_path, encoding="utf-8") as benchmark_file:
benchmarks = json.load(benchmark_file, encoding="utf-8")
bench_version = benchmarks.pop("version")
params_per_test = {}
for test_name, test_config in benchmarks.iteritems():
# Get benchmark version, and zip params to their names
mapped_params = OrderedDict(
(param_name, param_value)
for param_name, param_value in zip(
test_config["param_names"], test_config["params"]
)
)
if not len(mapped_params):
exit(
"Test {} does not have any parameters, aborting.".format(
test_name
)
)
params_per_test[test_name] = {
"version": test_config["version"],
"params": mapped_params,
}
return params_per_test


def rewrite_params_results_and_stats(old_params, old_results, old_stats):
repos = frozenset(old_params[0])
try:
new_base_parameters = OLD_TO_NEW_REPO_PARAM_MATCHER[repos]
except KeyError:
exit("Found irregular `repos` parameter: {}. Aborting.".format(repos))
broken_results = False
broken_stats = False
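    # Some old files have no usable results or stats at all; pad with `None`s
    # so the zip() below still lines up, and remember to skip the length
    # checks near the end of this function.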
if old_results is None or old_results == [None]:
old_results = repeat(None)
broken_results = True
if old_stats is None:
old_stats = repeat(None)
broken_stats = True
# Map every old parameter combination *in order* to its results and stats
#
# From the asv docs
# https://github.com/airspeed-velocity/asv/blob/master/docs/source/dev.rst:
#
# The result value is a list of results. Each entry corresponds to one
# combination of the parameter values. The n-th entry in the list
# corresponds to the parameter combination
# itertools.product(*params)[n],
# i.e., the results appear in cartesian product order,
# with the last parameters varying fastest.
old_mapping = zip(product(*old_params), old_results, old_stats)
new_mapping = {}
for params, results, stats in old_mapping:
sparse = False
repo = params[0]
rest = params[1:]
# Split repo parameter value into repo + sparse-revlog
if repo.endswith("-sparse-revlog'"):
sparse = True
transformed_params = [
repo.replace("-sparse-revlog'", "'"),
"'zlib'",
"True",
"True",
"True",
"True",
"True" if sparse else "False",
]
# Add back any additional parameters' values for this test
transformed_params.extend(rest)
# Associate new params' values to the corresponding results and stats
# To later reference in a loop that has the final ordering of
# the combination of all new parameters
new_mapping[tuple(transformed_params)] = (results, stats)
# Then generate the new parameter list
new_params = [new_base_parameters]
new_params.extend(NEW_FORMAT_PARAMS_VARIANTS[1])
new_params.extend(old_params[1:])
new_results = []
new_stats = []
# This is used as a "symbol" (think Ruby or ECMAScript)
# Used to differentiate real `None` result values from ones that we've
# inserted, to help with assertions before eventually turning them
# into `None` at function output.
TO_BE_NONE = "__SCMPERF__TO_BE_NONE"
# Associate the new parameters to the old values with the mapping above
for params in product(*new_params):
try:
results, stats = new_mapping[tuple(params)]
except KeyError:
# Some variants do not have results.
# For instance, the `mozilla-try` repo is only `sparse-revlog`,
# so `sparse-revlog == False` should yield `None`.
new_results.append(TO_BE_NONE)
new_stats.append(TO_BE_NONE)
else:
new_results.append(results)
new_stats.append(stats)
# There should be at least as many new results as old ones,
# and exactly as many non-nulls
if not broken_results:
non_null_results = list(r for r in new_results if r != TO_BE_NONE)
if len(non_null_results) != len(old_results):
message = (
"Non-null new results and old results have "
"different lengths ({} vs {}). Aborting."
)
exit(message.format(len(non_null_results), len(old_results)))
if not broken_stats:
non_null_stats = list(s for s in new_stats if s != TO_BE_NONE)
if len(non_null_stats) < len(old_stats):
message = (
"Non-null new stats and old stats have "
"different lengths ({} vs {}). Aborting."
)
exit(message.format(len(non_null_stats), len(old_stats)))
# TO_BE_NONE -> None
new_results = [None if r == TO_BE_NONE else r for r in new_results]
new_stats = [None if s == TO_BE_NONE else s for s in new_stats]
if all(stat is None for stat in new_stats):
new_stats = None
return new_params, new_results, new_stats


def is_new_format(test_config):
    """
    A test is in the new format if its parameters start with a repo parameter
    whose `set` matches one of the entries of `ACCEPTABLE_NEW_REPO_PARAMS`,
    followed by one of the `NEW_FORMAT_PARAMS_VARIANTS`.
    `zip` only loops until `new_format_params` is exhausted, so any additional
    params the test might have are not checked.
    """
test_repo_param = test_config["params"][0]
test_other_params = test_config["params"][1:]
if not len(test_other_params):
return False
for new_format_params in NEW_FORMAT_PARAMS_VARIANTS:
if any(
set(test_repo_param) == acceptable_repo_param
and all(
new_param == param
for new_param, param in
zip(new_format_params, test_other_params)
)
for acceptable_repo_param in ACCEPTABLE_NEW_REPO_PARAMS
):
return True
return False


def get_new_json(test_results, params_per_test):
new_contents = copy(test_results)
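    # Pre-set so the "file was left untouched" check at the end of the
    # function does not blow up when the "results" dict is empty and the
    # loop below never runs.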
index = 0
# Loop over each test result in file
for index, (test_name, test_config) in enumerate(
test_results["results"].iteritems()
):
try:
test_params = params_per_test[test_name]
except KeyError:
logger.info(
"Test {} does not exist in benchmarks.json, skipping".format(
test_name
)
)
continue
if is_new_format(test_config):
logger.debug(
"Test {} is already in the new format, skipping".format(
test_name
)
)
continue
if (
test_params["version"]
!= test_results["benchmark_version"][test_name]
):
logger.info(
"Test version does not match benchmark.json version, removing"
)
# ASV skips version checking if not present,
# the next `asv run` will add the correct version.
del new_contents["benchmark_version"][test_name]
stats = test_config.get("stats")
new_params, new_results, new_stats = rewrite_params_results_and_stats(
test_config["params"], test_config["result"], stats
)
new_contents["results"][test_name]["params"] = new_params
new_contents["results"][test_name]["result"] = new_results
if stats is not None:
new_contents["results"][test_name]["stats"] = new_stats
if index == 0:
logger.warning("Warning: This file was left untouched.")
return new_contents


def migrate_single_benchmark(test_file_path, params_per_test):
logger.info("---------------------")
logger.info(test_file_path)
logger.info("---------------------")
if os.path.basename(test_file_path) in {"machine.json"}:
logger.info("Ignoring non-test file")
return
with io.open(test_file_path) as test_results_file:
test_results = json.load(test_results_file)
new_contents = get_new_json(
test_results=test_results, params_per_test=params_per_test
)
return new_contents


def handle_args(args):
new_args = {"sources_files": [], "verbose": args.verbose}
sources = args.sources
benchmarks_file_path = os.path.abspath(args.benchmark_file)
output_dir_path = os.path.abspath(args.output_dir)
    # Sources can be directories or files
for source in sources:
source = os.path.abspath(source)
if os.path.isdir(source):
            # Recursively add any .json file in the directory
for dirpath, _dirnames, nested_files in os.walk(source):
new_args["sources_files"].extend(
os.path.abspath(os.path.join(dirpath, f))
for f in nested_files
if f.endswith(".json")
)
else:
if not source.endswith(".json"):
exit("File {} does not have a .json extension.".format(source))
            os.stat(source)  # fail early if the file does not exist
            new_args["sources_files"].append(os.path.abspath(source))
    os.stat(benchmarks_file_path)  # fail early if benchmarks.json is missing
new_args["benchmarks_file_path"] = benchmarks_file_path
if not os.path.isdir(output_dir_path):
exit("{} is not a directory or does not exist".format(output_dir_path))
new_args["output_dir_path"] = output_dir_path
return new_args


def main(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--sources",
nargs="+",
type=str,
required=True,
help="File names or directories containing tests to be migrated",
)
parser.add_argument(
"-b", "--benchmark_file", help="Path to benchmarks.json", required=True
)
parser.add_argument(
"-o", "--output_dir", help="Output directory", required=True
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Increase output verbosity",
default=False,
)
treated_args = handle_args(parser.parse_args(args))
if treated_args["verbose"]:
logger.setLevel(logging.DEBUG)
params_per_test = extract_benchmarks_file_data(
benchmark_file_path=treated_args["benchmarks_file_path"]
)
for test_file_path in treated_args["sources_files"]:
new_contents = migrate_single_benchmark(
test_file_path=test_file_path, params_per_test=params_per_test
        )
        if new_contents is None:
            # Nothing to write for non-test files such as machine.json
            continue
        outfile_path = os.path.join(
treated_args["output_dir_path"], os.path.basename(test_file_path)
)
with io.open(outfile_path, mode="w", encoding="utf-8") as f:
dump = json.dumps(
new_contents, indent=4, ensure_ascii=False, sort_keys=True
)
f.write(unicode(dump))


if __name__ == "__main__":
main(sys.argv[1:])