from __future__ import print_function import errno import functools import hashlib import json import os import os.path import pickle import re import subprocess import sys import timeit import yaml import collections from functools import wraps from os.path import join REPO_SUFFIX = '.benchrepo' BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) STRIP_VARIANTS_PATH = os.path.join(BASEDIR, "partial-sets.yaml") def read_configuration(config_path): # TODO refactor into single util (3 copies of this function exist) with open(config_path) as config_file: return yaml.load(config_file.read()) repodir = read_configuration(os.path.join(BASEDIR, "config.yaml")).get('repodir') if repodir: REPOS_DIR = os.path.abspath(repodir) else: REPOS_DIR = os.path.join(BASEDIR, "repos") with open(STRIP_VARIANTS_PATH) as f: STRIP_VARIANTS = yaml.load(f.read())["partial-sets"] def get_repo_variants(repo_dir): """read all .benchrepo in given directory and computes ASV variants Reads all .benchrepo files in the given directory and returns a tuple of 5 elements: - a list of params list. The first element will be a list of repo ids, the following ones will be the possible values for each repository format key, - a list of params name, the first one will be "repo", - the length of the params name, - a dict of the parsed `.benchrepofile`, the key is the benchrepo file name without the `.benchrepo` extension. - a dict of repo hash, the key is the following tuple (repo_id, ((format_key_1, format_value_1), (format_key_2, format_value_1))) For example, given a directory with two benchrepo files: repo-test-1-XXX.benchrepo: reference-repo: format-info: compression: zlib dotencode: true fncache: true generaldelta: false plain-cl-delta: true sparserevlog: false id: repo-test-1 repo-hash: XXX repo-test-1-YYY.benchrepo: reference-repo: format-info: compression: zlib dotencode: true fncache: true generaldelta: true plain-cl-delta: true sparserevlog: false id: repo-test-1 repo-hash: YYY get_repo_variants will returns: - the params: [['repo-test-1'], ['zlib'], [True], [True], [False, True], [True], [False]] - the params_names: ['repo', 'compression', 'dotencode', 'fncache', 'generaldelta', 'plain-cl-delta', 'sparserevlog'] - the length of params: 7 - the dict of parsed benchrepo data: {'repo-test-1-XXX': {...}, 'repo-test-1-YYY: {...}} - the dict of repo hash: {('repo-test-1', (('compression', 'zlib'), ('dotencode', True), ('fncache', True), ('generaldelta', False), ('plain-cl-delta', True), ('sparserevlog', False))): 'XXX', ('repo-test-1', (('compression', 'zlib'), ('dotencode', True), ('fncache', True), ('generaldelta', True), ('plain-cl-delta', True), ('sparserevlog', False))): 'YYY'}) """ # Find all benchrepos benchrepos_files = sorted( d for d in os.listdir(repo_dir) if d.endswith(REPO_SUFFIX) ) repo_ids = set() format_variants = collections.defaultdict(set) repo_details = {} repo_hash_map = {} # Parse the benchrepos files for benchrepo in benchrepos_files: with open(join(repo_dir, benchrepo)) as f: data = yaml.load(f.read()) repo_id = data["reference-repo"]["id"] repo_ids.add(repo_id) repo_hash = data["reference-repo"].get("repo-hash", None) format_info = data["reference-repo"].get("format-info", {}) # Add a prefix to format-info keys new_format_info = {} for format_name, format_value in format_info.items(): new_format_name = "repo-format-{}".format(format_name) new_format_info[new_format_name] = format_value format_variants[new_format_name].add(format_value) repo_name = benchrepo[: -len(".benchrepo")] repo_details[repo_name] = data if repo_hash is not None: repo_hash_map_key = (repo_id, tuple(sorted(new_format_info.items()))) repo_hash_map[repo_hash_map_key] = repo_hash params = [list(sorted(repo_ids))] params_names = ["repo"] for format_name in sorted(format_variants): params_names.append(format_name) params.append(list(sorted(format_variants[format_name]))) return params, params_names, len(params_names), repo_details, repo_hash_map VARIANTS = get_repo_variants(REPOS_DIR) BASE_PARAMS, BASE_PARAMS_NAMES, BASE_PARAMS_LEN = VARIANTS[:3] REPO_DETAILS, REPO_HASH_MAP = VARIANTS[3:] class SkipResult(Exception): pass # Backward compatibility for python 2.6 if not hasattr(subprocess, 'check_output'): STDOUT = subprocess.STDOUT def check_output(*popenargs, **kwargs): if 'stdout' in kwargs: # pragma: no cover raise ValueError('stdout argument not allowed, ' 'it will be overridden.') process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) output, _ = process.communicate() retcode = process.poll() if retcode: cmd = kwargs.get("args") if cmd is None: cmd = popenargs[0] raise subprocess.CalledProcessError(retcode, cmd, output=output) return output subprocess.check_output = check_output # overwrite CalledProcessError due to `output` # keyword not being available (in 2.6) class CalledProcessError(Exception): def __init__(self, returncode, cmd, output=None): self.returncode = returncode self.cmd = cmd self.output = output def __str__(self): return "Command '%s' returned non-zero exit status %d\n%s" % ( self.cmd, self.returncode, self.output) subprocess.CalledProcessError = CalledProcessError PERF_RE = re.compile(r'! wall (\d+.\d+) comb (\d+.\d+) user (\d+.\d+) sys (\d+.\d+) \(best of (\d+)\)') # noqa: E501 ### # Base classes for benchmarks ### def median(lst): quotient, remainder = divmod(len(lst), 2) if remainder: return sorted(lst)[quotient] return sum(sorted(lst)[quotient - 1:quotient + 1]) / 2. def params_as_kwargs(f): """Pass in test parameters as keyword arguments. Use as a decorator on BaseTestSuite methods """ @functools.wraps(f) def wrapper(self, *args, **kwargs): names = self.param_names args, values = args[len(names):], args[:len(names)] kwargs.update(zip(names, values)) return f(self, *args, **kwargs) return wrapper def compatible_with(revset): """Specifies the revset wherein the command is expected to work. Skips the benchmark if the command is not expected to work. The current version is obtained from the environment. """ def decorator(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): if not f.__name__.startswith('track'): # TODO implement solution for other prefixes than `track` raise NotImplementedError( "This decorator is not compatible with " "any prefixes other than `track`." ) current_version = os.environ.get('ASV_COMMIT') if self.should_skip_benchmark(compatibility_revset=revset, current_version=current_version): message = "{} is not expected to work for hash {}" print( message.format(f.__name__, current_version), file=sys.stderr ) return float('nan') return f(self, *args, **kwargs) return wrapper return decorator class BaseTestSuite(object): timer = timeit.default_timer repeat = (3, 10, 60.0) number = 1 params = BASE_PARAMS param_names = BASE_PARAMS_NAMES def setup(self, repo, *args, **kwargs): venv = os.path.abspath(os.path.join(os.path.dirname(sys.executable), "..")) self.project_dir = os.path.join(venv, 'project') if os.path.isdir(self.project_dir): # use hg in virtualenv for "asv run" self.hgpath = os.path.join(venv, "bin", "hg") else: # use local hg for "asv dev" self.project_dir = os.path.join(BASEDIR, 'mercurial') sys.path.insert(0, self.project_dir) self.hgpath = os.path.join(os.path.join(self.project_dir, 'hg')) self.repo_name = self.get_repo_name(repo, *args) self.repo_path = os.path.join(REPOS_DIR, self.repo_name) # Use a clean environ to run command self.environ = {'HGRCPATH': ''} # keep some environment variables # SSH_AUTH_SOCK for hg over ssh for key in ('SSH_AUTH_SOCK',): if key in os.environ: self.environ[key] = os.environ[key] # define the cache dir for simplicity self._cache_dir = os.getcwd() def should_skip_benchmark(self, compatibility_revset, current_version): """Determines whether the benchmark should be run given a revset and the current mercurial version. If no current version is set, returns `False` """ should_skip = False if compatibility_revset is not None and current_version: tmpl = "{} & {}" full_revset = tmpl.format(compatibility_revset, current_version) match_pattern = 'ASV_REVSET_MATCH' # TODO use `hg test` when it's stable command = [ self.hgpath, '--cwd', os.path.join(BASEDIR, "mercurial"), 'log', '-r', full_revset, '-T', match_pattern ] try: output = self.check_output(*command, env=self.environ) except subprocess.CalledProcessError as exc: if exc.returncode == 255: should_skip = True else: raise else: if not match_pattern in output: should_skip = True return should_skip @staticmethod def get_skip(): with open(os.path.join(REPOS_DIR, 'skip.json'), 'r') as f: return json.load(f) def get_repo_name(self, *args): # Old way if BASE_PARAMS_LEN == 1: return args[0] else: # Rebuilt format data in order to compute the hash repo_id = args[0] format_data = dict(zip(self.param_names[1:BASE_PARAMS_LEN], args[1:BASE_PARAMS_LEN])) repo_hash_key = (repo_id, tuple(sorted(format_data.items()))) repo_hash = REPO_HASH_MAP[repo_hash_key] return "{0}-{1}".format(repo_id, repo_hash) def get_asv_rev(self): '''Return currently benchmarked mercurial revision''' return self.hg('log', '--config', 'experimental.evolution=all', '--cwd', self.project_dir, '-T', '{node|short}', '-r', '.').strip() def check_output(self, *args, **kwargs): """Helper to run commands Run given command in a subprocess Optional expected_return_code (default 0) is used to control whenever we expect the command should exit. If the command succeeded with expected_return_code = 0, return the output If the command succeeded with expected_return_code != 0, raise RuntimeError If the command fail with expected_return_code, return None, else raise original subprocess.CalledProcessError exception. """ env = kwargs.pop('env', self.environ) expected_return_code = kwargs.pop('expected_return_code', 0) cmd = list(args) try: output = subprocess.check_output(cmd, env=env, **kwargs) except subprocess.CalledProcessError as exc: if exc.returncode == expected_return_code: # failed as we expected return None raise else: if expected_return_code != 0: raise RuntimeError('unexpected return code 0 for {}'.format(cmd)) return output def hg(self, *args, **kwargs): """Run given command arguments with hg When there is no '--cwd' in arguments, use the benchmarked repo with 'hg --cwd /path/to/repo' """ args = list(args) # disabled multi worker because out current test setup is bad with multi CPU args = ["--config", "worker.enabled=no"] + args if '--cwd' not in args: # use self.repo_path as repo args = ['--cwd', self.repo_path] + args cmd = [self.hgpath] + list(args) return self.check_output(*cmd, **kwargs) def safe_hg(self, command, *args, **kwargs): """Run given command argument with hg and ignore unknown commands This is to be used for commands that may not exist in earlier mercurial versions. When hg exit code is 255, test the command existence with 'hg help CMD' to test command existence, if it also return 255, raise SkipResult exception. We expect `command` to be the hg command we want to run, either a list of arguments required to run 'hg help' (for extensions etc). """ if isinstance(command, (list, tuple)): cmd = command else: cmd = [command] try: return self.hg(*(cmd + list(args)), **kwargs) except subprocess.CalledProcessError as exc: if exc.returncode == 255: # test if it return 255 because the command does not exist # or if it's another issue try: self.hg(*(['help'] + command)) except subprocess.CalledProcessError as exc: if exc.returncode == 255: # command does not exist in this version of mercurial raise SkipResult() raise raise def _perfext(self, command, *args, **kwargs): """Use contrib/perf.py extension from mercurial to get data""" perfpath = os.path.join(self.project_dir, 'contrib', 'perf.py') kwargs.setdefault('stderr', subprocess.STDOUT) try: return self.safe_hg( [command, '--config', 'extensions.perf={0}'.format(perfpath)], *args, **kwargs) except SkipResult: # command does not exist for this version of perf.py # return NaN which is a "n/a" status for asv return None def perfext(self, command, *args, **kwargs): """Use contrib/perf.py extension from mercurial to get a benchmark result""" output = self._perfext(command, *args, **kwargs) if output is None: return float('nan') match = PERF_RE.search(output) if not match: raise ValueError("Invalid output {0}".format(output)) return float(match.group(1)) def perfextjson(self, command, *args, **kwargs): """Use contrib/perf.py extension from mercurial to get a benchmark result""" args = ['--template', 'json', '--config', 'perf.all-timing=yes'] + list(args) output = self._perfext(command, *args, **kwargs) if output is None: return None try: data = json.loads(output) except ValueError: return None result = {} for item in data: title = item.get("title") if title in result: # multiple conflicting entry, skipping this one print("ignoring duplicated entry in json output:", title, file=sys.stderr) continue result[title] = localdata = { "count": item.get("count"), "minimum": item.get("wall"), } if 'median.wall' in item: localdata['median'] = item['median.wall'] if 'avg.wall' in item: localdata['average'] = item['avg.wall'] if 'max.wall' in item: localdata['maximum'] = item['max.wall'] return result def getperfdata(self, key, cmd): """Retrieve performance data from running cmd The result is cached to avoid running the same benchmark multiple time. Use this to handle commands from the perf extensions that returns multiple values.""" key = (repr(self.__class__), self.get_asv_rev(), self.repo_name) + key try: return self._get_cache(key) except KeyError: data = self.perfextjson(*cmd) self._set_cache(key, data) return data def _cache_file(self, key): filename = hashlib.sha256(repr(key)).hexdigest() return os.path.join(self._cache_dir, 'perf-cache', filename) def _set_cache(self, key, data): cachefile = self._cache_file(key) cachedir = os.path.dirname(cachefile) if not os.path.isdir(cachedir): os.mkdir(cachedir) with open(cachefile, 'wb') as f: pickle.dump(data, f) def _get_cache(self, key): cachefile = self._cache_file(key) try: with open(cachefile, 'rb') as f: return pickle.load(f) except IOError as exc: if not exc.errno == errno.ENOENT: raise raise KeyError(key) def setup_cache(self): self._cache_dir = os.getcwd() def teardown(self, *args, **kwargs): # only here for consintency and ease; you can use super().teardown() # in subclasses to mirror super().setup(). pass class BaseNChangesetsTestSuite(BaseTestSuite): params = BaseTestSuite.params + [[10, 100, 1000, 10000]] param_names = BaseTestSuite.param_names + ["changesets"]