from __future__ import print_function
import errno
import functools
import hashlib
import json
import os
import pickle
import re
import subprocess
import sys
import timeit

from functools import wraps

BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
STRIP_VARIANTS_PATH = os.path.join(BASEDIR, "partial-sets.yaml")
REPOS_DIR = os.path.join(BASEDIR, "repos")
# REPO_SUFFIX is not defined in this excerpt; ".benchrepo" is an assumed value
# for the suffix of benchmark repository directories under REPOS_DIR.
REPO_SUFFIX = ".benchrepo"
REPOS_DATA = sorted(d for d in os.listdir(REPOS_DIR)
                    if d.endswith(REPO_SUFFIX))
REPOS = [r[:-len(REPO_SUFFIX)] for r in REPOS_DATA]
# TODO fix
# with open(STRIP_VARIANTS_PATH) as f:
#     STRIP_VARIANTS = yaml.load(f.read())["partial-sets"]


class SkipResult(Exception):
    pass


# Backward compatibility for python 2.6
if not hasattr(subprocess, 'check_output'):
    STDOUT = subprocess.STDOUT

    def check_output(*popenargs, **kwargs):
        if 'stdout' in kwargs:  # pragma: no cover
            raise ValueError('stdout argument not allowed, '
                             'it will be overridden.')
        process = subprocess.Popen(stdout=subprocess.PIPE,
                                   *popenargs, **kwargs)
        output, _ = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd,
                                                output=output)
        return output
    subprocess.check_output = check_output

    # overwrite CalledProcessError due to `output`
    # keyword not being available (in 2.6)
    class CalledProcessError(Exception):

        def __init__(self, returncode, cmd, output=None):
            self.returncode = returncode
            self.cmd = cmd
            self.output = output

        def __str__(self):
            return "Command '%s' returned non-zero exit status %d\n%s" % (
                self.cmd, self.returncode, self.output)
    subprocess.CalledProcessError = CalledProcessError


def _bench_with_repo(
    repo_setup,
    repos=REPOS, setup=None, params=None, param_names=None,
    pretty_name=None,
):
    if params is None:
        assert param_names is None
        params = [repos]
        param_names = ["repo"]
    else:
        params = params[:]
        params.insert(0, repos)
        if param_names is None:
            param_names = ["repo"]
        else:
            param_names = param_names[:]
            param_names.insert(0, "repo")

    def decorator(func):

        _setup = {'args': None}

        @wraps(func)
        def wrapper(*args):
            return func(*(_setup['args'] + args[1:]))

        def bench_setup(repo_name, *args):
            repo = repo_setup(repo_name)
            if setup is not None:
                value = setup(repo, *args)
                _setup['args'] = (repo, value)
            else:
                _setup['args'] = (repo,)

        wrapper.setup = bench_setup
        wrapper.params = params
        wrapper.param_names = param_names
        wrapper.pretty_name = pretty_name
        return wrapper

    return decorator
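
# Illustrative usage of _bench_with_repo (a sketch; `clone_repo` and the
# benchmark function below are hypothetical, not part of this module):
#
#   @_bench_with_repo(clone_repo, params=[[10, 100]], param_names=["count"])
#   def track_example(repo, count):
#       return count
#
# asv calls the generated setup hook with (repo_name, count), the hook prepares
# the repository via clone_repo, and the wrapper then passes (repo, count) to
# the benchmark function.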


PERF_RE = re.compile(r'! wall (\d+\.\d+) comb (\d+\.\d+) user (\d+\.\d+) sys (\d+\.\d+) \(best of (\d+)\)')  # noqa: E501


def median(lst):
    quotient, remainder = divmod(len(lst), 2)
    if remainder:
        return sorted(lst)[quotient]
    return sum(sorted(lst)[quotient - 1:quotient + 1]) / 2.
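
# Illustrative behaviour of median(): an odd-length list returns the middle
# value, an even-length list averages the two middle values, e.g.
#
#   median([1, 3, 2])    -> 2
#   median([1, 2, 3, 4]) -> 2.5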


def params_as_kwargs(f):
    """Pass in test parameters as keyword arguments.

    Use as a decorator on BaseTestSuite methods

    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        names = self.param_names
        args, values = args[len(names):], args[:len(names)]
        kwargs.update(zip(names, values))
        return f(self, *args, **kwargs)

    return wrapper
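
# Illustrative usage of params_as_kwargs (a sketch; the suite below is
# hypothetical):
#
#   class ExampleSuite(BaseTestSuite):
#       params = [REPOS, [10, 100]]
#       param_names = ["repo", "changesets"]
#
#       @params_as_kwargs
#       def track_example(self, repo, changesets):
#           return changesets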


class BaseTestSuite(object):
    timer = timeit.default_timer
    repeat = (3, 10, 60.0)
    number = 1

    params = [REPOS]
    param_names = ["repo"]

    @staticmethod
    def get_skip():
        with open(os.path.join(REPOS_DIR, 'skip.json'), 'r') as f:
            return json.load(f)

    def get_asv_rev(self):
        '''Return currently benchmarked mercurial revision'''
        return self.hg('log', '--cwd', self.project_dir, '-T',
                       '{node|short}', '-r', '.').strip()

    def check_output(self, *args, **kwargs):
        """Helper to run commands

        Run given command in a subprocess
        Optional expected_return_code (default 0) is used to control whenever
        we expect the command should exit.

        If the command succeeded with expected_return_code = 0, return the output
        If the command succeeded with expected_return_code != 0, raise RuntimeError
        If the command fail with expected_return_code, return None, else raise
        original subprocess.CalledProcessError exception.
        """
        env = kwargs.pop('env', self.environ)
        expected_return_code = kwargs.pop('expected_return_code', 0)
        cmd = list(args)
        try:
            output = subprocess.check_output(cmd, env=env, **kwargs)
        except subprocess.CalledProcessError as exc:
            if exc.returncode == expected_return_code:
                # failed as we expected
                return None
            raise
        else:
            if expected_return_code != 0:
                raise RuntimeError('unexpected return code 0 for {}'.format(cmd))
            return output
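
    # Illustrative calls (a sketch; the commands shown are hypothetical):
    #
    #   self.check_output('ls', self.repo_path)
    #       # -> command output, exit code 0 expected
    #   self.check_output(self.hgpath, 'unknown-command',
    #                     expected_return_code=255)
    #       # -> None, hg exited with 255 as expected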

    def hg(self, *args, **kwargs):
        """Run given command arguments with hg

        When there is no '--cwd' in arguments, use the benchmarked repo with
        'hg --cwd /path/to/repo'
        """
        args = list(args)
        # disable multi-worker mode because our current test setup behaves
        # badly with multiple CPUs
        args = ["--config", "worker.enabled=no"] + args
        if '--cwd' not in args:
            # use self.repo_path as repo
            args = ['--cwd', self.repo_path] + args
        cmd = [self.hgpath] + list(args)
        return self.check_output(*cmd, **kwargs)
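
    # Illustrative calls (a sketch; revisions and paths are examples):
    #
    #   self.hg('log', '-r', 'tip', '-T', '{node}')      # runs in self.repo_path
    #   self.hg('--cwd', '/tmp/other-repo', 'summary')   # explicit --cwd wins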

    def safe_hg(self, command, *args, **kwargs):
        """Run given command argument with hg and ignore unknown commands

        This is to be used for commands that may not exist in earlier mercurial
        versions.

        When hg exit code is 255, test the command existence with 'hg help CMD'
        to test command existence, if it also return 255, raise SkipResult
        exception.

        We expect `command` to be the hg command we want to run, either a list
        of arguments required to run 'hg help' (for extensions etc).
        """
        if isinstance(command, (list, tuple)):
            cmd = command
        else:
            cmd = [command]
        try:
            return self.hg(*(cmd + list(args)), **kwargs)
        except subprocess.CalledProcessError as exc:
            if exc.returncode == 255:
                # test if it returns 255 because the command does not exist
                # or if it's another issue
                try:
                    self.hg(*(['help'] + cmd))
                except subprocess.CalledProcessError as exc:
                    if exc.returncode == 255:
                        # command does not exist in this version of mercurial
                        raise SkipResult()
                    raise
            raise
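
    # Illustrative usage (a sketch; 'debugupdatecaches' is only an example of
    # a command missing from older mercurial versions):
    #
    #   try:
    #       self.safe_hg('debugupdatecaches')
    #   except SkipResult:
    #       pass  # command not available in the benchmarked version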

    def _perfext(self, command, *args, **kwargs):
        """Use contrib/perf.py extension from mercurial to get data"""
        perfpath = os.path.join(self.project_dir, 'contrib', 'perf.py')
        kwargs.setdefault('stderr', subprocess.STDOUT)
        try:
            return self.safe_hg(
                [command, '--config', 'extensions.perf={0}'.format(perfpath)],
                *args, **kwargs)
        except SkipResult:
            # command does not exist for this version of perf.py;
            # return None, which callers turn into NaN ("n/a" for asv)
            return None

    def perfext(self, command, *args, **kwargs):
        """Use contrib/perf.py extension from mercurial to get a benchmark result"""
        output = self._perfext(command, *args, **kwargs)
        if output is None:
            return float('nan')
        match = PERF_RE.search(output)
        if not match:
            raise ValueError("Invalid output {0}".format(output))
        return float(match.group(1))
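
    # The perf extension prints timing lines that PERF_RE parses, e.g.:
    #
    #   ! wall 0.001234 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
    #
    # perfext() returns the wall time (0.001234 here) as a float.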

    def perfextjson(self, command, *args, **kwargs):
        """Use contrib/perf.py extension from mercurial to get a benchmark result"""
        args = ['--template', 'json', '--config', 'perf.all-timing=yes'] + list(args)
        output = self._perfext(command, *args, **kwargs)
        if output is None:
            return None
        try:
            data = json.loads(output)
        except ValueError:
            return None
        result = {}
        for item in data:
            title = item.get("title")
            if title in result:
                # multiple conflicting entries, skip this one
                print("ignoring duplicated entry in json output:", title,
                      file=sys.stderr)
                continue
            result[title] = localdata = {
                "count": item.get("count"),
                "minimum": item.get("wall"),
            }
            if 'median.wall' in item:
                localdata['median'] = item['median.wall']
            if 'avg.wall' in item:
                localdata['average'] = item['avg.wall']
            if 'max.wall' in item:
                localdata['maximum'] = item['max.wall']
        return result
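
    # Illustrative shape of a perfextjson() result (numbers are made up; the
    # key is each timing's "title" field, None when an entry has no title):
    #
    #   {None: {"count": 25, "minimum": 0.0012,
    #           "median": 0.0013, "average": 0.0014, "maximum": 0.0021}}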

    def getperfdata(self, key, cmd):
        """Retrieve performance data from running cmd

        The result is cached to avoid running the same benchmark multiple time.
        Use this to handle commands from the perf extensions that returns
        multiple values."""
        key = (repr(self.__class__), self.get_asv_rev(), self.repo_name) + key

        try:
            return self._get_cache(key)
        except KeyError:
            data = self.perfextjson(*cmd)
            self._set_cache(key, data)
            return data
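
    # Illustrative call (a sketch; 'perftags' is one of the perf.py commands):
    #
    #   data = self.getperfdata(('tags',), ['perftags'])
    #
    # Repeated calls with the same key for the same revision and repository
    # reuse the pickled cache instead of re-running the perf command.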

    def _cache_file(self, key):
        filename = hashlib.sha256(repr(key)).hexdigest()
        return os.path.join(self._cache_dir, 'perf-cache', filename)

    def _set_cache(self, key, data):
        cachefile = self._cache_file(key)
        cachedir = os.path.dirname(cachefile)
        if not os.path.isdir(cachedir):
            os.mkdir(cachedir)
        with open(cachefile, 'wb') as f:
            pickle.dump(data, f)

    def _get_cache(self, key):
        cachefile = self._cache_file(key)
        try:
            with open(cachefile, 'rb') as f:
                return pickle.load(f)
        except IOError as exc:
            if not exc.errno == errno.ENOENT:
                raise
            raise KeyError(key)

    def setup_cache(self):
        self._cache_dir = os.getcwd()

    def setup(self, repo, *args, **kwargs):
        venv = os.path.abspath(os.path.join(os.path.dirname(sys.executable), ".."))
        self.repo_name = repo
        self.project_dir = os.path.join(venv, 'project')
        if os.path.isdir(self.project_dir):
            # use hg in virtualenv for "asv run"
            self.hgpath = os.path.join(venv, "bin", "hg")
        else:
            # use local hg for "asv dev"
            self.project_dir = os.path.join(BASEDIR, 'mercurial')
            sys.path.insert(0, self.project_dir)
            self.hgpath = os.path.join(self.project_dir, 'hg')
        self.repo_path = os.path.join(REPOS_DIR, self.repo_name)

        # Use a clean environ to run command
        self.environ = {'HGRCPATH': ''}
        # keep some environment variables
        # SSH_AUTH_SOCK for hg over ssh
        for key in ('SSH_AUTH_SOCK',):
            if key in os.environ:
                self.environ[key] = os.environ[key]
        # define the cache dir for simplicity
        self._cache_dir = os.getcwd()

    def teardown(self, *args, **kwargs):
        # only here for consistency and ease; you can use super().teardown()
        # in subclasses to mirror super().setup().
        pass


class BaseNChangesetsTestSuite(BaseTestSuite):

    params = [REPOS, [10, 100, 1000, 10000]]
    param_names = ["repo", "changesets"]