from __future__ import print_function
import errno
import functools
import hashlib
import json
import os
import os.path
import pickle
import re
import subprocess
import sys
import timeit
import yaml
import collections
from functools import wraps
from os.path import join
REPO_SUFFIX = '.benchrepo'
BASEDIR = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir
))
STRIP_VARIANTS_PATH = os.path.join(BASEDIR, "partial-sets.yaml")
def read_yaml_file(config_path):
with open(config_path) as config_file:
return yaml.safe_load(config_file.read())
repodir = read_yaml_file(os.path.join(BASEDIR, "config.yaml")).get('repodir')
if repodir:
REPOS_DIR = os.path.abspath(repodir)
else:
REPOS_DIR = os.path.join(BASEDIR, "repos")
PERFEXTSDIR = os.path.join(BASEDIR, "perfexts")
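# Illustrative sketch (not taken from this repository's docs): the only key
# read from config.yaml above is the optional 'repodir'; a minimal file could
# look like:
#
#   repodir: /srv/benchmark-repos
#
# When the key is missing or empty, repositories are looked up in
# "<BASEDIR>/repos" instead.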
FORMAT_VARIANT_PREFIX = "repo-format"
def get_repo_variants(repo_dir):
"""read all .benchrepo in given directory and computes ASV variants
Reads all .benchrepo files in the given directory and returns a tuple of 5
elements:
- a list of params list. The first element will be a list of repo ids, the
following ones will be the possible values for each repository format
key,
- a list of params name, the first one will be "repo",
- the length of the params name,
- a dict of the parsed `.benchrepofile`, the key is the benchrepo file
name without the `.benchrepo` extension.
- a dict of repo hash, the key is the following tuple (repo_id,
((format_key_1, format_value_1), (format_key_2, format_value_1)))
For example, given a directory with two benchrepo files:
repo-test-1-XXX.benchrepo:
reference-repo:
format-info:
compression: zlib
dotencode: true
fncache: true
generaldelta: false
plain-cl-delta: true
sparserevlog: false
id: repo-test-1
repo-hash: XXX
repo-test-1-YYY.benchrepo:
reference-repo:
format-info:
compression: zlib
dotencode: true
fncache: true
generaldelta: true
plain-cl-delta: true
sparserevlog: false
id: repo-test-1
repo-hash: YYY
    get_repo_variants will return:
- the params:
[['repo-test-1'], ['zlib'], [True], [True], [False, True], [True],
[False]]
- the params_names:
['repo', 'compression', 'dotencode', 'fncache', 'generaldelta',
'plain-cl-delta', 'sparserevlog']
- the length of params: 7
- the dict of parsed benchrepo data:
    {'repo-test-1-XXX': {...}, 'repo-test-1-YYY': {...}}
- the dict of repo hash:
{('repo-test-1', (('compression', 'zlib'), ('dotencode', True), ('fncache', True), ('generaldelta', False), ('plain-cl-delta', True), ('sparserevlog', False))): 'XXX',
    ('repo-test-1', (('compression', 'zlib'), ('dotencode', True), ('fncache', True), ('generaldelta', True), ('plain-cl-delta', True), ('sparserevlog', False))): 'YYY'}
    - the (empty) list of partial sets: []
    - the (empty) dict of roles: {}
"""
# Find all benchrepos
if not os.path.isdir(repo_dir):
return ([], [], 0, {}, {}, [], {})
benchrepos_files = sorted(
d for d in os.listdir(repo_dir) if d.endswith(REPO_SUFFIX)
)
repo_ids = set()
format_variants = collections.defaultdict(set)
repo_details = {}
repo_map = {}
partial_sets_set = set()
partial_sets_roles = {}
# Parse the benchrepos files
for benchrepo in benchrepos_files:
data = read_yaml_file(join(repo_dir, benchrepo))
reference_repo = data["reference-repo"]
repo_id = reference_repo["id"]
repo_ids.add(repo_id)
repo_name = reference_repo['repo-prefix']
repo_details[repo_name] = data
format_info = reference_repo.get("format-info", {})
partial_sets = data.get("partial-sets", {})
if not partial_sets:
# compat layer for the level transition. We can drop this later
partial_sets = reference_repo.get("partial-sets", {})
partial_sets_set.update(partial_sets)
partial_roles = data.get("roles", {})
# We are parsing this kind of data
#
# roles:
# pull:
# noop:
# same:
# source: "reference"
# target: "same"
for action, subtypes in partial_roles.items():
action_map = partial_sets_roles.setdefault(action, {})
for st, variants in subtypes.items():
subtypes_map = action_map.setdefault(st, {})
for v, pdata in variants.items():
variant_map = subtypes_map.setdefault(v, {})
repo_key = repo_name
variant_map[repo_key] = pdata
# Add a prefix to format-info keys
new_format_info = {}
for format_name, format_value in format_info.items():
new_format_name = "{}-{}".format(FORMAT_VARIANT_PREFIX, format_name)
new_format_info[new_format_name] = format_value
format_variants[new_format_name].add(format_value)
repo_map_key = (repo_id, tuple(sorted(new_format_info.items())))
repo_map[repo_map_key] = repo_name
params = [list(sorted(repo_ids))]
params_names = ["repo"]
for format_name in sorted(format_variants):
params_names.append(format_name)
params.append(list(sorted(format_variants[format_name])))
return (
params,
params_names,
len(params_names),
repo_details,
repo_map,
list(sorted(partial_sets_set)),
partial_sets_roles,
)
VARIANTS = get_repo_variants(REPOS_DIR)
BASE_PARAMS, BASE_PARAMS_NAMES, BASE_PARAMS_LEN = VARIANTS[:3]
REPO_DETAILS, REPO_HASH_MAP, STRIP_VARIANTS_LIST = VARIANTS[3:6]
ROLES = VARIANTS[6]
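# Based on the docstring example in get_repo_variants, BASE_PARAMS_NAMES would
# be ['repo', 'repo-format-compression', 'repo-format-dotencode', ...] and
# BASE_PARAMS_LEN would be 7; the actual values depend on the .benchrepo files
# present in REPOS_DIR.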
class SkipResult(Exception):
pass
# Backward compatibility for python 2.6
if not hasattr(subprocess, 'check_output'):
STDOUT = subprocess.STDOUT
def check_output(*popenargs, **kwargs):
if 'stdout' in kwargs: # pragma: no cover
raise ValueError('stdout argument not allowed, '
'it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs, **kwargs)
output, _ = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd,
output=output)
return output
subprocess.check_output = check_output
# overwrite CalledProcessError due to `output`
# keyword not being available (in 2.6)
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d\n%s" % (
self.cmd, self.returncode, self.output)
subprocess.CalledProcessError = CalledProcessError
PERF_RE = re.compile(r'! wall (\d+.\d+) comb (\d+.\d+) user (\d+.\d+) sys (\d+.\d+) \(best of (\d+)\)') # noqa: E501
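# PERF_RE matches the timing summary line printed by contrib/perf.py, e.g.
# (numbers are illustrative):
#
#   ! wall 0.003456 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)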
###
# Base classes for benchmarks
###
def median(lst):
quotient, remainder = divmod(len(lst), 2)
if remainder:
return sorted(lst)[quotient]
return sum(sorted(lst)[quotient - 1:quotient + 1]) / 2.
def params_as_kwargs(f):
"""Pass in test parameters as keyword arguments.
Use as a decorator on BaseTestSuite methods
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
names = self.param_names
args, values = args[len(names):], args[:len(names)]
kwargs.update(zip(names, values))
return f(self, *args, **kwargs)
return wrapper
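# Illustrative example (parameter values are made up): with
# param_names == ['repo', 'repo-format-generaldelta'] and one extra benchmark
# parameter, ASV calls the method as  method(self, 'repo-test-1', True, 100);
# the wrapper above turns this into
# f(self, 100, **{'repo': 'repo-test-1', 'repo-format-generaldelta': True}).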
def not_compatible_with(revset, filter_fn=None):
"""Specifies the revset wherein the command is NOT expected to work.
Skips the benchmark if the command is not expected to work.
The current version is obtained from the environment.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
current_version = os.environ.get('ASV_COMMIT')
if self.should_skip_benchmark(incompatibility_revset=revset,
current_version=current_version,
filter_fn=filter_fn,
test_kwargs=kwargs):
message = "{} is not expected to work for hash {}"
print(
message.format(f.__name__, current_version),
file=sys.stderr
)
raise NotImplementedError()
return f(self, *args, **kwargs)
return wrapper
return decorator
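# Illustrative usage (benchmark name and revset are hypothetical):
#
#   class SomeSuite(BaseTestSuite):
#       @not_compatible_with(revset="::a1b2c3d4e5f6")
#       def track_something(self, *args, **kwargs):
#           ...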
class BaseTestSuite(object):
timer = timeit.default_timer
repeat = (3, 10, 60.0)
number = 1
params = BASE_PARAMS
param_names = BASE_PARAMS_NAMES
@params_as_kwargs
def setup(self, repo, **kwargs):
venv = os.path.abspath(os.path.join(os.path.dirname(sys.executable), ".."))
self.project_dir = os.path.join(venv, 'project')
if os.path.isdir(self.project_dir):
# use hg in virtualenv for "asv run"
self.hgpath = os.path.join(venv, "bin", "hg")
else:
# use local hg for "asv dev"
self.project_dir = os.path.join(BASEDIR, 'mercurial')
sys.path.insert(0, self.project_dir)
            self.hgpath = os.path.join(self.project_dir, 'hg')
repo_name = self.get_repo_name(repo, **kwargs)
        if repo_name is None:
            raise NotImplementedError(
                "no repository for this format/params combination, skipping."
            )
self.repo_id = repo
self.repo_name = repo_name
reference_basename = "{}-reference".format(self.repo_name)
self.repo_path = os.path.join(REPOS_DIR, reference_basename)
        ### Use a clean environment to run commands
        #
        # (We rely on running from the venv using an explicit path)
self.environ = {'HGRCPATH': ''}
# keep some environment variables
# SSH_AUTH_SOCK for hg over ssh
for key in ('SSH_AUTH_SOCK', 'HGMODULEPOLICY'):
if key in os.environ:
self.environ[key] = os.environ[key]
# define the cache dir for simplicity
self._cache_dir = os.getcwd()
# Don't run if using Rust on a revision before the Rust modulepolicy
# was introduced
if os.environ.get("HGMODULEPOLICY").startswith("rust"):
# Don't use `subprocess.PIPE` as it can deadlock (see docs).
# We capture `stderr` because the error message from `hg log` would
# make it seem like there was a bug.
currentrev = self.get_asv_rev()
revset = "94167e701e125dce1788e19b1e1489958235e40c::"
if not self._matchrevset(revset, currentrev):
raise NotImplementedError(
"Rust modulepolicy was not implemented before "
"revision 94167e701e12"
", skipping."
)
def _matchrevset(self, revset, target_rev):
"""return True if <target-rev> is within <revset>"""
tmpl = "({}) and ({})"
full_revset = tmpl.format(revset, target_rev)
match_pattern = 'ASV-REVSET-MATCH'
# TODO use `hg test` if/when it is stable
command = [
self.hgpath,
'--cwd',
os.path.join(BASEDIR, "mercurial"),
'log',
'-r',
full_revset,
'--template',
match_pattern
]
try:
output = self.check_output(*command, env=self.environ)
except subprocess.CalledProcessError as exc:
if exc.returncode == 255:
return False
else:
raise
return match_pattern in output
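    # For example, _matchrevset("94167e701e12::", "deadbeef0123") runs roughly:
    #
    #   hg log -r "(94167e701e12::) and (deadbeef0123)" --template ASV-REVSET-MATCH
    #
    # (revision hash illustrative) and treats a 255 exit code as "no match".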
    def should_skip_benchmark(self, incompatibility_revset, current_version,
                              filter_fn, test_kwargs):
        """Determine whether the benchmark should be skipped, given an
        exclusion revset, the current mercurial version and an optional
        filter function.

        The filter function can be used to decide more precisely (most likely
        based on test_kwargs) whether the benchmark should be skipped. It must
        return a boolean: `True` if it should indeed be skipped, `False`
        otherwise.
        """
in_revset = True
in_filter = False
if incompatibility_revset is not None and current_version:
in_revset = self._matchrevset(incompatibility_revset, current_version)
in_filter = True
if filter_fn is not None:
in_filter = bool(filter_fn(test_kwargs, current_version))
return in_revset and in_filter
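    # A hypothetical filter_fn sketch: only skip repository variants that
    # actually use sparse-revlog.
    #
    #   def _sparse_only(test_kwargs, current_version):
    #       return test_kwargs.get('repo-format-sparserevlog') is True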
@staticmethod
def get_skip():
with open(os.path.join(REPOS_DIR, 'skip.json'), 'r') as f:
return json.load(f)
def get_repo_name(self, repo, **kwargs):
# Old way
if BASE_PARAMS_LEN == 1:
return repo
# Filter the format variants
variants = {}
for key, value in kwargs.items():
if not key.startswith(FORMAT_VARIANT_PREFIX):
continue
variants[key] = value
repo_hash_key = (repo, tuple(sorted(variants.items())))
repo_name = REPO_HASH_MAP.get(repo_hash_key)
return repo_name
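    # e.g. get_repo_name('repo-test-1', **{'repo-format-generaldelta': True})
    # looks up ('repo-test-1', (('repo-format-generaldelta', True),)) in
    # REPO_HASH_MAP; None means no prepared repository matches that format
    # combination (values here are illustrative).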
def get_asv_rev(self):
'''Return currently benchmarked mercurial revision'''
return self.hg('log', '--config', 'experimental.evolution=all',
'--cwd', self.project_dir, '--template', '{node|short}',
'-r', '.').strip()
def check_output(self, *args, **kwargs):
"""Helper to run commands
Run given command in a subprocess
Optional expected_return_code (default 0) is used to control whenever
we expect the command should exit.
If the command succeeded with expected_return_code = 0, return the output
If the command succeeded with expected_return_code != 0, raise RuntimeError
If the command fail with expected_return_code, return None, else raise
original subprocess.CalledProcessError exception.
"""
env = kwargs.pop('env', self.environ)
expected_return_code = kwargs.pop('expected_return_code', 0)
cmd = list(args)
try:
output = subprocess.check_output(cmd, env=env, **kwargs)
except subprocess.CalledProcessError as exc:
if exc.returncode == expected_return_code:
# failed as we expected
return None
raise
else:
if expected_return_code != 0:
raise RuntimeError('unexpected return code 0 for {}'.format(cmd))
return output
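    # e.g. self.check_output(self.hgpath, 'version') returns the command output,
    # while self.check_output(self.hgpath, 'unknown-command',
    #                         expected_return_code=255) returns None, since hg
    # exits with 255 for unknown commands.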
def hg(self, *args, **kwargs):
"""Run given command arguments with hg
When there is no '--cwd' in arguments, use the benchmarked repo with
'hg --cwd /path/to/repo'
"""
args = list(args)
        # disable multi-worker because our current test setup is bad with multiple CPUs
args = ["--config", "worker.enabled=no"] + args
if '--cwd' not in args:
# use self.repo_path as repo
args = ['--cwd', self.repo_path] + args
cmd = [self.hgpath] + list(args)
return self.check_output(*cmd, **kwargs)
def safe_hg(self, command, *args, **kwargs):
"""Run given command argument with hg and ignore unknown commands
This is to be used for commands that may not exist in earlier mercurial
versions.
When hg exit code is 255, test the command existence with 'hg help CMD'
to test command existence, if it also return 255, raise SkipResult
exception.
We expect `command` to be the hg command we want to run, either a list
of arguments required to run 'hg help' (for extensions etc).
"""
if isinstance(command, (list, tuple)):
cmd = command
else:
cmd = [command]
try:
return self.hg(*(cmd + list(args)), **kwargs)
except subprocess.CalledProcessError as exc:
if exc.returncode == 255:
# test if it return 255 because the command does not exist
# or if it's another issue
try:
                    self.hg(*(['help'] + cmd))
except subprocess.CalledProcessError as exc:
if exc.returncode == 255:
# command does not exist in this version of mercurial
raise SkipResult()
raise
raise
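    # Illustrative call (the command name is only an example):
    #
    #   try:
    #       self.safe_hg(['debugupdatecaches'])
    #   except SkipResult:
    #       pass  # command not available in the benchmarked mercurial version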
def _getperfext(self, version):
"""given a mercurial-core node (or None) return a path to perfext
If a mecurial-core node is provided, the path will point to a file with
the perf.py content at that revision.
"""
if version is None:
return os.path.join(self.project_dir, 'contrib', 'perf.py')
filename = 'perf-%s.py' % version
extpath = os.path.join(PERFEXTSDIR, filename)
if not os.path.exists(extpath):
            # XXX this is a bit fragile: if anything goes wrong, we create an empty file
with open(extpath, 'wb') as f:
subprocess.check_call(
[ 'hg',
'--cwd',
os.path.join(BASEDIR, 'mercurial'),
'cat',
'--rev',
version,
'contrib/perf.py',
],
stdout=f
)
return extpath
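    # e.g. _getperfext('a1b2c3d4e5f6') would cache the extension source as
    # <PERFEXTSDIR>/perf-a1b2c3d4e5f6.py and reuse it on later runs
    # (revision hash illustrative).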
def _perfext(self, command, *args, **kwargs):
"""Use contrib/perf.py extension from mercurial to get data"""
kwargs = kwargs.copy()
perfextversion = kwargs.pop('perfextversion', None)
perfpath = self._getperfext(perfextversion)
kwargs.setdefault('stderr', subprocess.STDOUT)
try:
return self.safe_hg(
[command, '--config', 'extensions.perf={0}'.format(perfpath)],
*args, **kwargs)
except SkipResult:
# command does not exist for this version of perf.py
# return NaN which is a "n/a" status for asv
return None
except subprocess.CalledProcessError as exc:
msg = "calling perf command failed [%d]: %s %s" % (exc.returncode, command, args)
print(msg, file=sys.stderr)
print(exc.output, file=sys.stderr)
def perfext(self, command, *args, **kwargs):
"""Use contrib/perf.py extension from mercurial to get a benchmark result"""
output = self._perfext(command, *args, **kwargs)
if output is None:
return float('nan')
match = PERF_RE.search(output)
if not match:
raise ValueError("Invalid output {0}".format(output))
return float(match.group(1))
def perfextjson(self, command, *args, **kwargs):
"""Use contrib/perf.py extension from mercurial to get a benchmark result"""
args = ['--template', 'json', '--config', 'perf.all-timing=yes'] + list(args)
output = self._perfext(command, *args, **kwargs)
if output is None:
return None
try:
data = json.loads(output)
except ValueError:
return None
result = {}
for item in data:
title = item.get("title")
if title in result:
                # multiple conflicting entries, skipping this one
print("ignoring duplicated entry in json output:", title,
file=sys.stderr)
continue
result[title] = localdata = {
"count": item.get("count"),
"minimum": item.get("wall"),
}
if 'median.wall' in item:
localdata['median'] = item['median.wall']
if 'avg.wall' in item:
localdata['average'] = item['avg.wall']
if 'max.wall' in item:
localdata['maximum'] = item['max.wall']
return result
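    # Sketch of the JSON items this parses (values are made up):
    #
    #   [{"title": "", "count": 100, "wall": 0.003456, "median.wall": 0.0035,
    #     "avg.wall": 0.0036, "max.wall": 0.0041}]
    #
    # which becomes:
    #
    #   {"": {"count": 100, "minimum": 0.003456, "median": 0.0035,
    #         "average": 0.0036, "maximum": 0.0041}}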
def getperfdata(self, key, cmd):
"""Retrieve performance data from running cmd
The result is cached to avoid running the same benchmark multiple time.
Use this to handle commands from the perf extensions that returns
multiple values."""
key = (repr(self.__class__), self.get_asv_rev(), self.repo_name) + key
try:
return self._get_cache(key)
except KeyError:
data = self.perfextjson(*cmd)
self._set_cache(key, data)
return data
def _cache_file(self, key):
        filename = hashlib.sha256(repr(key).encode('utf-8')).hexdigest()
return os.path.join(self._cache_dir, 'perf-cache', filename)
def _set_cache(self, key, data):
cachefile = self._cache_file(key)
cachedir = os.path.dirname(cachefile)
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
with open(cachefile, 'wb') as f:
pickle.dump(data, f)
def _get_cache(self, key):
cachefile = self._cache_file(key)
try:
with open(cachefile, 'rb') as f:
return pickle.load(f)
except IOError as exc:
if not exc.errno == errno.ENOENT:
raise
raise KeyError(key)
def setup_cache(self):
self._cache_dir = os.getcwd()
def teardown(self, *args, **kwargs):
        # only here for consistency and ease; you can use super().teardown()
# in subclasses to mirror super().setup().
pass
class BaseNChangesetsTestSuite(BaseTestSuite):
params = BaseTestSuite.params + [[10, 100, 1000, 10000]]
param_names = BaseTestSuite.param_names + ["changesets"]