Commit faf338d1fe0e authored by Pierre-Yves David

benchmark: comment all PerfTestSuite benchmark

The current benchmark suite has grown to an unmanageable point. Let us comment everything out
and reintroduce the benchmarks slowly, with scrutiny.
parent f60c4b841320
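
For context, these suites follow the airspeed velocity (asv) conventions: any method whose name starts with "track_" is a benchmark whose numeric return value asv records, "params"/"param_names" define the parameter matrix, and "timeout" caps each run in seconds. The BaseTestSuite helpers (perfext, perfextjson) shell out to Mercurial's perf extension and return the measured timings. A minimal sketch of what reintroducing a single benchmark could look like, assuming those helpers; the class name here is hypothetical, not part of this commit:

    from .utils import BaseTestSuite

    class ReintroducedPerfTestSuite(BaseTestSuite):
        # asv aborts any benchmark that runs longer than this many seconds
        timeout = 300

        def track_status(self, *args, **kwargs):
            # run the perf extension's perfstatus command and report
            # its measured time as the tracked value
            return self.perfext("perfstatus")
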
@@ -4,470 +4,470 @@
from .utils import BaseTestSuite, params_as_kwargs, REPO_DETAILS
class PerfTestSuite(BaseTestSuite):
def track_status(self, *args, **kwargs):
return self.perfext("perfstatus")
def track_heads(self, *args, **kwargs):
return self.perfext("perfheads")
def track_branchmap(self, *args, **kwargs):
return self.perfext("perfbranchmap")
def track_startup(self, *args, **kwargs):
return self.perfext("perfstartup")
def track_ancestors(self, *args, **kwargs):
return self.perfext("perfancestors")
def track_changegroupchangelog(self, *args, **kwargs):
return self.perfext("perfchangegroupchangelog")
def track_dirstate(self, *args, **kwargs):
return self.perfext("perfdirstate")
def track_perfdirstatewrite(self, *args, **kwargs):
return self.perfext("perfdirstatewrite")
def track_perffncacheencode(self, *args, **kwargs):
return self.perfext("perffncacheencode")
# def track_perffncachewrite(self, *args, **kwargs):
# return self.perfext("perffncachewrite")
def track_perffncacheload(self, *args, **kwargs):
return self.perfext("perffncacheload")
def track_perfignore_load(self, *args, **kwargs):
data = self.perfextjson('perfignore')
return data['load']['median']
def track_perfparents(self, *args, **kwargs):
return self.perfext("perfparents")
def track_perfphases(self, *args, **kwargs):
return self.perfext("perfphases")
def track_perfphasesremote(self, *args, **kwargs):
"""Check the time necessary to compute phase against oneself"""
# XXX We could do testing against more variation
# (eg: lots of unknown nodes, many draft root, etc)
return self.perfext("perfphasesremote", '.')
def track_perfwalk(self, *args, **kwargs):
return self.perfext("perfwalk")
class RevlogReadTestSuite(BaseTestSuite):
params = BaseTestSuite.params[:]
param_names = BaseTestSuite.param_names[:]
timeout = 1800
# add a parameter: the distance between each read
param_names += ['distance']
params += [[1]]
# add a parameter: the number of reads to make
param_names += ['nbrev']
params += [[100]]
def _run_one(self, target, distance, nbrev, reverse=False):
cmd = [
"perfrevlogrevisions",
target,
"--dist", "%d" % distance,
"--startrev", "-%d" % (distance * nbrev)
]
if reverse:
cmd.append("--reverse")
return self.perfext(*cmd)
@params_as_kwargs
def track_perfrevlogrevisions_manifest_asc(self, distance, nbrev,
*args, **kwargs):
return self._run_one('--manifest', distance, nbrev)
@params_as_kwargs
def track_perfrevlogrevisions_manifest_desc(self, distance, nbrev,
*args, **kwargs):
return self._run_one('--manifest', distance, nbrev, reverse=True)
@params_as_kwargs
def track_perfrevlogrevisions_changelog(self, distance, nbrev,
*args, **kwargs):
return self._run_one('--changelog', distance, nbrev)
class RevlogWriteTestSuite(BaseTestSuite):
params = BaseTestSuite.params[:]
param_names = BaseTestSuite.param_names[:]
timeout = 1800
# add a parameter: the number of revisions to write
param_names += ['nbrev']
params += [[100]]
def _run_one(self, target, nbrev, source, lazydeltabase=False):
cmd = [
"perfrevlogwrite",
target,
"--startrev", "-%d" % nbrev,
"--source", source,
]
if lazydeltabase:
cmd.append("--lazydeltabase")
else:
cmd.append("--no-lazydeltabase")
key = ('perfrevlogwrite', target, nbrev, source, lazydeltabase)
return self.getperfdata(key, cmd)
def _run_one_id(self, title, target, nbrev, source, lazydeltabase=False):
    data = self._run_one(target, nbrev, source, lazydeltabase)
    if data is None:
        return float('nan')
    # find the first result whose key starts with the requested title
    match = None
    for key in data.keys():
        if key.startswith(title):
            match = key
            break
    if match is None:
        return float('nan')
    return data[match]['median']
def _run_one_full(self, target, nbrev, source, lazydeltabase=False):
return self._run_one_id('total time', target, nbrev, source, lazydeltabase)
@params_as_kwargs
def track_perfrevlogwrite_changelog(self, nbrev, *args, **kwargs):
return self._run_one_full('--changelog', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full(self, nbrev, *args, **kwargs):
return self._run_one_full('--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_parent_nonlazy(self, nbrev,
*args, **kwargs):
return self._run_one_full('--manifest', nbrev, 'parent-smallest',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_parent_lazy(self, nbrev, *args, **kwargs):
return self._run_one_full('--manifest', nbrev, 'parent-smallest',
lazydeltabase=True)
@params_as_kwargs
def track_perfrevlogwrite_manifest_storage_nonlazy(self, nbrev,
*args, **kwargs):
return self._run_one_full('--manifest', nbrev, 'storage',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_storage_lazy(self, nbrev, *args, **kwargs):
return self._run_one_full('--manifest', nbrev, 'storage',
lazydeltabase=True)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_max(self, nbrev, *args, **kwargs):
return self._run_one_id('max of', '--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_99percentile(self, nbrev, *args, **kwargs):
return self._run_one_id('99% of', '--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_95percentile(self, nbrev, *args, **kwargs):
return self._run_one_id('95% of', '--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_90percentile(self, nbrev, *args, **kwargs):
return self._run_one_id('90% of', '--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_75percentile(self, nbrev, *args, **kwargs):
return self._run_one_id('75% of', '--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_50percentile(self, nbrev, *args, **kwargs):
return self._run_one_id('50% of', '--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_25percentile(self, nbrev, *args, **kwargs):
return self._run_one_id('25% of', '--manifest', nbrev, 'full',
lazydeltabase=False)
@params_as_kwargs
def track_perfrevlogwrite_manifest_full_min(self, nbrev, *args, **kwargs):
return self._run_one_id('min of', '--manifest', nbrev, 'full',
lazydeltabase=False)
class ManifestPerfTestSuite(BaseTestSuite):
params = BaseTestSuite.params + [['persist', 'clear']]
param_names = BaseTestSuite.param_names + ['disk_cache']
_manifestcache = os.path.join('.hg', 'cache', 'manifestfulltextcache')
_manifestcache_backup = _manifestcache + '.asv_backup'
@params_as_kwargs
def setup(self, **kwargs):
super(ManifestPerfTestSuite, self).setup(**kwargs)
# back up manifest cache state
self.cache_path = os.path.join(self.repo_path, self._manifestcache)
self.cache_present = os.path.exists(self.cache_path)
if self.cache_present:
self.backup_path = os.path.join(
self.repo_path, self._manifestcache_backup)
if os.path.exists(self.backup_path):
# in case a previous run left one behind
os.remove(self.backup_path)
os.link(self.cache_path, self.backup_path)
# ensure we have a current cache
self.hg('debugupdatecaches')
@params_as_kwargs
def teardown(self, **kwargs):
super(ManifestPerfTestSuite, self).teardown(**kwargs)
if self.cache_present:
# restore manifest cache
os.remove(self.cache_path)
os.rename(self.backup_path, self.cache_path)
elif os.path.exists(self.cache_path):
# remove manifest cache, it wasn't there before
os.remove(self.cache_path)
@params_as_kwargs
def track_manifest(self, disk_cache, **kwargs):
command = ("perfmanifest", "tip")
if disk_cache == 'clear':
command += ('--clear-disk',)
return self.perfext(*command)
class RevlogReadOneTestSuite(BaseTestSuite):
params = BaseTestSuite.params[:]
param_names = BaseTestSuite.param_names[:]
timeout = 300
@params_as_kwargs
def track_full(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None:
return float('nan')
return data["full"].get('median', float('nan'))
@params_as_kwargs
def track_hash(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None:
return float('nan')
return data["hash"].get('median', float('nan'))
@params_as_kwargs
def track_patch(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None:
return float('nan')
return data["patch"].get('median', float('nan'))
@params_as_kwargs
def track_decompress(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None:
return float('nan')
return data["decompress"].get('median', float('nan'))
@params_as_kwargs
def track_rawchunks(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None:
return float('nan')
return data["rawchunks"].get('median', float('nan'))
@params_as_kwargs
def track_read(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None:
return float('nan')
return data["read"].get('median', float('nan'))
@params_as_kwargs
def track_deltachain(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None:
return float('nan')
return data["deltachain"].get('median', float('nan'))
@params_as_kwargs
def track_slice(self, *args, **kwargs):
cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
data = self.getperfdata((-1,), cmd)
if data is None or 'slice-sparse-chain' not in data:
return float('nan')
return data["slice-sparse-chain"].get('median', float('nan'))
class PerfTracecopiesSuite(BaseTestSuite):
params = BaseTestSuite.params + [("changeset-compability", "default")]
param_names = BaseTestSuite.param_names + ["copies-data"]
timeout = 300
@params_as_kwargs
def track_tracecopies_large(self, *args, **kwargs):
"""benchmark the tracecopies logic over a large amount of candidates
This benchmark requires the repository data to specify the following value:
benchmark-variables:
trace-copies:
large:
source: REV
destination: REV
"""
x = REPO_DETAILS.get(self.repo_name)
x = x.get('benchmark-variables', {})
x = x.get('trace-copies', {})
x = x.get('large', {})
source = x.get('source')
destination = x.get('destination')
if source is None or destination is None:
return float('nan')
cmd = ['perfpathcopies']
cmd.append(source)
cmd.append(destination)
if kwargs["copies-data"] == "changeset-compability":
skip =self.should_skip_benchmark
if skip("(not 49ad315b39ee::)", self.get_asv_rev(), None, None):
return float('nan')
cmd.append("--config")
cmd.append("experimental.copies.read-from=compatibility")
return self.perfext(*cmd)
class PerfTagsTestSuite(BaseTestSuite):
params = BaseTestSuite.params + [['cold', 'warm']]
param_names = BaseTestSuite.param_names + ['changelog']
timeout = 300
@params_as_kwargs
def track_tags(self, changelog, *args, **kwargs):
if changelog == 'warm':
clearcache = False
elif changelog == 'cold':
clearcache = True
else:
assert False
cmd = ['perftags']
if clearcache:
cmd.append('--clear-revlogs')
else:
cmd.append('--no-clear-revlogs')
data = self.perfextjson(*cmd)
return data[None]['median']
class PerfBranchmapLoadSuite(BaseTestSuite):
params = BaseTestSuite.params + [['cold', 'warm']]
param_names = BaseTestSuite.param_names + ['changelog']
timeout = 300
@params_as_kwargs
def track_perfbranchmap_load(self, changelog, *args, **kwargs):
if changelog == 'warm':
clearcache = False
elif changelog == 'cold':
clearcache = True
else:
assert False
cmd = ['perfbranchmapload']
if clearcache:
cmd.append('--clear-revlogs')
else:
cmd.append('--no-clear-revlogs')
data = self.perfextjson(*cmd)
if None not in data:
return float('nan')
return data[None]['median']
class PerfBranchmapUpdateSuite(BaseTestSuite):
params = BaseTestSuite.params + [['cold', 'warm'], [1, 100, 10000]]
param_names = BaseTestSuite.param_names + ['caches', 'missingrevs']
timeout = 300
@params_as_kwargs
def track_perfbranchmap_update(self, caches, missingrevs, *args, **kwargs):
if caches == 'warm':
clearcache = False
elif caches == 'cold':
clearcache = True
else:
assert False
cmd = ['perfbranchmapupdate', '--quiet']
if clearcache:
cmd.append('--clear-caches')
else:
cmd.append('--no-clear-caches')
cmd.append('--base')
cmd.append('not -%d:' % missingrevs)
cmd.append('--target')
cmd.append('-%d:' % missingrevs)
data = self.perfextjson(*cmd)
if None not in data:
return float('nan')
return data[None]['median']
class PerfHeadsSuite(BaseTestSuite):
params = BaseTestSuite.params + [['unfiltered', 'visible']]
param_names = BaseTestSuite.param_names + ['filter']
timeout = 300
@params_as_kwargs
def track_perfheads(self, filter, *args, **kwargs):
cmd = ['perfheads']
if filter == 'unfiltered':
cmd.append('--hidden')
data = self.perfextjson(*cmd)
if None not in data:
return float('nan')
return data[None]['median']
class PerfIndexSuite(BaseTestSuite):
params = BaseTestSuite.params + [['first', 'last']]
param_names = BaseTestSuite.param_names + ['lookup']
timeout = 300
@params_as_kwargs
def track_perfindex(self, lookup, *args, **kwargs):
cmd = ['perfindex']
if lookup == 'first':
cmd.append('--rev')
cmd.append('0')
elif lookup == 'last':
cmd.append('--rev')
cmd.append('tip')
else:
assert False
data = self.perfextjson(*cmd)
if None not in data:
return float('nan')
return data[None]['median']
# class PerfTestSuite(BaseTestSuite):
#
# def track_status(self, *args, **kwargs):
# return self.perfext("perfstatus")
#
# def track_heads(self, *args, **kwargs):
# return self.perfext("perfheads")
#
# def track_branchmap(self, *args, **kwargs):
# return self.perfext("perfbranchmap")
#
# def track_startup(self, *args, **kwargs):
# return self.perfext("perfstartup")
#
# def track_ancestors(self, *args, **kwargs):
# return self.perfext("perfancestors")
#
# def track_changegroupchangelog(self, *args, **kwargs):
# return self.perfext("perfchangegroupchangelog")
#
# def track_dirstate(self, *args, **kwargs):
# return self.perfext("perfdirstate")
#
# def track_perfdirstatewrite(self, *args, **kwargs):
# return self.perfext("perfdirstatewrite")
#
# def track_perffncacheencode(self, *args, **kwargs):
# return self.perfext("perffncacheencode")
#
# # def track_perffncachewrite(self, *args, **kwargs):
# # return self.perfext("perffncachewrite")
#
# def track_perffncacheload(self, *args, **kwargs):
# return self.perfext("perffncacheload")
#
# def track_perfignore_load(self, *args, **kwargs):
# data = self.perfextjson('perfignore')
# return data['load']['median']
#
# def track_perfparents(self, *args, **kwargs):
# return self.perfext("perfparents")
#
# def track_perfphases(self, *args, **kwargs):
# return self.perfext("perfphases")
#
# def track_perfphasesremote(self, *args, **kwargs):
# """Check the time necessary to compute phase against oneself"""
# # XXX We could do testing against more variation
# # (eg: lots of unknown nodes, many draft root, etc)
# return self.perfext("perfphasesremote", '.')
#
# def track_perfwalk(self, *args, **kwargs):
# return self.perfext("perfwalk")
#
# class RevlogReadTestSuite(BaseTestSuite):
# params = BaseTestSuite.params[:]
# param_names = BaseTestSuite.param_names[:]
#
# timeout = 1800
#
# # add a parameter: the distance between each read
# param_names += ['distance']
# params += [[1]]
# # add a parameter: the number of reads to make
# param_names += ['nbrev']
# params += [[100]]
#
# def _run_one(self, target, distance, nbrev, reverse=False):
# cmd = [
# "perfrevlogrevisions",
# target,
# "--dist", "%d" % distance,
# "--startrev", "-%d" % (distance * nbrev)
# ]
# if reverse:
# cmd.append("--reverse")
# return self.perfext(*cmd)
#
# @params_as_kwargs
# def track_perfrevlogrevisions_manifest_asc(self, distance, nbrev,
# *args, **kwargs):
# return self._run_one('--manifest', distance, nbrev)
#
# @params_as_kwargs
# def track_perfrevlogrevisions_manifest_desc(self, distance, nbrev,
# *args, **kwargs):
# return self._run_one('--manifest', distance, nbrev, reverse=True)
#
# @params_as_kwargs
# def track_perfrevlogrevisions_changelog(self, distance, nbrev,
# *args, **kwargs):
# return self._run_one('--changelog', distance, nbrev)
#
# class RevlogWriteTestSuite(BaseTestSuite):
# params = BaseTestSuite.params[:]
# param_names = BaseTestSuite.param_names[:]
#
# timeout = 1800
#
# # add a parameter: the number of revisions to write
# param_names += ['nbrev']
# params += [[100]]
#
# def _run_one(self, target, nbrev, source, lazydeltabase=False):
# cmd = [
# "perfrevlogwrite",
# target,
# "--startrev", "-%d" % nbrev,
# "--source", source,
# ]
# if lazydeltabase:
# cmd.append("--lazydeltabase")
# else:
# cmd.append("--no-lazydeltabase")
#
# key = ('perfrevlogwrite', target, nbrev, source, lazydeltabase)
# return self.getperfdata(key, cmd)
#
# def _run_one_id(self, title, target, nbrev, source, lazydeltabase=False):
#     data = self._run_one(target, nbrev, source, lazydeltabase)
#     if data is None:
#         return float('nan')
#     # find the first result whose key starts with the requested title
#     match = None
#     for key in data.keys():
#         if key.startswith(title):
#             match = key
#             break
#     if match is None:
#         return float('nan')
#     return data[match]['median']
#
# def _run_one_full(self, target, nbrev, source, lazydeltabase=False):
# return self._run_one_id('total time', target, nbrev, source, lazydeltabase)
#
# @params_as_kwargs
# def track_perfrevlogwrite_changelog(self, nbrev, *args, **kwargs):
# return self._run_one_full('--changelog', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full(self, nbrev, *args, **kwargs):
# return self._run_one_full('--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_parent_nonlazy(self, nbrev,
# *args, **kwargs):
# return self._run_one_full('--manifest', nbrev, 'parent-smallest',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_parent_lazy(self, nbrev, *args, **kwargs):
# return self._run_one_full('--manifest', nbrev, 'parent-smallest',
# lazydeltabase=True)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_storage_nonlazy(self, nbrev,
# *args, **kwargs):
# return self._run_one_full('--manifest', nbrev, 'storage',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_storage_lazy(self, nbrev, *args, **kwargs):
# return self._run_one_full('--manifest', nbrev, 'storage',
# lazydeltabase=True)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_max(self, nbrev, *args, **kwargs):
# return self._run_one_id('max of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_99percentile(self, nbrev, *args, **kwargs):
# return self._run_one_id('99% of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_95percentile(self, nbrev, *args, **kwargs):
# return self._run_one_id('95% of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_90percentile(self, nbrev, *args, **kwargs):
# return self._run_one_id('90% of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_75percentile(self, nbrev, *args, **kwargs):
# return self._run_one_id('75% of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_50percentile(self, nbrev, *args, **kwargs):
# return self._run_one_id('50% of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_25percentile(self, nbrev, *args, **kwargs):
# return self._run_one_id('25% of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# @params_as_kwargs
# def track_perfrevlogwrite_manifest_full_min(self, nbrev, *args, **kwargs):
# return self._run_one_id('min of', '--manifest', nbrev, 'full',
# lazydeltabase=False)
#
# class ManifestPerfTestSuite(BaseTestSuite):
# params = BaseTestSuite.params + [['persist', 'clear']]
# param_names = BaseTestSuite.param_names + ['disk_cache']
#
# _manifestcache = os.path.join('.hg', 'cache', 'manifestfulltextcache')
# _manifestcache_backup = _manifestcache + '.asv_backup'
#
# @params_as_kwargs
# def setup(self, **kwargs):
# super(ManifestPerfTestSuite, self).setup(**kwargs)
# # back up manifest cache state
# self.cache_path = os.path.join(self.repo_path, self._manifestcache)
# self.cache_present = os.path.exists(self.cache_path)
# if self.cache_present:
# self.backup_path = os.path.join(
# self.repo_path, self._manifestcache_backup)
# if os.path.exists(self.backup_path):
# # in case a previous run left one behind
# os.remove(self.backup_path)
# os.link(self.cache_path, self.backup_path)
# # ensure we have a current cache
# self.hg('debugupdatecaches')
#
# @params_as_kwargs
# def teardown(self, **kwargs):
# super(ManifestPerfTestSuite, self).teardown(**kwargs)
# if self.cache_present:
# # restore manifest cache
# os.remove(self.cache_path)
# os.rename(self.backup_path, self.cache_path)
# elif os.path.exists(self.cache_path):
# # remove manifest cache, it wasn't there before
# os.remove(self.cache_path)
#
# @params_as_kwargs
# def track_manifest(self, disk_cache, **kwargs):
# command = ("perfmanifest", "tip")
# if disk_cache == 'clear':
# command += ('--clear-disk',)
# return self.perfext(*command)
#
# class RevlogReadOneTestSuite(BaseTestSuite):
# params = BaseTestSuite.params[:]
# param_names = BaseTestSuite.param_names[:]
#
# timeout = 300
#
# @params_as_kwargs
# def track_full(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None:
# return float('nan')
# return data["full"].get('median', float('nan'))
#
# @params_as_kwargs
# def track_hash(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None:
# return float('nan')
# return data["hash"].get('median', float('nan'))
#
# @params_as_kwargs
# def track_patch(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None:
# return float('nan')
# return data["patch"].get('median', float('nan'))
#
# @params_as_kwargs
# def track_decompress(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None:
# return float('nan')
# return data["decompress"].get('median', float('nan'))
#
# @params_as_kwargs
# def track_rawchunks(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None:
# return float('nan')
# return data["rawchunks"].get('median', float('nan'))
#
# @params_as_kwargs
# def track_read(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None:
# return float('nan')
# return data["read"].get('median', float('nan'))
#
# @params_as_kwargs
# def track_deltachain(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None:
# return float('nan')
# return data["deltachain"].get('median', float('nan'))
#
# @params_as_kwargs
# def track_slice(self, *args, **kwargs):
# cmd = ["perfrevlogrevision", "--manifest", "--", "-1"]
# data = self.getperfdata((-1,), cmd)
# if data is None or 'slice-sparse-chain' not in data:
# return float('nan')
# return data["slice-sparse-chain"].get('median', float('nan'))
#
# class PerfTracecopiesSuite(BaseTestSuite):
# params = BaseTestSuite.params + [("changeset-compability", "default")]
# param_names = BaseTestSuite.param_names + ["copies-data"]
#
# timeout = 300
#
# @params_as_kwargs
# def track_tracecopies_large(self, *args, **kwargs):
# """benchmark the tracecopies logic over a large amount of candidates
#
# This benchmark requires the repository data to specify the following value:
#
# benchmark-variables:
# trace-copies:
# large:
# source: REV
# destination: REV
# """
#
# x = REPO_DETAILS.get(self.repo_name)
# x = x.get('benchmark-variables', {})
# x = x.get('trace-copies', {})
# x = x.get('large', {})
# source = x.get('source')
# destination = x.get('destination')
# if source is None or destination is None:
# return float('nan')
#
# cmd = ['perfpathcopies']
# cmd.append(source)
# cmd.append(destination)
# if kwargs["copies-data"] == "changeset-compability":
# skip =self.should_skip_benchmark
# if skip("(not 49ad315b39ee::)", self.get_asv_rev(), None, None):
# return float('nan')
# cmd.append("--config")
# cmd.append("experimental.copies.read-from=compatibility")
# return self.perfext(*cmd)
#
# class PerfTagsTestSuite(BaseTestSuite):
# params = BaseTestSuite.params + [['cold', 'warm']]
# param_names = BaseTestSuite.param_names + ['changelog']
#
# timeout = 300
#
# @params_as_kwargs
# def track_tags(self, changelog, *args, **kwargs):
# if changelog == 'warm':
# clearcache = False
# elif changelog == 'cold':
# clearcache = True
# else:
# assert False
# cmd = ['perftags']
# if clearcache:
# cmd.append('--clear-revlogs')
# else:
# cmd.append('--no-clear-revlogs')
# data = self.perfextjson(*cmd)
# return data[None]['median']
#
# class PerfBranchmapLoadSuite(BaseTestSuite):
# params = BaseTestSuite.params + [['cold', 'warm']]
# param_names = BaseTestSuite.param_names + ['changelog']
#
# timeout = 300
#
# @params_as_kwargs
# def track_perfbranchmap_load(self, changelog, *args, **kwargs):
# if changelog == 'warm':
# clearcache = False
# elif changelog == 'cold':
# clearcache = True
# else:
# assert False
# cmd = ['perfbranchmapload']
# if clearcache:
# cmd.append('--clear-revlogs')
# else:
# cmd.append('--no-clear-revlogs')
# data = self.perfextjson(*cmd)
# if None not in data:
# return float('nan')
# return data[None]['median']
#
# class PerfBranchmapUpdateSuite(BaseTestSuite):
# params = BaseTestSuite.params + [['cold', 'warm'], [1, 100, 10000]]
# param_names = BaseTestSuite.param_names + ['caches', 'missingrevs']
#
# timeout = 300
#
# @params_as_kwargs
# def track_perfbranchmap_update(self, caches, missingrevs, *args, **kwargs):
# if caches == 'warm':
# clearcache = False
# elif caches == 'cold':
# clearcache = True
# else:
# assert False
#
# cmd = ['perfbranchmapupdate', '--quiet']
# if clearcache:
# cmd.append('--clear-caches')
# else:
# cmd.append('--no-clear-caches')
# cmd.append('--base')
# cmd.append('not -%d:' % missingrevs)
# cmd.append('--target')
# cmd.append('-%d:' % missingrevs)
# data = self.perfextjson(*cmd)
# if None not in data:
# return float('nan')
# return data[None]['median']
#
# class PerfHeadsSuite(BaseTestSuite):
# params = BaseTestSuite.params + [['unfiltered', 'visible']]
# param_names = BaseTestSuite.param_names + ['filter']
#
# timeout = 300
#
# @params_as_kwargs
# def track_perfheads(self, filter, *args, **kwargs):
# cmd = ['perfheads']
# if filter == 'unfiltered':
# cmd.append('--hidden')
# data = self.perfextjson(*cmd)
# if None not in data:
# return float('nan')
# return data[None]['median']
#
# class PerfIndexSuite(BaseTestSuite):
# params = BaseTestSuite.params + [['first', 'last']]
# param_names = BaseTestSuite.param_names + ['lookup']
#
# timeout = 300
#
# @params_as_kwargs
# def track_perfindex(self, lookup, *args, **kwargs):
# cmd = ['perfindex']
# if lookup == 'first':
# cmd.append('--rev')
# cmd.append('0')
# elif lookup == 'last':
# cmd.append('--rev')
# cmd.append('tip')
# else:
# assert False
# data = self.perfextjson(*cmd)
# if None not in data:
# return float('nan')
# return data[None]['median']