Commit 3293086ff663 authored by Pierre-Yves David
perf: add an option to profile the benchmark section

Running a perf command with --profile gathers data for the whole command
execution, including setup and cleanup. This can significantly alter the data.

To work around this, we introduce a new option that triggers profiling of only
one iteration of the benchmarked section.
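
For example, any perf command can opt in through configuration; the invocation
below is the one exercised by the test added in this commit:

  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes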
parent 563cd9a72682
--- a/contrib/perf.py
+++ b/contrib/perf.py
@@ -18,6 +18,10 @@
 ``pre-run``
   number of run to perform before starting measurement.
 
+``profile-benchmark``
+  Enable profiling for the benchmarked section.
+  (The first iteration is benchmarked)
+
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
   should be a list of `<time>-<numberofrun>` pairs. After each run the
@@ -109,6 +113,10 @@
 except ImportError:
     pass
 
+try:
+    from mercurial import profiling
+except ImportError:
+    profiling = None
+
 def identity(a):
     return a
@@ -246,6 +254,9 @@
 configitem(b'perf', b'pre-run',
     default=mercurial.configitems.dynamicdefault,
 )
+configitem(b'perf', b'profile-benchmark',
+    default=mercurial.configitems.dynamicdefault,
+)
 configitem(b'perf', b'run-limits',
     default=mercurial.configitems.dynamicdefault,
 )
@@ -257,6 +268,13 @@
         return lambda x: 1
     return len
 
+class noop(object):
+    """dummy context manager"""
+    def __enter__(self):
+        pass
+    def __exit__(self, *args):
+        pass
+
 def gettimer(ui, opts=None):
     """return a timer function and formatter: (timer, formatter)
@@ -347,5 +365,10 @@
     if not limits:
         limits = DEFAULTLIMITS
 
+    profiler = None
+    if profiling is not None:
+        if ui.configbool(b"perf", b"profile-benchmark", False):
+            profiler = profiling.profile(ui)
+
     prerun = getint(ui, b"perf", b"pre-run", 0)
     t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
@@ -350,6 +373,6 @@
     prerun = getint(ui, b"perf", b"pre-run", 0)
     t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
-                          prerun=prerun)
+                          prerun=prerun, profiler=profiler)
     return t, fm
 
 def stub_timer(fm, func, setup=None, title=None):
@@ -376,8 +399,8 @@
 )
 
 def _timer(fm, func, setup=None, title=None, displayall=False,
-           limits=DEFAULTLIMITS, prerun=0):
+           limits=DEFAULTLIMITS, prerun=0, profiler=None):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
@@ -380,7 +403,9 @@
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
+    if profiler is None:
+        profiler = noop()
     for i in xrange(prerun):
         if setup is not None:
             setup()
@@ -389,8 +414,9 @@
     while keepgoing:
         if setup is not None:
             setup()
-        with timeone() as item:
-            r = func()
+        with profiler:
+            with timeone() as item:
+                r = func()
         count += 1
         results.append(item[0])
     cstop = util.timer()
--- a/tests/test-contrib-perf.t
+++ b/tests/test-contrib-perf.t
@@ -58,6 +58,10 @@
   "pre-run"
     number of run to perform before starting measurement.
 
+  "profile-benchmark"
+    Enable profiling for the benchmarked section. (The first iteration is
+    benchmarked)
+
   "run-limits"
     Control the number of runs each benchmark will perform. The option value
     should be a list of '<time>-<numberofrun>' pairs. After each run the
@@ -349,6 +353,15 @@
   searching for changes
   searching for changes
 
+test profile-benchmark option
+------------------------------
+
+Function to check that statprof ran
+  $ statprofran () {
+  >   egrep 'Sample count:|No samples recorded' > /dev/null
+  > }
+
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
+
 Check perf.py for historical portability
 ----------------------------------------
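For readers skimming the diff, a minimal standalone sketch of the pattern
follows. The bench harness and its names are illustrative, not perf.py's API;
only the noop idea and the placement of `with profiler:` mirror the change
above:

  import time

  class noop(object):
      """Dummy context manager, as in the diff above: it lets the timing
      loop say `with profiler:` unconditionally when profiling is off."""
      def __enter__(self):
          pass
      def __exit__(self, *args):
          pass

  def bench(func, iterations=3, profiler=None):
      # Illustrative harness, not perf.py itself. The profiler wraps only
      # the measured call, and only the first iteration is profiled,
      # matching the documented "(The first iteration is benchmarked)".
      if profiler is None:
          profiler = noop()
      r, timings = None, []
      for _ in range(iterations):
          with profiler:
              start = time.perf_counter()
              r = func()
              timings.append(time.perf_counter() - start)
          profiler = noop()  # later iterations run unprofiled
      return r, timings

  if __name__ == "__main__":
      import cProfile
      prof = cProfile.Profile()  # usable as a context manager on Python 3.8+
      r, timings = bench(lambda: sum(range(100000)), profiler=prof)
      prof.print_stats()
      print(r, timings)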