diff --git a/contrib/perf.py b/contrib/perf.py
index 563cd9a726826e0318bde9fb588b761a404b8d56_Y29udHJpYi9wZXJmLnB5..3293086ff66326d423e124a5f9ea7076fb0b685d_Y29udHJpYi9wZXJmLnB5 100644
--- a/contrib/perf.py
+++ b/contrib/perf.py
@@ -18,6 +18,10 @@
 ``pre-run``
   number of run to perform before starting measurement.
 
+``profile-benchmark``
+  Enable profiling for the benchmarked section.
+  (The first iteration is benchmarked)
+
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
   should be a list of `<time>-<numberofrun>` pairs. After each run the
@@ -109,6 +113,10 @@
 except ImportError:
     pass
 
+try:
+    from mercurial import profiling
+except ImportError:
+    profiling = None
 
 def identity(a):
     return a
@@ -246,6 +254,9 @@
     configitem(b'perf', b'pre-run',
         default=mercurial.configitems.dynamicdefault,
     )
+    configitem(b'perf', b'profile-benchmark',
+        default=mercurial.configitems.dynamicdefault,
+    )
     configitem(b'perf', b'run-limits',
         default=mercurial.configitems.dynamicdefault,
     )
@@ -257,6 +268,13 @@
         return lambda x: 1
     return len
 
+class noop(object):
+    """dummy context manager"""
+    def __enter__(self):
+        pass
+    def __exit__(self, *args):
+        pass
+
 def gettimer(ui, opts=None):
     """return a timer function and formatter: (timer, formatter)
 
@@ -347,5 +365,10 @@
     if not limits:
         limits = DEFAULTLIMITS
 
+    profiler = None
+    if profiling is not None:
+        if ui.configbool(b"perf", b"profile-benchmark", False):
+            profiler = profiling.profile(ui)
+
     prerun = getint(ui, b"perf", b"pre-run", 0)
     t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
@@ -350,6 +373,6 @@
     prerun = getint(ui, b"perf", b"pre-run", 0)
     t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
-                          prerun=prerun)
+                          prerun=prerun, profiler=profiler)
     return t, fm
 
 def stub_timer(fm, func, setup=None, title=None):
@@ -376,8 +399,8 @@
 )
 
 def _timer(fm, func, setup=None, title=None, displayall=False,
-           limits=DEFAULTLIMITS, prerun=0):
+           limits=DEFAULTLIMITS, prerun=0, profiler=None):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
@@ -380,7 +403,9 @@
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
+    if profiler is None:
+        profiler = noop()
     for i in xrange(prerun):
         if setup is not None:
             setup()
@@ -389,8 +414,9 @@
     while keepgoing:
         if setup is not None:
             setup()
-        with timeone() as item:
-            r = func()
+        with profiler:
+            with timeone() as item:
+                r = func()
         count += 1
         results.append(item[0])
         cstop = util.timer()
diff --git a/tests/test-contrib-perf.t b/tests/test-contrib-perf.t
index 563cd9a726826e0318bde9fb588b761a404b8d56_dGVzdHMvdGVzdC1jb250cmliLXBlcmYudA==..3293086ff66326d423e124a5f9ea7076fb0b685d_dGVzdHMvdGVzdC1jb250cmliLXBlcmYudA== 100644
--- a/tests/test-contrib-perf.t
+++ b/tests/test-contrib-perf.t
@@ -58,6 +58,10 @@
   "pre-run"
       number of run to perform before starting measurement.
 
+  "profile-benchmark"
+      Enable profiling for the benchmarked section. (The first iteration is
+      benchmarked)
+
   "run-limits"
       Control the number of runs each benchmark will perform. The option value
       should be a list of '<time>-<numberofrun>' pairs. After each run the
@@ -349,6 +353,15 @@
   searching for changes
   searching for changes
 
+test profile-benchmark option
+------------------------------
+
+Function to check that statprof ran
+  $ statprofran () {
+  >   egrep 'Sample count:|No samples recorded' > /dev/null
+  > }
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
+
 Check perf.py for historical portability
 ----------------------------------------
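
Usage sketch (illustration only, mirroring the test above): assuming contrib/perf.py is
loaded as an extension and Mercurial's default statprof-based profiler is active, the new
option can be switched on for a single run with --config; the profiler report (the
"Sample count:" summary that the statprofran helper greps for) is then written alongside
the timing output:

  $ hg perfdiscovery . --config perf.profile-benchmark=yes

The extra perf.stub and perf.run-limits overrides used in the test are only needed inside
the test harness, whose hgrc enables stub mode.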