diff --git a/contrib/perf.py b/contrib/perf.py
index 45c18f7345c112318cc34cb4ae1ba854a5dd7b55_Y29udHJpYi9wZXJmLnB5..563cd9a726826e0318bde9fb588b761a404b8d56_Y29udHJpYi9wZXJmLnB5 100644
--- a/contrib/perf.py
+++ b/contrib/perf.py
@@ -15,6 +15,9 @@
 ``presleep``
   number of second to wait before any group of runs (default: 1)
 
+``pre-run``
+  number of runs to perform before starting measurement.
+
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
   should be a list of `<time>-<numberofrun>` pairs. After each run the
@@ -240,6 +243,9 @@
     configitem(b'perf', b'all-timing',
         default=mercurial.configitems.dynamicdefault,
     )
+    configitem(b'perf', b'pre-run',
+        default=mercurial.configitems.dynamicdefault,
+    )
     configitem(b'perf', b'run-limits',
         default=mercurial.configitems.dynamicdefault,
     )
@@ -341,7 +347,9 @@
     if not limits:
         limits = DEFAULTLIMITS
 
-    t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
+    prerun = getint(ui, b"perf", b"pre-run", 0)
+    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
+                          prerun=prerun)
     return t, fm
 
 def stub_timer(fm, func, setup=None, title=None):
@@ -368,8 +376,8 @@
 )
 
 def _timer(fm, func, setup=None, title=None, displayall=False,
-           limits=DEFAULTLIMITS):
+           limits=DEFAULTLIMITS, prerun=0):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
@@ -372,7 +380,11 @@
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
+    for i in xrange(prerun):
+        if setup is not None:
+            setup()
+        func()
     keepgoing = True
     while keepgoing:
         if setup is not None:
diff --git a/tests/test-contrib-perf.t b/tests/test-contrib-perf.t
index 45c18f7345c112318cc34cb4ae1ba854a5dd7b55_dGVzdHMvdGVzdC1jb250cmliLXBlcmYudA==..563cd9a726826e0318bde9fb588b761a404b8d56_dGVzdHMvdGVzdC1jb250cmliLXBlcmYudA== 100644
--- a/tests/test-contrib-perf.t
+++ b/tests/test-contrib-perf.t
@@ -55,6 +55,9 @@
   "presleep"
     number of second to wait before any group of runs (default: 1)
 
+  "pre-run"
+    number of runs to perform before starting measurement.
+
   "run-limits"
     Control the number of runs each benchmark will perform. The option value
     should be a list of '<time>-<numberofrun>' pairs. After each run the
@@ -327,6 +330,25 @@
   }
  ]
 
+Test pre-run feature
+--------------------
+
+(perf discovery has some spurious output)
+
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  searching for changes
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  searching for changes
+  searching for changes
+  searching for changes
+
 Check perf.py for historical portability
 ----------------------------------------
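
The heart of the change is the warm-up loop added to ``_timer``: before any timed iteration, ``setup()`` (when provided) and ``func()`` are executed ``prerun`` times and their results discarded, so caches and lazily initialized state are already primed when measurement begins. Below is a minimal, self-contained sketch of that pattern; the ``measure()`` helper, its parameter names, and the use of ``time.perf_counter`` are illustrative assumptions, not the actual contrib/perf.py code::

  import time

  def measure(func, setup=None, warmup=0, runs=3):
      """Return the best wall-clock time over ``runs`` timed calls to ``func``.

      ``warmup`` extra calls are made first and their timings discarded,
      mirroring what perf.pre-run does for the real benchmarks.  Illustrative
      only; this helper is not part of contrib/perf.py.
      """
      # warm-up phase: run setup + func, record nothing
      for _ in range(warmup):
          if setup is not None:
              setup()
          func()
      # measured phase: keep the fastest observed wall time
      best = float('inf')
      for _ in range(runs):
          if setup is not None:
              setup()
          start = time.perf_counter()
          func()
          best = min(best, time.perf_counter() - start)
      return best

  if __name__ == '__main__':
      # example: two discarded warm-up runs, then three measured runs
      print(measure(lambda: sum(range(100000)), warmup=2))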