Commit 5f01c23e26e6 authored by Pierre-Yves David

runner: add a --debug mode

The debug mode displays information about the run, including the run output, and reduces the number of runs to only two.
parent 021dc2cdec6e
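The key mechanic of the change is where the child process's output goes. A minimal sketch of the pattern (hypothetical run() helper, not the actual poulpe API): in debug mode the subprocess inherits the terminal streams so its output is displayed live, otherwise both streams are captured as before.

    import subprocess
    import sys

    def run(cmd, debug=False):
        # Debug mode: let the child write directly to the current
        # stdout/stderr so its output shows up live in the terminal.
        # Normal mode: capture both streams so the runner stays quiet.
        stdout = sys.stdout if debug else subprocess.PIPE
        stderr = sys.stderr if debug else subprocess.PIPE
        return subprocess.run(cmd, stdout=stdout, stderr=stderr)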
 import json
 import os
 import subprocess
+import sys
 import tempfile
 from . import (
@@ -197,7 +198,7 @@
         # XXX missing the "variable" part
         return variables

-    def run_one(self, bin_env_path, data_env_path):
+    def run_one(self, bin_env_path, data_env_path, debug=False):
         data_env = data_mod.get_data_env(data_env_path)
         cmd = SimpleCommand(self)
@@ -205,7 +206,7 @@
         prepare = self.get_var('simple-command.prepare-run')
-        r = self._time_command(bin_env_path, data_env_path, cmd, prepare)
+        r = self._time_command(bin_env_path, data_env_path, cmd, prepare, debug=debug)
         res = {}
         # we should store more
         res['time'] = {}
@@ -216,7 +217,7 @@
         res['time']['max'] = r['results'][0]['max']
         return res

-    def _time_command(self, bin_env_path, data_env_path, cmd, prepare=None):
+    def _time_command(self, bin_env_path, data_env_path, cmd, prepare=None, debug=False):
         bin_env_path = os.path.abspath(bin_env_path)
         data_env_path = os.path.abspath(data_env_path)
         shell_path = poulpe.bin_env_script(bin_env_path)
@@ -237,19 +238,33 @@
         if cmd.accept_failure:
             time_cmd.append("--ignore-failure")
+        if debug:
+            time_cmd.append('--runs')
+            time_cmd.append('2')
+            if prepare is not None:
+                prepare = f'echo "### starting prepare ###" ; date ; {prepare}'
         if prepare is not None:
             time_cmd.append('--prepare')
             time_cmd.append(prepare)
+        command = cmd.command
+        if debug:
+            command = f'echo "### starting command ###" ; date ; {command}'
         time_cmd.append("--")
-        time_cmd.append(cmd.command)
+        time_cmd.append(command)
         cwd = data_env_path
         if cmd.cwd is not None:
             cwd = os.path.join(cwd, cmd.cwd)
+        if debug:
+            stdout = sys.stdout
+            stderr = sys.stderr
+        else:
+            stdout = subprocess.PIPE
+            stderr = subprocess.PIPE
         r = subprocess.run(
             time_cmd,
             cwd=cwd,
             env=env,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
+            stdout=stdout,
+            stderr=stderr,
         )
         if r.returncode != 0:
             raise errors.BenchmarkRunFailure(
@@ -370,7 +388,7 @@
             variables.add(key)
         return variables

-    def run_one(self, bin_env_path, data_env_path):
+    def run_one(self, bin_env_path, data_env_path, debug=False):
         bin_env_path = os.path.abspath(bin_env_path)
         data_env_path = os.path.abspath(data_env_path)
         shell_path = poulpe.bin_env_script(bin_env_path)
@@ -437,11 +455,17 @@
         if args:
             cmd.extend(args)
+        if debug:
+            stdout = sys.stdout
+            stderr = sys.stderr
+        else:
+            stdout = subprocess.PIPE
+            stderr = subprocess.PIPE
         r = subprocess.run(
             cmd,
             cwd=cwd,
             env=env,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
+            stdout=stdout,
+            stderr=stderr,
         )
         if r.returncode != 0:
             raise errors.BenchmarkRunFailure(
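For context: the flags assembled into time_cmd (--ignore-failure, --runs, --prepare, the trailing --) match hyperfine's command line, and the debug output in the test below ("Benchmark #1:", "Range ... 2 runs") looks like hyperfine's. Assuming that inference holds, the debug-mode time_cmd for the benchmark used in this test would come out roughly as:

    # Sketch only; 'hyperfine' as the underlying timer is an inference,
    # not something this diff states explicitly.
    time_cmd = [
        'hyperfine',
        '--ignore-failure',  # from simple-command.accept-failure = 1
        '--runs', '2',       # added by --debug to keep the run short
        '--',
        'echo "### starting command ###" ; date ; black --check py-files/bad.py',
    ]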
...
@@ -7,7 +7,8 @@
 @click.argument("bin_env")
 @click.argument('data_env')
 @click.argument('benchmark')
+@click.option("--debug", is_flag=True)
 @click.argument('result')
-def run_util(bin_env, data_env, benchmark, result):
+def run_util(bin_env, data_env, benchmark, result, debug=False):
     """Run an "atomic" benchmark unit from already setup environments.
     \b
@@ -23,6 +24,7 @@
         data_env,
         benchmark,
         result,
+        debug=debug,
     )
     return ret
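On the CLI side, is_flag=True turns --debug into a boolean switch that click hands to the function as a keyword argument. A self-contained sketch of the same wiring (toy command, not the real poulpe entry point):

    import click

    @click.command()
    @click.argument('result')
    @click.option('--debug', is_flag=True)  # False unless --debug is passed
    def run_util(result, debug):
        click.echo(f'result={result} debug={debug}')

    if __name__ == '__main__':
        run_util()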
...
@@ -36,7 +36,7 @@
     shutil.rmtree(tmp_data, ignore_errors=True)

-def run_one_core(bin_env_path, data_env_path, benchmark_path):
+def run_one_core(bin_env_path, data_env_path, benchmark_path, debug=False):
     result_data = {}
     result_data['run'] = {}
     result_data['run']["timestamp"] = time.time()
@@ -68,7 +68,7 @@
     result_data['data-env-vars'] = data_env_data.data_env_vars
     try:
-        bench_result = benchmark.run_one(bin_env_path, data_env_path)
+        bench_result = benchmark.run_one(bin_env_path, data_env_path, debug=debug)
         result_data['result'] = bench_result
     finally:
         cleanup_data_env(tmp_data)
@@ -77,11 +77,12 @@
     return result_data

-def run_one(bin_env_path, data_env_path, benchmark_path, result):
+def run_one(bin_env_path, data_env_path, benchmark_path, result, debug=False):
    try:
         result_data = run_one_core(
             bin_env_path,
             data_env_path,
             benchmark_path,
+            debug=debug,
         )
     except errors.MissingDataEnvInputVars as exc:
         # TODO it's not the lib's responsibility to write to stderr
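Judging from the result files shown in the test below, run_one_core accumulates a nested dict that ends up serialized into the .pbr result. An illustrative shape (field names taken from the test output; all values are placeholders):

    result_data = {
        'run': {'timestamp': 1700000000.0, 'duration': 1.2},
        'benchmark': {'name': 'black.check.tiny'},
        'data-env-vars': {'name': 'black-bench'},
        'result': {
            'time': {
                'min': 0.18, 'max': 0.21, 'mean': 0.19,
                'median': 0.19, 'standard-deviation': 0.01,
            },
        },
    }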
...
Check that we have enough simple pieces together to do a simple run
--------------------------------------------------------------
$ python -m venv $TESTTMP/
$ . $TESTTMP/bin/activate
$ pip install $TESTDIR/.. --quiet
Setup the bin-env
-----------------
$ BLACK_VERSION="18.6b4" poulpe bin-env-util setup-one bin-env-black-18.6b4 \
> $TESTDIR/test-data/setup-black.sh
$ poulpe bin-env-util show bin-env-black-18.6b4
bin-env-vars:
black:
install-method = pip
version = 18.6b4
python:
version = 3.* (glob)
poulpe-environment:
environment-type = binary
format-version = 0
setup-method = script
ready = True
Setup a data-env
----------------
(currently built by hand, as it is simple and mostly inert)
$ mkdir data-env
$ poulpe env-desc set data-env/data-env.poulpe poulpe-environment.environment-type data
creating new file: "data-env/data-env.poulpe"
$ poulpe env-desc set data-env/data-env.poulpe poulpe-environment.format-version 0
$ poulpe env-desc set data-env/data-env.poulpe poulpe-environment.setup-method manual
$ poulpe env-desc set data-env/data-env.poulpe data-env-vars.name black-bench
$ mkdir data-env/py-files
$ cat << EOF > data-env/py-files/good.py
> foo = [1, 2, 3, 4, 5]
> EOF
$ cat << EOF > data-env/py-files/bad.py
> foo = [1,
> 2,
> 3,
> 4,
> 5]
> EOF
$ poulpe env-desc set data-env/data-env.poulpe bench-input-vars.black.check.tiny.good py-files/good.py
$ poulpe env-desc set data-env/data-env.poulpe bench-input-vars.black.check.tiny.bad py-files/bad.py
(that one will be a string, so it's not good.)
$ poulpe env-desc set data-env/data-env.poulpe ready 1
$ poulpe env-desc show data-env/data-env.poulpe
bench-input-vars:
black:
check:
tiny:
bad = py-files/bad.py
good = py-files/good.py
data-env-vars:
name = black-bench
poulpe-environment:
environment-type = data
format-version = 0
setup-method = manual
ready = 1
Define a benchmark
------------------
$ poulpe env-desc set black-tiny-bad.pbd meta.format 0
creating new file: "black-tiny-bad.pbd"
$ poulpe env-desc set black-tiny-bad.pbd meta.name black.check.tiny
$ poulpe env-desc set black-tiny-bad.pbd meta.method simple-command
$ poulpe env-desc set black-tiny-bad.pbd simple-command.command "black --check {file}"
$ poulpe env-desc set black-tiny-bad.pbd simple-command.accept-failure "1"
$ poulpe env-desc set black-tiny-bad.pbd simple-command.variables.file DATA-VARS:black.check.tiny.bad
$ poulpe env-desc set black-tiny-bad.pbd simple-command.cwd .
$ poulpe env-desc show black-tiny-bad.pbd
meta:
format = 0
method = simple-command
name = black.check.tiny
simple-command:
accept-failure = 1
command = black --check {file}
cwd = .
variables:
file = DATA-VARS:black.check.tiny.bad
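The {file} placeholder in the command is filled from the data environment: DATA-VARS:black.check.tiny.bad points at the bench-input-vars entry set above. Conceptually (a sketch, not poulpe's actual resolution code):

    variables = {'file': 'py-files/bad.py'}  # bench-input-vars.black.check.tiny.bad
    command = 'black --check {file}'.format(**variables)
    # command == 'black --check py-files/bad.py'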
Run the benchmark
-----------------
$ poulpe run-util bin-env-black-18.6b4 data-env black-tiny-bad.pbd test-result-18.6b4-bad.pbr
$ poulpe env-desc show test-result-18.6b4-bad.pbr
benchmark:
name = black.check.tiny
bin-env-vars:
black:
install-method = pip
version = 18.6b4
python:
version = 3.* (glob)
data-env-vars:
name = black-bench
result:
time:
max = * (glob)
mean = * (glob)
median = * (glob)
min = * (glob)
standard-deviation = * (glob)
run:
duration = * (glob)
timestamp = * (glob)
Run the benchmark with --debug
------------------------------
It should display details about the run and limit the number of iterations.
$ poulpe run-util --debug bin-env-black-18.6b4 data-env black-tiny-bad.pbd test-result-18.6b4-bad.pbr
Benchmark #1: echo "### starting command ###" ; date ; black --check py-files/bad.py
### starting command ###
??? ??? * (glob)
would reformat py-files/bad.py
All done! * (glob)
1 file would be reformatted.
### starting command ###
??? ??? * (glob)
would reformat py-files/bad.py
All done! * (glob)
1 file would be reformatted.
Time * (glob)
Range * 2 runs (glob)
Warning: Ignoring non-zero exit code.
$ poulpe env-desc show test-result-18.6b4-bad.pbr
benchmark:
name = black.check.tiny
bin-env-vars:
black:
install-method = pip
version = 18.6b4
python:
version = 3.* (glob)
data-env-vars:
name = black-bench
result:
time:
max = * (glob)
mean = * (glob)
median = * (glob)
min = * (glob)
standard-deviation = * (glob)
run:
duration = * (glob)
timestamp = * (glob)