Merge PR #73: Move to argparse and group command-line options in a more helpful way
smarr committed Jun 6, 2018
2 parents 20c27cb + a120b37 commit 32f64a6
Showing 3 changed files with 113 additions and 98 deletions.
200 changes: 107 additions & 93 deletions rebench/rebench.py
@@ -29,8 +29,7 @@
# IN THE SOFTWARE.
import logging
import sys

from optparse import OptionParser, OptionGroup
from argparse import ArgumentParser, RawDescriptionHelpFormatter, SUPPRESS

from .executor import Executor, BatchScheduler, RoundRobinScheduler, \
RandomScheduler
@@ -48,119 +47,133 @@ def __init__(self):
self._config = None

def shell_options(self):
usage = """%prog [options] <config> [run_name] [vm:$]* [s:$]*
usage = """%(prog)s [options] <config> [exp_name] [vm:$]* [s:$]*
Argument:
config required argument, file containing the run definition to be executed
run_name optional argument, the name of a run definition
config required argument, file containing the experiment to be executed
exp_name optional argument, the name of an experiment definition
from the config file
vm:$ filter runs to only include the named VM, example: vm:VM1 vm:VM3
s:$ filter runs to only include the named suite and possibly benchmark
vm:$ filter experiments to only include the named VM, example: vm:VM1 vm:VM3
s:$ filter experiments to only include the named suite and possibly benchmark
example: s:Suite1 s:Suite2:Bench3
Note, filters are combined with `or` semantics in the same group,
i.e., vm or suite, and at least one filter needs to match per group.
"""

options = OptionParser(usage=usage, version="%prog " + self.version)

options.add_option("-q", "--quick", action="store_true", dest="quick",
help="Do a quick benchmark run instead of a full, "
"statistical rigorous experiment.",
default=False)
options.add_option("-d", "--debug", action="store_true", dest="debug",
default=False, help="Enable debug output.")
options.add_option("-f", "--faulty", action="store_true",
dest="include_faulty", default=False,
help="Include results of faulty or failing runs")
options.add_option("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="Out more details in the report.")

options.add_option("-N", "--without-nice", action="store_false",
dest="use_nice",
help="Used for debugging and environments without "
" the tool nice.",
default=True)
options.add_option("-s", "--scheduler", action="store", type="string",
dest="scheduler", default="batch", help="execution "
"order of benchmarks: batch, round-robin, random "
"[default: %default]")
options.add_option("-o", "--out", dest="output_file", default=None,
help="Report is saved to the given file. "
"The report is always verbose.")
options.add_option("-c", "--clean", action="store_true", dest="clean",
default=False,
help="Discard old data from the data file "
"(configured in the run description).")
options.add_option("-r", "--rerun", action="store_true",
dest="do_rerun", default=False,
help="Rerun selected experiments, " +
"and discard old data from data file.")

# now here some thing which have to be passed in to make codespeed
# reporting complete
codespeed = OptionGroup(options, "Reporting to Codespeed",
"Some of these parameters are mandatory for "
"reporting to codespeed")
codespeed.add_option("--commit-id", dest="commit_id", default=None,
help="MANDATORY: when codespeed reporting is "
" used, the commit-id has to be specified.")
codespeed.add_option("--environment", dest="environment",
default=None,
help="MANDATORY: name the machine on which the "
"results are obtained.")
codespeed.add_option("--branch", dest="branch",
default="HEAD",
help="The branch for which the results have to be "
"recorded, i.e., to which the commit belongs."
" Default: HEAD")
codespeed.add_option("--executable", dest="executable",
default=None,
help="The executable name given to codespeed. "
"Default: The name used for the virtual "
"machine.")
codespeed.add_option("--project", dest="project",
default=None,
help="The project name given to codespeed. "
"Default: Value given in the config file.")
codespeed.add_option("-I", "--disable-inc-report",
action="store_false", dest="report_incrementally",
default=True, help="Does a final report at the "
"end instead of reporting "
"incrementally.")
codespeed.add_option("-S", "--disable-codespeed",
action="store_false", dest="use_codespeed",
default=True, help="Override configuration and "
"disable reporting to codespeed.")
parser = ArgumentParser(
usage=usage, add_help=False,
formatter_class=RawDescriptionHelpFormatter)

parser.add_argument('config', nargs=1, help=SUPPRESS)
parser.add_argument('exp_filter', nargs='*', help=SUPPRESS)

basics = parser.add_argument_group('Basic Options')
basics.add_argument('-h', '--help', action='help',
help='Show this help message and exit')
basics.add_argument('--version', action='version',
version="%(prog)s " + self.version)
basics.add_argument('-d', '--debug', action='store_true', dest='debug',
default=False, help='Enable debug output')
basics.add_argument('-v', '--verbose', action='store_true',
dest='verbose', default=False,
help='Output more details in the report.')

execution = parser.add_argument_group(
'Execution Options', 'Adapt how ReBench executes benchmarks')
execution.add_argument(
'-q', '--quick', action='store_true', dest='quick',
help='Do a quick benchmark run instead of a full, '
'statistically rigorous experiment.',
default=False)
execution.add_argument(
'-N', '--without-nice', action='store_false', dest='use_nice',
help='Used for debugging and environments without the tool nice.',
default=True)
execution.add_argument(
'-s', '--scheduler', action='store', dest='scheduler',
default='batch',
help='execution order of benchmarks: '
'batch, round-robin, random [default: %(default)s]')

data = parser.add_argument_group(
'Data and Reporting',
'Configure how recorded data is handled and reported')
data.add_argument('-c', '--clean', action='store_true', dest='clean',
default=False,
help='Discard old data from the data file '
'(configured in the experiment).')
data.add_argument('-r', '--rerun', action='store_true',
dest='do_rerun', default=False,
help='Rerun experiments, ' +
'and discard old data from data file.')
data.add_argument('-f', '--faulty', action='store_true',
dest='include_faulty', default=False,
help='Include results of faulty or failing runs')
data.add_argument('-o', '--out', dest='output_file', default=None,
help='Report is saved to the given file. '
'The report is always verbose.')

codespeed = parser.add_argument_group(
'Reporting to Codespeed',
'Some of these parameters are mandatory for reporting to Codespeed')
codespeed.add_argument('--commit-id', dest='commit_id', default=None,
help='MANDATORY: when codespeed reporting is '
'used, the commit-id has to be specified.')
codespeed.add_argument('--environment', dest='environment',
default=None,
help='MANDATORY: name the machine on which the '
'results are obtained.')
codespeed.add_argument('--branch', dest='branch',
default='HEAD',
help='The branch for which the results have to '
'be recorded, i.e., to which the commit'
' belongs. Default: HEAD')
codespeed.add_argument('--executable', dest='executable',
default=None,
help='The executable name given to codespeed. '
'Default: The name used for the virtual '
'machine.')
codespeed.add_argument('--project', dest='project',
default=None,
help='The project name given to codespeed. '
'Default: Value given in the config file.')
codespeed.add_argument('-I', '--disable-inc-report',
action='store_false', dest='report_incrementally',
default=True, help='Does a final report at the '
'end instead of reporting '
'incrementally.')
codespeed.add_argument('-S', '--disable-codespeed',
action='store_false', dest='use_codespeed',
default=True,
help='Override configuration and '
'disable reporting to codespeed.')

options.add_option_group(codespeed)
return options
return parser

def run(self, argv = None):
if argv is None:
argv = sys.argv

data_store = DataStore()
cli_options, args = self.shell_options().parse_args(argv[1:])
if len(args) < 1:
logging.error("<config> is a mandatory parameter and was not given."
"See --help for more information.")
sys.exit(-1)
opt_parser = self.shell_options()
args = opt_parser.parse_args(argv[1:])

cli_reporter = CliReporter(cli_options.verbose)
cli_reporter = CliReporter(args.verbose)

# interpret remaining args
exp_name = args[1] if len(args) > 1 and (
not args[1].startswith("vm:") and
not args[1].startswith("s:")) else "all"
run_filter = [f for f in args if (f.startswith("vm:") or
f.startswith("s:"))]
exp_filter = args.exp_filter
exp_name = exp_filter[0] if len(exp_filter) >= 1 and (
not exp_filter[0].startswith("vm:") and
not exp_filter[0].startswith("s:")) else "all"
exp_filter = [f for f in exp_filter if (f.startswith("vm:") or
f.startswith("s:"))]

try:
self._config = Configurator(args[0], data_store, cli_options,
config_filename = args.config[0]
self._config = Configurator(config_filename, data_store, args,
cli_reporter, exp_name, None,
run_filter)
exp_filter)
except ConfigurationError as e:
logging.error(e.message)
sys.exit(-1)
@@ -196,5 +209,6 @@ def main_func():
logging.info("Aborted by user request")
return -1


if __name__ == "__main__":
sys.exit(main_func())
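
The heart of the change above: optparse's parse_args returns an (options, args) tuple and leaves positional checking to the caller, whereas argparse declares positionals up front, validates them itself, and returns a single namespace; add_argument_group replaces OptionGroup and also structures the --help output. A minimal self-contained sketch of the pattern, with the option set trimmed down for illustration:

    from argparse import ArgumentParser, RawDescriptionHelpFormatter, SUPPRESS

    usage = '%(prog)s [options] <config> [exp_name] [vm:$]* [s:$]*'
    parser = ArgumentParser(usage=usage, add_help=False,
                            formatter_class=RawDescriptionHelpFormatter)

    # positionals are validated by argparse itself; SUPPRESS hides them from
    # the option listing, since the custom usage string already documents them
    parser.add_argument('config', nargs=1, help=SUPPRESS)
    parser.add_argument('exp_filter', nargs='*', help=SUPPRESS)

    basics = parser.add_argument_group('Basic Options')
    basics.add_argument('-d', '--debug', action='store_true', default=False,
                        help='Enable debug output')

    args = parser.parse_args(['rebench.conf', 'vm:VM1', 's:Suite1'])
    print(args.config[0])   # rebench.conf
    print(args.exp_filter)  # ['vm:VM1', 's:Suite1']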
6 changes: 3 additions & 3 deletions rebench/tests/executor_test.py
@@ -39,7 +39,7 @@ def setUp(self):
def test_setup_and_run_benchmark(self):
# before executing the benchmark, we override stuff in subprocess for testing
subprocess.Popen = Popen_override
options = ReBench().shell_options().parse_args([])[0]
options = ReBench().shell_options().parse_args(['dummy'])

cnf = Configurator(self._path + '/test.conf', DataStore(), options,
None, 'Test', standard_data_file = self._tmp_file)
@@ -67,7 +67,7 @@ def test_exit(val):
sys.exit = test_exit

try:
options = ReBench().shell_options().parse_args([])[0]
options = ReBench().shell_options().parse_args(['dummy'])
cnf = Configurator(self._path + '/test.conf', DataStore(), options,
None, 'TestBrokenCommandFormat',
standard_data_file=self._tmp_file)
@@ -85,7 +85,7 @@ def test_exit(val):
sys.exit = test_exit

try:
options = ReBench().shell_options().parse_args([])[0]
options = ReBench().shell_options().parse_args(['dummy'])
cnf = Configurator(self._path + '/test.conf', DataStore(), options,
None, 'TestBrokenCommandFormat2',
standard_data_file=self._tmp_file)
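
The test updates above and in the file below follow directly from the switch: argparse makes the <config> positional mandatory, so parse_args([]) now exits with a usage error instead of returning an empty options object, and the result is a namespace rather than an (options, args) tuple. A quick sketch of the difference (the placeholder name 'dummy' is arbitrary):

    # old (optparse): options were the first element of a returned tuple
    # options = ReBench().shell_options().parse_args([])[0]
    # new (argparse): a namespace, and a config argument must be supplied
    options = ReBench().shell_options().parse_args(['dummy'])
    assert options.config == ['dummy']  # nargs=1 yields a one-element list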
5 changes: 3 additions & 2 deletions rebench/tests/features/issue_34_accept_faulty_runs_test.py
@@ -48,10 +48,11 @@ def test_faulty_runs_rejected_without_switch(self):
self.assertEqual(0, runs[2].get_number_of_data_points())

def test_parse_command_switch(self):
options = ReBench().shell_options().parse_args(["--faulty"])[0]
options = ReBench().shell_options().parse_args(
["--faulty", "test.conf"])
self.assertTrue(options.include_faulty)

options = ReBench().shell_options().parse_args([])[0]
options = ReBench().shell_options().parse_args(["test.conf"])
self.assertFalse(options.include_faulty)

def test_faulty_runs_accepted_with_switch(self):
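
As a companion to the filter syntax documented in the new usage string (`or` semantics within a group, and each group must be satisfied), here is a hypothetical predicate illustrating those semantics; it is a sketch, not ReBench's actual implementation:

    def run_matches(vm_name, suite_name, bench_name, filters):
        # split the command-line filters into their two groups
        vm_filters = [f[3:] for f in filters if f.startswith('vm:')]
        suite_filters = [f[2:] for f in filters if f.startswith('s:')]
        # `or` within a group; an empty group imposes no constraint
        vm_ok = not vm_filters or vm_name in vm_filters
        # a suite filter names a suite, or a suite:benchmark pair
        suite_ok = not suite_filters or any(
            f == suite_name or f == suite_name + ':' + bench_name
            for f in suite_filters)
        # groups combine with `and`: every group must be satisfied
        return vm_ok and suite_ok

    # e.g., run_matches('VM1', 'Suite1', 'Bench3', ['vm:VM1', 's:Suite2:Bench3'])
    # is False: the VM group matches, but the suite group does not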
