add tests' debug-flag, now shows last 10 ops' addresses/labels
tomhea committed Apr 28, 2023
1 parent 06998fa commit b24468e
Showing 5 changed files with 80 additions and 36 deletions.
13 changes: 8 additions & 5 deletions src/breakpoints.py
@@ -43,14 +43,17 @@ def should_break(self, ip: int, op_counter: int) -> bool:
     def get_address_str(self, address: int) -> str:
         if address in self.breakpoints and self.breakpoints[address] is not None:
             label_repr = get_nice_label_repr(self.breakpoints[address], pad=4)
-            return f'{hex(address)[2:]}:\n{label_repr}'
+            return f'{hex(address)}:\n{label_repr}'
         elif address in self.address_to_label:
             label_repr = get_nice_label_repr(self.address_to_label[address], pad=4)
-            return f'{hex(address)[2:]}:\n{label_repr}'
+            return f'{hex(address)}:\n{label_repr}'
         else:
-            address_before = max([a for a in self.address_to_label if a <= address])
-            label_repr = get_nice_label_repr(self.address_to_label[address_before], pad=4)
-            return f'{hex(address)[2:]} ({hex(address - address_before)} after:)\n{label_repr}'
+            try:
+                address_before = max(a for a in self.address_to_label if a <= address)
+                label_repr = get_nice_label_repr(self.address_to_label[address_before], pad=4)
+                return f'{hex(address)} ({hex(address - address_before)} bits after:)\n{label_repr}'
+            except ValueError:
+                return f'{hex(address)}'

     def get_message_box_body(self, ip: int, mem: fjm.Reader, op_counter: int) -> str:
         address = self.get_address_str(ip)
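
Note: the new `else` branch falls back to the nearest label at or below the given address, and only degrades to plain hex when no such label exists (the `ValueError` from `max()` on an empty sequence). A minimal standalone sketch of that lookup, with a hypothetical helper name and hypothetical label data:

```python
# Illustrative sketch only - standalone names, not the handler class above.
from typing import Dict

def describe_address(address: int, address_to_label: Dict[int, str]) -> str:
    """Readable address string, falling back to plain hex when no label precedes it."""
    if address in address_to_label:
        return f'{hex(address)}:\n    {address_to_label[address]}'
    try:
        # Nearest label at a lower-or-equal address; max() raises ValueError if none qualifies.
        address_before = max(a for a in address_to_label if a <= address)
        return f'{hex(address)} ({hex(address - address_before)} bits after:)\n    {address_to_label[address_before]}'
    except ValueError:
        return hex(address)

print(describe_address(0x128, {0x100: 'start', 0x200: 'loop'}))  # 0x128 (0x28 bits after:)  start
print(describe_address(0x80, {0x100: 'start'}))                  # no preceding label -> 0x80
```
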
2 changes: 1 addition & 1 deletion src/fj.py
@@ -119,7 +119,7 @@ def run(in_fjm_path: Path, debug_file: Path, args: argparse.Namespace, error_fun
            breakpoint_handler=breakpoint_handler
        )
        if not args.silent:
-            print(termination_statistics)
+            termination_statistics.print(labels_handler=breakpoint_handler)
    except FJReadFjmException as e:
        print()
        print(e)
41 changes: 28 additions & 13 deletions src/fjm_run.py
@@ -1,6 +1,5 @@
 from pathlib import Path
 from typing import Optional, Deque
-from collections import deque

 import fjm

@@ -28,22 +27,38 @@ def __init__(self, run_statistics: RunStatistics, termination_cause: Termination

         self.termination_cause = termination_cause

-    def __str__(self):
+    @staticmethod
+    def beautify_address(address: int, breakpoint_handler: Optional[BreakpointHandler]):
+        if not breakpoint_handler:
+            return hex(address)
+
+        return breakpoint_handler.get_address_str(address)
+
+    def print(self, *, labels_handler: Optional[BreakpointHandler] = None):
+        """
+        Prints the termination cause, run times, ops-statistics.
+        If ended not by looping - Then print the last-opcodes` addresses as well (and their label names if possible).
+        @param labels_handler: Used to find the label name for each address (from the last-opcodes` addresses).
+        """
+
         flips_percentage = self.flip_counter / self.op_counter * 100
         jumps_percentage = self.jump_counter / self.op_counter * 100

         last_ops_str = ''
-        if True or TerminationCause.RuntimeMemoryError == self.termination_cause:  # TODO remove the "if True" part.
-            last_ops_str = f'\nLast {len(self.last_ops_addresses)} ops were at:\n  ' + \
-                           '\n  '.join([hex(address) for address in self.last_ops_addresses])
-
-        return f'Finished by {str(self.termination_cause)} after {self.run_time:.3f}s ' \
-               f'(' \
-               f'{self.op_counter:,} ops executed; ' \
-               f'{flips_percentage:.2f}% flips, ' \
-               f'{jumps_percentage:.2f}% jumps' \
-               f').' \
-               f'{last_ops_str}'
+        if TerminationCause.Looping != self.termination_cause:
+            last_ops_str = f'\n\nLast {len(self.last_ops_addresses)} ops were at these addresses ' \
+                           f'(The most-recent op, the one that failed, is first):\n  ' + \
+                           '\n  '.join([self.beautify_address(address, labels_handler)
+                                        for address in self.last_ops_addresses][::-1])
+
+        print(f'Finished by {str(self.termination_cause)} after {self.run_time:.3f}s '
+              f'('
+              f'{self.op_counter:,} ops executed; '
+              f'{flips_percentage:.2f}% flips, '
+              f'{jumps_percentage:.2f}% jumps'
+              f').'
+              f'{last_ops_str}'
+              )


 def handle_input(io_device: IODevice, ip: int, mem: fjm.Reader, statistics: RunStatistics) -> None:
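
Note: `last_ops_addresses` is a bounded deque filled during the run (the commit title suggests the last 10 ops), and `print` renders it most-recent-first, beautifying each address through the labels handler when one is given. A rough standalone sketch of that pattern, with assumed names (`LAST_OPS_DEBUGGING_LEN`, `register_op`) that are not taken from the diff:

```python
from collections import deque
from typing import Callable, Deque, Optional

LAST_OPS_DEBUGGING_LEN = 10  # assumed constant; "last 10 ops" per the commit title

last_ops_addresses: Deque[int] = deque(maxlen=LAST_OPS_DEBUGGING_LEN)

def register_op(ip: int) -> None:
    # Appending past maxlen silently drops the oldest address.
    last_ops_addresses.append(ip)

def print_last_ops(beautify: Optional[Callable[[int], str]] = None) -> None:
    beautify = beautify or hex  # plain hex when no labels handler is available
    print(f'Last {len(last_ops_addresses)} ops were at these addresses '
          f'(The most-recent op, the one that failed, is first):\n  ' +
          '\n  '.join(beautify(address) for address in reversed(last_ops_addresses)))

for address in (0x100, 0x140, 0x180):
    register_op(address)
print_last_ops()  # prints 0x180, then 0x140, then 0x100
```
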
28 changes: 19 additions & 9 deletions tests/conftest.py
@@ -40,6 +40,7 @@
 RUN_ORDER_INDEX = 2


+NO_DEBUG_INFO_FLAG = 'nodebuginfo'
 ALL_FLAG = 'all'
 REGULAR_FLAG = 'regular'
 COMPILE_FLAG = 'compile'
@@ -77,31 +78,33 @@ def argument_line_iterator(csv_file_path: Path, num_of_args: int) -> Iterable[Li
                yield map(str.strip, line)


-def get_compile_tests_params_from_csv(csv_file_path: Path) -> List:
+def get_compile_tests_params_from_csv(csv_file_path: Path, save_debug_info: bool) -> List:
     """
     read the compile-tests from the csv
     @param csv_file_path: read tests from this csv
+    @param save_debug_info: should save the debugging info file
     @return: the list of pytest.params(CompileTestArgs, )
     """
     params = []

-    for line in argument_line_iterator(csv_file_path, CompileTestArgs.num_of_args):
-        args = CompileTestArgs(*line)
+    for line in argument_line_iterator(csv_file_path, CompileTestArgs.num_of_csv_line_args):
+        args = CompileTestArgs(*line, save_debug_info)
         params.append(pytest.param(args, marks=pytest.mark.run(order=COMPILE_ORDER_INDEX)))

     return params


-def get_run_tests_params_from_csv(csv_file_path: Path) -> List:
+def get_run_tests_params_from_csv(csv_file_path: Path, use_debug_info: bool) -> List:
     """
     read the run-tests from the csv
     @param csv_file_path: read tests from this csv
+    @param use_debug_info: should use the debugging info file
     @return: the list of pytest.params(RunTestArgs, depends=)
     """
     params = []

-    for line in argument_line_iterator(csv_file_path, RunTestArgs.num_of_args):
-        args = RunTestArgs(*line)
+    for line in argument_line_iterator(csv_file_path, RunTestArgs.num_of_csv_line_args):
+        args = RunTestArgs(*line, use_debug_info)
         params.append(pytest.param(args, marks=pytest.mark.run(order=RUN_ORDER_INDEX)))

     return params
@@ -115,10 +118,15 @@ def pytest_addoption(parser) -> None:
     colliding_keywords = set(TEST_TYPES) & SAVED_KEYWORDS
     assert not colliding_keywords

+    parser.addoption(f"--{NO_DEBUG_INFO_FLAG}", action="store_true",
+                     help="don't show the last executed opcodes on tests that failed during their run"
+                          "(thus the tests are ~15% faster, and takes ~half the size)."
+                          "Anyway doesn't show last executed opcodes on parallel tests.")
+
     for test_type in TEST_TYPES:
         parser.addoption(f"--{test_type}", action="store_true", help=f"run {test_type} tests")
     parser.addoption(f"--{REGULAR_FLAG}", action="store_true", help=f"run all regular tests ({', '.join(REGULAR_TYPES)})")
-    parser.addoption(f"--{ALL_FLAG}", action="store_true", help=f"run all tests")
+    parser.addoption(f"--{ALL_FLAG}", action="store_true", help="run all tests")

     parser.addoption(f"--{COMPILE_FLAG}", action='store_true', help='only test compiling .fj files')
     parser.addoption(f"--{RUN_FLAG}", action='store_true', help='only test running .fjm files')
@@ -295,20 +303,22 @@ def get_tests_from_csvs(get_option: Callable[[str], Any]) -> Tuple[List, List]:

     types_to_run__heavy_first = get_test_types_to_run__heavy_first(get_option)

+    use_debug_info = not is_parallel_active() and not get_option(NO_DEBUG_INFO_FLAG)
+
     compile_tests = []
     if check_compile_tests:
         compiles_csvs = {test_type: TESTS_PATH / f"test_compile_{test_type}.csv"
                          for test_type in types_to_run__heavy_first}
         for test_type in types_to_run__heavy_first:
-            compile_tests.extend(get_compile_tests_params_from_csv(compiles_csvs[test_type]))
+            compile_tests.extend(get_compile_tests_params_from_csv(compiles_csvs[test_type], use_debug_info))
         compile_tests = filter_by_test_name(compile_tests, get_option)

     run_tests = []
     if check_run_tests:
         run_csvs = {test_type: TESTS_PATH / f"test_run_{test_type}.csv"
                     for test_type in types_to_run__heavy_first}
         for test_type in types_to_run__heavy_first:
-            run_tests.extend(get_run_tests_params_from_csv(run_csvs[test_type]))
+            run_tests.extend(get_run_tests_params_from_csv(run_csvs[test_type], use_debug_info))
         run_tests = filter_by_test_name(run_tests, get_option)

     return compile_tests, run_tests
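
Note: `--nodebuginfo` follows the standard pytest boolean-option pattern, and `use_debug_info` is only True when the flag is absent and the tests are not running in parallel. A hedged sketch of how such a flag travels from the command line into the collection code (a generic conftest pattern; the consumer hook below is illustrative, not necessarily the one this project uses):

```python
NO_DEBUG_INFO_FLAG = 'nodebuginfo'

def pytest_addoption(parser) -> None:
    # action="store_true" makes it a boolean switch: present -> True, absent -> False.
    parser.addoption(f"--{NO_DEBUG_INFO_FLAG}", action="store_true",
                     help="skip saving/loading the .fj debugging-info files in tests")

def pytest_collection_modifyitems(config, items) -> None:
    # `pytest --nodebuginfo` sets this to True; a plain `pytest` run leaves it False.
    no_debug_info: bool = config.getoption(NO_DEBUG_INFO_FLAG)
    print(f'debug info disabled: {no_debug_info}')
```
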
32 changes: 24 additions & 8 deletions tests/test_fj.py
@@ -14,6 +14,9 @@
 CSV_BOOLEAN = (CSV_TRUE, CSV_FALSE)


+DEBUGGING_FILE_SUFFIX = '.fj_debugging_info'
+
+
 ROOT_PATH = Path(__file__).parent.parent


@@ -26,11 +29,12 @@ class CompileTestArgs:
     Arguments class for a compile test
     """

-    num_of_args = 8
+    num_of_csv_line_args = 8

     def __init__(self, test_name: str, fj_paths: str, fjm_out_path: str,
                  word_size__str: str, version__str: str, flags__str: str,
-                 use_stl__str: str, warning_as_errors__str: str):
+                 use_stl__str: str, warning_as_errors__str: str,
+                 save_debug_info: bool):
         """
         handling a line.split() from a csv file
         """
@@ -39,6 +43,8 @@ def __init__(self, test_name: str, fj_paths: str, fjm_out_path: str,
         self.use_stl = use_stl__str == CSV_TRUE
         self.warning_as_errors = warning_as_errors__str == CSV_TRUE

+        self.save_debug_info = save_debug_info
+
         self.test_name = test_name

         included_files = get_stl_paths() if self.use_stl else []
@@ -78,21 +84,27 @@ def test_compile(compile_args: CompileTestArgs) -> None:

     fjm_writer = fjm.Writer(compile_args.fjm_out_path, compile_args.word_size, compile_args.version,
                             flags=compile_args.flags, lzma_preset=lzma.PRESET_DEFAULT)
+
+    debugging_file_path = None
+    if compile_args.save_debug_info:
+        debugging_file_path = Path(f'{compile_args.fjm_out_path}{DEBUGGING_FILE_SUFFIX}')
+
     assembler.assemble(compile_args.fj_files_tuples, compile_args.word_size, fjm_writer,
                        warning_as_errors=compile_args.warning_as_errors,
-                       debugging_file_path=Path(f'{compile_args.fjm_out_path}.fj_debugging_info'))
+                       debugging_file_path=debugging_file_path)


 class RunTestArgs:
     """
     Arguments class for a run test
     """

-    num_of_args = 6
+    num_of_csv_line_args = 6

     def __init__(self, test_name: str, fjm_path: str,
                  in_file_path: str, out_file_path: str,
-                 read_in_as_binary__str: str, read_out_as_binary__str: str):
+                 read_in_as_binary__str: str, read_out_as_binary__str: str,
+                 use_debug_info: bool):
         """
         @note handling a line.split() (each is stripped) from a csv file
         """
@@ -101,6 +113,8 @@ def __init__(self, test_name: str, fjm_path: str,
         self.read_in_as_binary = read_in_as_binary__str == CSV_TRUE
         self.read_out_as_binary = read_out_as_binary__str == CSV_TRUE

+        self.use_debug_info = use_debug_info
+
         self.test_name = test_name
         self.fjm_path = ROOT_PATH / fjm_path

@@ -157,14 +171,16 @@ def test_run(run_args: RunTestArgs) -> None:

     io_device = FixedIO(run_args.get_defined_input())

-    label_to_address = load_labels_dictionary(Path(f'{run_args.fjm_path}.fj_debugging_info'), True)
-    breakpoint_handler = BreakpointHandler({}, {label_to_address[label]: label for label in label_to_address})
+    breakpoint_handler = None
+    if run_args.use_debug_info:
+        label_to_address = load_labels_dictionary(Path(f'{run_args.fjm_path}{DEBUGGING_FILE_SUFFIX}'), True)
+        breakpoint_handler = BreakpointHandler({}, {label_to_address[label]: label for label in label_to_address})

     termination_statistics = fjm_run.run(run_args.fjm_path,
                                          io_device=io_device,
                                          time_verbose=True)

-    print(termination_statistics)
+    termination_statistics.print(labels_handler=breakpoint_handler)

     expected_termination_cause = TerminationCause.Looping
     assert termination_statistics.termination_cause == expected_termination_cause
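
Note: the compile test and the run test agree on the labels-file location through the shared `DEBUGGING_FILE_SUFFIX`, with `None` meaning the file is neither written nor read. A small illustrative sketch of that convention (hypothetical helper name, paths and labels), plus the label-dictionary inversion the run test performs:

```python
from pathlib import Path
from typing import Dict, Optional

DEBUGGING_FILE_SUFFIX = '.fj_debugging_info'

def debugging_file_for(fjm_out_path: str, save_debug_info: bool) -> Optional[Path]:
    # None tells the assembler to skip writing the labels file entirely.
    if not save_debug_info:
        return None
    return Path(f'{fjm_out_path}{DEBUGGING_FILE_SUFFIX}')

assert debugging_file_for('tests/out/hello.fjm', True) == Path('tests/out/hello.fjm.fj_debugging_info')
assert debugging_file_for('tests/out/hello.fjm', False) is None

# The run test then inverts the loaded label->address mapping into the
# address->label dictionary that BreakpointHandler expects:
label_to_address: Dict[str, int] = {'start': 0x100, 'loop': 0x200}  # hypothetical labels
address_to_label: Dict[int, str] = {label_to_address[label]: label for label in label_to_address}
assert address_to_label == {0x100: 'start', 0x200: 'loop'}
```
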
