# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# NOTE(review): reconstructed from a whitespace-mangled, truncated diff.
# The companion change in this patch also registers the tool in
# lisa/tools/__init__.py (adds `from .sysbench import Sysbench` and the
# "Sysbench" entry in __all__, in alphabetical order after Swap).
import re
from dataclasses import dataclass, field
from typing import Any, Dict, List, cast

from lisa.executable import Tool
from lisa.operating_system import Posix
from lisa.util import LisaException
from lisa.util.process import ExecutableResult


@dataclass
class SysbenchTestResult:
    """Outcome of one sysbench run: exit code plus parsed metrics."""

    exit_code: int = 0
    # FIX: original had an un-annotated class attribute `result = {}`,
    # i.e. not a dataclass field at all and a single dict shared by every
    # instance. Make it a proper per-instance field.
    result: Dict[Any, Any] = field(default_factory=dict)


class Sysbench(Tool):
    """Wrapper for the `sysbench` benchmark tool (cpu / fileio / memory)."""

    @property
    def command(self) -> str:
        return "sysbench"

    @property
    def can_install(self) -> bool:
        return True

    def install(self) -> bool:
        # sysbench is available from the distro package manager on Posix.
        posix_os: Posix = cast(Posix, self.node.os)
        posix_os.install_packages("sysbench")
        return self._check_exists()

    def __run_sysbench_test(
        self,
        test_type: str,
        threads: int,
        events: int,
        verbosity: int,
        time_limit: int,
        percentile: int,
        debug: str,
        histogram: str,
        validation: str,
        extra_args: str = "",
    ) -> ExecutableResult:
        """Run `sysbench <test_type> run` with the common option set.

        `extra_args` carries the per-test options (cpu/fileio/memory).
        """
        args = (
            f"{test_type} run --threads={threads} --events={events}"
            f" --validate={validation} --percentile={percentile}"
            f" --verbosity={verbosity} --histogram={histogram}"
            f" --time={time_limit} --debug={debug} {extra_args}"
        )
        result: ExecutableResult = self.run(args)
        return result

    def run_cpu_perf(
        self,
        threads: int = 1,
        events: int = 0,
        cpu_max_prime: int = 10000,
        verbosity: int = 5,
        time_limit: int = 10,
        percentile: int = 95,
        debug: str = "on",
        histogram: str = "on",
        validation: str = "on",
    ) -> SysbenchTestResult:
        """Run the sysbench `cpu` test and parse its output.

        Returns a SysbenchTestResult whose `result` dict is empty when the
        sysbench process exits non-zero.
        """
        extra_args: str = f" --cpu-max-prime={cpu_max_prime}"
        res = self.__run_sysbench_test(
            test_type="cpu",
            threads=threads,
            events=events,
            verbosity=verbosity,
            time_limit=time_limit,
            percentile=percentile,
            debug=debug,
            histogram=histogram,
            validation=validation,
            extra_args=extra_args,
        )
        result: Dict[Any, Any] = {}
        if res.exit_code == 0:
            result = self.__process_cpu_perf_result(res.stdout)

        test_result = SysbenchTestResult()
        test_result.exit_code = res.exit_code
        test_result.result = result
        return test_result

    def run_fileio_perf(
        self,
        test_mode: str,
        ops: str,
        threads: int = 1,
        events: int = 0,
        verbosity: int = 5,
        time_limit: int = 10,
        percentile: int = 95,
        debug: str = "on",
        histogram: str = "on",
        validation: str = "off",
        file_io_mode: str = "sync",
        file_fsync_all: str = "off",
        file_fsync_end: str = "on",
        file_fsync_mode: str = "fsync",
        total_file: int = 128,
        block_size_in_kb: int = 16,
        file_total_size_in_gb: int = 2,
        file_async_backlog: int = 128,
        file_fsync_freq: int = 100,
        file_merged_requests: int = 0,
        file_rw_ratio: float = 1.5,
        # FIX: was annotated `-> ExecutableResult` but returns SysbenchTestResult.
    ) -> SysbenchTestResult:
        """Run the sysbench `fileio` test (prepare / run / cleanup).

        Raises LisaException for an invalid test_mode, file_io_mode or
        file_fsync_mode before any file is created.
        """
        total_size = file_total_size_in_gb * 1024 * 1024 * 1024
        block_size = block_size_in_kb * 1024
        valid_io_mode: List[str] = ["sync", "async", "mmap"]
        valid_fsync_mode: List[str] = ["fsync", "fdatasync"]
        valid_test_mode: List[str] = [
            "seqwr",
            "seqrewr",
            "seqrd",
            "rndrd",
            "rndwr",
            "rndrw",
        ]

        if test_mode not in valid_test_mode:
            raise LisaException(
                f"Invalid test_mode. Valid test_mode: {valid_test_mode}"
            )
        elif file_io_mode not in valid_io_mode:
            raise LisaException(
                f"Invalid file_io_mode. Valid file_io_mode: {valid_io_mode}"
            )
        elif file_fsync_mode not in valid_fsync_mode:
            raise LisaException(
                f"Invalid file_fsync_mode. Valid file_fsync_mode: {valid_fsync_mode}"
            )

        extra_args: str = (
            f" --file-test-mode={test_mode} --file-num={total_file}"
            f" --file-block-size={block_size} --file-total-size={total_size}"
            f" --file-io-mode={file_io_mode} --file-async-backlog={file_async_backlog}"
            f" --file-fsync-freq={file_fsync_freq} --file-fsync-all={file_fsync_all}"
            f" --file-fsync-end={file_fsync_end} --file-fsync-mode={file_fsync_mode}"
            f" --file-merged-requests={file_merged_requests}"
            f" --file-rw-ratio={file_rw_ratio}"
        )

        # Test files must exist before `fileio run` and be removed afterwards.
        self.run(
            f"fileio prepare --file-total-size={total_size} --file-num={total_file}",
            force_run=True,
        )

        res = self.__run_sysbench_test(
            test_type="fileio",
            threads=threads,
            events=events,
            verbosity=verbosity,
            time_limit=time_limit,
            percentile=percentile,
            debug=debug,
            histogram=histogram,
            validation=validation,
            extra_args=extra_args,
        )

        self.run(
            f"fileio cleanup --file-total-size={total_size} --file-num={total_file}",
            force_run=True,
        )

        result: Dict[Any, Any] = {}
        if res.exit_code == 0:
            result = self.__process_fileio_perf_result(res.stdout, ops)

        test_result = SysbenchTestResult()
        test_result.exit_code = res.exit_code
        test_result.result = result
        return test_result

    def run_memory_perf(
        self,
        threads: int = 1,
        events: int = 0,
        verbosity: int = 5,
        time_limit: int = 10,
        percentile: int = 95,
        debug: str = "on",
        histogram: str = "on",
        validation: str = "on",
        memory_block_size_in_kb: int = 1,
        memory_total_size_in_gb: int = 100,
        memory_scope: str = "global",
        memory_hugetlb: str = "off",
        memory_oper: str = "write",
        memory_access_mode: str = "seq",
        # FIX: was annotated `-> ExecutableResult` but returns SysbenchTestResult.
    ) -> SysbenchTestResult:
        """Run the sysbench `memory` test and parse its output.

        Raises LisaException for an invalid memory_scope, memory_oper or
        memory_access_mode.
        """
        block_size = memory_block_size_in_kb * 1024
        total_mem_size = memory_total_size_in_gb * 1024 * 1024 * 1024

        valid_mem_scope: List[str] = ["global", "local"]
        valid_mem_operation: List[str] = ["read", "write", "none"]
        valid_mem_access_mode: List[str] = ["seq", "rnd"]

        if memory_scope not in valid_mem_scope:
            raise LisaException(
                f"Invalid memory_scope. Valid memory_scope: {valid_mem_scope}"
            )
        elif memory_oper not in valid_mem_operation:
            raise LisaException(
                f"Invalid memory_oper. Valid memory_oper: {valid_mem_operation}"
            )
        elif memory_access_mode not in valid_mem_access_mode:
            raise LisaException(
                "Invalid memory_access_mode."
                f"Valid memory_access_mode: {valid_mem_access_mode}"
            )

        extra_args: str = (
            f" --memory-block-size={block_size} --memory-total-size={total_mem_size}"
            f" --memory-scope={memory_scope} --memory-hugetlb={memory_hugetlb}"
            f" --memory-oper={memory_oper} --memory-access-mode={memory_access_mode}"
        )

        res = self.__run_sysbench_test(
            test_type="memory",
            threads=threads,
            events=events,
            verbosity=verbosity,
            time_limit=time_limit,
            percentile=percentile,
            debug=debug,
            histogram=histogram,
            validation=validation,
            extra_args=extra_args,
        )

        result: Dict[Any, Any] = {}
        if res.exit_code == 0:
            result = self.__process_memory_perf_result(res.stdout)

        test_result = SysbenchTestResult()
        test_result.exit_code = res.exit_code
        test_result.result = result
        return test_result

    def __process_perf_result(
        self,
        data: str,
    ) -> Dict[Any, Any]:
        """Parse sysbench stdout into a metrics dict.

        Sample Output
        ================
        General statistics:
            total time:                          10.0005s
            total number of events:              27617

        Latency (ms):
                 min:                                    0.33
                 avg:                                    0.36
                 max:                                   10.14
                 95th percentile:                        0.43
                 sum:                                 9988.94

        Threads fairness:
            events (avg/stddev):           27617.0000/0.00
            execution time (avg/stddev):   9.9889/0.00

        DEBUG: Verbose per-thread statistics:

        DEBUG: thread # 0: min: 0.0003s avg: 0.0004s max: 0.0101s events: 27617
        DEBUG: total time taken by event execution: 9.9889s
        """
        # FIX: every named group below had its `<name>` stripped in the
        # mangled source (`(?P[\d.]+s)` is a re.error); names are restored
        # to match the `.group("...")` calls.
        non_debug_pattern = r"^(?!.*DEBUG: ).*$"
        debug_pattern = r".*DEBUG: .*"

        result: Dict[Any, Any] = {}

        # Split the output into the normal summary and the DEBUG section,
        # because the same metric names (min/avg/max) appear in both.
        non_debug_lines = "\n".join(
            re.findall(
                non_debug_pattern,
                data,
                re.MULTILINE,
            )
        )
        debug_lines = "\n".join(
            re.findall(
                debug_pattern,
                data,
                re.MULTILINE,
            )
        )

        # Extract total time (kept as a string with the trailing "s").
        total_time_pattern = re.compile(r"total time:\s+(?P<total_time>[\d.]+s)")
        match = total_time_pattern.search(non_debug_lines)
        if match:
            result["total_time"] = match.group("total_time")

        # Extract total number of events.
        total_events_pattern = re.compile(
            r"total number of events:\s+(?P<total_events>\d+)"
        )
        match = total_events_pattern.search(non_debug_lines)
        if match:
            result["total_events"] = match.group("total_events")

        # Extract latency metrics (ms).
        latency_param = "latency_ms"
        latency_metrics_pattern = re.compile(
            r"(?P<metric>min|avg|max|95th percentile|sum):\s+(?P<value>[\d.]+)"
        )
        result[latency_param] = {}
        matches = latency_metrics_pattern.findall(non_debug_lines)
        for match in matches:
            if match:
                metric = match[0]
                value = float(match[1])
                result[latency_param][metric] = value

        thread_param = "thread_fairness"
        result[thread_param] = {}
        thread_events_pattern = re.compile(
            r"events \(avg/stddev\):\s+(?P<events_avg>[\d.]+)/(?P<events_stddev>[\d.]+)"
        )
        thread_events = thread_events_pattern.search(non_debug_lines)
        if thread_events:
            result[thread_param]["events_avg"] = thread_events.group("events_avg")
            result[thread_param]["events_stddev"] = thread_events.group("events_stddev")

        thread_exec_time_pattern = re.compile(
            r"execution time \(avg/stddev\):\s+(?P<avg>[\d.]+)/(?P<std_dev>[\d.]+)"
        )
        exec_time = thread_exec_time_pattern.search(non_debug_lines)
        if exec_time:
            result[thread_param]["execution_time_avg"] = exec_time.group("avg")
            result[thread_param]["execution_time_stddev"] = exec_time.group("std_dev")

        # Verbose per-thread statistics from the DEBUG section. Note: with
        # multiple threads, later thread lines overwrite earlier ones (only
        # the last thread's min/avg/max/events are kept) — original behavior.
        verbose_param = "verbose_per_thread_statistics"
        verbose_metrics_pattern = re.compile(
            r"(?P<metric>min|avg|max|events):\s+(?P<value>[\d.]+)"
        )
        result[verbose_param] = {}

        matches = verbose_metrics_pattern.findall(debug_lines)
        for match in matches:
            if match:
                metric = match[0]
                value = float(match[1])
                result[verbose_param][metric] = value

        # NOTE(review): the source diff is truncated inside this pattern;
        # the group name and the remainder of the method are reconstructed
        # from the sample output above — confirm against the original change.
        event_execution_pattern = re.compile(
            r"total time taken by event execution:"
            r"\s+(?P<total_execution_time>[\d.]+)s"
        )
        match = event_execution_pattern.search(debug_lines)
        if match:
            result[verbose_param]["total_execution_time"] = float(
                match.group("total_execution_time")
            )

        return result

    # NOTE(review): the three per-test parsers below lie entirely beyond the
    # truncated portion of the source diff. They are reconstructed as thin
    # wrappers over the common parser so the visible callers resolve;
    # confirm against the original change.
    def __process_cpu_perf_result(self, data: str) -> Dict[Any, Any]:
        # cpu output has no test-specific sections beyond the common ones.
        return self.__process_perf_result(data)

    def __process_fileio_perf_result(self, data: str, ops: str) -> Dict[Any, Any]:
        # Record which operation type (read/write) this run measured.
        result = self.__process_perf_result(data)
        result["operation"] = ops
        return result

    def __process_memory_perf_result(self, data: str) -> Dict[Any, Any]:
        # memory output has no test-specific sections beyond the common ones.
        return self.__process_perf_result(data)