diff --git a/devops/scripts/benchmarks/benches/compute.py b/devops/scripts/benchmarks/benches/compute.py
index 2ca6b674e33bb..bcaf547fc9028 100644
--- a/devops/scripts/benchmarks/benches/compute.py
+++ b/devops/scripts/benchmarks/benches/compute.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2024-2025 Intel Corporation
+# Copyright (C) 2024-2026 Intel Corporation
 # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -1027,9 +1027,11 @@ def _supported_runtimes(self) -> list[RUNTIMES]:
 
     def _bin_args(self, run_trace: TracingType = TracingType.NONE) -> list[str]:
         iters = self._get_iters(run_trace)
-        return [f"--iterations={iters}"] + [
-            f"--{k}={v}" for k, v in self._torch_params.items()
-        ]
+        return (
+            [f"--iterations={iters}"]
+            + [f"--profilerType={self._profiler_type.value}"]
+            + [f"--{k}={v}" for k, v in self._torch_params.items()]
+        )
 
 
 class TorchSingleQueue(TorchBenchmark):
diff --git a/devops/scripts/benchmarks/main.py b/devops/scripts/benchmarks/main.py
index a66c3c7bf3b6c..fb7e7d17ce548 100755
--- a/devops/scripts/benchmarks/main.py
+++ b/devops/scripts/benchmarks/main.py
@@ -107,8 +107,8 @@ def run_iterations(
     Unless options.exit_on_failure is set, then exception is raised.
     """
 
+    log.info(f"Running '{benchmark.name()}' {iters}x iterations...")
     for iter in range(iters):
-        log.info(f"running {benchmark.name()}, iteration {iter}... ")
         try:
             bench_results = benchmark.run(
                 env_vars, run_trace=run_trace, force_trace=force_trace
@@ -145,7 +145,7 @@ def run_iterations(
             log.error(f"{failure_label}: verification failed: {str(e)}.")
             continue
 
-    # Iterations completed successfully
+    log.info(f"Completed '{benchmark.name()}' {iters}x iterations")
     return True
 
 
diff --git a/devops/scripts/benchmarks/tests/test_integration.py b/devops/scripts/benchmarks/tests/test_integration.py
index 636e7017e2f21..fbb903c1686b6 100644
--- a/devops/scripts/benchmarks/tests/test_integration.py
+++ b/devops/scripts/benchmarks/tests/test_integration.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2025 Intel Corporation
+# Copyright (C) 2025-2026 Intel Corporation
 # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -63,7 +63,7 @@ def remove_dirs(self):
             if d is not None:
                 shutil.rmtree(d, ignore_errors=True)
 
-    def run_main(self, *args):
+    def run_main(self, *args) -> subprocess.CompletedProcess:
         # TODO: not yet tested: "--detect-version", "sycl,compute_runtime"
 
@@ -101,9 +101,9 @@ def run_main(self, *args):
             "MAIN_PY_STDERR:",
             "\n" + proc.stderr.decode() if proc.stderr else " ",
         )
-        return proc.returncode
+        return proc
 
-    def get_output(self):
+    def get_benchmark_output_data(self):
         with open(os.path.join(self.OUTPUT_DIR, "data.json")) as f:
             out = json.load(f)
         return DataJson(
@@ -169,11 +169,30 @@ def _checkGroup(
     def _checkResultsExist(self, caseName: str, out: DataJson):
         self.assertIn(caseName, [r.name for r in out.runs[0].results])
 
-    def _checkCase(self, caseName: str, groupName: str, tags: set[str]):
-        run_result = self.app.run_main("--filter", caseName + "$")
-        self.assertEqual(run_result, 0, "Subprocess did not exit cleanly")
+    def _checkExistsInProcessOutput(
+        self, proc: subprocess.CompletedProcess, expected: str
+    ):
+        """
+        Check that the expected regex string exists in the process output.
+        Useful e.g. for checking that expected params are passed to the benchmark's binary execution.
+        """
+        stdout = proc.stdout.decode()
+        self.assertRegex(stdout, expected, "Expected string not found in output")
+
+    def _checkCase(
+        self,
+        caseName: str,
+        groupName: str,
+        tags: set[str],
+        expected_in_output: str = None,
+    ):
+        return_proc = self.app.run_main("--filter", caseName + "$")
+        self.assertEqual(return_proc.returncode, 0, "Subprocess did not exit cleanly")
+
+        if expected_in_output:
+            self._checkExistsInProcessOutput(return_proc, expected_in_output)
 
-        out = self.app.get_output()
+        out = self.app.get_benchmark_output_data()
         self._checkResultsExist(caseName, out)
 
         metadata = out.metadata[caseName]
@@ -199,11 +218,13 @@ def test_torch_l0(self):
             "torch_benchmark_l0 KernelSubmitSingleQueue KernelBatchSize 512, KernelDataType Int32, KernelName Add, KernelParamsNum 5, KernelSubmitPattern Single, KernelWGCount 4096, KernelWGSize 512",
             "KernelSubmitSingleQueue Int32Large",
             {"pytorch", "L0"},
+            "--test=KernelSubmitSingleQueue.*--profilerType=timer",
         )
         self._checkCase(
             "torch_benchmark_l0 KernelSubmitSingleQueue KernelBatchSize 512, KernelDataType Int32, KernelName Add, KernelParamsNum 5, KernelSubmitPattern Single, KernelWGCount 4096, KernelWGSize 512 CPU count",
             "KernelSubmitSingleQueue Int32Large, CPU count",
             {"pytorch", "L0"},
+            "--test=KernelSubmitSingleQueue.*--profilerType=cpuCounter",
         )
         self._checkCase(
             "torch_benchmark_l0 KernelSubmitMultiQueue kernelsPerQueue 20, workgroupCount 4096, workgroupSize 512",
diff --git a/devops/scripts/benchmarks/utils/utils.py b/devops/scripts/benchmarks/utils/utils.py
index b2c36406bc0bc..9e3653370ce21 100644
--- a/devops/scripts/benchmarks/utils/utils.py
+++ b/devops/scripts/benchmarks/utils/utils.py
@@ -71,7 +71,7 @@ def run(
     command_str = " ".join(command)
     env_str = " ".join(f"{key}={value}" for key, value in env_vars.items())
     full_command_str = f"{env_str} {command_str}".strip()
-    log.debug(f"Running: {full_command_str}")
+    log.info(f"Running: {full_command_str}")
 
     for key, value in env_vars.items():
         # Only PATH and LD_LIBRARY_PATH should be prepended to existing values