add parallel test running to the framework. #1664

Open
wants to merge 6 commits into base: main
19 changes: 18 additions & 1 deletion test_common/harness/testHarness.cpp
@@ -823,18 +823,35 @@ void callTestFunctions(test_definition testList[],
    else
    {
        // Queue all tests that need to run
        // queue tests that require sequential run:
        std::vector<int> sequentialTestList;
        for (int i = 0; i < testNum; ++i)
        {
            if (selectedTestList[i])
            {
                gTestQueue.push_back(i);
                // test if required to run sequentially:
                if (!testList[i].support_parallel)
                {
                    sequentialTestList.push_back(i);
                }
                else
                {
                    gTestQueue.push_back(i);
                }
            }
        }

        // Spawn thread pool
        std::vector<std::thread *> threads;
        test_harness_state state = { testList, resultTestList, deviceToUse,
                                     config };
        // run the requested sequential tests first:
        for (auto test_num : sequentialTestList)
        {
            state.results[test_num] =
                callSingleTestFunction(testList[test_num], deviceToUse, config);
        }

        for (unsigned i = 0; i < config.numWorkerThreads; i++)
        {
            log_info("Spawning worker thread %u\n", i);
13 changes: 11 additions & 2 deletions test_common/harness/testHarness.h
@@ -57,11 +57,19 @@ Version get_device_cl_version(cl_device_id device);

#define ADD_TEST(fn) \
    { \
        test_##fn, #fn, Version(1, 0) \
        test_##fn, #fn, Version(1, 0), true \
    }
#define ADD_TEST_VERSION(fn, ver) \
    { \
        test_##fn, #fn, ver \
        test_##fn, #fn, ver, true \
    }
#define ADD_TEST_SEQUENTIAL(fn) \
    { \
        test_##fn, #fn, Version(1, 0), false \
    }
#define ADD_TEST_VERSION_SEQUENTIAL(fn, ver) \
    { \
        test_##fn, #fn, ver, false \
    }

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@@ -74,6 +82,7 @@ typedef struct test_definition
    test_function_pointer func;
    const char *name;
    Version min_version;
    bool support_parallel;
} test_definition;


12 changes: 6 additions & 6 deletions test_conformance/allocations/main.cpp
@@ -261,12 +261,12 @@ int test_image2d_write_non_blocking(cl_device_id device, cl_context context, cl_
}

test_definition test_list[] = {
    ADD_TEST( buffer ),
    ADD_TEST( image2d_read ),
    ADD_TEST( image2d_write ),
    ADD_TEST( buffer_non_blocking ),
    ADD_TEST( image2d_read_non_blocking ),
    ADD_TEST( image2d_write_non_blocking ),
    ADD_TEST_SEQUENTIAL(buffer),
    ADD_TEST_SEQUENTIAL(image2d_read),
    ADD_TEST_SEQUENTIAL(image2d_write),
    ADD_TEST_SEQUENTIAL(buffer_non_blocking),
    ADD_TEST_SEQUENTIAL(image2d_read_non_blocking),
    ADD_TEST_SEQUENTIAL(image2d_write_non_blocking),
};

const int test_num = ARRAY_SIZE( test_list );
54 changes: 27 additions & 27 deletions test_conformance/api/main.cpp
@@ -61,33 +61,33 @@ test_definition test_list[] = {
    ADD_TEST(kernel_global_constant),
    ADD_TEST(kernel_attributes),

    ADD_TEST(min_max_thread_dimensions),
    ADD_TEST(min_max_work_items_sizes),
    ADD_TEST(min_max_work_group_size),
    ADD_TEST(min_max_read_image_args),
    ADD_TEST(min_max_write_image_args),
    ADD_TEST(min_max_mem_alloc_size),
    ADD_TEST(min_max_image_2d_width),
    ADD_TEST(min_max_image_2d_height),
    ADD_TEST(min_max_image_3d_width),
    ADD_TEST(min_max_image_3d_height),
    ADD_TEST(min_max_image_3d_depth),
    ADD_TEST(min_max_image_array_size),
    ADD_TEST(min_max_image_buffer_size),
    ADD_TEST(min_max_parameter_size),
    ADD_TEST(min_max_samplers),
    ADD_TEST(min_max_constant_buffer_size),
    ADD_TEST(min_max_constant_args),
    ADD_TEST(min_max_compute_units),
    ADD_TEST(min_max_address_bits),
    ADD_TEST(min_max_single_fp_config),
    ADD_TEST(min_max_double_fp_config),
    ADD_TEST(min_max_local_mem_size),
    ADD_TEST(min_max_kernel_preferred_work_group_size_multiple),
    ADD_TEST(min_max_execution_capabilities),
    ADD_TEST(min_max_queue_properties),
    ADD_TEST(min_max_device_version),
    ADD_TEST(min_max_language_version),
    ADD_TEST_SEQUENTIAL(min_max_thread_dimensions),
    ADD_TEST_SEQUENTIAL(min_max_work_items_sizes),
    ADD_TEST_SEQUENTIAL(min_max_work_group_size),
    ADD_TEST_SEQUENTIAL(min_max_read_image_args),
    ADD_TEST_SEQUENTIAL(min_max_write_image_args),
    ADD_TEST_SEQUENTIAL(min_max_mem_alloc_size),
    ADD_TEST_SEQUENTIAL(min_max_image_2d_width),
    ADD_TEST_SEQUENTIAL(min_max_image_2d_height),
    ADD_TEST_SEQUENTIAL(min_max_image_3d_width),
    ADD_TEST_SEQUENTIAL(min_max_image_3d_height),
    ADD_TEST_SEQUENTIAL(min_max_image_3d_depth),
    ADD_TEST_SEQUENTIAL(min_max_image_array_size),
    ADD_TEST_SEQUENTIAL(min_max_image_buffer_size),
    ADD_TEST_SEQUENTIAL(min_max_parameter_size),
    ADD_TEST_SEQUENTIAL(min_max_samplers),
    ADD_TEST_SEQUENTIAL(min_max_constant_buffer_size),
    ADD_TEST_SEQUENTIAL(min_max_constant_args),
    ADD_TEST_SEQUENTIAL(min_max_compute_units),
    ADD_TEST_SEQUENTIAL(min_max_address_bits),
    ADD_TEST_SEQUENTIAL(min_max_single_fp_config),
    ADD_TEST_SEQUENTIAL(min_max_double_fp_config),
    ADD_TEST_SEQUENTIAL(min_max_local_mem_size),
    ADD_TEST_SEQUENTIAL(min_max_kernel_preferred_work_group_size_multiple),
    ADD_TEST_SEQUENTIAL(min_max_execution_capabilities),
    ADD_TEST_SEQUENTIAL(min_max_queue_properties),
    ADD_TEST_SEQUENTIAL(min_max_device_version),
    ADD_TEST_SEQUENTIAL(min_max_language_version),

    ADD_TEST(kernel_arg_changes),
    ADD_TEST(kernel_arg_multi_setup_random),
14 changes: 7 additions & 7 deletions test_conformance/multiple_device_context/main.cpp
@@ -26,15 +26,15 @@
#endif

test_definition test_list[] = {
    ADD_TEST( context_multiple_contexts_same_device ),
    ADD_TEST( context_two_contexts_same_device ),
    ADD_TEST( context_three_contexts_same_device ),
    ADD_TEST( context_four_contexts_same_device ),
    ADD_TEST_SEQUENTIAL(context_multiple_contexts_same_device),
    ADD_TEST_SEQUENTIAL(context_two_contexts_same_device),
    ADD_TEST_SEQUENTIAL(context_three_contexts_same_device),
    ADD_TEST_SEQUENTIAL(context_four_contexts_same_device),

    ADD_TEST( two_devices ),
    ADD_TEST( max_devices ),
    ADD_TEST_SEQUENTIAL(two_devices),
    ADD_TEST_SEQUENTIAL(max_devices),

    ADD_TEST( hundred_queues ),
    ADD_TEST_SEQUENTIAL(hundred_queues),
};

const int test_num = ARRAY_SIZE( test_list );
143 changes: 81 additions & 62 deletions test_conformance/run_conformance.py
@@ -10,12 +10,16 @@

from __future__ import print_function

import multiprocessing
import os
import re
import sys
import subprocess
import time
import tempfile
from itertools import repeat

from distlib.compat import raw_input

DEBUG = 0

@@ -206,75 +210,90 @@ def run_test_checking_output(current_directory, test_dir, log_file):
    return p.returncode


def run_specific_test(test_iter, arg_list):
    failures, previous_test, test_number = arg_list
    # Print the name of the test we're running and the time
    (test_name, test_dir) = test_iter
    if test_dir != previous_test:
        print("========== " + test_dir)
        log_file.write("========================================================================================\n")
        log_file.write("========================================================================================\n")
        log_file.write("(" + get_time() + ") Running Tests: " + test_dir + "\n")
        log_file.write("========================================================================================\n")
        log_file.write("========================================================================================\n")
        previous_test = test_dir
    print("(" + get_time() + ") BEGIN " + test_name.ljust(40) + ": ", end='')
    log_file.write(" ----------------------------------------------------------------------------------------\n")
    log_file.write(" (" + get_time() + ") Running Sub Test: " + test_name + "\n")
    log_file.write(" ----------------------------------------------------------------------------------------\n")
    log_file.flush()
    sys.stdout.flush()

    # Run the test
    result = 0
    start_time = time.time()
    try:
        process_pid = 0
        result = run_test_checking_output(current_directory, test_dir, log_file)
    except KeyboardInterrupt:
        # Catch an interrupt from the user
        write_screen_log("\nFAILED: Execution interrupted. Killing test process, but not aborting full test run.")
        os.kill(process_pid, 9)
        if sys.version_info[0] < 3:
            answer = raw_input("Abort all tests? (y/n)")
        else:
            answer = input("Abort all tests? (y/n)")
        if answer.find("y") != -1:
            write_screen_log("\nUser chose to abort all tests.")
            log_file.close()
            sys.exit(-1)
        else:
            write_screen_log("\nUser chose to continue with other tests. Reporting this test as failed.")
            result = 1
    run_time = (time.time() - start_time)

    # Move print the finish status
    if result == 0:
        print("(" + get_time() + ") PASSED " + test_name.ljust(40) + ": (" + str(int(run_time)).rjust(3) + "s, test " + str(test_number).rjust(3) + os.sep + str(len(tests)) + ")", end='')
    else:
        print("(" + get_time() + ") FAILED " + test_name.ljust(40) + ": (" + str(int(run_time)).rjust(3) + "s, test " + str(test_number).rjust(3) + os.sep + str(len(tests)) + ")", end='')

    test_number = test_number + 1
    log_file.write(" ----------------------------------------------------------------------------------------\n")
    log_file.flush()

    print("")
    if result != 0:
        log_file.write(" *******************************************************************************************\n")
        log_file.write(" * (" + get_time() + ") Test " + test_name + " ==> FAILED: " + str(result) + "\n")
        log_file.write(" *******************************************************************************************\n")
        failures = failures + 1
    else:
        log_file.write(" (" + get_time() + ") Test " + test_name + " passed in " + str(run_time) + "s\n")

    log_file.write(" ----------------------------------------------------------------------------------------\n")
    log_file.write("\n")
    return failures, previous_test, test_number

def run_tests(tests):
    global curent_directory
    global process_pid
    # Run the tests
    failures = 0
    previous_test = None
    test_number = 1
    for test in tests:
        # Print the name of the test we're running and the time
        (test_name, test_dir) = test
        if test_dir != previous_test:
            print("========== " + test_dir)
            log_file.write("========================================================================================\n")
            log_file.write("========================================================================================\n")
            log_file.write("(" + get_time() + ") Running Tests: " + test_dir + "\n")
            log_file.write("========================================================================================\n")
            log_file.write("========================================================================================\n")
            previous_test = test_dir
        print("(" + get_time() + ") BEGIN " + test_name.ljust(40) + ": ", end='')
        log_file.write(" ----------------------------------------------------------------------------------------\n")
        log_file.write(" (" + get_time() + ") Running Sub Test: " + test_name + "\n")
        log_file.write(" ----------------------------------------------------------------------------------------\n")
        log_file.flush()
        sys.stdout.flush()

        # Run the test
        result = 0
        start_time = time.time()
        try:
            process_pid = 0
            result = run_test_checking_output(current_directory, test_dir, log_file)
        except KeyboardInterrupt:
            # Catch an interrupt from the user
            write_screen_log("\nFAILED: Execution interrupted. Killing test process, but not aborting full test run.")
            os.kill(process_pid, 9)
            if sys.version_info[0] < 3:
                answer = raw_input("Abort all tests? (y/n)")
            else:
                answer = input("Abort all tests? (y/n)")
            if answer.find("y") != -1:
                write_screen_log("\nUser chose to abort all tests.")
                log_file.close()
                sys.exit(-1)
            else:
                write_screen_log("\nUser chose to continue with other tests. Reporting this test as failed.")
                result = 1
        run_time = (time.time() - start_time)

        # Move print the finish status
        if result == 0:
            print("(" + get_time() + ") PASSED " + test_name.ljust(40) + ": (" + str(int(run_time)).rjust(3) + "s, test " + str(test_number).rjust(3) + os.sep + str(len(tests)) + ")", end='')
        else:
            print("(" + get_time() + ") FAILED " + test_name.ljust(40) + ": (" + str(int(run_time)).rjust(3) + "s, test " + str(test_number).rjust(3) + os.sep + str(len(tests)) + ")", end='')

        test_number = test_number + 1
        log_file.write(" ----------------------------------------------------------------------------------------\n")
        log_file.flush()

        print("")
        if result != 0:
            log_file.write(" *******************************************************************************************\n")
            log_file.write(" * (" + get_time() + ") Test " + test_name + " ==> FAILED: " + str(result) + "\n")
            log_file.write(" *******************************************************************************************\n")
            failures = failures + 1
        else:
            log_file.write(" (" + get_time() + ") Test " + test_name + " passed in " + str(run_time) + "s\n")

        log_file.write(" ----------------------------------------------------------------------------------------\n")
        log_file.write("\n")
    run_in_parallel = False
    # Open the log file
    for arg in sys.argv:
        match = re.search("RUN_PARALLEL", arg)
        if match:
            run_in_parallel = True
    if run_in_parallel:
        pool = multiprocessing.Pool(4)
        failures, previous_test, test_number = pool.starmap(run_specific_test, zip(tests, repeat([failures, previous_test, test_number])))
    else:
        for test_itr in tests:
            failures, previous_test, test_number = run_specific_test(test_itr, [failures, previous_test, test_number])
Contributor
From the code, to run in parallel, the word "RUN_PARALLEL" is expected in the arg list. But this causes errors right at the beginning, because it is not ignored like the other args in lines 337-340 (in the changed file).
So the following should be added right after:

    if re.search("RUN_PARALLEL", arg):
        continue

Second, after the above is fixed, it still can't run (at least on Windows). The following error is seen: "An attempt has been made to start a new process before the current process has finished its bootstrapping phase."
This can be fixed by wrapping the call to the main execution function (run_tests), and maybe the surrounding code, in a "main" function and then calling it as below:

    if __name__ == "__main__":
        main()

After this, the tests run to completion (for a small subset of six tests), but "ValueError: too many values to unpack" is seen at the end.

The test-specific stdout is also all mixed up due to tests running in parallel.

The help message should also be updated to mention the new "RUN_PARALLEL" argument.

Looks like the change needs some rework before it can be reviewed again.
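
For reference, a rough, self-contained sketch of the fixes suggested above: skip the RUN_PARALLEL token during argument handling, guard the entry point with if __name__ == "__main__":, and have each worker return a single value so the pool results unpack cleanly. The names run_one_test() and main(), and the hard-coded test list, are illustrative only and are not part of run_conformance.py:

    # Sketch only: run_one_test() and main() are illustrative stand-ins; the real
    # script would call run_specific_test() and its existing argument/CSV handling.
    import multiprocessing
    import re
    import sys


    def run_one_test(test):
        """Stand-in for the real per-test runner; returns 0 on pass, 1 on failure."""
        test_name, test_dir = test
        print("running " + test_name + " from " + test_dir)
        return 0


    def run_tests(tests, run_in_parallel):
        if run_in_parallel:
            # Each worker returns a single int, so the results unpack cleanly;
            # calling pool.starmap() with shared (failures, previous_test,
            # test_number) state is what produces "too many values to unpack".
            with multiprocessing.Pool(4) as pool:
                results = pool.map(run_one_test, tests)
        else:
            results = [run_one_test(test) for test in tests]
        return sum(results)


    def main():
        run_in_parallel = False
        for arg in sys.argv[1:]:
            # Skip the new RUN_PARALLEL token so the existing argument handling
            # does not reject it as an unknown option.
            if re.search("RUN_PARALLEL", arg):
                run_in_parallel = True
                continue
            # ... existing handling of the other arguments goes here ...
        tests = [("buffer", "allocations"), ("min_max_samplers", "api")]
        return run_tests(tests, run_in_parallel)


    if __name__ == "__main__":
        # Required on Windows: multiprocessing spawns workers by re-importing this
        # module, so the top-level code must not kick off the test run itself.
        sys.exit(main())

Returning one value per test also sidesteps passing previous_test and test_number between processes, since state mutated inside a worker does not propagate back to the parent anyway.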

Contributor Author
Thanks for the review, working on a fix now. I've been told that the Python test harness is not the most up-to-date method of running the tests; I am looking to abandon this in favor of fixing it in the proper place.

Contributor
Can we remove the focused review tag on this one if we are planning a different fix?

    return failures

