diff --git a/onnxruntime/test/providers/cpu/math/gemm_test.cc b/onnxruntime/test/providers/cpu/math/gemm_test.cc
index 8d0018c6e8b7b..60a8dfca658d4 100644
--- a/onnxruntime/test/providers/cpu/math/gemm_test.cc
+++ b/onnxruntime/test/providers/cpu/math/gemm_test.cc
@@ -2,13 +2,28 @@
 // Licensed under the MIT License.
 
 #include "gtest/gtest.h"
-#include "test/providers/provider_test_utils.h"
+
+#include "core/framework/run_options.h"
 #include "test/common/cuda_op_test_utils.h"
+#include "test/providers/provider_test_utils.h"
+#include "test/providers/run_options_config_keys.h"
 #include "test/util/include/default_providers.h"
 
 namespace onnxruntime {
 namespace test {
 
+namespace {
+
+const onnxruntime::RunOptions run_options = []() {
+  onnxruntime::RunOptions options{};
+  ORT_THROW_IF_ERROR(options.config_options.AddConfigEntry(kOpTesterRunOptionsConfigTestTunableOp, "true"));
+  return options;
+}();
+
+const constexpr auto run_with_tunable_op = &run_options;
+
+}  // namespace
+
 template <typename T>
 void TestGemmNoTrans() {
   auto run_test = [](bool b_is_initializer, bool c_is_initializer = false) {
@@ -27,7 +42,8 @@ void TestGemmNoTrans() {
     test.AddOutput("Y", {2, 3}, {11.0f, 11.0f, 11.0f, -9.0f, -9.0f, -9.0f});
-    test.Run();
+    test.Config(run_with_tunable_op)
+        .RunWithConfig();
   };
 
   run_test(false, false);
@@ -82,7 +98,9 @@ TEST(GemmOpTest, GemmNoTrans_f16) {
   test.AddInput<MLFloat16>("B", {4, 3}, f_B);
   test.AddInput<MLFloat16>("C", {2, 3}, f_C);
   test.AddOutput<MLFloat16>("Y", {2, 3}, f_Y);
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  //TensorRT: fp16 is not supported
+  test.ConfigExcludeEps({kTensorrtExecutionProvider})  // TensorRT: fp16 is not supported
+      .Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 #endif
 
@@ -105,12 +123,19 @@ TEST(GemmOpTest, GemmNoTrans_bfloat16) {
   test.AddInput<BFloat16>("C", {2, 3}, MakeBFloat16({1.f, 1.f, 1.f, 1.f, 1.f, 1.f}));
   test.AddOutput<BFloat16>("Y", {2, 3}, MakeBFloat16({11.0f, 11.0f, 11.0f, -9.0f, -9.0f, -9.0f}));
   std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+  test.Config(run_with_tunable_op);
 #ifdef USE_CUDA
-  execution_providers.push_back(DefaultCudaExecutionProvider());
+  execution_providers.emplace_back(DefaultCudaExecutionProvider());
 #elif USE_ROCM
-  execution_providers.push_back(DefaultRocmExecutionProvider());
-#endif
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  execution_providers.emplace_back(DefaultRocmExecutionProvider(/*test_tunable_op=*/true));
+  test.ConfigEps(std::move(execution_providers))
+      .RunWithConfig();
+
+  execution_providers.clear();
+  execution_providers.emplace_back(DefaultRocmExecutionProvider(/*test_tunable_op=*/false));
+#endif
+  test.ConfigEps(std::move(execution_providers))
+      .RunWithConfig();
 }
 #endif
 
@@ -133,10 +158,10 @@ void TestGemmBroadcast() {
                     {11.0f, 12.0f, 13.0f, -9.0f, -8.0f, -7.0f});
 
 #if defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_GPU_FP32)
-    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider});  // OpenVINO : Temporarily disabled due to accuracy issues
-#else
-    test.Run();
+    test.ConfigExcludeEps({kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
 #endif
+    test.Config(run_with_tunable_op)
+        .RunWithConfig();
   };
 
   run_test(false, false);
@@ -171,10 +196,10 @@ static void TestGemmTrans() {
                  {11.0f, 11.0f, 11.0f, -9.0f, -9.0f, -9.0f});
 
 #if defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_GPU_FP32)
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
-#else
-  test.Run();
+  test.ConfigExcludeEps({kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
 #endif
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmTrans) {
@@ -203,10 +228,10 @@ static void TestGemmTransB() {
                     {11.0f, 11.0f, 11.0f, -9.0f, -9.0f, -9.0f});
 
 #if defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_GPU_FP32)
-    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
-#else
-    test.Run();
+    test.ConfigExcludeEps({kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
 #endif
+    test.Config(run_with_tunable_op)
+        .RunWithConfig();
   };
   run_test(false, false);
   // CoreML EP requires weight and bias both to be initializers
@@ -239,10 +264,10 @@ static void TestGemmTransB_1() {
                     {11.0f, 11.0f, 11.0f, -9.0f, -9.0f, -9.0f});
 
 #if defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_GPU_FP32)
-    test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
-#else
-    test.Run();
+    test.ConfigExcludeEps({kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
 #endif
+    test.Config(run_with_tunable_op)
+        .RunWithConfig();
   };
   run_test(false, false);
   // CoreML EP requires weight and bias both to be initializers
@@ -271,14 +296,16 @@ void TestGemmAlpha() {
   test.AddOutput("Y", {2, 3}, {6.0f, 6.0f, 6.0f, -4.0f, -4.0f, -4.0f});
-  //test.AddOutput("Y", {2, 3},
-  //               {5.0f, 5.0f, 5.0f,
-  //                -5.0f, -5.0f, -5.0f});
+  // test.AddOutput("Y", {2, 3},
+  //                {5.0f, 5.0f, 5.0f,
+  //                 -5.0f, -5.0f, -5.0f});
 
 #if defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_GPU_FP32)
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
+  test.ConfigExcludeEps({kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
 #else
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  //TensorRT: Seg fault in parser
+  test.ConfigExcludeEps({kTensorrtExecutionProvider});  // TensorRT: Seg fault in parser
 #endif
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmAlpha) {
@@ -304,10 +331,12 @@ void TestGemmBeta() {
                  {12.0f, 12.0f, 12.0f, -8.0f, -8.0f, -8.0f});
 
 #if defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_GPU_FP32)
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
+  test.ConfigExcludeEps({kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
 #else
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  //TensorRT: Seg fault in parser
+  test.ConfigExcludeEps({kTensorrtExecutionProvider});  // TensorRT: Seg fault in parser
 #endif
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmBeta) {
@@ -333,10 +362,12 @@ void TestGemmAlphaBeta() {
                  {7.0f, 7.0f, 7.0f, -3.0f, -3.0f, -3.0f});
 
 #if defined(OPENVINO_CONFIG_GPU_FP16) || defined(OPENVINO_CONFIG_GPU_FP32)
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
+  test.ConfigExcludeEps({kOpenVINOExecutionProvider});  // OpenVINO: Temporarily disabled due to accuracy issues
 #else
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  //TensorRT: Seg fault in parser
+  test.ConfigExcludeEps({kTensorrtExecutionProvider});  // TensorRT: Seg fault in parser
 #endif
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmAlphaBeta) {
@@ -361,7 +392,11 @@ void TestGemmNaN() {
   test.AddOutput("Y", {2, 3}, {10.0f, 10.0f, 10.0f, -10.0f, -10.0f, -10.0f});
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  //TensorRT: Seg fault in parser
+
+  // TensorRT: Seg fault in parser
+  test.ConfigExcludeEps({kTensorrtExecutionProvider})
+      .Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmNaN) {
@@ -386,7 +421,8 @@ void TestGemmScalarBroadcast() {
   test.AddOutput("Y", {2, 3}, {11.0f, 11.0f, 11.0f, -9.0f, -9.0f, -9.0f});
-  test.Run();
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmScalarBroadcast) {
@@ -411,7 +447,8 @@ void TestGemm2DBroadcast_1() {
   test.AddOutput("Y", {2, 3}, {11.0f, 11.0f, 11.0f, -8.0f, -8.0f, -8.0f});
-  test.Run();
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, Gemm2DBroadcast_1) {
@@ -437,7 +474,8 @@ void TestGemm2DBroadcast_2() {
   test.AddOutput("Y", {2, 3}, {11.0f, 12.0f, 13.0f, -9.0f, -8.0f, -7.0f});
-  test.Run();
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, Gemm2DBroadcast_2) {
@@ -462,7 +500,8 @@ void TestGemmFalseBroadcast() {
   test.AddOutput("Y", {2, 3}, {11.0f, 11.0f, 11.0f, -8.0f, -8.0f, -8.0f});
-  test.Run();
+  test.Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmFalseBroadcast) {
@@ -485,7 +524,10 @@ void TestGemmEmptyTensor() {
   test.AddInput("C", {3}, std::vector(3, 1.0f));
   test.AddOutput("Y", {0, 3}, {});
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider, kDnnlExecutionProvider});  //TensorRT: doesn't support dynamic shape yet
+  // TensorRT: doesn't support dynamic shape yet
+  test.ConfigExcludeEps({kTensorrtExecutionProvider, kDnnlExecutionProvider})
+      .Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmEmptyTensor) {
@@ -510,7 +552,9 @@ static void TestGemmNoBiasOpset11() {
                  {10.0f, 10.0f, 10.0f, -10.0f, -10.0f, -10.0f});
 
   // tensorRT don't seem to support missing bias
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+  test.ConfigExcludeEps({kTensorrtExecutionProvider})
+      .Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmNoBiasOpset11) {
@@ -530,7 +574,9 @@ static void TestGemmWithAlphaOpset11() {
   test.AddOutput("Y", {2, 2}, {6.0f, 6.0f, 14.0f, 14.0f});
 
   // tensorRT don't seem to support missing bias
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});
+  test.ConfigExcludeEps({kTensorrtExecutionProvider})
+      .Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 
 TEST(GemmOpTest, GemmWithAlphaOpset11) {
@@ -583,9 +629,10 @@ TEST(GemmOpTest, SharedPrepackedWeights) {
   // Session 1
   {
-    auto ep_vec = cpu_ep();
-    test.Run(so, OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr,
-             &ep_vec, {}, &number_of_pre_packed_weights_counter_session_1, &number_of_shared_pre_packed_weights_counter);
+    test.Config(so)
+        .ConfigEps(cpu_ep())
+        .Config(run_with_tunable_op)
+        .RunWithConfig(&number_of_pre_packed_weights_counter_session_1, &number_of_shared_pre_packed_weights_counter);
 
     // Assert that no pre-packed weights have been shared thus far
     ASSERT_EQ(number_of_shared_pre_packed_weights_counter, static_cast<size_t>(0));
   }
 
@@ -605,9 +652,10 @@ TEST(GemmOpTest, SharedPrepackedWeights) {
   // Session 2
   {
     size_t number_of_pre_packed_weights_counter_session_2 = 0;
-    auto ep_vec = cpu_ep();
-    test.Run(so, OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr,
-             &ep_vec, {}, &number_of_pre_packed_weights_counter_session_2, &number_of_shared_pre_packed_weights_counter);
+    test.Config(so)
+        .ConfigEps(cpu_ep())
+        .Config(run_with_tunable_op)
+        .RunWithConfig(&number_of_pre_packed_weights_counter_session_2, &number_of_shared_pre_packed_weights_counter);
 
     // Assert that the same number of weights were pre-packed in both sessions
     ASSERT_EQ(number_of_pre_packed_weights_counter_session_1, number_of_pre_packed_weights_counter_session_2);
diff --git a/onnxruntime/test/providers/cpu/math/matmul_test.cc b/onnxruntime/test/providers/cpu/math/matmul_test.cc
index 31f32f932de21..334a1ca983475 100644
--- a/onnxruntime/test/providers/cpu/math/matmul_test.cc
+++ b/onnxruntime/test/providers/cpu/math/matmul_test.cc
@@ -3,6 +3,7 @@
 #include "gtest/gtest.h"
 #include "test/providers/provider_test_utils.h"
+#include "test/providers/run_options_config_keys.h"
 #include "test/common/cuda_op_test_utils.h"
 #include "test/common/tensor_op_test_utils.h"
 #include "default_providers.h"
 
@@ -10,6 +11,18 @@
 namespace onnxruntime {
 namespace test {
 
+namespace {
+
+const onnxruntime::RunOptions run_options = []() {
+  onnxruntime::RunOptions options{};
+  ORT_THROW_IF_ERROR(options.config_options.AddConfigEntry(kOpTesterRunOptionsConfigTestTunableOp, "true"));
+  return options;
+}();
+
+const constexpr auto run_with_tunable_op = &run_options;
+
+}  // namespace
+
 template <typename T>
 struct MatMulTestData {
   std::string name;
@@ -154,7 +167,9 @@ void RunMatMulTest(int32_t opset_version, bool is_a_constant, bool is_b_constant
       // NNAPI: currently fails for the "test 2D empty input" case
      excluded_providers.insert(kNnapiExecutionProvider);
     }
-    test.Run(OpTester::ExpectResult::kExpectSuccess, "", excluded_providers);
+    test.ConfigExcludeEps(excluded_providers)
+        .Config(run_with_tunable_op)
+        .RunWithConfig();
   }
 }
 
@@ -218,7 +233,9 @@ TEST(MathOpTest, MatMul_Float16) {
   test.AddInput<MLFloat16>("A", {2, 4}, f_A);
   test.AddInput<MLFloat16>("B", {4, 3}, f_B);
   test.AddOutput<MLFloat16>("Y", {2, 3}, f_Y);
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kTensorrtExecutionProvider});  // TensorRT: fp16 is not supported
+  test.ConfigExcludeEps({kTensorrtExecutionProvider})  // TensorRT: fp16 is not supported
+      .Config(run_with_tunable_op)
+      .RunWithConfig();
 }
 #endif
 
@@ -237,12 +254,19 @@ TEST(MathOpTest, MatMul_BFloat16) {
   test.AddInput<BFloat16>("B", {4, 3}, MakeBFloat16({1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}));
   test.AddOutput<BFloat16>("Y", {2, 3}, MakeBFloat16({10.0f, 10.0f, 10.0f, -10.0f, -10.0f, -10.0f}));
   std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+  test.Config(run_with_tunable_op);
 #ifdef USE_CUDA
-  execution_providers.push_back(DefaultCudaExecutionProvider());
+  execution_providers.emplace_back(DefaultCudaExecutionProvider());
 #elif USE_ROCM
-  execution_providers.push_back(DefaultRocmExecutionProvider());
+  execution_providers.emplace_back(DefaultRocmExecutionProvider(/*test_tunable_op=*/true));
+  test.ConfigEps(std::move(execution_providers))
+      .RunWithConfig();
+
+  execution_providers.clear();
+  execution_providers.emplace_back(DefaultRocmExecutionProvider(/*test_tunable_op=*/false));
 #endif
-  test.Run(OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr, &execution_providers);
+  test.ConfigEps(std::move(execution_providers))
+      .RunWithConfig();
 }
 #endif
 
@@ -285,9 +309,10 @@ TEST(MathOpTest, MatMulSharedPrepackedWeights) {
   // Session 1
   {
-    auto ep_vec = cpu_ep();
-    test.Run(so, OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr,
-             &ep_vec, {}, &number_of_pre_packed_weights_counter_session_1, &number_of_shared_pre_packed_weights_counter);
+    test.Config(so)
+        .Config(run_with_tunable_op)
+        .ConfigEps(cpu_ep())
+        .RunWithConfig(&number_of_pre_packed_weights_counter_session_1, &number_of_shared_pre_packed_weights_counter);
 
     // Assert that no pre-packed weights have been shared thus far
     ASSERT_EQ(number_of_shared_pre_packed_weights_counter, static_cast<size_t>(0));
   }
 
@@ -307,9 +332,10 @@ TEST(MathOpTest, MatMulSharedPrepackedWeights) {
   // Session 2
   {
     size_t number_of_pre_packed_weights_counter_session_2 = 0;
-    auto ep_vec = cpu_ep();
-    test.Run(so, OpTester::ExpectResult::kExpectSuccess, "", {}, nullptr,
-             &ep_vec, {}, &number_of_pre_packed_weights_counter_session_2, &number_of_shared_pre_packed_weights_counter);
+    test.Config(so)
+        .Config(run_with_tunable_op)
+        .ConfigEps(cpu_ep())
+        .RunWithConfig(&number_of_pre_packed_weights_counter_session_2, &number_of_shared_pre_packed_weights_counter);
 
     // Assert that the same number of weights were pre-packed in both sessions
     ASSERT_EQ(number_of_pre_packed_weights_counter_session_1, number_of_pre_packed_weights_counter_session_2);
diff --git a/onnxruntime/test/providers/provider_test_utils.cc b/onnxruntime/test/providers/provider_test_utils.cc
index 8d2cf16a6bdba..9a9e0218f4b1d 100644
--- a/onnxruntime/test/providers/provider_test_utils.cc
+++ b/onnxruntime/test/providers/provider_test_utils.cc
@@ -12,6 +12,7 @@
 #include "core/graph/model_load_utils.h"
 #include "gmock/gmock.h"
 #include "test/providers/provider_test_utils.h"
+#include "test/providers/run_options_config_keys.h"
 #include "test/util/include/default_providers.h"
 #include "test/framework/test_utils.h"
 #include
@@ -1006,6 +1007,39 @@ bool SetEpsForAllNodes(
   return true;
 }
 
+OpTester& OpTester::Config(const SessionOptions& sess_options) {
+  ctx_.session_options = sess_options;
+  return *this;
+}
+
+OpTester& OpTester::Config(ExpectResult expect_result, const std::string& expected_failure_string) {
+  ctx_.expect_result = expect_result;
+  ctx_.expected_failure_string = expected_failure_string;
+  return *this;
+}
+
+OpTester& OpTester::ConfigExcludeEps(const std::unordered_set<std::string>& excluded_provider_types) {
+  ctx_.excluded_provider_types = excluded_provider_types;
+  return *this;
+}
+
+OpTester& OpTester::Config(const RunOptions* run_options) {
+  ctx_.run_options = run_options;
+  return *this;
+}
+
+OpTester& OpTester::ConfigEps(std::vector<std::unique_ptr<IExecutionProvider>>&& execution_providers) {
+  ORT_ENFORCE(execution_providers.size() > 0);
+  ctx_.run_with_specified_eps = true;
+  ctx_.execution_providers = std::move(execution_providers);
+  return *this;
+}
+
+OpTester& OpTester::Config(const Graph::ResolveOptions& resolve_options) {
+  ctx_.resolve_options = resolve_options;
+  return *this;
+}
+
 void OpTester::Run(
     ExpectResult expect_result, const std::string& expected_failure_string,
     const std::unordered_set<std::string>& excluded_provider_types,
@@ -1040,6 +1074,34 @@ void OpTester::Run(
     const Graph::ResolveOptions& options,
     /*out*/ size_t* number_of_pre_packed_weights_counter,
     /*out*/ size_t* number_of_shared_pre_packed_weights_counter) {
+  if (execution_providers == nullptr) {
+    ctx_.run_with_specified_eps = false;
+    ctx_.execution_providers.clear();
+  } else {
+    this->ConfigEps(std::move(*execution_providers));
+    // NOTE: some callsites do the following:
+    //
+    //   std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+    //   execution_providers.push_back(DefaultCPUExecutionProvider());
+    //   test.run(..., &execution_providers, ...);
+    //   execution_providers[0] = DefaultCUDAExecutionProvider();  // <-- std::move causes segfault here.
+    //   test.run(..., &execution_providers, ...);
+    //
+    // So we need to restore the old vector's size.
+    execution_providers->resize(ctx_.execution_providers.size());
+  }
+
+  (*this)
+      .Config(so)
+      .Config(expect_result, expected_failure_string)
+      .Config(run_options)
+      .ConfigExcludeEps(excluded_provider_types)
+      .Config(options)
+      .RunWithConfig(number_of_pre_packed_weights_counter, number_of_shared_pre_packed_weights_counter);
+}
+
+void OpTester::RunWithConfig(size_t* number_of_pre_packed_weights_counter,
+                             size_t* number_of_shared_pre_packed_weights_counter) {
   std::string cur_provider = "not set";
   ORT_TRY {
 #ifndef NDEBUG
@@ -1068,7 +1130,7 @@ void OpTester::Run(
     fetches_.clear();
 
     bool cache_enabled = cached_model_ != nullptr;
-    const bool strict_shape_type_inference = so.config_options.GetConfigOrDefault(
+    const bool strict_shape_type_inference = ctx_.session_options.config_options.GetConfigOrDefault(
         kOrtSessionOptionsConfigStrictShapeTypeInference, "1") == "1";
     const ModelOptions model_options(allow_released_onnx_opset_only,
                                      strict_shape_type_inference);
@@ -1078,10 +1140,10 @@ void OpTester::Run(
     Status status = Status::OK();
     if (!cache_enabled) {
       if (add_shape_to_tensor_data_ &&
-          expect_result == ExpectResult::kExpectFailure) {
+          ctx_.expect_result == ExpectResult::kExpectFailure) {
         // capture possible exceptions from shape inference for invalid testcase
         ORT_TRY {
-          status = graph.Resolve(options);
+          status = graph.Resolve(ctx_.resolve_options);
         }
         ORT_CATCH(const std::exception& ex) {
           ORT_HANDLE_EXCEPTION([&]() {
          });
        }
      } else {
-        status = graph.Resolve(options);
+        status = graph.Resolve(ctx_.resolve_options);
      }
 
      if (!status.IsOK()) {
-        if (expect_result == ExpectResult::kExpectFailure) {
+        if (ctx_.expect_result == ExpectResult::kExpectFailure) {
          EXPECT_TRUE(!status.IsOK());
          EXPECT_THAT(status.ErrorMessage(),
-                      testing::HasSubstr(expected_failure_string));
+                      testing::HasSubstr(ctx_.expected_failure_string));
        } else {
          LOGS_DEFAULT(ERROR) << "Resolve failed with status: "
                              << status.ErrorMessage();
@@ -1115,11 +1177,11 @@ void OpTester::Run(
     FillFeedsAndOutputNames(feeds, output_names);
 
     // Run the model
-    if (execution_providers) {
+    if (ctx_.run_with_specified_eps) {
       ExecuteModelForEps(
-          std::move(*execution_providers), *p_model, so,
-          expect_result, expected_failure_string,
-          run_options, feeds, output_names,
+          std::move(ctx_.execution_providers), *p_model, ctx_.session_options,
+          ctx_.expect_result, ctx_.expected_failure_string,
+          ctx_.run_options, feeds, output_names,
           /*custom_registries=*/nullptr,
           /*assign_ep_for_nodes=*/false,
          allow_released_onnx_opset_only,
@@ -1152,7 +1214,7 @@ void OpTester::Run(
     bool has_run = false;
 
     for (const std::string& provider_type : all_provider_types) {
-      if (excluded_provider_types.count(provider_type) > 0)
+      if (ctx_.excluded_provider_types.count(provider_type) > 0)
        continue;
 
      cur_provider = provider_type;
@@ -1195,15 +1257,36 @@ void OpTester::Run(
            ret.emplace_back(std::move(execution_provider));
            return ret;
          }(),
-          *p_model, so,
-          expect_result, expected_failure_string,
-          run_options, feeds, output_names,
+          *p_model, ctx_.session_options,
+          ctx_.expect_result, ctx_.expected_failure_string,
+          ctx_.run_options, feeds, output_names,
          &custom_session_registries_,
          /*try_assign_ep_for_nodes=*/true,
          allow_released_onnx_opset_only,
          number_of_pre_packed_weights_counter,
          number_of_shared_pre_packed_weights_counter);
+      // Run Models with subscribed run_options->config_options
+      if (ctx_.run_options != nullptr &&
+          ctx_.run_options->config_options.GetConfigEntry(kOpTesterRunOptionsConfigTestTunableOp) == "true") {
+        std::vector<std::unique_ptr<IExecutionProvider>> execution_providers;
+        if (provider_type == onnxruntime::kRocmExecutionProvider) {
+          execution_providers.emplace_back(DefaultRocmExecutionProvider(/*test_tunable_op=*/true));
+        }
+
+        if (!execution_providers.empty()) {
+          ExecuteModelForEps(
+              std::move(execution_providers), *p_model, ctx_.session_options,
+              ctx_.expect_result, ctx_.expected_failure_string,
+              ctx_.run_options, feeds, output_names,
+              &custom_session_registries_,
+              /*assign_ep_for_nodes=*/true,
+              allow_released_onnx_opset_only,
+              number_of_pre_packed_weights_counter,
+              number_of_shared_pre_packed_weights_counter);
+        }
+      }
+
      has_run = true;
 
      cur_provider = "not set";
    }
diff --git a/onnxruntime/test/providers/provider_test_utils.h b/onnxruntime/test/providers/provider_test_utils.h
index 8391a5389ccc9..2a3eb621aa6ed 100644
--- a/onnxruntime/test/providers/provider_test_utils.h
+++ b/onnxruntime/test/providers/provider_test_utils.h
@@ -727,6 +727,17 @@ class OpTester {
 
   enum class ExpectResult { kExpectSuccess, kExpectFailure };
 
+  OpTester& Config(const SessionOptions& sess_options);
+  OpTester& Config(ExpectResult expect_result, const std::string& expected_failure_string);
+  OpTester& ConfigExcludeEps(const std::unordered_set<std::string>& excluded_provider_types);
+  OpTester& Config(const RunOptions* run_options);
+  OpTester& ConfigEps(std::vector<std::unique_ptr<IExecutionProvider>>&& execution_providers);
+  OpTester& Config(const Graph::ResolveOptions& resolve_options);
+
+  void RunWithConfig(size_t* number_of_pre_packed_weights_counter = nullptr,
+                     size_t* number_of_shared_pre_packed_weights_counter = nullptr);
+
+  // [[deprecated("Use builder pattern Config* and RunWithConfig")]]
   void Run(ExpectResult expect_result = ExpectResult::kExpectSuccess, const std::string& expected_failure_string = "",
            const std::unordered_set<std::string>& excluded_provider_types = {},
            const RunOptions* run_options = nullptr,
@@ -734,6 +745,7 @@ class OpTester {
            ExecutionMode execution_mode = ExecutionMode::ORT_SEQUENTIAL,
            const Graph::ResolveOptions& resolve_options = {});
 
+  // [[deprecated("Use builder pattern Config* and RunWithConfig")]]
   void Run(SessionOptions session_options,
            ExpectResult expect_result = ExpectResult::kExpectSuccess,
            const std::string& expected_failure_string = "",
@@ -834,6 +846,19 @@ class OpTester {
                        const std::string& provider_type,
                        bool allow_released_onnx_opset_only = true);
 
+  struct RunContext {
+    SessionOptions session_options{};
+    ExpectResult expect_result{ExpectResult::kExpectSuccess};
+    std::string expected_failure_string{};
+    std::unordered_set<std::string> excluded_provider_types = {};
+    const RunOptions* run_options{};
+    bool run_with_specified_eps{false};
+    std::vector<std::unique_ptr<IExecutionProvider>> execution_providers{};
+    Graph::ResolveOptions resolve_options{};
+  };
+
+  RunContext ctx_{};
+
   const char* op_;
   std::vector<Data> input_data_;
   std::vector<Data> output_data_;
diff --git a/onnxruntime/test/providers/run_options_config_keys.h b/onnxruntime/test/providers/run_options_config_keys.h
new file mode 100644
index 0000000000000..56afd5ff353c9
--- /dev/null
+++ b/onnxruntime/test/providers/run_options_config_keys.h
@@ -0,0 +1,13 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+// Extends config_key for testing purpose only, see following files for more information:
+// - include/onnxruntime/core/session/onnxruntime_run_options_config_keys.h
+// - include/onnxruntime/core/framework/run_options.h
+// - onnxruntime/core/framework/config_options.h
+
+// Key for enabling OpTester to additionally test an OpKernel with EP config to enable TunableOp. Valid values are
+// "true" or "false"
+static const char* const kOpTesterRunOptionsConfigTestTunableOp = "op_tester.is_tunable_op_under_test";
diff --git a/onnxruntime/test/util/default_providers.cc b/onnxruntime/test/util/default_providers.cc
index 268e9c3faf52d..2db354d97167e 100644
--- a/onnxruntime/test/util/default_providers.cc
+++ b/onnxruntime/test/util/default_providers.cc
@@ -164,13 +164,15 @@ std::unique_ptr<IExecutionProvider> DefaultArmNNExecutionProvider(bool enable_ar
 #endif
 }
 
-std::unique_ptr<IExecutionProvider> DefaultRocmExecutionProvider() {
+std::unique_ptr<IExecutionProvider> DefaultRocmExecutionProvider(bool test_tunable_op) {
 #ifdef USE_ROCM
   OrtROCMProviderOptions provider_options{};
   provider_options.do_copy_in_default_stream = true;
+  provider_options.tunable_op_enabled = test_tunable_op ? 1 : 0;
   if (auto factory = RocmProviderFactoryCreator::Create(&provider_options))
     return factory->CreateProvider();
 #endif
+  ORT_UNUSED_PARAMETER(test_tunable_op);
   return nullptr;
 }
 
diff --git a/onnxruntime/test/util/include/default_providers.h b/onnxruntime/test/util/include/default_providers.h
index 6d0d66a721ca1..5edd2c00bdea8 100644
--- a/onnxruntime/test/util/include/default_providers.h
+++ b/onnxruntime/test/util/include/default_providers.h
@@ -48,7 +48,7 @@ std::unique_ptr<IExecutionProvider> DefaultNnapiExecutionProvider();
 std::unique_ptr<IExecutionProvider> DefaultRknpuExecutionProvider();
 std::unique_ptr<IExecutionProvider> DefaultAclExecutionProvider(bool enable_arena = true);
 std::unique_ptr<IExecutionProvider> DefaultArmNNExecutionProvider(bool enable_arena = true);
-std::unique_ptr<IExecutionProvider> DefaultRocmExecutionProvider();
+std::unique_ptr<IExecutionProvider> DefaultRocmExecutionProvider(bool test_tunable_op = false);
 std::unique_ptr<IExecutionProvider> DefaultCoreMLExecutionProvider();
 std::unique_ptr<IExecutionProvider> DefaultSnpeExecutionProvider();
 std::unique_ptr<IExecutionProvider> DefaultXnnpackExecutionProvider();