diff --git a/include/onnxruntime/core/providers/coreml/coreml_provider_factory.h b/include/onnxruntime/core/providers/coreml/coreml_provider_factory.h
index bf190f96fe7b2..9846da8f7fa7d 100644
--- a/include/onnxruntime/core/providers/coreml/coreml_provider_factory.h
+++ b/include/onnxruntime/core/providers/coreml/coreml_provider_factory.h
@@ -43,11 +43,13 @@ enum COREMLFlags {
 
 // MLComputeUnits can be one of the following values:
 // 'MLComputeUnitsCPUAndNeuralEngine|MLComputeUnitsCPUAndGPU|MLComputeUnitsCPUOnly|MLComputeUnitsAll'
+// These values are intended to be used with Ort::SessionOptions::AppendExecutionProvider (C++ API)
+// or SessionOptionsAppendExecutionProvider (C API). For the old API, use COREMLFlags instead.
 static const char* const kCoremlProviderOption_MLComputeUnits = "MLComputeUnits";
-static const char* const kCoremlProviderOption_MLModelFormat = "MLModelFormat";
-static const char* const kCoremlProviderOption_MLAllowStaticInputShapes = "MLAllowStaticInputShapes";
-static const char* const kCoremlProviderOption_MLEnableOnSubgraphs = "MLEnableOnSubgraphs";
-static const char* const kCoremlProviderOption_MLModelCacheDir = "MLModelCacheDir";
+static const char* const kCoremlProviderOption_ModelFormat = "ModelFormat";
+static const char* const kCoremlProviderOption_RequireStaticInputShapes = "RequireStaticInputShapes";
+static const char* const kCoremlProviderOption_EnableOnSubgraphs = "EnableOnSubgraphs";
+static const char* const kCoremlProviderOption_ModelCacheDir = "ModelCacheDir";
 
 #ifdef __cplusplus
 extern "C" {
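[Editor's note] For orientation, a minimal, hypothetical sketch (not part of this diff) of how the renamed string keys are consumed through the C++ API. The model path and the specific option values below are placeholders:

    // Sketch: enabling the CoreML EP via the new string-based provider options.
    #include <onnxruntime_cxx_api.h>

    #include <string>
    #include <unordered_map>

    int main() {
      Ort::Env env;
      Ort::SessionOptions so;
      // Keys correspond to the kCoremlProviderOption_* constants declared above.
      std::unordered_map<std::string, std::string> coreml_options = {
          {"ModelFormat", "MLProgram"},   // or "NeuralNetwork"
          {"MLComputeUnits", "CPUOnly"},  // or "CPUAndNeuralEngine", "CPUAndGPU", "ALL"
          {"RequireStaticInputShapes", "0"},
          {"EnableOnSubgraphs", "0"},
      };
      so.AppendExecutionProvider("CoreML", coreml_options);
      Ort::Session session(env, ORT_TSTR("model.onnx"), so);  // placeholder model path
      return 0;
    }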
diff --git a/objectivec/include/ort_coreml_execution_provider.h b/objectivec/include/ort_coreml_execution_provider.h
index 427e78cd539c8..417868a08972c 100644
--- a/objectivec/include/ort_coreml_execution_provider.h
+++ b/objectivec/include/ort_coreml_execution_provider.h
@@ -76,11 +76,18 @@ NS_ASSUME_NONNULL_BEGIN
  * decreasing priority.
  *
  * @param provider_options The CoreML execution provider options in dict.
+ *        Available key-value pairs (see onnxruntime/core/providers/coreml/coreml_provider_factory.cc for more detail):
+ *          kCoremlProviderOption_MLComputeUnits: one of "CPUAndNeuralEngine",
+ *            "CPUAndGPU", "CPUOnly", "ALL"
+ *          kCoremlProviderOption_ModelFormat: one of "MLProgram", "NeuralNetwork"
+ *          kCoremlProviderOption_RequireStaticInputShapes: "1" or "0"
+ *          kCoremlProviderOption_EnableOnSubgraphs: "1" or "0"
+ *          kCoremlProviderOption_ModelCacheDir: path to the model cache directory
  * @param error Optional error information set if an error occurs.
  * @return Whether the provider was enabled successfully.
  */
-- (BOOL)appendCoreMLExecutionProviderWithOptions_v2:(NSDictionary*)provider_options
-                                              error:(NSError**)error;
+- (BOOL)appendCoreMLExecutionProviderWithOptionsV2:(NSDictionary*)provider_options
+                                             error:(NSError**)error;
 
 @end
 
 NS_ASSUME_NONNULL_END
diff --git a/objectivec/test/ort_session_test.mm b/objectivec/test/ort_session_test.mm
index 8690aa5ee8183..409ee7e1584e2 100644
--- a/objectivec/test/ort_session_test.mm
+++ b/objectivec/test/ort_session_test.mm
@@ -226,10 +226,10 @@ - (void)testAppendCoreMLEP {
 - (void)testAppendCoreMLEP_v2 {
   NSError* err = nil;
   ORTSessionOptions* sessionOptions = [ORTSessionTest makeSessionOptions];
-  NSDictionary* provider_options = @{@"MLEnableOnSubgraphs" : @"1"};  // set an arbitrary option
+  NSDictionary* provider_options = @{@"EnableOnSubgraphs" : @"1"};  // set an arbitrary option
 
-  BOOL appendResult = [sessionOptions appendCoreMLExecutionProviderWithOptions_v2:provider_options
-                                                                            error:&err];
+  BOOL appendResult = [sessionOptions appendCoreMLExecutionProviderWithOptionsV2:provider_options
+                                                                           error:&err];
 
   if (!ORTIsCoreMLExecutionProviderAvailable()) {
     ORTAssertBoolResultUnsuccessful(appendResult, err);
diff --git a/onnxruntime/core/providers/coreml/coreml_execution_provider.cc b/onnxruntime/core/providers/coreml/coreml_execution_provider.cc
index f3e8bd9b0e2af..7044150334a7b 100644
--- a/onnxruntime/core/providers/coreml/coreml_execution_provider.cc
+++ b/onnxruntime/core/providers/coreml/coreml_execution_provider.cc
@@ -23,9 +23,52 @@ namespace onnxruntime {
 
 constexpr const char* COREML = "CoreML";
 
+void CoreMLOptions::ValidateAndParseProviderOption(const ProviderOptions& options) {
+  const std::unordered_map<std::string_view, COREMLFlags> available_computeunits_options = {
+      {"CPUAndNeuralEngine", COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE},
+      {"CPUAndGPU", COREML_FLAG_USE_CPU_AND_GPU},
+      {"CPUOnly", COREML_FLAG_USE_CPU_ONLY},
+      {"ALL", COREML_FLAG_USE_NONE},
+  };
+  const std::unordered_map<std::string_view, COREMLFlags> available_modelformat_options = {
+      {"MLProgram", COREML_FLAG_CREATE_MLPROGRAM},
+      {"NeuralNetwork", COREML_FLAG_USE_NONE},
+  };
+  std::unordered_set<std::string_view> valid_options = {
+      kCoremlProviderOption_MLComputeUnits,
+      kCoremlProviderOption_ModelFormat,
+      kCoremlProviderOption_RequireStaticInputShapes,
+      kCoremlProviderOption_EnableOnSubgraphs,
+      kCoremlProviderOption_ModelCacheDir,
+  };
+  // Validate the options
+  for (const auto& option : options) {
+    if (valid_options.find(option.first) == valid_options.end()) {
+      ORT_THROW("Unknown option: ", option.first);
+    }
+    if (kCoremlProviderOption_MLComputeUnits == option.first) {
+      if (available_computeunits_options.find(option.second) == available_computeunits_options.end()) {
+        ORT_THROW("Invalid value for option ", option.first, ": ", option.second);
+      } else {
+        coreml_flags_ |= available_computeunits_options.at(option.second);
+      }
+    } else if (kCoremlProviderOption_ModelFormat == option.first) {
+      if (available_modelformat_options.find(option.second) == available_modelformat_options.end()) {
+        ORT_THROW("Invalid value for option ", option.first, ": ", option.second);
+      } else {
+        coreml_flags_ |= available_modelformat_options.at(option.second);
+      }
+    } else if (kCoremlProviderOption_RequireStaticInputShapes == option.first) {
+      coreml_flags_ |= COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES;
+    } else if (kCoremlProviderOption_EnableOnSubgraphs == option.first) {
+      coreml_flags_ |= COREML_FLAG_ENABLE_ON_SUBGRAPH;
+    }
+  }
+}
+
 CoreMLExecutionProvider::CoreMLExecutionProvider(const CoreMLOptions& options)
     : IExecutionProvider{onnxruntime::kCoreMLExecutionProvider},
-      coreml_flags_(options.coreml_flags),
+      coreml_flags_(options.CoreMLFlags()),
       coreml_version_(coreml::util::CoreMLVersion()) {
   LOGS_DEFAULT(VERBOSE) << "CoreML version: " << coreml_version_;
   if (coreml_version_ < MINIMUM_COREML_VERSION) {
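[Editor's note] To make the validation behavior above concrete, a hypothetical snippet (ProviderOptions is ORT's std::unordered_map<std::string, std::string> alias; the values are examples only):

    ProviderOptions opts = {{"MLComputeUnits", "CPUOnly"}, {"ModelFormat", "MLProgram"}};
    CoreMLOptions ok(opts);  // coreml_flags_ == COREML_FLAG_USE_CPU_ONLY | COREML_FLAG_CREATE_MLPROGRAM
    // CoreMLOptions bad({{"MLComputeUnits", "GPUOnly"}});  // throws: Invalid value for option MLComputeUnits: GPUOnly
    // CoreMLOptions bad2({{"Typo", "1"}});                 // throws: Unknown option: Typo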
diff --git a/onnxruntime/core/providers/coreml/coreml_execution_provider.h b/onnxruntime/core/providers/coreml/coreml_execution_provider.h
index d37f6bdc2732d..45679655ba429 100644
--- a/onnxruntime/core/providers/coreml/coreml_execution_provider.h
+++ b/onnxruntime/core/providers/coreml/coreml_execution_provider.h
@@ -12,9 +12,18 @@ namespace coreml {
 class Model;
 }
 
-struct CoreMLOptions {
-  uint32_t coreml_flags = 0;
-  std::string cache_path;
+class CoreMLOptions {
+  uint32_t coreml_flags_ = 0;
+
+ public:
+  CoreMLOptions(uint32_t coreml_flags) : coreml_flags_(coreml_flags) {}
+  CoreMLOptions(const ProviderOptions& options) {
+    ValidateAndParseProviderOption(options);
+  }
+  uint32_t CoreMLFlags() const { return coreml_flags_; }
+
+ private:
+  void ValidateAndParseProviderOption(const ProviderOptions& options);
 };
 
 class CoreMLExecutionProvider : public IExecutionProvider {
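[Editor's note] The class deliberately keeps two construction paths: the uint32_t overload preserves the legacy COREMLFlags-based factory, while the ProviderOptions overload backs the new string-key API. A hypothetical sketch:

    CoreMLOptions legacy(COREML_FLAG_USE_CPU_ONLY | COREML_FLAG_CREATE_MLPROGRAM);  // old flag path
    CoreMLOptions parsed(ProviderOptions{{"MLComputeUnits", "CPUOnly"}});           // new string-key path
    uint32_t flags = parsed.CoreMLFlags();  // both expose the merged bit flags the provider consumes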
diff --git a/onnxruntime/core/providers/coreml/coreml_provider_factory.cc b/onnxruntime/core/providers/coreml/coreml_provider_factory.cc
index 58c84f37930ef..bc8702d3290f6 100644
--- a/onnxruntime/core/providers/coreml/coreml_provider_factory.cc
+++ b/onnxruntime/core/providers/coreml/coreml_provider_factory.cc
@@ -10,55 +10,6 @@ using namespace onnxruntime;
 
 namespace onnxruntime {
 
-namespace {
-CoreMLOptions ParseProviderOption(const ProviderOptions& options) {
-  CoreMLOptions coreml_options;
-  const std::unordered_map<std::string_view, COREMLFlags> available_computeunits_options = {
-      {"MLComputeUnitsCPUAndNeuralEngine", COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE},
-      {"MLComputeUnitsCPUAndGPU", COREML_FLAG_USE_CPU_AND_GPU},
-      {"MLComputeUnitsCPUOnly", COREML_FLAG_USE_CPU_ONLY},
-      {"MLComputeUnitsAll", COREML_FLAG_USE_NONE},
-  };
-  const std::unordered_map<std::string_view, COREMLFlags> available_modelformat_options = {
-      {"MLProgram", COREML_FLAG_CREATE_MLPROGRAM},
-      {"NeuralNetwork", COREML_FLAG_USE_NONE},
-  };
-  std::unordered_set<std::string_view> valid_options = {
-      kCoremlProviderOption_MLComputeUnits,
-      kCoremlProviderOption_MLModelFormat,
-      kCoremlProviderOption_MLAllowStaticInputShapes,
-      kCoremlProviderOption_MLEnableOnSubgraphs,
-      kCoremlProviderOption_MLModelCacheDir,
-  };
-  // Validate the options
-  for (const auto& option : options) {
-    if (valid_options.find(option.first) == valid_options.end()) {
-      ORT_THROW("Unknown option: ", option.first);
-    }
-    if (kCoremlProviderOption_MLComputeUnits == option.first) {
-      if (available_computeunits_options.find(option.second) == available_computeunits_options.end()) {
-        ORT_THROW("Invalid value for option ", option.first, ": ", option.second);
-      } else {
-        coreml_options.coreml_flags |= available_computeunits_options.at(option.second);
-      }
-    } else if (kCoremlProviderOption_MLModelFormat == option.first) {
-      if (available_modelformat_options.find(option.second) == available_modelformat_options.end()) {
-        ORT_THROW("Invalid value for option ", option.first, ": ", option.second);
-      } else {
-        coreml_options.coreml_flags |= available_modelformat_options.at(option.second);
-      }
-    } else if (kCoremlProviderOption_MLAllowStaticInputShapes == option.first) {
-      coreml_options.coreml_flags |= COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES;
-    } else if (kCoremlProviderOption_MLEnableOnSubgraphs == option.first) {
-      coreml_options.coreml_flags |= COREML_FLAG_ENABLE_ON_SUBGRAPH;
-    } else if (kCoremlProviderOption_MLModelCacheDir == option.first) {
-      coreml_options.cache_path = option.second;
-    }
-  }
-
-  return coreml_options;
-}
-}  // namespace
 
 struct CoreMLProviderFactory : IExecutionProviderFactory {
   CoreMLProviderFactory(const CoreMLOptions& options)
       : options_(options) {}
@@ -73,13 +24,13 @@ std::unique_ptr<IExecutionProvider> CoreMLProviderFactory::CreateProvider() {
 }
 
 std::shared_ptr<IExecutionProviderFactory> CoreMLProviderFactoryCreator::Create(uint32_t coreml_flags) {
-  CoreMLOptions coreml_options;
-  coreml_options.coreml_flags = coreml_flags;
+  CoreMLOptions coreml_options(coreml_flags);
   return std::make_shared<CoreMLProviderFactory>(coreml_options);
 }
 
 std::shared_ptr<IExecutionProviderFactory> CoreMLProviderFactoryCreator::Create(const ProviderOptions& options) {
-  return std::make_shared<CoreMLProviderFactory>(ParseProviderOption(options));
+  CoreMLOptions coreml_options(options);
+  return std::make_shared<CoreMLProviderFactory>(coreml_options);
 }
 }  // namespace onnxruntime
diff --git a/onnxruntime/test/perftest/command_args_parser.cc b/onnxruntime/test/perftest/command_args_parser.cc
index 23a1ad97e6df4..8f840cede63a0 100644
--- a/onnxruntime/test/perftest/command_args_parser.cc
+++ b/onnxruntime/test/perftest/command_args_parser.cc
@@ -131,7 +131,7 @@ namespace perftest {
      "\t [Example] [For NNAPI EP] -e nnapi -i \"NNAPI_FLAG_USE_FP16 NNAPI_FLAG_USE_NCHW NNAPI_FLAG_CPU_DISABLED\"\n"
      "\n"
      "\t [CoreML only] [ModelFormat]:[MLProgram, NeuralNetwork] Create an ML Program model or Neural Network. Default is NeuralNetwork.\n"
-     "\t [CoreML only] [MLComputeUnits]:[CPUAndNeuralEngine CPUAndGPU CPUAndGPU CPUOnly] Specify to limit the backend device/s used to run the model.\n"
+     "\t [CoreML only] [MLComputeUnits]:[CPUAndNeuralEngine CPUAndGPU ALL CPUOnly] Specify to limit the backend device used to run the model.\n"
      "\t [CoreML only] [AllowStaticInputShapes]:[0 1].\n"
      "\t [CoreML only] [EnableOnSubgraphs]:[0 1].\n"
      "\t [CoreML only] [ModelCacheDir]: a path to cached compiled coreml model.\n"
diff --git a/onnxruntime/test/perftest/ort_test_session.cc b/onnxruntime/test/perftest/ort_test_session.cc
index b715e32cad3d0..a0f01eda93873 100644
--- a/onnxruntime/test/perftest/ort_test_session.cc
+++ b/onnxruntime/test/perftest/ort_test_session.cc
@@ -74,7 +74,7 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device
     std::string ov_string = performance_test_config.run_config.ep_runtime_config_string;
 #endif  // defined(_MSC_VER)
     int num_threads = 0;
-    if (!ParseSessionConfigs(ov_string, provider_options)) {
+    if (!ParseSessionConfigs(ov_string, provider_options, {"num_of_threads"})) {
       ORT_THROW(
           "[ERROR] Use a '|' to separate the key and value for the "
           "run-time option you are trying to use.\n");
@@ -89,10 +89,6 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device
             " set number of threads or use '0' for default\n");
         // If the user doesnt define num_threads, auto detect threads later
       }
-    } else {
-      ORT_THROW(
-          "[ERROR] [OneDNN] wrong key type entered. "
-          "Choose from the following runtime key options that are available for OneDNN. ['num_of_threads']\n");
['num_of_threads']\n"); } } dnnl_options.threadpool_args = static_cast(&num_threads); @@ -214,7 +210,7 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device #else std::string option_string = performance_test_config.run_config.ep_runtime_config_string; #endif - if (!ParseSessionConfigs(option_string, provider_options)) { + if (!ParseSessionConfigs(option_string, provider_options, {"backend_path", "profiling_file_path", "profiling_level", "rpc_control_latency", "vtcm_mb", "soc_model", "device_id", "htp_performance_mode", "qnn_saver_path", "htp_graph_finalization_optimization_mode", "qnn_context_priority", "htp_arch", "enable_htp_fp16_precision", "offload_graph_io_quantization"})) { ORT_THROW( "[ERROR] Use a '|' to separate the key and value for the " "run-time option you are trying to use.\n"); @@ -278,11 +274,6 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device std::string str = str_stream.str(); ORT_THROW("Wrong value for ", key, ". select from: ", str); } - } else { - ORT_THROW(R"(Wrong key type entered. Choose from options: ['backend_path', -'profiling_level', 'profiling_file_path', 'rpc_control_latency', 'vtcm_mb', 'htp_performance_mode', -'qnn_saver_path', 'htp_graph_finalization_optimization_mode', 'qnn_context_priority', 'soc_model', -'htp_arch', 'device_id', 'enable_htp_fp16_precision', 'offload_graph_io_quantization'])"); } } session_options.AppendExecutionProvider("QNN", provider_options); @@ -296,7 +287,7 @@ OnnxRuntimeTestSession::OnnxRuntimeTestSession(Ort::Env& env, std::random_device #else std::string option_string = performance_test_config.run_config.ep_runtime_config_string; #endif - if (!ParseSessionConfigs(option_string, provider_options)) { + if (!ParseSessionConfigs(option_string, provider_options, {"runtime", "priority", "buffer_type", "enable_init_cache"})) { ORT_THROW( "[ERROR] Use a '|' to separate the key and value for the " "run-time option you are trying to use.\n"); @@ -320,8 +311,6 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. \n)"); if (value != "1") { ORT_THROW("Set to 1 to enable_init_cache."); } - } else { - ORT_THROW("Wrong key type entered. Choose from options: ['runtime', 'priority', 'buffer_type', 'enable_init_cache'] \n"); } } @@ -370,32 +359,27 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. 
\n)"); #ifdef __APPLE__ #ifdef USE_COREML std::string ov_string = performance_test_config.run_config.ep_runtime_config_string; - if (!ParseSessionConfigs(ov_string, provider_options)) { + if (!ParseSessionConfigs(ov_string, provider_options, {kCoremlProviderOption_MLComputeUnits, kCoremlProviderOption_ModelFormat, kCoremlProviderOption_RequireStaticInputShapes, kCoremlProviderOption_EnableOnSubgraphs})) { ORT_THROW( "[ERROR] Use a '|' to separate the key and value for the " "run-time option you are trying to use.\n"); } std::unordered_map available_options = { - {"MLComputeUnitsCPUAndNeuralEngine", "1"}, - {"MLComputeUnitsCPUAndGPU", "1"}, - {"MLComputeUnitsCPUOnly", "1"}, - {"MLComputeUnitsAll", "1"}, + {"CPUAndNeuralEngine", "1"}, + {"CPUAndGPU", "1"}, + {"CPUOnly", "1"}, + {"ALL", "1"}, }; for (const auto& provider_option : provider_options) { if (provider_option.first == kCoremlProviderOption_MLComputeUnits && available_options.find(provider_option.second) != available_options.end()) { - } else if (provider_option.first == kCoremlProviderOption_MLModelFormat && + } else if (provider_option.first == kCoremlProviderOption_ModelFormat && (provider_option.second == "MLProgram" || provider_option.second == "NeuralNetwork")) { - } else if (provider_option.first == kCoremlProviderOption_MLAllowStaticInputShapes && + } else if (provider_option.first == kCoremlProviderOption_RequireStaticInputShapes && (provider_option.second == "1" || provider_option.second == "0")) { - } else if (provider_option.first == kCoremlProviderOption_MLEnableOnSubgraphs && + } else if (provider_option.first == kCoremlProviderOption_EnableOnSubgraphs && (provider_option.second == "0" || provider_option.second == "1")) { - } else { - ORT_THROW( - "[ERROR] [CoreML] wrong key type entered. Choose from the following runtime key options " - "that are available for CoreML. " - "['MLComputeUnits', 'ModelFormat', 'AllowStaticInputShapes', 'EnableOnSubgraphs'] \n"); } } // COREML_FLAG_CREATE_MLPROGRAM @@ -413,7 +397,9 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. \n)"); #else std::string ov_string = performance_test_config.run_config.ep_runtime_config_string; #endif - if (!ParseSessionConfigs(ov_string, provider_options)) { + if (!ParseSessionConfigs(ov_string, provider_options, + {"device_filter", "performance_preference", "disable_metacommands", + "enable_graph_capture", "enable_graph_serialization"})) { ORT_THROW( "[ERROR] Use a '|' to separate the key and value for the " "run-time option you are trying to use.\n"); @@ -488,7 +474,7 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. \n)"); std::string ov_string = performance_test_config.run_config.ep_runtime_config_string; #endif // defined(_MSC_VER) bool enable_fast_math = false; - if (!ParseSessionConfigs(ov_string, provider_options)) { + if (!ParseSessionConfigs(ov_string, provider_options, {"enable_fast_math"})) { ORT_THROW( "[ERROR] Use a '|' to separate the key and value for the " "run-time option you are trying to use.\n"); @@ -503,9 +489,6 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. \n)"); "[ERROR] [ACL] You have selcted an invalid value for the key 'enable_fast_math'. 
" "Select from 'true' or 'false' \n"); } - } else { - ORT_THROW( - "[ERROR] [ACL] Unrecognized option: ", key); } } Ort::ThrowOnError( diff --git a/onnxruntime/test/perftest/strings_helper.cc b/onnxruntime/test/perftest/strings_helper.cc index 22f682159b924..d0ee779ff7264 100644 --- a/onnxruntime/test/perftest/strings_helper.cc +++ b/onnxruntime/test/perftest/strings_helper.cc @@ -7,12 +7,14 @@ #include #include "strings_helper.h" +#include "core/common/common.h" namespace onnxruntime { namespace perftest { bool ParseSessionConfigs(const std::string& configs_string, - std::unordered_map& session_configs) { + std::unordered_map& session_configs, + const std::unordered_set& available_keys) { std::istringstream ss(configs_string); std::string token; @@ -25,17 +27,27 @@ bool ParseSessionConfigs(const std::string& configs_string, auto pos = token_sv.find("|"); if (pos == std::string_view::npos || pos == 0 || pos == token_sv.length()) { - // Error: must use a '|' to separate the key and value for session configuration entries. - return false; + ORT_THROW("Use a '|' to separate the key and value for the run-time option you are trying to use.\n"); } std::string key(token_sv.substr(0, pos)); std::string value(token_sv.substr(pos + 1)); + if (available_keys.empty() == false && available_keys.count(key) == 0) { + // Error: unknown option: {key} + std::string available_keys_str; + for (const auto& key : available_keys) { + available_keys_str += key; + available_keys_str += ", "; + } + ORT_THROW("[ERROR] wrong key type entered.: ", key, + " Choose from the following runtime key options that are available. ", available_keys_str); + } + auto it = session_configs.find(key); if (it != session_configs.end()) { // Error: specified duplicate session configuration entry: {key} - return false; + ORT_THROW("Specified duplicate session configuration entry: ", key); } session_configs.insert(std::make_pair(std::move(key), std::move(value))); diff --git a/onnxruntime/test/perftest/strings_helper.h b/onnxruntime/test/perftest/strings_helper.h index 24feb90a20a61..b4b5704ebb2cd 100644 --- a/onnxruntime/test/perftest/strings_helper.h +++ b/onnxruntime/test/perftest/strings_helper.h @@ -4,11 +4,13 @@ // Licensed under the MIT License. 
 #include <string>
 #include <unordered_map>
+#include <unordered_set>
 
 namespace onnxruntime {
 namespace perftest {
 
 bool ParseSessionConfigs(const std::string& configs_string,
-                         std::unordered_map<std::string, std::string>& session_configs);
+                         std::unordered_map<std::string, std::string>& session_configs,
+                         const std::unordered_set<std::string>& available_keys = {});
 }  // namespace perftest
 }  // namespace onnxruntime
diff --git a/onnxruntime/test/platform/apple/apple_package_test/ios_package_testUITests/ios_package_uitest_cpp_api.mm b/onnxruntime/test/platform/apple/apple_package_test/ios_package_testUITests/ios_package_uitest_cpp_api.mm
index 8fe382b8fe246..fa95c1fc52b94 100644
--- a/onnxruntime/test/platform/apple/apple_package_test/ios_package_testUITests/ios_package_uitest_cpp_api.mm
+++ b/onnxruntime/test/platform/apple/apple_package_test/ios_package_testUITests/ios_package_uitest_cpp_api.mm
@@ -36,7 +36,7 @@ void testSigmoid(const char* modelPath, bool useCoreML = false, bool useWebGPU =
 #if COREML_EP_AVAILABLE
   if (useCoreML) {
     std::unordered_map<std::string, std::string> provider_options = {
-        {kCoremlProviderOption_MLComputeUnits, "MLComputeUnitsCPUOnly"}};
+        {kCoremlProviderOption_MLComputeUnits, "CPUOnly"}};
     session_options.AppendExecutionProvider("CoreML", provider_options);
   }
 #else
diff --git a/onnxruntime/test/platform/apple/apple_package_test/macos_package_testUITests/macos_package_uitest_cpp_api.mm b/onnxruntime/test/platform/apple/apple_package_test/macos_package_testUITests/macos_package_uitest_cpp_api.mm
index 8a877bc4c9f7a..b53a4a2df09b4 100644
--- a/onnxruntime/test/platform/apple/apple_package_test/macos_package_testUITests/macos_package_uitest_cpp_api.mm
+++ b/onnxruntime/test/platform/apple/apple_package_test/macos_package_testUITests/macos_package_uitest_cpp_api.mm
@@ -36,7 +36,7 @@ void testSigmoid(const char* modelPath, bool useCoreML = false, bool useWebGPU =
 #if COREML_EP_AVAILABLE
   if (useCoreML) {
     std::unordered_map<std::string, std::string> provider_options = {
-        {kCoremlProviderOption_MLComputeUnits, "MLComputeUnitsCPUOnly"}};
+        {kCoremlProviderOption_MLComputeUnits, "CPUOnly"}};
     session_options.AppendExecutionProvider("CoreML", provider_options);
   }
 #else
diff --git a/onnxruntime/test/providers/coreml/coreml_basic_test.cc b/onnxruntime/test/providers/coreml/coreml_basic_test.cc
index acedfd1241021..a8480e7416de5 100644
--- a/onnxruntime/test/providers/coreml/coreml_basic_test.cc
+++ b/onnxruntime/test/providers/coreml/coreml_basic_test.cc
@@ -31,9 +31,9 @@ namespace onnxruntime {
 namespace test {
 
 static std::unique_ptr<IExecutionProvider> MakeCoreMLExecutionProvider(
-    std::string ModelFormat = "NeuralNetwork", std::string ComputeUnits = "MLComputeUnitsCPUOnly") {
+    std::string ModelFormat = "NeuralNetwork", std::string ComputeUnits = "CPUOnly") {
   std::unordered_map<std::string, std::string> provider_options = {{kCoremlProviderOption_MLComputeUnits, ComputeUnits},
-                                                                   {kCoremlProviderOption_MLModelFormat, ModelFormat}};
+                                                                   {kCoremlProviderOption_ModelFormat, ModelFormat}};
   return CoreMLProviderFactoryCreator::Create(provider_options)->CreateProvider();
 }
diff --git a/onnxruntime/test/providers/coreml/dynamic_input_test.cc b/onnxruntime/test/providers/coreml/dynamic_input_test.cc
index af81e75e5fe53..8294f65745256 100644
--- a/onnxruntime/test/providers/coreml/dynamic_input_test.cc
+++ b/onnxruntime/test/providers/coreml/dynamic_input_test.cc
@@ -102,7 +102,7 @@ TEST(CoreMLExecutionProviderDynamicInputShapeTest, EmptyInputFails) {
 TEST(CoreMLExecutionProviderDynamicInputShapeTest, OnlyAllowStaticInputShapes) {
   constexpr auto model_path = ORT_TSTR("testdata/matmul_with_dynamic_input_shape.onnx");
 
-  std::unordered_map<std::string, std::string> options = {{kCoremlProviderOption_MLAllowStaticInputShapes, "1"}};
+  std::unordered_map<std::string, std::string> options = {{kCoremlProviderOption_RequireStaticInputShapes, "1"}};
 
   auto coreml_ep = CoreMLProviderFactoryCreator::Create(options)->CreateProvider();
   ;
diff --git a/onnxruntime/test/util/default_providers.cc b/onnxruntime/test/util/default_providers.cc
index 2b435c7cdcf67..b27f3116911e9 100644
--- a/onnxruntime/test/util/default_providers.cc
+++ b/onnxruntime/test/util/default_providers.cc
@@ -254,10 +254,10 @@ std::unique_ptr<IExecutionProvider> DefaultCoreMLExecutionProvider(bool use_mlpr
 #if defined(USE_COREML) && defined(__APPLE__)
   // We want to run UT on CPU only to get output value without losing precision
   auto option = ProviderOptions();
-  option[kCoremlProviderOption_MLComputeUnits] = "MLComputeUnitsCPUOnly";
+  option[kCoremlProviderOption_MLComputeUnits] = "CPUOnly";
 
   if (use_mlprogram) {
-    option[kCoremlProviderOption_MLModelFormat] = "MLProgram";
+    option[kCoremlProviderOption_ModelFormat] = "MLProgram";
   }
 
   return CoreMLProviderFactoryCreator::Create(option)->CreateProvider();
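[Editor's note] To round out the strings_helper change above, a hypothetical sketch of how the new available_keys allow-list behaves for perftest callers (keys and values are illustrative only):

    using onnxruntime::perftest::ParseSessionConfigs;

    std::unordered_map<std::string, std::string> configs;
    ParseSessionConfigs("num_of_threads|4", configs, {"num_of_threads"});  // ok: configs["num_of_threads"] == "4"
    // ParseSessionConfigs("threads|4", configs, {"num_of_threads"});  // would ORT_THROW: wrong key type entered
    // ParseSessionConfigs("num_of_threads4", configs);                // would ORT_THROW: missing '|' separator
    // An empty allow-list (the default) accepts any key, matching the old behavior.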