Skip to content

Commit

Permalink
[CoreML ML Program] support accelerator selector (#22383)
Browse files Browse the repository at this point in the history
### Description
For now, the CoreML EP only supports running ML models on the CPU-only or ALL
compute units. However, the CPU_AND_GPU option can sometimes be much faster.

This PR adds an option to select which compute hardware CoreML should use,
in order to boost performance.



### Motivation and Context
<!-- - Why is this change required? What problem does it solve?
- If it fixes an open issue, please link to the issue here. -->

---------

Co-authored-by: Edward Chen <[email protected]>
  • Loading branch information
wejoncy and edgchen1 authored Oct 15, 2024
1 parent 8c21680 commit 20a45dd
Show file tree
Hide file tree
Showing 12 changed files with 46 additions and 8 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,8 @@ public enum CoreMLFlags : uint
COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004,
COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008,
COREML_FLAG_CREATE_MLPROGRAM = 0x010,
COREML_FLAG_LAST = COREML_FLAG_CREATE_MLPROGRAM,
COREML_FLAG_USE_CPU_AND_GPU = 0x020,
COREML_FLAG_LAST = COREML_FLAG_USE_CPU_AND_GPU,
}

/// <summary>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,14 @@ enum COREMLFlags {
// Create an MLProgram. By default it will create a NeuralNetwork model. Requires Core ML 5 or later.
COREML_FLAG_CREATE_MLPROGRAM = 0x010,

// Exclude ANE as sometimes this decrease performance
// https://developer.apple.com/documentation/coreml/mlcomputeunits?language=objc
// there are four compute units:
// MLComputeUnitsCPUAndNeuralEngine|MLComputeUnitsCPUAndGPU|MLComputeUnitsCPUOnly|MLComputeUnitsAll
COREML_FLAG_USE_CPU_AND_GPU = 0x020,
// Keep COREML_FLAG_LAST at the end of the enum definition
// And assign the last COREMLFlag to it
COREML_FLAG_LAST = COREML_FLAG_CREATE_MLPROGRAM,
COREML_FLAG_LAST = COREML_FLAG_USE_CPU_AND_GPU,
};

#ifdef __cplusplus
Expand Down
4 changes: 3 additions & 1 deletion java/src/main/java/ai/onnxruntime/providers/CoreMLFlags.java
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@ public enum CoreMLFlags implements OrtFlags {
* Create an MLProgram. By default it will create a NeuralNetwork model. Requires Core ML 5 or
* later.
*/
CREATE_MLPROGRAM(16); // COREML_FLAG_CREATE_MLPROGRAM(0x010)
CREATE_MLPROGRAM(16), // COREML_FLAG_CREATE_MLPROGRAM(0x010)
/** exclude ANE */
CPU_AND_GPU(32); // COREML_FLAG_USE_CPU_AND_GPU(0x020)

/** The native value of the enum. */
public final int value;
Expand Down
2 changes: 2 additions & 0 deletions js/common/lib/inference-session.ts
Original file line number Diff line number Diff line change
Expand Up @@ -320,6 +320,7 @@ export declare namespace InferenceSession {
* COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004
* COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008
* COREML_FLAG_CREATE_MLPROGRAM = 0x010
* COREML_FLAG_USE_CPU_AND_GPU = 0x020
* ```
*
* See include/onnxruntime/core/providers/coreml/coreml_provider_factory.h for more details.
Expand All @@ -333,6 +334,7 @@ export declare namespace InferenceSession {
* This setting is available only in ONNXRuntime (react-native).
*/
useCPUOnly?: boolean;
useCPUAndGPU?: boolean;
/**
* Specify whether to enable CoreML EP on subgraph.
*
Expand Down
2 changes: 2 additions & 0 deletions js/react_native/ios/OnnxruntimeModule.mm
Original file line number Diff line number Diff line change
Expand Up @@ -389,6 +389,8 @@ - (NSDictionary*)run:(NSString*)url
if (useOptions) {
if ([[executionProvider objectForKey:@"useCPUOnly"] boolValue]) {
coreml_flags |= COREML_FLAG_USE_CPU_ONLY;
} else if ([[executionProvider objectForKey:@"useCPUAndGPU"] boolValue]) {
coreml_flags |= COREML_FLAG_USE_CPU_AND_GPU;
}
if ([[executionProvider objectForKey:@"enableOnSubgraph"] boolValue]) {
coreml_flags |= COREML_FLAG_ENABLE_ON_SUBGRAPH;
Expand Down
5 changes: 4 additions & 1 deletion objectivec/include/ort_coreml_execution_provider.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,10 @@ NS_ASSUME_NONNULL_BEGIN
* Whether the CoreML execution provider should run on CPU only.
*/
@property BOOL useCPUOnly;

/**
* exclude ANE in CoreML.
*/
@property BOOL useCPUAndGPU;
/**
* Whether the CoreML execution provider is enabled on subgraphs.
*/
Expand Down
1 change: 1 addition & 0 deletions objectivec/ort_coreml_execution_provider.mm
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ - (BOOL)appendCoreMLExecutionProviderWithOptions:(ORTCoreMLExecutionProviderOpti
try {
const uint32_t flags =
(options.useCPUOnly ? COREML_FLAG_USE_CPU_ONLY : 0) |
(options.useCPUAndGPU ? COREML_FLAG_USE_CPU_AND_GPU : 0) |
(options.enableOnSubgraphs ? COREML_FLAG_ENABLE_ON_SUBGRAPH : 0) |
(options.onlyEnableForDevicesWithANE ? COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE : 0) |
(options.onlyAllowStaticInputShapes ? COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES : 0) |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,14 @@ CoreMLExecutionProvider::CoreMLExecutionProvider(uint32_t coreml_flags)
LOGS_DEFAULT(ERROR) << "CoreML EP is not supported on this platform.";
}

// check if only one flag is set
if ((coreml_flags & COREML_FLAG_USE_CPU_ONLY) && (coreml_flags & COREML_FLAG_USE_CPU_AND_GPU)) {
// multiple device options selected
ORT_THROW(
"Multiple device options selected, you should use at most one of the following options:"
"COREML_FLAG_USE_CPU_ONLY or COREML_FLAG_USE_CPU_AND_GPU or not set");
}

#if defined(COREML_ENABLE_MLPROGRAM)
if (coreml_version_ < MINIMUM_COREML_MLPROGRAM_VERSION &&
(coreml_flags_ & COREML_FLAG_CREATE_MLPROGRAM) != 0) {
Expand Down
12 changes: 9 additions & 3 deletions onnxruntime/core/providers/coreml/model/model.mm
Original file line number Diff line number Diff line change
Expand Up @@ -395,9 +395,15 @@ Status Predict(const std::unordered_map<std::string, OnnxTensorData>& inputs,
compiled_model_path_ = [compileUrl path];

MLModelConfiguration* config = [[MLModelConfiguration alloc] init];
config.computeUnits = (coreml_flags_ & COREML_FLAG_USE_CPU_ONLY)
? MLComputeUnitsCPUOnly
: MLComputeUnitsAll;

if (coreml_flags_ & COREML_FLAG_USE_CPU_ONLY) {
config.computeUnits = MLComputeUnitsCPUOnly;
} else if (coreml_flags_ & COREML_FLAG_USE_CPU_AND_GPU) {
config.computeUnits = MLComputeUnitsCPUAndGPU;
} else {
config.computeUnits = MLComputeUnitsAll;
}

model_ = [MLModel modelWithContentsOfURL:compileUrl configuration:config error:&error];

if (error != nil || model_ == nil) {
Expand Down
2 changes: 2 additions & 0 deletions onnxruntime/python/onnxruntime_pybind_state.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1213,6 +1213,8 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(

if (flags_str.find("COREML_FLAG_USE_CPU_ONLY") != std::string::npos) {
coreml_flags |= COREMLFlags::COREML_FLAG_USE_CPU_ONLY;
} else if (flags_str.find("COREML_FLAG_USE_CPU_AND_GPU") != std::string::npos) {
coreml_flags |= COREMLFlags::COREML_FLAG_USE_CPU_AND_GPU;
}

if (flags_str.find("COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES") != std::string::npos) {
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/perftest/command_args_parser.cc
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ namespace perftest {
"\t [NNAPI only] [NNAPI_FLAG_CPU_ONLY]: Using CPU only in NNAPI EP.\n"
"\t [Example] [For NNAPI EP] -e nnapi -i \"NNAPI_FLAG_USE_FP16 NNAPI_FLAG_USE_NCHW NNAPI_FLAG_CPU_DISABLED\"\n"
"\n"
"\t [CoreML only] [COREML_FLAG_CREATE_MLPROGRAM]: Create an ML Program model instead of Neural Network.\n"
"\t [CoreML only] [COREML_FLAG_CREATE_MLPROGRAM COREML_FLAG_USE_CPU_ONLY COREML_FLAG_USE_CPU_AND_GPU]: Create an ML Program model instead of Neural Network.\n"
"\t [Example] [For CoreML EP] -e coreml -i \"COREML_FLAG_CREATE_MLPROGRAM\"\n"
"\n"
"\t [SNPE only] [runtime]: SNPE runtime, options: 'CPU', 'GPU', 'GPU_FLOAT16', 'DSP', 'AIP_FIXED_TF'. \n"
Expand Down
6 changes: 6 additions & 0 deletions onnxruntime/test/perftest/ort_test_session.cc
Original file line number Diff line number Diff line change
Expand Up @@ -425,6 +425,12 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. \n)");
if (key == "COREML_FLAG_CREATE_MLPROGRAM") {
coreml_flags |= COREML_FLAG_CREATE_MLPROGRAM;
std::cout << "Enabling ML Program.\n";
} else if (key == "COREML_FLAG_USE_CPU_ONLY") {
coreml_flags |= COREML_FLAG_USE_CPU_ONLY;
std::cout << "CoreML enabled COREML_FLAG_USE_CPU_ONLY.\n";
} else if (key == "COREML_FLAG_USE_CPU_AND_GPU") {
coreml_flags |= COREML_FLAG_USE_CPU_AND_GPU;
std::cout << "CoreML enabled COREML_FLAG_USE_CPU_AND_GPU.\n";
} else if (key.empty()) {
} else {
ORT_THROW(
Expand Down

0 comments on commit 20a45dd

Please sign in to comment.