Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CoreML ML Program] support accelerators selector #22383

Merged
merged 11 commits into from
Oct 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -330,7 +330,8 @@ public enum CoreMLFlags : uint
COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004,
COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008,
COREML_FLAG_CREATE_MLPROGRAM = 0x010,
COREML_FLAG_LAST = COREML_FLAG_CREATE_MLPROGRAM,
COREML_FLAG_USE_CPU_AND_GPU = 0x020,
COREML_FLAG_LAST = COREML_FLAG_USE_CPU_AND_GPU,
}

/// <summary>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,14 @@ enum COREMLFlags {
// Create an MLProgram. By default it will create a NeuralNetwork model. Requires Core ML 5 or later.
COREML_FLAG_CREATE_MLPROGRAM = 0x010,

// Exclude ANE (Apple Neural Engine) as sometimes this decreases performance
// https://developer.apple.com/documentation/coreml/mlcomputeunits?language=objc
// there are four compute units:
// MLComputeUnitsCPUAndNeuralEngine|MLComputeUnitsCPUAndGPU|MLComputeUnitsCPUOnly|MLComputeUnitsAll
COREML_FLAG_USE_CPU_AND_GPU = 0x020,
wejoncy marked this conversation as resolved.
Show resolved Hide resolved
// Keep COREML_FLAG_LAST at the end of the enum definition
// and assign the highest flag value to it
COREML_FLAG_LAST = COREML_FLAG_CREATE_MLPROGRAM,
COREML_FLAG_LAST = COREML_FLAG_USE_CPU_AND_GPU,
};

#ifdef __cplusplus
Expand Down
4 changes: 3 additions & 1 deletion java/src/main/java/ai/onnxruntime/providers/CoreMLFlags.java
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@ public enum CoreMLFlags implements OrtFlags {
* Create an MLProgram. By default it will create a NeuralNetwork model. Requires Core ML 5 or
* later.
*/
CREATE_MLPROGRAM(16); // COREML_FLAG_CREATE_MLPROGRAM(0x010)
CREATE_MLPROGRAM(16), // COREML_FLAG_CREATE_MLPROGRAM(0x010)
/** Use CPU and GPU only, excluding the Apple Neural Engine (ANE). */
CPU_AND_GPU(32); // COREML_FLAG_USE_CPU_AND_GPU(0x020)

/** The native value of the enum. */
public final int value;
Expand Down
2 changes: 2 additions & 0 deletions js/common/lib/inference-session.ts
Original file line number Diff line number Diff line change
Expand Up @@ -320,6 +320,7 @@ export declare namespace InferenceSession {
* COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE = 0x004
* COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES = 0x008
* COREML_FLAG_CREATE_MLPROGRAM = 0x010
* COREML_FLAG_USE_CPU_AND_GPU = 0x020
* ```
*
* See include/onnxruntime/core/providers/coreml/coreml_provider_factory.h for more details.
Expand All @@ -333,6 +334,7 @@ export declare namespace InferenceSession {
* This setting is available only in ONNXRuntime (react-native).
*/
useCPUOnly?: boolean;
useCPUAndGPU?: boolean;
/**
* Specify whether to enable CoreML EP on subgraph.
*
Expand Down
2 changes: 2 additions & 0 deletions js/react_native/ios/OnnxruntimeModule.mm
Original file line number Diff line number Diff line change
Expand Up @@ -389,6 +389,8 @@ - (NSDictionary*)run:(NSString*)url
if (useOptions) {
if ([[executionProvider objectForKey:@"useCPUOnly"] boolValue]) {
coreml_flags |= COREML_FLAG_USE_CPU_ONLY;
} else if ([[executionProvider objectForKey:@"useCPUAndGPU"] boolValue]) {
coreml_flags |= COREML_FLAG_USE_CPU_AND_GPU;
}
if ([[executionProvider objectForKey:@"enableOnSubgraph"] boolValue]) {
coreml_flags |= COREML_FLAG_ENABLE_ON_SUBGRAPH;
Expand Down
5 changes: 4 additions & 1 deletion objectivec/include/ort_coreml_execution_provider.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,10 @@ NS_ASSUME_NONNULL_BEGIN
* Whether the CoreML execution provider should run on CPU only.
*/
@property BOOL useCPUOnly;

/**
* Whether the CoreML execution provider should run on CPU and GPU only,
* excluding the Apple Neural Engine (ANE).
*/
@property BOOL useCPUAndGPU;
/**
* Whether the CoreML execution provider is enabled on subgraphs.
*/
Expand Down
1 change: 1 addition & 0 deletions objectivec/ort_coreml_execution_provider.mm
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ - (BOOL)appendCoreMLExecutionProviderWithOptions:(ORTCoreMLExecutionProviderOpti
try {
const uint32_t flags =
(options.useCPUOnly ? COREML_FLAG_USE_CPU_ONLY : 0) |
(options.useCPUAndGPU ? COREML_FLAG_USE_CPU_AND_GPU : 0) |
(options.enableOnSubgraphs ? COREML_FLAG_ENABLE_ON_SUBGRAPH : 0) |
(options.onlyEnableForDevicesWithANE ? COREML_FLAG_ONLY_ENABLE_DEVICE_WITH_ANE : 0) |
(options.onlyAllowStaticInputShapes ? COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES : 0) |
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,14 @@ CoreMLExecutionProvider::CoreMLExecutionProvider(uint32_t coreml_flags)
LOGS_DEFAULT(ERROR) << "CoreML EP is not supported on this platform.";
}

// check if only one flag is set
if ((coreml_flags & COREML_FLAG_USE_CPU_ONLY) && (coreml_flags & COREML_FLAG_USE_CPU_AND_GPU)) {
// multiple device options selected
ORT_THROW(
"Multiple device options selected, you should use at most one of the following options:"
"COREML_FLAG_USE_CPU_ONLY or COREML_FLAG_USE_CPU_AND_GPU or not set");
}

#if defined(COREML_ENABLE_MLPROGRAM)
if (coreml_version_ < MINIMUM_COREML_MLPROGRAM_VERSION &&
(coreml_flags_ & COREML_FLAG_CREATE_MLPROGRAM) != 0) {
Expand Down
12 changes: 9 additions & 3 deletions onnxruntime/core/providers/coreml/model/model.mm
Original file line number Diff line number Diff line change
Expand Up @@ -395,9 +395,15 @@ Status Predict(const std::unordered_map<std::string, OnnxTensorData>& inputs,
compiled_model_path_ = [compileUrl path];

MLModelConfiguration* config = [[MLModelConfiguration alloc] init];
config.computeUnits = (coreml_flags_ & COREML_FLAG_USE_CPU_ONLY)
? MLComputeUnitsCPUOnly
: MLComputeUnitsAll;

if (coreml_flags_ & COREML_FLAG_USE_CPU_ONLY) {
config.computeUnits = MLComputeUnitsCPUOnly;
} else if (coreml_flags_ & COREML_FLAG_USE_CPU_AND_GPU) {
config.computeUnits = MLComputeUnitsCPUAndGPU;
wejoncy marked this conversation as resolved.
Show resolved Hide resolved
} else {
config.computeUnits = MLComputeUnitsAll;
}

model_ = [MLModel modelWithContentsOfURL:compileUrl configuration:config error:&error];

if (error != nil || model_ == nil) {
Expand Down
2 changes: 2 additions & 0 deletions onnxruntime/python/onnxruntime_pybind_state.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1219,6 +1219,8 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(

if (flags_str.find("COREML_FLAG_USE_CPU_ONLY") != std::string::npos) {
coreml_flags |= COREMLFlags::COREML_FLAG_USE_CPU_ONLY;
} else if (flags_str.find("COREML_FLAG_USE_CPU_AND_GPU") != std::string::npos) {
coreml_flags |= COREMLFlags::COREML_FLAG_USE_CPU_AND_GPU;
}

if (flags_str.find("COREML_FLAG_ONLY_ALLOW_STATIC_INPUT_SHAPES") != std::string::npos) {
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/perftest/command_args_parser.cc
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ namespace perftest {
"\t [NNAPI only] [NNAPI_FLAG_CPU_ONLY]: Using CPU only in NNAPI EP.\n"
"\t [Example] [For NNAPI EP] -e nnapi -i \"NNAPI_FLAG_USE_FP16 NNAPI_FLAG_USE_NCHW NNAPI_FLAG_CPU_DISABLED\"\n"
"\n"
"\t [CoreML only] [COREML_FLAG_CREATE_MLPROGRAM]: Create an ML Program model instead of Neural Network.\n"
"\t [CoreML only] [COREML_FLAG_CREATE_MLPROGRAM COREML_FLAG_USE_CPU_ONLY COREML_FLAG_USE_CPU_AND_GPU]: Create an ML Program model instead of a Neural Network, and/or select the compute units to run on.\n"
"\t [Example] [For CoreML EP] -e coreml -i \"COREML_FLAG_CREATE_MLPROGRAM\"\n"
"\n"
"\t [SNPE only] [runtime]: SNPE runtime, options: 'CPU', 'GPU', 'GPU_FLOAT16', 'DSP', 'AIP_FIXED_TF'. \n"
Expand Down
6 changes: 6 additions & 0 deletions onnxruntime/test/perftest/ort_test_session.cc
Original file line number Diff line number Diff line change
Expand Up @@ -426,6 +426,12 @@ select from 'TF8', 'TF16', 'UINT8', 'FLOAT', 'ITENSOR'. \n)");
if (key == "COREML_FLAG_CREATE_MLPROGRAM") {
coreml_flags |= COREML_FLAG_CREATE_MLPROGRAM;
std::cout << "Enabling ML Program.\n";
} else if (key == "COREML_FLAG_USE_CPU_ONLY") {
coreml_flags |= COREML_FLAG_USE_CPU_ONLY;
std::cout << "CoreML enabled COREML_FLAG_USE_CPU_ONLY.\n";
} else if (key == "COREML_FLAG_USE_CPU_AND_GPU") {
coreml_flags |= COREML_FLAG_USE_CPU_AND_GPU;
std::cout << "CoreML enabled COREML_FLAG_USE_CPU_AND_GPU.\n";
} else if (key.empty()) {
} else {
ORT_THROW(
Expand Down
Loading