diff --git a/onnxruntime/core/framework/sequential_executor.cc b/onnxruntime/core/framework/sequential_executor.cc
index 6ea12c7f3336b..2185b8332b9cf 100644
--- a/onnxruntime/core/framework/sequential_executor.cc
+++ b/onnxruntime/core/framework/sequential_executor.cc
@@ -68,7 +68,7 @@ static void CalculateTotalOutputSizes(OpKernelContextInternal* op_kernel_context
   int output_count = op_kernel_context->OutputCount();
   for (auto i = 0; i < output_count; i++) {
     const OrtValue* p_output = op_kernel_context->GetOutputMLValue(i);
-    if (p_output != nullptr && p_output->IsTensor()) {
+    if (p_output != nullptr && p_output->IsTensor() && p_output->IsAllocated()) {
       const auto& tensor = p_output->Get<Tensor>();
       size_t tensor_size = tensor.SizeInBytes();
 #if defined(TRACE_EXECUTION)
@@ -104,7 +104,7 @@ static void CalculateTotalInputSizes(const OpKernelContextInternal* op_kernel_co
   const int input_count = op_kernel_context->InputCount();
   for (auto i = 0; i < input_count; i++) {
     const OrtValue* p_input = op_kernel_context->GetInputMLValue(i);
-    if (p_input != nullptr && p_input->IsTensor()) {
+    if (p_input != nullptr && p_input->IsTensor() && p_input->IsAllocated()) {
       const OpKernelInfo& op_kernel_info = p_op_kernel->Info();
       const Tensor* p_tensor = nullptr;
       bool is_param = op_kernel_info.TryGetConstantInput(i, &p_tensor);
diff --git a/onnxruntime/test/framework/inference_session_test.cc b/onnxruntime/test/framework/inference_session_test.cc
index c6a81e8a1c1ad..7f4616c964e33 100644
--- a/onnxruntime/test/framework/inference_session_test.cc
+++ b/onnxruntime/test/framework/inference_session_test.cc
@@ -818,6 +818,47 @@ TEST(InferenceSessionTests, CheckRunProfilerStartTime) {
   ASSERT_TRUE(before_start_time <= profiling_start_time && profiling_start_time <= after_start_time);
 }
 
+TEST(InferenceSessionTests, CheckRunProfilerWithOptionalValues) {
+  // Test whether the profiler can work on a model with optional values
+  SessionOptions so;
+
+  so.session_logid = "CheckRunProfiler";
+  so.enable_profiling = true;
+  so.profile_file_prefix = ORT_TSTR("onnxprofile_profile_test");
+
+  InferenceSession session_object(so, GetEnvironment());
+  ASSERT_STATUS_OK(session_object.Load(ORT_TSTR("testdata/relu_with_optional.onnx")));
+  ASSERT_STATUS_OK(session_object.Initialize());
+
+  RunOptions run_options;
+  run_options.run_tag = "RunTag";
+
+  // prepare inputs
+  std::vector<int64_t> dims_x = {1};
+  std::vector<float> values_x = {-4};
+  OrtValue ml_value;
+  CreateMLValue<float>(TestCPUExecutionProvider()->CreatePreferredAllocators()[0], dims_x, values_x, &ml_value);
+  NameMLValMap feeds;
+  feeds.insert(std::make_pair("input", ml_value));
+
+  // prepare outputs
+  std::vector<std::string> output_names;
+  output_names.push_back("output");
+  std::vector<OrtValue> fetches;
+
+  // prepare expected inputs and outputs
+  std::vector<int64_t> expected_dims_y = {1};
+  std::vector<float> expected_values_y = {0};
+
+  // Now run
+  common::Status st = session_object.Run(run_options, feeds, output_names, &fetches);
+  if (!st.IsOK()) {
+    std::cout << "Run returned status: " << st.ErrorMessage() << std::endl;
+  }
+  ASSERT_TRUE(st.IsOK());
+  VerifyOutputs(fetches.at(0).Get<Tensor>(), expected_dims_y, expected_values_y);
+}
+
 TEST(InferenceSessionTests, MultipleSessionsNoTimeout) {
   SessionOptions session_options;
diff --git a/onnxruntime/test/testdata/relu_with_optional.onnx b/onnxruntime/test/testdata/relu_with_optional.onnx
new file mode 100644
index 0000000000000..b52c6927527bd
Binary files /dev/null and b/onnxruntime/test/testdata/relu_with_optional.onnx differ
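
For context on the fix above, here is a minimal standalone sketch of why the extra IsAllocated() guard matters: an optional input or output can reach the profiler's size accounting as a non-null OrtValue whose tensor was never allocated, and calling Get<Tensor>() on such a value is invalid. The types below (MockOrtValue, the Tensor struct, TotalSizeInBytes) are simplified stand-ins invented for illustration, not onnxruntime's real classes; only the null/IsTensor()/IsAllocated() query pattern mirrors the patched code.

#include <cstddef>
#include <iostream>
#include <optional>
#include <vector>

// Simplified stand-in for a tensor: just enough to report a byte size.
struct Tensor {
  std::vector<float> data;
  size_t SizeInBytes() const { return data.size() * sizeof(float); }
};

// Mock of the OrtValue surface used by the patch; not the real class.
struct MockOrtValue {
  std::optional<Tensor> tensor;  // empty => declared but never allocated
  bool IsTensor() const { return true; }  // type info exists even when unallocated
  bool IsAllocated() const { return tensor.has_value(); }
  const Tensor& Get() const { return *tensor; }  // invalid if unallocated
};

// Mirrors the shape of CalculateTotalOutputSizes: skip null values,
// non-tensors, and (after the fix) tensors that were never allocated.
size_t TotalSizeInBytes(const std::vector<const MockOrtValue*>& values) {
  size_t total = 0;
  for (const MockOrtValue* v : values) {
    if (v != nullptr && v->IsTensor() && v->IsAllocated()) {
      total += v->Get().SizeInBytes();
    }
  }
  return total;
}

int main() {
  MockOrtValue present{Tensor{{1.0f, 2.0f}}};
  MockOrtValue absent{std::nullopt};  // an optional value that was never produced
  // Without the IsAllocated() check, the 'absent' entry would dereference an
  // empty optional; with it, only the allocated tensor contributes to the sum.
  std::cout << TotalSizeInBytes({&present, &absent, nullptr}) << " bytes\n";  // prints "8 bytes"
  return 0;
}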