Skip to content

Commit

Permalink
Revert "apply fixes for issues spotted by coverity" (openvinotoolkit#25639)
Browse files Browse the repository at this point in the history

Reverts openvinotoolkit#25548
Reverted because it probably breaks macOS debug builds.
  • Loading branch information
akladiev committed Jul 19, 2024
1 parent b26602f commit 814f306
Show file tree
Hide file tree
Showing 11 changed files with 35 additions and 40 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -132,13 +132,13 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor {
bool cpu_reservation = false,
bool cpu_pinning = false,
std::vector<std::vector<int>> streams_info_table = {})
: _name{std::move(name)},
: _name{name},
_streams{streams},
_threads_per_stream{threads_per_stream},
_thread_preferred_core_type(thread_preferred_core_type),
_cpu_reservation{cpu_reservation},
_cpu_pinning{cpu_pinning},
_streams_info_table{std::move(streams_info_table)} {
_streams_info_table{streams_info_table} {
update_executor_config();
}

Expand Down
3 changes: 1 addition & 2 deletions src/inference/include/openvino/runtime/properties.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -942,8 +942,7 @@ struct Properties : public Property<std::map<std::string, std::map<std::string,
inline util::EnableIfAllStringAny<std::pair<std::string, Any>, Properties...> operator()(
const std::string& device_name,
Properties&&... configs) const {
return {name() + std::string("_") + device_name,
AnyMap{std::pair<std::string, Any>{std::forward<Properties>(configs)}...}};
return {name() + std::string("_") + device_name, AnyMap{std::pair<std::string, Any>{configs}...}};
}
};

Expand Down
2 changes: 1 addition & 1 deletion src/inference/src/cpp/core.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ std::string find_plugins_xml(const std::string& xml_file) {
return xmlConfigFileDefault;

// 2. in folder with libopenvino.so
xmlConfigFileDefault = ov::util::path_join({std::move(ov_library_path), std::move(xml_file_name)});
xmlConfigFileDefault = ov::util::path_join({ov_library_path, xml_file_name});
if (ov::util::file_exists(xmlConfigFileDefault))
return xmlConfigFileDefault;

Expand Down
4 changes: 2 additions & 2 deletions src/inference/src/cpp/infer_request.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ Tensor InferRequest::get_output_tensor(size_t idx) {

Tensor InferRequest::get_input_tensor() {
OV_INFER_REQ_CALL_STATEMENT({
const auto& inputs = _impl->get_inputs();
const auto inputs = _impl->get_inputs();
OPENVINO_ASSERT(inputs.size() == 1,
"get_input_tensor() must be called on a function with exactly one parameter.");
return get_tensor(inputs.at(0));
Expand All @@ -212,7 +212,7 @@ Tensor InferRequest::get_input_tensor() {

Tensor InferRequest::get_output_tensor() {
OV_INFER_REQ_CALL_STATEMENT({
const auto& outputs = _impl->get_outputs();
const auto outputs = _impl->get_outputs();
OPENVINO_ASSERT(outputs.size() == 1,
"get_output_tensor() must be called on a function with exactly one parameter.");
return get_tensor(outputs.at(0));
Expand Down
26 changes: 13 additions & 13 deletions src/inference/src/dev/core_impl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -216,11 +216,11 @@ bool ov::is_config_applicable(const std::string& user_device_name, const std::st

// if device name is matched, check additional condition
auto is_matched = [&](const std::string& key, MatchType match_type) -> bool {
const auto& user_value =
auto user_value =
parsed_user_device_name._config.count(key) ? parsed_user_device_name._config.at(key).as<std::string>() : "";
const auto& subprop_value = parsed_subprop_device_name._config.count(key)
? parsed_subprop_device_name._config.at(key).as<std::string>()
: "";
auto subprop_value = parsed_subprop_device_name._config.count(key)
? parsed_subprop_device_name._config.at(key).as<std::string>()
: "";

if (!user_value.empty() && subprop_value.empty()) {
// property without additional limitation can be applied
Expand Down Expand Up @@ -307,7 +307,7 @@ ov::Parsed ov::parseDeviceNameIntoConfig(const std::string& deviceName,
clean_batch_properties(updated_device_name, updated_config, ov::auto_batch_timeout);
}

return {std::move(updated_device_name), std::move(updated_config)};
return {updated_device_name, updated_config};
}

ov::CoreImpl::CoreImpl() {
Expand Down Expand Up @@ -395,7 +395,7 @@ void ov::CoreImpl::register_plugin_in_registry_unsafe(const std::string& device_
// Register proxy plugin
if (config.find(ov::proxy::configuration::alias.name()) != config.end()) {
// Create proxy plugin for alias
const auto& alias = config.at(ov::proxy::configuration::alias.name()).as<std::string>();
auto alias = config.at(ov::proxy::configuration::alias.name()).as<std::string>();
if (alias == device_name)
dev_name = get_internal_plugin_name(dev_name, config);
// Alias can be registered by several plugins
Expand All @@ -407,7 +407,7 @@ void ov::CoreImpl::register_plugin_in_registry_unsafe(const std::string& device_
desc.defaultConfig[ov::proxy::configuration::internal_name.name()] = dev_name;

fill_config(desc.defaultConfig, config, dev_name);
pluginRegistry[alias] = std::move(desc);
pluginRegistry[alias] = desc;
add_mutex(alias);
} else {
// Update registered plugin
Expand All @@ -428,7 +428,7 @@ void ov::CoreImpl::register_plugin_in_registry_unsafe(const std::string& device_
PluginDescriptor desc = PluginDescriptor(ov::proxy::create_plugin);
desc.defaultConfig[ov::proxy::configuration::internal_name.name()] = dev_name;
fill_config(desc.defaultConfig, config, dev_name);
pluginRegistry[device_name] = std::move(desc);
pluginRegistry[device_name] = desc;
add_mutex(device_name);
}

Expand Down Expand Up @@ -601,7 +601,7 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const {
// Set Core class reference to plugins
std::weak_ptr<ov::ICore> mutableCore =
std::const_pointer_cast<ov::ICore>(std::dynamic_pointer_cast<const ov::ICore>(shared_from_this()));
plugin.set_core(std::move(mutableCore));
plugin.set_core(mutableCore);
}

// configuring
Expand Down Expand Up @@ -1252,7 +1252,7 @@ void ov::CoreImpl::set_property_for_device(const ov::AnyMap& configMap, const st
auto base_desc = pluginRegistry.find(clearDeviceName);
if (pluginRegistry.find(deviceName) == pluginRegistry.end() && base_desc != pluginRegistry.end()) {
PluginDescriptor desc{base_desc->second.libraryLocation, config, base_desc->second.listOfExtentions};
pluginRegistry[deviceName] = std::move(desc);
pluginRegistry[deviceName] = desc;
}

// set config for plugins in registry
Expand Down Expand Up @@ -1532,8 +1532,8 @@ ov::CoreImpl::CoreConfig::CacheConfig ov::CoreImpl::CoreConfig::get_cache_config
ov::AnyMap& parsedConfig) const {
// cache_dir is enabled locally in compile_model only
if (parsedConfig.count(ov::cache_dir.name())) {
const auto& cache_dir_val = parsedConfig.at(ov::cache_dir.name()).as<std::string>();
const auto& tempConfig = CoreConfig::CacheConfig::create(cache_dir_val);
auto cache_dir_val = parsedConfig.at(ov::cache_dir.name()).as<std::string>();
auto tempConfig = CoreConfig::CacheConfig::create(cache_dir_val);
// if plugin does not explicitly support cache_dir, and if plugin is not virtual, we need to remove
// it from config
if (!util::contains(plugin.get_property(ov::supported_properties), ov::cache_dir) &&
Expand Down Expand Up @@ -1563,7 +1563,7 @@ ov::CoreImpl::CoreConfig::CacheConfig ov::CoreImpl::CoreConfig::CacheConfig::cre
cache_manager = std::make_shared<ov::FileStorageCacheManager>(dir);
}

return {dir, std::move(cache_manager)};
return {dir, cache_manager};
}

std::mutex& ov::CoreImpl::get_mutex(const std::string& dev_name) const {
Expand Down
6 changes: 3 additions & 3 deletions src/inference/src/dev/iplugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ std::unordered_set<std::string> ov::get_supported_nodes(
m.run_passes(transformed_model);

transform(transformed_model);
const auto& ops = transformed_model->get_ordered_ops();
auto ops = transformed_model->get_ordered_ops();

NameSet supported;
NameSet unsupported;
Expand All @@ -120,18 +120,18 @@ std::unordered_set<std::string> ov::get_supported_nodes(
// Collect all operation names even there are no such names in original model
std::map<std::string, std::shared_ptr<ov::Node>> transformed_model_op_map;
std::map<std::string, std::string> fused_model_op_map;
for (const auto& op : ops) {
for (auto&& op : ops) {
auto names = get_names_set(op);
for (auto& name : names) {
if (name != op->get_friendly_name())
fused_model_op_map[name] = op->get_friendly_name();
}
transformed_model_op_map[op->get_friendly_name()] = op;
if (is_node_supported(op)) {
supported.insert(names.begin(), names.end());
} else {
unsupported.insert(names.begin(), names.end());
}
transformed_model_op_map[op->get_friendly_name()] = op;
}

// If operation was fused into several operations where one is supported
Expand Down
4 changes: 2 additions & 2 deletions src/inference/src/dev/performance_heuristics.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,8 +99,8 @@ MemBandwidthPressure mem_bandwidth_pressure_tolerance(const std::shared_ptr<ov::

// Check that input and output shape a fully defined (not dynamic)
if (input.get_partial_shape().is_static() && output.get_partial_shape().is_static()) {
const auto& shapeInput = input.get_shape();
const auto& shapeOutput = output.get_shape();
const auto shapeInput = input.get_shape();
const auto shapeOutput = output.get_shape();
if (shapeInput.size() > 4 /*5D*/ && isINT8) {
compute_deconvs++;
continue;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,7 @@ void reserve_cpu_by_streams_info(const std::vector<std::vector<int>> _streams_in
void update_proc_type_table(const std::vector<std::vector<int>> _cpu_mapping_table,
const int _numa_nodes,
std::vector<std::vector<int>>& _proc_type_table) {
std::vector<int> all_table;
std::map<int, int> numa_node_map;

_proc_type_table.assign((_numa_nodes == 1) ? 1 : _numa_nodes + 1, std::vector<int>({0, 0, 0, 0, -1, -1}));
Expand All @@ -201,8 +202,7 @@ void update_proc_type_table(const std::vector<std::vector<int>> _cpu_mapping_tab
} else {
numa_node_map.insert(std::pair<int, int>(_proc_type_table[0][PROC_NUMA_NODE_ID], 0));
}

std::vector<int> all_table{0, 0, 0, 0, -1, -1};
all_table = {0, 0, 0, 0, -1, -1};
for (size_t i = 0; i < _cpu_mapping_table.size(); i++) {
if (_cpu_mapping_table[i][CPU_MAP_USED_FLAG] == NOT_USED && _cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID] >= 0 &&
_cpu_mapping_table[i][CPU_MAP_CORE_TYPE] >= ALL_PROC) {
Expand All @@ -216,7 +216,7 @@ void update_proc_type_table(const std::vector<std::vector<int>> _cpu_mapping_tab
}
}
if (_numa_nodes > 1) {
_proc_type_table[0] = std::move(all_table);
_proc_type_table[0] = all_table;
}

if (_proc_type_table.size() > 1) {
Expand Down
2 changes: 1 addition & 1 deletion src/inference/src/dev/threading/istreams_executor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ void IStreamsExecutor::Config::set_property(const std::string& key, const ov::An
void IStreamsExecutor::Config::set_property(const ov::AnyMap& property) {
for (const auto& it : property) {
const auto& key = it.first;
const auto& value = it.second;
const auto value = it.second;
if (key == ov::num_streams) {
auto streams = value.as<ov::streams::Num>();
if (streams == ov::streams::NUMA) {
Expand Down
4 changes: 2 additions & 2 deletions src/inference/src/model_reader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -86,10 +86,10 @@ void update_v10_model(std::shared_ptr<ov::Model>& model, bool frontendMode = fal
"Model operation names have collisions with tensor names.",
" Please use MO to generate new IR version, it should allow to avoid the issue");
leaf_names.emplace(res_name, nullptr);
result->output(0).get_tensor().add_names({std::move(res_name)});
result->output(0).get_tensor().add_names({res_name});
}
for (const auto& param : model->get_parameters()) {
const auto& param_name = param->get_friendly_name();
auto param_name = param->get_friendly_name();
OPENVINO_ASSERT(leaf_names.find(param_name) == leaf_names.end() ||
param->output(0).get_names().find(param_name) != param->output(0).get_names().end(),
"Model operation names have collisions with tensor names.",
Expand Down
14 changes: 5 additions & 9 deletions src/inference/src/os/lin/lin_system_conf.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ CPU::CPU() {
}
std::string cache_info;
std::getline(cache_file, cache_info);
one_info[n] = std::move(cache_info);
one_info[n] = cache_info;
}

if (cache_index == -1) {
Expand Down Expand Up @@ -80,7 +80,7 @@ CPU::CPU() {
}
std::string cache_info;
std::getline(cache_file, cache_info);
one_info[n] = std::move(cache_info);
one_info[n] = cache_info;
}

if (cache_index == -1) {
Expand Down Expand Up @@ -108,7 +108,7 @@ CPU::CPU() {
}
std::string cache_info;
std::getline(cache_file, cache_info);
node_info_table.emplace_back(std::move(cache_info));
node_info_table.push_back(cache_info);
node_index++;
}
};
Expand Down Expand Up @@ -188,11 +188,7 @@ CPU::CPU() {
} else {
_processors = valid_cpu_mapping_table.size();
_cpu_mapping_table.swap(valid_cpu_mapping_table);
update_valid_processor_linux(std::move(phy_core_list),
_numa_nodes,
_cores,
_proc_type_table,
_cpu_mapping_table);
update_valid_processor_linux(phy_core_list, _numa_nodes, _cores, _proc_type_table, _cpu_mapping_table);
return 0;
}
};
Expand Down Expand Up @@ -462,7 +458,7 @@ void parse_cache_info_linux(const std::vector<std::vector<std::string>> system_i
return;
};

const std::vector<int> line_value_0({0, 0, 0, 0, -1, -1});
std::vector<int> line_value_0({0, 0, 0, 0, -1, -1});

for (int n = 0; n < _processors; n++) {
if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) {
Expand Down

0 comments on commit 814f306

Please sign in to comment.