return {};
}
+// Completes a partially-filled Config: any field left at its default is derived
+// from environment / hardware properties. The caller's config is not modified.
+IStreamsExecutor::Config IStreamsExecutor::Config::MakeDefaultMultiThreaded(const IStreamsExecutor::Config& initial) {
+ // Thread count requested via the environment (presumably OMP_NUM_THREADS or
+ // similar — confirm in parallel_get_env_threads()); treated as "unset" when 0.
+ const auto envThreads = parallel_get_env_threads();
+ const auto& numaNodes = getAvailableNUMANodes();
+ const auto numaNodesNum = numaNodes.size();
+ // Work on a copy so `initial` stays untouched; the copy is returned.
+ auto streamExecutorConfig = initial;
+ // NOTE(review): with multiple streams on a single NUMA node the budget comes from
+ // parallel_get_max_threads(), otherwise from getNumberOfCPUCores() — presumably
+ // "all logical threads" vs "physical cores only"; confirm against those helpers.
+ const auto hwCores = streamExecutorConfig._streams > 1 && numaNodesNum == 1 ? parallel_get_max_threads() : getNumberOfCPUCores();
+ // Total thread budget, by priority: explicit config > environment > detected hardware.
+ const auto threads = streamExecutorConfig._threads ? streamExecutorConfig._threads : (envThreads ? envThreads : hwCores);
+ // Divide the budget evenly across streams (integer division, at least 1 thread
+ // per stream); if _streams is 0, the whole budget goes to a single stream.
+ streamExecutorConfig._threadsPerStream = streamExecutorConfig._streams
+ ? std::max(1, threads/streamExecutorConfig._streams)
+ : threads;
+ return streamExecutorConfig;
+}
+
} // namespace InferenceEngine
\ No newline at end of file
*/
Parameter GetConfig(const std::string& key);
+ /**
+ * @brief Create appropriate multithreaded configuration
+ * filling unconfigured values from the initial configuration using hardware properties
+ * @param initial Initial configuration
+ * @return configured values
+ */
+ static Config MakeDefaultMultiThreaded(const Config& initial);
+
std::string _name; //!< Used by `ITT` to name executor threads
int _streams = 1; //!< Number of streams.
int _threadsPerStream = 0; //!< Number of threads per stream that executes `ie_parallel` calls