--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief A header that defines advanced properties for the HDDL plugin.
+ * These properties should be used in SetConfig() and LoadNetwork() methods of plugins
+ *
+ * @file hddl_config.hpp
+ */
+
+#pragma once
+
+#include "vpu_config.hpp"
+
+namespace InferenceEngine {
+
+namespace Metrics {
+
+/**
+* @brief Metric to get an int of the device number, String value is METRIC_HDDL_DEVICE_NUM
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_NUM, int);
+
+/**
+* @brief Metric to get a std::vector<std::string> of device names, String value is METRIC_HDDL_DEVICE_NAME
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_NAME, std::vector<std::string>);
+
+/**
+* @brief Metric to get a std::vector<float> of device thermal, String value is METRIC_HDDL_DEVICE_THERMAL
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_THERMAL, std::vector<float>);
+
+/**
+* @brief Metric to get a std::vector<uint32> of device ids, String value is METRIC_HDDL_DEVICE_ID
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_ID, std::vector<unsigned int>);
+
+/**
+* @brief Metric to get a std::vector<int> of device subclasses, String value is METRIC_HDDL_DEVICE_SUBCLASS
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_SUBCLASS, std::vector<int>);
+
+/**
+* @brief Metric to get a std::vector<uint32> of device total memory, String value is METRIC_HDDL_MEMORY_TOTAL
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_TOTAL, std::vector<unsigned int>);
+
+/**
+* @brief Metric to get a std::vector<uint32> of device used memory, String value is METRIC_HDDL_DEVICE_MEMORY_USED
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_MEMORY_USED, std::vector<unsigned int>);
+
+/**
+* @brief Metric to get a std::vector<float> of device utilization, String value is METRIC_HDDL_DEVICE_UTILIZATION
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_UTILIZATION, std::vector<float>);
+
+/**
+* @brief Metric to get a std::vector<std::string> of stream ids, String value is METRIC_HDDL_DEVICE_STREAM_ID
+*/
+DECLARE_METRIC_KEY(HDDL_STREAM_ID, std::vector<std::string>);
+
+/**
+* @brief Metric to get a std::vector<std::string> of device tags, String value is METRIC_HDDL_DEVICE_TAG
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_TAG, std::vector<std::string>);
+
+/**
+* @brief Metric to get a std::vector<int> of group ids, String value is METRIC_HDDL_GROUP_ID
+*/
+DECLARE_METRIC_KEY(HDDL_GROUP_ID, std::vector<int>);
+
+/**
+* @brief Metric to get an int number of devices being used by a group, String value is METRIC_HDDL_DEVICE_GROUP_USING_NUM
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_GROUP_USING_NUM, int);
+
+/**
+* @brief Metric to get an int number of total devices, String value is METRIC_HDDL_DEVICE_TOTAL_NUM
+*/
+DECLARE_METRIC_KEY(HDDL_DEVICE_TOTAL_NUM, int);
+
+} // namespace Metrics
+
+/**
+ * @brief [Only for HDDLPlugin]
+ * Type: Arbitrary non-empty string. If empty (""), the option is not set; default: "";
+ * This option allows specifying the number of MYX devices used for inference of a specific executable network.
+ * Note: Only one network would be allocated to one device.
+ * The number of devices for the tag is specified in the hddl_service.config file.
+ * Example:
+ * "service_settings":
+ * {
+ * "graph_tag_map":
+ * {
+ * "tagA":3
+ * }
+ * }
+ * It means that an executable network marked with tagA will be executed on 3 devices
+ */
+DECLARE_VPU_CONFIG(HDDL_GRAPH_TAG);
+
+/**
+ * @brief [Only for HDDLPlugin]
+ * Type: Arbitrary non-empty string. If empty (""), the option is not set; default: "";
+ * This config makes the executable networks to be allocated on one certain device (instead of multiple devices).
+ * And all inference through this executable network, will be done on this device.
+ * Note: Only one network would be allocated to one device.
+ * The number of devices which will be used for stream-affinity must be specified in hddl_service.config file.
+ * Example:
+ * "service_settings":
+ * {
+ * "stream_device_number":5
+ * }
+ * It means that 5 devices will be used for stream-affinity
+ */
+DECLARE_VPU_CONFIG(HDDL_STREAM_ID);
+
+/**
+ * @brief [Only for HDDLPlugin]
+ * Type: Arbitrary non-empty string. If empty (""), the option is not set; default: "";
+ * This config allows the user to control devices flexibly. It gives a "tag" to a certain device while
+ * allocating a network to it. Afterwards, the user can allocate/deallocate networks to this device with this "tag".
+ * Devices used for such a use case are controlled by a so-called "Bypass Scheduler" in the HDDL backend, and the number
+ * of such devices needs to be specified in the hddl_service.config file.
+ * Example:
+ * "service_settings":
+ * {
+ * "bypass_device_number": 5
+ * }
+ * It means that 5 devices will be used for the Bypass scheduler.
+ */
+DECLARE_VPU_CONFIG(HDDL_DEVICE_TAG);
+
+/**
+ * @brief [Only for HDDLPlugin]
+ * Type: "YES/NO", default is "NO".
+ * This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set. After a user loads a
+ * network, the user gets a handle for the network.
+ * If "YES", the allocated network is bound to the device (with the specified "DEVICE_TAG"), which means all subsequent
+ * inference through this network handle will be executed on this device only.
+ * If "NO", the allocated network is not bound to the device (with the specified "DEVICE_TAG"). If the same network
+ * is allocated on multiple other devices (which also set BIND_DEVICE to "False"), then inference through any handle of these
+ * networks may be executed on any of the devices that have the network loaded.
+ */
+DECLARE_VPU_CONFIG(HDDL_BIND_DEVICE);
+
+/**
+ * @brief [Only for HDDLPlugin]
+ * Type: A signed int wrapped in a string, default is "0".
+ * This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set and "BIND_DEVICE" is "False".
+ * When there are multiple devices running a certain network (a same network running on multiple devices in Bypass Scheduler),
+ * the device with a larger number has a higher priority, and more inference tasks will be fed to it with priority.
+ */
+DECLARE_VPU_CONFIG(HDDL_RUNTIME_PRIORITY);
+
+/**
+ * @brief [Only for HDDLPlugin]
+ * Type: "YES/NO", default is "NO".
+ * SGAD is short for "Single Graph All Device". With this scheduler, once application allocates 1 network, all devices
+ * (managed by the SGAD scheduler) will be loaded with this graph. The number of networks that can be loaded to one device
+ * can exceed one. Once the application deallocates 1 network from a device, all devices will unload the network.
+ */
+DECLARE_VPU_CONFIG(HDDL_USE_SGAD);
+
+/**
+ * @brief [Only for HDDLPlugin]
+ * Type: A signed int wrapped in a string, default is "0".
+ * This config gives a "group id" to a certain device when this device has been reserved for a certain client; the client
+ * can use the devices in this group by calling this group id, while other clients can't use these devices.
+ * Each device has its own group id. Devices in one group share the same group id.
+ */
+DECLARE_VPU_CONFIG(HDDL_GROUP_DEVICE);
+
+} // namespace InferenceEngine
//
/**
+ * @deprecated Use vpu/hddl_config.hpp instead.
* @brief A header that defines advanced related properties for VPU plugins.
* These properties should be used in SetConfig() and LoadNetwork() methods of plugins
*
#pragma once
+#include "ie_api.h"
#include "ie_plugin_config.hpp"
//
namespace Metrics {
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_NUM instead
* @brief Metric to get a int of the device number, String value is METRIC_VPU_HDDL_DEVICE_NUM
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_NUM instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_NUM, int);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_NAME instead
* @brief Metric to get a std::vector<std::string> of device names, String value is METRIC_VPU_HDDL_DEVICE_NAME
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_NAME instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_NAME, std::vector<std::string>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_THERMAL instead
* @brief Metric to get a std::vector<float> of device thermal, String value is METRIC_VPU_HDDL_DEVICE_THERMAL
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_THERMAL instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_THERMAL, std::vector<float>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_ID instead
* @brief Metric to get a std::vector<uint32> of device ids, String value is METRIC_VPU_HDDL_DEVICE_ID
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_ID instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_ID, std::vector<unsigned int>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_SUBCLASS instead
* @brief Metric to get a std::vector<int> of device subclasses, String value is METRIC_VPU_HDDL_DEVICE_SUBCLASS
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_SUBCLASS instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_SUBCLASS, std::vector<int>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_TOTAL instead
* @brief Metric to get a std::vector<uint32> of device total memory, String value is METRIC_VPU_HDDL_MEMORY_TOTAL
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_TOTAL instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_MEMORY_TOTAL, std::vector<unsigned int>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_USED instead
* @brief Metric to get a std::vector<uint32> of device used memory, String value is METRIC_VPU_HDDL_DEVICE_MEMORY_USED
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_MEMORY_USED instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_MEMORY_USED, std::vector<unsigned int>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_UTILIZATION instead
* @brief Metric to get a std::vector<float> of device utilization, String value is METRIC_VPU_HDDL_DEVICE_UTILIZATION
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_UTILIZATION instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_UTILIZATION, std::vector<float>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_STREAM_ID instead
* @brief Metric to get a std::vector<std::string> of stream ids, String value is METRIC_VPU_HDDL_DEVICE_STREAM_ID
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_STREAM_ID instead")
DECLARE_VPU_HDDL_METRIC(STREAM_ID, std::vector<std::string>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_TAG instead
* @brief Metric to get a std::vector<std::string> of device tags, String value is METRIC_VPU_HDDL_DEVICE_TAG
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_TAG instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_TAG, std::vector<std::string>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_GROUP_ID instead
* @brief Metric to get a std::vector<int> of group ids, String value is METRIC_VPU_HDDL_GROUP_ID
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_GROUP_ID instead")
DECLARE_VPU_HDDL_METRIC(GROUP_ID, std::vector<int>);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_GROUP_USING_NUM instead
* @brief Metric to get a int number of device be using for group, String value is METRIC_VPU_HDDL_DEVICE_GROUP_USING_NUM
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_GROUP_USING_NUM instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_GROUP_USING_NUM, int);
/**
+* @deprecated Use InferenceEngine::METRIC_HDDL_DEVICE_TOTAL_NUM instead
* @brief Metric to get a int number of total device, String value is METRIC_VPU_HDDL_DEVICE_TOTAL_NUM
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::METRIC_HDDL_DEVICE_TOTAL_NUM instead")
DECLARE_VPU_HDDL_METRIC(DEVICE_TOTAL_NUM, int);
} // namespace Metrics
namespace VPUConfigParams {
/**
+ * @deprecated Use InferenceEngine::HDDL_GRAPH_TAG instead
* @brief [Only for HDDLPlugin]
* Type: Arbitrary non-empty string. If empty (""), equals no set, default: "";
* This option allows to specify the number of MYX devices used for inference a specific Executable network.
* }
* It means that an executable network marked with tagA will be executed on 3 devices
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_GRAPH_TAG instead")
DECLARE_VPU_HDDL_CONFIG_KEY(GRAPH_TAG);
/**
+ * @deprecated Use InferenceEngine::HDDL_STREAM_ID instead
* @brief [Only for HDDLPlugin]
* Type: Arbitrary non-empty string. If empty (""), equals no set, default: "";
* This config makes the executable networks to be allocated on one certain device (instead of multiple devices).
* }
* It means that 5 device will be used for stream-affinity
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_STREAM_ID instead")
DECLARE_VPU_HDDL_CONFIG_KEY(STREAM_ID);
/**
+ * @deprecated Use InferenceEngine::HDDL_DEVICE_TAG instead
* @brief [Only for HDDLPlugin]
* Type: Arbitrary non-empty string. If empty (""), equals no set, default: "";
* This config allows user to control device flexibly. This config gives a "tag" for a certain device while
* }
* It means that 5 device will be used for Bypass scheduler.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_DEVICE_TAG instead")
DECLARE_VPU_HDDL_CONFIG_KEY(DEVICE_TAG);
/**
+ * @deprecated Use InferenceEngine::HDDL_BIND_DEVICE instead
* @brief [Only for HDDLPlugin]
* Type: "YES/NO", default is "NO".
* This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set. After a user load a
* is allocated on multiple other devices (also set BIND_DEVICE to "False"), then inference through any handle of these
* networks may be executed on any of these devices those have the network loaded.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_BIND_DEVICE instead")
DECLARE_VPU_HDDL_CONFIG_KEY(BIND_DEVICE);
/**
+ * @deprecated Use InferenceEngine::HDDL_RUNTIME_PRIORITY instead
* @brief [Only for HDDLPlugin]
* Type: A signed int wrapped in a string, default is "0".
* This config is a sub-config of DEVICE_TAG, and only available when "DEVICE_TAG" is set and "BIND_DEVICE" is "False".
* When there are multiple devices running a certain network (a same network running on multiple devices in Bypass Scheduler),
* the device with a larger number has a higher priority, and more inference tasks will be fed to it with priority.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_RUNTIME_PRIORITY instead")
DECLARE_VPU_HDDL_CONFIG_KEY(RUNTIME_PRIORITY);
/**
+ * @deprecated Use InferenceEngine::HDDL_USE_SGAD instead
* @brief [Only for HDDLPlugin]
* Type: "YES/NO", default is "NO".
* SGAD is short for "Single Graph All Device". With this scheduler, once application allocates 1 network, all devices
* (managed by SGAD scheduler) will be loaded with this graph. The number of network that can be loaded to one device
* can exceed one. Once application deallocates 1 network from device, all devices will unload the network from them.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_USE_SGAD instead")
DECLARE_VPU_HDDL_CONFIG_KEY(USE_SGAD);
/**
+ * @deprecated Use InferenceEngine::HDDL_GROUP_DEVICE instead
* @brief [Only for HDDLPlugin]
* Type: A signed int wrapped in a string, default is "0".
* This config gives a "group id" for a certain device when this device has been reserved for certain client, client
* can use this device grouped by calling this group id while other client can't use this device
* Each device has their own group id. Device in one group shares same group id.
- */
+ */
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::HDDL_GROUP_DEVICE instead")
DECLARE_VPU_HDDL_CONFIG_KEY(GROUP_DEVICE);
} // namespace VPUConfigParams
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief A header that defines advanced properties for the Myriad plugin.
+ * These properties should be used in SetConfig() and LoadNetwork() methods of plugins
+ *
+ * @file myriad_config.hpp
+ */
+
+#pragma once
+
+#include "vpu_config.hpp"
+
+namespace InferenceEngine {
+
+/**
+ * @brief The flag to reset stalled devices.
+ * This is a plugin scope option and must be used with the plugin's SetConfig method
+ * The only possible values are:
+ * CONFIG_VALUE(YES)
+ * CONFIG_VALUE(NO) (default value)
+ */
+DECLARE_VPU_CONFIG(MYRIAD_ENABLE_FORCE_RESET);
+
+/**
+ * @brief This option allows to specify device memory type.
+ */
+DECLARE_VPU_CONFIG(MYRIAD_DDR_TYPE);
+
+/**
+ * @brief Supported keys definition for InferenceEngine::MYRIAD_DDR_TYPE option.
+ */
+DECLARE_VPU_CONFIG(MYRIAD_DDR_AUTO);
+DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_2GB);
+DECLARE_VPU_CONFIG(MYRIAD_DDR_SAMSUNG_2GB);
+DECLARE_VPU_CONFIG(MYRIAD_DDR_HYNIX_2GB);
+DECLARE_VPU_CONFIG(MYRIAD_DDR_MICRON_1GB);
+
+/**
+ * @brief This option allows to specify protocol.
+ */
+DECLARE_VPU_CONFIG(MYRIAD_PROTOCOL);
+
+/**
+ * @brief Supported keys definition for InferenceEngine::MYRIAD_PROTOCOL option.
+ */
+DECLARE_VPU_CONFIG(MYRIAD_PCIE);
+DECLARE_VPU_CONFIG(MYRIAD_USB);
+
+/**
+ * @brief Optimize vpu plugin execution to maximize throughput.
+ * This option should be used with integer value which is the requested number of streams.
+ * The only possible values are:
+ * 1
+ * 2
+ * 3
+ */
+DECLARE_VPU_CONFIG(MYRIAD_THROUGHPUT_STREAMS);
+
+} // namespace InferenceEngine
//
/**
+ * @deprecated Use vpu/myriad_config.hpp instead.
* @brief A header that defines advanced related properties for VPU plugins.
* These properties should be used in SetConfig() and LoadNetwork() methods of plugins
*
- * @file myriad_plugin_config
+ * @file myriad_plugin_config.hpp
*/
#pragma once
+#include "ie_api.h"
#include "ie_plugin_config.hpp"
/**
namespace VPUConfigParams {
/**
+ * @deprecated Use InferenceEngine::MYRIAD_ENABLE_FORCE_RESET instead.
* @brief The flag to reset stalled devices: CONFIG_VALUE(YES) or CONFIG_VALUE(NO) (default)
* This is a plugin scope option and must be used with the plugin's SetConfig method
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_ENABLE_FORCE_RESET instead")
DECLARE_VPU_MYRIAD_CONFIG_KEY(FORCE_RESET);
/**
+ * @deprecated
* @brief This option allows to specify device.
* If specified device is not available then creating infer request will throw an exception.
*/
+INFERENCE_ENGINE_DEPRECATED("")
DECLARE_VPU_MYRIAD_CONFIG_KEY(PLATFORM);
/**
+ * @deprecated
* @brief Supported keys definition for VPU_MYRIAD_CONFIG_KEY(PLATFORM) option.
*/
+INFERENCE_ENGINE_DEPRECATED("")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(2450);
+INFERENCE_ENGINE_DEPRECATED("")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(2480);
/**
+ * @deprecated Use InferenceEngine::MYRIAD_DDR_TYPE instead
* @brief This option allows to specify device memory type.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_TYPE instead")
DECLARE_VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE);
/**
+ * @deprecated Use DDR type values from InferenceEngine namespace with MYRIAD_DDR_ prefix
* @brief Supported keys definition for VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE) option.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_AUTO instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO);
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_MICRON_2GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB);
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_SAMSUNG_2GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB);
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_HYNIX_2GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB);
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_DDR_MICRON_1GB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB);
} // namespace VPUConfigParams
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief A header that defines common config subset for VPU plugins.
+ * Include myriad_config.hpp or hddl_config.hpp directly.
+ * These properties should be used in SetConfig() and LoadNetwork() methods of plugins
+ *
+ * @file vpu_config.hpp
+ */
+
+#pragma once
+
+#include "ie_plugin_config.hpp"
+#include "ie_api.h"
+
+#include <string>
+
+#define DECLARE_VPU_CONFIG(name) static constexpr auto name = #name
+
+namespace InferenceEngine {
+
+//
+// Common options
+//
+
+/**
+ * @brief Turn on HW stages usage (applicable for MyriadX devices only).
+ * The only possible values are:
+ * CONFIG_VALUE(YES) (default value)
+ * CONFIG_VALUE(NO)
+ */
+DECLARE_VPU_CONFIG(MYRIAD_ENABLE_HW_ACCELERATION);
+
+/**
+ * @brief The flag for adding to the profiling information the time of obtaining a tensor.
+ * The only possible values are:
+ * CONFIG_VALUE(YES)
+ * CONFIG_VALUE(NO) (default value)
+ */
+DECLARE_VPU_CONFIG(MYRIAD_ENABLE_RECEIVING_TENSOR_TIME);
+
+/**
+ * @brief This option allows to pass custom layers binding xml.
+ * If layer is present in such an xml, it would be used during inference even if the layer is natively supported
+ */
+DECLARE_VPU_CONFIG(MYRIAD_CUSTOM_LAYERS);
+
+} // namespace InferenceEngine
//
/**
+ * @deprecated Use vpu/myriad_config.hpp or vpu/hddl_config.hpp instead.
* @brief A header that defines advanced related properties for VPU plugins.
* These properties should be used in SetConfig() and LoadNetwork() methods of plugins
*
//
/**
+ * @deprecated Use InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION instead.
* @brief Turn on HW stages usage (applicable for MyriadX devices only).
* This option should be used with values: CONFIG_VALUE(YES) or CONFIG_VALUE(NO) (default)
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION instead")
DECLARE_VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION);
/**
DECLARE_VPU_CONFIG_KEY(LOG_LEVEL);
/**
- * @deprecated
- * @brief The key to define normalization coefficient for the network input.
- * This option should used with be a real number. Example "255.f"
- */
-INFERENCE_ENGINE_DEPRECATED("")
-DECLARE_VPU_CONFIG_KEY(INPUT_NORM);
-
-/**
- * @deprecated
- * @brief The flag to specify Bias value that is added to each element of the network input.
- * This option should used with be a real number. Example "0.1f"
- */
-INFERENCE_ENGINE_DEPRECATED("")
-DECLARE_VPU_CONFIG_KEY(INPUT_BIAS);
-
-/**
+ * @deprecated Use InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME instead.
* @brief The flag for adding to the profiling information the time of obtaining a tensor.
* This option should be used with values: CONFIG_VALUE(YES) or CONFIG_VALUE(NO) (default)
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME instead")
DECLARE_VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME);
/**
- * @deprecated The data scaling now works automatically.
- */
-INFERENCE_ENGINE_DEPRECATED("The data scaling now works automatically")
-DECLARE_VPU_CONFIG_KEY(NETWORK_CONFIG);
-
-/**
* @deprecated Use InputInfo::setLayout on input data from ICNNNetwork::getInputsInfo() or
* Data::setLayout on output data from ICNNNetwork::getOutputsInfo()
* @brief This option allows to to specify input output layouts for network layers.
DECLARE_VPU_CONFIG_VALUE(NDHWC);
/**
+ * @deprecated Use InferenceEngine::MYRIAD_CUSTOM_LAYERS instead.
* @brief This option allows to pass custom layers binding xml.
* If layer is present in such an xml, it would be used during inference even if the layer is natively supported
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_CUSTOM_LAYERS instead")
DECLARE_VPU_CONFIG_KEY(CUSTOM_LAYERS);
/**
+ * @deprecated Use InferenceEngine::MYRIAD_PROTOCOL instead.
* @brief This option allows to specify protocol.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_PROTOCOL instead")
DECLARE_VPU_MYRIAD_CONFIG_KEY(PROTOCOL);
/**
+ * @deprecated Use InferenceEngine::MYRIAD_PCIE or InferenceEngine::MYRIAD_USB instead.
* @brief Supported keys definition for VPU_MYRIAD_CONFIG_KEY(PROTOCOL) option.
*/
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_PCIE instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(PCIE);
+INFERENCE_ENGINE_DEPRECATED("Use InferenceEngine::MYRIAD_USB instead")
DECLARE_VPU_MYRIAD_CONFIG_VALUE(USB);
-/**
- * @deprecated Use VPU_MYRIAD_CONFIG_KEY(FORCE_RESET) instead.
- */
-INFERENCE_ENGINE_DEPRECATED("Use VPU_MYRIAD_CONFIG_KEY(FORCE_RESET) instead")
-DECLARE_VPU_CONFIG_KEY(FORCE_RESET);
-
-/**
- * @deprecated Use VPU_MYRIAD_CONFIG_KEY(PLATFORM) instead.
- */
-INFERENCE_ENGINE_DEPRECATED("Use VPU_MYRIAD_CONFIG_KEY(PLATFORM) instead")
-DECLARE_VPU_CONFIG_KEY(PLATFORM);
-
-/**
- * @brief Supported keys definition for DECLARE_VPU_CONFIG_KEY(PLATFORM) option.
- */
-INFERENCE_ENGINE_DEPRECATED("")
-DECLARE_VPU_CONFIG_VALUE(2450);
-INFERENCE_ENGINE_DEPRECATED("")
-DECLARE_VPU_CONFIG_VALUE(2480);
-
} // namespace VPUConfigParams
} // namespace InferenceEngine
#include <unordered_set>
#include <string>
-#include <vpu/vpu_plugin_config.hpp>
+#include <vpu/myriad_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <vpu/parsed_config_base.hpp>
#include <string>
+#include <vpu/myriad_config.hpp>
#include <vpu/vpu_plugin_config.hpp>
namespace InferenceEngine {
-namespace VPUConfigParams {
//
// Compilation options
//
-DECLARE_VPU_CONFIG_KEY(NUMBER_OF_SHAVES);
-DECLARE_VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES);
-DECLARE_VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB);
+DECLARE_VPU_CONFIG(MYRIAD_NUMBER_OF_SHAVES);
+DECLARE_VPU_CONFIG(MYRIAD_NUMBER_OF_CMX_SLICES);
+DECLARE_VPU_CONFIG(MYRIAD_TILING_CMX_LIMIT_KB);
-DECLARE_VPU_CONFIG_KEY(TENSOR_STRIDES);
+DECLARE_VPU_CONFIG(MYRIAD_TENSOR_STRIDES);
-DECLARE_VPU_CONFIG_KEY(IR_WITH_SCALES_DIRECTORY);
-DECLARE_VPU_CONFIG_KEY(DETECT_NETWORK_BATCH);
-DECLARE_VPU_CONFIG_KEY(COPY_OPTIMIZATION);
-DECLARE_VPU_CONFIG_KEY(HW_INJECT_STAGES);
-DECLARE_VPU_CONFIG_KEY(HW_POOL_CONV_MERGE);
-DECLARE_VPU_CONFIG_KEY(PACK_DATA_IN_CMX);
-DECLARE_VPU_CONFIG_KEY(HW_DILATION);
-DECLARE_VPU_CONFIG_KEY(HW_EXTRA_SPLIT);
-DECLARE_VPU_CONFIG_KEY(FORCE_DEPRECATED_CNN_CONVERSION);
-
-DECLARE_VPU_CONFIG_KEY(PERF_REPORT_MODE);
-DECLARE_VPU_CONFIG_VALUE(PER_LAYER);
-DECLARE_VPU_CONFIG_VALUE(PER_STAGE);
+DECLARE_VPU_CONFIG(MYRIAD_IR_WITH_SCALES_DIRECTORY);
+DECLARE_VPU_CONFIG(MYRIAD_DETECT_NETWORK_BATCH);
+DECLARE_VPU_CONFIG(MYRIAD_COPY_OPTIMIZATION);
+DECLARE_VPU_CONFIG(MYRIAD_HW_INJECT_STAGES);
+DECLARE_VPU_CONFIG(MYRIAD_HW_POOL_CONV_MERGE);
+DECLARE_VPU_CONFIG(MYRIAD_PACK_DATA_IN_CMX);
+DECLARE_VPU_CONFIG(MYRIAD_HW_DILATION);
+DECLARE_VPU_CONFIG(MYRIAD_HW_EXTRA_SPLIT);
+DECLARE_VPU_CONFIG(MYRIAD_FORCE_DEPRECATED_CNN_CONVERSION);
+
+DECLARE_VPU_CONFIG(MYRIAD_PERF_REPORT_MODE);
+DECLARE_VPU_CONFIG(MYRIAD_PER_LAYER);
+DECLARE_VPU_CONFIG(MYRIAD_PER_STAGE);
//
// Debug options
//
-DECLARE_VPU_CONFIG_KEY(HW_WHITE_LIST);
-DECLARE_VPU_CONFIG_KEY(HW_BLACK_LIST);
+DECLARE_VPU_CONFIG(MYRIAD_HW_WHITE_LIST);
+DECLARE_VPU_CONFIG(MYRIAD_HW_BLACK_LIST);
-DECLARE_VPU_CONFIG_KEY(NONE_LAYERS);
-DECLARE_VPU_CONFIG_KEY(IGNORE_UNKNOWN_LAYERS);
+DECLARE_VPU_CONFIG(MYRIAD_NONE_LAYERS);
+DECLARE_VPU_CONFIG(MYRIAD_IGNORE_UNKNOWN_LAYERS);
-DECLARE_VPU_CONFIG_KEY(COMPILER_LOG_FILE_PATH);
+DECLARE_VPU_CONFIG(MYRIAD_COMPILER_LOG_FILE_PATH);
-DECLARE_VPU_CONFIG_KEY(DUMP_INTERNAL_GRAPH_FILE_NAME);
-DECLARE_VPU_CONFIG_KEY(DUMP_INTERNAL_GRAPH_DIRECTORY);
-DECLARE_VPU_CONFIG_KEY(DUMP_ALL_PASSES);
+DECLARE_VPU_CONFIG(MYRIAD_DUMP_INTERNAL_GRAPH_FILE_NAME);
+DECLARE_VPU_CONFIG(MYRIAD_DUMP_INTERNAL_GRAPH_DIRECTORY);
+DECLARE_VPU_CONFIG(MYRIAD_DUMP_ALL_PASSES);
/**
* @brief Used to disable reorder passes in tests to be able to precisely set
* desired layout on every stage.
*/
-DECLARE_VPU_CONFIG_KEY(DISABLE_REORDER);
+DECLARE_VPU_CONFIG(MYRIAD_DISABLE_REORDER);
/**
* @brief Used to disable convert stages in tests to be able to insert
* convert layer with desired precision.
*/
-DECLARE_VPU_CONFIG_KEY(DISABLE_CONVERT_STAGES);
+DECLARE_VPU_CONFIG(MYRIAD_DISABLE_CONVERT_STAGES);
/**
* @brief Used to disable permute merging pass (with setting "NO") in tests to check it preserves behaviour. Default = "YES"
*/
-DECLARE_VPU_CONFIG_KEY(ENABLE_PERMUTE_MERGING);
+DECLARE_VPU_CONFIG(MYRIAD_ENABLE_PERMUTE_MERGING);
-DECLARE_VPU_CONFIG_KEY(ENABLE_REPL_WITH_SCRELU);
+DECLARE_VPU_CONFIG(MYRIAD_ENABLE_REPL_WITH_SCRELU);
-DECLARE_VPU_CONFIG_KEY(ENABLE_REPLACE_WITH_REDUCE_MEAN);
+DECLARE_VPU_CONFIG(MYRIAD_ENABLE_REPLACE_WITH_REDUCE_MEAN);
/**
* @brief Used to enable Tensor Iterator unrolling to get a reference for Tensor Iterator per-layer tests.
* Default is "NO".
*/
-DECLARE_VPU_CONFIG_KEY(ENABLE_TENSOR_ITERATOR_UNROLLING);
+DECLARE_VPU_CONFIG(MYRIAD_ENABLE_TENSOR_ITERATOR_UNROLLING);
/**
* @brief Used to guarantee Tensor Iterator layer will remain in the network regardless of possible performance transformation.
* Example of transformation: combining to RNN sequence. Needed for Tensor Iterator per-layer tests.
* Default is "NO".
*/
-DECLARE_VPU_CONFIG_KEY(FORCE_PURE_TENSOR_ITERATOR);
+DECLARE_VPU_CONFIG(MYRIAD_FORCE_PURE_TENSOR_ITERATOR);
//
// Myriad plugin options
//
-DECLARE_VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT);
-DECLARE_VPU_MYRIAD_CONFIG_VALUE(POWER_FULL);
-DECLARE_VPU_MYRIAD_CONFIG_VALUE(POWER_INFER);
-DECLARE_VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE);
-DECLARE_VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE_SHAVES);
-DECLARE_VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE_NCES);
+DECLARE_VPU_CONFIG(MYRIAD_POWER_MANAGEMENT);
+DECLARE_VPU_CONFIG(MYRIAD_POWER_FULL);
+DECLARE_VPU_CONFIG(MYRIAD_POWER_INFER);
+DECLARE_VPU_CONFIG(MYRIAD_POWER_STAGE);
+DECLARE_VPU_CONFIG(MYRIAD_POWER_STAGE_SHAVES);
+DECLARE_VPU_CONFIG(MYRIAD_POWER_STAGE_NCES);
-DECLARE_VPU_MYRIAD_CONFIG_KEY(THROUGHPUT_STREAMS);
+DECLARE_VPU_CONFIG(MYRIAD_WATCHDOG);
-DECLARE_VPU_MYRIAD_CONFIG_KEY(WATCHDOG);
+DECLARE_VPU_CONFIG(MYRIAD_PLUGIN_LOG_FILE_PATH);
-DECLARE_VPU_MYRIAD_CONFIG_KEY(PLUGIN_LOG_FILE_PATH);
+DECLARE_VPU_CONFIG(MYRIAD_DEVICE_CONNECT_TIMEOUT);
-DECLARE_VPU_MYRIAD_CONFIG_KEY(DEVICE_CONNECT_TIMEOUT);
+namespace VPUConfigParams {
+
+IE_SUPPRESS_DEPRECATED_START
+// Used to update API usage in the dependent repos.
+DECLARE_VPU_CONFIG_KEY(DETECT_NETWORK_BATCH);
+IE_SUPPRESS_DEPRECATED_END
} // namespace VPUConfigParams
+
} // namespace InferenceEngine
VPU_THROW_UNLESS(g_compileEnv->config.numSHAVEs <= g_compileEnv->config.numCMXSlices,
R"(Value of configuration option ("{}") must be not greater than value of configuration option ("{}"), but {} > {} are provided)",
- VPU_CONFIG_KEY(NUMBER_OF_SHAVES), VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES), config.numSHAVEs, config.numCMXSlices);
+ ie::MYRIAD_NUMBER_OF_SHAVES, ie::MYRIAD_NUMBER_OF_CMX_SLICES, config.numSHAVEs, config.numCMXSlices);
const auto numExecutors = config.numExecutors != -1 ? config.numExecutors : DefaultAllocation::numStreams(platform, config);
VPU_THROW_UNLESS(numExecutors >= 1 && numExecutors <= DeviceResources::numStreams(),
R"(Value of configuration option ("{}") must be in the range [{}, {}], actual is "{}")",
- VPU_MYRIAD_CONFIG_KEY(THROUGHPUT_STREAMS), 1, DeviceResources::numStreams(), numExecutors);
+ ie::MYRIAD_THROUGHPUT_STREAMS, 1, DeviceResources::numStreams(), numExecutors);
const auto numSlices = config.numCMXSlices != -1 ? config.numCMXSlices : DefaultAllocation::numSlices(platform, numExecutors);
VPU_THROW_UNLESS(numSlices >= 1 && numSlices <= DeviceResources::numSlices(platform),
R"(Value of configuration option ("{}") must be in the range [{}, {}], actual is "{}")",
- VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES), 1, DeviceResources::numSlices(platform), numSlices);
+ ie::MYRIAD_NUMBER_OF_CMX_SLICES, 1, DeviceResources::numSlices(platform), numSlices);
int defaultCmxLimit = DefaultAllocation::tilingCMXLimit(numSlices);
const auto tilingCMXLimit = config.tilingCMXLimitKB != -1 ? std::min(config.tilingCMXLimitKB * 1024, defaultCmxLimit) : defaultCmxLimit;
VPU_THROW_UNLESS(tilingCMXLimit >= 0,
R"(Value of configuration option ("{}") must be greater than {}, actual is "{}")",
- VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), 0, tilingCMXLimit);
+ ie::MYRIAD_TILING_CMX_LIMIT_KB, 0, tilingCMXLimit);
const auto numShaves = config.numSHAVEs != -1 ? config.numSHAVEs : DefaultAllocation::numShaves(platform, numExecutors, numSlices);
VPU_THROW_UNLESS(numShaves >= 1 && numShaves <= DeviceResources::numShaves(platform),
R"(Value of configuration option ("{}") must be in the range [{}, {}], actual is "{}")",
- VPU_CONFIG_KEY(NUMBER_OF_SHAVES), 1, DeviceResources::numShaves(platform), numShaves);
+ ie::MYRIAD_NUMBER_OF_SHAVES, 1, DeviceResources::numShaves(platform), numShaves);
const auto numAllocatedShaves = numShaves * numExecutors;
VPU_THROW_UNLESS(numAllocatedShaves >= 1 && numAllocatedShaves <= DeviceResources::numShaves(platform),
CONFIG_KEY(CONFIG_FILE),
- VPU_CONFIG_KEY(NETWORK_CONFIG),
+ ie::MYRIAD_ENABLE_HW_ACCELERATION,
+ ie::MYRIAD_CUSTOM_LAYERS,
+ ie::MYRIAD_THROUGHPUT_STREAMS,
+
+ //
+ // Public deprecated
+ //
+
VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),
- VPU_CONFIG_KEY(HW_EXTRA_SPLIT),
VPU_CONFIG_KEY(CUSTOM_LAYERS),
- VPU_CONFIG_KEY(INPUT_NORM),
- VPU_CONFIG_KEY(INPUT_BIAS),
-
//
// Private options
//
- VPU_CONFIG_KEY(NUMBER_OF_SHAVES),
- VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES),
- VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB),
-
- VPU_CONFIG_KEY(TENSOR_STRIDES),
-
- VPU_CONFIG_KEY(IR_WITH_SCALES_DIRECTORY),
- VPU_CONFIG_KEY(DETECT_NETWORK_BATCH),
- VPU_CONFIG_KEY(COPY_OPTIMIZATION),
- VPU_CONFIG_KEY(HW_INJECT_STAGES),
- VPU_CONFIG_KEY(HW_POOL_CONV_MERGE),
- VPU_CONFIG_KEY(PACK_DATA_IN_CMX),
- VPU_CONFIG_KEY(HW_DILATION),
- VPU_CONFIG_KEY(FORCE_DEPRECATED_CNN_CONVERSION),
- VPU_CONFIG_KEY(DISABLE_REORDER),
- VPU_CONFIG_KEY(ENABLE_PERMUTE_MERGING),
- VPU_CONFIG_KEY(ENABLE_REPL_WITH_SCRELU),
- VPU_CONFIG_KEY(ENABLE_REPLACE_WITH_REDUCE_MEAN),
- VPU_CONFIG_KEY(ENABLE_TENSOR_ITERATOR_UNROLLING),
- VPU_CONFIG_KEY(FORCE_PURE_TENSOR_ITERATOR),
- VPU_CONFIG_KEY(DISABLE_CONVERT_STAGES),
+ ie::MYRIAD_HW_EXTRA_SPLIT,
+
+ ie::MYRIAD_NUMBER_OF_SHAVES,
+ ie::MYRIAD_NUMBER_OF_CMX_SLICES,
+ ie::MYRIAD_TILING_CMX_LIMIT_KB,
+
+ ie::MYRIAD_TENSOR_STRIDES,
+
+ ie::MYRIAD_IR_WITH_SCALES_DIRECTORY,
+ ie::MYRIAD_DETECT_NETWORK_BATCH,
+ ie::MYRIAD_COPY_OPTIMIZATION,
+ ie::MYRIAD_HW_INJECT_STAGES,
+ ie::MYRIAD_HW_POOL_CONV_MERGE,
+ ie::MYRIAD_PACK_DATA_IN_CMX,
+ ie::MYRIAD_HW_DILATION,
+ ie::MYRIAD_FORCE_DEPRECATED_CNN_CONVERSION,
+ ie::MYRIAD_DISABLE_REORDER,
+ ie::MYRIAD_ENABLE_PERMUTE_MERGING,
+ ie::MYRIAD_ENABLE_REPL_WITH_SCRELU,
+ ie::MYRIAD_ENABLE_REPLACE_WITH_REDUCE_MEAN,
+ ie::MYRIAD_ENABLE_TENSOR_ITERATOR_UNROLLING,
+ ie::MYRIAD_FORCE_PURE_TENSOR_ITERATOR,
+ ie::MYRIAD_DISABLE_CONVERT_STAGES,
//
// Debug options
//
- VPU_CONFIG_KEY(HW_WHITE_LIST),
- VPU_CONFIG_KEY(HW_BLACK_LIST),
+ ie::MYRIAD_HW_WHITE_LIST,
+ ie::MYRIAD_HW_BLACK_LIST,
+
+ ie::MYRIAD_NONE_LAYERS,
+ ie::MYRIAD_IGNORE_UNKNOWN_LAYERS,
- VPU_CONFIG_KEY(NONE_LAYERS),
- VPU_CONFIG_KEY(IGNORE_UNKNOWN_LAYERS),
+ ie::MYRIAD_COMPILER_LOG_FILE_PATH,
- VPU_CONFIG_KEY(COMPILER_LOG_FILE_PATH),
+ ie::MYRIAD_DUMP_INTERNAL_GRAPH_FILE_NAME,
+ ie::MYRIAD_DUMP_INTERNAL_GRAPH_DIRECTORY,
+ ie::MYRIAD_DUMP_ALL_PASSES,
+
+ //
+ // Private deprecated options
+ //
- VPU_CONFIG_KEY(DUMP_INTERNAL_GRAPH_FILE_NAME),
- VPU_CONFIG_KEY(DUMP_INTERNAL_GRAPH_DIRECTORY),
- VPU_CONFIG_KEY(DUMP_ALL_PASSES),
+ VPU_CONFIG_KEY(DETECT_NETWORK_BATCH),
});
IE_SUPPRESS_DEPRECATED_END
}
const std::unordered_set<std::string>& ParsedConfig::getRunTimeOptions() const {
+IE_SUPPRESS_DEPRECATED_START
static const std::unordered_set<std::string> options = merge(ParsedConfigBase::getRunTimeOptions(), {
CONFIG_KEY(PERF_COUNT),
VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME),
- VPU_CONFIG_KEY(PERF_REPORT_MODE),
+ ie::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME,
+ ie::MYRIAD_PERF_REPORT_MODE,
});
+IE_SUPPRESS_DEPRECATED_END
return options;
}
const std::unordered_set<std::string>& ParsedConfig::getDeprecatedOptions() const {
IE_SUPPRESS_DEPRECATED_START
static const std::unordered_set<std::string> options = merge(ParsedConfigBase::getDeprecatedOptions(), {
- VPU_CONFIG_KEY(INPUT_NORM),
- VPU_CONFIG_KEY(INPUT_BIAS),
- VPU_CONFIG_KEY(NETWORK_CONFIG),
+ VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),
+ VPU_CONFIG_KEY(CUSTOM_LAYERS),
+ VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME),
});
IE_SUPPRESS_DEPRECATED_END
void ParsedConfig::parse(const std::map<std::string, std::string>& config) {
static const std::unordered_map<std::string, PerfReport> perfReports {
- { VPU_CONFIG_VALUE(PER_LAYER), PerfReport::PerLayer },
- { VPU_CONFIG_VALUE(PER_STAGE), PerfReport::PerStage },
+ { ie::MYRIAD_PER_LAYER, PerfReport::PerLayer },
+ { ie::MYRIAD_PER_STAGE, PerfReport::PerStage },
};
static const auto parseStrides = [](const std::string& src) {
ParsedConfigBase::parse(config);
- setOption(_compilerLogFilePath, config, VPU_CONFIG_KEY(COMPILER_LOG_FILE_PATH));
- setOption(_compileConfig.dumpInternalGraphFileName, config, VPU_CONFIG_KEY(DUMP_INTERNAL_GRAPH_FILE_NAME));
- setOption(_compileConfig.dumpInternalGraphDirectory, config, VPU_CONFIG_KEY(DUMP_INTERNAL_GRAPH_DIRECTORY));
- setOption(_compileConfig.dumpAllPasses, switches, config, VPU_CONFIG_KEY(DUMP_ALL_PASSES));
-
- setOption(_compileConfig.detectBatch, switches, config, VPU_CONFIG_KEY(DETECT_NETWORK_BATCH));
- setOption(_compileConfig.copyOptimization, switches, config, VPU_CONFIG_KEY(COPY_OPTIMIZATION));
- setOption(_compileConfig.packDataInCmx, switches, config, VPU_CONFIG_KEY(PACK_DATA_IN_CMX));
- setOption(_compileConfig.ignoreUnknownLayers, switches, config, VPU_CONFIG_KEY(IGNORE_UNKNOWN_LAYERS));
- setOption(_compileConfig.hwOptimization, switches, config, VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION));
- setOption(_compileConfig.hwExtraSplit, switches, config, VPU_CONFIG_KEY(HW_EXTRA_SPLIT));
- setOption(_compileConfig.injectSwOps, switches, config, VPU_CONFIG_KEY(HW_INJECT_STAGES));
- setOption(_compileConfig.mergeHwPoolToConv, switches, config, VPU_CONFIG_KEY(HW_POOL_CONV_MERGE));
- setOption(_compileConfig.hwDilation, switches, config, VPU_CONFIG_KEY(HW_DILATION));
- setOption(_compileConfig.forceDeprecatedCnnConversion, switches, config, VPU_CONFIG_KEY(FORCE_DEPRECATED_CNN_CONVERSION));
- setOption(_compileConfig.disableReorder, switches, config, VPU_CONFIG_KEY(DISABLE_REORDER));
- setOption(_compileConfig.enablePermuteMerging, switches, config, VPU_CONFIG_KEY(ENABLE_PERMUTE_MERGING));
- setOption(_compileConfig.enableReplWithSCRelu, switches, config, VPU_CONFIG_KEY(ENABLE_REPL_WITH_SCRELU));
- setOption(_compileConfig.enableReplaceWithReduceMean, switches, config, VPU_CONFIG_KEY(ENABLE_REPLACE_WITH_REDUCE_MEAN));
- setOption(_compileConfig.enableTensorIteratorUnrolling, switches, config, VPU_CONFIG_KEY(ENABLE_TENSOR_ITERATOR_UNROLLING));
- setOption(_compileConfig.forcePureTensorIterator, switches, config, VPU_CONFIG_KEY(FORCE_PURE_TENSOR_ITERATOR));
- setOption(_compileConfig.disableConvertStages, switches, config, VPU_CONFIG_KEY(DISABLE_CONVERT_STAGES));
-
- setOption(_compileConfig.irWithVpuScalesDir, config, VPU_CONFIG_KEY(IR_WITH_SCALES_DIRECTORY));
- setOption(_compileConfig.noneLayers, config, VPU_CONFIG_KEY(NONE_LAYERS), parseStringSet);
- setOption(_compileConfig.hwWhiteList, config, VPU_CONFIG_KEY(HW_WHITE_LIST), parseStringSet);
- setOption(_compileConfig.hwBlackList, config, VPU_CONFIG_KEY(HW_BLACK_LIST), parseStringSet);
+ setOption(_compilerLogFilePath, config, ie::MYRIAD_COMPILER_LOG_FILE_PATH);
+ setOption(_compileConfig.dumpInternalGraphFileName, config, ie::MYRIAD_DUMP_INTERNAL_GRAPH_FILE_NAME);
+ setOption(_compileConfig.dumpInternalGraphDirectory, config, ie::MYRIAD_DUMP_INTERNAL_GRAPH_DIRECTORY);
+ setOption(_compileConfig.dumpAllPasses, switches, config, ie::MYRIAD_DUMP_ALL_PASSES);
+
+ setOption(_compileConfig.detectBatch, switches, config, ie::MYRIAD_DETECT_NETWORK_BATCH);
+ setOption(_compileConfig.copyOptimization, switches, config, ie::MYRIAD_COPY_OPTIMIZATION);
+ setOption(_compileConfig.packDataInCmx, switches, config, ie::MYRIAD_PACK_DATA_IN_CMX);
+ setOption(_compileConfig.ignoreUnknownLayers, switches, config, ie::MYRIAD_IGNORE_UNKNOWN_LAYERS);
+ setOption(_compileConfig.hwOptimization, switches, config, ie::MYRIAD_ENABLE_HW_ACCELERATION);
+ setOption(_compileConfig.hwExtraSplit, switches, config, ie::MYRIAD_HW_EXTRA_SPLIT);
+ setOption(_compileConfig.injectSwOps, switches, config, ie::MYRIAD_HW_INJECT_STAGES);
+ setOption(_compileConfig.mergeHwPoolToConv, switches, config, ie::MYRIAD_HW_POOL_CONV_MERGE);
+ setOption(_compileConfig.hwDilation, switches, config, ie::MYRIAD_HW_DILATION);
+ setOption(_compileConfig.forceDeprecatedCnnConversion, switches, config, ie::MYRIAD_FORCE_DEPRECATED_CNN_CONVERSION);
+ setOption(_compileConfig.disableReorder, switches, config, ie::MYRIAD_DISABLE_REORDER);
+ setOption(_compileConfig.enablePermuteMerging, switches, config, ie::MYRIAD_ENABLE_PERMUTE_MERGING);
+ setOption(_compileConfig.enableReplWithSCRelu, switches, config, ie::MYRIAD_ENABLE_REPL_WITH_SCRELU);
+ setOption(_compileConfig.enableReplaceWithReduceMean, switches, config, ie::MYRIAD_ENABLE_REPLACE_WITH_REDUCE_MEAN);
+ setOption(_compileConfig.enableTensorIteratorUnrolling, switches, config, ie::MYRIAD_ENABLE_TENSOR_ITERATOR_UNROLLING);
+ setOption(_compileConfig.forcePureTensorIterator, switches, config, ie::MYRIAD_FORCE_PURE_TENSOR_ITERATOR);
+ setOption(_compileConfig.disableConvertStages, switches, config, ie::MYRIAD_DISABLE_CONVERT_STAGES);
+
+ setOption(_compileConfig.irWithVpuScalesDir, config, ie::MYRIAD_IR_WITH_SCALES_DIRECTORY);
+ setOption(_compileConfig.noneLayers, config, ie::MYRIAD_NONE_LAYERS, parseStringSet);
+ setOption(_compileConfig.hwWhiteList, config, ie::MYRIAD_HW_WHITE_LIST, parseStringSet);
+ setOption(_compileConfig.hwBlackList, config, ie::MYRIAD_HW_BLACK_LIST, parseStringSet);
// Priority is set to VPU configuration file over plug-in config.
- setOption(_compileConfig.customLayers, config, VPU_CONFIG_KEY(CUSTOM_LAYERS));
+ setOption(_compileConfig.customLayers, config, ie::MYRIAD_CUSTOM_LAYERS);
if (_compileConfig.customLayers.empty()) {
- setOption(_compileConfig.customLayers, config, CONFIG_KEY(CONFIG_FILE));
+ setOption(_compileConfig.customLayers, config, CONFIG_KEY(CONFIG_FILE));
}
auto isPositive = [](int value) {
throw std::invalid_argument("Value must be positive or default(-1).");
};
- setOption(_compileConfig.numSHAVEs, config, VPU_CONFIG_KEY(NUMBER_OF_SHAVES), preprocessCompileOption);
- setOption(_compileConfig.numCMXSlices, config, VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES), preprocessCompileOption);
- setOption(_compileConfig.numExecutors, config, VPU_MYRIAD_CONFIG_KEY(THROUGHPUT_STREAMS), preprocessCompileOption);
- setOption(_compileConfig.tilingCMXLimitKB, config, VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), preprocessCompileOption);
+ setOption(_compileConfig.numSHAVEs, config, ie::MYRIAD_NUMBER_OF_SHAVES, preprocessCompileOption);
+ setOption(_compileConfig.numCMXSlices, config, ie::MYRIAD_NUMBER_OF_CMX_SLICES, preprocessCompileOption);
+ setOption(_compileConfig.numExecutors, config, ie::MYRIAD_THROUGHPUT_STREAMS, preprocessCompileOption);
+ setOption(_compileConfig.tilingCMXLimitKB, config, ie::MYRIAD_TILING_CMX_LIMIT_KB, preprocessCompileOption);
if ((_compileConfig.numSHAVEs < 0 && _compileConfig.numCMXSlices >= 0) ||
(_compileConfig.numSHAVEs >= 0 && _compileConfig.numCMXSlices < 0)) {
THROW_IE_EXCEPTION << "You should set both option for resource management: VPU_NUMBER_OF_CMX_SLICES and VPU_NUMBER_OF_SHAVES";
}
- setOption(_compileConfig.ioStrides, config, VPU_CONFIG_KEY(TENSOR_STRIDES), parseStrides);
+ setOption(_compileConfig.ioStrides, config, ie::MYRIAD_TENSOR_STRIDES, parseStrides);
- setOption(_printReceiveTensorTime, switches, config, VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME));
- setOption(_perfCount, switches, config, CONFIG_KEY(PERF_COUNT));
- setOption(_perfReport, perfReports, config, VPU_CONFIG_KEY(PERF_REPORT_MODE));
+ setOption(_printReceiveTensorTime, switches, config, ie::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME);
+ setOption(_perfCount, switches, config, CONFIG_KEY(PERF_COUNT));
+ setOption(_perfReport, perfReports, config, ie::MYRIAD_PERF_REPORT_MODE);
IE_SUPPRESS_DEPRECATED_START
- setOption(_compileConfig.inputScale, config, VPU_CONFIG_KEY(INPUT_NORM), parseFloatReverse);
- setOption(_compileConfig.inputBias, config, VPU_CONFIG_KEY(INPUT_BIAS), parseFloat);
+ setOption(_compileConfig.hwOptimization, switches, config, VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION));
+ setOption(_compileConfig.customLayers, config, VPU_CONFIG_KEY(CUSTOM_LAYERS));
+ setOption(_printReceiveTensorTime, switches, config, VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME));
+ setOption(_compileConfig.detectBatch, switches, config, VPU_CONFIG_KEY(DETECT_NETWORK_BATCH));
IE_SUPPRESS_DEPRECATED_END
#ifndef NDEBUG
#include <cpp_interfaces/exception2status.hpp>
#include <vpu/vpu_plugin_config.hpp>
+#include <vpu/myriad_config.hpp>
namespace vpu {
namespace MyriadPlugin {
IE_SUPPRESS_DEPRECATED_START
static const std::unordered_set<std::string> options = merge(ParsedConfig::getCompileOptions(), {
VPU_MYRIAD_CONFIG_KEY(PLATFORM),
- VPU_CONFIG_KEY(PLATFORM),
});
IE_SUPPRESS_DEPRECATED_END
static const std::unordered_set<std::string> options = merge(ParsedConfig::getRunTimeOptions(), {
CONFIG_KEY(DEVICE_ID),
- VPU_MYRIAD_CONFIG_KEY(FORCE_RESET),
- VPU_MYRIAD_CONFIG_KEY(PLATFORM),
- VPU_MYRIAD_CONFIG_KEY(PROTOCOL),
- VPU_MYRIAD_CONFIG_KEY(WATCHDOG),
- VPU_MYRIAD_CONFIG_KEY(THROUGHPUT_STREAMS),
- VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT),
+ ie::MYRIAD_ENABLE_FORCE_RESET,
+
+ ie::MYRIAD_PROTOCOL,
+ ie::MYRIAD_WATCHDOG,
+ ie::MYRIAD_THROUGHPUT_STREAMS,
+ ie::MYRIAD_POWER_MANAGEMENT,
- VPU_CONFIG_KEY(FORCE_RESET),
- VPU_CONFIG_KEY(PLATFORM),
+ ie::MYRIAD_PLUGIN_LOG_FILE_PATH,
+ ie::MYRIAD_DEVICE_CONNECT_TIMEOUT,
- VPU_MYRIAD_CONFIG_KEY(PLUGIN_LOG_FILE_PATH),
- VPU_MYRIAD_CONFIG_KEY(DEVICE_CONNECT_TIMEOUT),
+ ie::MYRIAD_DDR_TYPE,
+ // Deprecated
+ VPU_MYRIAD_CONFIG_KEY(FORCE_RESET),
+ VPU_MYRIAD_CONFIG_KEY(PLATFORM),
+ VPU_MYRIAD_CONFIG_KEY(PROTOCOL),
VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE),
});
IE_SUPPRESS_DEPRECATED_END
const std::unordered_set<std::string>& MyriadConfig::getDeprecatedOptions() const {
IE_SUPPRESS_DEPRECATED_START
static const std::unordered_set<std::string> options = merge(ParsedConfig::getDeprecatedOptions(), {
- VPU_CONFIG_KEY(FORCE_RESET),
- VPU_CONFIG_KEY(PLATFORM),
+ VPU_MYRIAD_CONFIG_KEY(FORCE_RESET),
+ VPU_MYRIAD_CONFIG_KEY(PLATFORM),
+ VPU_MYRIAD_CONFIG_KEY(PROTOCOL),
+ VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE),
});
IE_SUPPRESS_DEPRECATED_END
}
void MyriadConfig::parse(const std::map<std::string, std::string>& config) {
- static const std::unordered_map<std::string, ncDevicePlatform_t> platforms = {
- { VPU_MYRIAD_CONFIG_VALUE(2450), NC_MYRIAD_2 },
- { VPU_MYRIAD_CONFIG_VALUE(2480), NC_MYRIAD_X },
- { std::string(), NC_ANY_PLATFORM }
+IE_SUPPRESS_DEPRECATED_START
+ static const std::unordered_map<std::string, ncDevicePlatform_t> platformsDeprecated = {
+ { VPU_MYRIAD_CONFIG_VALUE(2450), NC_MYRIAD_2 },
+ { VPU_MYRIAD_CONFIG_VALUE(2480), NC_MYRIAD_X },
+ { std::string(), NC_ANY_PLATFORM }
};
-IE_SUPPRESS_DEPRECATED_START
- static const std::unordered_map<std::string, ncDevicePlatform_t> platformsDepr = {
- { VPU_CONFIG_VALUE(2450), NC_MYRIAD_2 },
- { VPU_CONFIG_VALUE(2480), NC_MYRIAD_X },
- { std::string(), NC_ANY_PLATFORM }
+ static const std::unordered_map<std::string, ncDeviceProtocol_t> protocolsDeprecated = {
+ { VPU_MYRIAD_CONFIG_VALUE(USB), NC_USB},
+ { VPU_MYRIAD_CONFIG_VALUE(PCIE), NC_PCIE},
+ { std::string(), NC_ANY_PROTOCOL}
+ };
+
+ static const std::unordered_map<std::string, MovidiusDdrType> memoryTypesDeprecated = {
+ { VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO), MovidiusDdrType::AUTO },
+ { VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB), MovidiusDdrType::MICRON_2GB },
+ { VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB), MovidiusDdrType::SAMSUNG_2GB },
+ { VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB), MovidiusDdrType::HYNIX_2GB },
+ { VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB), MovidiusDdrType::MICRON_1GB }
};
IE_SUPPRESS_DEPRECATED_END
static const std::unordered_map<std::string, ncDeviceProtocol_t> protocols = {
- { VPU_MYRIAD_CONFIG_VALUE(USB), NC_USB},
- { VPU_MYRIAD_CONFIG_VALUE(PCIE), NC_PCIE},
- { std::string(), NC_ANY_PROTOCOL}
+ { ie::MYRIAD_USB, NC_USB},
+ { ie::MYRIAD_PCIE, NC_PCIE},
+ { std::string(), NC_ANY_PROTOCOL}
};
static const std::unordered_map<std::string, std::chrono::milliseconds> watchdogIntervals = {
};
static const std::unordered_map<std::string, PowerConfig> powerConfigs = {
- { VPU_MYRIAD_CONFIG_VALUE(POWER_FULL), PowerConfig::FULL },
- { VPU_MYRIAD_CONFIG_VALUE(POWER_INFER), PowerConfig::INFER },
- { VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE), PowerConfig::STAGE },
- { VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE_SHAVES), PowerConfig::STAGE_SHAVES },
- { VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE_NCES), PowerConfig::STAGE_NCES },
+ { ie::MYRIAD_POWER_FULL, PowerConfig::FULL },
+ { ie::MYRIAD_POWER_INFER, PowerConfig::INFER },
+ { ie::MYRIAD_POWER_STAGE, PowerConfig::STAGE },
+ { ie::MYRIAD_POWER_STAGE_SHAVES, PowerConfig::STAGE_SHAVES },
+ { ie::MYRIAD_POWER_STAGE_NCES, PowerConfig::STAGE_NCES },
};
static const std::unordered_map<std::string, MovidiusDdrType> memoryTypes = {
- { VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO), MovidiusDdrType::AUTO },
- { VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB), MovidiusDdrType::MICRON_2GB },
- { VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB), MovidiusDdrType::SAMSUNG_2GB },
- { VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB), MovidiusDdrType::HYNIX_2GB },
- { VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB), MovidiusDdrType::MICRON_1GB }
+ { ie::MYRIAD_DDR_AUTO, MovidiusDdrType::AUTO },
+ { ie::MYRIAD_DDR_MICRON_2GB, MovidiusDdrType::MICRON_2GB },
+ { ie::MYRIAD_DDR_SAMSUNG_2GB, MovidiusDdrType::SAMSUNG_2GB },
+ { ie::MYRIAD_DDR_HYNIX_2GB, MovidiusDdrType::HYNIX_2GB },
+ { ie::MYRIAD_DDR_MICRON_1GB, MovidiusDdrType::MICRON_1GB }
};
ParsedConfig::parse(config);
- setOption(_pluginLogFilePath, config, VPU_MYRIAD_CONFIG_KEY(PLUGIN_LOG_FILE_PATH));
- setOption(_deviceName, config, CONFIG_KEY(DEVICE_ID));
- setOption(_forceReset, switches, config, VPU_MYRIAD_CONFIG_KEY(FORCE_RESET));
- setOption(_platform, platforms, config, VPU_MYRIAD_CONFIG_KEY(PLATFORM));
- setOption(_protocol, protocols, config, VPU_MYRIAD_CONFIG_KEY(PROTOCOL));
- setOption(_watchdogInterval, watchdogIntervals, config, VPU_MYRIAD_CONFIG_KEY(WATCHDOG));
- setOption(_deviceConnectTimeout, config, VPU_MYRIAD_CONFIG_KEY(DEVICE_CONNECT_TIMEOUT), parseSeconds);
- setOption(_powerConfig, powerConfigs, config, VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT));
- setOption(_memoryType, memoryTypes, config, VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE));
+ setOption(_pluginLogFilePath, config, ie::MYRIAD_PLUGIN_LOG_FILE_PATH);
+ setOption(_deviceName, config, CONFIG_KEY(DEVICE_ID));
+ setOption(_forceReset, switches, config, ie::MYRIAD_ENABLE_FORCE_RESET);
+ setOption(_protocol, protocols, config, ie::MYRIAD_PROTOCOL);
+ setOption(_watchdogInterval, watchdogIntervals, config, ie::MYRIAD_WATCHDOG);
+ setOption(_deviceConnectTimeout, config, ie::MYRIAD_DEVICE_CONNECT_TIMEOUT, parseSeconds);
+ setOption(_powerConfig, powerConfigs, config, ie::MYRIAD_POWER_MANAGEMENT);
+ setOption(_memoryType, memoryTypes, config, ie::MYRIAD_DDR_TYPE);
IE_SUPPRESS_DEPRECATED_START
- setOption(_forceReset, switches, config, VPU_CONFIG_KEY(FORCE_RESET));
- setOption(_platform, platformsDepr, config, VPU_CONFIG_KEY(PLATFORM));
+ setOption(_forceReset, switches, config, VPU_MYRIAD_CONFIG_KEY(FORCE_RESET));
+ setOption(_platform, platformsDeprecated, config, VPU_MYRIAD_CONFIG_KEY(PLATFORM));
+ setOption(_protocol, protocolsDeprecated, config, VPU_MYRIAD_CONFIG_KEY(PROTOCOL));
+ setOption(_memoryType, memoryTypesDeprecated, config, VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE));
IE_SUPPRESS_DEPRECATED_END
#ifndef NDEBUG
#include <vpu/utils/error.hpp>
using namespace vpu::MyriadPlugin;
-using namespace InferenceEngine::VPUConfigParams;
-using namespace InferenceEngine::PluginConfigParams;
+using namespace InferenceEngine;
+using namespace VPUConfigParams;
+using namespace PluginConfigParams;
//------------------------------------------------------------------------------
// Implementation of methods of class MyriadMetrics
IE_SUPPRESS_DEPRECATED_START
_supportedConfigKeys = {
+ MYRIAD_ENABLE_HW_ACCELERATION,
+ MYRIAD_ENABLE_RECEIVING_TENSOR_TIME,
+ MYRIAD_CUSTOM_LAYERS,
+ MYRIAD_ENABLE_FORCE_RESET,
+
+ // deprecated
KEY_VPU_HW_STAGES_OPTIMIZATION,
- KEY_LOG_LEVEL,
KEY_VPU_PRINT_RECEIVE_TENSOR_TIME,
KEY_VPU_CUSTOM_LAYERS,
KEY_VPU_MYRIAD_FORCE_RESET,
KEY_VPU_MYRIAD_PLATFORM,
- KEY_EXCLUSIVE_ASYNC_REQUESTS,
- KEY_PERF_COUNT,
- KEY_CONFIG_FILE,
- KEY_DEVICE_ID
+
+ CONFIG_KEY(LOG_LEVEL),
+ CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS),
+ CONFIG_KEY(PERF_COUNT),
+ CONFIG_KEY(CONFIG_FILE),
+ CONFIG_KEY(DEVICE_ID)
};
IE_SUPPRESS_DEPRECATED_END
RangeType MyriadMetrics::RangeForAsyncInferRequests(
const std::map<std::string, std::string>& config) const {
- auto throughput_streams_str = config.find(KEY_VPU_MYRIAD_THROUGHPUT_STREAMS);
+ auto throughput_streams_str = config.find(ie::MYRIAD_THROUGHPUT_STREAMS);
if (throughput_streams_str != config.end()) {
try {
int throughput_streams = std::stoi(throughput_streams_str->second);
}
}
catch(...) {
- THROW_IE_EXCEPTION << "Invalid config value for VPU_MYRIAD_THROUGHPUT_STREAMS, can't cast to int";
+ THROW_IE_EXCEPTION << "Invalid config value for MYRIAD_THROUGHPUT_STREAMS, can't cast to int";
}
}
_pluginName = "MYRIAD";
+IE_SUPPRESS_DEPRECATED_START
_config = {
- { KEY_VPU_HW_STAGES_OPTIMIZATION, "ON" },
- { KEY_LOG_LEVEL, "LOG_NONE" },
- { KEY_VPU_PRINT_RECEIVE_TENSOR_TIME, "OFF" },
+ { MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES) },
+ { MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(NO) },
+ { MYRIAD_CUSTOM_LAYERS, "" },
+ { MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(NO) },
+
+ // Deprecated
+ { KEY_VPU_HW_STAGES_OPTIMIZATION, CONFIG_VALUE(YES) },
+ { KEY_VPU_PRINT_RECEIVE_TENSOR_TIME, CONFIG_VALUE(NO) },
{ KEY_VPU_CUSTOM_LAYERS, "" },
- { KEY_VPU_MYRIAD_FORCE_RESET, "OFF" },
+ { KEY_VPU_MYRIAD_FORCE_RESET, CONFIG_VALUE(NO) },
{ KEY_VPU_MYRIAD_PLATFORM, "" },
- { KEY_EXCLUSIVE_ASYNC_REQUESTS, "OFF" },
- { KEY_PERF_COUNT, "OFF" },
+
+ { KEY_LOG_LEVEL, CONFIG_VALUE(LOG_NONE) },
+ { KEY_EXCLUSIVE_ASYNC_REQUESTS, CONFIG_VALUE(NO) },
+ { KEY_PERF_COUNT, CONFIG_VALUE(NO) },
{ KEY_CONFIG_FILE, "" },
{ KEY_DEVICE_ID, "" },
};
+IE_SUPPRESS_DEPRECATED_END
}
InferenceEngine::ExecutableNetwork Engine::ImportNetwork(
};
const std::vector<std::map<std::string, std::string>> Configs = {
- {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}},
- {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}},
+ {{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(YES)}},
+ {{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(NO)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_NONE)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_ERROR)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_TRACE)}},
+ {{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}},
+ {{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)}},
+
+ {{InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB, "-1"}},
+ {{InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB, "0"}},
+ {{InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB, "10"}},
+
+ {{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(YES)}},
+ {{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(NO)}},
+ {{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB}},
+ {{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE}},
+
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "1"}},
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "2"}},
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "3"}},
+
+ // Deprecated
+ {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}},
+ {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}},
+
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}},
- {{VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), "-1"}},
- {{VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), "0"}},
- {{VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), "10"}},
-
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}},
+
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}},
+
+ // Deprecated
+ {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}
};
CorrectConfigTests::getTestCaseName);
const std::vector<std::map<std::string, std::string>> inconfigs = {
+ {{InferenceEngine::MYRIAD_PROTOCOL, "BLUETOOTH"}},
+ {{InferenceEngine::MYRIAD_PROTOCOL, "LAN"}},
+
+ {{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, "ON"}},
+ {{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, "OFF"}},
+
+ {{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, "ON"}},
+ {{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, "OFF"}},
+
+ {{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}},
+
+ {{InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB, "-10"}},
+
+ {{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, "ON"}},
+ {{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, "OFF"}},
+
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "Two"}},
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "SINGLE"}},
+
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "BLUETOOTH"}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "LAN"}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "ON"}},
{{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "OFF"}},
- {{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}},
+ {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "ON"}},
+ {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "OFF"}},
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}},
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}},
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}},
-
- {{VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), "-10"}},
-
- {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "ON"}},
- {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "OFF"}}
};
const std::vector<std::map<std::string, std::string>> multiinconfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, "ON"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}},
+
+ // Deprecated
+ {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
+ {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), "ON"}},
+
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}},
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
- {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}}
+ {VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}},
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
::testing::Values(CommonTestUtils::DEVICE_MULTI),
::testing::ValuesIn(multiconf)),
CorrectConfigAPITests::getTestCaseName);
-} // namespace
\ No newline at end of file
+} // namespace
const std::vector<std::map<std::string, std::string>> inferConfigs = {
{},
- {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}},
- {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}},
+ {{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(YES)}},
+ {{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(NO)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_NONE)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_ERROR)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
{{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_TRACE)}},
- {{VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), "-1"}},
- {{VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), "0"}},
- {{VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB), "1"}},
+ {{InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB, "-1"}},
+ {{InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB, "0"}},
+ {{InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB, "1"}},
+
+ {{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}},
+ {{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)}},
+
+ {{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(YES)}},
+ {{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(NO)}},
+
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "1"}},
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "2"}},
+ {{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "3"}},
+
+
+ // Deprecated
+ {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}},
+ {{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}},
{{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}},
{{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)}},
- {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}}
+ {{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(NO)}},
};
const std::vector<std::map<std::string, std::string>> inferMultiConfigs = {
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
- {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
+ {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}},
+ {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}},
+
+ // Deprecated
{{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}
+ {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}},
};
INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
Config getConfig() {
Config config;
- config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
if (CommonTestUtils::vpu::CheckMyriad2()) {
- config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ config[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
}
return config;
}
ConfigMap getConfig() {
ConfigMap config;
- config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
if (CommonTestUtils::vpu::CheckMyriad2()) {
- config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ config[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
}
return config;
}
Config getConfig() {
Config config;
if (CommonTestUtils::vpu::CheckMyriad2()) {
- config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ config[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
}
return config;
}
protected:
void SetUp() override {
SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
- configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
- configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ configuration[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
+ configuration[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
OutShapeOfReshapeParam shapesParam;
std::tie(shapesParam, targetDevice) = this->GetParam();
protected:
void SetUp() override {
SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
- configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ configuration[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
StaticShapeBroadcastParam shapes;
std::tie(shapes, inPrc, targetDevice) = this->GetParam();
protected:
void SetUp() override {
SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
- configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
- configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ configuration[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
+ configuration[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
InferenceEngine::SizeVector inputShape;
std::tie(inputShape, inPrc, targetDevice) = this->GetParam();
void SetUp() override {
SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
- configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ configuration[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
if (CommonTestUtils::vpu::CheckMyriad2()) {
- configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ configuration[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
}
const auto testedOp = createTestedOp();
protected:
void SetUp() override {
SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING);
- configuration[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ configuration[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
// DISABLE_REORDER is needed for Myriad2 cases
if (CommonTestUtils::vpu::CheckMyriad2()) {
- configuration[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ configuration[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
}
const auto& parameters = GetParam();
switch (protocol) {
case NC_ANY_PROTOCOL :
return {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
- {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), ""}};
+ {InferenceEngine::MYRIAD_PROTOCOL, ""}};
case NC_USB:
return {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
- {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}};
+ {InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB}};
case NC_PCIE:
return {{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
- {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}};
+ {InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE}};
default:
return {};
}
CNNNetwork network = ie.ReadNetwork(GetParam().model_xml_str, Blob::CPtr());
ExecutableNetwork net = ie.LoadNetwork(network, GetParam().device,
{ {KEY_LOG_LEVEL, LOG_DEBUG},
- {KEY_VPU_MYRIAD_WATCHDOG, NO} });
+ {InferenceEngine::MYRIAD_WATCHDOG, NO} });
ASSERT_EQ(getAmountOfBootedDevices(), 1);
}
CNNNetwork network = ie.ReadNetwork(GetParam().model_xml_str, Blob::CPtr());
ExecutableNetwork net = ie.LoadNetwork(network, GetParam().device,
{ {KEY_LOG_LEVEL, LOG_DEBUG},
- {KEY_VPU_MYRIAD_WATCHDOG, NO} });
+ {InferenceEngine::MYRIAD_WATCHDOG, NO} });
ASSERT_EQ(getAmountOfBootedDevices(), 2);
}
ctime = Time::now();
ret = core.LoadNetwork(network, GetParam().device, {
{KEY_LOG_LEVEL, LOG_INFO},
- {KEY_VPU_MYRIAD_WATCHDOG, NO}});
+ {InferenceEngine::MYRIAD_WATCHDOG, NO}});
ASSERT_BOOTED_DEVICES_ONE_MORE();
};
const std::vector<BehTestParams> deviceSpecificConfigurations = {
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}}),
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE}}),
+ // Deprecated
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}}),
+
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}}),
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}}),
};
const std::vector<BehTestParams> deviceAgnosticConfigurations = {
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}}),
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(YES)}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(NO)}}),
BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_NONE)}}),
BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_ERROR)}}),
BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}}),
BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_TRACE)}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)}}),
+
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(YES)}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(NO)}}),
+
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "1"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "2"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "3"}}),
+
+
+ BEH_MULTI_CONFIG.withConfig({
+ {MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
+ {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}
+ }),
+ BEH_MULTI_CONFIG.withConfig({
+ {MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}
+ }),
+
+ // Please do not use other types of DDR in tests with a real device, because it may hang.
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_AUTO}}),
+
+ // Deprecated
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(YES)}}),
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)}}),
+
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)}}),
BEH_MULTI_CONFIG.withConfig({
{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
- {CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_DEBUG)}
- }),
- BEH_MULTI_CONFIG.withConfig({
- {MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}
}),
};
const BehTestParams withIncorrectConfValues[] = {
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_PROTOCOL, "BLUETOOTH"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_PROTOCOL, "LAN"}}),
+
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, "ON"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, "OFF"}}),
+
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, "ON"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, "OFF"}}),
+
+ BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}}),
+
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, "ON"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, "OFF"}}),
+
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "Single"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, "TWO"}}),
+
+ BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
+                                     {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, "ON"}}),
+ BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
+ {CONFIG_KEY(LOG_LEVEL), "VERBOSE"}}),
+
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_DDR_TYPE, "-1"}}),
+ BEH_MYRIAD.withConfig({{InferenceEngine::MYRIAD_DDR_TYPE, "MICRON"}}),
+
+ // Deprecated
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "BLUETOOTH"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "LAN"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "ON"}}),
BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), "OFF"}}),
- BEH_MYRIAD.withConfig({{CONFIG_KEY(LOG_LEVEL), "VERBOSE"}}),
-
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}}),
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}}),
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}}),
-
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "ON"}}),
BEH_MYRIAD.withConfig({{VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), "OFF"}}),
BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),"ON"}}),
- BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
- {CONFIG_KEY(LOG_LEVEL), "VERBOSE"}}),
+
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-1"}}),
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "MICRON"}}),
+
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}}),
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}}),
+ BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}}),
+
BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}}),
BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "0"}}),
BEH_MULTI_CONFIG.withConfig({{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "MYRIAD"},
{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "1"}}),
-
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-1"}}),
- BEH_MYRIAD.withConfig({{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "MICRON"}}),
};
const BehTestParams withIncorrectConfKeys[] = {
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
const auto inputTensors = IN_OUT_desc{{dims.c, dims.h, dims.w}, {dims.h, dims.c}};
const auto outputTensors = IN_OUT_desc{{1, 1, dims.h, dims.c}};
};
TEST_P(myriadLayersTestsBias_smoke, TestsBias) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
auto input_dim = GetParam();
InferenceEngine::SizeVector input_dim1;
auto dims = input_dim.size();
StatusCode st;
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork,
- { {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue } }, &_resp));
+ { {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, HWConfigValue } }, &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
std::stringstream modelFilenameStream;
modelFilenameStream << "spltConvConcat" << i << ".blob";
ASSERT_EQ(StatusCode::OK, _exeNetwork->Export(modelFilenameStream.str(), &_resp)) << _resp.msg;
- std::map<std::string, std::string> config = { {VPU_CONFIG_KEY(COPY_OPTIMIZATION), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(IGNORE_UNKNOWN_LAYERS), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(NONE_LAYERS), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(NUMBER_OF_SHAVES), std::to_string(10)},
- {VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES), std::to_string(10)} };
-
- IE_SUPPRESS_DEPRECATED_START
- config[VPU_CONFIG_KEY(INPUT_NORM)] = std::to_string(1.f);
- config[VPU_CONFIG_KEY(INPUT_BIAS)] = std::to_string(1.f);
- IE_SUPPRESS_DEPRECATED_START
+ std::map<std::string, std::string> config = { {InferenceEngine::MYRIAD_COPY_OPTIMIZATION, CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_IGNORE_UNKNOWN_LAYERS, CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_NONE_LAYERS, CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_NUMBER_OF_SHAVES, std::to_string(10)},
+ {InferenceEngine::MYRIAD_NUMBER_OF_CMX_SLICES, std::to_string(10)} };
InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
ASSERT_EQ(StatusCode::OK, _vpuPluginPtr->ImportNetwork(importedNetworkPtr, modelFilenameStream.str(), config, &_resp)) << _resp.msg;
std::map<std::string, std::string> config = { {CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)},
{CONFIG_KEY(LOG_LEVEL), CONFIG_VALUE(LOG_INFO)},
{CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME), CONFIG_VALUE(YES)} };
+ {InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME, CONFIG_VALUE(YES)} };
if (vpu::tests::deviceForceReset()) {
- config.insert({VPU_MYRIAD_CONFIG_KEY(FORCE_RESET), CONFIG_VALUE(NO)});
- config.insert({VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)});
+ config.insert({InferenceEngine::MYRIAD_ENABLE_FORCE_RESET, CONFIG_VALUE(NO)});
+ config.insert({VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)});
}
InferenceEngine::IExecutableNetwork::Ptr importedNetworkPtr;
INSTANTIATE_TEST_CASE_P(accuracy, myriadBlobExportAccuracyDifferentPrecisionOfInAndOutTests_smoke,
- ::testing::Combine(::testing::ValuesIn(inputPrecisions), ::testing::ValuesIn(outputPrecisions)));
\ No newline at end of file
+ ::testing::Combine(::testing::ValuesIn(inputPrecisions), ::testing::ValuesIn(outputPrecisions)));
typedef myriadLayerTestBaseWithParam<std::tuple<SizeVector, clamp_test_params>> myriadLayersTestsClampParams_smoke;
TEST_P(myriadLayersTestsClampParams_smoke, TestsClamp) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
auto param = GetParam();
SizeVector tensor = std::get<0>(param);
clamp_test_params p = std::get<1>(param);
protected:
void testConvND() {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
- _config[VPU_CONFIG_KEY(HW_INJECT_STAGES)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_HW_INJECT_STAGES] = CONFIG_VALUE(NO);
//
// Get test parameters
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP()<<"Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
- _config[VPU_CONFIG_KEY(DISABLE_CONVERT_STAGES)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DISABLE_CONVERT_STAGES] = CONFIG_VALUE(YES);
SetInputTensors({dims});
SetOutputTensors({dims});
if(!customConfig.custom_config.empty() && !CheckMyriadX()) {
GTEST_SKIP()<<"Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = HWConfigValue;
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig.custom_config;
+ _config[InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION] = HWConfigValue;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig.custom_config;
int IB = customConfig.src_dims[0];
int IC = customConfig.src_dims[1];
_outputsInfo["conv1x1"]->setLayout(layout);
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
- {{VPU_CONFIG_KEY(CUSTOM_LAYERS), customConfig.custom_config}, {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue}}, &_resp));
+ {{InferenceEngine::MYRIAD_CUSTOM_LAYERS, customConfig.custom_config},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, HWConfigValue}},
+ &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
if(!customConfig.custom_config.empty() && !CheckMyriadX()) {
GTEST_SKIP()<<"Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = HWConfigValue;
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig.custom_config;
+ _config[InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION] = HWConfigValue;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig.custom_config;
int IB = customConfig.src_dims[0];
int IC = customConfig.src_dims[1];
_outputsInfo["conv3x3"]->setLayout(NCHW);
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
- {{VPU_CONFIG_KEY(CUSTOM_LAYERS), customConfig.custom_config}, {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue}}, &_resp));
+ {{InferenceEngine::MYRIAD_CUSTOM_LAYERS, customConfig.custom_config},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, HWConfigValue}},
+ &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
{{1, 256, 30, 30}, {1, 256, 14, 14}, 2, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
{{1, 256, 16, 16}, {1, 384, 7, 7}, 2, getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"},
#endif
-};
\ No newline at end of file
+};
group = std::get<6>(p);
get_dims(input_tensor, IW, IH, IC, I_N);
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
size_t out_w = (IW + pads_begin.x + pads_end.x - kernel.x + stride.x) / stride.x;
size_t out_h = (IH + pads_begin.y + pads_end.y - kernel.y + stride.y) / stride.y;
gen_dims(output_tensor, input_tensor.size(), out_w, out_h, out_channels, I_N);
group = std::get<6>(p);
get_dims(input_tensor, IW, IH, IC, I_N);
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
size_t out_w = (IW + pads_begin.x + pads_end.x - kernel.x + stride.x) / stride.x;
size_t out_h = (IH + pads_begin.y + pads_end.y - kernel.y + stride.y) / stride.y;
gen_dims(output_tensor, input_tensor.size(), out_w, out_h, out_channels, I_N);
group = std::get<6>(p);
get_dims(input_tensor, IW, IH, IC, I_N);
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
size_t out_w = (IW + pads_begin.x + pads_end.x - kernel.x + stride.x) / stride.x;
size_t out_h = (IH + pads_begin.y + pads_end.y - kernel.y + stride.y) / stride.y;
gen_dims(output_tensor, input_tensor.size(), out_w, out_h, out_channels, I_N);
SetInputTensors(inputTensors);
SetOutputTensors(outputTensors);
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Copy")));
SetFirstInputToRange(1.0f, 100.0f);
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP()<<"Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
SetInputTensor(dims);
SetOutputTensor(dims);
if (!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
srand(42);
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP()<<"Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
int IB = dims.n;
int IC = dims.c;
_outputsInfo["Quantize"]->setLayout(NCHW);
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
- {{VPU_CONFIG_KEY(CUSTOM_LAYERS), customConfig }}, &_resp));
+ {{InferenceEngine::MYRIAD_CUSTOM_LAYERS, customConfig }}, &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP()<<"Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
SetInputTensor(dims);
auto dimsOutput = dims;
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
IN_OUT_desc inputTensors = {{1, 1, 3, 4}, image_dims, {1, 3, 480, 480}};
IN_OUT_desc outputTensors = {{1, 1,
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
const int paddedbottomwidth = dims.w + 2 * pad_size;
const int paddedbottomheight = dims.h + 2 * pad_size;
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
SetInputTensors({dims.asVector(), {1, 1, 2, 3}});
SetOutputTensor(dims);
}
if (input_dims.n > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
size_t out_w = stride.x * (input_dims.w - 1) + kernel.x - 2 * pad.x;
size_t out_h = stride.y * (input_dims.h - 1) + kernel.y - 2 * pad.y;
}
if (input_dims.n > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
size_t out_w = stride.x * (input_dims.w - 1) + kernel.x - (pad.x + pad_end.x);
size_t out_h = stride.y * (input_dims.h - 1) + kernel.y - (pad.y + pad_end.y);
InferenceEngine::Blob::Ptr inputBlob;
InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
- std::map<std::string, std::string> networkConfig = {{VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)}};
+ std::map<std::string, std::string> networkConfig = {{InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE}};
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network, networkConfig, &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(exeNetwork, nullptr) << _resp.msg;
outputInfo->setPrecision(Precision::FP16);
StatusCode st;
- ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
- {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
+ ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+ { {InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE},
+ {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)} },
&_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
outputInfo->setPrecision(Precision::FP16);
StatusCode st;
- ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
- {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
- &_resp));
+ ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+ { {InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE},
+ {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)} },
+ &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
outputInfo->setPrecision(Precision::FP16);
StatusCode st;
- ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,{ {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
- {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
- &_resp));
+ ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(
+ _exeNetwork, network,
+ { {InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE},
+ {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)} },
+ &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
SetInputTensors(inpt);
SetOutputTensors({dims});
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
if (!CheckMyriadX()) {
- _config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
}
ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Eltwise").params(_params)));
typedef myriadLayerTestBaseWithParam<std::tuple<SizeVector, alpha>> myriadLayersTestsELUParams_smoke;
TEST_P(myriadLayersTestsELUParams_smoke, TestsELU) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
auto param = GetParam();
SizeVector tensor = std::get<0>(param);
TEST_P(myriadLayersTestsErf_smoke, TestsErf)
{
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
auto p = ::testing::WithParamInterface<SizeVector>::GetParam();
SetInputTensors({p});
SetOutputTensors({p});
protected:
void testExpDetectionOutput()
{
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
const auto testParams = GetParam();
const auto sizeParams = std::get<0>(testParams);
const auto numProposals = scoresDims.c * scoresDims.h * scoresDims.w;
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
IN_OUT_desc inputTensors, outputTensors;
inputTensors.push_back({3}); // im info
const auto numPriors = inputTensorsDims.priors.n;
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
IN_OUT_desc inputTensors, outputTensors;
inputTensors.push_back({inputTensorsDims.priors.n, inputTensorsDims.priors.c});
int inputRoisNum = std::get<0>(GetParam());
TopKROIsParam opParams = std::get<1>(GetParam());
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
IN_OUT_desc inputTensors, outputTensors;
inputTensors.push_back({static_cast<size_t>(inputRoisNum), NUM_COORDS}); // input rois
get_dims(input_tensor, IW, IH, IC, I_N);
InferenceEngine::SizeVector output_tensor = {(size_t)I_N, (size_t)out_size};
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
size_t sz_weights = IC * IH * IW * out_size;
size_t sz_bias = 0;
get_dims(input_tensor, IW, IH, IC, I_N);
InferenceEngine::SizeVector output_tensor = {(size_t)I_N, (size_t)out_size};
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
size_t sz_weights = IC * IH * IW * out_size;
size_t sz_bias = 0;
void testGather() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
//
// Parse and check test parameters
};
if (MB1_D > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("GEMM").params(params), NetworkInitParams().layoutPreference(layoutPreference)));
if (!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
SetInputTensors({dims});
SetOutputTensors({dims});
static std::vector<SizeVector> s_GRNInputs = {
{1, 3, 16, 224},
{1, 24, 128, 224},
-};
\ No newline at end of file
+};
_outputsInfo["mvn"]->setPrecision(Precision::FP16);
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
- {{VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES)}}, &_resp));
+ {{InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES)}}, &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP()<<"Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
SetInputTensor(dims);
SetOutputTensor(dims);
};
void myriadLayerTestNonZero_smoke::testNonZero(vpu::LayoutPreference preference, Precision precision) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
const auto& inputDims = GetParam();
const size_t numDims = inputDims.size();
typedef myriadLayerTestBaseWithParam<oneHot_test_params> myriadLayerTestOneHot_smoke;
TEST_P(myriadLayerTestOneHot_smoke, OneHot) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
OneHotParams testParams = GetParam();
const std::map<std::string, std::string> layerParams{{"order", order}};
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
switch (precision)
{
static const std::vector<InferenceEngine::Precision> s_permutePrecisions = {
InferenceEngine::Precision::I32,
InferenceEngine::Precision::FP16,
-};
\ No newline at end of file
+};
auto order = std::get<1>(p);
get_dims(input_tensor, IW, IH, IC, I_N);
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
if (input_tensor.size()) {
gen_dims(output_tensor, input_tensor.size(), input_tensor[order[3]],
input_tensor[order[2]],
// Initialize network
//
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
std::string model = createModel(inputShape,
outputShape,
get_dims(input_tensor, IW, IH, IC, I_N);
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
int32_t OW = 1;
int32_t OH = 1;
int32_t OC = 1;
get_dims(input_tensor, IW, IH, IC, I_N);
if (I_N > 1)
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
else
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(YES);
int32_t OW = 1;
int32_t OH = 1;
int32_t OC = 1;
typedef myriadLayerTestBaseWithParam<std::tuple<SizeVector, pwr_test_params>> myriadLayersTestsPowerParams_smoke;
TEST_P(myriadLayersTestsPowerParams_smoke, TestsPower) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
auto param = GetParam();
SizeVector tensor = std::get<0>(param);
pwr_test_params p = std::get<1>(param);
typedef myriadLayerTestBaseWithParam<tuple<SizeVector, ChannelSharedPrelu >> myriadLayerPReLU_smoke;
TEST_P(myriadLayerPReLU_smoke, PReLU) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
SizeVector dims = get<0>(GetParam());
int channel_shared = get<1>(GetParam());
void testReduce()
{
DISABLE_IF(!CheckMyriadX());
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
const auto params = GetParam();
const auto inputPair = std::get<0>(params);
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
const auto mask = [&] {
std::string mask;
#ifdef VPU_HAS_CUSTOM_KERNELS
getIELibraryPath() + "/vpu_custom_kernels/customLayerBindings.xml"
#endif
-};
\ No newline at end of file
+};
_outputsInfo = network.getOutputsInfo();
_outputsInfo["relu"]->setPrecision(Precision::FP16);
- ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)} },
+ ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+ { {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)} },
&_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
typedef myriadLayerTestBaseWithParam<std::tuple<InferenceEngine::SizeVector, ReLULayerDef>> myriadLayerReLU_smoke;
TEST_P(myriadLayerReLU_smoke, ReLU) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
auto input_dims = std::get<0>(GetParam());
auto extraLayerParams = std::get<1>(GetParam());
IN_OUT_desc input_tensor;
if(!customConfig.empty() && !CheckMyriadX()) {
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
const auto dimsOutput = SizeVector{dimsInput[0],
dimsInput[1] * (stride * stride),
{1, 64, 26, 26},
{1, 192, 6 * 26, 6 * 26},
{1, 4, 6, 6}
-};
\ No newline at end of file
+};
GTEST_SKIP() << "Custom layers for MYRIAD2 not supported";
}
- _config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = customConfig;
+ _config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = customConfig;
const auto outputDims = SizeVector{inputDims[0],
inputDims[1],
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork,
{
{
- VPU_CONFIG_KEY(PERF_REPORT_MODE),
- VPU_CONFIG_VALUE(PER_STAGE)
+ InferenceEngine::MYRIAD_PERF_REPORT_MODE,
+ InferenceEngine::MYRIAD_PER_STAGE
},
{
- VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),
+ InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION,
CONFIG_VALUE(NO)
},
{
_outputsInfo = network.getOutputsInfo();
_outputsInfo["reshape_copy"]->setPrecision(Precision::FP16);
- ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network, { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(NO)},
- {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)} }, &_resp));
+ ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
+ { {InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(NO)},
+ {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES)} }, &_resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
ASSERT_NE(_exeNetwork, nullptr) << _resp.msg;
.out({output_tensor}),
ref_reshape_wrap);
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
ASSERT_TRUE(generateNetAndInfer(NetworkInitParams().useHWOpt(CheckMyriadX()).layoutPreference(vpu::LayoutPreference::ChannelMinor)));
}
InferenceEngine::IExecutableNetwork::Ptr exeNetwork;
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(exeNetwork, network,
- { {VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE)},
- {VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), HWConfigValue},
- {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}, &resp));
+ { {InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE},
+ {InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, HWConfigValue},
+ {CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) }}, &resp));
ASSERT_EQ(StatusCode::OK, st) << _resp.msg;
InferenceEngine::IInferRequest::Ptr inferRequest;
}
TEST_P(myriadLayerReverseSequence_smoke, ReverseSequence) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
reverse_sequence_test_params input_dims = std::get<0>(GetParam());
_irVersion = std::get<1>(GetParam());
}
TEST_P(myriadLayersTestsROIAlign_smoke, ROIAlign) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
const tensor_test_params dims_layer_in = std::get<0>(GetParam());
const roi_align_param test_params = std::get<1>(GetParam());
static std::vector<roi_align_mode> s_ROIAlignMode = {
std::string("avg"),
std::string("max")
-};
\ No newline at end of file
+};
bool use_output_rois = true;
const int levels_num = 4;
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
IN_OUT_desc input_tensors, output_tensors;
input_tensors.push_back({num_rois, NUM_ELEM_ROIS});
TEST_P(myriadLayersTestsScale_smoke, TestsScale)
{
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
SizeVector p = std::get<0>(::testing::WithParamInterface<TestScaleShift>::GetParam());
bool biasAdd = std::get<1>(::testing::WithParamInterface<TestScaleShift>::GetParam());
void testScatterElementsUpdate() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
//
// Parse test parameters
protected:
void testScatterUpdate() {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
//
// Get and verify test parameters, and deduce other parameters
SetInputTensors(inpt);
SetOutputTensors({dims});
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
ASSERT_NO_FATAL_FAILURE(makeSingleLayerNetwork(LayerInitParams("Select").params(_params)));
ASSERT_TRUE(Infer());
TEST_P(myriadLayersTestsSigmoid_smoke, TestsSigmoid)
{
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
SizeVector p = GetParam();
SetInputTensors({p});
typedef myriadLayerTestBaseWithParam<SliceTestParams> myriadLayersTestsSlice_smoke;
TEST_P(myriadLayersTestsSlice_smoke, Slice) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
const SliceParams testParams = GetParam();
TEST_P(myriadLayersTestsSoftMax_smoke, TestsSoftMax)
{
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
SetInputTensors({_testingInput.sizes});
SetOutputTensors({_testingInput.sizes});
typedef myriadLayerTestBaseWithParam<SplitTestParams> myriadLayersTestsSplit_smoke;
TEST_P(myriadLayersTestsSplit_smoke, Split) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
const SplitParams testParams = GetParam();
StatusCode st = GENERAL_ERROR;
std::map<std::string, std::string> config = {
- { VPU_CONFIG_KEY(DETECT_NETWORK_BATCH), CONFIG_VALUE(NO) }
+ { InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO) }
};
if (!CheckMyriadX()) {
- config.insert({ VPU_CONFIG_KEY(DISABLE_REORDER), CONFIG_VALUE(YES) });
+ config.insert({ InferenceEngine::MYRIAD_DISABLE_REORDER, CONFIG_VALUE(YES) });
}
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(
TEST_P(myriadLayersTestsTanh_smoke, TestsTanh)
{
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
auto p = ::testing::WithParamInterface<SizeVector>::GetParam();
SetInputTensors({p});
SetOutputTensors({p});
typedef myriadLayerTestBaseWithParam<tuple<test_params, tiles>> myriadLayerTestTile_smoke;
TEST_P(myriadLayerTestTile_smoke, Tile) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
tile_test::nd_tensor_test_params input_dims = get<0>(GetParam());
int tiles = get<1>(GetParam());
{
protected:
void testTopK(const IRVersion irVersion, const bool outputValues, const bool outputIndices) {
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
_config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES);
_irVersion = irVersion;
numWeights = kernel.x * kernel.y * (in_dims.c / group) * out_dims.c;
numBiases = out_dims.c;
- _config[VPU_CONFIG_KEY(HW_DILATION)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_HW_DILATION] = CONFIG_VALUE(YES);
}
void AddInitialCopyLayer() {
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, network,
{
{
- VPU_CONFIG_KEY(PERF_REPORT_MODE),
- VPU_CONFIG_VALUE(PER_STAGE)
+ InferenceEngine::MYRIAD_PERF_REPORT_MODE,
+ InferenceEngine::MYRIAD_PER_STAGE
},
{
- VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION),
+ InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION,
CONFIG_VALUE(YES)
},
},
in_tensor.push_back({p.in.n, p.in.c, p.in.h, p.in.w});
out_tensor.push_back({p.in.n, p.out_c});
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
}
void AddFCLayer() {
const int NUM_REQUESTS = 4;
std::map<std::string, std::string> config = {
- { VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), CONFIG_VALUE(YES) },
+ { InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, CONFIG_VALUE(YES) },
{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) },
- { VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE) }
+ { InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE }
};
StatusCode st;
in_tensor.push_back({in_dims.n, in_dims.c, in_dims.h, in_dims.w});
out_tensor.push_back({out_dims.n, out_dims.c, out_dims.h, out_dims.w});
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
}
void AddPoolingLayer(const std::string& poolMethod) {
StatusCode st;
std::map<std::string, std::string> config = {
- { VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION), runInfo.hwMode ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO) },
+ { InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION, runInfo.hwMode ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO) },
{ CONFIG_KEY(PERF_COUNT), CONFIG_VALUE(YES) },
- { VPU_CONFIG_KEY(PERF_REPORT_MODE), VPU_CONFIG_VALUE(PER_STAGE) },
+ { InferenceEngine::MYRIAD_PERF_REPORT_MODE, InferenceEngine::MYRIAD_PER_STAGE },
{ CONFIG_KEY(LOG_LEVEL), logLevel }
};
ResetGeneratedNet();
ResetReferenceLayers();
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH) ] = CONFIG_VALUE(NO);
- _config[VPU_CONFIG_KEY(ENABLE_PERMUTE_MERGING)] = usePermuteMerging ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO) ;
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH ] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_ENABLE_PERMUTE_MERGING] = usePermuteMerging ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO) ;
for (const auto& permutation_vector : permutation_vectors) {
const auto num_dims = input_tensor_sizes.size();
static const std::vector<config_t> myriadCorrectPlatformConfigValues = {
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}},
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}},
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), ""}},
- // Deprecated
- {{VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2450)}},
- {{VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)}},
- {{VPU_CONFIG_KEY(PLATFORM), ""}}
+ {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), ""}}
};
static const std::vector<config_t> myriadIncorrectPlatformConfigValues = {
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}},
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), " 0"}},
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "MyriadX"}},
- // Deprecated
- {{VPU_CONFIG_KEY(PLATFORM), "-1"}},
- {{VPU_CONFIG_KEY(PLATFORM), " 0"}},
- {{VPU_CONFIG_KEY(PLATFORM), "MyriadX"}},
- // Deprecated key & value from current
- {{VPU_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}},
- {{VPU_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}},
- // Current key & deprecated value
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2450)}},
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)}},
+ {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "MyriadX"}}
};
static const std::vector<config_t> myriadCorrectPackageTypeConfigValues = {
// Please do not use other types of DDR in tests with a real device, because it may hang.
+ {{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_AUTO}},
+
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO)}}
};
static const std::vector<config_t> myriadIncorrectPackageTypeConfigValues = {
+ {{InferenceEngine::MYRIAD_DDR_TYPE, "-1"}},
+ {{InferenceEngine::MYRIAD_DDR_TYPE, "-MICRON_1GB"}},
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-1"}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-MICRON_1GB"}},
};
INSTANTIATE_TEST_CASE_P(MyriadConfigs, myriadCorrectModelsConfigsTests_nightly,
- ::testing::ValuesIn(myriadCorrectPlatformConfigValues));
+ ::testing::ValuesIn(myriadCorrectPlatformConfigValues));
INSTANTIATE_TEST_CASE_P(MyriadConfigs, myriadIncorrectModelsConfigsTests_nightly,
- ::testing::ValuesIn(myriadIncorrectPlatformConfigValues));
+ ::testing::ValuesIn(myriadIncorrectPlatformConfigValues));
INSTANTIATE_TEST_CASE_P(MyriadPackageConfigs, myriadCorrectModelsConfigsTests_nightly,
::testing::ValuesIn(myriadCorrectPackageTypeConfigValues));
class myriadCorrectStreamsConfiguration_nightly : public vpuLayersTests, public testing::WithParamInterface<std::uint32_t> {};
TEST_P(myriadCorrectStreamsConfiguration_nightly, InfersWithConfiguredStreams) {
- _config[VPU_MYRIAD_CONFIG_KEY(THROUGHPUT_STREAMS)] = std::to_string(GetParam());
+ _config[InferenceEngine::MYRIAD_THROUGHPUT_STREAMS] = std::to_string(GetParam());
_irVersion = IRVersion::v10;
auto fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
const NetworkParams& networkParams,
const WeightsBlob::Ptr& weights) {
// Disable reorder in per-layer tests to make sure intended layout is used
- _config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(YES);
+ _config[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(YES);
// White list of per-layer tests that allowed to reorder
if (layerParams._layerType == "Flatten") {
- _config[VPU_CONFIG_KEY(DISABLE_REORDER)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DISABLE_REORDER] = CONFIG_VALUE(NO);
}
ASSERT_NO_FATAL_FAILURE(
_output_tensor[2] = std::floor((_input_tensor[2] + 2. * _pad_val.y - _kernel_val.y) / _stride_val.y) + 1;
_output_tensor[1] = _input_tensor[1];
_output_tensor[0] = _input_tensor[0];
- _config[VPU_CONFIG_KEY(DETECT_NETWORK_BATCH)] = CONFIG_VALUE(NO);
+ _config[InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH] = CONFIG_VALUE(NO);
ASSERT_EQ(_input_tensor.size(), 4);
_testNet.addLayer(LayerInitParams("Pooling")
.params(_params)
std::map<std::string, std::string> config(_config);
if (params._useHWOpt) {
- config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = CONFIG_VALUE(YES);
+ config[InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION] = CONFIG_VALUE(YES);
} else {
- config[VPU_CONFIG_KEY(HW_STAGES_OPTIMIZATION)] = CONFIG_VALUE(NO);
+ config[InferenceEngine::MYRIAD_ENABLE_HW_ACCELERATION] = CONFIG_VALUE(NO);
}
#if 0
config[CONFIG_KEY(LOG_LEVEL)] = CONFIG_VALUE(LOG_INFO);
#endif
config[CONFIG_KEY(PERF_COUNT)] = CONFIG_VALUE(YES);
- config[VPU_CONFIG_KEY(PERF_REPORT_MODE)] = VPU_CONFIG_VALUE(PER_STAGE);
- config[VPU_CONFIG_KEY(FORCE_DEPRECATED_CNN_CONVERSION)] = CONFIG_VALUE(NO); // Make VPU plugin be able to use NGraph network.
+ config[InferenceEngine::MYRIAD_PERF_REPORT_MODE] = InferenceEngine::MYRIAD_PER_STAGE;
+ config[InferenceEngine::MYRIAD_FORCE_DEPRECATED_CNN_CONVERSION] = CONFIG_VALUE(NO); // Make VPU plugin be able to use NGraph network.
InferenceEngine::StatusCode st = InferenceEngine::StatusCode::GENERAL_ERROR;
ASSERT_NO_THROW(st = _vpuPluginPtr->LoadNetwork(_exeNetwork, _cnnNetwork, config, &_resp));
const auto st = _inferRequest->Infer(&_resp);
EXPECT_EQ(InferenceEngine::StatusCode::OK, st) << _resp.msg;
// dumpPerformance();
- if (!_config[VPU_CONFIG_KEY(CUSTOM_LAYERS)].empty()) {
+ if (!_config[InferenceEngine::MYRIAD_CUSTOM_LAYERS].empty()) {
EXPECT_TRUE(wasCustomLayerInferred())
<< "CustomBindings.xml has been provided but Custom layer was not inferred";
}
suitsConfig = hasRequestedMyriad2 || hasRequestedMyriadX;
}
- bool suitsDeprecatedConfig;
- // Deprecated api
- IE_SUPPRESS_DEPRECATED_START
- platform = config.find(VPU_CONFIG_KEY(PLATFORM));
- if (platform == config.end() || platform->second.empty()) {
- suitsDeprecatedConfig = hasMyriad2() || hasMyriadX();
- } else {
- bool hasRequestedMyriad2 =
- platform->second == VPU_CONFIG_VALUE(2450) && hasMyriad2();
- bool hasRequestedMyriadX =
- platform->second == VPU_CONFIG_VALUE(2480) && hasMyriadX();
- suitsDeprecatedConfig = hasRequestedMyriad2 || hasRequestedMyriadX;
- }
- IE_SUPPRESS_DEPRECATED_END
-
- return suitsConfig && suitsDeprecatedConfig;
+ return suitsConfig;
}
static bool hasHDDL_R() {
IE_SUPPRESS_DEPRECATED_START
static const std::vector<config_t> myriadCorrectPlatformConfigValues = {
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}},
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}},
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), ""}},
- // Deprecated
- {{VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2450)}},
- {{VPU_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)}},
- {{VPU_CONFIG_KEY(PLATFORM), ""}}
+ {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), ""}}
};
static const std::vector<config_t> myriadIncorrectPlatformConfigValues = {
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "-1"}},
{{VPU_MYRIAD_CONFIG_KEY(PLATFORM), " 0"}},
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "MyriadX"}},
- // Deprecated
- {{VPU_CONFIG_KEY(PLATFORM), "-1"}},
- {{VPU_CONFIG_KEY(PLATFORM), " 0"}},
- {{VPU_CONFIG_KEY(PLATFORM), "MyriadX"}},
- // Deprecated key & value from current
- {{VPU_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2450)}},
- {{VPU_CONFIG_KEY(PLATFORM), VPU_MYRIAD_CONFIG_VALUE(2480)}},
- // Current key & deprecated value
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2450)}},
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), VPU_CONFIG_VALUE(2480)}},
-
+ {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), "MyriadX"}}
};
static const std::vector<config_t> myriadCorrectProtocolConfigValues = {
+ {{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_PCIE}},
+ {{InferenceEngine::MYRIAD_PROTOCOL, InferenceEngine::MYRIAD_USB}},
+ {{InferenceEngine::MYRIAD_PROTOCOL, ""}},
+
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(PCIE)}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), VPU_MYRIAD_CONFIG_VALUE(USB)}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), ""}},
static const std::vector<config_t> myriadIncorrectProtocolConfigValues = {
// Protocols
+ {{InferenceEngine::MYRIAD_PROTOCOL, "0"}},
+ {{InferenceEngine::MYRIAD_PROTOCOL, "2450"}},
+ {{InferenceEngine::MYRIAD_PROTOCOL, "PCI"}},
+
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "0"}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "2450"}},
{{VPU_MYRIAD_CONFIG_KEY(PROTOCOL), "PCI"}},
};
static const std::vector<config_t> myriadCorrectConfigCombinationValues = {
- {{VPU_MYRIAD_CONFIG_KEY(PLATFORM), ""},
- {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), ""}}
+ {{InferenceEngine::MYRIAD_PROTOCOL, ""},
+ // Deprecated
+ {VPU_MYRIAD_CONFIG_KEY(PROTOCOL), ""}}
};
static const std::vector<config_t> myriadIncorrectPowerConfigValues = {
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), "-1"}},
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), "POWER_STANDARD"}},
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), "INFER"}},
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), ""}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, "-1"}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, "POWER_STANDARD"}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, "INFER"}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, ""}},
};
static const std::vector<config_t> myriadCorrectPowerConfigValues = {
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), VPU_MYRIAD_CONFIG_VALUE(POWER_FULL)}},
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), VPU_MYRIAD_CONFIG_VALUE(POWER_INFER)}},
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE)}},
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE_SHAVES)}},
- {{VPU_MYRIAD_CONFIG_KEY(POWER_MANAGEMENT), VPU_MYRIAD_CONFIG_VALUE(POWER_STAGE_NCES)}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, InferenceEngine::MYRIAD_POWER_FULL}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, InferenceEngine::MYRIAD_POWER_INFER}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, InferenceEngine::MYRIAD_POWER_STAGE}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, InferenceEngine::MYRIAD_POWER_STAGE_SHAVES}},
+ {{InferenceEngine::MYRIAD_POWER_MANAGEMENT, InferenceEngine::MYRIAD_POWER_STAGE_NCES}},
};
static const std::vector<config_t> myriadCorrectPackageTypeConfigValues = {
+ {{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_AUTO}},
+ {{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_MICRON_2GB}},
+ {{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_SAMSUNG_2GB}},
+ {{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_HYNIX_2GB}},
+ {{InferenceEngine::MYRIAD_DDR_TYPE, InferenceEngine::MYRIAD_DDR_MICRON_1GB}},
+
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(DDR_AUTO)}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(MICRON_2GB)}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(SAMSUNG_2GB)}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(HYNIX_2GB)}},
- {{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB)}}
+ {{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), VPU_MYRIAD_CONFIG_VALUE(MICRON_1GB)}},
};
static const std::vector<config_t> myriadIncorrectPackageTypeConfigValues = {
+ {{InferenceEngine::MYRIAD_DDR_TYPE, "-1"}},
+ {{InferenceEngine::MYRIAD_DDR_TYPE, "-MICRON_1GB"}},
+
+ // Deprecated
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-1"}},
{{VPU_MYRIAD_CONFIG_KEY(MOVIDIUS_DDR_TYPE), "-MICRON_1GB"}},
};
TEST_F(MyriadMetricsTest, ShouldThrowExceptionWhenOption_MYRIAD_THROUGHPUT_STREAMS_isInvalid) {
range_type act_res;
std::map<std::string, std::string> config {
- {KEY_VPU_MYRIAD_THROUGHPUT_STREAMS, std::string("bad param")}
+ {InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, std::string("bad param")}
};
ASSERT_ANY_THROW(act_res = metrics_container_->RangeForAsyncInferRequests(config));
TEST_P(MyriadRangeInferMetricsTestWithParam, CheckValues) {
range_type act_res;
std::map<std::string, std::string> config {
- {KEY_VPU_MYRIAD_THROUGHPUT_STREAMS, confir_param_}
+ {InferenceEngine::MYRIAD_THROUGHPUT_STREAMS, confir_param_}
};
ASSERT_NO_THROW(act_res = metrics_container_->RangeForAsyncInferRequests(config));
Overwrites precision from ip and op options for specified layers.
VPU options:
- -VPU_MYRIAD_PLATFORM <value> Optional. Specifies Movidius platform. Supported values: VPU_MYRIAD_2450, VPU_MYRIAD_2480. Overwrites value from config.
- This option must be used in order to compile blob without a connected Myriad device.
-VPU_NUMBER_OF_SHAVES <value> Optional. Specifies number of shaves. Should be set with "VPU_NUMBER_OF_CMX_SLICES". Overwrites value from config.
-VPU_NUMBER_OF_CMX_SLICES <value> Optional. Specifies number of CMX slices. Should be set with "VPU_NUMBER_OF_SHAVES". Overwrites value from config.
-VPU_TILING_CMX_LIMIT_KB <value> Optional. Specifies CMX limit for data tiling in kB. Value should be equal or greater than -1, where -1 means default value of limit. Overwrites value from config.
./compile_tool -m <path_to_model>/model_name.xml
```
-## MYRIAD Platform Option
-
-You can dump a blob without a connected MYRIAD device.
-To do that, specify the type of an Intel® Movidius™ platform using the `-VPU_MYRIAD_PLATFORM` parameter.
-
-Supported values: `VPU_MYRIAD_2450`, `VPU_MYRIAD_2480`.
-
## FPGA Option
You can compile executable network without a connected FPGA device with a loaded DLA bitstream.
#include <gflags/gflags.h>
#include "inference_engine.hpp"
+#include <vpu/vpu_plugin_config.hpp>
#include <vpu/private_plugin_config.hpp>
#include <vpu/utils/string.hpp>
#include "samples/common.hpp"
static constexpr char output_message[] = "Optional. Path to the output file. Default value: \"<model_xml_file>.blob\".";
static constexpr char config_message[] = "Optional. Path to the configuration file. Default value: \"config\".";
-static constexpr char platform_message[] = "Optional. Specifies Movidius platform."
- " Supported values: VPU_MYRIAD_2450, VPU_MYRIAD_2480."
- " Overwrites value from config.\n"
-" This option must be used in order to compile blob"
- " without a connected Myriad device.";
static constexpr char number_of_shaves_message[] = "Optional. Specifies number of shaves."
" Should be set with \"VPU_NUMBER_OF_CMX_SLICES\"."
" Overwrites value from config.";
DEFINE_string(iop, "", iop_message);
DEFINE_string(il, "", inputs_layout_message);
DEFINE_string(ol, "", outputs_layout_message);
-DEFINE_string(VPU_MYRIAD_PLATFORM, "", platform_message);
DEFINE_string(VPU_NUMBER_OF_SHAVES, "", number_of_shaves_message);
DEFINE_string(VPU_NUMBER_OF_CMX_SLICES, "", number_of_cmx_slices_message);
DEFINE_string(VPU_TILING_CMX_LIMIT_KB, "", tiling_cmx_limit_message);
std::cout << " -ol <value> " << outputs_layout_message << std::endl;
std::cout << " " << std::endl;
std::cout << " VPU options: " << std::endl;
- std::cout << " -VPU_MYRIAD_PLATFORM <value> " << platform_message << std::endl;
std::cout << " -VPU_NUMBER_OF_SHAVES <value> " << number_of_shaves_message << std::endl;
std::cout << " -VPU_NUMBER_OF_CMX_SLICES <value> " << number_of_cmx_slices_message << std::endl;
std::cout << " -VPU_TILING_CMX_LIMIT_KB <value> " << tiling_cmx_limit_message << std::endl;
throw std::invalid_argument("Target device name is required");
}
- if (std::string::npos != FLAGS_d.find("MYRIAD") && FLAGS_VPU_MYRIAD_PLATFORM.empty()) {
+ if (std::string::npos != FLAGS_d.find("MYRIAD")) {
std::vector<std::string> myriadDeviceIds = ie.GetMetric("MYRIAD", METRIC_KEY(AVAILABLE_DEVICES));
if (myriadDeviceIds.empty()) {
- throw std::runtime_error{"No available MYRIAD devices. Please specify -VPU_MYRIAD_PLATFORM option explicitly"};
+ throw std::runtime_error{"No available MYRIAD devices"};
}
}
static std::map<std::string, std::string> configure(const std::string &configFile, const std::string &xmlFileName) {
auto config = parseConfig(configFile);
- if (!FLAGS_VPU_MYRIAD_PLATFORM.empty()) {
- config[VPU_MYRIAD_CONFIG_KEY(PLATFORM)] = FLAGS_VPU_MYRIAD_PLATFORM;
- }
-
if (!FLAGS_VPU_NUMBER_OF_SHAVES.empty()) {
- config[VPU_CONFIG_KEY(NUMBER_OF_SHAVES)] = FLAGS_VPU_NUMBER_OF_SHAVES;
+ config[InferenceEngine::MYRIAD_NUMBER_OF_SHAVES] = FLAGS_VPU_NUMBER_OF_SHAVES;
}
if (!FLAGS_VPU_NUMBER_OF_CMX_SLICES.empty()) {
- config[VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES)] = FLAGS_VPU_NUMBER_OF_CMX_SLICES;
+ config[InferenceEngine::MYRIAD_NUMBER_OF_CMX_SLICES] = FLAGS_VPU_NUMBER_OF_CMX_SLICES;
}
if (!FLAGS_VPU_TILING_CMX_LIMIT_KB.empty()) {
- config[VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB)] = FLAGS_VPU_TILING_CMX_LIMIT_KB;
+ config[InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB] = FLAGS_VPU_TILING_CMX_LIMIT_KB;
}
if (!FLAGS_DLA_ARCH_NAME.empty()) {
Example: -iop "input:FP16, output:FP16".
Notice that quotes are required.
Overwrites precision from ip and op options for specified layers.
- -VPU_MYRIAD_PLATFORM <value> Optional. Specifies movidius platform. Supported values: VPU_MYRIAD_2450, VPU_MYRIAD_2480. Overwrites value from config.
- This option must be used in order to compile blob without a connected Myriad device.
-VPU_NUMBER_OF_SHAVES <value> Optional. Specifies number of shaves. Should be set with "VPU_NUMBER_OF_CMX_SLICES". Overwrites value from config.
-VPU_NUMBER_OF_CMX_SLICES <value> Optional. Specifies number of CMX slices. Should be set with "VPU_NUMBER_OF_SHAVES". Overwrites value from config.
-VPU_TILING_CMX_LIMIT_KB <value> Optional. Specifies CMX limit for data tiling in kB. Value should be equal or greater than -1, where -1 means default value of limit. Overwrites value from config.
./myriad_compile -m <path_to_model>/model_name.xml
```
-## Platform option
-
-You can dump blob without a connected Myriad device.
-To do that, you must specify type of movidius platform using the parameter -VPU_MYRIAD_PLATFORM.
-Supported values: VPU_MYRIAD_2450, VPU_MYRIAD_2480
-
## Import and Export functionality
#### Export
static constexpr char plugin_path_message[] = "Optional. Path to a plugin folder.";
static constexpr char output_message[] = "Optional. Path to the output file. Default value: \"<model_xml_file>.blob\".";
static constexpr char config_message[] = "Optional. Path to the configuration file. Default value: \"config\".";
-static constexpr char platform_message[] = "Optional. Specifies movidius platform."
- " Supported values: VPU_MYRIAD_2450, VPU_MYRIAD_2480."
- " Overwrites value from config.\n"
-" This option must be used in order to compile blob"
- " without a connected Myriad device.";
static constexpr char number_of_shaves_message[] = "Optional. Specifies number of shaves."
" Should be set with \"VPU_NUMBER_OF_CMX_SLICES\"."
" Overwrites value from config.";
DEFINE_string(ip, "", inputs_precision_message);
DEFINE_string(op, "", outputs_precision_message);
DEFINE_string(iop, "", iop_message);
-DEFINE_string(VPU_MYRIAD_PLATFORM, "", platform_message);
DEFINE_string(VPU_NUMBER_OF_SHAVES, "", number_of_shaves_message);
DEFINE_string(VPU_NUMBER_OF_CMX_SLICES, "", number_of_cmx_slices_message);
DEFINE_string(VPU_TILING_CMX_LIMIT_KB, "", tiling_cmx_limit_message);
std::cout << " -ip <value> " << inputs_precision_message << std::endl;
std::cout << " -op <value> " << outputs_precision_message << std::endl;
std::cout << " -iop \"<value>\" " << iop_message << std::endl;
- std::cout << " -VPU_MYRIAD_PLATFORM <value> " << platform_message << std::endl;
std::cout << " -VPU_NUMBER_OF_SHAVES <value> " << number_of_shaves_message << std::endl;
std::cout << " -VPU_NUMBER_OF_CMX_SLICES <value> " << number_of_cmx_slices_message << std::endl;
std::cout << " -VPU_TILING_CMX_LIMIT_KB <value> " << tiling_cmx_limit_message << std::endl;
static std::map<std::string, std::string> configure(const std::string &configFile, const std::string &xmlFileName) {
auto config = parseConfig(configFile);
- if (!FLAGS_VPU_MYRIAD_PLATFORM.empty()) {
- config[VPU_MYRIAD_CONFIG_KEY(PLATFORM)] = FLAGS_VPU_MYRIAD_PLATFORM;
- }
-
if (!FLAGS_VPU_NUMBER_OF_SHAVES.empty()) {
- config[VPU_CONFIG_KEY(NUMBER_OF_SHAVES)] = FLAGS_VPU_NUMBER_OF_SHAVES;
+ config[InferenceEngine::MYRIAD_NUMBER_OF_SHAVES] = FLAGS_VPU_NUMBER_OF_SHAVES;
}
if (!FLAGS_VPU_NUMBER_OF_CMX_SLICES.empty()) {
- config[VPU_CONFIG_KEY(NUMBER_OF_CMX_SLICES)] = FLAGS_VPU_NUMBER_OF_CMX_SLICES;
+ config[InferenceEngine::MYRIAD_NUMBER_OF_CMX_SLICES] = FLAGS_VPU_NUMBER_OF_CMX_SLICES;
}
if (!FLAGS_VPU_TILING_CMX_LIMIT_KB.empty()) {
- config[VPU_CONFIG_KEY(TILING_CMX_LIMIT_KB)] = FLAGS_VPU_TILING_CMX_LIMIT_KB;
+ config[InferenceEngine::MYRIAD_TILING_CMX_LIMIT_KB] = FLAGS_VPU_TILING_CMX_LIMIT_KB;
}
return config;
#include <inference_engine.hpp>
#include <common.hpp>
-#include <vpu/vpu_plugin_config.hpp>
+#include <vpu/vpu_config.hpp>
static char* m_exename = nullptr;
static void setConfig(std::map<std::string, std::string>& config,
const std::string& file_config_cl) {
config[CONFIG_KEY(LOG_LEVEL)] = CONFIG_VALUE(LOG_WARNING);
- config[VPU_CONFIG_KEY(PRINT_RECEIVE_TENSOR_TIME)] = CONFIG_VALUE(YES);
- config[VPU_CONFIG_KEY(CUSTOM_LAYERS)] = file_config_cl;
+ config[InferenceEngine::MYRIAD_ENABLE_RECEIVING_TENSOR_TIME] = CONFIG_VALUE(YES);
+ config[InferenceEngine::MYRIAD_CUSTOM_LAYERS] = file_config_cl;
}
static void printPerformanceCounts(const std::map<std::string, InferenceEngine::InferenceEngineProfileInfo>& perfMap) {