From 8cc7eb7a171c885c7dd998eaa80fcb5313e38938 Mon Sep 17 00:00:00 2001 From: Vladimir Paramuzov Date: Tue, 8 Sep 2020 09:56:54 +0300 Subject: [PATCH] [IE CLDNN] Added is_discrete flag into device info and FULL_DEVICE_NAME flag (#2089) --- inference-engine/src/cldnn_engine/cldnn_engine.cpp | 4 +++- inference-engine/thirdparty/clDNN/api/device.hpp | 8 ++++++++ inference-engine/thirdparty/clDNN/src/gpu/device_info.cpp | 13 +++++++++---- inference-engine/thirdparty/clDNN/src/gpu/device_info.h | 4 ++-- inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp | 1 - 5 files changed, 22 insertions(+), 8 deletions(-) diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.cpp b/inference-engine/src/cldnn_engine/cldnn_engine.cpp index 954d49f..14effa2 100644 --- a/inference-engine/src/cldnn_engine/cldnn_engine.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_engine.cpp @@ -578,7 +578,9 @@ Parameter clDNNEngine::GetMetric(const std::string& name, const std::map configKeys; for (auto opt : _impl->m_config.key_config_map) diff --git a/inference-engine/thirdparty/clDNN/api/device.hpp b/inference-engine/thirdparty/clDNN/api/device.hpp index 7e49aa0..4789324 100644 --- a/inference-engine/thirdparty/clDNN/api/device.hpp +++ b/inference-engine/thirdparty/clDNN/api/device.hpp @@ -29,6 +29,12 @@ namespace cldnn { /// @defgroup cpp_device GPU Device /// @{ +/// @brief Enumeration of supported device types +enum class device_type { + integrated_gpu = 0, + discrete_gpu = 1 +}; + /// @brief Information about the device properties and capabilities. struct device_info { uint32_t cores_count; ///< Number of available HW cores. 
@@ -55,6 +61,8 @@ struct device_info { std::string dev_name; ///< Device ID string std::string driver_version; ///< Version of OpenCL driver + + device_type dev_type; ///< Defines type of current GPU device (integrated or discrete) }; struct device_impl; diff --git a/inference-engine/thirdparty/clDNN/src/gpu/device_info.cpp b/inference-engine/thirdparty/clDNN/src/gpu/device_info.cpp index 1fc851d..8383fdf 100644 --- a/inference-engine/thirdparty/clDNN/src/gpu/device_info.cpp +++ b/inference-engine/thirdparty/clDNN/src/gpu/device_info.cpp @@ -117,15 +117,20 @@ int driver_dev_id() return result.back(); } -bool get_imad_support(const cl::Device& device) { +static device_type get_device_type(const cl::Device& device) { + auto unified_mem = device.getInfo<CL_DEVICE_HOST_UNIFIED_MEMORY>(); + + return unified_mem ? device_type::integrated_gpu : device_type::discrete_gpu; +} + +static bool get_imad_support(const cl::Device& device) { std::string dev_name = device.getInfo<CL_DEVICE_NAME>(); if (dev_name.find("Gen12") != std::string::npos || dev_name.find("Xe") != std::string::npos) return true; - auto flag = device.getInfo<CL_DEVICE_HOST_UNIFIED_MEMORY>(); - if (flag != 0) { + if (get_device_type(device) == device_type::integrated_gpu) { const std::vector<int> imad_ids = { 0x9A40, 0x9A49, 0x9A59, 0x9AD9, 0x9A60, 0x9A68, 0x9A70, 0x9A78, @@ -189,6 +194,7 @@ bool is_local_block_io_supported(const cl::Device& device) { device_info_internal::device_info_internal(const cl::Device& device) { dev_name = device.getInfo<CL_DEVICE_NAME>(); driver_version = device.getInfo<CL_DRIVER_VERSION>(); + dev_type = get_device_type(device); compute_units_count = device.getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>(); @@ -220,7 +226,6 @@ device_info_internal::device_info_internal(const cl::Device& device) { supports_imad = get_imad_support(device); supports_immad = false; - dev_type = static_cast<uint32_t>(device.getInfo<CL_DEVICE_TYPE>()); vendor_id = static_cast<uint32_t>(device.getInfo<CL_DEVICE_VENDOR_ID>()); supports_usm = extensions.find("cl_intel_unified_shared_memory") != std::string::npos; diff --git a/inference-engine/thirdparty/clDNN/src/gpu/device_info.h
b/inference-engine/thirdparty/clDNN/src/gpu/device_info.h index 076bf76..9fb804b 100644 --- a/inference-engine/thirdparty/clDNN/src/gpu/device_info.h +++ b/inference-engine/thirdparty/clDNN/src/gpu/device_info.h @@ -26,7 +26,6 @@ namespace gpu { struct device_info_internal : cldnn::device_info { std::uint32_t compute_units_count; - uint32_t dev_type; uint32_t vendor_id; uint8_t supports_usm; bool supports_optimization_hints; @@ -51,7 +50,8 @@ struct device_info_internal : cldnn::device_info { supports_immad, supports_usm, dev_name, - driver_version + driver_version, + dev_type }; } }; diff --git a/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp b/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp index dc8ea53..0d1f2d3 100644 --- a/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp +++ b/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp @@ -117,7 +117,6 @@ gpu_toolkit::gpu_toolkit(const device_impl& device_impl, const configuration& co << " profiling: " << std::boolalpha << _configuration.enable_profiling << "\n" << " meaningful names: " << std::boolalpha << _configuration.meaningful_kernels_names << "\n" << " dump custom program: " << std::boolalpha << _configuration.dump_custom_program << "\n" - << " device type: " << std::to_string(device_info.dev_type) << "\n" << " vendor type: " << std::hex << std::setfill('0') << std::setw(4) << std::right << std::to_string(device_info.vendor_id) << "\n" << std::dec << std::setfill(' ') << std::right -- 2.7.4