1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
5 #include "ext_base.hpp"
12 namespace InferenceEngine {
13 namespace Extensions {
// Integer ceiling division: smallest q such that q * b >= a
// (valid for a >= 0, b > 0; behavior for negative inputs is unspecified).
// Used below to compute the number of channel blocks for nChwXc layouts.
// Fix: the visible span was missing the function's closing brace.
inline int div_up(const int a, const int b) {
    return (a + b - 1) / b;
}
// Surfaces the precomputed supported tensor configurations (built by
// addConfig()) to the plugin, reporting any error recorded earlier.
// NOTE(review): the return type and the tail of this function are not
// visible in this chunk — presumably it copies `confs` into `conf` and
// returns a StatusCode; confirm against the full file.
ExtLayerBase::getSupportedConfigurations(std::vector<LayerConfig>& conf, ResponseDesc *resp) noexcept {
    // Errors detected at construction time are reported lazily here
    // rather than thrown, because this method is noexcept.
    if (!errorMsg.empty()) {
        // std::string::copy does not NUL-terminate; capping at size-1
        // leaves room for a terminator — assumes resp->msg is a
        // fixed-size char array (TODO confirm).
        errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
// Validates a configuration chosen by the plugin before the layer is used.
// The visible checks reject descriptors that use padding: non-zero
// per-dimension offsets to data, or a non-zero overall offset padding.
// NOTE(review): the bodies of these loops/branches (presumably the error
// reporting and the return value) are missing from this chunk — confirm
// the exact failure handling against the full file.
ExtLayerBase::init(LayerConfig& config, ResponseDesc *resp) noexcept {
    // Reject any input descriptor that carries per-dimension data offsets.
    for (auto& input : config.inConfs) {
        for (auto& offset : input.desc.getBlockingDesc().getOffsetPaddingToData()) {
    // Reject inputs with a non-zero base offset into the buffer.
    if (input.desc.getBlockingDesc().getOffsetPadding()) {
    // Same two checks, applied to every output descriptor.
    for (auto& output : config.outConfs) {
        for (auto& offset : output.desc.getBlockingDesc().getOffsetPaddingToData()) {
    if (output.desc.getBlockingDesc().getOffsetPadding()) {
// Builds one LayerConfig (a DataConfig per input/output port) from the
// declarative DataConfigurator lists and appends it to `confs`.
// NOTE(review): several statements are missing from this chunk — e.g. the
// `LayerConfig config;` declaration, the lambda's closing `};`, and the
// `else` branches of the layout handling. Comments below describe only
// what is visible.
void ExtLayerBase::addConfig(const CNNLayer* layer, std::vector<DataConfigurator> in_l, std::vector<DataConfigurator> out_l, bool dynBatchSupport) {
    // Exactly one configurator must be supplied per edge.
    if (in_l.size() != layer->insData.size())
        THROW_IE_EXCEPTION << "Incorrect number of input edges. Expected " << layer->insData.size()
                           << " but layout specification provided for " << in_l.size();
    // NOTE(review): copy-paste defect — this message should say
    // "output edges", not "input edges".
    if (out_l.size() != layer->outData.size())
        THROW_IE_EXCEPTION << "Incorrect number of input edges. Expected " << layer->outData.size()
                           << " but layout specification provided for " << out_l.size();
    // Fill tensor parameters into config
    auto fill_port = [] (std::vector<DataConfig>& port, DataConfigurator conf, const DataPtr& data) {
        if (!data) THROW_IE_EXCEPTION << "Cannot get input data!";
        DataConfig dataConfig;
        dataConfig.inPlace = conf.inplace;
        dataConfig.constant = conf.constant;
        const TensorDesc& data_desc = data->getTensorDesc();
        const SizeVector& data_dims = data_desc.getDims();
        // Start from a plain (identity-order) blocking description.
        std::vector<size_t> blocks = data_dims;
        std::vector<size_t> order(blocks.size());
        for (size_t i = 0; i < order.size(); i++) order[i] = i;
        const bool isInt8 = (data->getPrecision() == Precision::I8 || data->getPrecision() == Precision::U8);
        if (conf.layout == ConfLayout::BLK8 || conf.layout == ConfLayout::BLK16) {
            // NOTE(review): this condition is always false — a size cannot
            // be both < 4 and > 5. Almost certainly meant to be `||` so
            // that non-4D/5D tensors are rejected for blocked layouts.
            if (data_dims.size() < 4 && data_dims.size() > 5)
                THROW_IE_EXCEPTION << "Inapplicable blocking layout."
                                   << "Tensor should be 4D or 5D.";
            int blk_size = conf.layout == ConfLayout::BLK8 ? 8 : 16;
            // Blocking through Channel dimension. Like [nChwXc]
            blocks[1] = div_up(blocks[1], blk_size);
            blocks.push_back(blk_size);
            // NOTE(review): lines missing here — this swap (presumably an
            // NHWC-style reorder, perhaps the int8 path given `isInt8`
            // above) begins mid-branch; confirm against the full file.
            size_t tmp = blocks[1];
            blocks[1] = blocks[3];
            conf.layout = ConfLayout::PLN;
        // All extension layers support only FP32 precision!
        InferenceEngine::Precision precision = data_desc.getPrecision();
        if (conf.layout == ConfLayout::ANY) {
            dataConfig.desc = TensorDesc(precision, data_dims, InferenceEngine::Layout::ANY);
            // NOTE(review): an `} else {` line appears to be missing above
            // this branch — here the explicit blocking/order is used.
            dataConfig.desc = TensorDesc(precision, data_dims, {blocks, order});
        port.push_back(dataConfig);
    // Fill input ports; insData holds weak pointers, hence lock().
    for (size_t i = 0; i < in_l.size(); i++)
        fill_port(config.inConfs, in_l[i], layer->insData[i].lock());
    for (size_t i = 0; i < out_l.size(); i++)
        fill_port(config.outConfs, out_l[i], layer->outData[i]);
    config.dynBatchSupport = dynBatchSupport;
    // Register the completed configuration for getSupportedConfigurations().
    confs.push_back(config);
127 } // namespace Extensions
128 } // namespace InferenceEngine