uint32_t c_dim_in = FROM_IR_DIM(inputs, 3);
num_columns = (w_dim_in == 1) ? h_dim_in * c_dim_in : w_dim_in * c_dim_in;
- num_rows = 1;
+ num_rows = (w_dim_in == 1) ? w_dim_in : h_dim_in;
} else {
num_columns = FROM_IR_DIM(inputs, 2);
num_rows = FROM_IR_DIM(inputs, 1);
// remove osp->layer connection
for (auto && outData : getInputTo(osp)) {
- for (auto i = outData.second->insData.begin(); i != outData.second->insData.end(); i++) {
- auto insData = i->lock();
+ for (int i = 0; i < outData.second->insData.size(); i++) {
+ auto insData = outData.second->insData[i].lock();
if (!insData) {
THROW_IE_EXCEPTION << "Cannot remove layer : "<< layer->name <<", its output layer(" <<
outData.first << " has invalid input configuration";
// found layer that need to be removed
if (creator.get() == layer.get()) {
- outData.second->insData.erase(i);
+ outData.second->insData[i] = isp;
break;
}
}
getInputTo(isp)[layer->name + "_" + outData.first] = outData.second;
}
- // add osp->isp connections
- for (auto && outData : getInputTo(osp)) {
- outData.second->insData.push_back(isp);
- }
-
// removing layer->osp, and layer->isp connection not necessary - layer will delete it by itself
}
THROW_GNA_EXCEPTION << "cannot insert identity layer after" << prev->name << " and before " << l->name;
}
- auto inputData = l->insData[0].lock();
+ auto inputData = l->insData[insDataIdx].lock();
auto dataPtr = std::make_shared<Data>("identity_data_" + std::to_string(numOfIdentityLayers), inputData->getTensorDesc());
auto activationLayerWithQuant = quantized ?
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <vector>
+#include "subgraph_tests/perm_conv_perm_concat.hpp"
+#include "common_test_utils/test_constants.hpp"
+namespace {
+std::vector<std::array<size_t, 4>> input_shapes {
+ {1, 1, 7, 32},
+ {1, 1, 8, 16},
+};
+
+std::vector<std::array<size_t, 2>> kernel_shapes {
+ {1, 3},
+ {1, 5},
+};
+
+std::vector<size_t> output_channels {
+ 32,
+ 64,
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::FP16,
+};
+
+std::map<std::string, std::string> additional_config = {
+};
+} // namespace
+
+namespace SubgraphTestsDefinitions {
+INSTANTIATE_TEST_CASE_P(smoke_basic, PermConvPermConcat,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_CPU),
+ ::testing::ValuesIn(input_shapes),
+ ::testing::ValuesIn(kernel_shapes),
+ ::testing::ValuesIn(output_channels),
+ ::testing::Values(additional_config)),
+ PermConvPermConcat::getTestCaseName);
+} // namespace SubgraphTestsDefinitions
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <vector>
+#include "subgraph_tests/perm_conv_perm_concat.hpp"
+#include "common_test_utils/test_constants.hpp"
+namespace {
+std::vector<std::array<size_t, 4>> input_shapes {
+ {1, 1, 7, 32},
+ {1, 1, 8, 16},
+};
+
+std::vector<std::array<size_t, 2>> kernel_shapes {
+ {1, 3},
+ {1, 5},
+};
+
+std::vector<size_t> output_channels {
+ 32,
+ 64,
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::FP16,
+};
+
+std::map<std::string, std::string> additional_config = {
+ {"GNA_DEVICE_MODE", "GNA_SW_EXACT"},
+ {"GNA_SCALE_FACTOR_0", "1234"}
+};
+} // namespace
+
+namespace SubgraphTestsDefinitions {
+ INSTANTIATE_TEST_CASE_P(smoke_basic, PermConvPermConcat,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_GNA),
+ ::testing::ValuesIn(input_shapes),
+ ::testing::ValuesIn(kernel_shapes),
+ ::testing::ValuesIn(output_channels),
+ ::testing::Values(additional_config)),
+ PermConvPermConcat::getTestCaseName);
+} // namespace SubgraphTestsDefinitions
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+#include <vector>
+#include "subgraph_tests/perm_conv_perm_concat.hpp"
+#include "common_test_utils/test_constants.hpp"
+namespace {
+std::vector<std::array<size_t, 4>> input_shapes {
+ {1, 1, 7, 32},
+ {1, 1, 8, 16},
+};
+
+std::vector<std::array<size_t, 2>> kernel_shapes {
+ {1, 3},
+ {1, 5},
+};
+
+std::vector<size_t> output_channels {
+ 32,
+ 64,
+};
+
+std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+// InferenceEngine::Precision::FP16,
+};
+
+std::map<std::string, std::string> additional_config = {
+};
+} // namespace
+
+namespace SubgraphTestsDefinitions {
+ INSTANTIATE_TEST_CASE_P(smoke_basic, PermConvPermConcat,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_GPU),
+ ::testing::ValuesIn(input_shapes),
+ ::testing::ValuesIn(kernel_shapes),
+ ::testing::ValuesIn(output_channels),
+ ::testing::Values(additional_config)),
+ PermConvPermConcat::getTestCaseName);
+} // namespace SubgraphTestsDefinitions
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+#include <array>
+#include <string>
+#include <memory>
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ngraph_functions/builders.hpp"
+
+namespace SubgraphTestsDefinitions {
+typedef std::tuple<
+ InferenceEngine::Precision, // Network Precision
+ std::string, // Target Device
+ std::array<size_t, 4>, // Input shape
+ std::array<size_t, 2>, // Kernel shape
+ size_t, // Output channels
+ std::map<std::string, std::string> // Configuration
+> PermConvPermConcatParams;
+
+class PermConvPermConcat : public testing::WithParamInterface<PermConvPermConcatParams>,
+ virtual public LayerTestsUtils::LayerTestsCommon {
+public:
+ static std::string getTestCaseName(testing::TestParamInfo<PermConvPermConcatParams> obj);
+
+protected:
+ void SetUp() override;
+ void Run() override;
+};
+} // namespace SubgraphTestsDefinitions
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
#include <memory>
#include <numeric>
#include <random>
#include <string>
#include <tuple>
#include <vector>
#include <debug.h>
#include "common_test_utils/common_utils.hpp"
#include "functional_test_utils/precision_utils.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include "subgraph_tests/perm_conv_perm_concat.hpp"
+
+namespace SubgraphTestsDefinitions {
+std::string PermConvPermConcat::getTestCaseName(testing::TestParamInfo<PermConvPermConcatParams> obj) {
+ InferenceEngine::Precision netPrecision;
+ std::string targetName;
+ std::array<size_t, 4> input_shape;
+ std::array<size_t, 2> kernel_shape;
+ size_t output_channels;
+ std::map<std::string, std::string> configuration;
+
+
+ std::tie(netPrecision, targetName, input_shape, kernel_shape, output_channels, configuration) = obj.param;
+ std::ostringstream results;
+
+ results << "IS=" << CommonTestUtils::vec2str(std::vector<size_t>(input_shape.begin(), input_shape.end())) << "_";
+ results << "KS=" << CommonTestUtils::vec2str(std::vector<size_t>(kernel_shape.begin(), kernel_shape.end())) << "_";
+ results << "OC=" << output_channels << "_";
+ results << "netPRC=" << netPrecision.name() << "_";
+ results << "targetDevice=" << targetName;
+ return results.str();
+}
+
+void PermConvPermConcat::SetUp() {
+ InferenceEngine::Precision netPrecision;
+ std::array<size_t, 4> input_shape;
+ std::array<size_t, 2> kernel_shape;
+ size_t output_channels;
+ std::map<std::string, std::string> additional_config;
+
+ std::tie(netPrecision, targetDevice, input_shape, kernel_shape, output_channels, additional_config) = this->GetParam();
+
+ configuration.insert(additional_config.begin(), additional_config.end());
+
+ const std::size_t input_dim = std::accumulate(input_shape.begin(), input_shape.end(), 1, std::multiplies<size_t>());
+ auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+ std::vector<size_t> input_dims { 1, input_dim };
+ std::vector<size_t> reshape_in_dims = std::vector<size_t>(input_shape.begin(), input_shape.end());
+ std::vector<size_t> permute_in_order = { 0, 3, 1, 2 };
+ std::vector<size_t> permute_out_order = { 0, 2, 3, 1 };
+
+ const int seed = 0;
+ std::mt19937 gen(static_cast<float>(seed));
+
+ auto generateFloatNumbers = [gen](std::size_t vec_len, float min, float max) mutable {
+ std::vector<float> res;
+
+ std::uniform_real_distribution<float> dist(min, max);
+ for (int i = 0; i < vec_len; i++)
+ res.emplace_back(static_cast<float>(dist(gen)));
+
+ return res;
+ };
+
+ auto input_parameter = ngraph::builder::makeParams(ngPrc, {input_dims});
+
+ auto reshape_in_pattern = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
+ ngraph::Shape{4},
+ reshape_in_dims);
+ auto reshape_in = std::make_shared<ngraph::op::v1::Reshape>(input_parameter[0], reshape_in_pattern, false);
+
+ auto permute_in_params = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64,
+ ngraph::Shape{4},
+ ngraph::Shape{permute_in_order});
+ auto permute_in = std::make_shared<ngraph::opset1::Transpose>(reshape_in, permute_in_params);
+ auto conv_in_shape = permute_in->get_output_shape(0);
+ auto conv_weights_size = output_channels * (conv_in_shape[1]) * kernel_shape[0] * kernel_shape[1];
+ auto conv = ngraph::builder::makeConvolution(permute_in, ngPrc, {kernel_shape[0], kernel_shape[1]}, {1, 1}, {0, 0}, {0, 0}, {1, 1},
+ ngraph::op::PadType::VALID, output_channels, false, generateFloatNumbers(conv_weights_size, -0.5f, 0.5f));
+
+ auto permute_out_params = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64,
+ ngraph::Shape{4},
+ permute_out_order);
+ auto permute_out = std::make_shared<ngraph::opset1::Transpose>(conv, permute_out_params);
+
+ auto permute_out_shape = permute_out->get_output_shape(0);
+
+ auto concat_const = ngraph::builder::makeConstant(ngPrc, {1, 1, 1, permute_out_shape[3]}, generateFloatNumbers(permute_out_shape[3], -10, 10));
+
+ auto concat = ngraph::builder::makeConcat({permute_out, concat_const}, 2);
+
+ auto reshape_out_pattern = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64,
+ ngraph::Shape{2},
+ InferenceEngine::SizeVector({1, (permute_out_shape[2] + 1) * permute_out_shape[3]}));
+ auto reshape_out = std::make_shared<ngraph::op::v1::Reshape>(concat, reshape_out_pattern, false);
+
+ function = std::make_shared<ngraph::Function>(reshape_out, input_parameter, "perm_conv_perm_concat");
+}
+
+void PermConvPermConcat::Run() {
+ SKIP_IF_CURRENT_TEST_IS_DISABLED()
+
+ LoadNetwork();
+
+ inferRequest = executableNetwork.CreateInferRequest();
+ inputs.clear();
+
+ for (const auto &input : cnnNetwork.getInputsInfo()) {
+ const auto &info = input.second;
+ auto tensorDesc = info->getTensorDesc();
+
+ auto blob = FuncTestUtils::createAndFillBlobFloat(tensorDesc, 2, -1, 100, 111);
+
+ FuncTestUtils::fillInputsBySinValues(blob);
+ inferRequest.SetBlob(info->name(), blob);
+ inputs.push_back(blob);
+ }
+ if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
+ configuration.count(InferenceEngine::PluginConfigParams::YES)) {
+ auto batchSize = cnnNetwork.getInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
+ inferRequest.SetBatch(batchSize);
+ }
+ inferRequest.Infer();
+
+ Validate();
+}
+
// Parameterized entry point: SetUp builds the subgraph, Run() loads it,
// infers and validates against the reference.
TEST_P(PermConvPermConcat, CompareWithRefs) {
    Run();
}
+} // namespace SubgraphTestsDefinitions