From c9749ce39773a44cfaaf1cb18345fcb403f78061 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 1 Jul 2020 22:34:43 +0300 Subject: [PATCH] Clean-up files in tests helpers (#1173) --- .../network_tests/network_test.cpp | 17 +- .../single_layer_tests/bin_conv_tests.hpp | 13 ++ .../single_layer_transformations_test.cpp | 29 ++++ .../helpers/single_layer_common.hpp | 11 +- .../tests_deprecated/helpers/tests_common.cpp | 176 ++++++++------------- .../tests_deprecated/helpers/tests_common.hpp | 131 +-------------- .../tests_deprecated/helpers/tests_common_func.hpp | 46 +----- .../unit/cnn_network/layer_builder.cpp | 120 ++++++++++++++ .../unit/cnn_network/layer_builder.h | 4 +- 9 files changed, 256 insertions(+), 291 deletions(-) create mode 100644 inference-engine/tests_deprecated/unit/cnn_network/layer_builder.cpp diff --git a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp index 7cd7e47..288e178 100644 --- a/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp +++ b/inference-engine/tests_deprecated/functional/mkldnn/shared_tests_instance/network_tests/network_test.cpp @@ -23,6 +23,21 @@ TEST_P(ModelTransformationsTest, LPT) {} +static void checkLayerInputPrecision(const ICNNNetwork& network, const std::string& layerName, Precision expectedPrecision, int inputIndex = -1) { + CNNLayerPtr layer = getLayer(network, layerName); + if (layer == nullptr) { + THROW_IE_EXCEPTION << "layer '" << layerName << "' was not found"; + } + for (size_t index = 0ul; index < layer->insData.size(); ++index) { + if ((inputIndex != -1) && (index != inputIndex)) { + continue; + } + + const DataWeakPtr weakData = layer->insData[index]; + ASSERT_EQ(expectedPrecision, weakData.lock()->getPrecision()) << " unexpected precision " << weakData.lock()->getPrecision() << " for layer " << layerName; + } +} + ModelParams getModelParams(const std::string modelName) { std::map modelParams = { { @@ -68,7 +83,7 @@ std::map modelParams = { for (const std::pair item : fakeQuantizeAndConcolutionItems) { TestsCommonFunc::checkLayerOuputPrecision(*usedNetwork, item.first, Precision::U8); if (!item.second.empty()) { - TestsCommonFunc::checkLayerInputPrecision(*usedNetwork, item.second, Precision::U8, 0); + checkLayerInputPrecision(*usedNetwork, item.second, Precision::U8, 0); } } } diff --git a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp index 8d3c4af..5c599e7 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/single_layer_tests/bin_conv_tests.hpp @@ -90,6 +90,19 @@ class BinaryConvolutionOnlyTest : public TestsCommon, protected: + static void fill_data_bin(float *data, size_t size) { + for (size_t i = 0; i < size; i++) { + data[i] = sinf((float)i) > 0.f ? 
1.f : -1.f; + } + } + + static void fill_data_bin_packed(int8_t *data, size_t size) { + int nbits = 8; + for (size_t i = 0; i < div_up(size, nbits); i++) { + data[i] = static_cast(i % 255); + } + } + size_t calculateOutDim(size_t in_dim, size_t kernel, size_t stride, size_t pad_begin) { return (in_dim + 2lu * pad_begin - kernel) / stride + 1lu; } diff --git a/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_transformations_test.cpp b/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_transformations_test.cpp index b701254..5ba4e3f 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_transformations_test.cpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/transformations/single_layer_transformations_test.cpp @@ -181,6 +181,35 @@ void SingleLayerTransformationsTest::compareInDetails( } } +static void relative_compare( + const float* res, + const float* ref, + size_t size, + float max_diff = 0.01f, + const std::string assertDetails = "", + float zero_diff = 1e-7f) { + for (size_t i = 0lu; i < size; i++) { + if (std::isnan(res[i]) && std::isnan(ref[i])) { + continue; + } + + if ((ref[i] == 0.f) || (res[i] == 0.f)) { + const float diff = fabs(res[i] - ref[i]); + ASSERT_TRUE(diff < zero_diff) << + "\nAbsolute comparison of values ref: " << ref[i] << " and res: " << res[i] << + ", diff: " << diff << + ", index: " << i << "\n" << assertDetails; + } else { + const float diff = fabs((res[i] - ref[i]) / (std::max)(ref[i], res[i])); + ASSERT_LT(diff, max_diff) << + "\nRelative comparison of values ref: " << ref[i] << " and res: " << res[i] << + ", diff: " << diff << + ", max_diff: " << max_diff << + ", index: " << i << "\n" << assertDetails; + } + } +} + void SingleLayerTransformationsTest::SetUp() { try { const SingleLayerTransformationsTestParams p = ::testing::WithParamInterface::GetParam(); diff --git a/inference-engine/tests_deprecated/helpers/single_layer_common.hpp b/inference-engine/tests_deprecated/helpers/single_layer_common.hpp index da39953..b4b421c 100644 --- a/inference-engine/tests_deprecated/helpers/single_layer_common.hpp +++ b/inference-engine/tests_deprecated/helpers/single_layer_common.hpp @@ -31,6 +31,7 @@ #endif #define REPLACE_WITH_NUM(SRC, PATTERN, NUM) REPLACE_WITH_STR(SRC, PATTERN, to_string_c_locale(NUM)) + #define REPLACE_WITH_NUM_VECTOR(SRC, PATTERN, NUMS) \ { std::string result; \ if (NUMS.size() > 0u) { \ @@ -40,6 +41,7 @@ } \ } \ REPLACE_WITH_STR(SRC, PATTERN, result); } + #define REPLACE_WITH_NUM_VECTOR_REVERSE(SRC, PATTERN, NUMS) \ { std::string result; \ auto nums_size = NUMS.size(); \ @@ -50,6 +52,7 @@ } \ } \ REPLACE_WITH_STR(SRC, PATTERN, result); } + #define REMOVE_LINE(SRC, PATTERN) REPLACE_WITH_STR(SRC, PATTERN, "") #define PRETTY_PARAM(name, type) \ @@ -67,14 +70,6 @@ *os << #name ": " << ::testing::PrintToString((name::param_type)(param)); \ } -struct MapStrStr { - std::map data{}; - - explicit MapStrStr(std::map _data) : data(std::move(_data)) {} - - MapStrStr() = default; -}; - template inline InferenceEngine::CNNNetwork buildSingleLayerNetworkCommon(const std::string &layerType, diff --git a/inference-engine/tests_deprecated/helpers/tests_common.cpp b/inference-engine/tests_deprecated/helpers/tests_common.cpp index 8a6188d..6b9bc92 100644 --- a/inference-engine/tests_deprecated/helpers/tests_common.cpp +++ b/inference-engine/tests_deprecated/helpers/tests_common.cpp @@ -72,121 +72,73 @@ void TestsCommon::TearDown() { 
InferenceEngine::ExecutorManager::getInstance()->clear(); } -IE_SUPPRESS_DEPRECATED_START - -class BaseTestCreator { -protected: - std::string _type; -public: - explicit BaseTestCreator(const std::string& type) : _type(type) {} - virtual ~BaseTestCreator() = default; - - virtual InferenceEngine::CNNLayerPtr create(const std::string& type) = 0; - - virtual bool shouldCreate(const std::string& type) = 0; -}; - -template -class LayerTestCreator : public BaseTestCreator { -public: - explicit LayerTestCreator(const std::string& type) : BaseTestCreator(type) {} - - InferenceEngine::CNNLayerPtr create(const std::string& type) override { - InferenceEngine::LayerParams params; - params.type = type; - return std::make_shared(params); +/** + * @brief Copies a 8-bit RGB image to the blob. + * + * Throws an exception in case of dimensions or input size mismatch + * + * @tparam data_t Type of the target blob + * @param RGB8 8-bit RGB image + * @param RGB8_size Size of the image + * @param blob Target blob to write image to + */ +template +void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob* blob) { + InferenceEngine::SizeVector dims = blob->getTensorDesc().getDims(); + if (4 != dims.size()) + THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size " << dims.size(); + size_t num_channels = dims[1]; // because RGB + size_t num_images = dims[0]; + size_t w = dims[3]; + size_t h = dims[2]; + size_t nPixels = w * h; + + if (RGB8_size != w * h * num_channels * num_images) + THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images + << " bytes, got: " << RGB8_size; + + std::vector dataArray; + for (unsigned int n = 0; n < num_images; n++) { + for (unsigned int i = 0; i < num_channels; i++) { + if (!n && !i && dataArray.empty()) { + dataArray.push_back(blob->data()); + } else { + dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels); + } + } } - - bool shouldCreate(const std::string& type) override { - return type == _type; + for (size_t n = 0; n < num_images; n++) { + size_t n_num_channels = n * num_channels; + size_t n_num_channels_nPixels = n_num_channels * nPixels; + for (size_t i = 0; i < nPixels; i++) { + size_t i_num_channels = i * num_channels + n_num_channels_nPixels; + for (size_t j = 0; j < num_channels; j++) { + dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j]; + } + } } -}; - -static std::vector>& getCreators() { - // there should be unique_ptr but it cant be used with initializer lists - static std::vector > creators = { - std::make_shared>("Power"), - std::make_shared>("Convolution"), - std::make_shared>("Deconvolution"), - std::make_shared>("Pooling"), - std::make_shared>("InnerProduct"), - std::make_shared>("FullyConnected"), - std::make_shared>("LRN"), - std::make_shared>("Norm"), - std::make_shared>("Softmax"), - std::make_shared>("LogSoftMax"), - std::make_shared>("GRN"), - std::make_shared>("MVN"), - std::make_shared>("ReLU"), - std::make_shared>("Clamp"), - std::make_shared>("Split"), - std::make_shared>("Slice"), - std::make_shared>("Concat"), - std::make_shared>("Eltwise"), - std::make_shared>("ScaleShift"), - std::make_shared>("PReLU"), - std::make_shared>("Crop"), - std::make_shared>("Reshape"), - std::make_shared>("Tile"), - std::make_shared>("BatchNormalization"), - std::make_shared>("Gemm"), - std::make_shared>("Pad"), - std::make_shared>("Gather"), - std::make_shared>("StridedSlice"), - std::make_shared>("ShuffleChannels"), - 
std::make_shared>("DepthToSpace"), - std::make_shared>("ReverseSequence"), - std::make_shared>("Abs"), - std::make_shared>("Acos"), - std::make_shared>("Acosh"), - std::make_shared>("Asin"), - std::make_shared>("Asinh"), - std::make_shared>("Atan"), - std::make_shared>("Atanh"), - std::make_shared>("Ceil"), - std::make_shared>("Cos"), - std::make_shared>("Cosh"), - std::make_shared>("Erf"), - std::make_shared>("Floor"), - std::make_shared>("HardSigmoid"), - std::make_shared>("Log"), - std::make_shared>("Exp"), - std::make_shared>("Reciprocal"), - std::make_shared>("Selu"), - std::make_shared>("Sign"), - std::make_shared>("Sin"), - std::make_shared>("Sinh"), - std::make_shared>("Softplus"), - std::make_shared>("Softsign"), - std::make_shared>("Tan"), - std::make_shared>("ReduceAnd"), - std::make_shared>("ReduceL1"), - std::make_shared>("ReduceL2"), - std::make_shared>("ReduceLogSum"), - std::make_shared>("ReduceLogSumExp"), - std::make_shared>("ReduceMax"), - std::make_shared>("ReduceMean"), - std::make_shared>("ReduceMin"), - std::make_shared>("ReduceOr"), - std::make_shared>("ReduceProd"), - std::make_shared>("ReduceSum"), - std::make_shared>("ReduceSumSquare"), - std::make_shared>("TopK"), - std::make_shared>("NonMaxSuppression"), - std::make_shared>("ScatterUpdate"), - std::make_shared>("ScatterElementsUpdate") - }; - return creators; } -InferenceEngine::CNNLayer::Ptr TestsCommon::createLayer(const std::string& type) { - for (auto& creator : getCreators()) { - if (!creator->shouldCreate(type)) - continue; - return creator->create(type); - } - static LayerTestCreator genericCreator(""); - return genericCreator.create(type); +/** + * @brief Splits the RGB channels to either I16 Blob or float blob. + * + * The image buffer is assumed to be packed with no support for strides. + * + * @param imgBufRGB8 Packed 24bit RGB image (3 bytes per pixel: R-G-B) + * @param lengthbytesSize Size in bytes of the RGB image. 
It is equal to amount of pixels times 3 (number of channels) + * @param input Blob to contain the split image (to 3 channels) + */ +void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, InferenceEngine::Blob& input) { + InferenceEngine::TBlob* float_input = dynamic_cast*>(&input); + if (float_input != nullptr) + copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input); + + InferenceEngine::TBlob* short_input = dynamic_cast*>(&input); + if (short_input != nullptr) + copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input); + + InferenceEngine::TBlob* byte_input = dynamic_cast*>(&input); + if (byte_input != nullptr) + copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input); } -IE_SUPPRESS_DEPRECATED_END diff --git a/inference-engine/tests_deprecated/helpers/tests_common.hpp b/inference-engine/tests_deprecated/helpers/tests_common.hpp index 6711bc2..687788c 100644 --- a/inference-engine/tests_deprecated/helpers/tests_common.hpp +++ b/inference-engine/tests_deprecated/helpers/tests_common.hpp @@ -40,16 +40,13 @@ inline std::string to_string_c_locale(T value) { } class TestsCommon : public ::testing::Test { -public: - IE_SUPPRESS_DEPRECATED_START - - static InferenceEngine::CNNLayer::Ptr createLayer(const std::string &type); - - IE_SUPPRESS_DEPRECATED_END - protected: void SetUp() override; + static std::string make_so_name(const std::string & input) { + return CommonTestUtils::pre + input + IE_BUILD_POSTFIX + CommonTestUtils::ext; + } + void TearDown() override; public: @@ -57,10 +54,6 @@ public: return make_plugin_name("mock_engine"); } - static std::string make_so_name(const std::string & input) { - return CommonTestUtils::pre + input + IE_BUILD_POSTFIX + CommonTestUtils::ext; - } - static std::string make_plugin_name(const std::string & input) { return make_so_name(input); } @@ -79,25 +72,6 @@ public: } } - static void fill_data_non_zero(int32_t *data, size_t size, int n) { - for (size_t i = 0; i < size; i++) { - data[i] = n*i%254+1; - } - } - - static void fill_data_bin(float *data, size_t size) { - for (size_t i = 0; i < size; i++) { - data[i] = sinf((float)i) > 0.f ? 
1.f : -1.f; - } - } - - static void fill_data_bin_packed(int8_t *data, size_t size) { - int nbits = 8; - for (size_t i = 0; i < div_up(size, nbits); i++) { - data[i] = static_cast(i % 255); - } - } - static void fill_data_dbgval(float *data, size_t size, float alpha = 1.0f) { for (size_t i = 0; i < size; i++) { data[i] = i * alpha; @@ -141,35 +115,6 @@ public: } } - static void relative_compare( - const float* res, - const float* ref, - size_t size, - float max_diff = 0.01f, - const std::string assertDetails = "", - float zero_diff = 1e-7f) { - for (size_t i = 0lu; i < size; i++) { - if (std::isnan(res[i]) && std::isnan(ref[i])) { - continue; - } - - if ((ref[i] == 0.f) || (res[i] == 0.f)) { - const float diff = fabs(res[i] - ref[i]); - ASSERT_TRUE(diff < zero_diff) << - "\nAbsolute comparison of values ref: " << ref[i] << " and res: " << res[i] << - ", diff: " << diff << - ", index: " << i << "\n" << assertDetails; - } else { - const float diff = fabs((res[i] - ref[i]) / (std::max)(ref[i], res[i])); - ASSERT_LT(diff, max_diff) << - "\nRelative comparison of values ref: " << ref[i] << " and res: " << res[i] << - ", diff: " << diff << - ", max_diff: " << max_diff << - ", index: " << i << "\n" << assertDetails; - } - } - } - void replace(std::string& str, const std::string& from, const std::string& to) { std::string::size_type pos = 0; @@ -221,13 +166,6 @@ public: # error Unsupported architecture #endif - -template -std::shared_ptr> to_tblob(const std::shared_ptr &obj) -{ - return std::dynamic_pointer_cast>(obj); -} - inline InferenceEngine::InputInfo::Ptr getFirstInput(InferenceEngine::ICNNNetwork *pNet) { InferenceEngine::InputsDataMap inputs; @@ -237,53 +175,6 @@ inline InferenceEngine::InputInfo::Ptr getFirstInput(InferenceEngine::ICNNNetwor } /** - * @brief Copies a 8-bit RGB image to the blob. - * - * Throws an exception in case of dimensions or input size mismatch - * - * @tparam data_t Type of the target blob - * @param RGB8 8-bit RGB image - * @param RGB8_size Size of the image - * @param blob Target blob to write image to - */ -template -void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob* blob) { - InferenceEngine::SizeVector dims = blob->getTensorDesc().getDims(); - if (4 != dims.size()) - THROW_IE_EXCEPTION << "Cannot write data to input blob! Blob has incorrect dimensions size " << dims.size(); - size_t num_channels = dims[1]; // because RGB - size_t num_images = dims[0]; - size_t w = dims[3]; - size_t h = dims[2]; - size_t nPixels = w * h; - - if (RGB8_size != w * h * num_channels * num_images) - THROW_IE_EXCEPTION << "input pixels mismatch, expecting " << w * h * num_channels * num_images - << " bytes, got: " << RGB8_size; - - std::vector dataArray; - for (unsigned int n = 0; n < num_images; n++) { - for (unsigned int i = 0; i < num_channels; i++) { - if (!n && !i && dataArray.empty()) { - dataArray.push_back(blob->data()); - } else { - dataArray.push_back(dataArray.at(n * num_channels + i - 1) + nPixels); - } - } - } - for (size_t n = 0; n < num_images; n++) { - size_t n_num_channels = n * num_channels; - size_t n_num_channels_nPixels = n_num_channels * nPixels; - for (size_t i = 0; i < nPixels; i++) { - size_t i_num_channels = i * num_channels + n_num_channels_nPixels; - for (size_t j = 0; j < num_channels; j++) { - dataArray.at(n_num_channels + j)[i] = RGB8[i_num_channels + j]; - } - } - } -} - -/** * @brief Splits the RGB channels to either I16 Blob or float blob. * * The image buffer is assumed to be packed with no support for strides. 
@@ -292,16 +183,4 @@ void copyFromRGB8(uint8_t* RGB8, size_t RGB8_size, InferenceEngine::TBlob* float_input = dynamic_cast*>(&input); - if (float_input != nullptr) - copyFromRGB8(imgBufRGB8, lengthbytesSize, float_input); - - InferenceEngine::TBlob* short_input = dynamic_cast*>(&input); - if (short_input != nullptr) - copyFromRGB8(imgBufRGB8, lengthbytesSize, short_input); - - InferenceEngine::TBlob* byte_input = dynamic_cast*>(&input); - if (byte_input != nullptr) - copyFromRGB8(imgBufRGB8, lengthbytesSize, byte_input); -} \ No newline at end of file +void ConvertImageToInput(unsigned char* imgBufRGB8, size_t lengthbytesSize, InferenceEngine::Blob& input); diff --git a/inference-engine/tests_deprecated/helpers/tests_common_func.hpp b/inference-engine/tests_deprecated/helpers/tests_common_func.hpp index d0ae895..d6edceb 100644 --- a/inference-engine/tests_deprecated/helpers/tests_common_func.hpp +++ b/inference-engine/tests_deprecated/helpers/tests_common_func.hpp @@ -20,10 +20,6 @@ using namespace InferenceEngine; IE_SUPPRESS_DEPRECATED_START class TestsCommonFunc { -public: - - InferenceEngine::Blob::Ptr readInput(std::string path, int batch = 1); - static CNNLayerPtr getLayer(const ICNNNetwork& network, const std::string& layerName) { std::vector layers = InferenceEngine::details::CNNNetSortTopologically(network); for (CNNLayerPtr layer : layers) { @@ -34,6 +30,9 @@ public: return nullptr; } +public: + + InferenceEngine::Blob::Ptr readInput(std::string path, int batch = 1); static void checkLayerOuputPrecision( const ICNNNetwork& network, @@ -65,21 +64,6 @@ public: } } - static void checkLayerInputPrecision(const ICNNNetwork& network, const std::string& layerName, Precision expectedPrecision, int inputIndex = -1) { - CNNLayerPtr layer = getLayer(network, layerName); - if (layer == nullptr) { - THROW_IE_EXCEPTION << "layer '" << layerName << "' was not found"; - } - for (size_t index = 0ul; index < layer->insData.size(); ++index) { - if ((inputIndex != -1) && (index != inputIndex)) { - continue; - } - - const DataWeakPtr weakData = layer->insData[index]; - ASSERT_EQ(expectedPrecision, weakData.lock()->getPrecision()) << " unexpected precision " << weakData.lock()->getPrecision() << " for layer " << layerName; - } - } - static void checkLayerOuputPrecision(const ICNNNetwork& network, const std::string& layerName, std::vector expectedPrecisions) { CNNLayerPtr layer = getLayer(network, layerName); if (layer == nullptr) { @@ -94,30 +78,6 @@ public: } } - static bool hasBlobEqualsValues(Blob& blob) { - const float* buffer = blob.buffer().as(); - for (int i = 0; i < (blob.size() - 1); ++i) { - if (buffer[i] != buffer[i + 1]) { - return false; - } - } - return true; - } - - static bool checkScalesAndShifts(const CNNLayer& scaleShift, const bool equals) { - const Blob::Ptr scalesBlob = InferenceEngine::details::CNNNetworkHelper::getBlob(std::make_shared(scaleShift), "weights"); - if (equals != hasBlobEqualsValues(*scalesBlob)) { - return false; - } - - const Blob::Ptr shiftsBlob = InferenceEngine::details::CNNNetworkHelper::getBlob(std::make_shared(scaleShift), "biases"); - if (equals != hasBlobEqualsValues(*shiftsBlob)) { - return false; - } - - return true; - } - bool compareTop( InferenceEngine::Blob& blob, std::vector> &ref_top, diff --git a/inference-engine/tests_deprecated/unit/cnn_network/layer_builder.cpp b/inference-engine/tests_deprecated/unit/cnn_network/layer_builder.cpp new file mode 100644 index 0000000..f942763 --- /dev/null +++ 
b/inference-engine/tests_deprecated/unit/cnn_network/layer_builder.cpp @@ -0,0 +1,120 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "layer_builder.h" + +class BaseTestCreator { +protected: + std::string _type; +public: + explicit BaseTestCreator(const std::string& type) : _type(type) {} + virtual ~BaseTestCreator() = default; + + virtual InferenceEngine::CNNLayerPtr create(const std::string& type) = 0; + + virtual bool shouldCreate(const std::string& type) = 0; +}; + +template +class LayerTestCreator : public BaseTestCreator { +public: + explicit LayerTestCreator(const std::string& type) : BaseTestCreator(type) {} + + InferenceEngine::CNNLayerPtr create(const std::string& type) override { + InferenceEngine::LayerParams params; + params.type = type; + return std::make_shared(params); + } + + bool shouldCreate(const std::string& type) override { + return type == _type; + } +}; + +static std::vector>& getCreators() { + // there should be unique_ptr but it cant be used with initializer lists + static std::vector > creators = { + std::make_shared>("Power"), + std::make_shared>("Convolution"), + std::make_shared>("Deconvolution"), + std::make_shared>("Pooling"), + std::make_shared>("InnerProduct"), + std::make_shared>("FullyConnected"), + std::make_shared>("LRN"), + std::make_shared>("Norm"), + std::make_shared>("Softmax"), + std::make_shared>("LogSoftMax"), + std::make_shared>("GRN"), + std::make_shared>("MVN"), + std::make_shared>("ReLU"), + std::make_shared>("Clamp"), + std::make_shared>("Split"), + std::make_shared>("Slice"), + std::make_shared>("Concat"), + std::make_shared>("Eltwise"), + std::make_shared>("ScaleShift"), + std::make_shared>("PReLU"), + std::make_shared>("Crop"), + std::make_shared>("Reshape"), + std::make_shared>("Tile"), + std::make_shared>("BatchNormalization"), + std::make_shared>("Gemm"), + std::make_shared>("Pad"), + std::make_shared>("Gather"), + std::make_shared>("StridedSlice"), + std::make_shared>("ShuffleChannels"), + std::make_shared>("DepthToSpace"), + std::make_shared>("ReverseSequence"), + std::make_shared>("Abs"), + std::make_shared>("Acos"), + std::make_shared>("Acosh"), + std::make_shared>("Asin"), + std::make_shared>("Asinh"), + std::make_shared>("Atan"), + std::make_shared>("Atanh"), + std::make_shared>("Ceil"), + std::make_shared>("Cos"), + std::make_shared>("Cosh"), + std::make_shared>("Erf"), + std::make_shared>("Floor"), + std::make_shared>("HardSigmoid"), + std::make_shared>("Log"), + std::make_shared>("Exp"), + std::make_shared>("Reciprocal"), + std::make_shared>("Selu"), + std::make_shared>("Sign"), + std::make_shared>("Sin"), + std::make_shared>("Sinh"), + std::make_shared>("Softplus"), + std::make_shared>("Softsign"), + std::make_shared>("Tan"), + std::make_shared>("ReduceAnd"), + std::make_shared>("ReduceL1"), + std::make_shared>("ReduceL2"), + std::make_shared>("ReduceLogSum"), + std::make_shared>("ReduceLogSumExp"), + std::make_shared>("ReduceMax"), + std::make_shared>("ReduceMean"), + std::make_shared>("ReduceMin"), + std::make_shared>("ReduceOr"), + std::make_shared>("ReduceProd"), + std::make_shared>("ReduceSum"), + std::make_shared>("ReduceSumSquare"), + std::make_shared>("TopK"), + std::make_shared>("NonMaxSuppression"), + std::make_shared>("ScatterUpdate"), + std::make_shared>("ScatterElementsUpdate") + }; + return creators; +} + +InferenceEngine::CNNLayer::Ptr CNNLayerValidationTests::createLayer(const std::string& type) { + for (auto& creator : getCreators()) { + if 
(!creator->shouldCreate(type)) + continue; + return creator->create(type); + } + static LayerTestCreator<InferenceEngine::GenericLayer> genericCreator(""); + return genericCreator.create(type); +} diff --git a/inference-engine/tests_deprecated/unit/cnn_network/layer_builder.h b/inference-engine/tests_deprecated/unit/cnn_network/layer_builder.h index 699addd..8476d6a 100644 --- a/inference-engine/tests_deprecated/unit/cnn_network/layer_builder.h +++ b/inference-engine/tests_deprecated/unit/cnn_network/layer_builder.h @@ -92,13 +92,15 @@ public: class CNNLayerValidationTests : public testing::TestWithParam<std::string>{ public: + static InferenceEngine::CNNLayer::Ptr createLayer(const std::string &type); + void SetUp() override { auto params = GetParam(); type = params; } std::shared_ptr<LayerBuilder>& createConcreteLayer(const std::string& type) { - layer = std::make_shared<LayerBuilder>(TestsCommon::createLayer(type)); + layer = std::make_shared<LayerBuilder>(createLayer(type)); return layer; } -- 2.7.4
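
Usage note (a sketch, not part of the patch): the example below shows roughly how the relocated ConvertImageToInput() helper is meant to be driven from a test after this clean-up. The include paths, the main() wrapper, and the 2x2 test image are assumptions for illustration; only ConvertImageToInput() itself and the TBlob/TensorDesc calls come from the patch and the 2020-era Inference Engine API.

// Minimal usage sketch, assuming ie_blob.h and the patched tests_common.hpp are on the include path.
#include <vector>

#include <ie_blob.h>          // InferenceEngine::TBlob, TensorDesc, Precision, Layout
#include "tests_common.hpp"   // declares ConvertImageToInput(unsigned char*, size_t, Blob&)

int main() {
    using namespace InferenceEngine;

    // One 2x2 image, packed R-G-B per pixel (12 bytes total); pixel values are arbitrary.
    std::vector<unsigned char> rgb8 = {
        255, 0,   0,     0,   255, 0,
        0,   0,   255,   255, 255, 255,
    };

    // NCHW float blob: 1 image, 3 channels, 2x2 spatial size.
    TBlob<float> input(TensorDesc(Precision::FP32, {1, 3, 2, 2}, Layout::NCHW));
    input.allocate();

    // Splits the packed RGB buffer into the blob's three planar channels
    // (the helper dispatches on the blob's element type via dynamic_cast in tests_common.cpp).
    ConvertImageToInput(rgb8.data(), rgb8.size(), input);
    return 0;
}

Keeping only the ConvertImageToInput() prototype in tests_common.hpp and moving the copyFromRGB8 template machinery into tests_common.cpp keeps the image-unpacking helpers out of every test translation unit, which appears to be the intent of this clean-up.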