[IE TESTS] Move old IE Unit tests to the new infra (#605)
author Irina Efode <irina.efode@intel.com>
Wed, 27 May 2020 08:53:00 +0000 (11:53 +0300)
committer GitHub <noreply@github.com>
Wed, 27 May 2020 08:53:00 +0000 (11:53 +0300)
inference-engine/tests/functional/inference_engine/CMakeLists.txt
inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp with 95% similarity]
inference-engine/tests/functional/inference_engine/cnn_network/convert_ngraph_to_cnn_network_tests.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/convert_ngraph_to_cnn_network_tests.cpp with 92% similarity]
inference-engine/tests/functional/inference_engine/local_test.cpp [moved from inference-engine/tests_deprecated/unit/inference_engine_tests/local_test.cpp with 86% similarity]
inference-engine/tests/unit/inference_engine/ie_blob_proxy_test.cpp [moved from inference-engine/tests/unit/inference_engine/ie_blob_proxy.cpp with 100% similarity]
inference-engine/tests/unit/inference_engine/ie_locked_memory_test.cpp [new file with mode: 0644]
inference-engine/tests_deprecated/unit/cnn_network/cnn_layer_validation_tests.cpp [deleted file]
inference-engine/tests_deprecated/unit/cnn_network/parser_tests_base.hpp
inference-engine/tests_deprecated/unit/inference_engine_tests/locked_memory_test.cpp [deleted file]

diff --git a/inference-engine/tests/functional/inference_engine/CMakeLists.txt b/inference-engine/tests/functional/inference_engine/CMakeLists.txt
index b810498..6612d95 100644 (file)
@@ -10,6 +10,9 @@ add_subdirectory(extension_lib)
 addIeTargetTest(
         NAME ${TARGET_NAME}
         ROOT ${CMAKE_CURRENT_SOURCE_DIR}
+        INCLUDES
+            # TODO: remove after removing `cnn_network_ngraph_impl.hpp`
+            ${IE_MAIN_SOURCE_DIR}/src/inference_engine
         EXCLUDED_SOURCE_DIRS
             ${CMAKE_CURRENT_SOURCE_DIR}/extension_lib
         LINK_LIBRARIES
diff --git a/inference-engine/tests_deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp b/inference-engine/tests/functional/inference_engine/cnn_network/cnn_ngraph_impl_tests.cpp
@@ -4,8 +4,7 @@
 
 #include <gtest/gtest.h>
 
-#include <cnn_network_ngraph_impl.hpp>
-#include "tests_common.hpp"
+#include <cnn_network_impl.hpp>
 #include <string>
 #include <sstream>
 #include <fstream>
 
 #include "common_test_utils/file_utils.hpp"
 #include "transformations/rt_info/primitives_priority_attribute.hpp"
+#include "cnn_network_ngraph_impl.hpp"
 
 using namespace testing;
 using namespace InferenceEngine;
 
-class CNNNGraphImplTests : public TestsCommon {};
+IE_SUPPRESS_DEPRECATED_START
 
-TEST_F(CNNNGraphImplTests, TestConvertNetwork) {
+TEST(CNNNGraphImplTests, TestConvertNetwork) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
@@ -56,7 +56,7 @@ TEST_F(CNNNGraphImplTests, TestConvertNetwork) {
     ASSERT_EQ(cnnRefNet, cnnNet.getCNNNetwork());
 }
 
-TEST_F(CNNNGraphImplTests, TestResultWithNotEqualName) {
+TEST(CNNNGraphImplTests, TestResultWithNotEqualName) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
@@ -77,7 +77,7 @@ TEST_F(CNNNGraphImplTests, TestResultWithNotEqualName) {
     ASSERT_NO_THROW(cnnNet.getCNNNetwork());
 }
 
-TEST_F(CNNNGraphImplTests, TestGetOutputAfterConvertNetwork) {
+TEST(CNNNGraphImplTests, TestGetOutputAfterConvertNetwork) {
     const std::string testLayerName = "testReLU";
     std::shared_ptr<ngraph::Function> ngraph;
     {
@@ -104,7 +104,7 @@ TEST_F(CNNNGraphImplTests, TestGetOutputAfterConvertNetwork) {
     ASSERT_EQ(2, outs.size());
 }
 
-TEST_F(CNNNGraphImplTests, TestSetCurrentBatch) {
+TEST(CNNNGraphImplTests, TestSetCurrentBatch) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
@@ -126,7 +126,7 @@ TEST_F(CNNNGraphImplTests, TestSetCurrentBatch) {
     ASSERT_NE(nullptr, cnnNet.getFunction());
 }
 
-TEST_F(CNNNGraphImplTests, TestSetBatch) {
+TEST(CNNNGraphImplTests, TestSetBatch) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
@@ -154,7 +154,7 @@ TEST_F(CNNNGraphImplTests, TestSetBatch) {
     ASSERT_EQ(2, cnnNet.getCNNNetwork()->getBatchSize());
 }
 
-TEST_F(CNNNGraphImplTests, TestSaveAffinity) {
+TEST(CNNNGraphImplTests, TestSaveAffinity) {
     const std::string testAffinity = "testAffinity";
     std::shared_ptr<ngraph::Function> ngraph;
     {
@@ -179,7 +179,7 @@ TEST_F(CNNNGraphImplTests, TestSaveAffinity) {
     ASSERT_EQ(cnnLayer->affinity, testAffinity);
 }
 
-TEST_F(CNNNGraphImplTests, TestAddOutput) {
+TEST(CNNNGraphImplTests, TestAddOutput) {
     const std::string testLayerName = "testReLU";
     std::shared_ptr<ngraph::Function> ngraph;
     {
@@ -211,7 +211,7 @@ TEST_F(CNNNGraphImplTests, TestAddOutput) {
     ASSERT_TRUE(outputs.find(testLayerName) != outputs.end());
 }
 
-TEST_F(CNNNGraphImplTests, TestAddOutputFromConvertedNetwork) {
+TEST(CNNNGraphImplTests, TestAddOutputFromConvertedNetwork) {
     const std::string testLayerName = "testReLU";
     std::shared_ptr<ngraph::Function> ngraph;
     {
@@ -244,7 +244,7 @@ TEST_F(CNNNGraphImplTests, TestAddOutputFromConvertedNetwork) {
     ASSERT_TRUE(outputs.find(testLayerName) != outputs.end());
 }
 
-TEST_F(CNNNGraphImplTests, ConstantAsInternalAndExternalLayer) {
+TEST(CNNNGraphImplTests, ConstantAsInternalAndExternalLayer) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
@@ -266,7 +266,7 @@ TEST_F(CNNNGraphImplTests, ConstantAsInternalAndExternalLayer) {
     ASSERT_EQ(4, cnnNet.layerCount());
 }
 
-TEST_F(CNNNGraphImplTests, SaveInputInfoAfterConversion) {
+TEST(CNNNGraphImplTests, SaveInputInfoAfterConversion) {
     std::string name = "param";
     std::shared_ptr<ngraph::Function> ngraph;
     {
@@ -297,7 +297,7 @@ TEST_F(CNNNGraphImplTests, SaveInputInfoAfterConversion) {
     ASSERT_EQ(inputInfo->getPreProcess().getResizeAlgorithm(), ResizeAlgorithm::RESIZE_AREA);
 }
 
-TEST_F(CNNNGraphImplTests, SaveAttributesAfterConversion) {
+TEST(CNNNGraphImplTests, SaveAttributesAfterConversion) {
     std::string name = "prelu";
     std::shared_ptr<ngraph::Function> ngraph;
     {
@@ -330,7 +330,7 @@ TEST_F(CNNNGraphImplTests, SaveAttributesAfterConversion) {
     ASSERT_EQ(layer->params["test"], "2");
 }
 
-TEST_F(CNNNGraphImplTests, SavePrimitivesPriority) {
+TEST(CNNNGraphImplTests, SavePrimitivesPriority) {
     std::string model = R"V0G0N(
 <net name="Activation" version="10">
     <layers>
@@ -391,7 +391,7 @@ TEST_F(CNNNGraphImplTests, SavePrimitivesPriority) {
         ASSERT_EQ("cpu:avx2", cnnLayer->params["PrimitivesPriority"]);
 }
 
-TEST_F(CNNNGraphImplTests, ReadFromCNNNetReader) {
+TEST(CNNNGraphImplTests, ReadFromCNNNetReader) {
     std::string model = R"V0G0N(
 <net name="Activation" version="10">
     <layers>
@@ -447,7 +447,7 @@ TEST_F(CNNNGraphImplTests, ReadFromCNNNetReader) {
     ASSERT_EQ(2, network.layerCount());
 }
 
-TEST_F(CNNNGraphImplTests, CanChangeInputPrecision) {
+TEST(CNNNGraphImplTests, CanChangeInputPrecision) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 16, 16});
@@ -493,7 +493,7 @@ TEST_F(CNNNGraphImplTests, CanChangeInputPrecision) {
     }
 }
 
-TEST_F(CNNNGraphImplTests, CanChangeInputLayout) {
+TEST(CNNNGraphImplTests, CanChangeInputLayout) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 16, 16});
@@ -539,7 +539,7 @@ TEST_F(CNNNGraphImplTests, CanChangeInputLayout) {
     }
 }
 
-TEST_F(CNNNGraphImplTests, CanChangeOutputPrecision) {
+TEST(CNNNGraphImplTests, CanChangeOutputPrecision) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 16, 16});
@@ -585,7 +585,7 @@ TEST_F(CNNNGraphImplTests, CanChangeOutputPrecision) {
     }
 }
 
-TEST_F(CNNNGraphImplTests, CanChangeOutputLayout) {
+TEST(CNNNGraphImplTests, CanChangeOutputLayout) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 16, 16});
@@ -631,7 +631,7 @@ TEST_F(CNNNGraphImplTests, CanChangeOutputLayout) {
     }
 }
 
-TEST_F(CNNNGraphImplTests, TestCheckStats) {
+TEST(CNNNGraphImplTests, TestCheckStats) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
@@ -651,3 +651,5 @@ TEST_F(CNNNGraphImplTests, TestCheckStats) {
     ASSERT_EQ(NOT_FOUND, cnnNet.getStats(&_stats, nullptr));
     ASSERT_EQ(nullptr, _stats);
 }
+
+IE_SUPPRESS_DEPRECATED_END
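The hunks above drop the TestsCommon fixture, turning each TEST_F(CNNNGraphImplTests, ...) into a plain TEST(...), and bracket the file with deprecation-suppression macros because the converted-network API being exercised is deprecated. A minimal sketch of that pattern follows; it assumes IE_SUPPRESS_DEPRECATED_START/_END come from ie_api.h in this tree, and the test body is purely illustrative, not part of the commit.

// Sketch only: plain TEST() needs no fixture class, and deprecated IE calls
// are wrapped in the suppression macros (assumed to live in ie_api.h).
#include <gtest/gtest.h>
#include <ie_api.h>

IE_SUPPRESS_DEPRECATED_START

TEST(MigrationSketch, PlainTestNeedsNoFixture) {
    // deprecated CNNNetwork conversion calls would go here, as in the real tests
    SUCCEED();
}

IE_SUPPRESS_DEPRECATED_END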
diff --git a/inference-engine/tests_deprecated/unit/inference_engine_tests/convert_ngraph_to_cnn_network_tests.cpp b/inference-engine/tests/functional/inference_engine/cnn_network/convert_ngraph_to_cnn_network_tests.cpp
@@ -4,8 +4,6 @@
 
 #include <gtest/gtest.h>
 
-#include "tests_common.hpp"
-
 #include <convert_function_to_cnn_network.hpp>
 #include <cpp/ie_cnn_network.h>
 
@@ -17,9 +15,7 @@
 using namespace testing;
 using namespace InferenceEngine;
 
-using ConvertFunctionToCNNNetworkTests = TestsCommon;
-
-TEST_F(ConvertFunctionToCNNNetworkTests, ConvertPReLUNetwork) {
+TEST(ConvertFunctionToCNNNetworkTests, ConvertPReLUNetwork) {
     std::shared_ptr<ngraph::Function> f;
     {
         auto param1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{2, 2});
@@ -34,9 +30,9 @@ TEST_F(ConvertFunctionToCNNNetworkTests, ConvertPReLUNetwork) {
     }
 
     InferenceEngine::CNNNetwork nGraphImpl(f);
+    ASSERT_ANY_THROW(InferenceEngine::details::convertFunctionToICNNNetwork(f, nGraphImpl));
     try {
         auto net = InferenceEngine::details::convertFunctionToICNNNetwork(f, nGraphImpl);
-        FAIL();
     } catch (InferenceEngine::details::InferenceEngineException &err) {
         const std::string ref_msg = "Error of validate layer: prelu with type: PReLU. Number of inputs (2) is not equal to expected ones: 1";
         const std::string resp_msg = err.what();
@@ -44,7 +40,7 @@ TEST_F(ConvertFunctionToCNNNetworkTests, ConvertPReLUNetwork) {
     }
 }
 
-TEST_F(ConvertFunctionToCNNNetworkTests, ConvertConvolutionNetwork) {
+TEST(ConvertFunctionToCNNNetworkTests, ConvertConvolutionNetwork) {
     std::shared_ptr<ngraph::Function> f;
     {
         auto param1 = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
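Plain gtest has no assertion that matches an exception's message, which is why ConvertPReLUNetwork pairs ASSERT_ANY_THROW with a try/catch that inspects err.what(). A self-contained sketch of the same idiom; convertSketch and its message are hypothetical stand-ins, not the real convertFunctionToICNNNetwork.

// Sketch of the throw-then-inspect idiom using a hypothetical helper.
#include <gtest/gtest.h>
#include <stdexcept>
#include <string>

static void convertSketch() {
    throw std::runtime_error("Number of inputs (2) is not equal to expected ones: 1");
}

TEST(ConvertSketch, ThrowsWithExpectedMessage) {
    ASSERT_ANY_THROW(convertSketch());   // the call must throw something
    try {
        convertSketch();                 // call again to look at the message
    } catch (const std::runtime_error& err) {
        EXPECT_TRUE(std::string(err.what()).find("Number of inputs") != std::string::npos);
    }
}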
diff --git a/inference-engine/tests_deprecated/unit/inference_engine_tests/local_test.cpp b/inference-engine/tests/functional/inference_engine/local_test.cpp
@@ -3,7 +3,6 @@
 //
 
 #include <gtest/gtest.h>
-#include <single_layer_common.hpp>
 
 #include <ie_core.hpp>
 #include <net_pass.h>
@@ -20,10 +19,10 @@ class LocaleTests : public ::testing::Test {
         <layer name="data" type="Input" precision="FP32" id="0">
             <output>
                 <port id="0">
-                    <dim>_IN_</dim>
-                    <dim>_IC_</dim>
-                    <dim>_IH_</dim>
-                    <dim>_IW_</dim>
+                    <dim>2</dim>
+                    <dim>3</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
                 </port>
             </output>
         </layer>
@@ -34,18 +33,18 @@ class LocaleTests : public ::testing::Test {
 
             <input>
                 <port id="1">
-                    <dim>_IN_</dim>
-                    <dim>_IC_</dim>
-                    <dim>_IH_</dim>
-                    <dim>_IW_</dim>
+                    <dim>2</dim>
+                    <dim>3</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
                 </port>
             </input>
             <output>
                 <port id="2">
-                    <dim>_IN_</dim>
-                    <dim>_IC_</dim>
-                    <dim>_IH_</dim>
-                    <dim>_IW_</dim>
+                    <dim>2</dim>
+                    <dim>3</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
                 </port>
             </output>
         </layer>
@@ -53,24 +52,24 @@ class LocaleTests : public ::testing::Test {
             <data coeff="0.77,0.33"/>
             <input>
                 <port id="1">
-                    <dim>_IN_</dim>
-                    <dim>_IC_</dim>
-                    <dim>_IH_</dim>
-                    <dim>_IW_</dim>
+                    <dim>2</dim>
+                    <dim>3</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
                 </port>
                 <port id="2">
-                    <dim>_IN_</dim>
-                    <dim>_IC_</dim>
-                    <dim>_IH_</dim>
-                    <dim>_IW_</dim>
+                    <dim>2</dim>
+                    <dim>3</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
                 </port>
             </input>
             <output>
                 <port id="3">
-                    <dim>_IN_</dim>
-                    <dim>_IC_</dim>
-                    <dim>_IH_</dim>
-                    <dim>_IW_</dim>
+                    <dim>2</dim>
+                    <dim>3</dim>
+                    <dim>5</dim>
+                    <dim>5</dim>
                 </port>
             </output>
         </layer>
@@ -98,7 +97,6 @@ class LocaleTests : public ::testing::Test {
 </net>
 )V0G0N";
 
-
     std::string _model_LSTM = R"V0G0N(
  <net batch="1" name="model" version="2">
     <layers>
@@ -196,7 +194,6 @@ class LocaleTests : public ::testing::Test {
 )V0G0N";
 
 protected:
-
     void SetUp() override {
         originalLocale  = setlocale(LC_ALL, nullptr);
     }
@@ -204,25 +201,15 @@ protected:
         setlocale(LC_ALL, originalLocale.c_str());
     }
 
-    std::string getModel() const {
-        std::string model = _model;
-
-        REPLACE_WITH_NUM(model, "_IN_", 2);
-        REPLACE_WITH_NUM(model, "_IC_", 3);
-        REPLACE_WITH_NUM(model, "_IH_", 4);
-        REPLACE_WITH_NUM(model, "_IW_", 5);
-
-        return model;
-    }
-
     void testBody(bool isLSTM = false) const {
         InferenceEngine::Core core;
 
         // This model contains layers with float attributes.
         // Conversion from string may be affected by locale.
-        std::string model = isLSTM ? _model_LSTM : getModel();
+        std::string model = isLSTM ? _model_LSTM : _model;
         auto net = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
 
+        IE_SUPPRESS_DEPRECATED_START
         if (!isLSTM) {
             auto power_layer = dynamic_pointer_cast<PowerLayer>(net.getLayerByName("power"));
             ASSERT_EQ(power_layer->scale, 0.75f);
@@ -250,6 +237,7 @@ protected:
             ASSERT_EQ(lstmcell_layer->GetParamAsFloat("min"), -ref_coeff);
             ASSERT_EQ(lstmcell_layer->GetParamAsFloat("max"),  ref_coeff);
         }
+        IE_SUPPRESS_DEPRECATED_END
     }
 };
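The LocaleTests fixture saves LC_ALL in SetUp and restores it in TearDown so that float attributes such as coeff="0.77,0.33" parse the same way under any locale the test runner inherits. A standalone sketch of that guard pattern; the IR parsing is replaced by a plain strtof call, and LocaleGuardSketch is a hypothetical name, not the real fixture.

// Sketch of the locale save/restore guard around locale-sensitive parsing.
#include <gtest/gtest.h>
#include <clocale>
#include <cstdlib>
#include <string>

class LocaleGuardSketch : public ::testing::Test {
protected:
    void SetUp() override { originalLocale = setlocale(LC_ALL, nullptr); }
    void TearDown() override { setlocale(LC_ALL, originalLocale.c_str()); }
    std::string originalLocale;
};

TEST_F(LocaleGuardSketch, DotIsDecimalSeparatorInCLocale) {
    setlocale(LC_ALL, "C");   // strtof honours LC_NUMERIC, so pin it for the check
    ASSERT_FLOAT_EQ(0.75f, std::strtof("0.75", nullptr));
}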
 
diff --git a/inference-engine/tests/unit/inference_engine/ie_locked_memory_test.cpp b/inference-engine/tests/unit/inference_engine/ie_locked_memory_test.cpp
new file mode 100644 (file)
index 0000000..c4c44da
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <ie_locked_memory.hpp>
+#include "unit_test_utils/mocks/mock_allocator.hpp"
+
+using namespace InferenceEngine;
+using namespace ::testing;
+
+TEST(LockedMemoryTest, canUnlockMemoryAfterUsage) {
+    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
+    char array[] = {1, 2, 3};
+
+    EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), _)).WillRepeatedly(Return(reinterpret_cast<void*>(array)));
+    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
+    {
+        auto x = LockedMemory<char>(allocator.get(), reinterpret_cast<void*>(1), 1);
+        //force locking of memory
+        auto t = x[0];
+        (void)t;
+    }
+}
+
+TEST(LockedMemoryTest, canReadFromLockedMemory) {
+    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
+    char array[] = {1, 2, 3, 4, 5};
+
+    EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), _)).WillRepeatedly(Return(reinterpret_cast<void*>(array)));
+    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
+    {
+        auto x = LockedMemory<char>(allocator.get(), reinterpret_cast<void*>(1), 0);
+        //we are getting first element
+        ASSERT_EQ(1, x[0]);
+    }
+}
+
+TEST(LockedMemoryTest, canWriteToLockedMemory) {
+    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
+    char array[] = {1, 2, 3, 4, 5};
+
+    EXPECT_CALL(*allocator.get(), lock(reinterpret_cast<void*>(1), _)).WillRepeatedly(Return(reinterpret_cast<void*>(array)));
+    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
+    {
+        auto x = LockedMemory<char>(allocator.get(), reinterpret_cast<void*>(1), 0);
+
+        //we are getting first element
+        ASSERT_EQ(std::distance(array, &x[0]), 0);
+        x[0] = 5;
+    }
+    EXPECT_EQ(array[0], 5);
+}
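The new file above exercises LockedMemory<char> through gmock expectations on MockAllocator, which is declared in unit_test_utils and not shown in this patch. A self-contained illustration of the same WillRepeatedly/Times(1) expectation style on a made-up interface; LockableSketch and MockLockableSketch are stand-ins and deliberately simpler than InferenceEngine::IAllocator.

// Hypothetical interface and mock, for illustrating the expectation style only.
#include <gmock/gmock.h>
#include <gtest/gtest.h>

struct LockableSketch {
    virtual ~LockableSketch() = default;
    virtual void* lock(void* handle) = 0;
    virtual void unlock(void* handle) = 0;
};

struct MockLockableSketch : LockableSketch {
    MOCK_METHOD1(lock, void*(void*));
    MOCK_METHOD1(unlock, void(void*));
};

TEST(LockedMemorySketch, UnlockExpectedExactlyOnce) {
    using ::testing::_;
    using ::testing::Return;

    MockLockableSketch allocator;
    char array[] = {1, 2, 3};

    EXPECT_CALL(allocator, lock(_)).WillRepeatedly(Return(static_cast<void*>(array)));
    EXPECT_CALL(allocator, unlock(_)).Times(1);   // verified when the mock is destroyed

    char* data = static_cast<char*>(allocator.lock(nullptr));
    ASSERT_EQ(1, data[0]);
    allocator.unlock(nullptr);
}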
diff --git a/inference-engine/tests_deprecated/unit/cnn_network/cnn_layer_validation_tests.cpp b/inference-engine/tests_deprecated/unit/cnn_network/cnn_layer_validation_tests.cpp
deleted file mode 100644 (file)
index df6d134..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include <ie_layer_validators.hpp>
-#include <memory>
-#include <ie_data.h>
-
-#include "layer_builder.h"
-#include "shapes.h"
-using namespace InferenceEngine;
-using namespace InferenceEngine::details;
-
-TEST_P(CNNLayerValidationTests, checkValidParams) {
-
-    assertThat(type)->setParams(valid_params);
-    auto layer = getLayer();
-    LayerValidator::Ptr validator = LayerValidators::getInstance()->getValidator(type);
-
-    ASSERT_NO_THROW(validator->parseParams(layer.get()));
-    ASSERT_NO_THROW(validator->checkParams(layer.get()));
-}
-
-TEST_P(CNNLayerValidationTests, checkInvalidParams) {
-
-    assertThat(type);
-    int numberOfParams = getNumOfParams();
-    LayerValidator::Ptr validator = LayerValidators::getInstance()->getValidator(type);
-    auto layer_ = getLayer();
-    for (int i = 0; i < numberOfParams; ++i) {
-        layer->setParams(!valid_params);
-        ASSERT_THROW(validator->parseParams(layer_.get()), InferenceEngineException);
-        ASSERT_THROW(validator->checkParams(layer_.get()), InferenceEngineException);
-    }
-}
-
-TEST_P(CNNLayerValidationTests, checkInvalidInputShapes) {
-    LayerValidator::Ptr validator = LayerValidators::getInstance()->getValidator(type);
-    std::vector<DataPtr> spData;
-    assertThat(type)->setShapes(spData, !valid_input);
-
-    auto layer_ = getLayer();
-    InOutDims shapes;
-    InferenceEngine::details::getInOutShapes(layer_.get(), shapes);
-    ASSERT_THROW(validator->checkShapes(layer_.get(), shapes.inDims), InferenceEngineException);
-}
-
-TEST_P(CNNLayerValidationTests, checkValidShapes) {
-
-    std::vector<DataPtr> spData;
-    assertThat(type)->setShapes(spData, valid_input);
-    auto layer = getLayer();
-    LayerValidator::Ptr validator = LayerValidators::getInstance()->getValidator(type);
-    InOutDims shapes;
-    InferenceEngine::details::getInOutShapes(layer.get(), shapes);
-    ASSERT_NO_THROW(validator->checkShapes(layer.get(), shapes.inDims));
-}
-
-INSTANTIATE_TEST_CASE_P(
-        InstantiationName, CNNLayerValidationTests,
-        ::testing::Values(
-                "Convolution"
-                ,"Deconvolution"
-                ,"DetectionOutput"
-        )
-);
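The deleted file relied on gtest value parameterization: each TEST_P body ran once per layer type supplied by INSTANTIATE_TEST_CASE_P. A self-contained sketch of that mechanism with a trivial stand-in check; LayerTypeSketch is hypothetical and has nothing to do with the removed layer validators.

// Sketch of TEST_P / INSTANTIATE_TEST_CASE_P with a string parameter.
#include <gtest/gtest.h>
#include <string>

class LayerTypeSketch : public ::testing::TestWithParam<std::string> {};

TEST_P(LayerTypeSketch, TypeNameIsNotEmpty) {
    ASSERT_FALSE(GetParam().empty());
}

INSTANTIATE_TEST_CASE_P(
        InstantiationName, LayerTypeSketch,
        ::testing::Values("Convolution", "Deconvolution", "DetectionOutput"));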
diff --git a/inference-engine/tests_deprecated/unit/cnn_network/parser_tests_base.hpp b/inference-engine/tests_deprecated/unit/cnn_network/parser_tests_base.hpp
index 1190865..f85be47 100644 (file)
@@ -9,11 +9,11 @@
 #include "cnn_network_impl.hpp"
 #include  <tests_common.hpp>
 #include "ie_format_parser.h"
+#include "ie_blob_proxy.hpp"
 #include <string>
 #include "pugixml.hpp"
 #include "xml_parse_utils.h"
 #include "mean_image.h"
-#include "ie_blob_proxy.hpp"
 
 #include "common_test_utils/xml_net_builder/xml_father.hpp"
 
@@ -296,8 +296,7 @@ xml().node("net").attr("name", "AlexNet").attr("version", x)\
         std::vector<T> meanValues = MeanImage<T>::getValue();
         std::copy(meanValues.begin(), meanValues.end(), (T *) binBlobFloat->data());
         InferenceEngine::SizeVector dims_dst = {MT_HEIGHT, MT_WIDTH * sizeof(T), MT_CHANNELS};
-        typename InferenceEngine::TBlobProxy<uint8_t>::Ptr binBlob(new
-                                                                           InferenceEngine::TBlobProxy<uint8_t>(
+        typename InferenceEngine::TBlobProxy<uint8_t>::Ptr binBlob(new InferenceEngine::TBlobProxy<uint8_t>(
                 InferenceEngine::Precision::FP32, InferenceEngine::CHW, binBlobFloat, 0, dims_dst));
         return binBlob;
     }
diff --git a/inference-engine/tests_deprecated/unit/inference_engine_tests/locked_memory_test.cpp b/inference-engine/tests_deprecated/unit/inference_engine_tests/locked_memory_test.cpp
deleted file mode 100644 (file)
index 6eccab9..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "tests_common.hpp"
-#include "unit_test_utils/mocks/mock_allocator.hpp"
-
-using namespace InferenceEngine;
-using namespace ::testing;
-
-using LockedMemoryTest = testing::Test;
-
-TEST_F(LockedMemoryTest, canUnlockMemoryAfterUsage) {
-    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
-    char array [] = {1,2,3};
-
-    EXPECT_CALL(*allocator.get(), lock((void*)1, _)).WillRepeatedly(Return((void*)array));
-    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
-    {
-        auto x = LockedMemory<char>(allocator.get(), (void *) 1, 1);
-        //force locking of memory
-        auto t = x[0];
-        (void)t;
-    }
-}
-
-TEST_F(LockedMemoryTest, canReadFromLockedMemory) {
-    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
-    char array [] = {1,2,3,4,5};
-
-    EXPECT_CALL(*allocator.get(), lock((void*)1, _)).WillRepeatedly(Return((void*)array));
-    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
-    {
-        auto x = LockedMemory<char>(allocator.get(), (void *) 1, 0);
-        //we are getting first element
-        ASSERT_EQ(1, x[0]);
-    }
-}
-
-TEST_F(LockedMemoryTest, canWriteToLockedMemory) {
-    std::unique_ptr<MockAllocator> allocator(new MockAllocator());
-    char array [] = {1,2,3,4,5};
-
-    EXPECT_CALL(*allocator.get(), lock((void*)1, _)).WillRepeatedly(Return((void*)array));
-    EXPECT_CALL(*allocator.get(), unlock(_)).Times(1);
-    {
-        auto x = LockedMemory<char>(allocator.get(), (void *) 1, 0);
-
-        //we are getting first element
-        ASSERT_EQ(std::distance(array, &x[0]), 0);
-        x[0] = 5;
-    }
-    EXPECT_EQ(array[0], 5);
-
-}