[PP GAPI] Added tests to cover existing precision conversions done by some plugins (#1976)
author Anton Potapov <anton.potapov@intel.com>
Wed, 16 Sep 2020 09:41:14 +0000 (12:41 +0300)
committer GitHub <noreply@github.com>
Wed, 16 Sep 2020 09:41:14 +0000 (12:41 +0300)

- added shared parameterized tests
- instantiated for template plugin
- instantiated for cpu plugin
- fixed CPU plugin to properly handle U16 input
- fixed CPU reverse_sequence primitive to restrict input/output tensors to
  be in FP32 only
- updated ngraph test_simple_computation_on_ndarrays to no longer expect
  failure on U16 input
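
For context, the conversion under test is the one triggered when a caller requests an input precision different from the precision the network computes in. A minimal sketch of that scenario using the standard Inference Engine API (the model path is hypothetical):

    #include <ie_core.hpp>

    int main() {
        // The network computes in FP32, but the caller asks for a U16 input
        // blob; the plugin's preprocessing must convert it internally.
        InferenceEngine::Core ie;
        auto network = ie.ReadNetwork("model.xml");  // hypothetical model
        network.getInputsInfo().begin()->second->setPrecision(
            InferenceEngine::Precision::U16);
        auto exec = ie.LoadNetwork(network, "CPU");  // CPU U16 path fixed below
        return 0;
    }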

docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp [new file with mode: 0644]
inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp
inference-engine/src/mkldnn_plugin/nodes/base.hpp
inference-engine/src/mkldnn_plugin/nodes/reverse_sequence.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp [new file with mode: 0644]
ngraph/python/tests/test_ngraph/test_basic.py

diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp
new file mode 100644 (file)
index 0000000..747f286
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/preprocessing.hpp"
+
+using namespace BehaviorTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+    InferenceEngine::Precision::U8,
+    InferenceEngine::Precision::FP32
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTests, PreprocessingPrecisionConvertTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inputPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        PreprocessingPrecisionConvertTest::getTestCaseName);
+
+}  // namespace
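
Note: ::testing::Combine takes the Cartesian product of its generators, so the instantiation above yields 2 x 1 x 1 = 2 cases. With an empty config map, getTestCaseName (defined in preprocessing.hpp below) produces full gtest names of roughly this form:

    PreprocessingPrecisionConvertTests/PreprocessingPrecisionConvertTest.InternalPluginPrecisionConvert/inPRC=U8_targetDevice=TEMPLATE
    PreprocessingPrecisionConvertTests/PreprocessingPrecisionConvertTest.InternalPluginPrecisionConvert/inPRC=FP32_targetDevice=TEMPLATE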
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_infer_request.cpp
index 248bb47..fb778e5 100644 (file)
@@ -57,8 +57,8 @@ void MKLDNNPlugin::MKLDNNInferRequest::pushInput(const std::string& inputName, I
 
 namespace {
 
-template <typename T>
-void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
+template <typename T, typename DstT>
+void copyFrom(const InferenceEngine::Blob* src, DstT* dst) {
     if (!dst) {
         return;
     }
@@ -75,6 +75,11 @@ void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
     for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
 }
 
+template <typename T>
+void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
+    copyFrom<T>(src, dst);
+}
+
 }  // namespace
 
 void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
@@ -108,18 +113,19 @@ void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
                 case InferenceEngine::Precision::I8:
                     pushInput<int8_t>(input.first, input.second);
                     break;
-                case InferenceEngine::Precision::U16:
-                    // U16 is unsupported by mkldnn, so here we convert the blob and send FP32
-                    iconv = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32,
+                case InferenceEngine::Precision::U16: {
+                    // U16 is unsupported by mkldnn, so here we convert the blob and send it as I32
+                    iconv = InferenceEngine::make_shared_blob<std::int32_t>({InferenceEngine::Precision::I32,
                                                                         input.second->getTensorDesc().getDims(),
                                                                         input.second->getTensorDesc().getLayout()});
                     convertedInputs.push_back(iconv);
                     iconv->allocate();
-                    in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(iconv.get());
-                    if (in_f == nullptr)
+                    auto in = dynamic_cast<InferenceEngine::TBlob<std::int32_t> *>(iconv.get());
+                    if (in == nullptr)
                         THROW_IE_EXCEPTION << "Cannot get TBlob";
-                    copyToFloat<uint16_t>(in_f->data(), input.second.get());
-                    pushInput<float>(input.first, iconv);
+                    copyFrom<uint16_t, std::int32_t>(input.second.get(), in->data());
+                    pushInput<std::int32_t>(input.first, iconv);
+                    }
                     break;
                 case InferenceEngine::Precision::I16:
                     if (graph->hasMeanImageFor(input.first)) {
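
The refactor above generalizes the old float-only copyToFloat helper into copyFrom<SrcT, DstT> and switches the U16 fallback blob from FP32 to I32, keeping integer data in an integer type. Either destination is lossless for U16, since every uint16_t value fits exactly in both int32_t and float's 24-bit mantissa; a quick standalone check of that claim:

    #include <cassert>
    #include <cstdint>

    int main() {
        // Every uint16_t value (0..65535) round-trips exactly through
        // int32_t, so the U16 -> I32 conversion above loses nothing.
        for (std::uint32_t v = 0; v <= 0xFFFFu; ++v) {
            const std::int32_t as_i32 = static_cast<std::int32_t>(v);
            assert(static_cast<std::uint32_t>(as_i32) == v);
        }
        return 0;
    }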
diff --git a/inference-engine/src/mkldnn_plugin/nodes/base.hpp b/inference-engine/src/mkldnn_plugin/nodes/base.hpp
index c0adec0..f31812e 100644 (file)
@@ -63,9 +63,13 @@ protected:
         DataConfigurator(ConfLayout l, bool constant, int inplace = -1):
             layout(l), constant(constant), inplace(inplace) {}
 
+        DataConfigurator(ConfLayout l, Precision::ePrecision prc):
+            layout(l), prc(prc) {}
+
         ConfLayout layout;
         bool constant = false;
         int inplace = -1;
+        Precision::ePrecision prc = Precision::UNSPECIFIED;     // by default use the layer precision
     };
 
     void addConfig(const CNNLayer* layer, std::vector<DataConfigurator> in_l,
@@ -128,7 +132,7 @@ protected:
             // fixing of BF16 precisions where they are - layers naturally support only FP32
             // if we see BF16, that means another floating point format which will be converted by reorder
             // added by current mkl-dnn cpu plugin when it figure out diff in data types on input and output of edges
-            InferenceEngine::Precision precision = data_desc.getPrecision();
+            InferenceEngine::Precision precision = (conf.prc == Precision::UNSPECIFIED) ? data_desc.getPrecision() : Precision(conf.prc);
             if (precision == Precision::BF16) {
                 precision = Precision::FP32;
             }
diff --git a/inference-engine/src/mkldnn_plugin/nodes/reverse_sequence.cpp b/inference-engine/src/mkldnn_plugin/nodes/reverse_sequence.cpp
index 52499d1..a76a0d4 100644 (file)
@@ -59,7 +59,9 @@ public:
             srcStrides = layer->insData[REVERSESEQUENCE_DATA].lock()->getTensorDesc().getBlockingDesc().getStrides();
             work_amount_dst = srcStrides[0] * src_dims[0];
 
-            addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, { DataConfigurator(ConfLayout::PLN) });
+            addConfig(layer,
+                    { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN) },
+                    { DataConfigurator(ConfLayout::PLN, Precision::FP32) });
         } catch (InferenceEngine::details::InferenceEngineException &ex) {
             errorMsg = ex.what();
         }
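
This pinning relies on the new DataConfigurator constructor from base.hpp above: a configurator built with an explicit precision overrides whatever precision the layer carries, while the plain constructor keeps the old inherit-from-layer behaviour. A condensed sketch of the selection rule, using a stand-in enum rather than the real IE types:

    // Stand-in for Precision::ePrecision; UNSPECIFIED means "inherit".
    enum class Prc { UNSPECIFIED, FP32, I32, BF16 };

    // Mirrors the rule added in base.hpp: an explicitly configured
    // precision wins; UNSPECIFIED falls back to the tensor's own.
    Prc effective(Prc tensor_prc, Prc conf_prc) {
        return (conf_prc == Prc::UNSPECIFIED) ? tensor_prc : conf_prc;
    }

    int main() {
        // Pinned: FP32 wins over the layer's BF16.
        bool ok = effective(Prc::BF16, Prc::FP32) == Prc::FP32
               // Inherited: UNSPECIFIED keeps the layer's I32.
               && effective(Prc::I32, Prc::UNSPECIFIED) == Prc::I32;
        return ok ? 0 : 1;
    }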
diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp
new file mode 100644 (file)
index 0000000..769cf8f
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/preprocessing.hpp"
+
+using namespace BehaviorTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+    InferenceEngine::Precision::U16,
+    InferenceEngine::Precision::FP32
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviourPreprocessingTests, PreprocessingPrecisionConvertTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inputPrecisions),
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                                ::testing::ValuesIn(configs)),
+                        PreprocessingPrecisionConvertTest::getTestCaseName);
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp
new file mode 100644 (file)
index 0000000..7d9e773
--- /dev/null
@@ -0,0 +1,106 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include <ie_core.hpp>
+#include "common_test_utils/test_assertions.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "ie_preprocess.hpp"
+#include "functional_test_utils/behavior_test_utils.hpp"
+
+namespace {
+void setInputNetworkPrecision(InferenceEngine::CNNNetwork &network, InferenceEngine::InputsDataMap &inputs_info,
+                              InferenceEngine::Precision input_precision) {
+    inputs_info = network.getInputsInfo();
+    ASSERT_EQ(1u, inputs_info.size());
+    inputs_info.begin()->second->setPrecision(input_precision);
+}
+
+}  // namespace
+
+namespace BehaviorTestsDefinitions {
+
+using  PreprocessingPrecisionConvertParams = std::tuple<
+        InferenceEngine::Precision,         // Input precision
+        std::string,                        // Device name
+        std::map<std::string, std::string>  // Config
+>;
+
+struct PreprocessingPrecisionConvertTest :
+        public testing::WithParamInterface<PreprocessingPrecisionConvertParams>,
+        LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<PreprocessingPrecisionConvertParams> obj) {
+        InferenceEngine::Precision  inPrc;
+        std::string targetDevice;
+        std::map<std::string, std::string> configuration;
+        std::tie(inPrc, targetDevice, configuration) = obj.param;
+        std::ostringstream result;
+        result << "inPRC=" << inPrc.name() << "_";
+        result << "targetDevice=" << targetDevice;
+        if (!configuration.empty()) {
+            for (auto& configItem : configuration) {
+                result << "configItem=" << configItem.first << "_" << configItem.second << "_";
+            }
+        }
+        return result.str();
+    }
+
+    void SetUp() override {
+        // This test:
+        // - Strives to test the plugin's internal preprocessing (precision conversion) only.
+        //   Thus a (logically) no-op graph is used.
+        // - The reference code mimics the preprocessing via an extra ngraph Convert operation.
+        // - Creates/uses two (different) graphs: one to feed the plugin and one to calculate the reference result.
+
+        SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
+
+        std::tie(inPrc, targetDevice, configuration) = this->GetParam();
+
+        bool specialZero = true;
+
+        std::vector<size_t> inputShape    {4, 4};
+
+        auto make_ngraph = [&](bool with_extra_conv) {
+            auto in_prec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(with_extra_conv ? inPrc : decltype(inPrc)(InferenceEngine::Precision::FP32));
+            auto paramsIn = ngraph::builder::makeParams(in_prec, {inputShape});
+            auto paramIn = ngraph::helpers::convert2OutputVector(
+                    ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramsIn));
+
+            auto toF32 = std::make_shared<ngraph::opset1::Convert>(paramIn[0], ngraph::element::Type_t::f32);
+
+            auto constNode = std::make_shared<ngraph::opset1::Constant>(
+                    ngraph::element::Type_t::i64, ngraph::Shape{inputShape.size()}, inputShape);
+            auto reshape = std::dynamic_pointer_cast<ngraph::opset1::Reshape>(
+                    std::make_shared<ngraph::opset1::Reshape>(with_extra_conv ? toF32 : paramIn[0], constNode, specialZero));
+            ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(reshape)};
+            return std::make_shared<ngraph::Function>(results, paramsIn, "Reshape");
+        };
+
+        function            = make_ngraph(false);
+        reference_function  = make_ngraph(true);  // use extra ops to mimic the preprocessing
+    }
+
+    void Validate() override {
+        // force the reference implementation to use the graph with the extra Convert operation
+        function = reference_function;
+        LayerTestsUtils::LayerTestsCommon::Validate();
+    }
+
+public:
+    std::shared_ptr<InferenceEngine::Core> ie = PluginCache::get().ie();
+    std::shared_ptr<ngraph::Function> reference_function;
+};
+
+
+TEST_P(PreprocessingPrecisionConvertTest, InternalPluginPrecisionConvert) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    Run();
+}
+}  // namespace BehaviorTestsDefinitions
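
A note on the skip hook: SKIP_IF_CURRENT_TEST_IS_DISABLED() matches the current test name against the plugin's disabledTestPatterns() list (each plugin defines one in its skip_tests_config.cpp), so a device that cannot yet support a precision can opt out. A hedged, illustrative entry:

    #include <string>
    #include <vector>

    // Hypothetical pattern in some plugin's skip_tests_config.cpp; the
    // regex below is illustrative, not taken from this commit.
    std::vector<std::string> disabledTestPatterns() {
        return {
            R"(.*PreprocessingPrecisionConvertTest.*inPRC=U16.*)",
        };
    }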
diff --git a/ngraph/python/tests/test_ngraph/test_basic.py b/ngraph/python/tests/test_ngraph/test_basic.py
index a86355c..35f62fb 100644 (file)
@@ -71,7 +71,7 @@ def test_ngraph_function_api():
         np.int32,
         pytest.param(np.int64, marks=xfail_issue_35926),
         pytest.param(np.uint8, marks=xfail_issue_36479),
-        pytest.param(np.uint16, marks=xfail_issue_36479),
+        np.uint16,
         pytest.param(np.uint32, marks=xfail_issue_36476),
         pytest.param(np.uint64, marks=xfail_issue_36478),
     ],