[PP GAPI] Extended plug-ins shared precision conversion tests to use (#2677)
authorAnton Potapov <anton.potapov@intel.com>
Mon, 19 Oct 2020 09:35:59 +0000 (12:35 +0300)
committerGitHub <noreply@github.com>
Mon, 19 Oct 2020 09:35:59 +0000 (12:35 +0300)
`GetBlob()` as well

- tests were extended to cover the case when input tensors are copied into
the Blob returned by `InferRequest::GetBlob`
- channel number of input tensor is made a test parameter

docs/template_plugin/tests/functional/shared_tests_instances/behavior/preprocessing.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/behavior/preprocessing.cpp
inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp

index 747f286..344ceac 100644 (file)
@@ -19,9 +19,20 @@ const std::vector<std::map<std::string, std::string>> configs = {
     {}
 };
 
-INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTests, PreprocessingPrecisionConvertTest,
+INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(inputPrecisions),
+                                ::testing::Values(1, 2, 3, 4, 5),   // Number of input tensor channels
+                                ::testing::Values(true),            // Use SetInput
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        PreprocessingPrecisionConvertTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inputPrecisions),
+                                ::testing::Values(4, 5),       // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
+                                ::testing::Values(false),      // use GetBlob
                                 ::testing::Values("TEMPLATE"),
                                 ::testing::ValuesIn(configs)),
                         PreprocessingPrecisionConvertTest::getTestCaseName);
index 769cf8f..a0106b0 100644 (file)
@@ -19,11 +19,21 @@ const std::vector<std::map<std::string, std::string>> configs = {
     {}
 };
 
-INSTANTIATE_TEST_CASE_P(smoke_BehaviourPreprocessingTests, PreprocessingPrecisionConvertTest,
+INSTANTIATE_TEST_CASE_P(BehaviourPreprocessingTestsViaSetInput, PreprocessingPrecisionConvertTest,
                         ::testing::Combine(
                                 ::testing::ValuesIn(inputPrecisions),
+                                ::testing::Values(1, 2, 3, 4, 5),   // Number of input tensor channels
+                                ::testing::Values(true),            // Use SetInput
                                 ::testing::Values(CommonTestUtils::DEVICE_CPU),
                                 ::testing::ValuesIn(configs)),
                         PreprocessingPrecisionConvertTest::getTestCaseName);
 
+INSTANTIATE_TEST_CASE_P(BehaviourPreprocessingTestsViaGetBlob, PreprocessingPrecisionConvertTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(inputPrecisions),
+                                ::testing::Values(4, 5),       // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
+                                ::testing::Values(false),      // use GetBlob
+                                ::testing::Values(CommonTestUtils::DEVICE_CPU),
+                                ::testing::ValuesIn(configs)),
+                        PreprocessingPrecisionConvertTest::getTestCaseName);
 }  // namespace
index 7d9e773..bb27da5 100644 (file)
@@ -27,6 +27,8 @@ namespace BehaviorTestsDefinitions {
 
 using  PreprocessingPrecisionConvertParams = std::tuple<
         InferenceEngine::Precision,         // Input precision
+        unsigned,                           // channels number
+        bool,                               // Use normal (i.e. SetInput()) or unusual (i.e. GetBlob()) input method
         std::string,                        // Device name
         std::map<std::string, std::string>  // Config
 >;
@@ -37,11 +39,15 @@ struct PreprocessingPrecisionConvertTest :
 public:
     static std::string getTestCaseName(testing::TestParamInfo<PreprocessingPrecisionConvertParams> obj) {
         InferenceEngine::Precision  inPrc;
+        bool useSetInput;
+        unsigned channels;
         std::string targetDevice;
         std::map<std::string, std::string> configuration;
-        std::tie(inPrc, targetDevice, configuration) = obj.param;
+        std::tie(inPrc, channels, useSetInput, targetDevice, configuration) = obj.param;
         std::ostringstream result;
         result << "inPRC=" << inPrc.name() << "_";
+        result << channels << "Ch" << "_";
+        result << (useSetInput ? "SetInput" : "GetBlob") << "_";
         result << "targetDevice=" << targetDevice;
         if (!configuration.empty()) {
             for (auto& configItem : configuration) {
@@ -51,6 +57,32 @@ public:
         return result.str();
     }
 
+    // Need to override Infer() due to usage of GetBlob() as input method.
+    // Mostly a copy of LayerTestsCommon::Infer()
+    void Infer() override {
+        inferRequest = executableNetwork.CreateInferRequest();
+        inputs.clear();
+
+        for (const auto &input : executableNetwork.GetInputsInfo()) {
+            const auto &info = input.second;
+            auto blob = GenerateInput(*info);
+            if (!use_set_input) {
+                InferenceEngine::Blob::Ptr input = inferRequest.GetBlob(info->name());
+                blob_copy(blob, input);
+            } else {
+                inferRequest.SetBlob(info->name(), blob);
+            }
+
+            inputs.push_back(blob);
+        }
+        if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
+            configuration.count(InferenceEngine::PluginConfigParams::YES)) {
+            auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
+            inferRequest.SetBatch(batchSize);
+        }
+        inferRequest.Infer();
+    }
+
     void SetUp() override {
         // This test:
         // - Strive to test the plugin internal preprocessing (precision conversion) only.
@@ -60,11 +92,11 @@ public:
 
         SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
 
-        std::tie(inPrc, targetDevice, configuration) = this->GetParam();
+        std::tie(inPrc, channels, use_set_input, targetDevice, configuration) = this->GetParam();
 
         bool specialZero = true;
 
-        std::vector<size_t> inputShape    {4, 4};
+        std::vector<size_t> inputShape(channels, 4);
 
         auto make_ngraph = [&](bool with_extra_conv) {
             auto in_prec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(with_extra_conv ? inPrc : decltype(inPrc)(InferenceEngine::Precision::FP32));
@@ -95,6 +127,8 @@ public:
 public:
     std::shared_ptr<InferenceEngine::Core> ie = PluginCache::get().ie();
     std::shared_ptr<ngraph::Function> reference_function;
+    bool use_set_input = true;
+    unsigned channels = 0;
 };