Added BatchNormInference tests (#1927)
author Liubov Batanina <liubov.batanina@intel.com>
Tue, 25 Aug 2020 13:28:05 +0000 (16:28 +0300)
committer GitHub <noreply@github.com>
Tue, 25 Aug 2020 13:28:05 +0000 (16:28 +0300)
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_norm.cpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp [new file with mode: 0644]
inference-engine/tests/functional/plugin/shared/src/single_layer_tests/batch_norm.cpp [new file with mode: 0644]
inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp
inference-engine/tests/ngraph_functions/src/batch_norm.cpp [new file with mode: 0644]

diff --git a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_norm.cpp b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/batch_norm.cpp
new file mode 100644 (file)
index 0000000..a63bfc6
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "single_layer_tests/batch_norm.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+        InferenceEngine::Precision::FP32,
+        InferenceEngine::Precision::FP16
+};
+
+const std::vector<double> epsilon = {
+        1e-6,
+        1e-5,
+        1e-4
+};
+const std::vector<std::vector<size_t>> inputShapes = {
+        {1, 3},
+        {2, 5},
+        {1, 3, 10},
+        {1, 3, 1, 1},
+        {2, 5, 4, 4},
+};
+
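+// All combinations of epsilon, precision and input shape, instantiated for the CPU plugin.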
+const auto batchNormParams = testing::Combine(
+        testing::ValuesIn(epsilon),
+        testing::ValuesIn(netPrecisions),
+        testing::ValuesIn(inputShapes),
+        testing::Values(CommonTestUtils::DEVICE_CPU)
+);
+
+INSTANTIATE_TEST_CASE_P(
+        BatchNorm,
+        BatchNormLayerTest,
+        batchNormParams,
+        BatchNormLayerTest::getTestCaseName
+);
+
+}  // namespace
diff --git a/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp b/inference-engine/tests/functional/plugin/shared/include/single_layer_tests/batch_norm.hpp
new file mode 100644 (file)
index 0000000..ced8a2e
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/builders.hpp"
+
+typedef std::tuple<
+        double,                        // epsilon
+        InferenceEngine::Precision,    // Net precision
+        InferenceEngine::SizeVector,   // Input shapes
+        LayerTestsUtils::TargetDevice  // Target device name
+> BatchNormLayerTestParams;
+
+namespace LayerTestsDefinitions {
+
+class BatchNormLayerTest : public testing::WithParamInterface<BatchNormLayerTestParams>,
+                           public LayerTestsUtils::LayerTestsCommon {
+public:
+    static std::string getTestCaseName(const testing::TestParamInfo<BatchNormLayerTestParams>& obj);
+
+protected:
+    void SetUp() override;
+};
+
+}  // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/batch_norm.cpp b/inference-engine/tests/functional/plugin/shared/src/single_layer_tests/batch_norm.cpp
new file mode 100644 (file)
index 0000000..76c5255
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/batch_norm.hpp"
+
+namespace LayerTestsDefinitions {
+std::string BatchNormLayerTest::getTestCaseName(const testing::TestParamInfo<BatchNormLayerTestParams>& obj) {
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::SizeVector inputShapes;
+    double epsilon;
+    std::string targetDevice;
+    std::tie(epsilon, netPrecision, inputShapes, targetDevice) = obj.param;
+
+    std::ostringstream result;
+    result << "IS=" << CommonTestUtils::vec2str(inputShapes) << "_";
+    result << "epsilon=" << epsilon << "_";
+    result << "netPRC=" << netPrecision.name() << "_";
+    result << "targetDevice=" << targetDevice;
+    return result.str();
+}
+
+void BatchNormLayerTest::SetUp() {
+    InferenceEngine::Precision netPrecision;
+    InferenceEngine::SizeVector inputShapes;
+    double epsilon;
+    std::tie(epsilon, netPrecision, inputShapes, targetDevice) = this->GetParam();
+    auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
+
+    auto params = ngraph::builder::makeParams(ngPrc, {inputShapes});
+    auto paramOuts = ngraph::helpers::convert2OutputVector(
+            ngraph::helpers::castOps2Nodes<ngraph::opset4::Parameter>(params));
+
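+    // Build a single-op function Parameter -> BatchNormInference -> Result; the gamma, beta,
+    // mean and variance constants are created inside the builder.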
+    auto batchNorm = ngraph::builder::makeBatchNormInference(paramOuts[0], epsilon);
+    ngraph::ResultVector results{std::make_shared<ngraph::opset4::Result>(batchNorm)};
+    function = std::make_shared<ngraph::Function>(results, params, "BatchNormInference");
+}
+
+TEST_P(BatchNormLayerTest, CompareWithRefs) {
+    Run();
+}
+
+}  // namespace LayerTestsDefinitions
\ No newline at end of file
diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp
index 142280e..fc1f6bb 100644 (file)
@@ -334,5 +334,8 @@ std::shared_ptr<ngraph::Node> makePad(const ngraph::Output<Node>& data,
                                       float argPadValue,
                                       ngraph::helpers::PadMode padMode);
 
+std::shared_ptr<ngraph::Node> makeBatchNormInference(const ngraph::Output<Node>& data,
+                                                     double epsilon);
+
 }  // namespace builder
 }  // namespace ngraph
diff --git a/inference-engine/tests/ngraph_functions/src/batch_norm.cpp b/inference-engine/tests/ngraph_functions/src/batch_norm.cpp
new file mode 100644 (file)
index 0000000..c0f9606
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include <memory>
+#include <random>
+#include <algorithm>
+
+#include "ngraph_functions/builders.hpp"
+
+namespace ngraph {
+namespace builder {
+std::shared_ptr<ngraph::Node> makeBatchNormInference(const ngraph::Output<Node>& data,
+                                                     double epsilon) {
+    auto ngPrc = data.get_element_type();
+    size_t C   = data.get_shape().at(1);
+    bool random = true;
+    std::vector<float> values(C);
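+    // gamma, beta and mean are per-channel constants of shape {C}; with 'random' set,
+    // makeConstant generates the data and the zero-initialized 'values' vector is unused.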
+    auto gamma = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{C}, values, random);
+    auto beta  = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{C}, values, random);
+    auto mean  = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{C}, values, random);
+
+    // Fill the variance vector with non-negative random values (epsilon keeps the denominator positive)
+    std::default_random_engine gen;
+    std::uniform_real_distribution<float> dis(0.0, 10.0);
+    std::generate(values.begin(), values.end(), [&dis, &gen]() { return dis(gen); });
+    auto variance = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{C}, values, !random);
+    return std::make_shared<ngraph::opset4::BatchNormInference>(data, gamma, beta, mean, variance, epsilon);
+}
+}  // namespace builder
+}  // namespace ngraph