[CPU] Added eltwise Round-5 (#2347)
author     Alexandra Sidorova <alexandra.sidorova@intel.com>
           Mon, 2 Nov 2020 15:44:50 +0000 (18:44 +0300)
committer  GitHub <noreply@github.com>
           Mon, 2 Nov 2020 15:44:50 +0000 (18:44 +0300)
inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp
inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp
inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h
inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp
inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/activation.cpp
inference-engine/thirdparty/mkl-dnn
ngraph/python/tests/test_ngraph/test_ops_unary.py
ngraph/python/tests/test_onnx/test_backend.py

index ccdef34..8c2236b 100644 (file)
--- a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp
@@ -600,7 +600,8 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndActivation(MKLDNNGraph &graph) {
         return eltwiseNode &&
             (eltwiseNode->getOpType() == Relu ||
             (conv->getCnnLayer()->precision == Precision::FP32 &&
-            IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})));
+            IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid,
+                                               Round})));
     };
 
     for (int i = 0; i < graphNodes.size(); i++) {
@@ -678,7 +679,8 @@ void MKLDNNGraphOptimizer::FuseFullyConnectedAndSimpleOperation(MKLDNNGraph &gra
             if (eltwiseNode == nullptr)
                 THROW_IE_EXCEPTION << "Cannot get Eltwise node " << childNode->getName();
 
-            if (IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})) {
+            if (IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish,
+                                                   Hsigmoid, Round})) {
                 return true;
             } else if (IsOneOf(eltwiseNode->getOpType(), {MulAdd, Prelu})) {
                 if (eltwiseNode->getOpType() == MulAdd && eltwiseNode->getCnnLayer()->blobs.size() != 2)
@@ -1044,7 +1046,8 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndSimpleOperation(MKLDNNGraph &graph)
 
             return ((eltwiseNode->getOpType() == MulAdd && node->getCnnLayer()->blobs.size() == 2) ||
                     (eltwiseNode->getOpType() == Prelu) ||
-                    IsOneOf(eltwiseNode->getOpType(), {Relu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid}));
+                    IsOneOf(eltwiseNode->getOpType(), {Relu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish,
+                                                       Hsigmoid, Round}));
         }
 
         return false;
@@ -1258,7 +1261,8 @@ void MKLDNNGraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(MKLDNNG
         return eltwiseNode &&
             (eltwiseNode->getOpType() == Relu ||
             (conv->getCnnLayer()->precision == Precision::FP32 &&
-             IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})));
+             IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid,
+                                                Round})));
     };
 
     for (auto &graphNode : graphNodes) {
@@ -1611,7 +1615,7 @@ void MKLDNNGraphOptimizer::FuseNormalizeAndSimpleOperation(MKLDNNGraph &graph) {
             if (eltwiseNode == nullptr)
                 THROW_IE_EXCEPTION << "Cannot get Eltwise node " << node->getName();
             return IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Tanh, Swish,
-                                                      Hswish, Mish, Hsigmoid, Linear, Abs, Square, Sqrt}) ||
+                                                      Hswish, Mish, Hsigmoid, Round, Linear, Abs, Square, Sqrt}) ||
                     ((eltwiseNode->getOpType() == MulAdd && eltwiseNode->getCnnLayer()->blobs.size() == 2) ||
                      (eltwiseNode->getOpType() == Prelu));
         }
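All four optimizer hunks above extend the same pattern: each fusion pass keeps a whitelist of eltwise activation types it knows how to fold into the producer, so enabling Round as a fused post-op only means appending it to each list. A minimal standalone sketch of that predicate shape (IsOneOf is reimplemented here, and isFusableActivation is a hypothetical condensation of the check in FuseConvolutionAndActivation):

```cpp
#include <algorithm>
#include <initializer_list>

// Simplified mirror of the plugin's EltwiseOpType enum (subset only).
enum EltwiseOpType { Relu, Elu, Logistic, BoundedRelu, Clamp, Swish,
                     Hswish, Mish, Hsigmoid, Round };

// The IsOneOf helper reduces every fusion predicate to a whitelist test.
static bool IsOneOf(EltwiseOpType type, std::initializer_list<EltwiseOpType> allowed) {
    return std::find(allowed.begin(), allowed.end(), type) != allowed.end();
}

// Hypothetical condensation of the FuseConvolutionAndActivation check:
// Relu fuses unconditionally, the rest only when the convolution runs in FP32.
static bool isFusableActivation(EltwiseOpType type, bool convIsFp32) {
    return type == Relu ||
           (convIsFp32 &&
            IsOneOf(type, {Elu, Logistic, BoundedRelu, Clamp, Swish,
                           Hswish, Mish, Hsigmoid, Round}));
}
```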
index 9af1dc5..a4bfc11 100644 (file)
--- a/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_node.cpp
@@ -75,6 +75,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
         { "HSwish", Eltwise },
         { "Mish", Eltwise },
         { "HSigmoid", Eltwise },
+        { "Round", Eltwise },
         { "ScaleShift", Eltwise },
         { "PReLU", Eltwise },
         { "Norm", Lrn },
index 2c0fc6f..2bffe17 100644 (file)
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.cpp
@@ -312,7 +312,8 @@ private:
         auto& eltwiseNode = dynamic_cast<const MKLDNNEltwiseNode&>(node);
         switch (eltwiseNode.getOpType()) {
             case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
-            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+            case Mish: case Hsigmoid: case Round:
                 return jit_mkldnn_emitter::get_supported_precisions();
             case Add:               return jit_add_emitter::get_supported_precisions();
             case MulAdd:            return jit_mul_add_emitter::get_supported_precisions();
@@ -345,7 +346,8 @@ private:
         auto& eltwiseNode = dynamic_cast<const MKLDNNEltwiseNode&>(node);
         switch (eltwiseNode.getOpType()) {
             case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
-            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+            case Mish: case Hsigmoid: case Round:
                                     return std::make_shared<jit_mkldnn_emitter>(this, isa, eltwiseNode, exec_prec);
             case Add:               return std::make_shared<jit_add_emitter>(this, isa, eltwiseNode, exec_prec);
             case MulAdd:            return std::make_shared<jit_mul_add_emitter>(this, isa, eltwiseNode, exec_prec);
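Both emitter hunks route Round through the generic jit_mkldnn_emitter, meaning the op is lowered via MKL-DNN's own eltwise injector rather than a hand-written JIT emitter, so supporting it costs only two extra case labels. A sketch of that factory split (all types here are hypothetical stand-ins):

```cpp
#include <memory>

struct Emitter { virtual ~Emitter() = default; };
// Wraps MKL-DNN's eltwise injector; serves every op the library implements.
struct JitMkldnnEmitter : Emitter {};
// Example of an op with a dedicated, hand-written JIT emitter.
struct JitAddEmitter : Emitter {};

enum EltwiseOpType { Round, Hsigmoid, Add };

// Ops whose semantics an MKL-DNN eltwise algorithm already provides
// (Round included) share the generic emitter; only truly custom ops
// get their own emitter class.
std::unique_ptr<Emitter> createEmitter(EltwiseOpType op) {
    switch (op) {
        case Round: case Hsigmoid: return std::make_unique<JitMkldnnEmitter>();
        case Add:                  return std::make_unique<JitAddEmitter>();
    }
    return nullptr;
}
```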
@@ -764,6 +766,18 @@ MKLDNNEltwiseNode::initializers = {
             opType = Hsigmoid;
             algorithm = mkldnn::eltwise_hsigmoid;
         }},
+        {"round", [](GenericLayer* activationLayer, EltwiseOpType& opType, mkldnn::algorithm& algorithm, float& alpha, float& beta) {
+            alpha = 0.0f;
+            beta = 0.0f;
+            opType = Round;
+            std::string mode = activationLayer->GetParamAsString("mode", "half_to_even");
+            if (mode == "half_to_even")
+                algorithm = mkldnn::eltwise_round_half_to_even;
+            else if (mode == "half_away_from_zero")
+                algorithm = mkldnn::eltwise_round_half_away_from_zero;
+            else
+                THROW_IE_EXCEPTION << "Round layer with name " << activationLayer->name << " doesn't support mode " << mode;
+        }},
 };
 
 void MKLDNNEltwiseNode::init() {
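The new initializer reads the layer's mode attribute and picks one of the two MKL-DNN round algorithms; alpha and beta are unused and zeroed. The numeric semantics can be sanity-checked against the C++ standard library: half_to_even matches std::nearbyint under the default round-to-nearest FP mode, while half_away_from_zero matches std::round. A small self-contained check (not plugin code) using the same inputs as the Python tests further down:

```cpp
#include <cassert>
#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
    // Same inputs and expectations as the ngraph Python tests in this commit.
    const float in[]   = {-2.5f, -1.5f, -0.5f, 0.5f, 0.9f, 1.5f, 2.3f, 2.5f, 3.5f};
    const float even[] = {-2.f, -2.f, 0.f, 0.f, 1.f, 2.f, 2.f, 2.f, 4.f};
    const float away[] = {-3.f, -2.f, -1.f, 1.f, 1.f, 2.f, 2.f, 3.f, 4.f};

    std::fesetround(FE_TONEAREST);  // ties-to-even, the usual default mode
    for (int i = 0; i < 9; ++i) {
        // half_to_even == std::nearbyint in round-to-nearest mode;
        // half_away_from_zero == std::round by definition.
        assert(std::nearbyint(in[i]) == even[i]);
        assert(std::round(in[i]) == away[i]);
    }
    std::puts("both rounding modes behave as the tests expect");
}
```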
@@ -833,7 +847,8 @@ void MKLDNNEltwiseNode::init() {
                comparator(layerType, "swish") ||
                comparator(layerType, "hswish") ||
                comparator(layerType, "mish") ||
-               comparator(layerType, "hsigmoid")) {
+               comparator(layerType, "hsigmoid") ||
+               comparator(layerType, "round")) {
         initializers[layerType](getCnnLayer().get(), eltwiseOp, eltwiseAlgorithm, alpha, beta);
     } else {
         THROW_IE_EXCEPTION << "Unsupported algorithm for Eltwise node with name `" << getName() << "`.";
@@ -843,7 +858,8 @@ void MKLDNNEltwiseNode::init() {
 size_t MKLDNNEltwiseNode::getOpInputsNum() const {
     switch (getOpType()) {
         case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt: case PowerStatic:
-        case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+        case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+        case Mish: case Hsigmoid: case Round:
         case LogicalNot:
             return 1;
         case Add: case Subtract: case Multiply: case Divide: case FloorMod: case Mod: case Maximum: case Minimum: case SquaredDifference:
@@ -1469,7 +1485,8 @@ void MKLDNNEltwiseNode::executeReference(const std::vector<const uint8_t *>& src
 
             switch (getOpType()) {
                 case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
-                case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+                case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+                case Mish: case Hsigmoid: case Round:
                     *dst_ptr_f = ref_eltwise_injector->compute_scalar(src_f[0]); break;
                 case Add:               *dst_ptr_f = src_f[0] + src_f[1]; break;
                 case MulAdd:            *dst_ptr_f = src_f[0] * src_f[1] + src_f[2]; break;
@@ -1570,6 +1587,8 @@ void MKLDNNEltwiseNode::appendPostOps(mkldnn::post_ops& ops) {
         case mkldnn::eltwise_hswish:
         case mkldnn::eltwise_mish:
         case mkldnn::eltwise_hsigmoid:
+        case mkldnn::eltwise_round_half_to_even:
+        case mkldnn::eltwise_round_half_away_from_zero:
             ops.append_eltwise(1.0, getAlgorithm(), getAlpha(), getBeta());
             break;
         case mkldnn::depthwise_scale_shift:
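With the two round algorithms whitelisted in appendPostOps, a fused Round is expressed as a plain eltwise post-op on the preceding primitive. A hedged sketch of what that amounts to (the helper is hypothetical; the round algorithm enums come from the mkl-dnn submodule bump in this commit, and alpha/beta are ignored by them):

```cpp
#include <mkldnn.hpp>

// Attach a fused Round to a producer's primitive attributes as an
// ordinary eltwise post-op; alpha/beta are unused by the round
// algorithms, hence the zeros.
void appendRoundPostOp(mkldnn::primitive_attr& attr, bool halfToEven) {
    mkldnn::post_ops ops;
    ops.append_eltwise(/*scale=*/1.0f,
                       halfToEven ? mkldnn::eltwise_round_half_to_even
                                  : mkldnn::eltwise_round_half_away_from_zero,
                       /*alpha=*/0.0f, /*beta=*/0.0f);
    attr.set_post_ops(ops);
}
```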
index 9b003ca..1590d37 100644 (file)
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_eltwise_node.h
@@ -59,7 +59,8 @@ enum EltwiseOpType {
     Prelu,
     Mish,
     Hswish,
-    Hsigmoid
+    Hsigmoid,
+    Round
 };
 
 struct jit_eltwise_params {
index de76d7d..799f2da 100644 (file)
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp
@@ -2123,7 +2123,7 @@ bool MKLDNNInterpolateNode::canFuse(const MKLDNNNodePtr& node) const {
         if (eltwiseNode == nullptr)
             THROW_IE_EXCEPTION << "Cannot get eltwise node " << node->getName();
         return isOneOf(eltwiseNode->getOpType(), {MulAdd, Prelu, Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp,
-                                                  Tanh, Swish, Hswish, Mish, Hsigmoid, Linear, Abs, Square, Sqrt});
+                                                  Tanh, Swish, Hswish, Mish, Hsigmoid, Round, Linear, Abs, Square, Sqrt});
     }
 
     return false;
index fc50b3b..c011eb3 100644 (file)
--- a/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/activation.cpp
+++ b/inference-engine/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/activation.cpp
@@ -23,34 +23,36 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 };
 
 const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
-        {Sigmoid,     {}},
-        {Tanh,        {}},
-        {Relu,        {}},
-        {Exp,         {}},
-        {Log,         {}},
-        {Sign,        {}},
-        {Abs,         {}},
-        {Clamp,       {{-2.0f, 2.0f}}},
-        {Negative,    {}},
-        {Acos,        {}},
-        {Asin,        {}},
-        {Atan,        {}},
-        {Cos,         {}},
-        {Cosh,        {}},
-        {Floor,       {}},
-        {Sin,         {}},
-        {Sinh,        {}},
-        {Sqrt,        {}},
-        {Tan,         {}},
-        {Elu,         {{0.1f}}},
-        {Erf,         {}},
-        {HardSigmoid, {{0.2f, 0.5f}}},
-        {Selu,        {{1.6732f, 1.0507f}}},
-        {Ceiling,     {}},
-        {Mish,        {}},
-        {HSwish,      {}},
-        {SoftPlus,    {}},
-        {HSigmoid,    {}}
+        {Sigmoid,               {}},
+        {Tanh,                  {}},
+        {Relu,                  {}},
+        {Exp,                   {}},
+        {Log,                   {}},
+        {Sign,                  {}},
+        {Abs,                   {}},
+        {Clamp,                 {{-2.0f, 2.0f}}},
+        {Negative,              {}},
+        {Acos,                  {}},
+        {Asin,                  {}},
+        {Atan,                  {}},
+        {Cos,                   {}},
+        {Cosh,                  {}},
+        {Floor,                 {}},
+        {Sin,                   {}},
+        {Sinh,                  {}},
+        {Sqrt,                  {}},
+        {Tan,                   {}},
+        {Elu,                   {{0.1f}}},
+        {Erf,                   {}},
+        {HardSigmoid,           {{0.2f, 0.5f}}},
+        {Selu,                  {{1.6732f, 1.0507f}}},
+        {Ceiling,               {}},
+        {Mish,                  {}},
+        {HSwish,                {}},
+        {SoftPlus,              {}},
+        {HSigmoid,              {}},
+        {RoundHalfToEven,       {}},
+        {RoundHalfAwayFromZero, {}}
 };
 
 const std::map<ActivationTypes, std::vector<std::vector<float>>> activationParamTypes = {
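The two new table entries register both Round modes with the shared single-layer activation tests; the empty vector means the op needs no constructor arguments beyond its mode (contrast Clamp's {min, max}). A generic sketch of how such a table can drive a value-parameterized gtest suite (names hypothetical, not the actual ActivationLayerTest plumbing):

```cpp
#include <gtest/gtest.h>
#include <string>
#include <utility>
#include <vector>

using ActivationParams = std::pair<std::string, std::vector<float>>;

class ActivationSketchTest : public ::testing::TestWithParam<ActivationParams> {};

TEST_P(ActivationSketchTest, AcceptsConstructorArgs) {
    const ActivationParams& p = GetParam();
    // Round carries its behavior in the `mode` attribute, so both entries
    // come with an empty argument vector.
    if (p.first.rfind("Round", 0) == 0) EXPECT_TRUE(p.second.empty());
}

INSTANTIATE_TEST_CASE_P(RoundModes, ActivationSketchTest,
    ::testing::Values(ActivationParams{"RoundHalfToEven", {}},
                      ActivationParams{"RoundHalfAwayFromZero", {}}));
```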
index d7d8ed4..5ef085d 160000 (submodule)
--- a/inference-engine/thirdparty/mkl-dnn
+++ b/inference-engine/thirdparty/mkl-dnn
@@ -1 +1 @@
-Subproject commit d7d8ed46078b637794bc91215e1a982bb0f1683a
+Subproject commit 5ef085d5af65e8966e03cdfcbaa65761d61a5c9a
index b58e280..8621c09 100644 (file)
--- a/ngraph/python/tests/test_ngraph/test_ops_unary.py
+++ b/ngraph/python/tests/test_ngraph/test_ops_unary.py
@@ -153,13 +153,11 @@ def test_round_even():
     assert list(node.get_output_shape(0)) == [3, 10]
     assert node.get_output_element_type(0) == Type.f32
 
-    # Excluded because this part needs mkldnn implementation of Round operation
-    # Need to uncomment and check when 37651 will be done.
-    # input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
-    # expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0]
+    input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
+    expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0]
 
-    # result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN")
-    # assert np.allclose(result, expected)
+    result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN")
+    assert np.allclose(result, expected)
 
 
 def test_round_away():
@@ -172,13 +170,11 @@ def test_round_away():
     assert list(node.get_output_shape(0)) == [3, 10]
     assert node.get_output_element_type(0) == Type.f32
 
-    # Excluded because this part needs mkldnn implementation of Round operation
-    # Need to uncomment and check when 37651 will be done.
-    # input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
-    # expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]
+    input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
+    expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]
 
-    # result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO")
-    # assert np.allclose(result, expected)
+    result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO")
+    assert np.allclose(result, expected)
 
 
 def test_hsigmoid():
index 214b078..565ca81 100644 (file)
--- a/ngraph/python/tests/test_onnx/test_backend.py
+++ b/ngraph/python/tests/test_onnx/test_backend.py
@@ -338,7 +338,6 @@ tests_expected_to_fail = [
         "OnnxBackendNodeModelTest.test_clip_default_int8_max_cpu"),
     (xfail_issue_38091,
         "OnnxBackendNodeModelTest.test_gather_negative_indices_cpu",
-        "OnnxBackendNodeModelTest.test_round_cpu",
         "OnnxBackendNodeModelTest.test_mvn_cpu",
         "OnnxBackendNodeModelTest.test_elu_example_cpu"),
     (xfail_issue_35929,