return eltwiseNode &&
(eltwiseNode->getOpType() == Relu ||
(conv->getCnnLayer()->precision == Precision::FP32 &&
- IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})));
+ IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid,
+ Round})));
};
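The lambda above is the activation-fusing gate for convolutions: ReLU may fuse at any precision, while the remaining unary ops, now including Round, fuse only into FP32 convolutions. A standalone restatement of the check (a sketch; `isFusableActivation` and `convIsFP32` are illustrative names, not the plugin's API):

// Sketch only: mirrors the predicate above, using the file's IsOneOf helper.
bool isFusableActivation(EltwiseOpType op, bool convIsFP32) {
    if (op == Relu)
        return true;   // ReLU fuses regardless of the convolution precision
    if (!convIsFP32)
        return false;  // every other activation is gated on FP32
    return IsOneOf(op, {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid, Round});
}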
for (int i = 0; i < graphNodes.size(); i++) {
if (eltwiseNode == nullptr)
THROW_IE_EXCEPTION << "Cannot get Eltwise node " << childNode->getName();
- if (IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})) {
+ if (IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish,
+ Hsigmoid, Round})) {
return true;
} else if (IsOneOf(eltwiseNode->getOpType(), {MulAdd, Prelu})) {
if (eltwiseNode->getOpType() == MulAdd && eltwiseNode->getCnnLayer()->blobs.size() != 2)
return ((eltwiseNode->getOpType() == MulAdd && node->getCnnLayer()->blobs.size() == 2) ||
(eltwiseNode->getOpType() == Prelu) ||
- IsOneOf(eltwiseNode->getOpType(), {Relu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid}));
+ IsOneOf(eltwiseNode->getOpType(), {Relu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish,
+ Hsigmoid, Round}));
}
return false;
return eltwiseNode &&
(eltwiseNode->getOpType() == Relu ||
(conv->getCnnLayer()->precision == Precision::FP32 &&
- IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})));
+ IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid,
+ Round})));
};
for (auto &graphNode : graphNodes) {
if (eltwiseNode == nullptr)
THROW_IE_EXCEPTION << "Cannot get Eltwise node " << node->getName();
return IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Tanh, Swish,
- Hswish, Mish, Hsigmoid, Linear, Abs, Square, Sqrt}) ||
+ Hswish, Mish, Hsigmoid, Round, Linear, Abs, Square, Sqrt}) ||
((eltwiseNode->getOpType() == MulAdd && eltwiseNode->getCnnLayer()->blobs.size() == 2) ||
(eltwiseNode->getOpType() == Prelu));
}
{ "HSwish", Eltwise },
{ "Mish", Eltwise },
{ "HSigmoid", Eltwise },
+ { "Round", Eltwise },
{ "ScaleShift", Eltwise },
{ "PReLU", Eltwise },
{ "Norm", Lrn },
auto& eltwiseNode = dynamic_cast<const MKLDNNEltwiseNode&>(node);
switch (eltwiseNode.getOpType()) {
case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
- case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+ case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+ case Mish: case Hsigmoid: case Round:
return jit_mkldnn_emitter::get_supported_precisions();
case Add: return jit_add_emitter::get_supported_precisions();
case MulAdd: return jit_mul_add_emitter::get_supported_precisions();
auto& eltwiseNode = dynamic_cast<const MKLDNNEltwiseNode&>(node);
switch (eltwiseNode.getOpType()) {
case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
- case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+ case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+ case Mish: case Hsigmoid: case Round:
return std::make_shared<jit_mkldnn_emitter>(this, isa, eltwiseNode, exec_prec);
case Add: return std::make_shared<jit_add_emitter>(this, isa, eltwiseNode, exec_prec);
case MulAdd: return std::make_shared<jit_mul_add_emitter>(this, isa, eltwiseNode, exec_prec);
opType = Hsigmoid;
algorithm = mkldnn::eltwise_hsigmoid;
}},
+ {"round", [](GenericLayer* activationLayer, EltwiseOpType& opType, mkldnn::algorithm& algorithm, float& alpha, float& beta) {
+ alpha = 0.0f;
+ beta = 0.0f;
+ opType = Round;
+ std::string mode = activationLayer->GetParamAsString("mode", "half_to_even");
+ if (mode == "half_to_even")
+ algorithm = mkldnn::eltwise_round_half_to_even;
+ else if (mode == "half_away_from_zero")
+ algorithm = mkldnn::eltwise_round_half_away_from_zero;
+ else
+ THROW_IE_EXCEPTION << "Round layer with name " << activationLayer->name << " doesn't support mode " << mode;
+ }},
};
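The only Round-specific plumbing is the "mode" attribute parsed above; the two algorithms differ solely on ties, i.e. values exactly halfway between integers. As a sanity check, the intended semantics match these standard C++ functions (a reference sketch, not the mkldnn kernels):

#include <cmath>

// half_away_from_zero: ties round away from zero, i.e. std::round semantics.
float round_half_away_from_zero(float x) { return std::round(x); }

// half_to_even ("banker's rounding"): ties go to the nearest even integer;
// std::nearbyint behaves this way under the default FE_TONEAREST rounding mode.
float round_half_to_even(float x) { return std::nearbyint(x); }

For example, 2.5f rounds to 3.0f away from zero but to 2.0f to even, and -2.5f to -3.0f versus -2.0f; the Python tests at the bottom of this patch exercise exactly these tie cases.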
void MKLDNNEltwiseNode::init() {
comparator(layerType, "swish") ||
comparator(layerType, "hswish") ||
comparator(layerType, "mish") ||
- comparator(layerType, "hsigmoid")) {
+ comparator(layerType, "hsigmoid") ||
+ comparator(layerType, "round")) {
initializers[layerType](getCnnLayer().get(), eltwiseOp, eltwiseAlgorithm, alpha, beta);
} else {
THROW_IE_EXCEPTION << "Unsupported algorithm for Eltwise node with name `" << getName() << "`.";
size_t MKLDNNEltwiseNode::getOpInputsNum() const {
switch (getOpType()) {
case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt: case PowerStatic:
- case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+ case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+ case Mish: case Hsigmoid: case Round:
case LogicalNot:
return 1;
case Add: case Subtract: case Multiply: case Divide: case FloorMod: case Mod: case Maximum: case Minimum: case SquaredDifference:
switch (getOpType()) {
case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
- case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+ case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+ case Mish: case Hsigmoid: case Round:
*dst_ptr_f = ref_eltwise_injector->compute_scalar(src_f[0]); break;
case Add: *dst_ptr_f = src_f[0] + src_f[1]; break;
case MulAdd: *dst_ptr_f = src_f[0] * src_f[1] + src_f[2]; break;
case mkldnn::eltwise_hswish:
case mkldnn::eltwise_mish:
case mkldnn::eltwise_hsigmoid:
+ case mkldnn::eltwise_round_half_to_even:
+ case mkldnn::eltwise_round_half_away_from_zero:
ops.append_eltwise(1.0, getAlgorithm(), getAlpha(), getBeta());
break;
case mkldnn::depthwise_scale_shift:
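Listing the two new algorithms in this switch lets a fused Round run as an mkldnn post-op of the producing primitive instead of as a standalone layer. Roughly how such a chain is consumed downstream (a sketch against the legacy mkldnn 0.x C++ API used here; Round ignores alpha and beta, which the initializer sets to zero):

mkldnn::post_ops ops;
ops.append_eltwise(1.0f, mkldnn::eltwise_round_half_to_even, 0.0f, 0.0f);  // scale, alg, alpha, beta
mkldnn::primitive_attr attr;
attr.set_post_ops(ops);  // handed to the producing primitive's descriptor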
Prelu,
Mish,
Hswish,
- Hsigmoid
+ Hsigmoid,
+ Round
};
struct jit_eltwise_params {
if (eltwiseNode == nullptr)
THROW_IE_EXCEPTION << "Cannot get eltwise node " << node->getName();
return isOneOf(eltwiseNode->getOpType(), {MulAdd, Prelu, Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp,
- Tanh, Swish, Hswish, Mish, Hsigmoid, Linear, Abs, Square, Sqrt});
+ Tanh, Swish, Hswish, Mish, Hsigmoid, Round, Linear, Abs, Square, Sqrt});
}
return false;
};
const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
{Sigmoid, {}},
{Tanh, {}},
{Relu, {}},
{Exp, {}},
{Log, {}},
{Sign, {}},
{Abs, {}},
{Clamp, {{-2.0f, 2.0f}}},
{Negative, {}},
{Acos, {}},
{Asin, {}},
{Atan, {}},
{Cos, {}},
{Cosh, {}},
{Floor, {}},
{Sin, {}},
{Sinh, {}},
{Sqrt, {}},
{Tan, {}},
{Elu, {{0.1f}}},
{Erf, {}},
{HardSigmoid, {{0.2f, 0.5f}}},
{Selu, {{1.6732f, 1.0507f}}},
{Ceiling, {}},
{Mish, {}},
{HSwish, {}},
{SoftPlus, {}},
- {HSigmoid, {}}
+ {HSigmoid, {}},
+ {RoundHalfToEven, {}},
+ {RoundHalfAwayFromZero, {}}
};
const std::map<ActivationTypes, std::vector<std::vector<float>>> activationParamTypes = {
-Subproject commit d7d8ed46078b637794bc91215e1a982bb0f1683a
+Subproject commit 5ef085d5af65e8966e03cdfcbaa65761d61a5c9a
assert list(node.get_output_shape(0)) == [3, 10]
assert node.get_output_element_type(0) == Type.f32
- # Excluded because this part needs mklddn implementation of Round operation
- # Need to uncomment and check when 37651 will be done.
- # input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
- # expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0]
+ input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
+ expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0]
- # result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN")
- # assert np.allclose(result, expected)
+ result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN")
+ assert np.allclose(result, expected)
def test_round_away():
assert list(node.get_output_shape(0)) == [3, 10]
assert node.get_output_element_type(0) == Type.f32
- # Excluded because this part needs mklddn implementation of Round operation
- # Need to uncomment and check when 37651 will be done.
- # input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
- # expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]
+ input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
+ expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]
- # result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO")
- # assert np.allclose(result, expected)
+ result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO")
+ assert np.allclose(result, expected)
def test_hsigmoid():
"OnnxBackendNodeModelTest.test_clip_default_int8_max_cpu"),
(xfail_issue_38091,
"OnnxBackendNodeModelTest.test_gather_negative_indices_cpu",
- "OnnxBackendNodeModelTest.test_round_cpu",
"OnnxBackendNodeModelTest.test_mvn_cpu",
"OnnxBackendNodeModelTest.test_elu_example_cpu"),
(xfail_issue_35929,