From fac1298af670bf4dc201c77372625679b2145b06 Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Tue, 15 Jan 2019 23:00:52 +0300
Subject: [PATCH] [nnc] Adapt ONNX importer to modified ScaleOp interface
 (#2860)

Take into account the modified interface of ScaleOp in the ONNX importer.

Signed-off-by: Sergei Barannikov
---
 contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp | 27 +++++++++++++---------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index a76ea1e..04031af 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -332,7 +332,8 @@ ONNXOpCreator::convertBatchNorm(const std::vector<IODescriptor>& inputs,
   Tensor<float> var_accessor(var_tensor);
   for (auto& idx: ShapeRange(scale_tensor.getShape()))
     multiplier.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
-  result = createOp<ops::ScaleOp>(result->getOutput(0), scale_tensor);
+  auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
+  result = createOp<ops::ScaleOp>(result->getOutput(0), scale);
 
   // overall_res = res2 + bias
   auto bias = createOp<ops::ConstantOp>(bias_tensor)->getOutput(0);
@@ -358,9 +359,11 @@ ONNXOpCreator::convertScale(const std::vector<IODescriptor>& inputs,
   bool found;
   float value;
   std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
-  float scale = found ? value : 1.0;
+  float scale_val = found ? value : 1.0;
   const auto& shape = inputs[0].op->getOutputShape(inputs[0].index);
-  auto result = createOp<ops::ScaleOp>(inputs[0], createTensor(scale, shape));
+  auto scale_tensor = createTensor(scale_val, shape);
+  auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
+  auto result = createOp<ops::ScaleOp>(inputs[0], scale);
   return {result->getOutput(0)};
 }
 
@@ -385,9 +388,9 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   std::tie (found, ivalue) = getIntAttribute(onnx_node, "broadcast");
   bool broadcast = found ? ivalue : 0;
   std::tie (found, fvalue) = getFloatAttribute(onnx_node, "alpha");
-  float alpha = found ? fvalue : 1.0;
+  float alpha_val = found ? fvalue : 1.0;
   std::tie (found, fvalue) = getFloatAttribute(onnx_node, "beta");
-  float beta = found ? fvalue : 1.0;
+  float beta_val = found ? fvalue : 1.0;
 
   // 1. Prepare input matrix A
   // Flatten the shape by dim(0)
@@ -396,9 +399,11 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   auto input_a = createOp<ops::ReshapeOp>(inputs[0], shape0);
   if (trans_a)
     input_a = createOp<ops::TransposeOp>(input_a->getOutput(0), std::vector<std::size_t>{1, 0});
-  if (alpha != 1.0)
-    input_a = createOp<ops::ScaleOp>(input_a->getOutput(0),
-                                     createTensor(alpha, input_a->getOutputShape(0)));
+  if (alpha_val != 1.0) {
+    auto alpha_tensor = createTensor(alpha_val, input_a->getOutputShape(0));
+    auto alpha = createOp<ops::ConstantOp>(alpha_tensor)->getOutput(0);
+    input_a = createOp<ops::ScaleOp>(input_a->getOutput(0), alpha);
+  }
 
   // 2. Prepare input matrix B
   //
@@ -414,13 +419,13 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   // 3. Prepare input matrix C
   //
   auto input_c = inputs[2];
-  auto beta_tensor = createTensor(beta, input_c.op->getOutputShape(0));
+  auto beta_tensor = createTensor(beta_val, input_c.op->getOutputShape(0));
   // TODO: check 'broadcast' attribute here
   if ((mult_a_b.rank() == 2) && (input_c.op->getOutputShape(0).rank() == 1)) {
     beta_tensor = TensorVariant(beta_tensor, mult_a_b);
   }
-  auto constant = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
-  std::vector<IODescriptor> descriptors = {constant, input_c};
+  auto beta = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
+  std::vector<IODescriptor> descriptors = {beta, input_c};
   auto c_mult = createOp<ops::ElementwiseOp>(descriptors, ops::ElementwiseOp::OpType::mul);
   assert(c_mult->getOutputShape(0) == mult_a_b);
   auto result = createOp<ops::GemmOp>(input_a->getOutput(0), input_b, c_mult->getOutput(0));
-- 
2.7.4
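
Note on the pattern: every hunk in this patch applies the same change. ScaleOp
no longer accepts its scale factor as raw tensor data; the data must first be
materialized as a ConstantOp node, whose output is then wired into ScaleOp as
an ordinary graph input. A minimal before/after sketch of that pattern, using
only the helpers visible in this diff (createOp, createTensor, getOutput); the
surrounding variables (input, scale_val, shape) are assumptions for
illustration:

  // Before: ScaleOp consumed the scale data directly as a TensorVariant.
  // auto result = createOp<ops::ScaleOp>(input, scale_tensor);

  // After: wrap the constant data in a ConstantOp node first, then pass
  // its output to ScaleOp like any other graph input.
  auto scale_tensor = createTensor(scale_val, shape);
  auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
  auto result = createOp<ops::ScaleOp>(input, scale);

Routing constants through ConstantOp makes every operation input a uniform
graph edge, which appears to be the motivation for the modified interface.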