From: Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics
Date: Tue, 15 Jan 2019 20:00:52 +0000 (+0300)
Subject: [nnc] Adapt ONNX importer to modified ScaleOp interface (#2860)
X-Git-Tag: nncc_backup~958
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=fac1298af670bf4dc201c77372625679b2145b06;p=platform%2Fcore%2Fml%2Fnnfw.git

[nnc] Adapt ONNX importer to modified ScaleOp interface (#2860)

Take into account the modified interface of ScaleOp in the ONNX importer.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
---

diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index a76ea1e..04031af 100644
--- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
+++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
@@ -332,7 +332,8 @@ ONNXOpCreator::convertBatchNorm(const std::vector<IODescriptor>& inputs,
   Tensor<float> var_accessor(var_tensor);
   for (auto& idx: ShapeRange(scale_tensor.getShape()))
     multiplier.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
-  result = createOp<ops::ScaleOp>(result->getOutput(0), scale_tensor);
+  auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
+  result = createOp<ops::ScaleOp>(result->getOutput(0), scale);
 
   // overall_res = res2 + bias
   auto bias = createOp<ops::ConstantOp>(bias_tensor)->getOutput(0);
@@ -358,9 +359,11 @@ ONNXOpCreator::convertScale(const std::vector<IODescriptor>& inputs,
   bool found;
   float value;
   std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
-  float scale = found ? value : 1.0;
+  float scale_val = found ? value : 1.0;
   const auto& shape = inputs[0].op->getOutputShape(inputs[0].index);
-  auto result = createOp<ops::ScaleOp>(inputs[0], createTensor(scale, shape));
+  auto scale_tensor = createTensor(scale_val, shape);
+  auto scale = createOp<ops::ConstantOp>(scale_tensor)->getOutput(0);
+  auto result = createOp<ops::ScaleOp>(inputs[0], scale);
   return {result->getOutput(0)};
 }
 
@@ -385,9 +388,9 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   std::tie (found, ivalue) = getIntAttribute(onnx_node, "broadcast");
   bool broadcast = found ? ivalue : 0;
   std::tie (found, fvalue) = getFloatAttribute(onnx_node, "alpha");
-  float alpha = found ? fvalue : 1.0;
+  float alpha_val = found ? fvalue : 1.0;
   std::tie (found, fvalue) = getFloatAttribute(onnx_node, "beta");
-  float beta = found ? fvalue : 1.0;
+  float beta_val = found ? fvalue : 1.0;
 
   // 1. Prepare input matrix A
   // Flatten the shape by dim(0)
@@ -396,9 +399,11 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   auto input_a = createOp<ops::ReshapeOp>(inputs[0], shape0);
   if (trans_a)
     input_a = createOp<ops::TransposeOp>(input_a->getOutput(0), std::vector<std::size_t>{1, 0});
-  if (alpha != 1.0)
-    input_a = createOp<ops::ScaleOp>(input_a->getOutput(0),
-                                     createTensor(alpha, input_a->getOutputShape(0)));
+  if (alpha_val != 1.0) {
+    auto alpha_tensor = createTensor(alpha_val, input_a->getOutputShape(0));
+    auto alpha = createOp<ops::ConstantOp>(alpha_tensor)->getOutput(0);
+    input_a = createOp<ops::ScaleOp>(input_a->getOutput(0), alpha);
+  }
 
   // 2. Prepare input matrix B
   //
@@ -414,13 +419,13 @@ ONNXOpCreator::convertGemm(const std::vector<IODescriptor>& inputs,
   // 3. Prepare input matrix C
   //
   auto input_c = inputs[2];
-  auto beta_tensor = createTensor(beta, input_c.op->getOutputShape(0));
+  auto beta_tensor = createTensor(beta_val, input_c.op->getOutputShape(0));
   // TODO: check 'broadcast' attribute here
   if ((mult_a_b.rank() == 2) && (input_c.op->getOutputShape(0).rank() == 1)) {
     beta_tensor = TensorVariant(beta_tensor, mult_a_b);
   }
-  auto constant = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
-  std::vector<IODescriptor> descriptors = {constant, input_c};
+  auto beta = createOp<ops::ConstantOp>(beta_tensor)->getOutput(0);
+  std::vector<IODescriptor> descriptors = {beta, input_c};
   auto c_mult = createOp<ops::ElementwiseOp>(descriptors, ops::ElementwiseOp::OpType::mul);
   assert(c_mult->getOutputShape(0) == mult_a_b);
   auto result = createOp<ops::GemmOp>(input_a->getOutput(0), input_b, c_mult->getOutput(0));
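
Note (commentary on this archived commit, not part of the patch): every hunk above follows one pattern. ScaleOp's interface apparently changed so that the scale factor is no longer attached to the operation as a raw TensorVariant; instead the constant is materialized as a ConstantOp node whose output is passed to ScaleOp as an ordinary graph input. The self-contained sketch below illustrates that pattern with simplified stand-in types; Op, ConstantOp, ScaleOp, and createOp here are minimal mocks written for this note, not the actual mir/nnc classes.

#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>

// Stand-in for mir::TensorVariant: just holds constant data.
struct TensorVariant {
  std::vector<float> data;
};

// Stand-in for a graph operation node.
struct Op {
  std::vector<const Op*> inputs;
  virtual ~Op() = default;
  // Simplified: a node stands for its own single output.
  const Op* getOutput(std::size_t) const { return this; }
};

// Wraps a constant tensor as a first-class graph node.
struct ConstantOp : Op {
  TensorVariant value;
  explicit ConstantOp(TensorVariant v) : value(std::move(v)) {}
};

// New-style ScaleOp: the scale arrives as a second graph input,
// not as a tensor attribute of the operation.
struct ScaleOp : Op {
  ScaleOp(const Op* input, const Op* scale) { inputs = {input, scale}; }
};

// Simplified analogue of ONNXOpCreator::createOp: constructs a node
// and keeps it alive in the graph's node list.
template <typename T, typename... Args>
T* createOp(std::vector<std::unique_ptr<Op>>& graph, Args&&... args) {
  graph.push_back(std::make_unique<T>(std::forward<Args>(args)...));
  return static_cast<T*>(graph.back().get());
}

int main() {
  std::vector<std::unique_ptr<Op>> graph;
  auto* input = createOp<ConstantOp>(graph, TensorVariant{{1.f, 2.f, 3.f}});
  TensorVariant scale_tensor{{2.f, 2.f, 2.f}};

  // Old interface (roughly what the removed lines did):
  //   result = createOp<ScaleOp>(graph, input, scale_tensor);  // no longer exists
  // New interface: materialize the constant first, then feed its output in.
  auto* scale = createOp<ConstantOp>(graph, scale_tensor)->getOutput(0);
  auto* result = createOp<ScaleOp>(graph, input->getOutput(0), scale);

  assert(result->inputs.size() == 2);  // the scale is now an ordinary input
  return 0;
}

A plausible motivation for such a change: once constants are ordinary graph nodes, every ScaleOp input is produced by some operation, so graph traversal and optimization passes need no special case for attribute tensors.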