From: Vladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자
Date: Fri, 23 Nov 2018 18:36:58 +0000 (+0300)
Subject: [nnc] Implement elementwise div operation (#2363)
X-Git-Tag: nncc_backup~1260
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=332cc91cbf2ffb9a8849ea526e4f76d955bb1c67;p=platform%2Fcore%2Fml%2Fnnfw.git

[nnc] Implement elementwise div operation (#2363)

Added support for the elementwise div operation:
- in the tflite importer
- in the interpreter
- in the soft backend, using Eigen

Signed-off-by: Vladimir Plazun
---

diff --git a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h
index 3f3e638..19c0ae5 100644
--- a/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h
+++ b/contrib/nnc/include/core/modelIR/operations/ElementwiseOp.h
@@ -27,7 +27,10 @@ class ElementwiseOp : public Operation {
 public:
   enum class OpType {
-    prod = 0, sum = 1, max = 2
+    mul,
+    add,
+    max,
+    div
   };
 
   /**
diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
index 41da426..7b06637 100644
--- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
+++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
@@ -369,11 +369,11 @@ void AclCppOpGenerator::visit(ops::ElementwiseOp& op) {
     // On the last iteration the result is saved in the node output.
     // Different ACL layers used to implement different types of elementwise operations.
     switch (op.getOpType()) {
-      case ops::ElementwiseOp::OpType::prod:
+      case ops::ElementwiseOp::OpType::mul:
         in1 = genMultiplication(out->name() + "_" + "multiplication", i - 1, op.getInputShape(i),
                                 in1, in2, i == prev_nodes.size() - 1 ? out : nullptr);
         break;
-      case ops::ElementwiseOp::OpType::sum:
+      case ops::ElementwiseOp::OpType::add:
         in1 = genAddition(out->name() + "_" + "addition", i - 1, op.getInputShape(i), in1, in2,
                           i == prev_nodes.size() - 1 ? out : nullptr);
         break;
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
index 145aaee..e968054 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
+++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
@@ -563,10 +563,10 @@ CaffeOpCreator::convertEltwise(const std::vector<mir::Operation*>& inputs,
   ops::ElementwiseOp::OpType optype;
   switch (opts.operation()) {
     case EltwiseParameter_EltwiseOp_PROD:
-      optype = ops::ElementwiseOp::OpType::prod;
+      optype = ops::ElementwiseOp::OpType::mul;
       break;
     case EltwiseParameter_EltwiseOp_SUM:
-      optype = ops::ElementwiseOp::OpType::sum;
+      optype = ops::ElementwiseOp::OpType::add;
       // TODO TechDebt: When broadcast is implemented this should create Scale Ops before sum args
       for (float c: opts.coeff())
         assert(c == 1.0f && "Coeff != 1 is not supported");
diff --git a/contrib/nnc/passes/interpreter/Interpreter.cpp b/contrib/nnc/passes/interpreter/Interpreter.cpp
index fe89352..877f945 100644
--- a/contrib/nnc/passes/interpreter/Interpreter.cpp
+++ b/contrib/nnc/passes/interpreter/Interpreter.cpp
@@ -221,22 +221,26 @@ void NNInterpreter::visit(ops::ElementwiseOp& op) {
   auto operands = op.getPrevNodes();
   std::vector<Tensor<float>> ins;
   for (auto &in : operands) {
-    ins.push_back(Tensor<float>(var(in.op->getId())[in.index]));
+    ins.emplace_back(var(in.op->getId())[in.index]);
   }
   float (*func)(float, float); // Another dirty hack
   switch (op.getOpType()) {
-    case ops::ElementwiseOp::OpType::sum:
+    case ops::ElementwiseOp::OpType::add:
       func = [](float a, float b) { return a + b; };
       break;
-    case ops::ElementwiseOp::OpType::prod:
+    case ops::ElementwiseOp::OpType::mul:
       func = [](float a, float b) { return a * b; };
       break;
     case ops::ElementwiseOp::OpType::max:
       func = [](float a, float b) { return std::max(a, b); };
       break;
+    case ops::ElementwiseOp::OpType::div:
+      func = [](float a, float b) { return a / b; };
+      break;
     default:
-      assert(false && "Not supported Optype");
+      assert(false && "Unsupported Optype");
   }
+
   var(op.getId()) = Fill<float>(op.getOutputShape(0), [&func, &ins, &op](const Index& id) {
     float acc = ins[0].at(id);
     for (size_t i = 1; i < ins.size(); i++)
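A note on semantics: the interpreter reduces the operands left-to-right with the selected binary function, so a three-input div evaluates to (a / b) / c per element. Below is a minimal, self-contained sketch of that fold, using plain std::vector in place of MIR tensors (elementwiseFold is a hypothetical stand-in, not nnc code):

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for the interpreter's Fill-based reduction:
// acc starts at the first input, then acc = func(acc, next) per element.
static std::vector<float> elementwiseFold(const std::vector<std::vector<float>>& ins,
                                          float (*func)(float, float)) {
  assert(!ins.empty());
  std::vector<float> acc(ins[0]);
  for (size_t i = 1; i < ins.size(); i++) {
    assert(ins[i].size() == acc.size()); // shapes must match; no broadcasting here
    for (size_t j = 0; j < acc.size(); j++)
      acc[j] = func(acc[j], ins[i][j]);
  }
  return acc;
}

int main() {
  // (12 / 3) / 2 == 2 and (24 / 6) / 2 == 2: the fold is left-associative.
  auto out = elementwiseFold({{12.f, 24.f}, {3.f, 6.f}, {2.f, 2.f}},
                             [](float a, float b) { return a / b; });
  assert(out[0] == 2.f && out[1] == 2.f);
  return 0;
}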
"; break; default: assert(false && "unsupported elementwise operation type"); } - addOpDescr(&op, funcName); + addOpDescr(&op, func_name); } void ModelAnalyzer::visit(mir::ops::EluOp& op) { diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_elementwise.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_elementwise.def index 1b4a078..e72c900 100644 --- a/contrib/nnc/passes/soft_backend/code_snippets/cpp_elementwise.def +++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_elementwise.def @@ -108,3 +108,13 @@ struct Mul { } } }; + +//TODO maybe move to a separate file since everything else here is extracted from TF Lite +//23.11.2018 +struct Div { + static inline void Call(const float* input1_data, const float* input2_data, + float* output_data, Dims<4> dims) { + auto output = MapAsVector(output_data, dims); + output = output.cwiseQuotient(MapAsVector(input2_data, dims)); + } +}; diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp index bf11b5e..28f1dc5 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp @@ -97,6 +97,7 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) { case BuiltinOperator_ADD: case BuiltinOperator_MUL: case BuiltinOperator_MAXIMUM: + case BuiltinOperator_DIV: case BuiltinOperator_TRANSPOSE_CONV: case BuiltinOperator_TANH: case BuiltinOperator_RELU: @@ -197,6 +198,9 @@ void TfliteImporter::walkOperator(const Operator* op) { case BuiltinOperator_MUL: outputs = _opCreator->createMul(inputs, params, op->builtin_options_as()); break; + case BuiltinOperator_DIV: + outputs = _opCreator->createDiv(inputs, params, op->builtin_options_as()); + break; case BuiltinOperator_MAXIMUM: outputs = _opCreator->createMax(inputs, params, op->builtin_options_as()); break; diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp index 3b1a673..e61378c 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp @@ -148,7 +148,7 @@ TFLiteOpCreator::createAdd(InputOps& inputs, InputParams&, const ::tflite::AddOp for (auto i : inputs) descriptors.push_back(i->getOutput(0)); return createOp(opts->fused_activation_function(), descriptors, - ops::ElementwiseOp::OpType::sum); + ops::ElementwiseOp::OpType::add); } std::vector @@ -157,7 +157,16 @@ TFLiteOpCreator::createMul(InputOps& inputs, InputParams&, const ::tflite::MulOp for (auto i : inputs) descriptors.push_back(i->getOutput(0)); return createOp(opts->fused_activation_function(), descriptors, - ops::ElementwiseOp::OpType::prod); + ops::ElementwiseOp::OpType::mul); +} + +std::vector +TFLiteOpCreator::createDiv(InputOps& inputs, InputParams&, const ::tflite::DivOptions* opts) { + std::vector descriptors; + for (auto i : inputs) + descriptors.push_back(i->getOutput(0)); + return createOp(opts->fused_activation_function(), descriptors, + ops::ElementwiseOp::OpType::div); } std::vector diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h index 0077e9b..8574e15 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h @@ -78,10 +78,12 @@ public: std::vector createMul(InputOps&, InputParams&, const ::tflite::MulOptions*); /** @brief Elementwise maximum */ std::vector createMax(InputOps&, 
InputParams&, const ::tflite::MaximumMinimumOptions*); + /** @brief Elementwise division */ + std::vector createDiv(InputOps&, InputParams&, const ::tflite::DivOptions*); /// @brief Free-standing ( non-fused ) activation function based on tflite activation - std::vector createActivation(InputOps&, InputParams&, const ::tflite::ActivationFunctionType); - + std::vector createActivation(InputOps&, InputParams&, + const ::tflite::ActivationFunctionType); /** * @brief Creates a Transposed convolution * @param params 0 - output shape (unused), 1 - kernel, 2- input diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp index 5ca14a6..05c50ff 100644 --- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp +++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp @@ -398,7 +398,7 @@ TEST(cpp_operations_test, add2) { fillTensors(input_n_tensors[0], input_a_tensors[0], shape_data, 1.0f); fillTensors(input_n_tensors[1], input_a_tensors[1], shape_data, 2.0f); auto op_generator = [](mir::Graph& g, const std::vector& inputs) { - return g.create("y", inputs, mir::ops::ElementwiseOp::OpType::sum); + return g.create("y", inputs, mir::ops::ElementwiseOp::OpType::add); }; createAndRunTestGraph(op_generator, ElementWise, input_n_tensors, input_a_tensors[0], @@ -417,7 +417,7 @@ TEST(cpp_operations_test, mul3) { fillTensors(input_n_tensors[1], input_a_tensors[1], shape_data, 2.0f); fillTensors(input_n_tensors[2], input_a_tensors[2], shape_data, 3.0f); auto opGenerator = [](mir::Graph& g, const std::vector& inputs) { - return g.create("y", inputs, mir::ops::ElementwiseOp::OpType::prod); + return g.create("y", inputs, mir::ops::ElementwiseOp::OpType::mul); }; createAndRunTestGraph(opGenerator, ElementWise, input_n_tensors, input_a_tensors[0],