[nnc] TFlite import: Implement FullyConnected (#1012)
authorDenis Maksimenko/AI Tools Lab /SRR/Assistant Engineer/삼성전자 <d.maksimenko@partner.samsung.com>
Wed, 15 Aug 2018 12:13:51 +0000 (15:13 +0300)
committerSergey Vostokov/AI Tools Lab /SRR/Staff Engineer/삼성전자 <s.vostokov@samsung.com>
Wed, 15 Aug 2018 12:13:51 +0000 (15:13 +0300)
The TFLite importer can now handle the FullyConnected op.

Signed-off-by: Denis Maksimenko <d.maksimenko@partner.samsung.com>
contrib/nnc/libs/frontend/tflite/include/tflite_op_creator.h
contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp
contrib/nnc/libs/frontend/tflite/src/tflite_op_creator.cpp

index 20dd975..c671a27 100644 (file)
@@ -56,6 +56,8 @@ public:
                                         const SoftmaxOptions *opts);
   std::vector<INode::Ref> createReshape(InputOps inputs, InputParams params,
                                         const ReshapeOptions *opts);
+  std::vector<INode::Ref> createFullyConnected(InputOps inputs, InputParams params,
+                                               const FullyConnectedOptions *opts);
 
 private:
   Graph *graph = nullptr;
index f260352..5da0232 100644 (file)
@@ -94,6 +94,9 @@ void IrVisitor::visit(const Operator *op)
   case BuiltinOperator_RESHAPE:
     outputs = opCreator->createReshape(inputs, params, op->builtin_options_as<ReshapeOptions>());
     break;
+  case BuiltinOperator_FULLY_CONNECTED:
+    outputs = opCreator->createFullyConnected(inputs, params, op->builtin_options_as<FullyConnectedOptions>());
+    break;
   case BuiltinOperator_SOFTMAX:
     outputs = opCreator->createSoftmax(inputs, params, op->builtin_options_as<SoftmaxOptions>());
     break;
@@ -163,6 +166,10 @@ std::vector<std::shared_ptr<IrTensor>> IrVisitor::createOpParams(const Operator
         // don't forget to change this if tensor shape processing architecture changes.
         paramsForOp.emplace_back(transposeTensor<1, 2, 3, 0>(tensor));
       }
+      else if (opcode == BuiltinOperator_FULLY_CONNECTED && t->shape()->size() == 2)
+      {
+        paramsForOp.emplace_back(transposeTensor<1, 0>(tensor));
+      }
       else
       {
         paramsForOp.push_back(tensor);
index 9e87d20..629aa9a 100644 (file)
@@ -3,6 +3,7 @@
 #include "nnc/core/IR/model/operations/concat_op.h"
 #include "nnc/core/IR/model/operations/conv_2d_op.h"
 #include "nnc/core/IR/model/operations/depthwise_conv2d_op.h"
+#include "nnc/core/IR/model/operations/fully_connected_op.h"
 #include "nnc/core/IR/model/operations/relu_op.h"
 #include "nnc/core/IR/model/operations/capped_relu_op.h"
 #include "nnc/core/IR/model/operations/softmax_op.h"
@@ -92,6 +93,18 @@ std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams pa
   return outputs;
 }
 
+std::vector<INode::Ref> OpCreator::createFullyConnected(InputOps inputs, InputParams params,
+                                                        const FullyConnectedOptions *opts)
+{
+  // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
+  auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
+  uint32_t fcInputSize = params[0]->getShape().dim(0);
+  outputs[0]->getOperation()->setOutputShape(0, {1, fcInputSize});
+
+  auto fcOutputs = createOp<ops::FullyConnectedOp>(outputs, ActivationFunctionType_NONE, std::move(*params[0]));
+  return createOp<ops::BiasAddOp>(fcOutputs, opts->fused_activation_function(), std::move(*params[1]));
+}
+
 INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activationType)
 {
   INode::Ref activation;