[nnc] Remove transposes of inputs and outputs in importers (#2837)
author Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Fri, 11 Jan 2019 19:58:47 +0000 (22:58 +0300)
committer Efimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Fri, 11 Jan 2019 19:58:47 +0000 (22:58 +0300)
Remove transposes of input and output tensors in the Caffe and Caffe2 importers.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
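
For context, after this change the importers create input variables directly with the shape declared by the model, with no NCHW-to-NHWC transpose inserted at the graph boundary. A minimal sketch of the observable effect (the Graph::create, Shape, and VariableOp usage mirrors the diff below; the Graph.h include, the getOutputShape() accessor, and the concrete 1x3x224x224 shape are illustrative assumptions, not part of this commit):

    #include <cassert>
    #include "core/modelIR/Graph.h"   // assumed header for nnc::mir::Graph
    #include "core/modelIR/Shape.h"
    #include "core/modelIR/operations/VariableOp.h"

    int main() {
      // Hypothetical check, not part of this commit: an imported input now
      // keeps its declared NCHW shape instead of being transposed to NHWC.
      nnc::mir::Graph graph;
      nnc::mir::Shape input_shape{1, 3, 224, 224};  // N, C, H, W as declared by the model
      auto input = graph.create<nnc::mir::ops::VariableOp>("data", input_shape);
      // getOutputShape() is assumed here; before this change the importer
      // would have reported {1, 224, 224, 3} for this input.
      assert(input->getOutputShape(0) == input_shape);
      return 0;
    }
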
contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp

index a6c543b..2aaccdc 100644 (file)
@@ -27,7 +27,6 @@
 #include "caffe2_op_creator.h"
 
 #include "core/modelIR/Shape.h"
-#include "core/modelIR/operations/VariableOp.h"
 #include "pass/PassException.h"
 
 #include "caffe2_proto_helper.h"
@@ -35,7 +34,6 @@
 namespace nnc {
 
 using namespace ::caffe2;
-using VariableOp = nnc::mir::ops::VariableOp;
 using nnc::mir::Shape;
 
 Caffe2Importer::Caffe2Importer(std::string predictNet, std::string initNet,
index ee92ff9..eb54491 100644 (file)
@@ -492,15 +492,11 @@ Caffe2OpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs,
 }
 
 std::vector<IODescriptor>
-Caffe2OpCreator::createInput(const std::string& input_name, const mir::Shape& input_shape) {
+Caffe2OpCreator::createInput(const std::string& name, const mir::Shape& shape) {
   // TODO For now we only support convolutional networks with one element per batch.
-  assert(input_shape.rank() == 4 && input_shape.dim(0) == 1);
-
-  // TODO Do not transpose data on input and remove transpose.
-  auto transposed_shape = mir::Shape{input_shape.dim(0), input_shape.dim(2),
-                                     input_shape.dim(3), input_shape.dim(1)};
-  auto variable = _graph->create<ops::VariableOp>(input_name, transposed_shape);
-  return {convertMIRToCaffe(variable->getOutput(0))};
+  assert(shape.rank() == 4 && shape.dim(0) == 1);
+  auto variable = _graph->create<ops::VariableOp>(name, shape);
+  return {variable->getOutput(0)};
 }
 
 } // namespace nnc
index ec2bbc5..5c6fe9f 100644 (file)
@@ -72,7 +72,7 @@ public:
                                                        const ::caffe2::OperatorDef&,
                                                        const MIRTensors&);
 
-  std::vector<mir::IODescriptor> createInput(const std::string&, const mir::Shape&);
+  std::vector<mir::IODescriptor> createInput(const std::string& name, const mir::Shape& shape);
 
   std::vector<mir::IODescriptor> convertMaxPool(const std::vector<mir::IODescriptor>&,
                                                 const ::caffe2::OperatorDef&);
index f9fcd09..9fe6aa1 100644 (file)
@@ -170,14 +170,8 @@ CaffeOpCreator::convertInput(const LayerParameter& layer) {
     const auto& blob_name = layer.top(i);
     const auto& blob_shape = params.shape(num_shapes == 1 ? 0 : i);
     Shape shape = ShapeHelper::createShape(blob_shape.dim(), blob_shape.dim_size());
-
-    // TODO For now we only support convolutional networks with one element per batch.
-    assert(shape.rank() == 4 && shape.dim(0) == 1);
-
-    // TODO Do not transpose data on input and remove transpose.
-    shape = Shape{shape.dim(0), shape.dim(2), shape.dim(3), shape.dim(1)};
     auto variable = createOp<ops::VariableOp>(blob_name, shape);
-    descriptors.push_back(convertMIRToCaffe(variable->getOutput(0)));
+    descriptors.push_back(variable->getOutput(0));
   }
 
   return descriptors;
@@ -505,10 +499,6 @@ CaffeOpCreator::convertScale(const caffe::LayerParameter& layer,
     result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias_weights);
   }
 
-  // FIXME Workaround until the tests for style transfer network are regenerated.
-  if (layer.top(0) == "output")
-    return {result->getOutput(0)};
-
   return {convertMIRToCaffe(result->getOutput(0))};
 }
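
With the special case for the "output" blob removed, every Scale result is routed through convertMIRToCaffe uniformly. That helper is not shown in this diff; as a rough, assumed sketch it amounts to a single axis permutation from MIR's NHWC layout back to Caffe's NCHW (the TransposeOp type, its axis-order argument, and the exact signature are assumptions, not taken from this commit):

    // Assumed sketch only; the real convertMIRToCaffe lives elsewhere in
    // caffe_op_creator.cpp and is not part of this commit.
    mir::IODescriptor CaffeOpCreator::convertMIRToCaffe(mir::IODescriptor arg) {
      // MIR tensors are NHWC; Caffe expects NCHW, so permute the axes back.
      auto transpose = createOp<ops::TransposeOp>("", arg,
                                                  std::vector<std::size_t>{0, 3, 1, 2});
      return transpose->getOutput(0);
    }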