[nnc] Remove the debugTranspose option (#2813)
authorСергей Баранников/AI Tools Lab /SRR/Engineer/삼성전자 <s.barannikov@samsung.com>
Fri, 11 Jan 2019 17:28:51 +0000 (20:28 +0300)
committerEfimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Fri, 11 Jan 2019 17:28:51 +0000 (20:28 +0300)
Remove the `debugTranspose` option which is no longer needed.

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
contrib/nnc/driver/Options.cpp
contrib/nnc/include/option/Options.h
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.h

index 889ca81..1621d74 100644 (file)
@@ -169,13 +169,6 @@ Option<std::string> interInputData(optname("--input-model-data"),
                                    optional(true),
                                    optvalues(""),
                                    checkInFile);
-/**
- * Miscellaneous options.
- */
-Option<bool> debugTranspose(optname("--debug-transpose"),
-                           overview("insert transpose operations for debugging purposes"),
-                           false,
-                           optional(true));
 
 } // namespace cli
 } // namespace nnc
index 670afc8..1e25eb2 100644 (file)
@@ -59,11 +59,6 @@ extern Option<std::string> artifactName;  // name of artifact
  */
 extern Option<std::string> interInputData;  // input data for model
 
-/**
- * Miscellaneous options.
- */
-extern Option<bool> debugTranspose;
-
 } // namespace cli
 } // namespace nnc
 
index f2119e9..9a3a774 100644 (file)
@@ -103,24 +103,15 @@ const ArtifactModule& AclCppOpGenerator::generate(mir::Graph* g) {
 }
 
 void AclCppOpGenerator::visit(ops::ConcatOp& op) {
+  static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES",
+                                     "arm_compute::DataLayoutDimension::CHANNEL",
+                                     "arm_compute::DataLayoutDimension::HEIGHT",
+                                     "arm_compute::DataLayoutDimension::WIDTH"};
 
   int axis = op.getAxis();
-  assert(axis < 4 && axis >= 0 && "axis outside this range is not supported in ACL");
-
-  const char* axis_name;
-  if (cli::debugTranspose) {
-    static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES",
-                                       "arm_compute::DataLayoutDimension::CHANNEL",
-                                       "arm_compute::DataLayoutDimension::HEIGHT",
-                                       "arm_compute::DataLayoutDimension::WIDTH"};
-    axis_name = axis_names[axis];
-  } else {
-    static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES",
-                                       "arm_compute::DataLayoutDimension::HEIGHT",
-                                       "arm_compute::DataLayoutDimension::WIDTH",
-                                       "arm_compute::DataLayoutDimension::CHANNEL"};
-    axis_name = axis_names[axis];
-  }
+  assert(axis >= 0 && axis < sizeof(axis_names) / sizeof(axis_names[0]) &&
+         "axis outside this range is not supported in ACL");
+  const char* axis_name = axis_names[axis];
 
   auto out = genTensor(op, op.getOutputShape(0));
   auto prefix = out->name() + "_concatenate_layer";
@@ -255,17 +246,8 @@ shared_ptr<ArtifactId>
 AclCppOpGenerator::genTransposeMIRtoACL(const string& name,
                                         const Shape& input_shape,
                                         const shared_ptr<ArtifactId>& input) {
-
-  if (!cli::debugTranspose) {
-    // Generate output tensor description in the DOM.
-    shared_ptr<ArtifactId> output = AF::id(name);
-
-    _constrBlock->var("arm_compute::CLTensor&", output->name(), {}, {input});
-    return output;
-  }
   Shape transposed_shape = transposeShape<0, 3, 1, 2>(input_shape);
-  shared_ptr<ArtifactId> transposed_id =
-      genTensor(name, transposed_shape, false);
+  shared_ptr<ArtifactId> transposed_id = genTensor(name, transposed_shape, false);
   const bool allocate_at_inference = true;
   genTranspose(input, transposed_id, {0, 3, 1, 2}, allocate_at_inference);
   return transposed_id;
@@ -275,18 +257,8 @@ shared_ptr<ArtifactId>
 AclCppOpGenerator::genTransposeACLtoMIR(const string& name,
                                         const Shape& input_shape,
                                         const shared_ptr<ArtifactId>& input) {
-
-  if (!cli::debugTranspose) {
-    // Generate output tensor description in the DOM.
-    shared_ptr<ArtifactId> output = AF::id(name);
-
-    _constrBlock->var("arm_compute::CLTensor&", output->name(), {}, {input});
-    return output;
-  }
   Shape transposed_shape = transposeShape<0, 2, 3, 1>(input_shape);
-  shared_ptr<ArtifactId> transposed_id =
-      genTensor(name, transposed_shape, false);
-
+  shared_ptr<ArtifactId> transposed_id = genTensor(name, transposed_shape, false);
   const bool allocate_at_inference = false;
   genTranspose(input, transposed_id, {0, 2, 3, 1}, allocate_at_inference);
   return transposed_id;
@@ -355,10 +327,8 @@ void AclCppOpGenerator::visit(ops::PoolOp& op) {
   shared_ptr<ArtifactId> output =
       genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
 
-  if (cli::debugTranspose) {
-    genTensorDeallocation(_infBlock, transposed_input);
-    genTensorDeallocation(_infBlock, transposed_output);
-  }
+  genTensorDeallocation(_infBlock, transposed_input);
+  genTensorDeallocation(_infBlock, transposed_output);
 
   if (op.getNextNodes().empty())
     _outputs.insert(&op);
@@ -474,10 +444,8 @@ void AclCppOpGenerator::visit(ops::BiasAddOp& op) {
     shared_ptr<ArtifactId> output =
         genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
 
-    if (cli::debugTranspose) {
-      genTensorDeallocation(_infBlock, transposed_input);
-      genTensorDeallocation(_infBlock, transposed_output);
-    }
+    genTensorDeallocation(_infBlock, transposed_input);
+    genTensorDeallocation(_infBlock, transposed_output);
   }
 
   if (op.getNextNodes().empty())
@@ -486,14 +454,7 @@ void AclCppOpGenerator::visit(ops::BiasAddOp& op) {
 
 void AclCppOpGenerator::visit(ops::VariableOp& op) {
   shared_ptr<ArtifactId> tensor;
-  if (cli::debugTranspose) {
-    tensor = genTensor(op, op.getOutputShape(0));
-  } else {
-    if (op.getOutputShape(0).rank() == 4)
-      tensor = genTensor(op, transposeShape<0, 3, 1, 2>(op.getOutputShape(0)));
-    else
-      tensor = genTensor(op, op.getOutputShape(0));
-  }
+  tensor = genTensor(op, op.getOutputShape(0));
   addToPersistentTensors(tensor);
 }
 
@@ -627,10 +588,8 @@ void AclCppOpGenerator::visit(ops::ScaleOp& op) {
   shared_ptr<ArtifactId> output =
       genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
 
-  if (cli::debugTranspose) {
-    genTensorDeallocation(_infBlock, transposed_input);
-    genTensorDeallocation(_infBlock, transposed_output);
-  }
+  genTensorDeallocation(_infBlock, transposed_input);
+  genTensorDeallocation(_infBlock, transposed_output);
 
   if (op.getNextNodes().empty())
     _outputs.insert(&op);
@@ -773,10 +732,8 @@ void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, cons
   shared_ptr<ArtifactId> output =
       genTransposeACLtoMIR(output_tensor_name, transposed_output_shape, transposed_output);
 
-  if (cli::debugTranspose) {
-    genTensorDeallocation(_infBlock, transposed_input);
-    genTensorDeallocation(_infBlock, transposed_output);
-  }
+  genTensorDeallocation(_infBlock, transposed_input);
+  genTensorDeallocation(_infBlock, transposed_output);
 
   if (op.getNextNodes().empty())
     _outputs.insert(&op);
@@ -792,11 +749,7 @@ void AclCppOpGenerator::genActivation(mir::Operation& op, const std::string& act
   auto in = AF::id(tensorName(in_op));
 
   // Create the output tensor in the DOM and return its id.
-  shared_ptr<ArtifactId> output;
-  if (cli::debugTranspose)
-    output = genTensor(op, op.getOutputShape(0));
-  else
-    output = genTensor(op, transposeShape<0, 3, 1, 2>(op.getOutputShape(0)));
+  shared_ptr<ArtifactId> output = genTensor(op, op.getOutputShape(0));
 
   auto prefix = output->name() + "_activation_layer";
 
index 5258c0c..d285aba 100644 (file)
@@ -176,6 +176,7 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
     case CaffeOpType::split:
     case CaffeOpType::eltwise:
     case CaffeOpType::ELU:
+    case CaffeOpType::ReLU:
     case CaffeOpType::embed:
     case CaffeOpType::sigmoid:
     case CaffeOpType::tanh:
@@ -192,9 +193,6 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
     case CaffeOpType::reshape:
       _opCreator->checkReshape(lp.reshape_param(), _problemsOpSet);
       break;
-    case CaffeOpType::ReLU:
-      _opCreator->checkReLU(lp.relu_param(), _problemsOpSet);
-      break;
     case CaffeOpType::batchNorm:
       _opCreator->checkBatchNorm(lp, _problemsOpSet);
       break;
index 0effb9f..f9fcd09 100644 (file)
@@ -58,23 +58,15 @@ using namespace mir;
 using namespace ::caffe;
 
 mir::IODescriptor CaffeOpCreator::convertCaffeToMIR(const mir::IODescriptor& arg) {
-  if (cli::debugTranspose) {
-    // NCHW -> NHWC
-    auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 2, 3, 1});
-    return transpose->getOutput(0);
-  } else {
-    return arg;
-  }
+  // NCHW -> NHWC
+  auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 2, 3, 1});
+  return transpose->getOutput(0);
 }
 
 mir::IODescriptor CaffeOpCreator::convertMIRToCaffe(const mir::IODescriptor& arg) {
-  if (cli::debugTranspose) {
-    // NHWC -> NCHW
-    auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 3, 1, 2});
-    return transpose->getOutput(0);
-  } else {
-    return arg;
-  }
+  // NHWC -> NCHW
+  auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 3, 1, 2});
+  return transpose->getOutput(0);
 }
 
 mir::IODescriptor CaffeOpCreator::createAdd(mir::IODescriptor arg1, mir::IODescriptor arg2) {
@@ -341,16 +333,9 @@ CaffeOpCreator::convertInnerProduct(const LayerParameter& layer,
 std::vector<mir::IODescriptor>
 CaffeOpCreator::convertConcat(const caffe::LayerParameter& layer,
                               const std::vector<mir::IODescriptor>& inputs) {
-  auto& opts = layer.concat_param();
-  if (cli::debugTranspose) {
-    auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, opts.axis());
-    return {concat->getOutput(0)};
-  } else {
-    assert(opts.axis() == 1);
-    int32_t axis = 3;
-    auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, axis);
-    return {concat->getOutput(0)};
-  }
+  const auto& params = layer.concat_param();
+  auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, params.axis());
+  return {concat->getOutput(0)};
 }
 
 static ops::PoolOp::PoolingType getPoolingType(const PoolingParameter& pool_param) {
@@ -442,33 +427,25 @@ CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer,
 std::vector<mir::IODescriptor>
 CaffeOpCreator::convertSoftmax(const caffe::LayerParameter& layer,
                                const std::vector<mir::IODescriptor>& inputs) {
-  auto& opts = layer.softmax_param();
-
-  if (cli::debugTranspose) {
-    // CPP and ACL backends are able to perform Softmax only along the last axis.
-    if (inputs[0].op->getOutputShape(inputs[0].index).rank() == 4) {
-      // For now, we only account for the most common case.
-      if (opts.axis() != 1)
-        throw PassException("Softmax: unsupported axis");
-      int32_t axis = 3;
-      auto input = createOp<ops::TransposeOp>(layer.name() + ".trans1", inputs[0],
-                                              std::vector<std::size_t>{0, 2, 3, 1});
-      auto softmax = createOp<ops::SoftmaxOp>(layer.name(), input->getOutput(0), axis);
-      auto result = createOp<ops::TransposeOp>(layer.name() + ".trans2", softmax->getOutput(0),
-                                               std::vector<std::size_t>{0, 3, 1, 2});
-      return {result->getOutput(0)};
-    }
+  const auto& params = layer.softmax_param();
 
-    auto softmax = createOp<ops::SoftmaxOp>(layer.name(), inputs[0], opts.axis());
-    return {softmax->getOutput(0)};
-  } else {
-    auto& input = inputs[0];
-    auto& input_shape = input.op->getOutputShape(input.index);
-    if (opts.axis() != 1)
+  // CPP and ACL backends are able to perform Softmax only along the last axis.
+  // FIXME Do it in backends.
+  if (inputs[0].op->getOutputShape(inputs[0].index).rank() == 4) {
+    // For now, we only account for the most common case.
+    if (params.axis() != 1)
       throw PassException("Softmax: unsupported axis");
-    auto softmax = createOp<ops::SoftmaxOp>(layer.name(), inputs[0], -1);
-    return {softmax->getOutput(0)};
+    int32_t axis = 3;
+    auto input = createOp<ops::TransposeOp>(layer.name() + ".trans1", inputs[0],
+                                            std::vector<std::size_t>{0, 2, 3, 1});
+    auto softmax = createOp<ops::SoftmaxOp>(layer.name(), input->getOutput(0), axis);
+    auto result = createOp<ops::TransposeOp>(layer.name() + ".trans2", softmax->getOutput(0),
+                                             std::vector<std::size_t>{0, 3, 1, 2});
+    return {result->getOutput(0)};
   }
+
+  auto softmax = createOp<ops::SoftmaxOp>(layer.name(), inputs[0], params.axis());
+  return {softmax->getOutput(0)};
 }
 
 void CaffeOpCreator::checkReshape(const ReshapeParameter& opts,
@@ -501,10 +478,6 @@ CaffeOpCreator::convertReshape(const caffe::LayerParameter& layer,
   return {reshape->getOutput(0)};
 }
 
-void CaffeOpCreator::checkReLU(const ReLUParameter& opts,
-                               std::set<std::string>& problems_op_set) {
-}
-
 std::vector<mir::IODescriptor>
 CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
                             const std::vector<mir::IODescriptor>& inputs) {
@@ -516,7 +489,6 @@ CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
     relu = createOp<ops::ReluOp>(layer.name(), inputs[0]);
   }
 
-
   return {relu->getOutput(0)};
 }
 
@@ -533,6 +505,10 @@ CaffeOpCreator::convertScale(const caffe::LayerParameter& layer,
     result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias_weights);
   }
 
+  // FIXME Workaround until the tests for style transfer network are regenerated.
+  if (layer.top(0) == "output")
+    return {result->getOutput(0)};
+
   return {convertMIRToCaffe(result->getOutput(0))};
 }
 
index 4f02a74..b18bfd1 100644 (file)
@@ -118,8 +118,6 @@ public:
 
   void checkReshape(const caffe::ReshapeParameter& opts, std::set<std::string>&);
 
-  void checkReLU(const caffe::ReLUParameter& opts, std::set<std::string>&);
-
   void checkBatchNorm(const caffe::LayerParameter& layer, std::set<std::string>&);
 
   void checkLSTM(const caffe::LayerParameter& layer, std::set<std::string>&);