Move tensor transpose operation to nnc linalg (#853)
author Dmitry Mozolev/AI Tools Lab /SRR/Engineer/삼성전자 <d.mozolev@samsung.com>
Tue, 31 Jul 2018 16:09:23 +0000 (19:09 +0300)
committer Sergey Vostokov/AI Tools Lab /SRR/Staff Engineer/삼성전자 <s.vostokov@samsung.com>
Tue, 31 Jul 2018 16:09:23 +0000 (01:09 +0900)
Moved the tensor transpose operation, which operates on TensorVariant,
from the frontend-common module to nnc core linalg.
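
For reference, a minimal usage sketch (mirroring the transposeTensor<2, 3, 1, 0>
call in caffe_model_visitor.cpp; "weights" is a hypothetical 4-D OIHW
convolution-weight TensorVariant held in a std::shared_ptr):

    using nncc::contrib::core::data::util::transposeTensor;

    // Reorder Caffe OIHW convolution weights into the HWCN layout used by the IR
    auto hwcnWeights = transposeTensor<2, 3, 1, 0>(weights);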

Signed-off-by: Dmitry Mozolev <d.mozolev@samsung.com>
contrib/nnc/libs/core/include/nnc/core/linalg/TensorUtil.h [new file with mode: 0644]
contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp
contrib/nnc/libs/frontend/include/shape_helper.h
contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp

diff --git a/contrib/nnc/libs/core/include/nnc/core/linalg/TensorUtil.h b/contrib/nnc/libs/core/include/nnc/core/linalg/TensorUtil.h
new file mode 100644
index 0000000..82889f1
--- /dev/null
+++ b/contrib/nnc/libs/core/include/nnc/core/linalg/TensorUtil.h
@@ -0,0 +1,73 @@
+#ifndef _NNC_CORE_LINALG_TENSOR_UTIL_H_
+#define _NNC_CORE_LINALG_TENSOR_UTIL_H_
+
+#include <cstdint>
+#include <cstring>
+#include <memory>
+
+#include "nnc/core/linalg/TensorVariant.h"
+#include "nncc/core/ADT/tensor/Shape.h"
+#include "nncc/core/ADT/tensor/Index.h"
+#include "nncc/core/ADT/tensor/IndexRange.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace core
+{
+namespace data
+{
+namespace util
+{
+
+using nncc::core::ADT::tensor::Shape;
+using namespace nncc::contrib::core::ADT;
+using namespace nncc::contrib::core::data;
+
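+/**
+ * @brief Returns a transposed copy of the given tensor.
+ * @tparam Ints permutation of the source dimensions: output dimension i
+ *         takes its size and data from source dimension Ints[i].
+ */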
+template<unsigned int... Ints>
+static std::shared_ptr<TensorVariant>
+transposeTensor(std::shared_ptr<TensorVariant> tensor)
+{
+  using nncc::core::ADT::tensor::Index;
+  using nncc::core::ADT::tensor::IndexRange;
+
+  const Shape &inShape = tensor->getShape();
+  Shape targetShape{inShape.dim(Ints)...};
+
+  uint64_t size = num_elements(targetShape);
+
+  std::shared_ptr<char> convertedTensorData(new char[size * tensor->getElementSize()],
+                                            [](char *d) { delete[] d; });
+
+  auto convertedTensor = std::make_shared<TensorVariant>(targetShape, convertedTensorData,
+                                                         tensor->getDataType(),
+                                                         tensor->getElementSize());
+
+  // Copies each element of the source tensor into its position in the
+  // transposed tensor, following the dimension permutation given by Ints
+  auto swap = [&convertedTensor, &tensor](const Index &idx) {
+    Index targetIndex{idx.at(Ints)...};
+
+    // Copy element_size bytes which constitute one number
+    std::memcpy(convertedTensor->at(targetIndex), tensor->at(idx),
+                convertedTensor->getElementSize());
+  };
+
+  IndexRange(tensor->getShape()).iterate() << swap;
+
+  return convertedTensor;
+}
+
+} // namespace util
+} // namespace data
+} // namespace core
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_LINALG_TENSOR_UTIL_H_
diff --git a/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp b/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp
index 141c6d5..d59dada 100644
--- a/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp
+++ b/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp
@@ -3,6 +3,7 @@
 
 #include "nncc/core/ADT/tensor/Shape.h"
 #include "nnc/core/IR/model/operations/variable_op.h"
+#include "nnc/core/linalg/TensorUtil.h"
 #include "PluginException.h"
 
 #include "shape_helper.h"
@@ -19,6 +20,7 @@ namespace caffe
 
 using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp;
 using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::util::transposeTensor;
 
 void ModelVisitor::visit(const NetParameter& np)
 {
@@ -204,7 +206,7 @@ std::vector<std::shared_ptr<IrTensor>> ModelVisitor::createOpParams(const LayerP
 
     if (lp.has_convolution_param() && blob.shape().dim_size() == 4)
     {
-      params.emplace_back(common::transposeTensor<2, 3, 1, 0>(tensor));
+      params.emplace_back(transposeTensor<2, 3, 1, 0>(tensor));
     }
     else
     {
diff --git a/contrib/nnc/libs/frontend/include/shape_helper.h b/contrib/nnc/libs/frontend/include/shape_helper.h
index a416967..edd5210 100644
--- a/contrib/nnc/libs/frontend/include/shape_helper.h
+++ b/contrib/nnc/libs/frontend/include/shape_helper.h
@@ -1,13 +1,7 @@
 #ifndef NNCC_SHAPE_HELPER_H
 #define NNCC_SHAPE_HELPER_H
 
-#include <memory>
-#include <cstring>
-
-#include "nnc/core/linalg/TensorVariant.h"
 #include "nncc/core/ADT/tensor/Shape.h"
-#include "nncc/core/ADT/tensor/Index.h"
-#include "nncc/core/ADT/tensor/IndexRange.h"
 
 namespace nncc
 {
@@ -19,7 +13,6 @@ namespace common
 {
 
 using nncc::core::ADT::tensor::Shape;
-using TensorVariant = nncc::contrib::core::ADT::TensorVariant;
 
 class ShapeHelper
 {
@@ -45,41 +38,6 @@ Shape ShapeHelper::createShape(const Iterable &iter, std::size_t size)
   return sh;
 }
 
-template <unsigned int... Ints>
-static std::shared_ptr<TensorVariant>
-transposeTensor(std::shared_ptr<TensorVariant> tensor)
-{
-  using nncc::core::ADT::tensor::Index;
-  using nncc::core::ADT::tensor::IndexRange;
-
-  const Shape &inShape = tensor->getShape();
-  Shape targetShape{inShape.dim(Ints)...};
-
-  uint64_t size = num_elements(targetShape);
-
-  std::shared_ptr<char> convertedTensorData(new char[size * tensor->getElementSize()],
-                                            [](char *d) { delete[] d; });
-
-  auto convertedTensor = std::make_shared<TensorVariant>(targetShape, convertedTensorData,
-                                                    tensor->getDataType(),
-                                                    tensor->getElementSize());
-
-
-  // Swaps two elements in the initial and new tensors
-  // according to the correct tensor dimension order
-  auto swap = [&convertedTensor, &tensor](const Index &idx) {
-    Index targetIndex{idx.at(Ints)...};
-
-    // Copy element_size bytes which constitute one number
-    std::memcpy(convertedTensor->at(targetIndex), tensor->at(idx),
-                convertedTensor->getElementSize());
-  };
-
-  IndexRange(tensor->getShape()).iterate() << swap;
-
-  return convertedTensor;
-}
-
 } // namespace common
 } // namespace frontend
 } // namespace contrib
diff --git a/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp b/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp
index 4d52d66..e652a50 100644
--- a/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp
+++ b/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp
@@ -6,6 +6,7 @@
 #include "nncc/core/ADT/tensor/Shape.h"
 #include "nncc/core/ADT/tensor/Index.h"
 #include "nncc/core/ADT/tensor/IndexRange.h"
+#include "nnc/core/linalg/TensorUtil.h"
 #include "nnc/core/IR/model/operations/variable_op.h"
 
 #include "shape_helper.h"
@@ -25,6 +26,7 @@ using nncc::core::ADT::tensor::Index;
 using nncc::core::ADT::tensor::IndexRange;
 using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp;
 using nncc::core::ADT::tensor::Shape;
+using nncc::contrib::core::data::util::transposeTensor;
 
 IrVisitor::IrVisitor()
 {
@@ -158,7 +160,7 @@ std::vector<std::shared_ptr<IrTensor>> IrVisitor::createOpParams(const Operator
         // This is needed because TFLite convolution weights are stored as NHWC, and we use HWCN.
         // TODO: Currently this is only used by the interpreter and shape inference,
         // don't forget to change this if tensor shape processing architecture changes.
-        paramsForOp.emplace_back(common::transposeTensor<1, 2, 3, 0>(tensor));
+        paramsForOp.emplace_back(transposeTensor<1, 2, 3, 0>(tensor));
       }
       else
       {