From: Dmitry Mozolev/AI Tools Lab /SRR/Engineer/삼성전자 Date: Tue, 31 Jul 2018 16:09:23 +0000 (+0300) Subject: Move tensor transpose operation to nnc linalg (#853) X-Git-Tag: nncc_backup~2286 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=49da8ae105fbfd257e5bed65deb77582f6cda79e;p=platform%2Fcore%2Fml%2Fnnfw.git Move tensor transpose operation to nnc linalg (#853) Moved tensor transpose operation that operates on the TensorVariant from frontend-common module to nnc core linalg. Signed-off-by: Dmitry Mozolev --- diff --git a/contrib/nnc/libs/core/include/nnc/core/linalg/TensorUtil.h b/contrib/nnc/libs/core/include/nnc/core/linalg/TensorUtil.h new file mode 100644 index 0000000..82889f1 --- /dev/null +++ b/contrib/nnc/libs/core/include/nnc/core/linalg/TensorUtil.h @@ -0,0 +1,67 @@ +#ifndef _NNC_CORE_LINALG_TENSOR_UTIL_H_ +#define _NNC_CORE_LINALG_TENSOR_UTIL_H_ + +#include <memory> +#include <cstring> + +#include "nnc/core/linalg/TensorVariant.h" +#include "nncc/core/ADT/tensor/Shape.h" +#include "nncc/core/ADT/tensor/Index.h" +#include "nncc/core/ADT/tensor/IndexRange.h" + +namespace nncc +{ +namespace contrib +{ +namespace core +{ +namespace data +{ +namespace util +{ + +using nncc::core::ADT::tensor::Shape; +using namespace nncc::contrib::core::ADT; +using namespace nncc::contrib::core::data; + +template <int... Ints> +static std::shared_ptr<TensorVariant> +transposeTensor(std::shared_ptr<TensorVariant> tensor) +{ + using nncc::core::ADT::tensor::Index; + using nncc::core::ADT::tensor::IndexRange; + + const Shape &inShape = tensor->getShape(); + Shape targetShape{inShape.dim(Ints)...}; + + uint64_t size = num_elements(targetShape); + + std::shared_ptr<char> convertedTensorData(new char[size * tensor->getElementSize()], + [](char *d) { delete[] d; }); + + auto convertedTensor = std::make_shared<TensorVariant>(targetShape, convertedTensorData, + tensor->getDataType(), + tensor->getElementSize()); + + // Swaps two elements in the initial and new tensors + // according to the correct tensor dimension order + auto swap = [&convertedTensor, 
&tensor](const Index &idx) { + Index targetIndex{idx.at(Ints)...}; + + // Copy element_size bytes which constitute one number + std::memcpy(convertedTensor->at(targetIndex), tensor->at(idx), + convertedTensor->getElementSize()); + }; + + IndexRange(tensor->getShape()).iterate() << swap; + + return convertedTensor; +} + +} // namespace util +} // namespace data +} // namespace core +} // namespace contrib +} // namespace nncc + +#endif // _NNC_CORE_LINALG_TENSOR_UTIL_H_ diff --git a/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp b/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp index 141c6d5..d59dada 100644 --- a/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp +++ b/contrib/nnc/libs/frontend/caffe/src/caffe_model_visitor.cpp @@ -3,6 +3,7 @@ #include "nncc/core/ADT/tensor/Shape.h" #include "nnc/core/IR/model/operations/variable_op.h" +#include "nnc/core/linalg/TensorUtil.h" #include "PluginException.h" #include "shape_helper.h" @@ -19,6 +20,7 @@ namespace caffe using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp; using nncc::core::ADT::tensor::Shape; +using nncc::contrib::core::data::util::transposeTensor; void ModelVisitor::visit(const NetParameter& np) { @@ -204,7 +206,7 @@ std::vector<std::shared_ptr<TensorVariant>> ModelVisitor::createOpParams(const LayerP if (lp.has_convolution_param() && blob.shape().dim_size() == 4) { - params.emplace_back(common::transposeTensor<2, 3, 1, 0>(tensor)); + params.emplace_back(transposeTensor<2, 3, 1, 0>(tensor)); } else { diff --git a/contrib/nnc/libs/frontend/include/shape_helper.h b/contrib/nnc/libs/frontend/include/shape_helper.h index a416967..edd5210 100644 --- a/contrib/nnc/libs/frontend/include/shape_helper.h +++ b/contrib/nnc/libs/frontend/include/shape_helper.h @@ -1,13 +1,7 @@ #ifndef NNCC_SHAPE_HELPER_H #define NNCC_SHAPE_HELPER_H -#include <memory> -#include <cstring> - -#include "nnc/core/linalg/TensorVariant.h" #include "nncc/core/ADT/tensor/Shape.h" -#include "nncc/core/ADT/tensor/Index.h" -#include 
"nncc/core/ADT/tensor/IndexRange.h" namespace nncc { @@ -19,7 +13,6 @@ namespace common { using nncc::core::ADT::tensor::Shape; -using TensorVariant = nncc::contrib::core::ADT::TensorVariant; class ShapeHelper { @@ -45,41 +38,6 @@ Shape ShapeHelper::createShape(const Iterable &iter, std::size_t size) return sh; } -template <int... Ints> -static std::shared_ptr<TensorVariant> -transposeTensor(std::shared_ptr<TensorVariant> tensor) -{ - using nncc::core::ADT::tensor::Index; - using nncc::core::ADT::tensor::IndexRange; - - const Shape &inShape = tensor->getShape(); - Shape targetShape{inShape.dim(Ints)...}; - - uint64_t size = num_elements(targetShape); - - std::shared_ptr<char> convertedTensorData(new char[size * tensor->getElementSize()], - [](char *d) { delete[] d; }); - - auto convertedTensor = std::make_shared<TensorVariant>(targetShape, convertedTensorData, - tensor->getDataType(), - tensor->getElementSize()); - - - // Swaps two elements in the initial and new tensors - // according to the correct tensor dimension order - auto swap = [&convertedTensor, &tensor](const Index &idx) { - Index targetIndex{idx.at(Ints)...}; - - // Copy element_size bytes which constitute one number - std::memcpy(convertedTensor->at(targetIndex), tensor->at(idx), - convertedTensor->getElementSize()); - }; - - IndexRange(tensor->getShape()).iterate() << swap; - - return convertedTensor; -} - } // namespace common } // namespace frontend } // namespace contrib diff --git a/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp b/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp index 4d52d66..e652a50 100644 --- a/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp +++ b/contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp @@ -6,6 +6,7 @@ #include "nncc/core/ADT/tensor/Shape.h" #include "nncc/core/ADT/tensor/Index.h" #include "nncc/core/ADT/tensor/IndexRange.h" +#include "nnc/core/linalg/TensorUtil.h" #include "nnc/core/IR/model/operations/variable_op.h" #include "shape_helper.h" @@ -25,6 +26,7 @@ using 
nncc::core::ADT::tensor::Index; using nncc::core::ADT::tensor::IndexRange; using VariableOp = nncc::contrib::core::IR::model::ops::VariableOp; using nncc::core::ADT::tensor::Shape; +using nncc::contrib::core::data::util::transposeTensor; IrVisitor::IrVisitor() { @@ -158,7 +160,7 @@ std::vector<std::shared_ptr<TensorVariant>> IrVisitor::createOpParams(const Operator // This is needed because TFLite convolution weights are stored as NHWC, and we use HWCN. // TODO: Currently this is only used by the interpreter and shape inference, // don't forget to change this if tensor shape processing architecture changes. - paramsForOp.emplace_back(common::transposeTensor<1, 2, 3, 0>(tensor)); + paramsForOp.emplace_back(transposeTensor<1, 2, 3, 0>(tensor)); } else {