[nnc] Rectify shared_ptr wrapper around TensorVariant (#2684)
author Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Fri, 14 Dec 2018 18:37:58 +0000 (21:37 +0300)
committer Efimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Fri, 14 Dec 2018 18:37:58 +0000 (21:37 +0300)
- Get rid of the shared_ptr wrapper around the TensorVariant class (see the sketch below).
- Add const qualifiers to auto reference declarations.
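
For illustration, a minimal self-contained sketch of the pattern this change applies. The struct layout and the makeTensor factory below are simplified stand-ins, not the real nnc definitions: the point is that the variant's element buffer is already reference-counted internally, so factories can return the variant by value and call sites can bind it with const auto& instead of passing std::shared_ptr<TensorVariant> around.

    #include <cstddef>
    #include <cstring>
    #include <memory>

    // Simplified stand-in for mir::TensorVariant: the element buffer is held in a
    // shared_ptr, so the variant itself can be copied and returned by value cheaply.
    struct TensorVariant {
      std::shared_ptr<char> data;
      std::size_t size;
    };

    // Hypothetical factory. Before this change such functions returned
    // std::shared_ptr<TensorVariant>; now they return the variant by value while
    // the underlying buffer remains shared.
    TensorVariant makeTensor(std::size_t size) {
      std::shared_ptr<char> data(new char[size], std::default_delete<char[]>());
      std::memset(data.get(), 0, size);
      return TensorVariant{data, size};
    }

    int main() {
      const auto& tensor = makeTensor(16);  // const auto&, as introduced by this change
      return tensor.size == 16 ? 0 : 1;
    }

In the diff below this shows up as std::make_shared<TensorVariant>(...) becoming direct construction, std::shared_ptr<TensorVariant> parameters becoming const TensorVariant& parameters, and auto& bindings becoming const auto&.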

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
20 files changed:
contrib/nnc/include/core/modelIR/TensorUtil.h
contrib/nnc/include/passes/caffe2_frontend/caffe2_importer.h
contrib/nnc/include/passes/common_frontend/op_creator_helper.h
contrib/nnc/include/passes/tflite_frontend/tflite_importer.h
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
contrib/nnc/passes/common_frontend/op_creator_helper.cpp
contrib/nnc/passes/interpreter/ops/DeConv2D.cpp
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
contrib/nnc/passes/soft_backend/SBSerializer.cpp
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h

contrib/nnc/include/core/modelIR/TensorUtil.h
index 0685670..7a1a5fb 100644
 #include "core/modelIR/Index.h"
 #include "core/modelIR/ShapeRange.h"
 
-namespace nnc
-{
-namespace mir
-{
+namespace nnc {
+namespace mir {
 
-// TODO: This is potentialy unsafe. Consider how to improve the transposition concept.
 template<int32_t... Ints>
 Shape transposeShape(const Shape& shape) {
-  std::vector<int32_t> permutes{Ints...};
-
-  assert(permutes.size() == shape.rank());
-
-  Shape result(shape);
-    int32_t nof_permutes = std::min<int32_t>(shape.rank(), permutes.size());
-
-  for (int32_t i = 0; i < nof_permutes; ++i) {
-    if (permutes[i] < nof_permutes)
-      result.dim(i) = shape.dim(permutes[i]);
-  }
-
-  return result;
+  assert(sizeof...(Ints) == shape.rank());
+  return {shape.dim(Ints)...};
 }
 
 template<unsigned int... Ints>
-static std::shared_ptr <TensorVariant>
-transposeTensor(std::shared_ptr <const TensorVariant> tensor)
-{
-  const Shape &inShape = tensor->getShape();
-  Shape targetShape{inShape.dim(Ints)...};
-
-  auto size = targetShape.numElements();
+TensorVariant transposeTensor(const TensorVariant& tensor) {
+  const auto& shape = tensor.getShape();
+  Shape transposed_shape{shape.dim(Ints)...};
 
-  std::shared_ptr<char> convertedTensorData(new char[size * tensor->getElementSize()],
-                                            [](char *d) { delete[] d; });
+  auto elem_type = tensor.getDataType();
+  auto elem_size = tensor.getElementSize();
+  auto num_elems = shape.numElements();
 
-  auto convertedTensor = std::make_shared<TensorVariant>(targetShape, convertedTensorData,
-                                                         tensor->getDataType(),
-                                                         tensor->getElementSize());
-  
-  // Swaps two elements in the initial and new tensors
-  // according to the correct tensor dimension order
-  auto swap = [&convertedTensor, &tensor](const Index &idx) {
-    Index targetIndex{idx.at(Ints)...};
+  std::shared_ptr<char> data(new char[num_elems * elem_size], std::default_delete<char[]>());
+  TensorVariant transposed_tensor(transposed_shape, data, elem_type, elem_size);
 
-    // Copy element_size bytes which constitute one number
-    std::memcpy(convertedTensor->at(targetIndex), tensor->at(idx),
-                convertedTensor->getElementSize());
-  };
-
-  for (Index idx: ShapeRange(tensor->getShape()))
-    swap(idx);
+  for (const auto& index : ShapeRange(shape)) {
+    Index transposed_index{index.at(Ints)...};
+    std::memcpy(transposed_tensor.at(transposed_index), tensor.at(index), elem_size);
+  }
 
-  return convertedTensor;
+  return transposed_tensor;
 }
 
 } // namespace mir
contrib/nnc/include/passes/caffe2_frontend/caffe2_importer.h
index d6fd358..bcee736 100644
@@ -39,8 +39,6 @@ enum class SupportedCaffe2OpType : uint8_t;
 
 namespace nnc {
 
-using MIRTensor = nnc::mir::TensorVariant;
-
 class Caffe2Importer : public NNImporter, public Pass {
 public:
   explicit Caffe2Importer(std::string predictNet, std::string initNet,
@@ -79,7 +77,7 @@ private:
   std::map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
   mir::Operation* _lastMIROp = nullptr;
 
-  std::map<std::string, std::shared_ptr<MIRTensor>> _MIRTensors;
+  std::map<std::string, mir::TensorVariant> _MIRTensors;
 
   /**
   * @brief Pass through caffe2 graph and collect ops unsupported by NNC
@@ -105,7 +103,7 @@ private:
   /**
   * @brief Creates MIR tensor from caffe2 givenTensorFill op
   */
-  std::shared_ptr<mir::TensorVariant> createTensor(const ::caffe2::OperatorDef&);
+  mir::TensorVariant createTensor(const ::caffe2::OperatorDef& op);
 
   /**
   * @brief Returns MIR ops, under given caffe2 op
contrib/nnc/include/passes/common_frontend/op_creator_helper.h
index 11309b9..2c36e09 100644
@@ -36,8 +36,7 @@ namespace nnc {
  * @param foldedKernel original grouped kernel
  * @return unfolded kernel, compatible with ordinary conv2D operation
  */
-std::shared_ptr<mir::TensorVariant>
-fixGroupedKernel(int groups, std::shared_ptr<mir::TensorVariant> folded_kernel);
+mir::TensorVariant fixGroupedKernel(int groups, const mir::TensorVariant& folded_kernel);
 
 } // namespace nnc
 
contrib/nnc/include/passes/tflite_frontend/tflite_importer.h
index ee1a291..1d24f2f 100644
@@ -128,17 +128,15 @@ private:
   /**
   * @brief Prepare data for creating an MIR node/operation.
   */
-  std::vector<std::shared_ptr<mir::TensorVariant>> createOpParams(const ::tflite::Operator* op);
+  std::vector<mir::TensorVariant> createOpParams(const ::tflite::Operator* op);
 
   /**
   * @brief Return MIR ops, preceding given tflite operator
   */
   std::vector<mir::Operation*> getPrecedingMIROps(const ::tflite::Operator* op);
 
-  std::shared_ptr<mir::TensorVariant> createTensor(const ::tflite::Tensor* t,
-                                                   const ::tflite::Buffer* b);
-
-  std::shared_ptr<mir::TensorVariant> convertTensorForConv(std::shared_ptr<mir::TensorVariant>);
+  mir::TensorVariant createTensor(const ::tflite::Tensor* t,
+                                  const ::tflite::Buffer* b);
 };
 
 }  // namespace nnc
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
index ae0fa90..36dd622 100644
@@ -569,8 +569,8 @@ void AclCppOpGenerator::visit(ops::PadOp& op) {
 
 template <typename Op>
 void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, const string& suffix) {
-  auto ir_weights = transposeTensor<1, 0, 2, 3>(make_shared<TensorVariant>(op.getKernel()));
-  const Shape& ir_weights_shape = ir_weights->getShape();
+  auto ir_weights = transposeTensor<1, 0, 2, 3>(op.getKernel());
+  const auto& ir_weights_shape = ir_weights.getShape();
   assert(ir_weights_shape.rank() == 4);
   Shape ir_biases_shape({ir_weights_shape.dim(-1)});
 
@@ -609,7 +609,7 @@ void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, cons
   auto layer = genLayer(acl_func_name, operation_name, config_params);
   allocate(weights);
   // Save the IR weights tensor to later read this in the artifact.
-  serializeTensor(weights, *ir_weights);
+  serializeTensor(weights, ir_weights);
   allocate(out);
   runLayer(layer);
 }
contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
index d5d8116..6f91b8b 100644
@@ -148,8 +148,7 @@ void Caffe2Importer::preloadAllTensors() {
     if ((opType == SupportedCaffe2OpType::givenTensorFill
          || opType == SupportedCaffe2OpType::constantFill)
         && hasArgument(op.arg(), "values")) {
-      _MIRTensors.insert(
-              std::pair<std::string, std::shared_ptr<MIRTensor>>(op.output(0), createTensor(op)));
+      _MIRTensors.insert(std::make_pair(op.output(0), createTensor(op)));
     }
   }
 }
@@ -223,28 +222,23 @@ void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
   _lastMIROp = outputs.at(0).op;
 }
 
-std::shared_ptr<IrTensor> Caffe2Importer::createTensor(const OperatorDef& op) {
+mir::TensorVariant Caffe2Importer::createTensor(const OperatorDef& op) {
   assert(hasArgument(op.arg(), "shape") && hasArgument(op.arg(), "values"));
 
-  auto shape = findArgumentByName(op.arg(), "shape");
-  auto values = findArgumentByName(op.arg(), "values");
+  const auto& shape = findArgumentByName(op.arg(), "shape");
+  const auto& values = findArgumentByName(op.arg(), "values");
 
   // Create untyped tensor. Note, tensor contents will be *copied* here.
-  auto type = mir::DTYPE::FLOAT32;
-  size_t elementSize = sizeof(float);
-  size_t bufferSize = values.floats().size() * elementSize;
-  const char* srcData = reinterpret_cast<const char*>(values.floats().data());
-  std::shared_ptr<char> tensorBufferCopy(new char[bufferSize],
-                                         std::default_delete<char[]>());
-  char* dstData = tensorBufferCopy.get();
-  memcpy(dstData, srcData, bufferSize);
+  auto element_type = mir::DTYPE::FLOAT32;
+  size_t element_size = sizeof(float);
+  size_t data_size = values.floats().size() * element_size;
+  std::shared_ptr<char> data(new char[data_size], std::default_delete<char[]>());
+  memcpy(data.get(), values.floats().data(), data_size);
 
   Shape tensor_shape = ShapeHelper::createShape(
           shape.ints(), static_cast<size_t>(shape.ints().size()));
 
-  auto tensor = std::make_shared<IrTensor>(tensor_shape, tensorBufferCopy, type, elementSize);
-
-  return tensor;
+  return mir::TensorVariant(tensor_shape, data, element_type, element_size);
 }
 
 std::vector<mir::IODescriptor> Caffe2Importer::getInputMIROps(const OperatorDef& op) {
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
index 086a66e..c6ab0c1 100644
@@ -199,15 +199,15 @@ std::vector<mir::IODescriptor>
 Caffe2OpCreator::convertAdd(const std::vector<mir::IODescriptor>& inputs,
                             const ::caffe2::OperatorDef& op,
                             const MIRTensors& mir_tensors) {
-  auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
-  auto& addend = mir_tensors.at(op.input(1));
+  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  const auto& addend = mir_tensors.at(op.input(1));
 
-  assert(addend->getShape().rank() == 1 && "Only 1-rank addend is supported");
-  assert(addend->getShape().numElements() == input_shape.dim(1)
+  assert(addend.getShape().rank() == 1 && "Only 1-rank addend is supported");
+  assert(addend.getShape().numElements() == input_shape.dim(1)
          && "Only addend size equal to number of input channels is supported");
 
   // TODO: replace with elementwise op, when broadcating will be added in elementwise op
-  auto add = createOp<ops::BiasAddOp>(convertCaffeToMIR(inputs[0]), *addend);
+  auto add = createOp<ops::BiasAddOp>(convertCaffeToMIR(inputs[0]), addend);
   return {convertMIRToCaffe(add->getOutput(0))};
 }
 
@@ -242,28 +242,28 @@ std::vector<IODescriptor> Caffe2OpCreator::convertConv(const std::vector<IODescr
   std::tie(pad_before, pad_after) = getPadding(op);
 
   auto kernel_tensor = transposeTensor<2, 3, 1, 0>(mir_tensors.at(op.input(1)));
-  auto in_group_size = kernel_tensor->getShape().dim(2);
-  auto out_channels = kernel_tensor->getShape().dim(3);
+  auto in_group_size = kernel_tensor.getShape().dim(2);
+  auto out_channels = kernel_tensor.getShape().dim(3);
   int num_groups = getSingleArgument(op, "group", 1);
   bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
 
   mir::Operation* conv2d;
   if (is_depthwise) {
     // TODO handle properly kernel with layer multiplier
-    std::shared_ptr<IrTensor> transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
-    conv2d = createOp<ops::DepthwiseConv2DOp>(convertCaffeToMIR(inputs[0]), *transposed_tensor,
+    auto transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(kernel_tensor);
+    conv2d = createOp<ops::DepthwiseConv2DOp>(convertCaffeToMIR(inputs[0]), transposed_tensor,
                                               stride_shape, pad_before, pad_after);
   } else {
     // first we need to convert kernel of grouped convolution to appropriate ordinary kernel
     if (num_groups != 1)
       kernel_tensor = fixGroupedKernel(num_groups, kernel_tensor);
 
-    conv2d = createOp<ops::Conv2DOp>(convertCaffeToMIR(inputs[0]), *kernel_tensor,
+    conv2d = createOp<ops::Conv2DOp>(convertCaffeToMIR(inputs[0]), kernel_tensor,
                                      stride_shape, pad_before, pad_after);
   }
 
   if (op.input_size() > 2) {  // Bias is optional
-    auto bias_add = createOp<ops::BiasAddOp>(conv2d->getOutput(0), *mir_tensors.at(op.input(2)));
+    auto bias_add = createOp<ops::BiasAddOp>(conv2d->getOutput(0), mir_tensors.at(op.input(2)));
     return {convertMIRToCaffe(bias_add->getOutput(0))};
   }
   return {convertMIRToCaffe(conv2d->getOutput(0))};
@@ -291,16 +291,15 @@ std::vector<IODescriptor>
 Caffe2OpCreator::convertFullyConnected(const std::vector<IODescriptor>& inputs,
                                        const ::caffe2::OperatorDef& op,
                                        const MIRTensors& mir_tensors) {
-  auto weights_tensor = mir_tensors.at(op.input(1));
-  weights_tensor = transposeTensor<1, 0>(weights_tensor);
+  auto weights_tensor = transposeTensor<1, 0>(mir_tensors.at(op.input(1)));
 
   auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
   // Transform input into 2-D tensor by flattening axes
   Shape shape{input_shape.dim(0), input_shape.numElements() / input_shape.dim(0)};
   auto reshape = createOp<ops::ReshapeOp>(inputs[0], shape);
-  auto fully_connected = createOp<ops::FullyConnectedOp>(reshape->getOutput(0), *weights_tensor);
+  auto fully_connected = createOp<ops::FullyConnectedOp>(reshape->getOutput(0), weights_tensor);
 
-  auto bias = createOp<ops::BiasAddOp>(fully_connected->getOutput(0), *mir_tensors.at(op.input(2)));
+  auto bias = createOp<ops::BiasAddOp>(fully_connected->getOutput(0), mir_tensors.at(op.input(2)));
   return {bias->getOutput(0)};
 }
 
@@ -328,15 +327,15 @@ std::vector<mir::IODescriptor>
 Caffe2OpCreator::convertMul(const std::vector<mir::IODescriptor>& inputs,
                             const ::caffe2::OperatorDef& op,
                             const MIRTensors& mir_tensors) {
-  auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
-  auto& multiplier = mir_tensors.at(op.input(1));
+  const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
+  const auto& multiplier = mir_tensors.at(op.input(1));
 
-  assert(multiplier->getShape().rank() == 1 && "Only 1-rank multiplier is supported");
-  assert(multiplier->getShape().numElements() == input_shape.dim(1)
+  assert(multiplier.getShape().rank() == 1 && "Only 1-rank multiplier is supported");
+  assert(multiplier.getShape().numElements() == input_shape.dim(1)
          && "Only multiplier size equal to number of input channels is supported");
 
   // TODO: replace with elementwise op, when broadcating will be added in elementwise op
-  auto mul = createOp<ops::ScaleOp>(convertCaffeToMIR(inputs[0]), *multiplier);
+  auto mul = createOp<ops::ScaleOp>(convertCaffeToMIR(inputs[0]), multiplier);
   return {convertMIRToCaffe(mul->getOutput(0))};
 }
 
@@ -358,26 +357,26 @@ Caffe2OpCreator::convertSpatialBN(const std::vector<mir::IODescriptor>& inputs,
                                   const MIRTensors& mir_tensors) {
   // overall_res = (X - mean) / sqrt(var + epsilon) * scale + bias
 
-  auto& scale = mir_tensors.at(op.input(1));
-  auto& bias = mir_tensors.at(op.input(2));
-  auto& mean = mir_tensors.at(op.input(3));
-  auto& var = mir_tensors.at(op.input(4));
+  const auto& scale = mir_tensors.at(op.input(1));
+  const auto& bias = mir_tensors.at(op.input(2));
+  const auto& mean = mir_tensors.at(op.input(3));
+  const auto& var = mir_tensors.at(op.input(4));
   float eps = getSingleArgument(op, "epsilon", 1e-5f);
 
   // res1 = X - mean
-  Tensor<float> bias_data(*mean);
+  Tensor<float> bias_data(mean);
   for (auto& idx: ShapeRange(bias_data.getShape()))
     bias_data.at(idx) *= -1;
-  auto bias_add_1 = createOp<ops::BiasAddOp>(convertCaffeToMIR(inputs[0]), *mean);
+  auto bias_add_1 = createOp<ops::BiasAddOp>(convertCaffeToMIR(inputs[0]), mean);
 
   // res2 = res1 * scale / (var + epsilon)
-  Tensor<float> multiplier(*scale);
-  for (auto& idx: ShapeRange(scale->getShape()))
-    multiplier.at(idx) /= std::sqrt(*(float*) var->at(idx) + eps);
-  auto scale_op = createOp<ops::ScaleOp>(bias_add_1->getOutput(0), *scale);
+  Tensor<float> multiplier(scale);
+  for (auto& idx: ShapeRange(scale.getShape()))
+    multiplier.at(idx) /= std::sqrt(*(float*) var.at(idx) + eps);
+  auto scale_op = createOp<ops::ScaleOp>(bias_add_1->getOutput(0), scale);
 
   // overall_res = res2 + bias
-  auto bias_add_2 = createOp<ops::BiasAddOp>(scale_op->getOutput(0), *bias);
+  auto bias_add_2 = createOp<ops::BiasAddOp>(scale_op->getOutput(0), bias);
 
   return {convertMIRToCaffe(bias_add_2->getOutput(0))};
 }
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
index fdff757..cee363f 100644
@@ -34,9 +34,8 @@ namespace nnc {
 
 using nnc::mir::Graph;
 using nnc::mir::Operation;
-using IrTensor = nnc::mir::TensorVariant;
 using nnc::mir::Shape;
-using MIRTensors = const std::map<std::string, std::shared_ptr<mir::TensorVariant>>;
+using MIRTensors = const std::map<std::string, mir::TensorVariant>;
 
 class Caffe2OpCreator {
 public:
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
index 35e7208..1432e0a 100644
@@ -74,7 +74,7 @@ mir::IODescriptor CaffeOpCreator::convertMIRToCaffe(const mir::IODescriptor& arg
   }
 }
 
-std::shared_ptr<TensorVariant> CaffeOpCreator::convertBlob(const BlobProto& blob) {
+TensorVariant CaffeOpCreator::convertBlob(const BlobProto& blob) {
   size_t element_size;
   const char* src_data;
   size_t buffer_size;
@@ -96,7 +96,7 @@ std::shared_ptr<TensorVariant> CaffeOpCreator::convertBlob(const BlobProto& blob
                                          static_cast<size_t>(blob.shape().dim_size()));
   std::shared_ptr<char> data(new char[buffer_size], std::default_delete<char[]>());
   std::memcpy(data.get(), src_data, buffer_size);
-  return std::make_shared<TensorVariant>(shape, data, DTYPE::FLOAT32, element_size);
+  return TensorVariant(shape, data, DTYPE::FLOAT32, element_size);
 }
 
 std::vector<mir::IODescriptor>
@@ -194,8 +194,8 @@ CaffeOpCreator::convertConvolution(const caffe::LayerParameter& layer,
   kernel_weights = transposeTensor<2, 3, 1, 0>(kernel_weights);
 
   Operation* result;
-  auto in_group_size = kernel_weights->getShape().dim(2);
-  auto out_channels = kernel_weights->getShape().dim(3);
+  auto in_group_size = kernel_weights.getShape().dim(2);
+  auto out_channels = kernel_weights.getShape().dim(3);
   int32_t num_groups = params.group();
   bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
   if (is_depthwise) {
@@ -203,20 +203,20 @@ CaffeOpCreator::convertConvolution(const caffe::LayerParameter& layer,
     // TODO handle properly kernel with layer multiplier
     auto transposed_tensor = transposeTensor<0, 1, 3, 2>(kernel_weights);
     result = createOp<ops::DepthwiseConv2DOp>(layer.name(), convertCaffeToMIR(inputs[0]),
-                                              *transposed_tensor, strides, padding, padding);
+                                              transposed_tensor, strides, padding, padding);
   } else {
     if (num_groups != 1) {
       // first we need to convert kernel of grouped convolution to appropriate ordinary kernel
       kernel_weights = fixGroupedKernel(params.group(), kernel_weights);
     }
-    result = createOp<ops::Conv2DOp>(layer.name(), convertCaffeToMIR(inputs[0]), *kernel_weights,
+    result = createOp<ops::Conv2DOp>(layer.name(), convertCaffeToMIR(inputs[0]), kernel_weights,
                                      strides, padding, padding);
   }
 
   // Add the bias, if any.
   if (params.bias_term()) {
     auto bias_weights = convertBlob(layer.blobs(1));
-    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), *bias_weights);
+    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias_weights);
   }
 
   return {convertMIRToCaffe(result->getOutput(0))};
@@ -239,13 +239,13 @@ CaffeOpCreator::convertDeconvolution(const caffe::LayerParameter& layer,
     kernel_weights = fixGroupedKernel(opts.group(), kernel_weights);
   }
   auto deconv2d = createOp<ops::DeConv2DOp>(layer.name(), convertCaffeToMIR(inputs[0]),
-                                            *kernel_weights, strides, padding);
+                                            kernel_weights, strides, padding);
 
   // bias_term is optional (so might not be present) and defaults to true
   if (!opts.has_bias_term() || opts.bias_term()) {
     auto bias_weights = convertBlob(layer.blobs(1));
     auto bias_add = createOp<ops::BiasAddOp>(layer.name() + ".bias", deconv2d->getOutput(0),
-                                             *bias_weights);
+                                             bias_weights);
     return {convertMIRToCaffe(bias_add->getOutput(0))};
   } else {
     return {convertMIRToCaffe(deconv2d->getOutput(0))};
@@ -280,12 +280,12 @@ CaffeOpCreator::convertInnerProduct(const LayerParameter& layer,
   assert(params.axis() == 1);
   Shape shape{input_shape.dim(0), input_shape.numElements() / input_shape.dim(0)};
   auto reshape = createOp<ops::ReshapeOp>(layer.name() + ".reshape", inputs[0], shape);
-  auto result = createOp<ops::FullyConnectedOp>(layer.name(), reshape->getOutput(0), *weights);
+  auto result = createOp<ops::FullyConnectedOp>(layer.name(), reshape->getOutput(0), weights);
 
   // Add the bias, if any.
   if (params.bias_term()) {
     auto bias_weights = convertBlob(layer.blobs(1));
-    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), *bias_weights);
+    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias_weights);
   }
 
   return {result->getOutput(0)};
@@ -472,12 +472,12 @@ CaffeOpCreator::convertScale(const caffe::LayerParameter& layer,
                              const std::vector<mir::IODescriptor>& inputs) {
   const auto& params = layer.scale_param();
   auto weights = convertBlob(layer.blobs(0));
-  auto result = createOp<ops::ScaleOp>(layer.name(), convertCaffeToMIR(inputs[0]), *weights);
+  auto result = createOp<ops::ScaleOp>(layer.name(), convertCaffeToMIR(inputs[0]), weights);
 
   // Add the bias, if any.
   if (params.bias_term()) {
     auto bias_weights = convertBlob(layer.blobs(1));
-    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), *bias_weights);
+    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias_weights);
   }
 
   return {result->getOutput(0)};
@@ -499,7 +499,7 @@ CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter& layer,
   auto& opts = layer.batch_norm_param();
   float eps = opts.eps();
   auto scale_weight = convertBlob(layer.blobs(2));
-  float scale_factor = *reinterpret_cast<float*>(scale_weight->at(mir::Index{0}));
+  float scale_factor = *reinterpret_cast<float*>(scale_weight.at(mir::Index{0}));
   // Code below is taken from cpu caffe implementation:
   // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100
   if (scale_factor != 0.0f)
@@ -509,21 +509,21 @@ CaffeOpCreator::convertBatchNorm(const caffe::LayerParameter& layer,
   // multiply elements of mean by scaleFactor and get opposite numbers
   // to subtract mean from input via biasAdd operation
   auto mean_weights = convertBlob(layer.blobs(0));
-  Tensor<float> bias_data(*mean_weights);
+  Tensor<float> bias_data(mean_weights);
   for (Index idx : ShapeRange(bias_data.getShape()))
     bias_data.at(idx) *= -scale_factor;
   auto bias_add = createOp<ops::BiasAddOp>(layer.name() + ".bias", convertCaffeToMIR(inputs[0]),
-                                           *mean_weights);
+                                           mean_weights);
 
   // create scale argument from variance:
   // multiply elements of variance by scaleFactor and
   // normalize biased input using scale operation
   auto variance_weights = convertBlob(layer.blobs(1));
-  Tensor<float> scale_data(*variance_weights);
+  Tensor<float> scale_data(variance_weights);
   for (Index idx : ShapeRange(scale_data.getShape()))
     scale_data.at(idx) = 1.0f / std::sqrt(scale_data.at(idx) * scale_factor + eps);
   auto scale = createOp<ops::ScaleOp>(layer.name() + ".scale", bias_add->getOutput(0),
-                                      *variance_weights);
+                                      variance_weights);
   return {convertMIRToCaffe(scale->getOutput(0))};
 }
 
@@ -547,13 +547,13 @@ std::vector<mir::IODescriptor>
 CaffeOpCreator::convertEmbed(const caffe::LayerParameter& layer,
                              const std::vector<mir::IODescriptor>& inputs) {
   const auto& params = layer.embed_param();
-  auto data = createOp<ops::ConstantOp>(layer.name() + ".weights", *convertBlob(layer.blobs(0)));
+  auto data = createOp<ops::ConstantOp>(layer.name() + ".weights", convertBlob(layer.blobs(0)));
   auto result = createOp<ops::GatherOp>(layer.name(), data->getOutput(0), inputs[0], 0);
 
   // Add the bias, if any.
   if (params.bias_term()) {
     auto bias_weights = convertBlob(layer.blobs(1));
-    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), *bias_weights);
+    result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0), bias_weights);
   }
 
   return {result->getOutput(0)};
contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
index 2c1b78a..d8e4b57 100644
@@ -123,7 +123,7 @@ private:
 
   mir::IODescriptor convertMIRToCaffe(const mir::IODescriptor& arg);
 
-  std::shared_ptr<TensorVariant> convertBlob(const caffe::BlobProto& blob);
+  TensorVariant convertBlob(const caffe::BlobProto& blob);
 
   template<typename OpType, typename... Types>
   mir::Operation* createOp(const std::string& name, Types&&... args);
contrib/nnc/passes/common_frontend/op_creator_helper.cpp
index 7bc0315..7d20f64 100644
@@ -24,12 +24,12 @@ namespace nnc {
 
 using namespace mir;
 
-std::shared_ptr<TensorVariant>
-fixGroupedKernel(int groups, std::shared_ptr<TensorVariant> folded_kernel) {
+TensorVariant
+fixGroupedKernel(int groups, const TensorVariant& folded_kernel) {
   const int kernel_in_chan_num = 2;
   const int kernel_out_chan_num = 3;
 
-  const Shape& kernel_shape = folded_kernel->getShape();
+  const Shape& kernel_shape = folded_kernel.getShape();
   auto kernel_in_channels = kernel_shape.dim(kernel_in_chan_num);
   auto kernel_out_channels = kernel_shape.dim(kernel_out_chan_num);
   auto in_channels = kernel_in_channels * groups;
@@ -38,12 +38,10 @@ fixGroupedKernel(int groups, std::shared_ptr<TensorVariant> folded_kernel) {
   // here creates unfolded kernel with shape [H, W, inputChannels, outputChannels]
   Shape unfold_kernel_shape(kernel_shape);
   unfold_kernel_shape.dim(kernel_in_chan_num) = in_channels;
-  auto buffer_size = unfold_kernel_shape.numElements() * folded_kernel->getElementSize();
+  auto buffer_size = unfold_kernel_shape.numElements() * folded_kernel.getElementSize();
   std::shared_ptr<char> buffer(new char[buffer_size], std::default_delete<char[]>());
-  size_t data_size = folded_kernel->getElementSize();
-  std::shared_ptr<TensorVariant> unfold_kernel =
-          std::make_shared<TensorVariant>(unfold_kernel_shape, buffer, folded_kernel->getDataType(),
-                                          data_size);
+  size_t data_size = folded_kernel.getElementSize();
+  TensorVariant unfold_kernel(unfold_kernel_shape, buffer, folded_kernel.getDataType(), data_size);
 
   int in_group_size = kernel_in_channels;
   int out_group_size = kernel_out_channels / groups;
@@ -59,13 +57,13 @@ fixGroupedKernel(int groups, std::shared_ptr<TensorVariant> folded_kernel) {
       mir::Index folded_idx(idx);
       folded_idx.at(kernel_in_chan_num) %= in_group_size;
 
-      std::copy(folded_kernel->at(folded_idx), folded_kernel->at(folded_idx) + data_size,
-                unfold_kernel->at(idx));
+      std::copy(folded_kernel.at(folded_idx), folded_kernel.at(folded_idx) + data_size,
+                unfold_kernel.at(idx));
     } else {
       // fill element of output kernel with zero element
-      assert(folded_kernel->getDataType() == DTYPE::FLOAT32 &&
+      assert(folded_kernel.getDataType() == DTYPE::FLOAT32 &&
              "unsupported data type, add appropriate zero element creation");
-      float* elem = reinterpret_cast<float*>(unfold_kernel->at(idx));
+      float* elem = reinterpret_cast<float*>(unfold_kernel.at(idx));
       *elem = 0.0f;
     }
   }
contrib/nnc/passes/interpreter/ops/DeConv2D.cpp
index b33d32f..af2b5d5 100644
@@ -40,11 +40,8 @@ std::vector<nnc::mir::TensorVariant> nnc::DeConv2D::operator()() {
   Shape in_shape = _input.getShape();
   ShapeRange in_range(in_shape);
 
-  std::shared_ptr<TensorVariant> tr_kernel;
-  const std::shared_ptr<const mir::TensorVariant> kernel_ptr(
-          &_kernel, []( const TensorVariant* ){});
-  tr_kernel = transposeTensor<0,1,3,2>(kernel_ptr);
-  auto kernel = Tensor<float>(*tr_kernel);
+  auto tr_kernel = transposeTensor<0,1,3,2>(_kernel);
+  Tensor<float> kernel(tr_kernel);
 
   Shape k_shape = kernel.getShape();
   int32_t num_kernels = k_shape.dim(3);
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
index 6c1ffea..f960ead 100644
@@ -77,9 +77,7 @@ static void collectUnsupportedOps(std::unique_ptr<onnx::ModelProto>& model) {
   }
 }
 
-static std::shared_ptr<mir::TensorVariant> createTensor(const onnx::TensorProto *tensor,
-                                                        mir::Shape input_shape) {
-  assert(tensor);
+static mir::TensorVariant createTensor(const onnx::TensorProto* tensor) {
   mir::DTYPE type = mir::DTYPE::FLOAT32;
   size_t element_size;
   size_t buffer_size;
@@ -119,9 +117,11 @@ static std::shared_ptr<mir::TensorVariant> createTensor(const onnx::TensorProto
     throw PassException("Invalid data in Proto file, investigate");
   }
 
-  std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
-  memcpy(shared_buffer.get(), src_data, buffer_size);
-  return std::make_shared<mir::TensorVariant>(input_shape, shared_buffer, type, element_size);
+  std::shared_ptr<char> data(new char[buffer_size], std::default_delete<char[]>());
+  memcpy(data.get(), src_data, buffer_size);
+
+  auto shape = ShapeHelper::createShape(tensor->dims(), static_cast<size_t>(tensor->dims_size()));
+  return mir::TensorVariant(shape, data, type, element_size);
 }
 
 void ONNXImporterImpl::createGraphInputs() {
@@ -147,10 +147,8 @@ void ONNXImporterImpl::createGraphInputs() {
 
     if (onnx_tensors.find(name) != onnx_tensors.end()) {
       const onnx::TensorProto* onnx_tensor = onnx_tensors[name];
-      mir::Shape input_shape = ShapeHelper::createShape(onnx_tensor->dims(),
-                                                   static_cast<size_t>(onnx_tensor->dims_size()));
-      _inputTensors[name] = createTensor(onnx_tensor, input_shape);
-      auto constant = _graph->create<mir::ops::ConstantOp>(name, *_inputTensors[name]);
+      _inputTensors.insert(std::make_pair(name, createTensor(onnx_tensor)));
+      auto constant = _graph->create<mir::ops::ConstantOp>(name, _inputTensors.at(name));
       _tensorNameToPrevMirOp[name] = constant;
       constants.insert(constant);
     } else {
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.h
index 24ec1c3..706ec4b 100644
@@ -46,7 +46,7 @@ private:
   // This map maps onnx tensor names to MIR operations/nodes
   std::map<std::string, mir::Operation*> _tensorNameToPrevMirOp;
   // This map keeps named tensors used as graph input initializers.
-  std::map<std::string, std::shared_ptr<mir::TensorVariant>> _inputTensors;
+  std::map<std::string, mir::TensorVariant> _inputTensors;
   std::vector<mir::Operation*> _graphOutputs;
   std::string _modelFilename;
   std::unique_ptr<onnx::ModelProto> _model;
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
index 0e9b111..1f89611 100644
@@ -78,18 +78,16 @@ static std::pair<bool, float> getFloatAttribute(const onnx::NodeProto& onnx_node
   return {false, 0.0};
 }
 
-static const mir::TensorVariant* createTensor(float data) {
-  mir::DTYPE type = mir::DTYPE::FLOAT32;
-  size_t element_size = sizeof(float);
-  size_t buffer_size = sizeof(float);
-  const char* src_data = reinterpret_cast<const char*>(&data);
-
-  std::shared_ptr<char> shared_buffer (new char[buffer_size], std::default_delete<char[]>());
-  memcpy(shared_buffer.get(), src_data, buffer_size);
-  Shape tensor_shape = Shape({1});
-  // FIXME: it has to be shared_ptr
-  auto mir_tensor = new mir::TensorVariant(tensor_shape, shared_buffer, type, element_size);
-  return mir_tensor;
+static TensorVariant createTensor(float value) {
+  mir::DTYPE element_type = mir::DTYPE::FLOAT32;
+  size_t element_size = sizeof(value);
+  size_t buffer_size = 1 * element_size;
+  const char* src_data = reinterpret_cast<const char*>(&value);
+
+  std::shared_ptr<char> data(new char[buffer_size], std::default_delete<char[]>());
+  std::memcpy(data.get(), src_data, buffer_size);
+  Shape shape{1};
+  return mir::TensorVariant(shape, data, element_type, element_size);
 }
 
 struct KernelStridesPadding {
@@ -131,7 +129,7 @@ std::vector<Operation*> ONNXOpCreator::convertConv2D(InputOps& inputs,
   // FIXME: It can be non-constant value.
   auto* in_weights = dynamic_cast<mir::ops::ConstantOp*>(inputs[1]);
   assert(in_weights && "Weights could be a constant tensor only");
-  auto in_weights_tensor = in_weights->getValue();
+  const auto& in_weights_tensor = in_weights->getValue();
   // We should transpose ONNX MCHW to HWOI
   auto transposed = transposeTensor<2, 3, 1, 0>(in_weights_tensor);
 
@@ -143,10 +141,10 @@ std::vector<Operation*> ONNXOpCreator::convertConv2D(InputOps& inputs,
 
   inputs.resize(1);
   std::vector<Operation*> outputs;
-  outputs = createOp<ops::Conv2DOp>(inputs[0]->getOutput(0), *transposed, cdata.strides_shape,
+  outputs = createOp<ops::Conv2DOp>(inputs[0]->getOutput(0), transposed, cdata.strides_shape,
                                     cdata.padding_before, cdata.padding_after);
   if (input_bias)
-    outputs = createOp<ops::BiasAddOp>(outputs[0]->getOutput(0), *input_bias->getValue());
+    outputs = createOp<ops::BiasAddOp>(outputs[0]->getOutput(0), input_bias->getValue());
 
   return outputs;
 }
@@ -248,14 +246,14 @@ std::vector<Operation*> ONNXOpCreator::convertBatchNorm(InputOps& inputs,
   // Scale tensor
   assert(input_tensors.find(inputs[1]->getName()) != input_tensors.end());
   auto ptensor = input_tensors.at(inputs[1]->getName());
-  Tensor<float> nnc_scale (*ptensor);
+  Tensor<float> nnc_scale(ptensor);
   // Bias tensor
   assert(input_tensors.find(inputs[2]->getName()) != input_tensors.end());
   auto nnc_bias = input_tensors.at(inputs[2]->getName());
   // TODO: there are 2 training tensors in the inputs
 
   inputs.resize(1);
-  auto mean_outputs =  createOp<ops::BiasAddOp>(inputs[0]->getOutput(0), *nnc_bias);
+  auto mean_outputs = createOp<ops::BiasAddOp>(inputs[0]->getOutput(0), nnc_bias);
 
   // create scale argument from variance:
   // multiply elements of variance by scaleFactor and
@@ -263,7 +261,7 @@ std::vector<Operation*> ONNXOpCreator::convertBatchNorm(InputOps& inputs,
   for (Index idx : ShapeRange(nnc_scale.getShape()))
     nnc_scale.at(idx) = 1.0f / std::sqrt(nnc_scale.at(idx) * scale_factor + epsilon);
 
-  auto variance_outputs = createOp<ops::ScaleOp>(mean_outputs[0]->getOutput(0), *ptensor);
+  auto variance_outputs = createOp<ops::ScaleOp>(mean_outputs[0]->getOutput(0), ptensor);
   return variance_outputs;
 }
 
@@ -282,7 +280,7 @@ std::vector<Operation*> ONNXOpCreator::convertScale(InputOps& inputs,
   float value;
   std::tie(found, value) = getFloatAttribute(onnx_node, "scale");
   float scale = found ? value : 1.0;
-  auto outputs = createOp<ops::ScaleOp>(inputs[0]->getOutput(0), *createTensor(scale));
+  auto outputs = createOp<ops::ScaleOp>(inputs[0]->getOutput(0), createTensor(scale));
   return outputs;
 }
 
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
index c0eee9e..592efca 100644
@@ -34,7 +34,7 @@ namespace nnc {
 class ONNXOpCreator {
 public:
   using InputOps = std::vector<mir::Operation*>;
-  using InputTensors = std::map<std::string, std::shared_ptr<mir::TensorVariant>>;
+  using InputTensors = std::map<std::string, mir::TensorVariant>;
 
   ONNXOpCreator() = default;
   void setMirGraph(mir::Graph* g) {_graph = g;};
contrib/nnc/passes/soft_backend/SBSerializer.cpp
index 7964a4d..d8e3d45 100644
@@ -163,10 +163,8 @@ void Serializer::visit(ops::ConcatOp& op) {
 void Serializer::visit(ops::Conv2DOp& op) {
   _curOp->_paramStartOffset = _buffer.size();
   // serialize kernel
-  shared_ptr<TensorVariant> HWCNKernel = make_shared<TensorVariant>(op.getKernel());
   // HWCN -> NHWC
-  shared_ptr<TensorVariant> NHWCKernel = transposeTensor<3, 0, 1, 2>(HWCNKernel);
-  serializeTensor(*NHWCKernel);
+  serializeTensor(transposeTensor<3, 0, 1, 2>(op.getKernel()));
   // serialize strides
   serializeShape(op.getStrides());
   // serialize pads
@@ -226,9 +224,7 @@ void Serializer::visit(ops::PoolOp& op) {
 
 void Serializer::visit(ops::FullyConnectedOp& op) {
   _curOp->_paramStartOffset = _buffer.size();
-  shared_ptr<TensorVariant> weights = make_shared<TensorVariant>(op.getWeights());
-  shared_ptr<TensorVariant> transposedWeights = transposeTensor<1, 0>(weights);
-  serializeTensor(*transposedWeights);
+  serializeTensor(transposeTensor<1, 0>(op.getWeights()));
   serializeShape(op.getOutputShape(0));
 }
 
@@ -312,10 +308,8 @@ void Serializer::visit(mir::ops::EluOp& op) {
 void Serializer::visit(mir::ops::DeConv2DOp& op) {
   _curOp->_paramStartOffset = _buffer.size();
   // serialize kernel
-  shared_ptr<TensorVariant> HWCNKernel = make_shared<TensorVariant>(op.getKernel());
   // HWCN -> "IN"HW"OUT"
-  shared_ptr<TensorVariant> NHWCKernel = transposeTensor<2, 0, 1, 3>(HWCNKernel);
-  serializeTensor(*NHWCKernel);
+  serializeTensor(transposeTensor<2, 0, 1, 3>(op.getKernel()));
   // serialize strides
   serializeShape(op.getStrides());
   // serialize pads
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
index 0e8ec98..62c62a0 100644
@@ -266,14 +266,14 @@ std::vector<mir::Operation*> TfliteImporter::getPrecedingMIROps(const Operator*
   return inputsForOp;
 }
 
-std::vector<std::shared_ptr<IrTensor>> TfliteImporter::createOpParams(const Operator* op) {
-  std::vector<std::shared_ptr<IrTensor>> params_for_op;
+std::vector<mir::TensorVariant> TfliteImporter::createOpParams(const Operator* op) {
+  std::vector<mir::TensorVariant> params_for_op;
 
   for (auto i : *(op->inputs())) {
     const Tensor* t = (*_tensors)[i];
     const Buffer* b = (*_buffers)[t->buffer()];
     if (b->data() != nullptr) {
-      std::shared_ptr<IrTensor> tensor = createTensor(t, b);
+      auto tensor = createTensor(t, b);
 
       unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
 
@@ -287,7 +287,7 @@ std::vector<std::shared_ptr<IrTensor>> TfliteImporter::createOpParams(const Oper
       } else if (opcode == BuiltinOperator_FULLY_CONNECTED && t->shape()->size() == 2) {
         params_for_op.emplace_back(mir::transposeTensor<1, 0>(tensor));
       } else {
-        params_for_op.push_back(tensor);
+        params_for_op.emplace_back(std::move(tensor));
       }
     }
   }
@@ -295,13 +295,12 @@ std::vector<std::shared_ptr<IrTensor>> TfliteImporter::createOpParams(const Oper
   return params_for_op;
 }
 
-std::shared_ptr<IrTensor> TfliteImporter::createTensor(const Tensor* t, const Buffer* b) {
+mir::TensorVariant TfliteImporter::createTensor(const Tensor* t, const Buffer* b) {
   // Create TensorVariant by copying the tensor buffer contents.
   // Another option is to copy the data in a TensorVariant constructor.
   assert(b->data() != nullptr);
-  std::shared_ptr<char> tensor_buffer_copy(new char[b->data()->size()],
-                                           [](char* d) { delete[] d; });
-  std::copy(b->data()->begin(), b->data()->end(), tensor_buffer_copy.get());
+  std::shared_ptr<char> data(new char[b->data()->size()], std::default_delete<char[]>());
+  std::copy(b->data()->begin(), b->data()->end(), data.get());
 
   size_t elementSize;
   mir::DTYPE type;
@@ -332,11 +331,8 @@ std::shared_ptr<IrTensor> TfliteImporter::createTensor(const Tensor* t, const Bu
               EnumNamesTensorType()[t->type()]);
   }
 
-  Shape tensor_shape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
-
-  assert(tensor_shape.numElements() * elementSize == b->data()->size());
-
-  return std::make_shared<IrTensor>(tensor_shape, tensor_buffer_copy, type, elementSize);
+  auto shape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
+  return mir::TensorVariant(shape, data, type, elementSize);
 }
 
 void TfliteImporter::setGraphOutputs() {
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
index 83e6295..f02ed08 100644
@@ -84,8 +84,8 @@ void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts,
 
 std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params,
                                                             const Conv2DOptions* opts) {
-  auto& input_shape = inputs[0]->getOutputShape(0);
-  auto& kernel_shape = params[0]->getShape();
+  const auto& input_shape = inputs[0]->getOutputShape(0);
+  const auto& kernel_shape = params[0].getShape();
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
   std::vector<int32_t> padding_after(2);
@@ -94,9 +94,9 @@ std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps inputs, Inp
                    padding_after);
 
   auto outputs = createOp<ops::Conv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
-                                         *params[0], strides, padding_before, padding_after);
+                                         params[0], strides, padding_before, padding_after);
   return createOp<ops::BiasAddOp>(opts->fused_activation_function(), outputs[0]->getOutput(0),
-                                  *params[1]);
+                                  params[1]);
 }
 
 void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
@@ -107,8 +107,8 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
 std::vector<mir::Operation*>
 TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params,
                                         const DepthwiseConv2DOptions* opts) {
-  auto& input_shape = inputs[0]->getOutputShape(0);
-  auto& kernel_shape = params[0]->getShape();
+  const auto& input_shape = inputs[0]->getOutputShape(0);
+  const auto& kernel_shape = params[0].getShape();
   Shape strides{opts->stride_h(), opts->stride_w()};
   std::vector<int32_t> padding_before(2);
   std::vector<int32_t> padding_after(2);
@@ -117,10 +117,10 @@ TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params,
                    padding_after);
 
   auto outputs = createOp<ops::DepthwiseConv2DOp>(ActivationFunctionType_NONE,
-                                                  inputs[0]->getOutput(0), *params[0], strides,
+                                                  inputs[0]->getOutput(0), params[0], strides,
                                                   padding_before, padding_after);
   return createOp<ops::BiasAddOp>(opts->fused_activation_function(), outputs[0]->getOutput(0),
-                                  *params[1]);
+                                  params[1]);
 }
 
 void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
@@ -200,7 +200,7 @@ std::vector<mir::Operation*>
 TFLiteOpCreator::createTransposeConv(InputOps& inputs, InputParams& params,
                                      const ::tflite::TransposeConvOptions* opts) {
   Shape strides{opts->stride_h(), opts->stride_w(), 1};
-  return createOp<ops::DeConv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), *params[1],
+  return createOp<ops::DeConv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), params[1],
                                    strides, paddingMap[opts->padding()]);
 }
 
@@ -210,9 +210,9 @@ TFLiteOpCreator::convertResizeNN(InputOps& inputs, InputParams& params,
   // TODO support aligned corners
   assert(!opts->align_corners() && "Aligned corners not currently supported");
 
-  auto& input_shape = inputs[0]->getOutputShape(0);
+  const auto& input_shape = inputs[0]->getOutputShape(0);
   assert(input_shape.rank() == 4);
-  mir::Tensor<int> out_shapes = mir::Tensor<int>(*params[0].get());
+  mir::Tensor<int> out_shapes(params[0]);
   Shape res_shape(4);
   res_shape.dim(0) = input_shape.dim(0);
   res_shape.dim(1) = out_shapes.at(Index{0});
@@ -262,11 +262,11 @@ TFLiteOpCreator::createMax(InputOps& inputs, InputParams&,
 std::vector<mir::Operation*> TFLiteOpCreator::convertReducer(InputOps inputs, InputParams params,
                                                              ops::ReduceFOp::FuncType ft,
                                                              const ::tflite::ReducerOptions* opts) {
-  assert(params.at(0)->getShape().rank() <= 1 && "Must be 1-dim or 0-dim tensor");
-  auto tensor = mir::Tensor<int>(*params.at(0));
+  assert(params.at(0).getShape().rank() <= 1 && "Must be 1-dim or 0-dim tensor");
+  mir::Tensor<int> tensor(params.at(0));
   std::vector<int32_t> axes;
 
-  for (const auto& i: mir::ShapeRange(tensor.getShape())) {
+  for (const auto& i : mir::ShapeRange(tensor.getShape())) {
     axes.emplace_back(tensor.at(i));
   }
 
@@ -287,14 +287,14 @@ TFLiteOpCreator::convertFullyConnected(InputOps& inputs,
                                        InputParams& params,
                                        const FullyConnectedOptions* opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fc_input_size]
-  int32_t fc_input_size = params[0]->getShape().dim(0);
+  int32_t fc_input_size = params[0].getShape().dim(0);
   auto outputs = createOp<ops::ReshapeOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
                                           Shape{1, fc_input_size});
 
   auto fc_outputs = createOp<ops::FullyConnectedOp>(ActivationFunctionType_NONE,
-                                                    outputs[0]->getOutput(0), *params[0]);
+                                                    outputs[0]->getOutput(0), params[0]);
   return createOp<ops::BiasAddOp>(opts->fused_activation_function(), fc_outputs[0]->getOutput(0),
-                                  *params[1]);
+                                  params[1]);
 }
 
 void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type,
@@ -347,7 +347,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps inputs, InputPa
   assert(params.size() == 1); // support pad with one param
   std::vector<std::pair<int32_t, int32_t>> paddings;
 
-  auto paddings_tensor = mir::Tensor<int32_t>(*params[0].get());
+  mir::Tensor<int32_t> paddings_tensor(params[0]);
   // check right paddings structure
   assert(paddings_tensor.getShape().dim(1) == 2);
 
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
index c56a47c..3342f1c 100644
@@ -38,13 +38,12 @@ namespace nnc {
 
 namespace ops = mir::ops;
 using mir::Graph;
-using IrTensor = mir::TensorVariant;
 using mir::Shape;
 
 class TFLiteOpCreator {
 public:
   using InputOps = std::vector<mir::Operation*>&;
-  using InputParams = std::vector<std::shared_ptr<IrTensor>>&;
+  using InputParams = std::vector<mir::TensorVariant>&;
 
   explicit TFLiteOpCreator(Graph* g) : _graph(g) {}