From d23b01308178919d67ad8f303e5f0d3fbe1d6dc3 Mon Sep 17 00:00:00 2001
From: Pavel Iliutchenko/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Thu, 17 Oct 2019 21:45:38 +0300
Subject: [PATCH] [ncc/interpreter] Fix Reshape operation for quantization
(#8246)
* Made Reshape independent from DataType
* Fixed setting output quantization
Signed-off-by: Pavel Iliutchenko
---
compiler/nnc/backends/interpreter/Interpreter.cpp | 4 +-
compiler/nnc/backends/interpreter/ops/Reshape.h | 48 ++++++++++-------------
2 files changed, 22 insertions(+), 30 deletions(-)
diff --git a/compiler/nnc/backends/interpreter/Interpreter.cpp b/compiler/nnc/backends/interpreter/Interpreter.cpp
index 00a32ac..b7e159a 100644
--- a/compiler/nnc/backends/interpreter/Interpreter.cpp
+++ b/compiler/nnc/backends/interpreter/Interpreter.cpp
@@ -117,7 +117,7 @@ void NNInterpreter::visit(ops::MaxPool2DOp &op)
void NNInterpreter::visit(ops::ReshapeOp &op)
{
auto inputs = getInputTensors(op);
- auto outputs = Reshape(inputs[0], op.getOutputShape(0))();
+ auto outputs = Reshape(inputs[0], op.getOutputShape(0));
setOutputTensors(op, std::move(outputs));
}
@@ -215,7 +215,7 @@ void NNInterpreter::visit(ops::SqueezeOp &op)
{
auto inputs = getInputTensors(op);
// Squeeze is just a special case of reshape.
- auto outputs = Reshape(inputs[0], op.getOutputShape(0))();
+ auto outputs = Reshape(inputs[0], op.getOutputShape(0));
setOutputTensors(op, std::move(outputs));
}
diff --git a/compiler/nnc/backends/interpreter/ops/Reshape.h b/compiler/nnc/backends/interpreter/ops/Reshape.h
index 1979f5c..83f0140 100644
--- a/compiler/nnc/backends/interpreter/ops/Reshape.h
+++ b/compiler/nnc/backends/interpreter/ops/Reshape.h
@@ -17,40 +17,32 @@
#ifndef _NNC_CORE_BACKEND_INTERPRETER_RESHAPE_IMPL_
#define _NNC_CORE_BACKEND_INTERPRETER_RESHAPE_IMPL_
-#include "mir/ops/ReshapeOp.h"
+#include "mir/ShapeRange.h"
+#include "mir/TensorVariant.h"
-#include "OperationImpl.h"
-#include "Fill.h"
+#include <cstring>
namespace nnc
{
-template <typename T> class Reshape : public OperationImpl<T>
+std::vector<mir::TensorVariant> Reshape(const mir::TensorVariant &input,
+ const mir::Shape &output_shape)
{
-public:
- Reshape(const mir::TensorVariant &input, const mir::Shape &output_shape)
- : _input(input), _output_shape(output_shape)
- {
-
- assert(input.getShape().numElements() == _output_shape.numElements());
- }
-
- std::vector<mir::TensorVariant> operator()() override
- {
- mir::ShapeRange inRange(_input.getShape());
- auto inIter = inRange.begin();
-
- auto out = OperationImpl<T>::allocate_tensor(_output_shape);
-
- // Shapes element count compared in Reshape ctor
- return Fill<T>(_output_shape,
- [this, &inIter](const mir::Index &) -> T { return _input.at(*inIter++); })();
- }
-
-private:
- mir::Tensor<T> _input;
- const mir::Shape &_output_shape;
-};
+ assert(input.getShape().numElements() == output_shape.numElements());
+ mir::TensorType type(input.getElementType(), output_shape);
+ if (input.getType().isQuantized())
+ type.setQuantization(input.getType().getQuantization());
+
+ mir::TensorVariant result(type);
+ mir::ShapeRange input_range(input.getShape());
+ auto in_iter = input_range.begin();
+ const size_t elem_size = input.getElementSize();
+
+ for (const auto &out_index : mir::ShapeRange(output_shape))
+ std::memcpy(result.at(out_index), input.at(*in_iter++), elem_size);
+
+ return {result};
+}
} // namespace nnc
--
2.7.4