This commit implements partial support for the reshape operation.
Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
#include <arm_compute/runtime/CL/CLSubTensor.h>
#include <arm_compute/runtime/CL/functions/CLPoolingLayer.h>
#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
+#include <arm_compute/runtime/CL/functions/CLReshapeLayer.h>
#include "internal/arm_compute/kernel/View.h"
#include "internal/nnapi/kernel/Reader.h"
void visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
void visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node) override;
void visit(const ::internal::tflite::op::Concat::Node &node) override;
+ void visit(const ::internal::tflite::op::Reshape::Node &node) override;
private:
const ::internal::tflite::operand::Set &_ctx;
// NOTE Concat has no actual operation!
}
+// Plans a Reshape operation. Only the restricted 'Feature Map' (rank-4,
+// with H == W == 1) to 'Vector' (rank-2, [1, C]) pattern is supported for
+// now; any other shape combination trips an assert.
+void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
+{
+ const ::internal::tflite::operand::Index output_index{node.param().output_index};
+ const ::internal::tflite::operand::Index input_index{node.param().input_index};
+
+ // NOTE The content of a tensor specified by shape_index should be aligned with
+ // output tensor shape
+ // TODO Check consistency of output shape
+
+ // 'Feature Map' to 'Vector' reshape
+ assert(_ctx.at(input_index).shape().rank() == 4);
+ assert(_ctx.at(output_index).shape().rank() == 2);
+ assert(_ctx.at(output_index).shape().dim(0) == 1);
+
+ const auto ifm_shape = _ctx.at(input_index).shape().asFeature();
+ const auto out_size = _ctx.at(output_index).shape().dim(1);
+
+ // NOTE Vector element ordering issue arises when H or W is not 1
+ assert(ifm_shape.H == 1);
+ assert(ifm_shape.W == 1);
+ assert((ifm_shape.C * ifm_shape.H * ifm_shape.W) == out_size);
+
+ // Register shape constraints so the backend allocates correctly-sized tensors.
+ _builder.addShapeConstr(output_index, asTensorInfo(out_size));
+ _builder.addShapeConstr(input_index, asTensorInfo(ifm_shape));
+
+ // The stage closure captures only plain operand indices (ints), not
+ // references into the planner, so it remains valid after planning ends.
+ struct Param
+ {
+ int output_index;
+ int input_index;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+
+ // Deferred stage: at execution-build time, resolve the allocations and
+ // append a CLReshapeLayer configured from input to output.
+ auto stage = [param] (const IAllocationContext &ctx, IExecutionBuilder &builder)
+ {
+ auto output_alloc = ctx.at(::internal::tflite::operand::Index{param.output_index});
+ auto input_alloc = ctx.at(::internal::tflite::operand::Index{param.input_index});
+
+ auto fn = make_layer<::arm_compute::CLReshapeLayer>();
+
+ fn->configure(input_alloc, output_alloc);
+
+ builder.append(std::move(fn));
+ };
+
+ _builder.addStage(stage);
+}
+
class AllocationContext final : public IAllocationContext
{
public:
};
//
+// VectorSink
+//
+// Copies a rank-1 (vector) tensor out of an ::arm_compute::ITensor into a
+// caller-owned host buffer, one element at a time.
+// NOTE(review): assumes tensor elements are 32-bit floats — confirm at call sites.
+class VectorSink final : public Sink
+{
+public:
+ // vlen: number of float elements to copy; base/size: destination buffer and its byte size.
+ VectorSink(const int32_t vlen, uint8_t *base, const size_t size) : _vlen{vlen}, _base{base}
+ {
+ // Destination buffer must be large enough to hold the whole vector.
+ assert(size >= _vlen * sizeof(float));
+ }
+
+public:
+ // Element-wise copy via ptr_to_element: the CL tensor's backing storage may
+ // carry padding/strides, so a flat memcpy is not safe here.
+ void pull(::arm_compute::ITensor &tensor) const override
+ {
+ float *base = reinterpret_cast<float *>(_base);
+
+ for (int32_t n = 0; n < _vlen; ++n)
+ {
+ auto from = reinterpret_cast<float *>(tensor.ptr_to_element(::arm_compute::Coordinates{n}));
+ auto into = base + n;
+
+ *into = *from;
+ }
+ }
+
+private:
+ const int32_t _vlen; // element count of the vector
+ uint8_t * const _base; // non-owning destination buffer
+};
+
+
+//
// FeatureSink
//
class FeatureSink final : public Sink
// NOTE The current implemenation assumes that every output is a feature map.
// TODO Remove this assumption
const auto operand_index = execution->plan().model().outputs.at(index);
- const auto &operand_shape = operands.at(operand_index).shape().asFeature();
- execution->sink<FeatureSink>(index,
- operand_shape, reinterpret_cast<uint8_t *>(buffer), length);
+ if (operands.at(operand_index).shape().rank() == 2)
+ {
+ assert(operands.at(operand_index).shape().dim(0) == 1);
+
+ const auto len = operands.at(operand_index).shape().dim(1);
+
+ execution->sink<VectorSink>(index, len, reinterpret_cast<uint8_t *>(buffer), length);
+ }
+ else if (operands.at(operand_index).shape().rank() == 4)
+ {
+ const auto &operand_shape = operands.at(operand_index).shape().asFeature();
+
+ execution->sink<FeatureSink>(index,
+ operand_shape, reinterpret_cast<uint8_t *>(buffer), length);
+ }
+ else
+ {
+ throw std::runtime_error{"Not supported, yet"};
+ }
return ANEURALNETWORKS_NO_ERROR;
}
#include "internal/op/MaxPool2D.h"
#include "internal/op/AvgPool2D.h"
#include "internal/op/Concat.h"
+#include "internal/op/Reshape.h"
namespace internal
{
virtual void visit(const MaxPool2D::implicit::Node &) = 0;
virtual void visit(const AvgPool2D::implicit::Node &) = 0;
virtual void visit(const Concat::Node &) = 0;
+ virtual void visit(const Reshape::Node &) = 0;
};
} // namespace op
--- /dev/null
+#include "internal/op/Reshape.h"
+#include "internal/op/NodeVisitor.h"
+
+#include <cassert>
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Reshape
+{
+
+// Double-dispatch hook: routes this node to the visitor's Reshape overload.
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+} // namespace Reshape
+} // namespace op
+} // namespace tflite
+} // namespace internal
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Reshape
+{
+
+// Decodes the NNAPI operand index lists for an ANEURALNETWORKS_RESHAPE
+// operation into named operand indices.
+Param::Param(uint32_t inputCount, const uint32_t* inputs,
+ uint32_t outputCount, const uint32_t* outputs)
+{
+ // RESHAPE takes exactly two inputs (tensor + target shape) and one output.
+ assert(inputCount == 2 && outputCount == 1);
+
+ output_index = outputs[0];
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> A tensor, specifying the tensor to be reshaped.
+ // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output tensor
+ input_index = inputs[0];
+ shape_index = inputs[1];
+}
+
+} // namespace Reshape
+} // namespace op
+} // namespace tflite
+} // namespace internal
--- /dev/null
+#ifndef __INTERNAL_OP_RESHAPE_H__
+#define __INTERNAL_OP_RESHAPE_H__
+
+#include "internal/op/Node.h"
+
+#include <cstdint>
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Reshape
+{
+
+// Operand indices of a RESHAPE operation, as decoded from the NNAPI model.
+struct Param
+{
+ int32_t output_index; // reshaped output tensor
+
+ int32_t input_index; // tensor to be reshaped
+ int32_t shape_index; // 1-D INT32 tensor describing the output shape
+
+ Param() = default;
+ // Decodes the raw NNAPI input/output index lists (defined in Reshape.cc).
+ Param(uint32_t inputCount, const uint32_t* inputs,
+ uint32_t outputCount, const uint32_t* outputs);
+};
+
+// IR node for a RESHAPE operation; immutable once constructed.
+class Node final : public op::Node
+{
+public:
+ Node(const Param &param) : _param(param)
+ {
+ // DO NOTHING
+ }
+
+public:
+ virtual ~Node() = default;
+
+public:
+ // Read-only access to the decoded operand indices.
+ const Param &param(void) const { return _param; }
+
+public:
+ // Visitor entry point (double dispatch).
+ void accept(NodeVisitor &&) const override;
+
+private:
+ const Param _param;
+};
+
+} // namespace Reshape
+} // namespace op
+} // namespace tflite
+} // namespace internal
+
+#endif // __INTERNAL_OP_RESHAPE_H__
break;
}
+ case ANEURALNETWORKS_RESHAPE:
+ {
+ using internal::tflite::op::Reshape::Param;
+ using internal::tflite::op::Reshape::Node;
+
+ // Add 'operations'
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+ break;
+ }
default:
throw std::runtime_error{"Not supported operation"};
};