void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
void visit(const ::internal::tflite::op::ResizeBilinear::Node &node) override;
void visit(const ::internal::tflite::op::Reshape::Node &node) override;
+ void visit(const ::internal::tflite::op::Squeeze::Node &node) override;
void visit(const ::internal::tflite::op::Softmax::Node &node) override;
void visit(const ::internal::tflite::op::StridedSlice::Node &node) override;
void visit(const ::internal::tflite::op::ReduceMax::Node &node) override;
_builder.addStage(stage);
}
+void Planner::visit(const ::internal::tflite::op::Squeeze::Node &node)
+{
+ // node.param().dims_index_optional is ignored: the output tensor already has
+ // the squeezed shape (baked in by the freezer/toco pipeline), so squeezing
+ // reduces to a plain reshape from input to output.
+ const ::internal::tflite::operand::Index output_index{node.param().output_index};
+ const ::internal::tflite::operand::Index input_index{node.param().input_index};
+
+ // Only inputs of rank < 4 are supported for now. Note that
+ // param().dims_index_optional is optional. Generated tests:
+ // - squeeze_float_1 passes (listed as 3D input)
+ // - squeeze_3D_float_1 passes (listed as 2D input; NOTE(review): the 2D/3D
+ //   labels look swapped relative to the test names -- confirm)
+ // - squeeze.mod.py (4D input) fails -> general tensor support is required
+ assert(_ctx.at(input_index).shape().rank() < 4);
+
+ // TODO Support generic tensor shape
+ const auto output_shape = _ctx.at(output_index).shape();
+ const auto input_shape = _ctx.at(input_index).shape();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(output_index, asTensorInfo(output_shape, _ctx.at(output_index).type()));
+ _builder.addShapeConstr(input_index, asTensorInfo(input_shape, _ctx.at(input_index).type()));
+
+ // Construct operation parameters (plain ints so the stage lambda below can
+ // capture them by value, independent of this visitor's lifetime)
+ struct Param
+ {
+ int output_index;
+ int input_index;
+ };
+
+ Param param;
+
+ param.output_index = output_index.asInt();
+ param.input_index = input_index.asInt();
+
+ // Deferred stage: at execution time the squeeze is performed as a
+ // CLReshapeLayer (same element count, different shape).
+ auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
+ auto output_alloc = ctx.at(::internal::tflite::operand::Index{param.output_index});
+ auto input_alloc = ctx.at(::internal::tflite::operand::Index{param.input_index});
+
+ auto fn = nnfw::make_unique<::arm_compute::CLReshapeLayer>();
+ fn->configure(input_alloc, output_alloc);
+
+ builder.append("Squeeze", std::move(fn));
+ };
+
+ _builder.addStage(stage);
+}
+
void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
{
VERBOSE(Softmax) << "Configure SOFTMAX operation" << std::endl;
--- /dev/null
+#include "internal/op/Squeeze.h"
+#include "internal/op/NodeVisitor.h"
+
+#include <cassert>
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Squeeze
+{
+
+// Double-dispatch hook of the visitor pattern: forwards this Squeeze node to v.
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+} // namespace Squeeze
+} // namespace op
+} // namespace tflite
+} // namespace internal
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Squeeze
+{
+// Builds Squeeze parameters from the NN API operand index arrays.
+//
+// Inputs : [0] tensor to squeeze, [1] (optional) dims tensor
+// Outputs: [0] squeezed tensor
+//
+// dims_index is an optional input; when it is not provided,
+// dims_index_optional keeps its in-class default of -1.
+Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
+ const uint32_t *outputs)
+{
+ assert(inputCount == 1 || inputCount == 2);
+ assert(outputCount == 1);
+
+ output_index = outputs[0];
+
+ input_index = inputs[0];
+
+ // dims_index_optional = -1 by default (set only when the dims input exists)
+ if (inputCount == 2)
+ dims_index_optional = inputs[1];
+}
+
+} // namespace Squeeze
+} // namespace op
+} // namespace tflite
+} // namespace internal
--- /dev/null
+#ifndef __INTERNAL_OP_SQUEEZE_H__
+#define __INTERNAL_OP_SQUEEZE_H__
+
+#include "internal/op/Node.h"
+
+#include <cstdint>
+
+namespace internal
+{
+namespace tflite
+{
+namespace op
+{
+namespace Squeeze
+{
+
+// Operand indices for an NN API SQUEEZE operation.
+struct Param
+{
+ int32_t output_index;
+
+ int32_t input_index;
+ int32_t dims_index_optional = -1; // optional param. default is -1 (dims absent)
+
+ Param() = default;
+ Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
+};
+
+// IR node for SQUEEZE; holds its Param immutably once constructed.
+class Node final : public op::Node
+{
+public:
+ Node(const Param &param) : _param(param)
+ {
+ // DO NOTHING
+ }
+
+public:
+ virtual ~Node() = default;
+
+public:
+ const Param &param(void) const { return _param; }
+
+public:
+ void accept(NodeVisitor &&) const override;
+
+private:
+ const Param _param;
+};
+
+} // namespace Squeeze
+} // namespace op
+} // namespace tflite
+} // namespace internal
+
+#endif // __INTERNAL_OP_SQUEEZE_H__
break;
}
+ case ANEURALNETWORKS_SQUEEZE:
+ {
+ using internal::tflite::op::Squeeze::Param;
+ using internal::tflite::op::Squeeze::Node;
+
+ // Add 'operations'
+ // Param's constructor validates the operand counts (1 or 2 inputs, 1 output)
+ // and records the optional dims operand when present.
+ auto &operations = model->deref().operations();
+
+ operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+ break;
+ }
case ANEURALNETWORKS_FULLY_CONNECTED:
{
using internal::tflite::op::FullyConnected::Param;