void visit(const ::internal::tflite::op::RSQRT::Node &node) override;
void visit(const ::internal::tflite::op::Pad::Node &node) override;
void visit(const ::internal::tflite::op::SpaceToDepth::Node &node) override;
+ void visit(const ::internal::tflite::op::L2Pool2D::Implicit::Node &node) override;
private:
const ::internal::tflite::operand::Set &_ctx;
_builder.addStage(stage);
}
+// Plans an implicit-padding L2_POOL_2D node: registers tensor-shape constraints for
+// the input/output feature maps, then enqueues a deferred stage that instantiates the
+// ACL pooling layer (CLPoolingLayer in GPU mode, NEPoolingLayer otherwise) and appends
+// the fused activation, if any, on the output.
+void Planner::visit(const ::internal::tflite::op::L2Pool2D::Implicit::Node &node)
+{
+  const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+  const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+
+  const ::internal::tflite::operand::Index kh_index{node.param().kh_index};
+  const ::internal::tflite::operand::Index kw_index{node.param().kw_index};
+
+  const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index};
+  const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index};
+
+  const ::internal::tflite::operand::Index padding_index{node.param().padding_index};
+  const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
+
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+  // Kernel size, strides, padding scheme and activation arrive as scalar operands.
+  const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+  const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+  const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+  const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+  const PaddingCode padding_type =
+      static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+  assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+         (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+  _builder.addShapeConstr(ofm_index, asTensorInfo(asTensorShape(_ctx.at(ofm_index).shape()),
+                                                  _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index, asTensorInfo(asTensorShape(_ctx.at(ifm_index).shape()),
+                                                  _ctx.at(ifm_index).type()));
+
+  // Plain-data parameter pack captured by value into the deferred stage below,
+  // so the stage stays valid after this visit returns.
+  struct Param
+  {
+    int ofm_index;
+    int ifm_index;
+
+    uint32_t kw;
+    uint32_t kh;
+
+    Padding padding;
+    Stride stride;
+
+    FuseCode activation;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index.asInt();
+  param.ifm_index = ifm_index.asInt();
+
+  param.kh = kh;
+  param.kw = kw;
+
+  param.stride.vertical = vstride;
+  param.stride.horizontal = hstride;
+
+  // SAME padding is derived from the IFM/OFM geometry; VALID padding is all-zero.
+  param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+                      ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+                      : valid_padding();
+  param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+  auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
+    auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
+    auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
+
+    // NOTE: asPadStrideInfo (was misspelled "asPadStringInfo") converts the frontend
+    // padding/stride into ACL's ::arm_compute::PadStrideInfo.
+    ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::L2,
+                                         ::arm_compute::Size2D{param.kw, param.kh},
+                                         asPadStrideInfo(param.padding, param.stride)};
+
+    if (::internal::arm_compute::isGpuMode())
+    {
+      std::unique_ptr<::arm_compute::CLPoolingLayer> fn{new ::arm_compute::CLPoolingLayer};
+
+      fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), info);
+
+      builder.append("L2Pool2D", std::move(fn));
+    }
+    else
+    {
+      std::unique_ptr<::arm_compute::NEPoolingLayer> fn{new ::arm_compute::NEPoolingLayer};
+
+      fn->configure(ifm_alloc, ofm_alloc, info);
+
+      builder.append("L2Pool2D", std::move(fn));
+    }
+
+    // Fused activation (e.g. RELU) is applied in-place on the OFM after pooling.
+    ActivationBuilder{builder}.append(param.activation, ofm_alloc);
+  };
+
+  _builder.addStage(stage);
+}
+
class AllocationContext final : public IAllocationContext
{
public:
--- /dev/null
+#include "internal/op/L2Pool2D.h"\r
+#include "internal/op/NodeVisitor.h"\r
+\r
+#include <cassert>\r
+\r
+namespace internal\r
+{\r
+namespace tflite\r
+{\r
+namespace op\r
+{\r
+namespace L2Pool2D\r
+{\r
+namespace Implicit\r
+{\r
+\r
+// Double-dispatch entry point: routes this node to the L2Pool2D::Implicit\r
+// overload of NodeVisitor::visit.\r
+void Node::accept(NodeVisitor &&v) const { v.visit(*this); }\r
+\r
+} // namespace Implicit\r
+} // namespace L2Pool2D\r
+} // namespace op\r
+} // namespace tflite\r
+} // namespace internal\r
+namespace internal\r
+{\r
+namespace tflite\r
+{\r
+namespace op\r
+{\r
+namespace L2Pool2D\r
+{\r
+namespace Implicit\r
+{\r
+\r
+// Decodes the NNAPI operand-index arrays for an implicit-padding L2_POOL_2D\r
+// operation (7 inputs, 1 output) into named index fields. Counts are checked\r
+// with assert only, so they are not validated in release builds.\r
+Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,\r
+             const uint32_t *outputs)\r
+{\r
+  assert(inputCount == 7 && outputCount == 1);\r
+\r
+  ofm_index = outputs[0];\r
+\r
+  // Each input should be interpreted as follows:\r
+  //\r
+  // 0 -> IFM Tensor Index\r
+  // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index\r
+  // 2 -> Horizontal (over width) Stride Index\r
+  // 3 -> Vertical (over height) Stride Index\r
+  // 4 -> Filter Width Index\r
+  // 5 -> Filter Height Index\r
+  // 6 -> FuseCode (activation) Index\r
+  ifm_index = inputs[0];\r
+  padding_index = inputs[1];\r
+  hstride_index = inputs[2];\r
+  vstride_index = inputs[3];\r
+  kw_index = inputs[4];\r
+  kh_index = inputs[5];\r
+  activation_index = inputs[6];\r
+}\r
+\r
+} // namespace Implicit\r
+} // namespace L2Pool2D\r
+} // namespace op\r
+} // namespace tflite\r
+} // namespace internal\r
--- /dev/null
+#ifndef __INTERNAL_OP_L2_POOL_2D_H__\r
+#define __INTERNAL_OP_L2_POOL_2D_H__\r
+\r
+#include "internal/op/Node.h"\r
+\r
+#include <cstdint>\r
+\r
+namespace internal\r
+{\r
+namespace tflite\r
+{\r
+namespace op\r
+{\r
+namespace L2Pool2D\r
+{\r
+namespace Implicit\r
+{\r
+\r
+// Operand indices (into the model's operand table) for an implicit-padding\r
+// L2_POOL_2D operation. Populated by the decoding constructor below.\r
+struct Param\r
+{\r
+  int32_t ofm_index;\r
+\r
+  int32_t ifm_index;\r
+\r
+  int32_t kw_index;\r
+  int32_t kh_index;\r
+\r
+  int32_t hstride_index;\r
+  int32_t vstride_index;\r
+\r
+  int32_t padding_index;\r
+  int32_t activation_index;\r
+\r
+  Param() = default;\r
+  // Decodes NNAPI input/output operand-index arrays (7 inputs, 1 output).\r
+  Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);\r
+};\r
+\r
+// IR node for implicit-padding L2 pooling; visited via NodeVisitor double dispatch.\r
+class Node final : public op::Node\r
+{\r
+public:\r
+  Node(const Param &param) : _param(param)\r
+  {\r
+    // DO NOTHING\r
+  }\r
+\r
+public:\r
+  virtual ~Node() = default;\r
+\r
+public:\r
+  const Param &param(void) const { return _param; }\r
+\r
+public:\r
+  void accept(NodeVisitor &&) const override;\r
+\r
+private:\r
+  const Param _param;\r
+};\r
+\r
+} // namespace Implicit\r
+} // namespace L2Pool2D\r
+} // namespace op\r
+} // namespace tflite\r
+} // namespace internal\r
+\r
+#endif // __INTERNAL_OP_L2_POOL_2D_H__\r
#include "internal/op/RSQRT.h"
#include "internal/op/Pad.h"
#include "internal/op/SpaceToDepth.h"
+#include "internal/op/L2Pool2D.h"
namespace internal
{
virtual void visit(const RSQRT::Node &) = 0;
virtual void visit(const Pad::Node &) = 0;
virtual void visit(const SpaceToDepth::Node &) = 0;
+ virtual void visit(const L2Pool2D::Implicit::Node &) = 0;
};
} // namespace op
break;
}
+    case ANEURALNETWORKS_L2_POOL_2D:
+    {
+      // Input count is 7 for Implicit Padding
+      // TODO: Support explicit padding i.e. for input count 10.
+      assert(inputCount == 7);
+      assert(outputCount == 1);
+
+      if (inputCount == 7)
+      {
+        using internal::tflite::op::L2Pool2D::Implicit::Param;
+        using internal::tflite::op::L2Pool2D::Implicit::Node;
+
+        // Add 'operations'
+        auto &operations = model->deref().operations();
+
+        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+      }
+      else
+      {
+        // Unreachable in debug builds (asserted above); guards release builds.
+        /* TODO: Support Explicit padding as well. */
+        throw std::runtime_error{"Not supported operation"};
+      }
+
+      break;
+    }
default:
throw std::runtime_error{"Not supported operation"};
};