#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/CL/CLScheduler.h>
+#include <arm_compute/runtime/CL/functions/CLPoolingLayer.h>
#include "compilation.h"
#include "model.h"
+#include "logging.h"
+
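+// Padding/Stride are hoisted to file scope so that both the convolution and
+// pooling stages below can share them (and the asPadStrideInfo helper).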
+struct Padding
+{
+ uint32_t top;
+ uint32_t bottom;
+ uint32_t left;
+ uint32_t right;
+};
+
+struct Stride
+{
+ uint32_t vertical;
+ uint32_t horizontal;
+};
::arm_compute::TensorInfo asTensorInfo(const nnfw::util::feature::Shape &shape)
{
// NOTE Dimension order (W, H, C) is assumed here to match ACL's feature-map layout
return ::arm_compute::TensorInfo(::arm_compute::TensorShape{shape.W, shape.H, shape.C}, 1, ::arm_compute::DataType::F32);
}
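+// Convert per-side padding and stride into ACL's PadStrideInfo
+// (stride x/y first, then left/right/top/bottom padding, FLOOR rounding)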
+::arm_compute::PadStrideInfo asPadStrideInfo(const Padding &padding, const Stride &stride)
+{
+ return ::arm_compute::PadStrideInfo{stride.horizontal, stride.vertical,
+ padding.left, padding.right,
+ padding.top, padding.bottom,
+ ::arm_compute::DimensionRoundingType::FLOOR};
+}
struct IAllocationContext
{
// TODO Set initializer for kernel and bias
// Construct operation parameters
- struct Padding
- {
- uint32_t top;
- uint32_t bottom;
- uint32_t left;
- uint32_t right;
- };
-
- struct Stride
- {
- uint32_t vertical;
- uint32_t horizontal;
- };
-
struct Param
{
int ofm_index;
auto ker_alloc = ctx.at(::internal::tflite::operand::Index{param.ker_index});
auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
- const ::arm_compute::PadStrideInfo conv_info{param.stride.horizontal, param.stride.vertical,
- param.padding.left, param.padding.right,
- param.padding.top, param.padding.bottom,
- ::arm_compute::DimensionRoundingType::FLOOR};
+ const auto conv_info = asPadStrideInfo(param.padding, param.stride);
std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
void Planner::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
{
- throw std::runtime_error{"Not supported, yet"};
+ const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+ const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+
+ const ::internal::tflite::operand::Index kh_index{node.param().kh_index};
+ const ::internal::tflite::operand::Index kw_index{node.param().kw_index};
+
+ const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index};
+ const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+ const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+ const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+ const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+ // Set Shape Constraints
+ _builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape));
+ _builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape));
+
+ // Construct operation parameters
+ struct Param
+ {
+ int ofm_index;
+ int ifm_index;
+
+ uint32_t kw;
+ uint32_t kh;
+
+ Padding padding;
+ Stride stride;
+
+ // TODO Add 'activation' field
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index.asInt();
+ param.ifm_index = ifm_index.asInt();
+
+ param.kh = kh;
+ param.kw = kw;
+
+ param.stride.vertical = vstride;
+ param.stride.horizontal = hstride;
+
+ // NOTE The padding calculation below assumes 'ANEURALNETWORKS_PADDING_SAME'
+ {
+ // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
+ //
+ // SAME padding. Padding on both ends are the "same":
+ //
+ // padding_to_beginning = total_padding / 2
+ // padding_to_end = (total_padding + 1)/2.
+ //
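+ // e.g. total_padding = 3 -> padding_to_beginning = 1, padding_to_end = 2
+ //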
+ const int32_t vertical_needed_input = (ofm_shape.H - 1) * vstride + kh;
+ const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
+
+ const int32_t horizontal_needed_input = (ofm_shape.W - 1) * hstride + kw;
+ const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
+
+ param.padding.top = vertical_total_padding / 2;
+ param.padding.bottom = (vertical_total_padding + 1) / 2;
+ param.padding.left = horizontal_total_padding / 2;
+ param.padding.right = (horizontal_total_padding + 1) / 2;
+ }
+
+ VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
+ VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_H: " << ofm_shape.H << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_W: " << ofm_shape.W << std::endl;
+ VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl;
+ VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl;
+ VERBOSE(MaxPool2D) << "STRIDE_H: " << vstride << std::endl;
+ VERBOSE(MaxPool2D) << "STRIDE_W: " << hstride << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(T): " << param.padding.top << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(B): " << param.padding.bottom << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(L): " << param.padding.left << std::endl;
+ VERBOSE(MaxPool2D) << "PAD(R): " << param.padding.right << std::endl;
+
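+ // Build a stage that instantiates and configures CLPoolingLayer once the
+ // input/output allocations are available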
+ auto stage = [param] (const IAllocationContext &ctx, IExecutionBuilder &builder)
+ {
+ auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
+ auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
+
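+ // MAX pooling over a param.kw x param.kh window with the padding/stride computed above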
+ ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::MAX,
+ ::arm_compute::Size2D{param.kw, param.kh},
+ asPadStrideInfo(param.padding, param.stride)};
+
+ std::unique_ptr<::arm_compute::CLPoolingLayer> fn{new ::arm_compute::CLPoolingLayer};
+
+ fn->configure(ifm_alloc, ofm_alloc, info);
+
+ builder.append(std::move(fn));
+ };
+
+ _builder.addStage(stage);
}
class AllocationContext final : public IAllocationContext