uint32_t horizontal;
};
+Padding valid_padding(void)
+{
+ //
+ // ANEURALNETWORKS_PADDING_VALID
+ //
+ // VALID padding. No padding.
+ //
+ // When the input size is not evenly divisible by the filter size,
+ // the input at the end that could not fill the whole filter tile
+ // will simply be ignored.
+ //
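+  // For example, pooling a 5-wide input with a 2-wide filter and a
+  // horizontal stride of 2 yields 2 output columns (covering input
+  // columns 0-1 and 2-3); the final input column is ignored.
+  //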
+ Padding padding;
+
+ padding.top = 0;
+ padding.bottom = 0;
+ padding.left = 0;
+ padding.right = 0;
+
+ return padding;
+}
+
+Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
+ const nnfw::util::feature::Shape &ofm_shape,
+ const Stride &stride,
+ uint32_t kw, uint32_t kh)
+{
+ Padding padding;
+
+ // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
+ //
+ // SAME padding. Padding on both ends are the "same":
+ //
+ // padding_to_beginning = total_padding / 2
+ // padding_to_end = (total_padding + 1)/2.
+ //
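+  // For example, with ifm_shape.H == 4, ofm_shape.H == 2, stride.vertical == 2
+  // and kh == 3, the needed input is (2 - 1) * 2 + 3 = 5, giving a total
+  // vertical padding of 5 - 4 = 1, split as top = 0 and bottom = 1.
+  //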
+ const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
+ const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
+
+ const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
+ const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
+
+ padding.top = vertical_total_padding / 2;
+ padding.bottom = (vertical_total_padding + 1) / 2;
+ padding.left = horizontal_total_padding / 2;
+  padding.right = (horizontal_total_padding + 1) / 2;
+
+ return padding;
+}
+
template<typename T> std::unique_ptr<T> make_layer(void)
{
return std::unique_ptr<T>{new T};
const ::internal::tflite::operand::Index vstride_index{node.param().vstride_index};
const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index};
+ const ::internal::tflite::operand::Index padding_index{node.param().padding_index};
+
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
const int32_t vstride = _ctx.at(vstride_index).asScala<int32_t>();
const int32_t hstride = _ctx.at(hstride_index).asScala<int32_t>();
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_index).asScala<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
// Set Shape Constraints
_builder.addShapeConstr(ofm_index, asTensorInfo(ofm_shape));
_builder.addShapeConstr(ifm_index, asTensorInfo(ifm_shape));
param.stride.vertical = vstride;
param.stride.horizontal = hstride;
- // NOTE padding code is assumes as 'ANEURALNETWORKS_PADDING_SAME'
- {
- // ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
- //
- // SAME padding. Padding on both ends are the "same":
- //
- // padding_to_beginning = total_padding / 2
- // padding_to_end = (total_padding + 1)/2.
- //
- const int32_t vertical_needed_input = (ofm_shape.H - 1) * vstride + kh;
- const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
-
- const int32_t horizontal_needed_input = (ofm_shape.W - 1) * hstride + kw;
- const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
-
- param.padding.top = vertical_total_padding / 2;
- param.padding.bottom = (vertical_total_padding + 1) / 2;
- param.padding.left = horizontal_total_padding / 2;
- param.padding.right = (horizontal_total_padding + 1)/ 2;
- }
+ param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+ : valid_padding();
VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;