Revert the same-padding calculation: derive output size from the actual output shape (ofm_shape) instead of recomputing it from the input shape, because the current padding calculation is incorrect
Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
const auto activation_index{node.param().activation_index};
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
neurun::util::Stride stride;
(ANEURALNETWORKS_PADDING_VALID == padding_type));
return (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+ ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W,
+ ker_shape.H)
: neurun::util::valid_padding();
}
else // explicit padding
(ANEURALNETWORKS_PADDING_VALID == padding_type));
return (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+ ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), param.stride,
ker_shape.W, ker_shape.H)
: neurun::util::valid_padding();
}
VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
return (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(ifm_shape, param.stride, kw, kh)
+ ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
: neurun::util::valid_padding();
}
else // explicit padding
VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
return (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(ifm_shape, param.stride, kw, kh)
+ ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
: neurun::util::valid_padding();
}
else // explicit padding
const auto activation_index{node.param().activation_index};
- const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
(ANEURALNETWORKS_PADDING_VALID == padding_type));
return (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), stride,
+ ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), stride,
_ctx.at(ker_index).shape().asKernel().W,
_ctx.at(ker_index).shape().asKernel().H)
: neurun::util::valid_padding();
param.stride = stride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(), stride,
+ ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), stride,
_ctx.at(ker_index).shape().asKernel().W,
_ctx.at(ker_index).shape().asKernel().H)
: util::valid_padding();
(ANEURALNETWORKS_PADDING_VALID == padding_type));
return (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+ ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), param.stride,
kw, kh)
: neurun::util::valid_padding();
}
(ANEURALNETWORKS_PADDING_VALID == padding_type));
return (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(), param.stride,
+ ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), param.stride,
kw, kh)
: neurun::util::valid_padding();
}
return padding;
}
-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+ const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
uint32_t kh)
{
Padding padding;
// padding_to_beginning = total_padding / 2
// padding_to_end = (total_padding + 1)/2.
//
- const int32_t out_size_height = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
- const int32_t out_size_width = (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
-
- const int32_t vertical_needed_input = (out_size_height - 1) * stride.vertical + kh;
+ const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
- const int32_t horizontal_needed_input = (out_size_width - 1) * stride.horizontal + kw;
+ const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
padding.top = vertical_total_padding / 2;
};
Padding valid_padding(void);
-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+ const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
uint32_t kh);
} // namespace util
return padding;
}
-Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape, const Stride &stride, uint32_t kw,
+Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
+ const nnfw::misc::feature::Shape &ofm_shape, const Stride &stride, uint32_t kw,
uint32_t kh)
{
Padding padding;
// padding_to_beginning = total_padding / 2
// padding_to_end = (total_padding + 1)/2.
//
- const int32_t out_size_height = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
- const int32_t out_size_width = (ifm_shape.W + stride.horizontal - 1) / stride.horizontal;
-
- const int32_t vertical_needed_input = (out_size_height - 1) * stride.vertical + kh;
+ const int32_t vertical_needed_input = (ofm_shape.H - 1) * stride.vertical + kh;
const int32_t vertical_total_padding = std::max(0, vertical_needed_input - ifm_shape.H);
- const int32_t horizontal_needed_input = (out_size_width - 1) * stride.horizontal + kw;
+ const int32_t horizontal_needed_input = (ofm_shape.W - 1) * stride.horizontal + kw;
const int32_t horizontal_total_padding = std::max(0, horizontal_needed_input - ifm_shape.W);
padding.top = vertical_total_padding / 2;
param.stride = stride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+ ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
: valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
param.stride = stride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, stride, ker_shape.W, ker_shape.H)
+ ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
: valid_padding();
param.multipler = multiplier;
param.stride.horizontal = hstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, param.stride, kw, kh)
+ ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
: valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
param.stride.horizontal = hstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, param.stride, kw, kh)
+ ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
: valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
param.stride.vertical = vstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ofm_shape, param.stride, ker_shape.W, ker_shape.H)
+ ? same_padding(ofm_shape, ifm_shape, param.stride, ker_shape.W, ker_shape.H)
: valid_padding();
auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
param.stride.horizontal = hstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, param.stride, kw, kh)
+ ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
: valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());