#include "model/operation/AvgPool2DNode.h"
#include "util/Utils.h"
#include "util/Padding.h"
+#include "util/ShapeInference.h"
#include "misc/polymorphic_downcast.h"
namespace neurun
assert(in_tensor->num_dimensions() == 4);
- // TODO handle unspecified output shape:
- // calculate output shape using ifm shape, kernel width/height, padding, stride
const auto output_info = env->model().operands.at(out_index).info();
- assert(output_info.total_size() != 0);
- env->allocateIfNeeded(out_index, output_info);
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &avgpool_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::AvgPool2DNode &>(node);
+ const auto infered_output_shapes =
+ shape_inference::inferAvgPoolShape(in_tensor->tensorInfo().shape(), avgpool_node.param());
+ env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
auto out_tensor = env->tensorAt(out_index);
UNUSED_RELEASE(out_tensor);
#include "model/operation/Conv2DNode.h"
#include "util/Utils.h"
#include "util/Padding.h"
+#include "util/ShapeInference.h"
#include "misc/polymorphic_downcast.h"
namespace neurun
UNUSED_RELEASE(kernel_tensor);
UNUSED_RELEASE(bias_tensor);
- // TODO handle unspecified output shape:
- // calculate output shape using ifm shape, kernel shape, padding, stride
const auto output_info = env->model().operands.at(out_index).info();
- assert(output_info.total_size() != 0);
- env->allocateIfNeeded(out_index, output_info);
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &conv_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::Conv2DNode &>(node);
+ const auto infered_output_shapes = shape_inference::inferConv2DShape(
+ in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(), conv_node.param());
+ env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
auto out_tensor = env->tensorAt(out_index);
UNUSED_RELEASE(out_tensor);
*/
#include <cker/operation/DepthwiseConv.h>
+#include <misc/polymorphic_downcast.h>
#include "OperationUtil.h"
#include "model/operation/DepthwiseConv2DNode.h"
#include "util/Padding.h"
#include "util/Utils.h"
+#include "util/ShapeInference.h"
namespace neurun
{
- // TODO handle unspecified output shape:
- // calculate output shape using ifm shape, kernel shape, padding, stride
const auto output_info = env->model().operands.at(out_index).info();
- assert(output_info.total_size() != 0);
- env->allocateIfNeeded(out_index, output_info);
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &depth_conv_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::DepthwiseConv2DNode &>(node);
+ const auto infered_output_shapes = shape_inference::inferDepthwiseConv2DShape(
+ in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(),
+ depth_conv_node.param());
+ env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
auto out_tensor = env->tensorAt(out_index);
UNUSED_RELEASE(out_tensor);
#include "model/operation/MaxPool2DNode.h"
#include "util/Utils.h"
#include "util/Padding.h"
+#include "util/ShapeInference.h"
#include "misc/polymorphic_downcast.h"
namespace neurun
assert(in_tensor->num_dimensions() == 4);
UNUSED_RELEASE(in_tensor);
- // TODO handle unspecified output shape:
- // calculate output shape using ifm shape, kernel width/height, padding, stride
const auto output_info = env->model().operands.at(out_index).info();
- assert(output_info.total_size() != 0);
- env->allocateIfNeeded(out_index, output_info);
+ if (output_info.total_size() == 0)
+ {
+ // Handle unspecified output shape
+ const auto &maxpool_node =
+ nnfw::misc::polymorphic_downcast<const model::operation::MaxPool2DNode &>(node);
+ const auto infered_output_shapes =
+ shape_inference::inferMaxPoolShape(in_tensor->tensorInfo().shape(), maxpool_node.param());
+ env->allocateIfNeeded(out_index, {infered_output_shapes[0], output_info.typeInfo()});
+ }
+ else
+ {
+ env->allocateIfNeeded(out_index, output_info);
+ }
auto out_tensor = env->tensorAt(out_index);
UNUSED_RELEASE(out_tensor);