const auto vstride_index{node.param().vstride_index};
const auto hstride_index{node.param().hstride_index};
- const auto activation_index{node.param().activation_index};
-
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
neurun::util::Padding padding;
neurun::util::Stride stride;
- FuseCode activation;
+ model::Activation activation;
};
Param param;
param.stride.horizontal = hstride;
// TODO : Extract this to a function
+ const auto padding_type = node.param().padding;
param.padding = [&]() {
- if (!node.param().explicit_padding) // implicit padding
+ if (padding_type != model::Padding::EXPLICIT) // implicit padding
{
- const auto padding_code_index{node.param().padding_code_index};
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_code_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- VERBOSE(AvgPool2D) << "PAD: " << neurun::util::to_string(padding_type) << std::endl;
+ assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
- return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ return (padding_type == model::Padding::SAME)
? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
: neurun::util::valid_padding();
}
}
}();
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+ param.activation = node.param().activation;
VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
const auto vstride_index{node.param().vstride_index};
const auto hstride_index{node.param().hstride_index};
- const auto activation_index{node.param().activation_index};
-
const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
util::Padding padding;
util::Stride stride;
- FuseCode activation;
+ model::Activation activation;
};
Param param;
param.stride.horizontal = hstride;
// TODO : Extract this to a function
+ const auto padding_type = node.param().padding;
param.padding = [&]() {
- if (!node.param().explicit_padding) // implicit padding
+ if (padding_type != model::Padding::EXPLICIT) // implicit padding
{
- const auto padding_code_index{node.param().padding_code_index};
-
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_code_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
+ assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
- return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ return (padding_type == model::Padding::SAME)
? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
_ctx.at(ofm_index).shape().asFeature(), param.stride,
kw, kh)
: neurun::util::valid_padding();
}
}();
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+ param.activation = node.param().activation;
auto tensors = _tensor_builder;
MaxPoolLayer::MaxPoolLayer()
: _inputData(), _outputData(), _inputShape(), _outputShape(), _paddingLeft(0), _paddingTop(0),
_paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0),
- _kernelHeight(0), _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::FLOAT32)
+ _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
{
// DO NOTHING
}
const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideWidth,
const uint32_t strideHeight, const uint32_t kernelWidth,
- const uint32_t kernelHeight, const FuseCode activation,
+ const uint32_t kernelHeight, const model::Activation activation,
uint8_t *outputData, const Shape outputShape)
{
_inputData.u8 = inputData;
const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideWidth,
const uint32_t strideHeight, const uint32_t kernelWidth,
- const uint32_t kernelHeight, const FuseCode activation, uint8_t *outputData,
- const Shape outputShape);
+ const uint32_t kernelHeight, const model::Activation activation,
+ uint8_t *outputData, const Shape outputShape);
void run();
uint32_t _kernelWidth;
uint32_t _kernelHeight;
- FuseCode _activation;
+ model::Activation _activation;
OperandType _inputType;
};
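With `_activation` stored as a `model::Activation`, the layer can derive its output clamp range from the enum instead of the NNAPI `FuseCode`. A hedged sketch of such a mapping; only `model::Activation::NONE` appears in this patch, so the other enumerators are assumed to mirror NNAPI's fuse codes:

#include <limits>

// Hypothetical helper (not part of this patch): translate the fused
// activation into the min/max clamp applied to pooled outputs.
void calculateActivationRange(model::Activation activation, float *act_min, float *act_max)
{
  switch (activation)
  {
    case model::Activation::RELU: // assumed enumerator
      *act_min = 0.f;
      *act_max = std::numeric_limits<float>::max();
      break;
    case model::Activation::RELU1: // assumed enumerator
      *act_min = -1.f;
      *act_max = 1.f;
      break;
    case model::Activation::RELU6: // assumed enumerator
      *act_min = 0.f;
      *act_max = 6.f;
      break;
    default: // model::Activation::NONE, as used in the constructor above
      *act_min = std::numeric_limits<float>::lowest();
      *act_max = std::numeric_limits<float>::max();
      break;
  }
}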
#include <memory>
#include "model/Operation.h"
+#include "model/InternalType.h"
namespace neurun
{
OperandIndex hstride_index;
OperandIndex vstride_index;
- OperandIndex padding_code_index;
+ Padding padding;
OperandIndex padding_left_index;
OperandIndex padding_right_index;
OperandIndex padding_top_index;
OperandIndex padding_bottom_index;
- OperandIndex activation_index;
-
- bool explicit_padding;
+ Activation activation;
};
public:
};
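The `Padding` and `Activation` members above come from the newly included `model/InternalType.h`. A sketch of the minimal declarations this diff relies on; only `EXPLICIT`, `SAME`, `VALID`, and `NONE` are actually referenced here, so the remaining enumerators are assumptions:

// Sketch of model/InternalType.h as used by this patch.
namespace neurun
{
namespace model
{

enum class Padding
{
  EXPLICIT, // padding values supplied as operands
  SAME,     // implicit, NNAPI "SAME" scheme
  VALID,    // implicit, NNAPI "VALID" scheme
};

enum class Activation
{
  NONE,
  RELU,  // assumed, mirroring NNAPI FuseCode
  RELU1, // assumed
  RELU6, // assumed
};

} // namespace model
} // namespace neurun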
_map[ANEURALNETWORKS_MAX_POOL_2D] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &operands) {
assert(init_param.input_count == 7 || init_param.input_count == 10);
assert(init_param.output_count == 1);
// 5 -> Filter Height Index
// 6 -> FuseCode (activation) Index
- param.explicit_padding = false;
- param.padding_code_index = OperandIndex{init_param.inputs[1]};
+ const auto padding_index = OperandIndex{init_param.inputs[1]};
+ const auto activation_index = OperandIndex{init_param.inputs[6]};
+
+ param.padding =
+     NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
param.hstride_index = OperandIndex{init_param.inputs[2]};
param.vstride_index = OperandIndex{init_param.inputs[3]};
param.kw_index = OperandIndex{init_param.inputs[4]};
param.kh_index = OperandIndex{init_param.inputs[5]};
- param.activation_index = OperandIndex{init_param.inputs[6]};
+ param.activation =
+     NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
}
else if (init_param.input_count == 10) // support explicit padding
{
// 8 -> Filter Height Index
// 9 -> FuseCode (activation) Index
- param.explicit_padding = true;
+ const auto activation_index = OperandIndex{init_param.inputs[9]};
+
+ param.padding = Padding::EXPLICIT;
param.padding_left_index = OperandIndex{init_param.inputs[1]};
param.padding_right_index = OperandIndex{init_param.inputs[2]};
param.kw_index = OperandIndex{init_param.inputs[7]};
param.kh_index = OperandIndex{init_param.inputs[8]};
- param.activation_index = OperandIndex{init_param.inputs[9]};
+ param.activation =
+     NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
}
return new operation::MaxPool2DNode{inputs, outputs, param};
};
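The factory now funnels every NNAPI enum through `NNAPIConvert::getPaddingType` and `NNAPIConvert::getFusedActivation`. Those names come from the diff itself; the bodies below are a plausible sketch, and the error handling in particular is an assumption:

#include <stdexcept>

// Plausible bodies for the two converters referenced above.
model::Padding NNAPIConvert::getPaddingType(PaddingCode type)
{
  switch (type)
  {
    case ANEURALNETWORKS_PADDING_SAME:
      return model::Padding::SAME;
    case ANEURALNETWORKS_PADDING_VALID:
      return model::Padding::VALID;
    default:
      throw std::runtime_error("Unsupported padding code");
  }
}

model::Activation NNAPIConvert::getFusedActivation(FuseCode act)
{
  switch (act)
  {
    case ANEURALNETWORKS_FUSED_NONE:
      return model::Activation::NONE;
    case ANEURALNETWORKS_FUSED_RELU:
      return model::Activation::RELU;
    case ANEURALNETWORKS_FUSED_RELU1:
      return model::Activation::RELU1;
    case ANEURALNETWORKS_FUSED_RELU6:
      return model::Activation::RELU6;
    default:
      throw std::runtime_error("Unsupported fused activation");
  }
}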