Move depthwise convolution padding & activation type resolution from backend to frontend
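
This commit resolves the padding type and fused activation of depthwise convolution in the NNAPI frontend instead of in each backend's stage generator: the operation's Param now carries model::Padding and model::Activation values directly, so backends no longer read the padding and activation operands themselves.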
Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
const auto vstride_index{node.param().vstride_index};
const auto hstride_index{node.param().hstride_index};
- const auto padding_index{node.param().padding_index};
const auto multiplier_index{node.param().multiplier_index};
- const auto activation_index{node.param().activation_index};
const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
neurun::util::Stride stride;
int multiplier;
- FuseCode activation;
+ model::Activation activation;
};
Param param;
param.stride = stride;
// TODO : Extract this to a function
+ const auto padding_type = node.param().padding;
param.padding = [&]() {
- if (!node.param().explicit_padding) // implicit padding
+ if (padding_type != model::Padding::EXPLICIT) // implicit padding
{
- const auto padding_index{node.param().padding_index};
-
- const PaddingCode padding_type =
-     static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-        (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
+ return (padding_type == model::Padding::SAME)
? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
_ctx.at(ofm_index).shape().asFeature(), param.stride,
ker_shape.W, ker_shape.H)
}
}();
param.multiplier = multiplier;
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+ param.activation = node.param().activation;
auto tensors = _tensor_builder;
const auto vstride_index{node.param().vstride_index};
const auto hstride_index{node.param().hstride_index};
- const auto padding_index{node.param().padding_index};
const auto multiplier_index{node.param().multiplier_index};
- const auto activation_index{node.param().activation_index};
const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
neurun::util::Stride stride;
int multiplier;
- FuseCode activation;
+ model::Activation activation;
};
Param param;
param.stride = stride;
// TODO : Extract this to a function
+ const auto padding_type = node.param().padding;
param.padding = [&]() {
- if (!node.param().explicit_padding) // implicit padding
+ if (padding_type != model::Padding::EXPLICIT) // implicit padding
{
- const auto padding_index{node.param().padding_index};
-
- const PaddingCode padding_type =
-     static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-        (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
+ return (padding_type == model::Padding::SAME)
? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
_ctx.at(ofm_index).shape().asFeature(), param.stride,
ker_shape.W, ker_shape.H)
}
}();
param.multiplier = multiplier;
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+ param.activation = node.param().activation;
auto tensors = _tensor_builder;
const auto vstride_index{node.param().vstride_index};
const auto hstride_index{node.param().hstride_index};
- const auto padding_index{node.param().padding_index};
const auto multiplier_index{node.param().multiplier_index};
- const auto activation_index{node.param().activation_index};
util::Stride stride;
int multiplier;
- FuseCode activation;
+ model::Activation activation;
};
Param param;
param.stride = stride;
// TODO : Extract this to a function
+ const auto padding_type = node.param().padding;
param.padding = [&]() {
- if (!node.param().explicit_padding) // implicit padding
+ if (padding_type != model::Padding::EXPLICIT) // implicit padding
{
- const auto padding_index{node.param().padding_index};
-
- const PaddingCode padding_type =
-     static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
-        (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
- return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
+ return (padding_type == model::Padding::SAME)
? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
_ctx.at(ofm_index).shape().asFeature(), param.stride,
_ctx.at(ker_index).shape().asKernel().W,
}();
param.multiplier = multiplier;
- param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+ param.activation = node.param().activation;
auto tensors = _tensor_builder;
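
The three backend hunks above share the same resolution pattern: only the frontend-resolved model::Padding value is consulted. Since the hunks elide the explicit-padding branch of the lambda, the following is a minimal sketch of the fully resolved logic; the explicit branch and the valid_padding() helper are assumptions based on the padding_*_index members retained in Param:

    // Sketch, not the verbatim source: the explicit branch is inferred from
    // the padding_{left,right,top,bottom}_index operands retained in Param.
    param.padding = [&]() {
      const auto padding_type = node.param().padding;
      if (padding_type != model::Padding::EXPLICIT) // implicit padding
      {
        assert((padding_type == model::Padding::SAME) || (padding_type == model::Padding::VALID));
        return (padding_type == model::Padding::SAME)
                   ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
                                                _ctx.at(ofm_index).shape().asFeature(),
                                                param.stride, ker_shape.W, ker_shape.H)
                   : neurun::util::valid_padding(); // assumed zero-padding helper
      }
      // explicit padding: read the four scalar operands directly
      neurun::util::Padding padding;
      padding.left = _ctx.at(node.param().padding_left_index).asScalar<int32_t>();
      padding.right = _ctx.at(node.param().padding_right_index).asScalar<int32_t>();
      padding.top = _ctx.at(node.param().padding_top_index).asScalar<int32_t>();
      padding.bottom = _ctx.at(node.param().padding_bottom_index).asScalar<int32_t>();
      return padding;
    }();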
: _inputData(), _kernelData(), _outputData(), _biasData(), _inputShape(), _kernelShape(),
_outputShape(), _biasShape(), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
_paddingBottom(0), _strideWidth(0), _strideHeight(0), _multiplier(0),
- _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::FLOAT32)
+ _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
{
// DO NOTHING
}
void DepthwiseConvolutionLayer::convQuant8() { throw "NYI"; }
-void DepthwiseConvolutionLayer::configure(uint8_t *inputData, const Shape inputShape,
- uint8_t *kernelData, const Shape kernelShape,
- uint8_t *biasData, const Shape biasShape,
- const uint32_t paddingLeft, const uint32_t paddingRight,
- const uint32_t paddingTop, const uint32_t paddingBottom,
- const uint32_t strideWidth, const uint32_t strideHeight,
- const uint32_t multiplier, const FuseCode activation,
- uint8_t *outputData, const Shape outputShape)
+void DepthwiseConvolutionLayer::configure(
+ uint8_t *inputData, const Shape inputShape, uint8_t *kernelData, const Shape kernelShape,
+ uint8_t *biasData, const Shape biasShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom,
+ const uint32_t strideWidth, const uint32_t strideHeight, const uint32_t multiplier,
+ const model::Activation activation, uint8_t *outputData, const Shape outputShape)
{
_inputData.u8 = inputData;
_inputShape = inputShape;
const Shape kernelShape, uint8_t *biasData, const Shape biasShape,
const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
- const uint32_t multiplier, const FuseCode activation, uint8_t *outputData,
+ const uint32_t multiplier, const model::Activation activation, uint8_t *outputData,
const Shape outputShape);
void run();
uint32_t _multiplier;
- FuseCode _activation;
+ model::Activation _activation;
OperandType _inputType;
};
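
With the signature change, a backend stage configures the kernel from already-resolved values. A hypothetical call site (buffer/shape names, the make_unique helper, and the Padding/Stride member names are assumptions; the actual stage code is not part of this diff) could read:

    // Hypothetical usage sketch; param.* fields come from the stage generators above.
    auto fn = nnfw::cpp14::make_unique<DepthwiseConvolutionLayer>();
    fn->configure(ifm_buffer, ifm_shape, ker_buffer, ker_shape, bias_buffer, bias_shape,
                  param.padding.left, param.padding.right, param.padding.top,
                  param.padding.bottom, param.stride.horizontal, param.stride.vertical,
                  param.multiplier, param.activation, ofm_buffer, ofm_shape);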
#include <memory>
#include "model/Operation.h"
+#include "model/InternalType.h"
namespace neurun
{
OperandIndex hstride_index;
OperandIndex vstride_index;
- OperandIndex padding_index;
+ Padding padding;
OperandIndex padding_left_index;
OperandIndex padding_right_index;
OperandIndex padding_bottom_index;
OperandIndex multiplier_index;
- OperandIndex activation_index;
-
- bool explicit_padding;
+ Activation activation;
};
public:
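
Param's new padding and activation members use the plain enums from model/InternalType.h. Only the enumerators that appear in this diff are certain; a sketch of the header consistent with those uses:

    // Sketch of model/InternalType.h: EXPLICIT/SAME/VALID and NONE appear in
    // this diff; the remaining Activation values are assumed to mirror NNAPI.
    namespace neurun
    {
    namespace model
    {

    enum class Padding
    {
      EXPLICIT, // padding sizes are given as operands
      SAME,     // implicit SAME padding
      VALID     // implicit VALID (zero) padding
    };

    enum class Activation
    {
      NONE,
      RELU,
      RELU1,
      RELU6
    };

    } // namespace model
    } // namespace neurun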
using namespace neurun::model;
_map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &operands) {
assert((init_param.input_count == 8 || init_param.input_count == 11) &&
init_param.output_count == 1);
// 6 -> Depthwise multiplier
// 7 -> Activation Index
- param.padding_index = OperandIndex{init_param.inputs[3]};
+ const auto padding_index = OperandIndex{init_param.inputs[3]};
+ const auto activation_index = OperandIndex{init_param.inputs[7]};
+
+ param.padding =
+ NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
param.hstride_index = OperandIndex{init_param.inputs[4]};
param.vstride_index = OperandIndex{init_param.inputs[5]};
param.multiplier_index = OperandIndex{init_param.inputs[6]};
- param.activation_index = OperandIndex{init_param.inputs[7]};
-
- param.explicit_padding = false;
+ param.activation =
+ NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
}
else
{
// 9 -> Depthwise multiplier
// 10-> Activation Index
+ const auto activation_index = OperandIndex{init_param.inputs[10]};
+
+ param.padding = Padding::EXPLICIT;
param.padding_left_index = OperandIndex{init_param.inputs[3]};
param.padding_right_index = OperandIndex{init_param.inputs[4]};
param.padding_top_index = OperandIndex{init_param.inputs[5]};
param.hstride_index = OperandIndex{init_param.inputs[7]};
param.vstride_index = OperandIndex{init_param.inputs[8]};
param.multiplier_index = OperandIndex{init_param.inputs[9]};
- param.activation_index = OperandIndex{init_param.inputs[10]};
-
- param.explicit_padding = true;
+ param.activation =
+ NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
}
return new operation::DepthwiseConv2DNode{inputs, outputs, param};
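
The frontend now depends on two NNAPIConvert helpers to translate NNAPI scalar codes into the internal enums. Their definitions are outside this diff; a minimal sketch matching the call sites above and the standard NNAPI PaddingCode/FuseCode values might be:

    // Sketch inferred from the call sites; not the verbatim NNAPIConvert source.
    model::Padding NNAPIConvert::getPaddingType(PaddingCode type)
    {
      switch (type)
      {
        case ANEURALNETWORKS_PADDING_SAME:
          return model::Padding::SAME;
        case ANEURALNETWORKS_PADDING_VALID:
          return model::Padding::VALID;
        default:
          throw std::runtime_error{"Unsupported padding code"};
      }
    }

    model::Activation NNAPIConvert::getFusedActivation(FuseCode act)
    {
      switch (act)
      {
        case ANEURALNETWORKS_FUSED_NONE:
          return model::Activation::NONE;
        case ANEURALNETWORKS_FUSED_RELU:
          return model::Activation::RELU;
        case ANEURALNETWORKS_FUSED_RELU1:
          return model::Activation::RELU1;
        case ANEURALNETWORKS_FUSED_RELU6:
          return model::Activation::RELU6;
        default:
          throw std::runtime_error{"Unsupported fused activation"};
      }
    }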