* Move `InternalType.h` into the `ir` directory.
* Move `Activation`, `PaddingType`, `ExplicitPadding`, `Padding`, `Stride` to `neurun::ir` namespace.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
#include "kernel/ConcatLayer.h"
#include "model/Index.h"
#include "ir/DataType.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
#include "util/logging.h"
void appendReLU6(::arm_compute::ICLTensor *ifm_alloc);
public:
- void append(model::Activation code, ::arm_compute::ICLTensor *ifm_alloc);
+ void append(ir::Activation code, ::arm_compute::ICLTensor *ifm_alloc);
private:
IExecutionBuilder &_builder;
_builder.append(std::move(acl_fn));
}
-void ActivationBuilder::append(model::Activation code, ::arm_compute::ICLTensor *ifm_alloc)
+void ActivationBuilder::append(ir::Activation code, ::arm_compute::ICLTensor *ifm_alloc)
{
switch (code)
{
- case model::Activation::NONE:
+ case ir::Activation::NONE:
{
// DO NOTHING
break;
}
- case model::Activation::RELU:
+ case ir::Activation::RELU:
{
appendReLU(ifm_alloc);
break;
}
- case model::Activation::RELU1:
+ case ir::Activation::RELU1:
{
appendReLU1(ifm_alloc);
break;
}
- case model::Activation::RELU6:
+ case ir::Activation::RELU6:
{
appendReLU6(ifm_alloc);
break;
const auto stride = node.param().stride;
- assert((node.param().padding.type == model::PaddingType::SAME) ||
- (node.param().padding.type == model::PaddingType::VALID));
+ assert((node.param().padding.type == ir::PaddingType::SAME) ||
+ (node.param().padding.type == ir::PaddingType::VALID));
auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride,
ker_shape.W, ker_shape.H);
uint32_t invalid_horizontal = 0;
uint32_t invalid_vertical = 0;
- if (node.param().padding.type == model::PaddingType::VALID)
+ if (node.param().padding.type == ir::PaddingType::VALID)
{
invalid_horizontal =
ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1);
#include "kernel/ConcatLayer.h"
#include "model/Index.h"
-#include "model/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
#include "util/logging.h"
return info;
}
-::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding,
- const model::Stride &stride)
+::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
+ const ir::Stride &stride)
{
return ::arm_compute::PadStrideInfo{stride.horizontal,
stride.vertical,
::arm_compute::DimensionRoundingType::FLOOR};
}
-::arm_compute::ActivationLayerInfo
-asActivationLayerInfo(const ::neurun::model::Activation &act_code)
+::arm_compute::ActivationLayerInfo asActivationLayerInfo(const ir::Activation act_code)
{
switch (act_code)
{
- case ::neurun::model::Activation::NONE:
+ case ir::Activation::NONE:
return ::arm_compute::ActivationLayerInfo{};
- case ::neurun::model::Activation::RELU:
+ case ir::Activation::RELU:
return ::arm_compute::ActivationLayerInfo{
::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
- case ::neurun::model::Activation::RELU1:
+ case ir::Activation::RELU1:
return ::arm_compute::ActivationLayerInfo{
::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
- case ::neurun::model::Activation::RELU6:
+ case ir::Activation::RELU6:
return ::arm_compute::ActivationLayerInfo{
::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
// Cases for activation of LSTM.
- case ::neurun::model::Activation::TANH:
+ case ir::Activation::TANH:
return ::arm_compute::ActivationLayerInfo{
::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
- case ::neurun::model::Activation::SIGMOID:
+ case ir::Activation::SIGMOID:
      // NOTE The sigmoid function is a special case of the Logistic function when L=1, k=1, x0=0.
      // TODO In ACL and the NNAPI spec, Logistic currently always uses L=1, k=1, x0=0
      // (i.e. always sigmoid), regardless of the values of the parameters.
#include <arm_compute/core/TensorShape.h>
#include "ir/Layout.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include "model/Operand.h"
#include "model/Shape.h"
#include "model/TypeInfo.h"
ir::Layout frontend_layout, ir::Layout backend_layout,
bool apply_dim_correction = true);
-::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding,
- const model::Stride &stride);
+::arm_compute::PadStrideInfo asPadStrideInfo(const ir::ExplicitPadding &padding,
+ const ir::Stride &stride);
-::arm_compute::ActivationLayerInfo
-asActivationLayerInfo(const ::neurun::model::Activation &act_code);
+::arm_compute::ActivationLayerInfo asActivationLayerInfo(ir::Activation act_code);
std::unique_ptr<AclFunction> asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer);
#include "util/Padding.h"
#include "model/Index.h"
#include "ir/DataType.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
#include "util/logging.h"
void appendReLU6(::arm_compute::ITensor *ifm_alloc);
public:
- void append(model::Activation act, ::arm_compute::ITensor *ifm_alloc);
+ void append(ir::Activation act, ::arm_compute::ITensor *ifm_alloc);
private:
IExecutionBuilder &_builder;
_builder.append(std::move(acl_fn));
}
-void ActivationBuilder::append(model::Activation act, ::arm_compute::ITensor *ifm_alloc)
+void ActivationBuilder::append(ir::Activation act, ::arm_compute::ITensor *ifm_alloc)
{
switch (act)
{
- case model::Activation::NONE:
+ case ir::Activation::NONE:
{
// DO NOTHING
break;
}
- case model::Activation::RELU:
+ case ir::Activation::RELU:
{
appendReLU(ifm_alloc);
break;
}
- case model::Activation::RELU1:
+ case ir::Activation::RELU1:
{
appendReLU1(ifm_alloc);
break;
}
- case model::Activation::RELU6:
+ case ir::Activation::RELU6:
{
appendReLU6(ifm_alloc);
break;
const auto stride = node.param().stride;
- assert((node.param().padding.type == model::PaddingType::SAME) ||
- (node.param().padding.type == model::PaddingType::VALID));
+ assert((node.param().padding.type == ir::PaddingType::SAME) ||
+ (node.param().padding.type == ir::PaddingType::VALID));
auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape, stride,
ker_shape.W, ker_shape.H);
uint32_t invalid_horizontal = 0;
uint32_t invalid_vertical = 0;
- if (node.param().padding.type == model::PaddingType::VALID)
+ if (node.param().padding.type == ir::PaddingType::VALID)
{
invalid_horizontal =
ofm_shape.W - (1 + (ifm_shape.W - 1) * stride.horizontal) - (ker_shape.W - 1);
#include "kernel/ConcatLayer.h"
#include "util/Padding.h"
#include "model/Index.h"
-#include "model/InternalType.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
#include "util/logging.h"
}
void AddLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr)
{
_lhsData.u8 = lhsData;
void addQuant8();
void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr);
void run();
TensorDescriptor _rhsDescr;
TensorDescriptor _outputDescr;
- model::Activation _activation{model::Activation::NONE};
+ ir::Activation _activation{ir::Activation::NONE};
OperandType _inputType{OperandType::FLOAT32};
};
AvgPoolLayer::AvgPoolLayer()
: _inputData(), _outputData(), _inputDescr(), _outputDescr(), _paddingLeft(0), _paddingTop(0),
_paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0),
- _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
+ _kernelHeight(0), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32)
{
// DO NOTHING
}
const uint32_t paddingTop, const uint32_t paddingBottom,
const uint32_t strideWidth, const uint32_t strideHeight,
const uint32_t kernelWidth, const uint32_t kernelHeight,
- const model::Activation activation, uint8_t *outputData,
+ const ir::Activation activation, uint8_t *outputData,
const TensorDescriptor outputDescr)
{
_inputData.u8 = inputData;
const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideWidth,
const uint32_t strideHeight, const uint32_t kernelWidth,
- const uint32_t kernelHeight, const model::Activation activation,
- uint8_t *outputData, const TensorDescriptor outputDescr);
+ const uint32_t kernelHeight, const ir::Activation activation, uint8_t *outputData,
+ const TensorDescriptor outputDescr);
void run();
void runSync()
uint32_t _kernelWidth;
uint32_t _kernelHeight;
- model::Activation _activation;
+ ir::Activation _activation;
OperandType _inputType;
};
ConvolutionLayer::ConvolutionLayer()
: _inputData(), _kernelData(), _outputData(), _biasData(), _inputDescr(), _kernelDescr(),
_outputDescr(), _biasDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
- _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(model::Activation::NONE),
+ _paddingBottom(0), _strideWidth(0), _strideHeight(0), _activation(ir::Activation::NONE),
_inputType(OperandType::FLOAT32)
{
// DO NOTHING
const uint32_t paddingLeft, const uint32_t paddingRight,
const uint32_t paddingTop, const uint32_t paddingBottom,
const uint32_t strideWidth, const uint32_t strideHeight,
- const model::Activation activation, uint8_t *outputData,
+ const ir::Activation activation, uint8_t *outputData,
const TensorDescriptor outputDescr)
{
_inputData.u8 = inputData;
const TensorDescriptor biasDescr, const uint32_t paddingLeft,
const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
- const model::Activation activation, uint8_t *outputData,
+ const ir::Activation activation, uint8_t *outputData,
const TensorDescriptor outputDescr);
void run();
uint32_t _strideWidth;
uint32_t _strideHeight;
- model::Activation _activation;
+ ir::Activation _activation;
OperandType _inputType;
};
: _inputData(), _kernelData(), _outputData(), _biasData(), _inputDescr(), _kernelDescr(),
_outputDescr(), _biasDescr(), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
_paddingBottom(0), _strideWidth(0), _strideHeight(0), _multiplier(0),
- _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
+ _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32)
{
// DO NOTHING
}
const uint32_t paddingTop, const uint32_t paddingBottom,
const uint32_t strideWidth, const uint32_t strideHeight,
const uint32_t multiplier,
- const model::Activation activation, uint8_t *outputData,
+ const ir::Activation activation, uint8_t *outputData,
const TensorDescriptor outputDescr)
{
_inputData.u8 = inputData;
const TensorDescriptor biasDescr, const uint32_t paddingLeft,
const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
- const uint32_t multiplier, const model::Activation activation, uint8_t *outputData,
+ const uint32_t multiplier, const ir::Activation activation, uint8_t *outputData,
const TensorDescriptor outputDescr);
void run();
uint32_t _multiplier;
- model::Activation _activation;
+ ir::Activation _activation;
OperandType _inputType;
};
FullyConnectedLayer::FullyConnectedLayer()
: _inputData(), _weightsData(), _biasData(), _outputData(), _inputDescr(), _weightsDescr(),
- _biasDescr(), _outputDescr(), _activation(model::Activation::NONE),
+ _biasDescr(), _outputDescr(), _activation(ir::Activation::NONE),
_inputType(OperandType::FLOAT32)
{
// DO NOTHING
void FullyConnectedLayer::configure(uint8_t *inputData, const TensorDescriptor inputDescr,
uint8_t *weightsData, const TensorDescriptor weightsDescr,
uint8_t *biasData, const TensorDescriptor biasDescr,
- model::Activation activation, uint8_t *outputData,
+ ir::Activation activation, uint8_t *outputData,
const TensorDescriptor outputDescr)
{
_inputData.u8 = inputData;
void configure(uint8_t *inputData, const TensorDescriptor inputDescr, uint8_t *weightsData,
const TensorDescriptor weightsDescr, uint8_t *biasData,
- const TensorDescriptor biasDescr, model::Activation activation,
- uint8_t *outputData, const TensorDescriptor outputDescr);
+ const TensorDescriptor biasDescr, ir::Activation activation, uint8_t *outputData,
+ const TensorDescriptor outputDescr);
void run();
void runSync()
TensorDescriptor _biasDescr;
TensorDescriptor _outputDescr;
- model::Activation _activation;
+ ir::Activation _activation;
OperandType _inputType;
};
MaxPoolLayer::MaxPoolLayer()
: _inputData(), _outputData(), _inputDescr(), _outputDescr(), _paddingLeft(0), _paddingTop(0),
_paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0),
- _kernelHeight(0), _activation(model::Activation::NONE), _inputType(OperandType::FLOAT32)
+ _kernelHeight(0), _activation(ir::Activation::NONE), _inputType(OperandType::FLOAT32)
{
// DO NOTHING
}
const uint32_t paddingTop, const uint32_t paddingBottom,
const uint32_t strideWidth, const uint32_t strideHeight,
const uint32_t kernelWidth, const uint32_t kernelHeight,
- const model::Activation activation, uint8_t *outputData,
+ const ir::Activation activation, uint8_t *outputData,
const TensorDescriptor outputDescr)
{
_inputData.u8 = inputData;
const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideWidth,
const uint32_t strideHeight, const uint32_t kernelWidth,
- const uint32_t kernelHeight, const model::Activation activation,
- uint8_t *outputData, const TensorDescriptor outputDescr);
+ const uint32_t kernelHeight, const ir::Activation activation, uint8_t *outputData,
+ const TensorDescriptor outputDescr);
void run();
void runSync()
uint32_t _kernelWidth;
uint32_t _kernelHeight;
- model::Activation _activation;
+ ir::Activation _activation;
OperandType _inputType;
};
}
void MulLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr)
{
_lhsData.u8 = lhsData;
void mulQuant8();
void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr);
void run();
TensorDescriptor _rhsDescr;
TensorDescriptor _outputDescr;
- model::Activation _activation{model::Activation::NONE};
+ ir::Activation _activation{ir::Activation::NONE};
OperandType _inputType{OperandType::FLOAT32};
};
*quantized_multiplier = static_cast<int32_t>(q_fixed);
}
-void CalculateActivationRangeFloat(model::Activation activation, float *activation_min,
+void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min,
float *activation_max)
{
- if (activation == model::Activation::RELU)
+ if (activation == ir::Activation::RELU)
{
*activation_min = 0.f;
*activation_max = std::numeric_limits<float>::max();
}
- else if (activation == model::Activation::RELU6)
+ else if (activation == ir::Activation::RELU6)
{
*activation_min = 0.f;
*activation_max = 6.f;
}
- else if (activation == model::Activation::RELU1)
+ else if (activation == ir::Activation::RELU1)
{
*activation_min = -1.f;
*activation_max = 1.f;
}
- else if (activation == model::Activation::SIGMOID)
+ else if (activation == ir::Activation::SIGMOID)
{
*activation_min = 0.f;
*activation_max = 1.f;
}
- else if (activation == model::Activation::NONE)
+ else if (activation == ir::Activation::NONE)
{
*activation_min = std::numeric_limits<float>::lowest();
*activation_max = std::numeric_limits<float>::max();
}
}
-void CalculateActivationRangeUint8(model::Activation activation,
- const TensorDescriptor &outputDescr, int32_t *act_min,
- int32_t *act_max)
+void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr,
+ int32_t *act_min, int32_t *act_max)
{
const int32_t qmin = std::numeric_limits<uint8_t>::min();
const int32_t qmax = std::numeric_limits<uint8_t>::max();
auto quantize = [scale, zero_point](float f) {
return zero_point + static_cast<int32_t>(std::round(f / scale));
};
- if (activation == model::Activation::RELU)
+ if (activation == ir::Activation::RELU)
{
*act_min = std::max(qmin, quantize(0.0));
*act_max = qmax;
}
- else if (activation == model::Activation::RELU6)
+ else if (activation == ir::Activation::RELU6)
{
*act_min = std::max(qmin, quantize(0.0));
*act_max = std::min(qmax, quantize(6.0));
}
- else if (activation == model::Activation::RELU1)
+ else if (activation == ir::Activation::RELU1)
{
*act_min = std::max(qmin, quantize(-1.0));
*act_max = std::min(qmax, quantize(1.0));
}
- else if (activation == model::Activation::SIGMOID)
+ else if (activation == ir::Activation::SIGMOID)
{
*act_min = std::max(qmin, quantize(0.0));
*act_max = std::min(qmax, quantize(1.0));
}
- else if (activation == model::Activation::NONE)
+ else if (activation == ir::Activation::NONE)
{
*act_min = qmin;
*act_max = qmax;
#include "model/Operand.h"
#include "ir/DataType.h"
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
using OperandType = neurun::ir::DataType;
void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
int *left_shift);
-void CalculateActivationRangeFloat(model::Activation activation, float *activation_min,
+void CalculateActivationRangeFloat(ir::Activation activation, float *activation_min,
float *activation_max);
-void CalculateActivationRangeUint8(model::Activation activation,
- const TensorDescriptor &outputDescr, int32_t *act_min,
- int32_t *act_max);
+void CalculateActivationRangeUint8(ir::Activation activation, const TensorDescriptor &outputDescr,
+ int32_t *act_min, int32_t *act_max);
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
}
void SubLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr)
{
_lhsData.u8 = lhsData;
void subQuant8();
void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr);
void run();
TensorDescriptor _rhsDescr;
TensorDescriptor _outputDescr;
- model::Activation _activation{model::Activation::NONE};
+ ir::Activation _activation{ir::Activation::NONE};
OperandType _inputType{OperandType::FLOAT32};
};
const auto ker_height = ker_shape.H;
const auto ker_width = ker_shape.W;
const auto stride = node.param().stride;
- const int padding_type = (node.param().padding.type == model::PaddingType::SAME);
+ const int padding_type = (node.param().padding.type == ir::PaddingType::SAME);
const auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape,
stride, ker_width, ker_height);
void AddLayer::addFloat32()
{
- assert(_activation == model::Activation::NONE);
+ assert(_activation == ir::Activation::NONE);
// ncnn kernel support
// 1. rank < 4
}
void AddLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr,
const ir::Layout backendLayout)
{
void addQuant8();
void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData,
- const TensorDescriptor &rhsDescr, const model::Activation activation,
+ const TensorDescriptor &rhsDescr, const ir::Activation activation,
uint8_t *outputData, const TensorDescriptor &outputDescr,
const ir::Layout backendLayout);
TensorDescriptor _rhsDescr;
TensorDescriptor _outputDescr;
- model::Activation _activation{model::Activation::NONE};
+ ir::Activation _activation{ir::Activation::NONE};
OperandType _inputType{OperandType::FLOAT32};
InstanceNormLayer::InstanceNormLayer()
: _inputData(), _gammaData(), _betaData(), _outputData(), _inputDescr(), _gammaDescr(),
- _betaDescr(), _outputDescr(), _epsilon(1e-5), _activation(model::Activation::NONE),
+ _betaDescr(), _outputDescr(), _epsilon(1e-5), _activation(ir::Activation::NONE),
_inputType(OperandType::FLOAT32), _backendLayout(ir::Layout::UNKNOWN)
{
// DO NOTHING
const int output_width = _outputDescr.dimensions[3];
nnfw::ncnn::Mat out_mat(output_width, output_height, output_channels, _outputData.f);
- if (_activation == model::Activation::NONE)
+ if (_activation == ir::Activation::NONE)
{
nnfw::ncnn::ncnn_instance_norm_rowmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels,
_epsilon);
}
- else if (_activation == model::Activation::RELU)
+ else if (_activation == ir::Activation::RELU)
{
nnfw::ncnn::ncnn_instance_norm_with_relu_rowmajor(in_mat, out_mat, gamma_mat, beta_mat,
input_channels, _epsilon, 0.f);
const int output_channels = _outputDescr.dimensions[3];
nnfw::ncnn::Mat out_mat(output_channels, output_width, output_height, _outputData.f);
- if (_activation == model::Activation::NONE)
+ if (_activation == ir::Activation::NONE)
{
nnfw::ncnn::ncnn_instance_norm_colmajor(in_mat, out_mat, gamma_mat, beta_mat, input_channels,
_epsilon);
}
- else if (_activation == model::Activation::RELU)
+ else if (_activation == ir::Activation::RELU)
{
nnfw::ncnn::ncnn_instance_norm_with_relu_colmajor(in_mat, out_mat, gamma_mat, beta_mat,
input_channels, _epsilon, 0.f);
uint8_t *gammaData, const TensorDescriptor gammaDescr,
uint8_t *betaData, const TensorDescriptor betaDescr,
uint8_t *outputData, const TensorDescriptor outputDescr,
- float epsilon, model::Activation activation,
+ float epsilon, ir::Activation activation,
ir::Layout backendLayout)
{
_inputData.u8 = inputData;
void configure(uint8_t *inputData, const TensorDescriptor inputDescr, uint8_t *gammaData,
const TensorDescriptor gammaDescr, uint8_t *betaData,
const TensorDescriptor betaDescr, uint8_t *outputData,
- const TensorDescriptor outputDescr, float epsilon, model::Activation activation,
+ const TensorDescriptor outputDescr, float epsilon, ir::Activation activation,
ir::Layout backendLayout);
void run();
TensorDescriptor _outputDescr;
float _epsilon;
- model::Activation _activation;
+ ir::Activation _activation;
OperandType _inputType;
ir::Layout _backendLayout;
#include "model/Operand.h"
#include "ir/DataType.h"
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
#include <ncnn/srcn/conv_type.h>
using OperandType = neurun::ir::DataType;
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_INTERNAL_TYPE_H__
-#define __NEURUN_MODEL_INTERNAL_TYPE_H__
+#ifndef __NEURUN_IR_INTERNAL_TYPE_H__
+#define __NEURUN_IR_INTERNAL_TYPE_H__
#include <cstdint>
namespace neurun
{
-namespace model
+namespace ir
{
enum class Activation
uint32_t horizontal;
};
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using Activation = ir::Activation;
+using PaddingType = ir::PaddingType;
+using ExplicitPadding = ir::ExplicitPadding;
+using Padding = ir::Padding;
+using Stride = ir::Stride;
} // namespace model
} // namespace neurun
-#endif // __NEURUN_MODEL_INTERNAL_TYPE_H__
+#endif // __NEURUN_IR_INTERNAL_TYPE_H__
#define __NEURUN_MODEL_OPERATION_ADD_H__
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#include <memory>
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#include <memory>
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#include <memory>
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#define __NEURUN_MODEL_OPERATION_DIV_H__
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#include <memory>
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#define __NEURUN_MODEL_OPERATION_INSTANCE_NORM_H__
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#include <memory>
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#ifndef __NEURUN_MODEL_OPERATION_LSTM_H__
#define __NEURUN_MODEL_OPERATION_LSTM_H__
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include "model/Operation.h"
namespace neurun
#include <memory>
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#define __NEURUN_MODEL_OPERATION_MUL_H__
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#ifndef __NEURUN_MODEL_OPERATION_RNN_H__
#define __NEURUN_MODEL_OPERATION_RNN_H__
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include "model/Operation.h"
namespace neurun
#define __NEURUN_MODEL_OPERATION_SUB_H__
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#include <memory>
#include "model/Operation.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
#include <stdint.h>
#include "model/Shape.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
namespace neurun
{
namespace util
{
-model::ExplicitPadding validPadding(void);
-model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
- const model::FeatureShape &ofm_shape,
- const model::Stride &stride, uint32_t kw, uint32_t kh);
-model::ExplicitPadding calculatePadding(const model::Padding &padding,
- const model::FeatureShape &ifm_shape,
- const model::FeatureShape &ofm_shape,
- const model::Stride &stride, uint32_t kw, uint32_t kh);
+ir::ExplicitPadding validPadding(void);
+ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+ uint32_t kw, uint32_t kh);
+ir::ExplicitPadding calculatePadding(const ir::Padding &padding,
+ const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+ uint32_t kw, uint32_t kh);
} // namespace util
} // namespace neurun
#ifndef __NEURUN_UTIL_UTILS_H__
#define __NEURUN_UTIL_UTILS_H__
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include "ir/Layout.h"
#include "model/Operand.h"
#include "util/Coordinates.h"
/**
* @brief Converts a internal padding type to const char*
- * @param[in] code Padding type to be converted
+ * @param[in] type Padding type to be converted
* @return A string holding the converted value
*/
-const char *to_string(const model::PaddingType &type);
+const char *to_string(ir::PaddingType type);
Coordinates convertCoordinates(const Coordinates &from_coordinates, ir::Layout from_layout,
ir::Layout to_layout);
UNUSED_RELEASE(ifm_shape);
UNUSED_RELEASE(ker_shape);
- assert((node.param().padding.type == model::PaddingType::SAME) ||
- (node.param().padding.type == model::PaddingType::VALID));
+ assert((node.param().padding.type == ir::PaddingType::SAME) ||
+ (node.param().padding.type == ir::PaddingType::VALID));
assert(ifm_shape.N == ofm_shape.N);
assert(ifm_shape.C == ker_shape.C);
assert(ker_shape.N == ofm_shape.C);
#define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
#include "model/Shape.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include <cker/Shape.h>
}
template <typename T>
-void calculateActivationRange(model::Activation activation, T *activation_min, T *activation_max)
+void calculateActivationRange(ir::Activation activation, T *activation_min, T *activation_max)
{
- if (activation == model::Activation::RELU)
+ if (activation == ir::Activation::RELU)
{
*activation_min = 0;
*activation_max = std::numeric_limits<T>::max();
}
- else if (activation == model::Activation::RELU6)
+ else if (activation == ir::Activation::RELU6)
{
*activation_min = 0;
*activation_max = 6;
}
- else if (activation == model::Activation::RELU1)
+ else if (activation == ir::Activation::RELU1)
{
*activation_min = -1;
*activation_max = 1;
}
- else if (activation == model::Activation::NONE)
+ else if (activation == ir::Activation::NONE)
{
*activation_min = std::numeric_limits<T>::lowest();
*activation_max = std::numeric_limits<T>::max();
void Dumper::visit(const Conv2D &node)
{
std::string padding_type =
- node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
VERBOSE(LIR) << "* Conv2D(" << padding_type << ")" << std::endl;
VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(Conv2D::Input::INPUT).value()
<< ") Kernel(" << node.getInputs().at(Conv2D::Input::KERNEL).value() << ") Bias("
void Dumper::visit(const DepthwiseConv2D &node)
{
std::string padding_type =
- node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
VERBOSE(LIR) << "* DepthwiseConv2D(" << padding_type << ")" << std::endl;
VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(DepthwiseConv2D::Input::INPUT).value()
<< ") Kernel(" << node.getInputs().at(DepthwiseConv2D::Input::KERNEL).value()
void Dumper::visit(const MaxPool2D &node)
{
std::string padding_type =
- node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
VERBOSE(LIR) << "* MaxPool2D(" << padding_type << ")" << std::endl;
VERBOSE(LIR) << " - Inputs : IFM(" << node.getInputs().at(MaxPool2D::Input::INPUT).value() << ")"
<< std::endl;
void Dumper::visit(const TransposeConv &node)
{
std::string padding_type =
- node.param().padding.type == model::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
+ node.param().padding.type == ir::PaddingType::EXPLICIT ? "Explicit" : "Implicit";
VERBOSE(LIR) << "* TransposeConv(" << padding_type << ")" << std::endl;
VERBOSE(LIR) << " - Inputs : Output Shape("
<< node.getInputs().at(TransposeConv::Input::OUTPUT_SHAPE).value() << ") KERNEL("
namespace util
{
-model::ExplicitPadding validPadding(void)
+ir::ExplicitPadding validPadding(void)
{
//
// ANEURALNETWORKS_PADDING_VALID
// the input at the end that could not fill the whole filter tile
// will simply be ignored.
//
- model::ExplicitPadding padding;
+ ir::ExplicitPadding padding;
padding.top = 0;
padding.bottom = 0;
return padding;
}
-model::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape,
- const model::Stride &stride, uint32_t kw, uint32_t kh)
+ir::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape,
+ const ir::Stride &stride, uint32_t kw, uint32_t kh)
{
- model::ExplicitPadding padding;
+ ir::ExplicitPadding padding;
// ANEURALNETWORKS_PADDING_SAME (from NNAPI spec)
//
return padding;
}
-model::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
- const model::FeatureShape &ofm_shape,
- const model::Stride &stride, uint32_t kw, uint32_t kh)
+ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+ uint32_t kw, uint32_t kh)
{
const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical;
const int32_t horizontal_expected_output =
return samePaddingUsingIFM(ifm_shape, stride, kw, kh);
}
-model::ExplicitPadding calculatePadding(const model::Padding &padding,
- const model::FeatureShape &ifm_shape,
- const model::FeatureShape &ofm_shape,
- const model::Stride &stride, uint32_t kw, uint32_t kh)
+ir::ExplicitPadding calculatePadding(const ir::Padding &padding,
+ const model::FeatureShape &ifm_shape,
+ const model::FeatureShape &ofm_shape, const ir::Stride &stride,
+ uint32_t kw, uint32_t kh)
{
- if (padding.type == model::PaddingType::EXPLICIT)
+ if (padding.type == ir::PaddingType::EXPLICIT)
{
return padding.param;
}
- else if (padding.type == model::PaddingType::SAME)
+ else if (padding.type == ir::PaddingType::SAME)
{
return samePadding(ifm_shape, ofm_shape, stride, kw, kh);
}
- else if (padding.type == model::PaddingType::VALID)
+ else if (padding.type == ir::PaddingType::VALID)
{
return validPadding();
}
*/
#include "util/Utils.h"
-#include "model/InternalType.h"
+#include "ir/InternalType.h"
#include "model/Shape.h"
#include "model/operation/AvgPool2D.h"
#include "model/operation/MaxPool2D.h"
// Calculate output height and width of convolution-like operation
std::pair<int, int> calcConvLikeHeightAndWidth(const int in_h, const int in_w, const int ker_h,
- const int ker_w, const model::Padding pad,
- const model::Stride stride)
+ const int ker_w, const ir::Padding pad,
+ const ir::Stride stride)
{
int32_t out_h = 0, out_w = 0;
switch (pad.type)
{
- case model::PaddingType::SAME:
+ case ir::PaddingType::SAME:
out_h = ceil_div(in_h, stride.vertical);
out_w = ceil_div(in_w, stride.horizontal);
break;
- case model::PaddingType::VALID:
+ case ir::PaddingType::VALID:
out_h = ceil_div(in_h - ker_h + 1, stride.vertical);
out_w = ceil_div(in_w - ker_w + 1, stride.horizontal);
break;
- case model::PaddingType::EXPLICIT:
+ case ir::PaddingType::EXPLICIT:
out_h = (in_h + pad.param.top + pad.param.bottom - ker_h) / stride.vertical + 1;
out_w = (in_w + pad.param.left + pad.param.right - ker_w) / stride.horizontal + 1;
break;
namespace util
{
-const char *to_string(const model::PaddingType &type)
+const char *to_string(const ir::PaddingType type)
{
- assert((type == model::PaddingType::EXPLICIT) || (type == model::PaddingType::SAME) ||
- (type == model::PaddingType::VALID));
+ assert((type == ir::PaddingType::EXPLICIT) || (type == ir::PaddingType::SAME) ||
+ (type == ir::PaddingType::VALID));
switch (type)
{
- case model::PaddingType::EXPLICIT:
+ case ir::PaddingType::EXPLICIT:
return "Padding::EXPLICIT";
- case model::PaddingType::SAME:
+ case ir::PaddingType::SAME:
return "Padding::SAME";
- case model::PaddingType::VALID:
+ case ir::PaddingType::VALID:
return "Padding::VALID";
}
void loadModel();
// Helper functions
- model::Activation convertActivation(ActivationFunctionType type);
+ ir::Activation convertActivation(ActivationFunctionType type);
ir::DataType tensorTypeToDataType(TensorType type);
// Create operands form tflite::Tensor
}
template <typename LoaderDomain, typename SpecificLoader>
-model::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActivation(
+ir::Activation BaseLoader<LoaderDomain, SpecificLoader>::BaseLoader::convertActivation(
const ActivationFunctionType type)
{
switch (type)
{
case ActivationFunctionType::ActivationFunctionType_NONE:
- return model::Activation::NONE;
+ return ir::Activation::NONE;
case ActivationFunctionType::ActivationFunctionType_RELU:
- return model::Activation::RELU;
+ return ir::Activation::RELU;
case ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1:
- return model::Activation::RELU1;
+ return ir::Activation::RELU1;
case ActivationFunctionType::ActivationFunctionType_RELU6:
- return model::Activation::RELU6;
+ return ir::Activation::RELU6;
case ActivationFunctionType::ActivationFunctionType_TANH:
- return model::Activation::TANH;
+ return ir::Activation::TANH;
default:
throw std::runtime_error(std::string("Unsupported activation type: ")
.append(EnumNameActivationFunctionType(type)));
param.stride.horizontal = options->stride_h();
// Paddings
if (options->padding() == Padding::Padding_SAME)
- param.padding.type = model::PaddingType::SAME;
+ param.padding.type = ir::PaddingType::SAME;
if (options->padding() == Padding::Padding_VALID)
- param.padding.type = model::PaddingType::VALID;
+ param.padding.type = ir::PaddingType::VALID;
// param paddings indexes unused
}
#include <model/TypeInfo.h>
#include <model/Shape.h>
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
class NNAPIConvert
{
* @param[in] act NNAPI's FuseCode type
* @return neurun's internal activation type
*/
- static ::neurun::model::Activation getFusedActivation(FuseCode act);
+ static neurun::ir::Activation getFusedActivation(FuseCode act);
/**
* @brief Convert NNAPI PaddingCode to internal padding type
- * @param[in] act NNAPI's PaddingCode type
+ * @param[in] type NNAPI's PaddingCode type
* @return neurun's internal padding type
*/
- static ::neurun::model::PaddingType getPaddingType(PaddingCode type);
+ static neurun::ir::PaddingType getPaddingType(PaddingCode type);
};
#endif // __NEURUN_NNAPI_CONVERT_H__
#include <backend/IShapeFixer.h>
#include <model/Shape.h>
-#include <model/InternalType.h>
+#include <ir/InternalType.h>
#include <model/TypeInfo.h>
#include <ir/DataType.h>
16));
// 2nd add operations (result2 <= result1 + rhs2)
operation::Add::Param param1;
- param1.activation = neurun::model::Activation::NONE;
+ param1.activation = Activation::NONE;
auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
auto output_set1 = OperandIndexSequence{operand_result1};
graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set1, output_set1, param1));
operation::Add::Param param2;
- param2.activation = neurun::model::Activation::NONE;
+ param2.activation = Activation::NONE;
auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
auto output_set2 = OperandIndexSequence{operand_result2};
graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set2, output_set2, param2));
// Add operations
operation::Add::Param param;
- param.activation = neurun::model::Activation::NONE;
+ param.activation = Activation::NONE;
auto input_set = OperandIndexSequence{operand_lhs, operand_rhs};
auto output_set = OperandIndexSequence{operand_result};
_graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set, output_set, param));
// 2nd add operations (result2 <= result1 + rhs2)
operation::Add::Param param1;
- param1.activation = neurun::model::Activation::NONE;
+ param1.activation = Activation::NONE;
auto input_set1 = OperandIndexSequence{operand_lhs, operand_rhs1};
auto output_set1 = OperandIndexSequence{operand_result1};
_graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set1, output_set1, param1));
operation::Add::Param param2;
- param2.activation = neurun::model::Activation::NONE;
+ param2.activation = Activation::NONE;
auto input_set2 = OperandIndexSequence{operand_result1, operand_rhs2};
auto output_set2 = OperandIndexSequence{operand_result2};
_graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set2, output_set2, param2));
// Add operations
operation::Add::Param param;
- param.activation = neurun::model::Activation::NONE;
+ param.activation = Activation::NONE;
auto input_set = OperandIndexSequence{operand_lhs, operand_rhs};
auto output_set = OperandIndexSequence{operand_result};
_graph->addOperation(nnfw::cpp14::make_unique<operation::Add>(input_set, output_set, param));
IndexSet inputs{input_operand, kernel_operand, bias_operand};
Graph::Param conv_params;
- conv_params.padding.type = neurun::model::PaddingType::SAME;
+ conv_params.padding.type = neurun::ir::PaddingType::SAME;
conv_params.stride.horizontal = 1;
conv_params.stride.vertical = 1;
- conv_params.activation = neurun::model::Activation::NONE;
+ conv_params.activation = neurun::ir::Activation::NONE;
auto output_operand = graph.addOperand(shape, type).value();
IndexSet outputs{output_operand};