#include <stdexcept>
#include "internal/Padding.h"
+#include "kernel/cpu/OperationUtils.h"
#include "kernel/cpu/ConvolutionLayer.h"
#include "kernel/cpu/AvgPoolLayer.h"
#include "kernel/cpu/MaxPoolLayer.h"
int ker_index;
int bias_index;
- ::neurun::internal::operand::Shape ofm_shape{1};
- ::neurun::internal::operand::Shape ifm_shape{1};
- ::neurun::internal::operand::Shape ker_shape{1};
- ::neurun::internal::operand::Shape bias_shape{1};
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+ ::neurun::kernel::cpu::Shape ker_shape;
+ ::neurun::kernel::cpu::Shape bias_shape;
::internal::Padding padding;
::internal::Stride stride;
param.ker_index = ker_index.asInt();
param.bias_index = bias_index.asInt();
- param.ofm_shape = _ctx.at(ofm_index).shape();
- param.ifm_shape = _ctx.at(ifm_index).shape();
- param.ker_shape = _ctx.at(ker_index).shape();
- param.bias_shape = _ctx.at(bias_index).shape();
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
+ param.ker_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ker_index));
+ param.bias_shape = ::neurun::kernel::cpu::getShape(_ctx.at(bias_index));
param.stride = stride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? ::internal::same_padding(
- param.ifm_shape.asFeature(), param.ofm_shape.asFeature(), stride,
- param.ker_shape.asKernel().W, param.ker_shape.asKernel().H)
+ ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), stride,
+ _ctx.at(ker_index).shape().asKernel().W,
+ _ctx.at(ker_index).shape().asKernel().H)
: ::internal::valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
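
For reference, ::internal::same_padding presumably follows the standard SAME rule: per axis, pad just enough that ofm = ceil(ifm / stride). A minimal sketch of that arithmetic, under assumed field names for the result (left/right/top/bottom are hypothetical here, not the verified ::internal::Padding layout):

#include <algorithm>
#include <cstdint>

struct PaddingSketch
{
  uint32_t left, right, top, bottom; // hypothetical field names
};

inline PaddingSketch same_padding_sketch(int32_t ifm_h, int32_t ifm_w, int32_t ofm_h,
                                         int32_t ofm_w, int32_t vstride, int32_t hstride,
                                         int32_t kw, int32_t kh)
{
  // pad_total = max(0, (ofm - 1) * stride + kernel - ifm), split as evenly as
  // possible with the odd pixel going to the right/bottom.
  const int32_t pad_w = std::max(0, (ofm_w - 1) * hstride + kw - ifm_w);
  const int32_t pad_h = std::max(0, (ofm_h - 1) * vstride + kh - ifm_h);
  PaddingSketch p;
  p.left = pad_w / 2;
  p.right = pad_w - p.left;
  p.top = pad_h / 2;
  p.bottom = pad_h - p.top;
  return p;
}
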
uint32_t kw;
uint32_t kh;
- ::neurun::internal::operand::Shape ofm_shape{1};
- ::neurun::internal::operand::Shape ifm_shape{1};
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
::internal::Padding padding;
::internal::Stride stride;
param.kh = kh;
param.kw = kw;
- param.ofm_shape = _ctx.at(ofm_index).shape();
- param.ifm_shape = _ctx.at(ifm_index).shape();
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
param.stride.vertical = vstride;
param.stride.horizontal = hstride;
- param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? ::internal::same_padding(param.ifm_shape.asFeature(),
- param.ofm_shape.asFeature(), param.stride, kw, kh)
- : ::internal::valid_padding();
+ param.padding =
+ (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
+ : ::internal::valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
- VERBOSE(MaxPool2D) << "IFM_H: " << param.ifm_shape.asFeature().H << std::endl;
- VERBOSE(MaxPool2D) << "IFM_W: " << param.ifm_shape.asFeature().W << std::endl;
- VERBOSE(MaxPool2D) << "OFM_H: " << param.ofm_shape.asFeature().H << std::endl;
- VERBOSE(MaxPool2D) << "OFM_W: " << param.ofm_shape.asFeature().W << std::endl;
+ VERBOSE(MaxPool2D) << "IFM_H: " << _ctx.at(ifm_index).shape().asFeature().H << std::endl;
+ VERBOSE(MaxPool2D) << "IFM_W: " << _ctx.at(ifm_index).shape().asFeature().W << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_H: " << _ctx.at(ofm_index).shape().asFeature().H << std::endl;
+ VERBOSE(MaxPool2D) << "OFM_W: " << _ctx.at(ofm_index).shape().asFeature().W << std::endl;
VERBOSE(MaxPool2D) << "KER_H: " << kh << std::endl;
VERBOSE(MaxPool2D) << "KER_W: " << kw << std::endl;
VERBOSE(MaxPool2D) << "STRIDE_H: " << vstride << std::endl;
uint32_t kw;
uint32_t kh;
- ::neurun::internal::operand::Shape ofm_shape{1};
- ::neurun::internal::operand::Shape ifm_shape{1};
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
::internal::Padding padding;
::internal::Stride stride;
param.kh = kh;
param.kw = kw;
- param.ofm_shape = _ctx.at(ofm_index).shape();
- param.ifm_shape = _ctx.at(ifm_index).shape();
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
param.stride.vertical = vstride;
param.stride.horizontal = hstride;
- param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? ::internal::same_padding(param.ifm_shape.asFeature(),
- param.ofm_shape.asFeature(), param.stride, kw, kh)
- : ::internal::valid_padding();
+ param.padding =
+ (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), param.stride, kw, kh)
+ : ::internal::valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
- VERBOSE(AvgPool2D) << "IFM_H: " << param.ifm_shape.asFeature().H << std::endl;
- VERBOSE(AvgPool2D) << "IFM_W: " << param.ifm_shape.asFeature().W << std::endl;
- VERBOSE(AvgPool2D) << "OFM_H: " << param.ofm_shape.asFeature().H << std::endl;
- VERBOSE(AvgPool2D) << "OFM_W: " << param.ofm_shape.asFeature().W << std::endl;
+ VERBOSE(AvgPool2D) << "IFM_H: " << _ctx.at(ifm_index).shape().asFeature().H << std::endl;
+ VERBOSE(AvgPool2D) << "IFM_W: " << _ctx.at(ifm_index).shape().asFeature().W << std::endl;
+ VERBOSE(AvgPool2D) << "OFM_H: " << _ctx.at(ofm_index).shape().asFeature().H << std::endl;
+ VERBOSE(AvgPool2D) << "OFM_W: " << _ctx.at(ofm_index).shape().asFeature().W << std::endl;
VERBOSE(AvgPool2D) << "KER_H: " << kh << std::endl;
VERBOSE(AvgPool2D) << "KER_W: " << kw << std::endl;
VERBOSE(AvgPool2D) << "STRIDE_H: " << vstride << std::endl;
int32_t axis;
- ::neurun::internal::operand::Shape ofm_shape{1};
- std::vector<::neurun::internal::operand::Shape> ifm_shapes;
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ std::vector<::neurun::kernel::cpu::Shape> ifm_shapes;
};
Param param;
param.input_indexes = node.param().ifm_indexes;
param.axis = _ctx.at(axis_index).asScalar<int32_t>();
- param.ofm_shape = _ctx.at(ofm_index).shape();
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
for (auto ifm_ind : node.param().ifm_indexes)
{
const ::neurun::graph::operand::Index ifm_index{ifm_ind};
- param.ifm_shapes.emplace_back(_ctx.at(ifm_index).shape());
+ param.ifm_shapes.emplace_back(::neurun::kernel::cpu::getShape(_ctx.at(ifm_index)));
}
auto tensors = _tensor_builder;
int weight_index;
int bias_index;
- ::neurun::internal::operand::Shape ofm_shape{1};
- ::neurun::internal::operand::Shape ifm_shape{1};
- ::neurun::internal::operand::Shape weight_shape{1};
- ::neurun::internal::operand::Shape bias_shape{1};
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
+ ::neurun::kernel::cpu::Shape weight_shape;
+ ::neurun::kernel::cpu::Shape bias_shape;
FuseCode activation;
};
param.weight_index = weight_index.asInt();
param.bias_index = bias_index.asInt();
- param.ofm_shape = _ctx.at(output_index).shape();
- param.ifm_shape = _ctx.at(input_index).shape();
- param.weight_shape = _ctx.at(weight_index).shape();
- param.bias_shape = _ctx.at(bias_index).shape();
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
+ param.weight_shape = ::neurun::kernel::cpu::getShape(_ctx.at(weight_index));
+ param.bias_shape = ::neurun::kernel::cpu::getShape(_ctx.at(bias_index));
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
int output_index;
int input_index;
- ::neurun::internal::operand::Shape ofm_shape{1};
- ::neurun::internal::operand::Shape ifm_shape{1};
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
};
Param param;
param.output_index = output_index.asInt();
param.input_index = input_index.asInt();
- param.ofm_shape = _ctx.at(output_index).shape();
- param.ifm_shape = _ctx.at(input_index).shape();
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
auto tensors = _tensor_builder;
int output_index;
int input_index;
- ::neurun::internal::operand::Shape ofm_shape{1};
- ::neurun::internal::operand::Shape ifm_shape{1};
+ ::neurun::kernel::cpu::Shape ofm_shape;
+ ::neurun::kernel::cpu::Shape ifm_shape;
float scale;
};
param.output_index = output_index.asInt();
param.input_index = input_index.asInt();
- param.ofm_shape = _ctx.at(output_index).shape();
- param.ifm_shape = _ctx.at(input_index).shape();
+ param.ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(output_index));
+ param.ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(input_index));
param.scale = _ctx.at(scale_index).asScalar<float>();
return true;
}
-void AvgPoolLayer::configure(uint8_t *inputData,
- const ::neurun::internal::operand::Shape inputShape,
- const uint32_t paddingLeft, const uint32_t paddingRight,
- const uint32_t paddingTop, const uint32_t paddingBottom,
- const uint32_t strideWidth, const uint32_t strideHeight,
- const uint32_t kernelWidth, const uint32_t kernelHeight,
- const FuseCode activation, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape)
+void AvgPoolLayer::configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
+ const uint32_t paddingBottom, const uint32_t strideWidth,
+ const uint32_t strideHeight, const uint32_t kernelWidth,
+ const uint32_t kernelHeight, const FuseCode activation,
+ uint8_t *outputData, const Shape outputShape)
{
_inputData = inputData;
- _inputShape = convertShape(inputShape);
- _inputType = inputShape.type();
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
_paddingLeft = paddingLeft;
_paddingRight = paddingRight;
_paddingTop = paddingTop;
_kernelHeight = kernelHeight;
_activation = activation;
_outputData = outputData;
- _outputShape = convertShape(outputShape);
+ _outputShape = outputShape;
}
void AvgPoolLayer::run()
{
- if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_FLOAT32))
+ if (_inputType == OperandType::TENSOR_FLOAT32)
{
averagePoolFloat32();
}
- else if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_QUANT8_ASYMM))
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
{
averagePoolQuant8();
}
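
With getShape in place, a pool layer's call site can pass the kernel-side Shape straight through. A hypothetical example (the pointer names, index variables, and padding accessors below are placeholders, not verified API):

// Hypothetical wiring; the param fields mirror the Param struct above.
const auto ifm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ifm_index));
const auto ofm_shape = ::neurun::kernel::cpu::getShape(_ctx.at(ofm_index));
layer->configure(ifm_ptr, ifm_shape, /*paddingLeft=*/pl, /*paddingRight=*/pr,
                 /*paddingTop=*/pt, /*paddingBottom=*/pb, param.stride.horizontal,
                 param.stride.vertical, param.kw, param.kh, param.activation, ofm_ptr,
                 ofm_shape);
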
bool averagePoolQuant8();
- void configure(uint8_t *inputData, const ::neurun::internal::operand::Shape inputShape,
- const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop,
+ void configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideWidth,
const uint32_t strideHeight, const uint32_t kernelWidth,
const uint32_t kernelHeight, const FuseCode activation, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape);
+ const Shape outputShape);
void run();
FuseCode _activation;
- int32_t _inputType;
+ OperandType _inputType;
};
} // namespace cpu
}
void ConcatLayer::configure(const std::vector<const uint8_t *> &inputDataPtrs,
- const std::vector<::neurun::internal::operand::Shape> &inputShapes,
- int32_t axis, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape)
+ const std::vector<Shape> &inputShapes, int32_t axis,
+ uint8_t *outputData, const Shape outputShape)
{
_inputDataPtrs = inputDataPtrs;
for (auto shape : inputShapes)
{
- _inputShapes.emplace_back(convertShape(shape));
- _inputType = shape.type();
+ _inputShapes.emplace_back(shape);
+ _inputType = shape.type;
}
_axis = axis;
_outputData = outputData;
- _outputShape = convertShape(outputShape);
+ _outputShape = outputShape;
}
void ConcatLayer::run()
{
- if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_FLOAT32))
+ if (_inputType == OperandType::TENSOR_FLOAT32)
{
concatenationFloat32();
}
- else if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_QUANT8_ASYMM))
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
{
concatenationQuant8();
}
bool concatenationQuant8();
void configure(const std::vector<const uint8_t *> &inputDataPtrs,
- const std::vector<::neurun::internal::operand::Shape> &inputShapes, int32_t axis,
- uint8_t *outputData, const ::neurun::internal::operand::Shape outputShape);
+ const std::vector<Shape> &inputShapes, int32_t axis, uint8_t *outputData,
+ const Shape outputShape);
void run();
std::vector<Shape> _inputShapes;
Shape _outputShape;
- int32_t _inputType;
+ OperandType _inputType;
};
} // namespace cpu
return true;
}
-void ConvolutionLayer::configure(
- uint8_t *inputData, const ::neurun::internal::operand::Shape inputShape, uint8_t *kernelData,
- const ::neurun::internal::operand::Shape kernelShape, uint8_t *biasData,
- const ::neurun::internal::operand::Shape biasShape, const uint32_t paddingLeft,
- const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom,
- const uint32_t strideWidth, const uint32_t strideHeight, const FuseCode activation,
- uint8_t *outputData, const ::neurun::internal::operand::Shape outputShape)
+void ConvolutionLayer::configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+ const Shape kernelShape, uint8_t *biasData, const Shape biasShape,
+ const uint32_t paddingLeft, const uint32_t paddingRight,
+ const uint32_t paddingTop, const uint32_t paddingBottom,
+ const uint32_t strideWidth, const uint32_t strideHeight,
+ const FuseCode activation, uint8_t *outputData,
+ const Shape outputShape)
{
_inputData = inputData;
- _inputShape = convertShape(inputShape);
+ _inputShape = inputShape;
_kernelData = kernelData;
- _kernelShape = convertShape(kernelShape);
+ _kernelShape = kernelShape;
_biasData = biasData;
- _biasShape = convertShape(biasShape);
+ _biasShape = biasShape;
_paddingLeft = paddingLeft;
_paddingRight = paddingRight;
_paddingTop = paddingTop;
_strideHeight = strideHeight;
_activation = activation;
_outputData = outputData;
- _outputShape = convertShape(outputShape);
+ _outputShape = outputShape;
}
void ConvolutionLayer::run()
bool convQuant8();
- void configure(uint8_t *inputData, const ::neurun::internal::operand::Shape inputShape,
- uint8_t *kernelData, const ::neurun::internal::operand::Shape kernelShape,
- uint8_t *biasData, const ::neurun::internal::operand::Shape biasShape,
+ void configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+ const Shape kernelShape, uint8_t *biasData, const Shape biasShape,
const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
- const FuseCode activation, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape);
+ const FuseCode activation, uint8_t *outputData, const Shape outputShape);
void run();
return true;
}
-void FullyConnectedLayer::configure(
- uint8_t *inputData, const ::neurun::internal::operand::Shape inputShape, uint8_t *weightsData,
- const ::neurun::internal::operand::Shape weightsShape, uint8_t *biasData,
- const ::neurun::internal::operand::Shape biasShape, FuseCode activation, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape)
+void FullyConnectedLayer::configure(uint8_t *inputData, const Shape inputShape,
+ uint8_t *weightsData, const Shape weightsShape,
+ uint8_t *biasData, const Shape biasShape, FuseCode activation,
+ uint8_t *outputData, const Shape outputShape)
{
_inputData = inputData;
- _inputShape = convertShape(inputShape);
- _inputType = inputShape.type();
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
_weightsData = weightsData;
- _weightsShape = convertShape(weightsShape);
+ _weightsShape = weightsShape;
_biasData = biasData;
- _biasShape = convertShape(biasShape);
+ _biasShape = biasShape;
_activation = activation;
_outputData = outputData;
- _outputShape = convertShape(outputShape);
+ _outputShape = outputShape;
}
void FullyConnectedLayer::run()
{
- if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_FLOAT32))
+ if (_inputType == OperandType::TENSOR_FLOAT32)
{
fullyConnectedFloat32();
}
- else if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_QUANT8_ASYMM))
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
{
fullyConnectedQuant8();
}
bool fullyConnectedQuant8();
- void configure(uint8_t *inputData, const ::neurun::internal::operand::Shape inputShape,
- uint8_t *weightsData, const ::neurun::internal::operand::Shape weightsShape,
- uint8_t *biasData, const ::neurun::internal::operand::Shape biasShape,
- FuseCode activation, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape);
+ void configure(uint8_t *inputData, const Shape inputShape, uint8_t *weightsData,
+ const Shape weightsShape, uint8_t *biasData, const Shape biasShape,
+ FuseCode activation, uint8_t *outputData, const Shape outputShape);
void run();
FuseCode _activation;
- int32_t _inputType;
+ OperandType _inputType;
};
} // namespace cpu
return true;
}
-void MaxPoolLayer::configure(uint8_t *inputData,
- const ::neurun::internal::operand::Shape inputShape,
- const uint32_t paddingLeft, const uint32_t paddingRight,
- const uint32_t paddingTop, const uint32_t paddingBottom,
- const uint32_t strideWidth, const uint32_t strideHeight,
- const uint32_t kernelWidth, const uint32_t kernelHeight,
- const FuseCode activation, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape)
+void MaxPoolLayer::configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
+ const uint32_t paddingBottom, const uint32_t strideWidth,
+ const uint32_t strideHeight, const uint32_t kernelWidth,
+ const uint32_t kernelHeight, const FuseCode activation,
+ uint8_t *outputData, const Shape outputShape)
{
_inputData = inputData;
- _inputShape = convertShape(inputShape);
- _inputType = inputShape.type();
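PLACEHOLDER_SENTINEL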
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
_paddingLeft = paddingLeft;
_paddingRight = paddingRight;
_paddingTop = paddingTop;
_kernelHeight = kernelHeight;
_activation = activation;
_outputData = outputData;
- _outputShape = convertShape(outputShape);
+ _outputShape = outputShape;
}
void MaxPoolLayer::run()
{
- if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_FLOAT32))
+ if (_inputType == OperandType::TENSOR_FLOAT32)
{
maxPoolFloat32();
}
- else if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_QUANT8_ASYMM))
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
{
maxPoolQuant8();
}
bool maxPoolQuant8();
- void configure(uint8_t *inputData, const ::neurun::internal::operand::Shape inputShape,
- const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop,
+ void configure(uint8_t *inputData, const Shape inputShape, const uint32_t paddingLeft,
+ const uint32_t paddingRight, const uint32_t paddingTop,
const uint32_t paddingBottom, const uint32_t strideWidth,
const uint32_t strideHeight, const uint32_t kernelWidth,
const uint32_t kernelHeight, const FuseCode activation, uint8_t *outputData,
- const ::neurun::internal::operand::Shape outputShape);
+ const Shape outputShape);
void run();
FuseCode _activation;
- uint32_t _inputType;
+ OperandType _inputType;
};
} // namespace cpu
return static_cast<int32_t>(std::floor(max_input_rescaled));
}
-Shape convertShape(const ::neurun::internal::operand::Shape &o)
+Shape getShape(const ::neurun::internal::operand::Object &o)
{
Shape shape;
- shape.type = static_cast<OperandType>(o.type());
- shape.dimensions = std::vector<uint32_t>(o.dims().begin(), o.dims().end());
- shape.scale = o.scale();
+  shape.type = static_cast<OperandType>(o.typeInfo().type());
+ shape.dimensions = std::vector<uint32_t>(o.shape().dims().begin(), o.shape().dims().end());
+ shape.scale = o.shape().scale();
// shape.offset = _offset;
return shape;
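
Judging by how getShape fills it in, the kernel-side Shape is presumably a plain aggregate along these lines (inferred from the assignments above, not copied from OperationUtils.h; OperandType and the needed includes come from that header):

struct Shape
{
  OperandType type;                 // converted from the operand's TypeInfo
  std::vector<uint32_t> dimensions; // copied from the operand's dims()
  float scale;                      // quantization scale
  int32_t offset;                   // quantization zero point; left unset above
};

This also explains why the declarations earlier in this change drop the old brace initializer: a default-constructed aggregate needs no rank argument, whereas ::neurun::internal::operand::Shape{1} took one.
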
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
-Shape convertShape(const ::neurun::internal::operand::Shape &o);
+Shape getShape(const ::neurun::internal::operand::Object &o);
uint32_t sizeOfData(OperandType type, const std::vector<uint32_t> &dimensions);
return true;
}
-void ReshapeLayer::configure(uint8_t *inputData,
- const ::neurun::internal::operand::Shape &inputShape,
- uint8_t *outputData,
- const ::neurun::internal::operand::Shape &outputShape)
+void ReshapeLayer::configure(uint8_t *inputData, const Shape &inputShape, uint8_t *outputData,
+ const Shape &outputShape)
{
_inputData = inputData;
- _inputShape = convertShape(inputShape);
+ _inputShape = inputShape;
_outputData = outputData;
- _outputShape = convertShape(outputShape);
+ _outputShape = outputShape;
}
void ReshapeLayer::run() { reshapeGeneric(); }
public:
bool reshapeGeneric();
- void configure(uint8_t *inputData, const ::neurun::internal::operand::Shape &inputShape,
- uint8_t *outputData, const ::neurun::internal::operand::Shape &outputShape);
+ void configure(uint8_t *inputData, const Shape &inputShape, uint8_t *outputData,
+ const Shape &outputShape);
void run();
return true;
}
-void SoftMaxLayer::configure(uint8_t *inputData,
- const ::neurun::internal::operand::Shape &inputShape, const float beta,
- uint8_t *outputData,
- const ::neurun::internal::operand::Shape &outputShape)
+void SoftMaxLayer::configure(uint8_t *inputData, const Shape &inputShape, const float beta,
+ uint8_t *outputData, const Shape &outputShape)
{
_inputData = inputData;
- _inputShape = convertShape(inputShape);
- _inputType = inputShape.type();
+ _inputShape = inputShape;
+ _inputType = inputShape.type;
_outputData = outputData;
- _outputShape = convertShape(outputShape);
+ _outputShape = outputShape;
_beta = beta;
}
void SoftMaxLayer::run()
{
- if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_FLOAT32))
+ if (_inputType == OperandType::TENSOR_FLOAT32)
{
softmaxFloat32();
}
- else if (_inputType == static_cast<uint32_t>(OperandType::TENSOR_QUANT8_ASYMM))
+ else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
{
softmaxQuant8();
}
bool softmaxQuant8();
- void configure(uint8_t *inputData, const ::neurun::internal::operand::Shape &inputShape,
- const float beta, uint8_t *outputData,
- const ::neurun::internal::operand::Shape &outputShape);
+ void configure(uint8_t *inputData, const Shape &inputShape, const float beta, uint8_t *outputData,
+ const Shape &outputShape);
void run();
Shape _inputShape;
Shape _outputShape;
- int32_t _inputType;
+ OperandType _inputType;
};
} // namespace cpu
void TensorConvertFromCommonLayer::configure(::internal::common::Tensor *inputTensor,
::internal::cpu::Tensor *outputTensor,
- const ::neurun::internal::operand::Shape &tensorShape)
+ const Shape &tensorShape)
{
_inputTensor = inputTensor;
_outputTensor = outputTensor;
bool convert();
void configure(::internal::common::Tensor *inputTensor, ::internal::cpu::Tensor *outputTensor,
- const ::neurun::internal::operand::Shape &tensorShape);
+ const Shape &tensorShape);
void run();
::internal::common::Tensor *_inputTensor;
::internal::cpu::Tensor *_outputTensor;
- ::neurun::internal::operand::Shape _tensorShape{1};
+  Shape _tensorShape;
};
} // namespace cpu
void TensorConvertToCommonLayer::configure(::internal::cpu::Tensor *inputTensor,
::internal::common::Tensor *outputTensor,
- const ::neurun::internal::operand::Shape &tensorShape)
+ const Shape &tensorShape)
{
_inputTensor = inputTensor;
_outputTensor = outputTensor;
bool convert();
void configure(::internal::cpu::Tensor *inputTensor, ::internal::common::Tensor *outputTensor,
- const ::neurun::internal::operand::Shape &tensorShape);
+ const Shape &tensorShape);
void run();
::internal::cpu::Tensor *_inputTensor;
::internal::common::Tensor *_outputTensor;
- ::neurun::internal::operand::Shape _tensorShape{1};
+  Shape _tensorShape;
};
} // namespace cpu