namespace acl_cl
{
+// Default constructor: start in an unconfigured state — no input/output CL
+// tensors, concat axis 0, FLOAT32 operand type. configure() supplies real values.
+ConcatLayer::ConcatLayer()
+ : _input_allocs(), _output_alloc(nullptr), _axis(0), _input_type(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
bool ConcatLayer::concatenationFloat32()
{
// Input and output size check
class ConcatLayer : public ::arm_compute::IFunction
{
public:
- ConcatLayer() {}
+ ConcatLayer();
public:
void configure(const std::vector<::arm_compute::ICLTensor *> &input_allocs,
uint32_t paddingHeight = (uint32_t)_paddingTop; \
uint32_t paddingWidth = (uint32_t)_paddingLeft;
+// Default constructor: null data pointers, empty shapes, zero padding/stride/
+// kernel geometry, no fused activation, FLOAT32 operand type. All real
+// parameters are installed later by the layer's configure step.
+AvgPoolLayer::AvgPoolLayer()
+ : _inputData(nullptr), _outputData(nullptr), _inputShape(), _outputShape(), _paddingLeft(0),
+ _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
+ _kernelWidth(0), _kernelHeight(0), _activation(ANEURALNETWORKS_FUSED_NONE),
+ _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
bool AvgPoolLayer::averagePoolFloat32()
{
class AvgPoolLayer : public ::arm_compute::IFunction
{
public:
- AvgPoolLayer() {}
+ AvgPoolLayer();
public:
bool averagePoolFloat32();
namespace cpu
{
+// Default constructor (CPU backend): empty input-pointer/shape lists, null
+// output buffer, concat axis 0, FLOAT32 operand type until configured.
+ConcatLayer::ConcatLayer()
+ : _inputDataPtrs(), _outputData(nullptr), _axis(0), _inputShapes(), _outputShape(),
+ _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
bool ConcatLayer::concatenationFloat32()
{
int num_inputs = _inputShapes.size();
class ConcatLayer : public ::arm_compute::IFunction
{
public:
- ConcatLayer() {}
+ ConcatLayer();
public:
bool concatenationFloat32();
im2colGuard.reset(im2colData); \
}
+// Default constructor: null input/kernel/bias/output buffers, empty shapes,
+// zero padding and stride, no fused activation, FLOAT32 operand type.
+// Guarantees deterministic member values before configure() is called.
+ConvolutionLayer::ConvolutionLayer()
+ : _inputData(nullptr), _kernelData(nullptr), _outputData(nullptr), _biasData(nullptr),
+ _inputShape(), _kernelShape(), _outputShape(), _biasShape(), _paddingLeft(0), _paddingTop(0),
+ _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
+ _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
bool ConvolutionLayer::convFloat32()
{
ANDROID_NN_CONV_PARAMETERS(float)
class ConvolutionLayer : public ::arm_compute::IFunction
{
public:
- ConvolutionLayer() {}
+ ConvolutionLayer();
public:
bool convFloat32();
namespace cpu
{
+// Default constructor: null input/weights/bias/output buffers, empty shapes,
+// no fused activation, FLOAT32 operand type until configured.
+FullyConnectedLayer::FullyConnectedLayer()
+ : _inputData(nullptr), _weightsData(nullptr), _biasData(nullptr), _outputData(nullptr),
+ _inputShape(), _weightsShape(), _biasShape(), _outputShape(),
+ _activation(ANEURALNETWORKS_FUSED_NONE), _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
// executionMutex is used to protect concurrent access of non-threadsafe resources
// like gemmlowp::GemmContext.
// std::mutex is safe for pthreads on Android.
class FullyConnectedLayer : public ::arm_compute::IFunction
{
public:
- FullyConnectedLayer() {}
+ FullyConnectedLayer();
public:
bool fullyConnectedFloat32();
uint32_t paddingHeight = (uint32_t)_paddingTop; \
uint32_t paddingWidth = (uint32_t)_paddingLeft;
+// Default constructor: null data pointers, empty shapes, zero padding/stride/
+// kernel geometry, no fused activation, FLOAT32 operand type — mirrors
+// AvgPoolLayer's initial state; configure() sets the real parameters.
+MaxPoolLayer::MaxPoolLayer()
+ : _inputData(nullptr), _outputData(nullptr), _inputShape(), _outputShape(), _paddingLeft(0),
+ _paddingTop(0), _paddingRight(0), _paddingBottom(0), _strideWidth(0), _strideHeight(0),
+ _kernelWidth(0), _kernelHeight(0), _activation(ANEURALNETWORKS_FUSED_NONE),
+ _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
bool MaxPoolLayer::maxPoolFloat32()
{
class MaxPoolLayer : public ::arm_compute::IFunction
{
public:
- MaxPoolLayer() {}
+ MaxPoolLayer();
public:
bool maxPoolFloat32();
namespace cpu
{
+// Default constructor: null input/output buffers and empty shapes. Reshape has
+// no extra parameters (no activation/type member), so nothing else to init.
+ReshapeLayer::ReshapeLayer()
+ : _inputData(nullptr), _outputData(nullptr), _inputShape(), _outputShape()
+{
+ // DO NOTHING
+}
+
bool ReshapeLayer::reshapeGeneric()
{
size_t count = sizeOfData(_inputShape.type, _inputShape.dimensions);
class ReshapeLayer : public ::arm_compute::IFunction
{
public:
- ReshapeLayer() {}
+ ReshapeLayer();
public:
bool reshapeGeneric();
namespace cpu
{
+// Default constructor: null buffers, beta scaling factor 0.0, empty shapes,
+// FLOAT32 operand type. The actual beta comes from the op's configuration.
+SoftMaxLayer::SoftMaxLayer()
+ : _inputData(nullptr), _outputData(nullptr), _beta(0.0), _inputShape(), _outputShape(),
+ _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
bool SoftMaxLayer::softmaxFloat32()
{
::tflite::Dims<4> dim;
class SoftMaxLayer : public ::arm_compute::IFunction
{
public:
- SoftMaxLayer() {}
+ SoftMaxLayer();
public:
bool softmaxFloat32();