#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLElementwiseOperations.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/CLTensor.h>
+#include <arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h>
namespace armnn
{
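The batch normalization hunk above pulls in CLTensor.h alongside the function header because the workload holds OpenCL tensors by value for the layer parameters. A minimal sketch of why both includes are needed; the class and member names below are illustrative assumptions, not lines from the diff:

#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h>

// Sketch: both types are held by value, so their full definitions must be
// visible in the header; a forward declaration would not be enough.
class ClBatchNormalizationSketch
{
private:
    mutable arm_compute::CLBatchNormalizationLayer m_Layer; // from CLBatchNormalizationLayer.h
    arm_compute::CLTensor m_Mean;                           // from CLTensor.h
    arm_compute::CLTensor m_Variance;
    arm_compute::CLTensor m_Beta;
    arm_compute::CLTensor m_Gamma;
};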
#pragma once
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLBatchToSpaceLayer.h>
namespace armnn
{
#include <cl/ClLayerSupport.hpp>
#include <arm_compute/core/Types.h>
+#include <arm_compute/runtime/CL/functions/CLConcatenateLayer.h>
#include <boost/polymorphic_pointer_cast.hpp>
m_Data.m_Outputs[0])->GetTensor();
// Create the layer function
- m_Layer.reset(new arm_compute::CLConcatenateLayer());
+ auto layer = std::make_unique<arm_compute::CLConcatenateLayer>();
// Configure input and output tensors
size_t aclAxis = CalcAxis(descriptor.m_Parameters);
- m_Layer->configure(aclInputs, &output, aclAxis);
+ layer->configure(aclInputs, &output, aclAxis);
// Prepare
- m_Layer->prepare();
+ layer->prepare();
+ m_Layer = std::move(layer);
}
void ClConcatWorkload::Execute() const
}
}
-} //namespace armnn
\ No newline at end of file
+} //namespace armnn
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/IFunction.h>
namespace armnn
{
void Execute() const override;
private:
- mutable std::unique_ptr<arm_compute::CLConcatenateLayer> m_Layer;
+ mutable std::unique_ptr<arm_compute::IFunction> m_Layer;
};
} //namespace armnn
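The two Concat hunks above show the pattern repeated for the splitter further down: the concrete arm_compute::CLConcatenateLayer is created, configured and prepared in the source file, then stored behind the arm_compute::IFunction interface declared in the header, so the header no longer needs the blanket CLFunctions.h include. A hedged sketch of the matching Execute() body (the diff does not show it), assuming it only has to run the function configured in the constructor:

void ClConcatWorkload::Execute() const
{
    // Assumption: the constructor may leave m_Layer empty when no copy kernel
    // is needed, so guard before dispatching.
    if (m_Layer)
    {
        m_Layer->run(); // virtual IFunction::run() reaches the configured CLConcatenateLayer
    }
}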
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
-
namespace armnn
{
class ClConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLDepthConvertLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLDepthConvertLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLElementwiseOperations.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLFloor.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <memory>
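Unlike most of the headers above, the fully connected workload keeps MemoryManagerOnDemand.h because the ACL function is constructed with a shared memory manager for its intermediate tensors. A minimal sketch of that construction; the class and member names are assumptions chosen for illustration, not taken from the diff:

#include <arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>

// Sketch: CLFullyConnectedLayer accepts a std::shared_ptr<IMemoryManager>, so
// the backend's MemoryManagerOnDemand instance can be passed straight through.
class ClFullyConnectedSketch
{
public:
    explicit ClFullyConnectedSketch(std::shared_ptr<arm_compute::MemoryManagerOnDemand> memoryManager)
        : m_FullyConnectedLayer(memoryManager)
    {
    }

private:
    mutable arm_compute::CLFullyConnectedLayer m_FullyConnectedLayer;
};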
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLComparison.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLLSTMLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLElementwiseOperations.h>
namespace armnn
{
mutable arm_compute::CLElementwiseMax m_MaximumLayer;
};
-} //namespace armnn
\ No newline at end of file
+} //namespace armnn
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLReduceMean.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLElementwiseOperations.h>
namespace armnn
{
mutable arm_compute::CLElementwiseMin m_MinimumLayer;
};
-} //namespace armnn
\ No newline at end of file
+} //namespace armnn
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLNormalizationLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLPoolingLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLPReluLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLReshapeLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLScale.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <memory>
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
-#include "arm_compute/runtime/MemoryManagerOnDemand.h"
+#include <arm_compute/runtime/CL/functions/CLSoftmaxLayer.h>
+#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <memory>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
+#include <arm_compute/runtime/CL/functions/CLSplit.h>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <cl/ClTensorHandle.hpp>
}
// Create the layer function
- m_Layer.reset(new arm_compute::CLSplit());
// Configure input and output tensors
std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
}
unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
- m_Layer->configure(&input, aclOutputs, aclAxis);
+ auto layer = std::make_unique<arm_compute::CLSplit>();
+ layer->configure(&input, aclOutputs, aclAxis);
// Prepare
- m_Layer->prepare();
+ layer->prepare();
+
+ m_Layer = std::move(layer);
}
void ClSplitterWorkload::Execute() const
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/IFunction.h>
#include <functional>
void Execute() const override;
private:
- mutable std::unique_ptr<arm_compute::CLSplit> m_Layer;
+ std::unique_ptr<arm_compute::IFunction> m_Layer;
};
} //namespace armnn
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLStackLayer.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLStridedSlice.h>
namespace armnn
{
#include <backendsCommon/Workload.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/functions/CLElementwiseOperations.h>
namespace armnn
{
#include <armnn/Utils.hpp>
-#include <arm_compute/runtime/CL/CLFunctions.h>
+#include <arm_compute/runtime/CL/CLTensor.h>
+#include <arm_compute/runtime/IFunction.h>
#include <sstream>
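This last hunk appears to be a utility header, and it needs no concrete function types at all: anything that only runs an already configured function can work through arm_compute::IFunction, which is why the blanket CLFunctions.h include can be dropped here too. A small sketch of that idea, with a hypothetical helper name chosen for illustration:

#include <arm_compute/runtime/IFunction.h>

// Sketch: a generic runner needs only the IFunction interface, not any of the
// concrete CL function classes.
inline void RunAclFunction(arm_compute::IFunction& function)
{
    function.run();
}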