workloads/NeonSoftmaxBaseWorkload.cpp \
workloads/NeonSoftmaxFloatWorkload.cpp \
workloads/NeonSoftmaxUint8Workload.cpp \
- workloads/NeonSubtractionFloatWorkload.cpp \
- workloads/NeonWorkloadUtils.cpp
+ workloads/NeonSubtractionFloatWorkload.cpp
NeonSubtractionFloatWorkload.cpp
NeonSubtractionFloatWorkload.hpp
NeonWorkloads.hpp
- NeonWorkloadUtils.cpp
NeonWorkloadUtils.hpp
)
m_Gamma.get(),
m_Data.m_Parameters.m_Eps);
- InitializeArmComputeTensorDataForFloatTypes(*m_Mean, m_Data.m_Mean);
- InitializeArmComputeTensorDataForFloatTypes(*m_Variance, m_Data.m_Variance);
- InitializeArmComputeTensorDataForFloatTypes(*m_Gamma, m_Data.m_Gamma);
- InitializeArmComputeTensorDataForFloatTypes(*m_Beta, m_Data.m_Beta);
+ InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean);
+ InitializeArmComputeTensorData(*m_Variance, m_Data.m_Variance);
+ InitializeArmComputeTensorData(*m_Gamma, m_Data.m_Gamma);
+ InitializeArmComputeTensorData(*m_Beta, m_Data.m_Beta);
// Force Compute Library to perform the necessary copying and reshaping, after which
// delete all the input tensors that will no longer be needed
}
} //namespace armnn
-
-
}
BOOST_ASSERT(m_ConvolutionLayer);
- armnn::DataType dataType = m_Data.m_Weight->GetTensorInfo().GetDataType();
+ InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
- switch (dataType)
- {
- case DataType::Float16:
- {
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<Half>());
- break;
- }
- case DataType::Float32:
- {
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<float>());
- break;
- }
- case DataType::QuantisedAsymm8:
- {
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->template GetConstTensor<uint8_t>());
- break;
- }
- default:
- {
- BOOST_ASSERT_MSG(false, "Unknown DataType.");
- }
- }
}
template<armnn::DataType... dataTypes>
template class NeonConvolution2dBaseWorkload<armnn::DataType::QuantisedAsymm8>;
} //namespace armnn
-
{
if (m_Data.m_Parameters.m_BiasEnabled)
{
- InitializeArmComputeTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_ConvolutionLayer->prepare();
}
} //namespace armnn
-
{
if (m_Data.m_Parameters.m_BiasEnabled)
{
- InitialiseArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias->template GetConstTensor<int32_t>());
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_ConvolutionLayer->prepare();
BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
- InitializeArmComputeTensorDataForFloatTypes(*m_KernelTensor, m_Data.m_Weight);
+ InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (m_BiasTensor)
{
- InitializeArmComputeTensorDataForFloatTypes(*m_BiasTensor, m_Data.m_Bias);
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_pDepthwiseConvolutionLayer->prepare();
BOOST_ASSERT(m_pDepthwiseConvolutionLayer);
- InitialiseArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
+ InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
if (m_BiasTensor)
{
- InitialiseArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
+ InitializeArmComputeTensorData(*m_BiasTensor, m_Data.m_Bias);
}
m_pDepthwiseConvolutionLayer->prepare();
// Allocate
if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
{
- InitialiseArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
+ InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
}
else
{
- InitializeArmComputeTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);
+ InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
}
if (m_BiasesTensor)
{
if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
{
- InitialiseArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
+ InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
}
else
{
- InitializeArmComputeTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
+ InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
}
}
}
} //namespace armnn
-
armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);
- InitialiseArmComputeTensorData(*m_InputToForgetWeightsTensor,
- m_Data.m_InputToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_InputToCellWeightsTensor,
- m_Data.m_InputToCellWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_InputToOutputWeightsTensor,
- m_Data.m_InputToOutputWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
- m_Data.m_RecurrentToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
- m_Data.m_RecurrentToCellWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
- m_Data.m_RecurrentToOutputWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_ForgetGateBiasTensor,
- m_Data.m_ForgetGateBias->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_CellBiasTensor,
- m_Data.m_CellBias->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_OutputGateBiasTensor,
- m_Data.m_OutputGateBias->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_InputToForgetWeightsTensor,
+ m_Data.m_InputToForgetWeights);
+ InitializeArmComputeTensorData(*m_InputToCellWeightsTensor,
+ m_Data.m_InputToCellWeights);
+ InitializeArmComputeTensorData(*m_InputToOutputWeightsTensor,
+ m_Data.m_InputToOutputWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToForgetWeightsTensor,
+ m_Data.m_RecurrentToForgetWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToCellWeightsTensor,
+ m_Data.m_RecurrentToCellWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToOutputWeightsTensor,
+ m_Data.m_RecurrentToOutputWeights);
+ InitializeArmComputeTensorData(*m_ForgetGateBiasTensor,
+ m_Data.m_ForgetGateBias);
+ InitializeArmComputeTensorData(*m_CellBiasTensor,
+ m_Data.m_CellBias);
+ InitializeArmComputeTensorData(*m_OutputGateBiasTensor,
+ m_Data.m_OutputGateBias);
if (!m_Data.m_Parameters.m_CifgEnabled)
{
- InitialiseArmComputeTensorData(*m_InputToInputWeightsTensor,
- m_Data.m_InputToInputWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
- m_Data.m_RecurrentToInputWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_InputToInputWeightsTensor,
+ m_Data.m_InputToInputWeights);
+ InitializeArmComputeTensorData(*m_RecurrentToInputWeightsTensor,
+ m_Data.m_RecurrentToInputWeights);
if (m_Data.m_CellToInputWeights != nullptr)
{
- InitialiseArmComputeTensorData(*m_CellToInputWeightsTensor,
- m_Data.m_CellToInputWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_CellToInputWeightsTensor,
+ m_Data.m_CellToInputWeights);
}
- InitialiseArmComputeTensorData(*m_InputGateBiasTensor,
- m_Data.m_InputGateBias->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_InputGateBiasTensor,
+ m_Data.m_InputGateBias);
}
if (m_Data.m_Parameters.m_ProjectionEnabled)
{
- InitialiseArmComputeTensorData(*m_ProjectionWeightsTensor,
- m_Data.m_ProjectionWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_ProjectionWeightsTensor,
+ m_Data.m_ProjectionWeights);
if (m_Data.m_ProjectionBias != nullptr)
{
- InitialiseArmComputeTensorData(*m_ProjectionBiasTensor,
- m_Data.m_ProjectionBias->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_ProjectionBiasTensor,
+ m_Data.m_ProjectionBias);
}
}
if (m_Data.m_Parameters.m_PeepholeEnabled)
{
- InitialiseArmComputeTensorData(*m_CellToForgetWeightsTensor,
- m_Data.m_CellToForgetWeights->GetConstTensor<float>());
- InitialiseArmComputeTensorData(*m_CellToOutputWeightsTensor,
- m_Data.m_CellToOutputWeights->GetConstTensor<float>());
+ InitializeArmComputeTensorData(*m_CellToForgetWeightsTensor,
+ m_Data.m_CellToForgetWeights);
+ InitializeArmComputeTensorData(*m_CellToOutputWeightsTensor,
+ m_Data.m_CellToOutputWeights);
}
// Force Compute Library to perform the necessary copying and reshaping, after which
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-#include "NeonWorkloadUtils.hpp"
-#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
-#include <backends/aclCommon/ArmComputeUtils.hpp>
-#include <backends/neon/NeonTensorHandle.hpp>
-#include <backends/neon/NeonLayerSupport.hpp>
-#include <backends/CpuTensorHandle.hpp>
-
-#include <armnn/Utils.hpp>
-#include <armnn/Exceptions.hpp>
-
-#include <cstring>
-#include <boost/assert.hpp>
-#include <boost/cast.hpp>
-#include <boost/format.hpp>
-
-#include "Profiling.hpp"
-
-#include <armnn/Types.hpp>
-#include <Half.hpp>
-
-using namespace armnn::armcomputetensorutils;
-
-namespace armnn
-{
-
-// Allocates a tensor and copy the contents in data to the tensor contents.
-template<typename T>
-void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data)
-{
- InitialiseArmComputeTensorEmpty(tensor);
- CopyArmComputeITensorData(data, tensor);
-}
-
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const Half* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const float* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const uint8_t* data);
-template void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const int32_t* data);
-
-void InitializeArmComputeTensorDataForFloatTypes(arm_compute::Tensor& tensor,
- const ConstCpuTensorHandle* handle)
-{
- BOOST_ASSERT(handle);
- switch(handle->GetTensorInfo().GetDataType())
- {
- case DataType::Float16:
- InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<Half>());
- break;
- case DataType::Float32:
- InitialiseArmComputeTensorData(tensor, handle->GetConstTensor<float>());
- break;
- default:
- BOOST_ASSERT_MSG(false, "Unexpected floating point type.");
- }
-};
-
-} //namespace armnn
#pragma once
#include <backends/Workload.hpp>
-
+#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
#include <backends/neon/NeonTensorHandle.hpp>
#include <backends/neon/NeonTimer.hpp>
-
-#include <arm_compute/core/Types.h>
-#include <arm_compute/core/Helpers.h>
+#include <backends/CpuTensorHandle.hpp>
#include <arm_compute/runtime/NEON/NEFunctions.h>
-#include <arm_compute/runtime/SubTensor.h>
-#include <boost/cast.hpp>
+#include <Half.hpp>
+
+#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
+ ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
+ name, \
+ armnn::NeonTimer(), \
+ armnn::WallClockTimer())
+
+using namespace armnn::armcomputetensorutils;
namespace armnn
{
-class Layer;
-
-template<typename T>
-void InitialiseArmComputeTensorData(arm_compute::Tensor& tensor, const T* data);
-void InitializeArmComputeTensorDataForFloatTypes(arm_compute::Tensor& tensor, const ConstCpuTensorHandle* handle);
-} //namespace armnn
+template <typename T>
+void CopyArmComputeTensorData(arm_compute::Tensor& dstTensor, const T* srcData)
+{
+ InitialiseArmComputeTensorEmpty(dstTensor);
+ CopyArmComputeITensorData(srcData, dstTensor);
+}
+inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
+ const ConstCpuTensorHandle* handle)
+{
+ BOOST_ASSERT(handle);
+
+ switch(handle->GetTensorInfo().GetDataType())
+ {
+ case DataType::Float16:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<armnn::Half>());
+ break;
+ case DataType::Float32:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<float>());
+ break;
+ case DataType::QuantisedAsymm8:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<uint8_t>());
+ break;
+ case DataType::Signed32:
+ CopyArmComputeTensorData(tensor, handle->GetConstTensor<int32_t>());
+ break;
+ default:
+ BOOST_ASSERT_MSG(false, "Unexpected tensor type.");
+ }
+}
-#define ARMNN_SCOPED_PROFILING_EVENT_NEON(name) \
- ARMNN_SCOPED_PROFILING_EVENT_WITH_INSTRUMENTS(armnn::Compute::CpuAcc, \
- name, \
- armnn::NeonTimer(), \
- armnn::WallClockTimer())
+} //namespace armnn
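
For context, the pattern the patch converges on is: a workload constructor hands each ConstCpuTensorHandle to the single InitializeArmComputeTensorData helper (which now dispatches on the handle's DataType internally), then calls prepare() so Compute Library copies/reshapes the constants, and Execute() wraps the run in the relocated ARMNN_SCOPED_PROFILING_EVENT_NEON macro. The sketch below is illustrative only and not part of the patch: ExampleNeonWorkload, m_KernelTensor, m_BiasTensor and m_Function are made-up names, and it assumes the ArmNN/Compute Library headers from this source tree are on the include path.

// Illustrative sketch, not part of the patch.
#include "NeonWorkloadUtils.hpp"

#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/Tensor.h>

#include <memory>

namespace armnn
{

class ExampleNeonWorkload
{
public:
    ExampleNeonWorkload(const ConstCpuTensorHandle* weights,
                        const ConstCpuTensorHandle* biases,
                        std::unique_ptr<arm_compute::IFunction> function)
        : m_Function(std::move(function))
    {
        // One call now covers Float16, Float32, QuantisedAsymm8 and Signed32;
        // the element type is read from the handle's TensorInfo, so the
        // per-workload switch statements removed above are no longer needed.
        InitializeArmComputeTensorData(m_KernelTensor, weights);

        if (biases)
        {
            InitializeArmComputeTensorData(m_BiasTensor, biases);
        }

        // Let Compute Library do its copying and reshaping up front, after
        // which the CPU-side constant tensors can be released by the caller.
        m_Function->prepare();
    }

    void Execute() const
    {
        // The profiling macro is now provided by NeonWorkloadUtils.hpp.
        ARMNN_SCOPED_PROFILING_EVENT_NEON("ExampleNeonWorkload_Execute");
        m_Function->run();
    }

private:
    arm_compute::Tensor m_KernelTensor;
    arm_compute::Tensor m_BiasTensor;
    std::unique_ptr<arm_compute::IFunction> m_Function;
};

} // namespace armnn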