src/armnnUtils/Logging.cpp \
src/armnnUtils/Permute.cpp \
src/armnnUtils/ParserHelper.cpp \
+ src/armnnUtils/TensorUtils.cpp \
src/armnn/layers/ActivationLayer.cpp \
src/armnn/layers/AdditionLayer.cpp \
src/armnn/layers/ArithmeticBaseLayer.cpp \
src/armnnUtils/VerificationHelpers.cpp
src/armnnUtils/ParserHelper.hpp
src/armnnUtils/ParserHelper.cpp
+ src/armnnUtils/TensorUtils.hpp
+ src/armnnUtils/TensorUtils.cpp
)
if(BUILD_TF_PARSER OR BUILD_CAFFE_PARSER)
list(APPEND armnnUtils_sources
int32_t qOffset = tensorInfo.GetQuantizationOffset();
return MakeTensor<T, n>(tensorInfo, QuantizedVector<T>(qScale, qOffset, init));
}
-
-template<typename T>
-armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
- unsigned int numberOfChannels,
- unsigned int height,
- unsigned int width,
- const armnn::DataLayout dataLayout)
-{
- switch (dataLayout)
- {
- case armnn::DataLayout::NCHW:
- return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
- case armnn::DataLayout::NHWC:
- return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
- default:
- throw armnn::InvalidArgumentException("unknown data layout ["
- + std::to_string(static_cast<int>(dataLayout)) + "]");
- }
-}
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TensorUtils.hpp"
+
+namespace armnnUtils
+{
+
+armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
+ unsigned int numberOfChannels,
+ unsigned int height,
+ unsigned int width,
+ const armnn::DataLayout dataLayout)
+{
+ switch (dataLayout)
+ {
+ case armnn::DataLayout::NCHW:
+ return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
+ case armnn::DataLayout::NHWC:
+ return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
+ default:
+ throw armnn::InvalidArgumentException("Unknown data layout ["
+ + std::to_string(static_cast<int>(dataLayout)) +
+ "]", CHECK_LOCATION());
+ }
+}
+
+} // namespace armnnUtils
+
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/TypesUtils.hpp>
+
+namespace armnnUtils
+{
+armnn::TensorShape GetTensorShape(unsigned int numberOfBatches,
+ unsigned int numberOfChannels,
+ unsigned int height,
+ unsigned int width,
+ const armnn::DataLayout dataLayout);
+
+template<typename T>
+armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
+ unsigned int numberOfChannels,
+ unsigned int height,
+ unsigned int width,
+ const armnn::DataLayout dataLayout)
+{
+ switch (dataLayout)
+ {
+ case armnn::DataLayout::NCHW:
+ return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
+ case armnn::DataLayout::NHWC:
+ return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
+ default:
+ throw armnn::InvalidArgumentException("Unknown data layout ["
+ + std::to_string(static_cast<int>(dataLayout)) +
+ "]", CHECK_LOCATION());
+ }
+}
+} // namespace armnnUtils
#pragma once
#include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
#include <string>
#include <armnn/ArmNN.hpp>
// Note these tensors will use two (identical) batches.
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
- 2*outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
+ armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(2*outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo kernelDesc =
+ armnnUtils::GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
// Set quantization parameters if the requested type is a quantized type.
BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
// Creates the tensors.
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
+ armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo kernelDesc =
+ armnnUtils::GetTensorInfo<T>(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
// Set quantization parameters if the requested type is a quantized type.
unsigned int outputChannels = kernelChannels;
unsigned int outputNum = inputNum;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(1, outputChannels, kernelHeight, kernelWidth, layout);
+ armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo<T>(1, outputChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
// Set quantization parameters if the requested type is a quantized type.
unsigned int outputChannels = inputChannels * depthMultiplier;
unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(
inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(
outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc = GetTensorInfo<T>(
+ armnn::TensorInfo kernelDesc = armnnUtils::GetTensorInfo<T>(
depthMultiplier, inputChannels, kernelHeight, kernelWidth, layout);
armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
//
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
// 2-channel bias used by a number of Conv2d tests.
static std::vector<float> Bias2({0, 2});
-armnn::TensorShape GetTestTensorShape(unsigned int numberOfBatches,
- unsigned int numberOfChannels,
- unsigned int height,
- unsigned int width,
- const armnn::DataLayoutIndexed& dataLayout)
-{
- switch (dataLayout.GetDataLayout())
- {
- case armnn::DataLayout::NCHW:
- return armnn::TensorShape({numberOfBatches, numberOfChannels, height, width});
- case armnn::DataLayout::NHWC:
- return armnn::TensorShape({numberOfBatches, height, width, numberOfChannels});
- default:
- throw armnn::InvalidArgumentException("unknown data layout ["
- + std::to_string(static_cast<int>(dataLayout.GetDataLayout())) + "]");
- }
-}
-
// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
template<typename T>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
std::vector<float> inputData({
1.0f, 255.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
+ const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
std::vector<float> inputData({
1.0f, 2.0f,
unsigned int width = 1;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
unsigned int height = 1;
unsigned int width = 5;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
unsigned int height = 4;
unsigned int width = 3;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
unsigned int height = 4;
unsigned int width = 3;
- const armnn::TensorShape inputOutputShape = GetTestTensorShape(
+ const armnn::TensorShape inputOutputShape = armnnUtils::GetTensorShape(
numberOfBatches, numberOfChannels, height, width, layout);
std::vector<float> inputValues
{
#pragma once
#include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
#include "QuantizeHelper.hpp"
unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
- inputWidth, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
- outputWidth, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
+ inputWidth, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
+ outputWidth, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
std::vector<T> inputData(
QuantizedVector<T>(qScale, qOffset, {