src/armnn/layers/SubtractionLayer.cpp
src/armnn/layers/SubtractionLayer.hpp
src/armnn/BackendSettings.hpp
+ src/armnn/CompatibleTypes.hpp
src/armnn/Descriptors.cpp
src/armnn/DeviceSpec.hpp
src/armnn/Exceptions.cpp
: std::integral_constant<bool, std::is_floating_point<T>::value && sizeof(T) == 2>
{};
-template<typename T, typename U=T>
-struct GetDataTypeImpl;
-
-template<typename T>
-struct GetDataTypeImpl<T, typename std::enable_if_t<IsHalfType<T>::value, T>>
-{
- static constexpr DataType Value = DataType::Float16;
-};
-
-template<>
-struct GetDataTypeImpl<float>
-{
- static constexpr DataType Value = DataType::Float32;
-};
-
-template<>
-struct GetDataTypeImpl<uint8_t>
-{
- static constexpr DataType Value = DataType::QuantisedAsymm8;
-};
-
-template<>
-struct GetDataTypeImpl<int32_t>
-{
- static constexpr DataType Value = DataType::Signed32;
-};
-
-template<>
-struct GetDataTypeImpl<bool>
-{
- static constexpr DataType Value = DataType::Boolean;
-};
-
-template <typename T>
-constexpr DataType GetDataType()
-{
- return GetDataTypeImpl<T>::Value;
-}
-
// Returns true when the C++ type T is an integral storage type, i.e. the
// representation used by quantized tensor data (e.g. uint8_t for
// QuantisedAsymm8, int32_t for Signed32); returns false for floating-point
// types such as float or Half.
template<typename T>
constexpr bool IsQuantizedType()
{
    // NOTE(review): the previous body returned the undeclared name
    // 'dequantized', which cannot compile; integral-ness is the intended test.
    return std::is_integral<T>::value;
}
-template <typename DataType>
+// Verifies that info's runtime data type matches the DataType non-type
+// template argument; on mismatch throws armnn::Exception with a message
+// naming both the actual and the expected type.
+template <armnn::DataType DataType>
void VerifyTensorInfoDataType(const armnn::TensorInfo & info)
{
- auto expectedType = armnn::GetDataType<DataType>();
- if (info.GetDataType() != expectedType)
+ if (info.GetDataType() != DataType)
 {
 std::stringstream ss;
 ss << "Unexpected datatype:" << armnn::GetDataTypeName(info.GetDataType())
 << " for tensor:" << info.GetShape()
- << ". The type expected to be: " << armnn::GetDataTypeName(expectedType);
+ << ". The type expected to be: " << armnn::GetDataTypeName(DataType);
 throw armnn::Exception(ss.str());
 }
}
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/Types.hpp"
+#include "Half.hpp"
+
+namespace armnn
+{
+
+// CompatibleTypes<T>(dataType) reports whether the C++ type T may be used to
+// access tensor memory whose element type is the given armnn::DataType.
+// The primary template rejects every pairing; the explicit specialisations
+// below enumerate the supported combinations.
+template<typename T>
+bool CompatibleTypes(DataType dataType)
+{
+ return false;
+}
+
+template<>
+inline bool CompatibleTypes<float>(DataType dataType)
+{
+ return dataType == DataType::Float32;
+}
+
+template<>
+inline bool CompatibleTypes<Half>(DataType dataType)
+{
+ return dataType == DataType::Float16;
+}
+
+// uint8_t is compatible with two tensor types: quantised asymmetric 8-bit
+// data and Boolean tensors (both are stored one byte per element).
+template<>
+inline bool CompatibleTypes<uint8_t>(DataType dataType)
+{
+ return dataType == DataType::Boolean || dataType == DataType::QuantisedAsymm8;
+}
+
+template<>
+inline bool CompatibleTypes<int32_t>(DataType dataType)
+{
+ return dataType == DataType::Signed32;
+}
+
+} //namespace armnn
BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
}
-BOOST_AUTO_TEST_CASE(GetDataTypeTest)
-{
- BOOST_TEST((armnn::GetDataType<float>() == armnn::DataType::Float32));
- BOOST_TEST((armnn::GetDataType<uint8_t>() == armnn::DataType::QuantisedAsymm8));
- BOOST_TEST((armnn::GetDataType<int32_t>() == armnn::DataType::Signed32));
- BOOST_TEST((armnn::GetDataType<bool>() == armnn::DataType::Boolean));
-}
-
BOOST_AUTO_TEST_CASE(PermuteDescriptorWithTooManyMappings)
{
BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u }), armnn::InvalidArgumentException);
constexpr bool isHalfType = std::is_same<armnn::Half, ResolvedType>::value;
BOOST_CHECK(isHalfType);
- armnn::DataType dt = armnn::GetDataType<armnn::Half>();
- BOOST_CHECK(dt == armnn::DataType::Float16);
-
//Test utility functions return correct size
BOOST_CHECK(GetDataTypeSize(armnn::DataType::Float16) == 2);
};
BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
{
- RunTest<2, float>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
- { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
+ RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
}
struct ReLu6Fixture : ActivationFixture
};
BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture)
{
- RunTest<2, float>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
- { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
+ RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
+ { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
{
- RunTest<2, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 4, 6, 8, 10 }}});
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 4, 6, 8, 10 }}});
}
BOOST_AUTO_TEST_SUITE_END()
-
BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
{
- RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
{
- RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
+ RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 3.0f });
}
BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
{
- RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
}
BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
}
struct ConcatenationFixtureNCHW : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7 }}});
}
struct ConcatenationFixtureNHWC : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
{
- RunTest<4, uint8_t>(0,
- {{"inputTensor1", { 0, 1, 2, 3 }},
- {"inputTensor2", { 4, 5, 6, 7 }}},
- {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}});
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ {{"inputTensor1", { 0, 1, 2, 3 }},
+ {"inputTensor2", { 4, 5, 6, 7 }}},
+ {{"outputTensor", { 0, 1, 4, 5, 2, 3, 6, 7 }}});
}
struct ConcatenationFixtureDim1 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
{
- RunTest<4, uint8_t>(0,
- { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
- { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } },
- { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
- 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
- 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ { { "inputTensor1", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
+ { "inputTensor2", { 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } },
+ { { "outputTensor", { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73 } } });
}
struct ConcatenationFixtureDim3 : ConcatenationFixture
BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
{
- RunTest<4, uint8_t>(0,
- { { "inputTensor1", { 0, 1, 2, 3,
- 4, 5, 6, 7,
- 8, 9, 10, 11,
- 12, 13, 14, 15,
- 16, 17, 18, 19,
- 20, 21, 22, 23 } },
- { "inputTensor2", { 50, 51, 52, 53,
- 54, 55, 56, 57,
- 58, 59, 60, 61,
- 62, 63, 64, 65,
- 66, 67, 68, 69,
- 70, 71, 72, 73 } } },
- { { "outputTensor", { 0, 1, 2, 3,
- 50, 51, 52, 53,
- 4, 5, 6, 7,
- 54, 55, 56, 57,
- 8, 9, 10, 11,
- 58, 59, 60, 61,
- 12, 13, 14, 15,
- 62, 63, 64, 65,
- 16, 17, 18, 19,
- 66, 67, 68, 69,
- 20, 21, 22, 23,
- 70, 71, 72, 73 } } });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0,
+ { { "inputTensor1", { 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11,
+ 12, 13, 14, 15,
+ 16, 17, 18, 19,
+ 20, 21, 22, 23 } },
+ { "inputTensor2", { 50, 51, 52, 53,
+ 54, 55, 56, 57,
+ 58, 59, 60, 61,
+ 62, 63, 64, 65,
+ 66, 67, 68, 69,
+ 70, 71, 72, 73 } } },
+ { { "outputTensor", { 0, 1, 2, 3,
+ 50, 51, 52, 53,
+ 4, 5, 6, 7,
+ 54, 55, 56, 57,
+ 8, 9, 10, 11,
+ 58, 59, 60, 61,
+ 12, 13, 14, 15,
+ 62, 63, 64, 65,
+ 16, 17, 18, 19,
+ 66, 67, 68, 69,
+ 20, 21, 22, 23,
+ 70, 71, 72, 73 } } });
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2, 3,
BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
uint8_t outZero = 20;
uint8_t fz = 4; // filter zero point
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
{
uint8_t relu6Min = 6 / 2; // divide by output scale
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{
1, 2,
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
{
- RunTest<4, uint8_t>(
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
0,
{ 0, 1, 2,
3, 4, 5,
BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
{
- RunTest<2, uint8_t>(
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
0,
{ 10, 20, 30, 40 },
{ 400/2 });
BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
{
- RunTest<2, uint8_t>(
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(
0,
{ 10, 20, 30, 40 },
{ (400+10)/2 });
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
{
- RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
{
- RunTest<4, float>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
+ RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f }, { 5.0f });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
{
- RunTest<4, uint8_t>(0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
+ RunTest<4, armnn::DataType::QuantisedAsymm8>(
+ 0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
}
BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
{
- BOOST_CHECK_THROW((RunTest<4, uint8_t>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+ BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QuantisedAsymm8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_FIXTURE_TEST_CASE(ParseMeanNoReduce, SimpleMeanNoReduceFixture)
{
- RunTest<2, float>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
- {{ "outputTensor", { 1.5f } } });
+ RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
+ {{ "outputTensor", { 1.5f } } });
}
BOOST_AUTO_TEST_SUITE_END()
-
BOOST_FIXTURE_TEST_CASE(ParseMultiplication, SimpleMultiplicationFixture)
{
- RunTest<4, float>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
- 3.0f, 4.0f, 5.0f,
- 6.0f, 7.0f, 8.0f,
- 9.0f, 10.0f, 11.0f } },
- { "inputTensor2", { 1.0f, 1.0f, 1.0f,
- 5.0f, 5.0f, 5.0f,
- 1.0f, 1.0f, 1.0f,
- 5.0f, 5.0f, 5.0f} } },
- {{ "outputTensor", { 0.0f, 1.0f, 2.0f,
- 15.0f, 20.0f, 25.0f,
- 6.0f, 7.0f, 8.0f,
- 45.0f, 50.0f, 55.0f } } });
+ RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor1", { 0.0f, 1.0f, 2.0f,
+ 3.0f, 4.0f, 5.0f,
+ 6.0f, 7.0f, 8.0f,
+ 9.0f, 10.0f, 11.0f } },
+ { "inputTensor2", { 1.0f, 1.0f, 1.0f,
+ 5.0f, 5.0f, 5.0f,
+ 1.0f, 1.0f, 1.0f,
+ 5.0f, 5.0f, 5.0f} } },
+ {{ "outputTensor", { 0.0f, 1.0f, 2.0f,
+ 15.0f, 20.0f, 25.0f,
+ 6.0f, 7.0f, 8.0f,
+ 45.0f, 50.0f, 55.0f } } });
}
BOOST_AUTO_TEST_SUITE_END()
-
BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
{
- RunTest<2, float>(0,
- {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
- {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
- 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
+ RunTest<2, armnn::DataType::Float32>
+ (0,
+ {{ "inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }}},
+ {{ "outputTensor", { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 1.0f, 2.0f, 3.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 4.0f, 5.0f, 6.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f }}});
}
BOOST_AUTO_TEST_SUITE_END()
-
#include <armnn/TypesUtils.hpp>
#include "test/TensorHelpers.hpp"
+#include "TypeUtils.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include <backendsCommon/BackendRegistry.hpp>
/// Executes the network with the given input tensor and checks the result against the given output tensor.
/// This overload assumes the network has a single input and a single output.
- template <std::size_t NumOutputDimensions, typename DataType>
+ template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType = armnn::ResolveType<ArmnnType>>
void RunTest(size_t subgraphId,
- const std::vector<DataType>& inputData,
- const std::vector<DataType>& expectedOutputData);
+ const std::vector<DataType>& inputData,
+ const std::vector<DataType>& expectedOutputData);
/// Executes the network with the given input tensors and checks the results against the given output tensors.
/// This overload supports multiple inputs and multiple outputs, identified by name.
- template <std::size_t NumOutputDimensions, typename DataType>
+ template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType = armnn::ResolveType<ArmnnType>>
void RunTest(size_t subgraphId,
const std::map<std::string, std::vector<DataType>>& inputData,
const std::map<std::string, std::vector<DataType>>& expectedOutputData);
}
};
-template <std::size_t NumOutputDimensions, typename DataType>
+template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType>
void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
const std::vector<DataType>& inputData,
const std::vector<DataType>& expectedOutputData)
{
- RunTest<NumOutputDimensions, DataType>(subgraphId,
- { { m_SingleInputName, inputData } },
- { { m_SingleOutputName, expectedOutputData } });
+ RunTest<NumOutputDimensions, ArmnnType>(subgraphId,
+ { { m_SingleInputName, inputData } },
+ { { m_SingleOutputName, expectedOutputData } });
}
-template <std::size_t NumOutputDimensions, typename DataType>
-void
-ParserFlatbuffersFixture::RunTest(size_t subgraphId,
- const std::map<std::string, std::vector<DataType>>& inputData,
- const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+template <std::size_t NumOutputDimensions,
+ armnn::DataType ArmnnType,
+ typename DataType>
+void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
+ const std::map<std::string, std::vector<DataType>>& inputData,
+ const std::map<std::string, std::vector<DataType>>& expectedOutputData)
{
using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
for (auto&& it : inputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
}
for (auto&& it : expectedOutputData)
{
BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
- armnn::VerifyTensorInfoDataType<DataType>(bindingInfo.second);
+ armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
outputTensors.push_back(
{ bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({3,3})));
}
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
- { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({1,9})));
}
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0,
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,9})));
}
BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, uint8_t>(0,
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
- { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
+ RunTest<3, armnn::DataType::QuantisedAsymm8>(0,
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
+ { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,3,3})));
}
BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
{
- RunTest<2, uint8_t>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
}
BOOST_AUTO_TEST_SUITE_END()
-
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<3, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<3, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2,1})));
BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
{
SetupSingleInputSingleOutput("inputTensor", "outputTensor");
- RunTest<2, uint8_t>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
+ RunTest<2, armnn::DataType::QuantisedAsymm8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
== armnn::TensorShape({2,2})));
}
m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
m_TensorInfo(tensorInfo)
{
- BOOST_ASSERT(tensorInfo.GetDataType() == GetDataType<T>());
+ BOOST_ASSERT(GetDataTypeSize(tensorInfo.GetDataType()) == sizeof(T));
}
void CreateLayerDeferred() override
}
}
+// Builds a 4-D TensorInfo of the given dataType for the supplied dimensions,
+// ordering them as {N, C, H, W} for NCHW or {N, H, W, C} for NHWC.
+// Throws InvalidArgumentException for any other DataLayout value.
+armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
+ unsigned int numberOfChannels,
+ unsigned int height,
+ unsigned int width,
+ const armnn::DataLayout dataLayout,
+ const armnn::DataType dataType)
+{
+ switch (dataLayout)
+ {
+ case armnn::DataLayout::NCHW:
+ return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, dataType);
+ case armnn::DataLayout::NHWC:
+ return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, dataType);
+ default:
+ throw armnn::InvalidArgumentException("Unknown data layout ["
+ + std::to_string(static_cast<int>(dataLayout)) +
+ "]", CHECK_LOCATION());
+ }
}
+}
unsigned int width,
const armnn::DataLayout dataLayout);
-template<typename T>
armnn::TensorInfo GetTensorInfo(unsigned int numberOfBatches,
unsigned int numberOfChannels,
unsigned int height,
unsigned int width,
- const armnn::DataLayout dataLayout)
-{
- switch (dataLayout)
- {
- case armnn::DataLayout::NCHW:
- return armnn::TensorInfo({numberOfBatches, numberOfChannels, height, width}, armnn::GetDataType<T>());
- case armnn::DataLayout::NHWC:
- return armnn::TensorInfo({numberOfBatches, height, width, numberOfChannels}, armnn::GetDataType<T>());
- default:
- throw armnn::InvalidArgumentException("Unknown data layout ["
- + std::to_string(static_cast<int>(dataLayout)) +
- "]", CHECK_LOCATION());
- }
-}
-} // namespace armnnUtils
\ No newline at end of file
+ const armnn::DataLayout dataLayout,
+ const armnn::DataType dataType);
+
+} // namespace armnnUtils
#pragma once
#include "CpuTensorHandleFwd.hpp"
+#include "CompatibleTypes.hpp"
#include <armnn/TypesUtils.hpp>
template <typename T>
const T* GetConstTensor() const
{
- BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>());
+ BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<const T*>(m_Memory);
}
template <typename T>
T* GetTensor() const
{
- BOOST_ASSERT(GetTensorInfo().GetDataType() == GetDataType<T>());
+ BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
return reinterpret_cast<T*>(m_MutableMemory);
}
#include <algorithm>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BoundedReLuTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int outputChannels = inputChannels;
unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
if(armnn::IsQuantizedType<T>())
{
0.999f, 1.0f, 0.89f, 1.0f,
};
- return BoundedReLuTestCommon(
+ return BoundedReLuTestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
inputWidth, inputHeight, inputChannels, inputBatchSize);
}
0.999f, 1.2f, 0.89f, 6.0f,
};
- return BoundedReLuTestCommon(
+ return BoundedReLuTestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
inputWidth, inputHeight, inputChannels, inputBatchSize);
}
float outputScale = 6.0f / 255.0f;
int32_t outputOffset = 0;
- return BoundedReLuTestCommon(workloadFactory, memoryManager, 6.0f, 0.0f,
- inputScale, inputOffset, outputScale, outputOffset,
- input, output,
- inputWidth, inputHeight, inputChannels, inputBatchSize);
+ return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 6.0f, 0.0f,
+ inputScale, inputOffset, outputScale, outputOffset,
+ input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}
LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
int32_t inputOffset = 112;
float inputScale = 0.0125f;
- return BoundedReLuTestCommon(workloadFactory, memoryManager, 1.0f, -1.0f,
- inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
- input, output,
- inputWidth, inputHeight, inputChannels, inputBatchSize);
+ return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -1.0f,
+ inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
+ input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
}
namespace
return result;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> ConstantLinearActivationTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
- inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<float>(workloadFactory, memoryManager);
+ return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantLinearActivationTestCommon<uint8_t>(workloadFactory, memoryManager, 4.0f, 3);
+ return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 4.0f, 3);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleActivationTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
constexpr static unsigned int outputChannels = inputChannels;
constexpr static unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
return result;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleSigmoidTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
std::vector<float> outputExpectedData(inputData.size());
std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
- return SimpleActivationTest<T>(workloadFactory,
- memoryManager,
- armnn::ActivationFunction::Sigmoid,
- 0.f,
- 0.f,
- qScale,
- qOffset,
- inputData,
- outputExpectedData);
+ return SimpleActivationTest<ArmnnType>(workloadFactory,
+ memoryManager,
+ armnn::ActivationFunction::Sigmoid,
+ 0.f,
+ 0.f,
+ qScale,
+ qOffset,
+ inputData,
+ outputExpectedData);
}
LayerTestResult<float, 4> SimpleSigmoidTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SimpleSigmoidTestCommon<uint8_t>(workloadFactory, memoryManager, 0.1f, 50);
+ return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareActivationTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int shape[] = {batchSize, channels, height, width};
- inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
armnn::ActivationFunction f,
unsigned int batchSize)
{
- return CompareActivationTestImpl<float>(
+ return CompareActivationTestImpl<armnn::DataType::Float32>(
workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
}
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::ActivationFunction f)
{
- return CompareActivationTestImpl<uint8_t>(
+ return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);
}
//
#pragma once
+#include "TypeUtils.hpp"
+
#include <armnn/INetwork.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
return net;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void ArithmeticSimpleEndToEnd(const std::vector<BackendId>& backends,
const LayerType type,
const std::vector<T> expectedOutput)
const TensorShape& outputShape = { 2, 2, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateArithmeticNetwork<GetDataType<T>()>(inputShapes, outputShape, type);
+ INetworkPtr net = CreateArithmeticNetwork<ArmnnType>(inputShapes, outputShape, type);
BOOST_TEST_CHECKPOINT("create a network");
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void ArithmeticBroadcastEndToEnd(const std::vector<BackendId>& backends,
const LayerType type,
const std::vector<T> expectedOutput)
const TensorShape& outputShape = { 1, 2, 2, 3 };
// Builds up the structure of the network
- INetworkPtr net = CreateArithmeticNetwork<GetDataType<T>()>(inputShapes, outputShape, type);
+ INetworkPtr net = CreateArithmeticNetwork<ArmnnType>(inputShapes, outputShape, type);
BOOST_TEST_CHECKPOINT("create a network");
//
#pragma once
+#include "TypeUtils.hpp"
#include "WorkloadTestUtils.hpp"
#include <armnn/ArmNN.hpp>
#include <DataLayoutIndexed.hpp>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset,
armnn::DataLayout dataLayout)
{
- armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
+ armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);
armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
- armnn::GetDataType<T>());
+ ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if (armnn::IsQuantizedType<T>())
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> BatchNormTestNhwcImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const unsigned int channels = 2;
const unsigned int num = 1;
- armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
- armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType);
+ armnn::TensorInfo tensorInfo({channels}, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
+#include "TypeUtils.hpp"
#include <Permute.hpp>
#include <DataLayoutIndexed.hpp>
}
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
// Note these tensors will use two (identical) batches.
armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnnUtils::GetTensorInfo(2*inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo<T>(2*outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnnUtils::GetTensorInfo(2*outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
armnn::TensorInfo kernelDesc =
- armnnUtils::GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ armnnUtils::GetTensorInfo(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled = bias.size() > 0;
// Creates the tensors.
- armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
- armnn::GetDataType<T>());
- armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Construct the input data.
std::vector<T> inputData;
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
// Creates the tensors.
armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
- armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
+ armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if (armnn::IsQuantizedType<T>())
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout)
{
+ using B = armnn::ResolveType<ArmnnBType>;
+
unsigned int inputHeight = 3;
unsigned int inputWidth = 3;
unsigned int inputChannels = 2;
unsigned int outputNum = inputNum;
armnn::TensorInfo inputTensorInfo =
- armnnUtils::GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+ armnnUtils::GetTensorInfo(inputNum, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
armnn::TensorInfo outputTensorInfo =
- armnnUtils::GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+ armnnUtils::GetTensorInfo(outputNum, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
armnn::TensorInfo kernelDesc({kernelDepthMultiplier, kernelChannels, kernelHeight, kernelWidth},
- armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo biasDesc({ outputChannels }, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled,
const armnn::DataLayout layout)
{
+ using B = armnn::ResolveType<ArmnnBType>;
+
unsigned int depthMultiplier = 2;
unsigned int inputHeight = 8;
unsigned int outputChannels = inputChannels * depthMultiplier;
unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(
- inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(
- outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
+ inputBatchSize, inputChannels, inputHeight, inputWidth, layout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
+ outputBatchSize, outputChannels, outputHeight, outputWidth, layout, ArmnnType);
armnn::TensorInfo kernelDesc({depthMultiplier, inputChannels, kernelHeight, kernelWidth},
- armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo biasDesc({outputChannels}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
return ret;
}
-template<typename T, typename B>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>, typename B = armnn::ResolveType<ArmnnBType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
// Creates the tensors.
- armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, ArmnnType);
armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
- armnn::GetDataType<T>());
- armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, armnn::GetDataType<T>());
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+ ArmnnType);
+ armnn::TensorInfo kernelDesc({kernelChanMul, kernelChannels, kernelHeight, kernelWidth}, ArmnnType);
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if (armnn::IsQuantizedType<T>())
return ret;
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> Convolution1dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset,
bool biasEnabled)
{
- using B = typename FullyConnectedBiasTypeForInputType<T>::Type;
-
+ using B = armnn::ResolveType<ArmnnBType>;
// Until we have a specialist 1D convolution layer, we can fake one using
// 2D convolution with the final dimension set to 1.
// I don't anticipate this being particularly slow, given that convolution is implemented
unsigned int stride = 1;
unsigned int outputSize = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.
- armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo biasInfo({outputChannels}, armnn::GetDataType<B>());
+ armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, ArmnnType);
+ armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, ArmnnType);
+ armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, ArmnnType);
+ armnn::TensorInfo biasInfo({outputChannels}, ArmnnBType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
return ret;
}
-
-
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T,4> CompareConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int kernelShape[] = {outputChannels, inputChannels, kernelHeight, kernelWidth};
unsigned int biasShape[] = {outputChannels};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
- kernelDesc = armnn::TensorInfo(4, kernelShape, armnn::GetDataType<T>());
- biasDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
+ kernelDesc = armnn::TensorInfo(4, kernelShape, ArmnnType);
+ biasDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
LayerTestResult<T,4> ret(outputTensorInfo);
return ret;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
int32_t qOffset = 0;
- inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
- outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), armnn::GetDataType<T>(), outputQScale, qOffset);
- kernelDesc = armnn::TensorInfo(4, kernelShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
+ inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), ArmnnType, inputsQScale, qOffset);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), ArmnnType, outputQScale, qOffset);
+ kernelDesc = armnn::TensorInfo(4, kernelShape.data(), ArmnnType, inputsQScale, qOffset);
biasDesc = armnn::TensorInfo(
- 1, biasShape.data(), armnn::GetBiasDataType(armnn::GetDataType<T>()), inputsQScale, qOffset);
+ 1, biasShape.data(), armnn::GetBiasDataType(ArmnnType), inputsQScale, qOffset);
LayerTestResult<T, 4> ret(outputTensorInfo);
return ret;
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Debug4DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_LayerName = "TestOutput";
desc.m_Parameters.m_SlotIndex = 1;
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
expectedStringOutput);
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Debug3DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
armnn::DebugQueueDescriptor desc;
desc.m_Parameters.m_LayerName = "TestOutput";
- inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
expectedStringOutput);
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Debug2DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
armnn::DebugQueueDescriptor desc;
desc.m_Parameters.m_LayerName = "TestOutput";
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
expectedStringOutput);
}
-template <typename T>
+template <armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 1> Debug1DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
armnn::DebugQueueDescriptor desc;
desc.m_Parameters.m_LayerName = "TestOutput";
- inputTensorInfo = armnn::TensorInfo(1, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(1, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(1, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(1, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
// SPDX-License-Identifier: MIT
//
+#include "TypeUtils.hpp"
#include "WorkloadTestUtils.hpp"
#include <backendsCommon/IBackendInternal.hpp>
// Tests the fully connected layer with large values, optionally transposing weights.
// Note this is templated for consistency, but the nature of this tests makes it unlikely to be useful in Uint8 mode.
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> FullyConnectedLargeTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int biasShape[] = { outputChannels };
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
- weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::GetDataType<T>());
- biasesDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
+ weightsDesc = armnn::TensorInfo(2, weightsShape, ArmnnType);
+ biasesDesc = armnn::TensorInfo(1, biasShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
#include "LayerTests.hpp"
#include "WorkloadTestUtils.hpp"
#include "TensorUtils.hpp"
+#include "TypeUtils.hpp"
#include "test/TensorHelpers.hpp"
#include "TensorCopyUtils.hpp"
static std::vector<float> Bias2({0, 2});
// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
{
if(biasEnabled)
{
- armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
+ armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
return bias;
}
}
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
// Use a 2-element batch with 3-channel 3x5 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
1, 1, 1,
})));
// Expected output is 2 batch elements of a 1-channel 14x4 image.
- armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout);
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
// Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
// Use common single-batch 3-channel 16x8 image.
- armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
// Use a 2-element batch of 3-channel 3x3 kernels.
- armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
1, 1, 1,
})));
// Expected output is 1 batch of a 2-channel 14x6 image.
- armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout);
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
{
// Use common single-batch 5x5 image.
- armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 3, 4, 1}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
{
1, 5, 2, 3,
// Use a 2-element batch of 3-channel 3x3 kernels.
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
4, 5, 6,
0, 0, 0,
});
// Expected output is 1 batch of a 5x5 image.
- armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 3, 4, 1}, ArmnnType);
const std::vector<float> outputData =
{
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
- return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
- dataLayout,
- qScale,
- qOffset);
+ return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ boost::multi_array<T, 1>(),
+ expectedOutput,
+ dataLayout,
+ qScale,
+ qOffset);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2d3x3Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout& dataLayout)
{
// Input is a single-batch, 1 channel, 5x5 image.
- armnn::TensorInfo inputDesc({1, 5, 5, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 5, 5, 1}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
{
1, 5, 2, 3, 5,
});
// Use a 3x3 kernel.
- armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 3, 3, 1}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc,
{
4, 5, 6,
});
// Expected output is a single-batch, 1 channel, 3x3 image.
- armnn::TensorInfo outputDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 3, 3, 1}, ArmnnType);
const std::vector<T> outputData =
{
uint32_t strideX = 2;
uint32_t strideY = 2;
- return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- boost::multi_array<T, 1>(),
- expectedOutput,
- dataLayout,
- qScale,
- qOffset,
- padLeft,
- padTop,
- padRight,
- padBottom,
- strideX,
- strideY);
+ return SimpleConvolution2dNhwcTestImpl<ArmnnType, ArmnnType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ boost::multi_array<T, 1>(),
+ expectedOutput,
+ dataLayout,
+ qScale,
+ qOffset,
+ padLeft,
+ padTop,
+ padRight,
+ padBottom,
+ strideX,
+ strideY);
}
LayerTestResult<float, 4> SimpleConvolution2d3x5Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ return SimpleConvolution2d3x5TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.f, 0, biasEnabled, layout);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory,
- memoryManager,
- 0.f,
- 0,
- biasEnabled,
- armnn::DataLayout::NHWC);
+ return SimpleConvolution2d3x3NhwcTestCommon<armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ 0.f,
+ 0,
+ biasEnabled,
+ armnn::DataLayout::NHWC);
}
LayerTestResult<float, 4> SimpleConvolution2d3x3Stride2x2Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3Stride2x2TestCommon<float>(workloadFactory,
- memoryManager,
- 0.f,
- 0,
- biasEnabled,
- layout);
+ return SimpleConvolution2d3x3Stride2x2TestCommon<armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ 0.f,
+ 0,
+ biasEnabled,
+ layout);
}
LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
+ return SimpleConvolution2d3x3TestCommon<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
// Use a single-batch 1-channel 3x3 image as input.
- armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
11,21,31,
})));
// Use 1 batch of a 1-channel 2x2 kernel.
- armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-11,-21,
//[-11*0 -21*13 -12*0 -22*0 ; -11*13 -21*23 -12*0 -22*0 ; -11*23 -21*33 -12*0 -22*0 ; -11*33 -21*0 -12*0 -22*0 ..]
//[-11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ; -11*0 -21*0 -12*0 -22*0 ..]
//[..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ; ..... ..... ..... ..... ..]
- armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
- memoryManager,
- input,
- kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
- expectedOutput,
- qScale,
- qOffset,
- layout,
- 1, // Padding left.
- 2, // Padding top.
- 3, // Padding right.
- 4); // Padding bottom.
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
+ memoryManager,
+ input,
+ kernel,
+ GetBias2<ArmnnBType>(false, qScale, qOffset),
+ expectedOutput,
+ qScale,
+ qOffset,
+ layout,
+ 1, // Padding left.
+ 2, // Padding top.
+ 3, // Padding right.
+ 4); // Padding bottom.
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
// Use a single-batch 1-channel 5x5 image as input.
- armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
11,21,31,41,51,
})));
// Use 1 batch of a 1-channel 4x4 kernel.
- armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-11,-21,-31,-41,
})));
// Expected output is 1 batch of a 1-channel 5x5 image.
- armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
std::vector<T> myVec(outputDesc.GetNumElements(), 0);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
QuantizedVector<T>(qScale, qOffset, {
-5032, -7256, -9376, -6142, -3368,
})));
- return SimpleConvolution2dTestImpl<T>(workloadFactory,
+ return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
+ GetBias2<ArmnnBType>(false, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
2); // Padding bottom.
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
// Use a single-batch 2-channel 5x5 image as input.
- armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
0, 1, 2, 3, 4,
})));
// Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
- armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
32, 31, 30, 29,
// Expected output is 1 batch of a 2-channel 5x5 image.
// Calculated using the python tensorflow library with strideX=1, strideY=1.
- armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
1062, 1580, 1850, 1530, 1117,
3100, 4352, 4452, 3517, 2465
})));
- return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
+ return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
1); // strideY
}
-template<typename T>
+template<armnn::DataType ArmnnType, armnn::DataType ArmnnBType,
+ typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset,
bool biasEnabled)
{
- armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
0, 25,
24, 49
})));
- armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
32, 31, 30, 29,
4, 3, 2, 1
})));
- armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, ArmnnType);
boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
1062, 1550,
2457, 2465
})));
- return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
+ return DepthwiseConvolution2dNhwcTestImpl<ArmnnType, ArmnnBType>(
+ workloadFactory,
memoryManager,
input,
kernel,
- GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+ GetBias2<ArmnnBType>(biasEnabled, qScale, qOffset),
expectedOutput,
qScale,
qOffset,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(
- workloadFactory, memoryManager, layout, 0.0f, 0);
+ return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon
+ <armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, layout, 0.0f, 0);
}
LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout layout)
{
- return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(
+ return SimpleConvolution2dAsymmetricPaddingTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, layout, 0.0f, 0);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<float, float>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ return DepthwiseConvolution2dNhwcTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dAsymmetricTestCommon<float>(
+ return DepthwiseConvolution2dAsymmetricTestCommon<armnn::DataType::Float32, armnn::DataType::Float32>(
workloadFactory, memoryManager, 0.0f, 0, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(
+ return DepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
bool biasEnabled,
const armnn::DataLayout layout)
{
- return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(
+ return DepthwiseConvolution2dDepthMul1TestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
workloadFactory, memoryManager, 0.5f, 50, biasEnabled, layout);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
+ return Convolution1dTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, biasEnabled);
}
LayerTestResult<uint8_t, 4> Convolution1dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool biasEnabled)
{
- return Convolution1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
+ return Convolution1dTestImpl<armnn::DataType::QuantisedAsymm8, armnn::DataType::Signed32>(
+ workloadFactory, memoryManager, 0.1f, 128, biasEnabled);
}
LayerTestResult<float,4> CompareConvolution2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory)
{
- return CompareConvolution2dTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory);
+ return CompareConvolution2dTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory);
}
-template<typename T>
-LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(
+LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
armnn::IWorkloadFactory& refWorkloadFactory,
const armnn::DataLayout layout)
{
- return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, memoryManager, refWorkloadFactory, layout);
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory, layout);
}
-template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
- armnn::IWorkloadFactory&,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
- armnn::IWorkloadFactory&,
- const armnn::DataLayout);
-
-template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
- armnn::IWorkloadFactory&,
- const armnn::IBackendInternal::IMemoryManagerSharedPtr&,
- armnn::IWorkloadFactory&,
- const armnn::DataLayout);
+LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::DataLayout layout)
+{
+ return CompareDepthwiseConvolution2dTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, refWorkloadFactory, layout);
+}
LayerTestResult<float,4> SimpleNormalizationAcrossTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<float>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, beta);
}
LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float beta)
{
- return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, beta);
+ return SimpleSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, beta);
}
LayerTestResult<float,4> CompareNormalizationTest(
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<float>(workloadFactory, memoryManager, refWorkloadFactory, beta);
+ return CompareSoftmaxTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, refWorkloadFactory, beta);
}
LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(
armnn::IWorkloadFactory& refWorkloadFactory,
float beta)
{
- return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, memoryManager, refWorkloadFactory, beta);
+ return CompareSoftmaxTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, refWorkloadFactory, beta);
}
std::vector<LayerTestResult<float,3>> SplitterTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<float>(workloadFactory, memoryManager);
+ return SplitterTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SplitterTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return SplitterTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 3> CopyViaSplitterTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return CopyViaSplitterTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{ 2., 3., 3., 4. }));
- armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
-0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
- armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
-0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
{2., 3., 3., 4.}));
- armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
+ armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
{{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
-0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcastTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
if (armnn::IsQuantizedType<T>())
{
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
- armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
+ armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, ArmnnType);
if (armnn::IsQuantizedType<T>())
{
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return AdditionBroadcastTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, memoryManager, 2.f, 0);
+ return AdditionBroadcastTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 2.f, 0);
}
LayerTestResult<float, 4> AdditionBroadcast1ElementTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, memoryManager, 0.1333333f, 128);
+ return AdditionBroadcast1ElementTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.1333333f, 128);
}
LayerTestResult<float,4> CompareAdditionTest(
}
namespace {
- template <typename Descriptor, typename dataType>
- LayerTestResult<dataType, 4> ElementwiseTestHelper
+ template <typename Descriptor, armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+ LayerTestResult<T, 4> ElementwiseTestHelper
(armnn::IWorkloadFactory & workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,
- const unsigned int shape0[4], std::vector<dataType> values0,
- const unsigned int shape1[4], std::vector<dataType> values1,
- const unsigned int outShape[4], std::vector<dataType> outValues,
+ const unsigned int shape0[4], std::vector<T> values0,
+ const unsigned int shape1[4], std::vector<T> values1,
+ const unsigned int outShape[4], std::vector<T> outValues,
float qScale = 0.0f, int qOffset = 0)
{
const size_t dimensionCount = 4;
- armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::GetDataType<dataType>()};
- armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::GetDataType<dataType>()};
- armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::GetDataType<dataType>()};
+ armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, ArmnnType};
+ armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, ArmnnType};
+ armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, ArmnnType};
- auto input0 = MakeTensor<dataType, 4>(inputTensorInfo0, values0);
- auto input1 = MakeTensor<dataType, 4>(inputTensorInfo1, values1);
+ auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+ auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
- if (armnn::IsQuantizedType<dataType>())
+ if (armnn::IsQuantizedType<T>())
{
inputTensorInfo0.SetQuantizationScale(qScale);
inputTensorInfo0.SetQuantizationOffset(qOffset);
outputTensorInfo.SetQuantizationOffset(qOffset);
}
- LayerTestResult<dataType,4> ret(outputTensorInfo);
+ LayerTestResult<T,4> ret(outputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
- ret.outputExpected = MakeTensor<dataType, 4>(outputTensorInfo, outValues);
+ ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
return ret;
}
}
std::vector<float> output({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> EqualBroadcast1ElementTest(
std::vector<float> output({ 1, 0, 0, 0, 0, 0, 0, 0});
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
std::vector<float> output({ 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> EqualUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::EqualQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
std::vector<float> output({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
std::vector<float> output({ 0, 1, 1, 1, 1, 1, 1, 1});
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
std::vector<float> output({ 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> GreaterUint8Test(
std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
1, 1, 1, 1, 1, 1 });
- return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
std::vector<float> output({ 2, 2, 2, 2, 5, 5, 5, 5,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output);
}
LayerTestResult<float, 4> MaximumBroadcast1ElementTest(
std::vector<float> output({ 2, 2, 3, 4, 5, 6, 7, 8});
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<float, 4> MaximumBroadcast1DVectorTest(
std::vector<float> output({ 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, float>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> MaximumUint8Test(
std::vector<uint8_t> output({ 2, 2, 2, 2, 6, 6, 6, 6,
4, 4, 4, 4, 5, 5, 5, 5 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape,
- input0,
- shape,
- input1,
- shape,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape,
+ input0,
+ shape,
+ input1,
+ shape,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> MaximumBroadcast1ElementUint8Test(
std::vector<uint8_t> output({ 2, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t >
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<uint8_t, 4> MaximumBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 10, 3, 4, 10, 6,
7, 10, 9, 10, 11, 12 });
- return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, uint8_t>
- (workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MaximumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
LayerTestResult<float, 4> MinimumBroadcast1ElementTest1(
std::vector<float> output({ 1, 2, 2, 2, 2, 2, 2, 2});
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
std::vector<float> output({ 1, 5, 3, 2, 5, 5, 1, 5});
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, float>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::Float32>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output);
}
LayerTestResult<uint8_t, 4> MinimumBroadcast1DVectorUint8Test(
std::vector<uint8_t> output({ 1, 2, 3, 1, 2, 1,
1, 1, 2, 1, 2, 3 });
- return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, uint8_t>(workloadFactory,
- memoryManager,
- shape0,
- input0,
- shape1,
- input1,
- shape0,
- output,
- 1.0f,
- 0);
+ return ElementwiseTestHelper<armnn::MinimumQueueDescriptor, armnn::DataType::QuantisedAsymm8>(
+ workloadFactory,
+ memoryManager,
+ shape0,
+ input0,
+ shape1,
+ input1,
+ shape0,
+ output,
+ 1.0f,
+ 0);
}
namespace {
}
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 1> Concatenation1dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType);
auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
- armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType);
LayerTestResult<T, 1> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation1dTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const float qScale,
const int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
+
+ LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
- LayerTestResult<T, 2> result =
- Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
+
+ LayerTestResult<T, 2> result = Concatenation2dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
- LayerTestResult<T, 2> result =
- Concatenation2dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
10.0f, 11.0f, 12.0f,
}));
- armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType);
auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
4.0f, 5.0f, 6.0f,
7.0f, 8.0f, 9.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType);
auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 1
16.0f, 17.0f, 18.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType);
LayerTestResult<T, 2> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType);
auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
1.0f, 2.0f, 3.0f,
10.0f, 11.0f, 12.0f,
}));
- armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType);
auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType);
auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0
9.0f,
18.0f
}));
- armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType);
LayerTestResult<T, 2> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
+
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
- LayerTestResult<T, 3> result =
- Concatenation3dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
+ LayerTestResult<T, 3> result = Concatenation3dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2TestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concatenation3dDim2TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f, 8.0f,
11.0f, 12.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
25.0f, 26.0f,
35.0f, 36.0f
}));
- armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f, 8.0f,
15.0f, 16.0f,
}));
- armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
17.0f, 18.0f,
31.0f, 32.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
1.0f, 2.0f,
23.0f, 24.0f
}));
- armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType);
auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
7.0f,
29.0f
}));
- armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType);
auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
// Batch 0, Channel 0
13.0f, 14.0f, 50.0f,
35.0f, 36.0f, 55.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType);
LayerTestResult<T, 3> result(outputTensorInfo);
std::vector<T> output;
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+ return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 0,
- true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 1,
- true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale,
int32_t qOffset)
{
- armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 2,
- true, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
3.0f, 4.0f,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDim3TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset,
bool useSubtensor)
{
- armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType);
+
+ LayerTestResult<T, 4> result = Concatenation4dTestImpl<ArmnnType>(
+ workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
- LayerTestResult<T, 4> result = Concatenation4dTestImpl<T>(workloadFactory, memoryManager, outputTensorInfo, 3,
- useSubtensor, qScale, qOffset);
result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concatenation4dDim3TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim0TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
unsigned int dimension = 0;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim0TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim1TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
unsigned int dimension = 1;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim1TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim2TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
int32_t qOffset)
{
unsigned int dimension = 2;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f,
27.0f, 28.0f
}));
- armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim2TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Concatenation4dDiffShapeDim3TestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
unsigned int dimension = 3;
- armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType);
auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
1.0f, 2.0f,
11.0f, 12.0f
}));
- armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType);
auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
11.0f, 12.0f, 13.0f,
26.0f, 27.0f, 28.0f
}));
- armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType);
LayerTestResult<T, 4> result(outputTensorInfo);
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDiffShapeDim3TestImpl<float>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+ return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
}
LayerTestResult<float, 4> ResizeBilinearNopTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 255.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 4.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- const armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
- const armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+ const armnn::TensorInfo inputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
+
+ const armnn::TensorInfo outputTensorInfo =
+ armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
std::vector<float> inputData({
1.0f, 2.0f,
} // anonymous namespace
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> Pad2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::TensorShape inputShape{ 3, 3 };
const armnn::TensorShape outputShape{ 7, 7 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale, qOffset,
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> Pad3dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::TensorShape inputShape{ 2, 2, 2 };
const armnn::TensorShape outputShape{ 3, 5, 6 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
return result;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> Pad4dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
- const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
- const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+ const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
+ const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType);
std::vector<T> inputValues(
QuantizedVector<T>(qScale,qOffset,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 3> PadUint83dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 4> PadUint84dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<float, 2> PadFloat322dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad2dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 3> PadFloat323dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad3dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad3dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 4> PadFloat324dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Pad4dTestCommon<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return Pad4dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<float, 4> L2Normalization1dTest(
inputValues, expectedOutputValues, layout);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
constexpr unsigned int outputChannels = inputChannels;
constexpr unsigned int outputBatchSize = inputBatchSize;
- armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
- armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<float>(workloadFactory, memoryManager, 0.0f, 0);
+ return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
}
LayerTestResult<uint8_t, 4> ConstantTestUint8(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 1.0f, 0);
+ return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
}
LayerTestResult<uint8_t, 3> MergerUint8Test(
2.f, 4.f
};
- return BatchNormTestImpl<float>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 0.f, 0, armnn::DataLayout::NCHW);
+ return BatchNormTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 0.f, 0, armnn::DataLayout::NCHW);
}
LayerTestResult<float, 4> BatchNormNhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<float>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 0.f, 0, armnn::DataLayout::NHWC);
+ return BatchNormTestImpl<armnn::DataType::Float32>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 0.f, 0, armnn::DataLayout::NHWC);
}
LayerTestResult<uint8_t, 4> BatchNormUint8Test(
2.f, 4.f
};
- return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 1.f/20.f, 50, armnn::DataLayout::NCHW);
+ return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NCHW);
}
LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
6.f, 4.f
};
- return BatchNormTestImpl<uint8_t>(workloadFactory, memoryManager,
- inputOutputShape, inputValues, expectedOutputValues,
- 1.f/20.f, 50, armnn::DataLayout::NHWC);
+ return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager,
+ inputOutputShape, inputValues, expectedOutputValues,
+ 1.f/20.f, 50, armnn::DataLayout::NHWC);
}
LayerTestResult<uint8_t, 4> ConstantUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return ConstantTestImpl<uint8_t>(workloadFactory, memoryManager, 2e-6f, 1);
+ return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
}
LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation1dTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ return Concatenation3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
+ return Concatenation3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDim3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
{
- return Concatenation4dDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ return Concatenation4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim0Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim0TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim1TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim2Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Concatenation4dDiffShapeDim2TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1);
+ return Concatenation4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1);
}
LayerTestResult<uint8_t, 4> Concatenation4dDiffShapeDim3Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool useSubtensor)
{
- return Concatenation4dDiffShapeDim3TestImpl<uint8_t>(workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
+ return Concatenation4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
}
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, forceNoPadding);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(
+ return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, memoryManager, forceNoPadding);
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager, forceNoPadding);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(
+ return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
+ return SimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> SimpleAveragePooling2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleAveragePooling2dTestCommon<uint8_t>(
+ return SimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, dataLayout, 0.5, -1);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool forceNoPadding)
{
- return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(
+ return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, forceNoPadding);
}
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 0.5, -1);
+ return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 0.5, -1);
}
LayerTestResult<float, 4> SimpleL2Pooling2dTest(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
const armnn::DataLayout dataLayout)
{
- return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, dataLayout);
+ return SimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, dataLayout);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize7Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize7TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> L2Pooling2dSize9Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<float>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return L2Pooling2dSize9TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> ComparePooling2dTest(
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<float>(
+ return ComparePooling2dTestCommon<armnn::DataType::Float32>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}
armnn::IWorkloadFactory& refWorkloadFactory,
armnn::PoolingAlgorithm poolingType)
{
- return ComparePooling2dTestCommon<uint8_t>(
+ return ComparePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
bool transposeWeights)
{
- return FullyConnectedLargeTestCommon<float>(workloadFactory, memoryManager, transposeWeights);
+ return FullyConnectedLargeTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, transposeWeights);
}
LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
+ return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -5);
}
LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager, 1.0f, -5);
+ return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager, 1.0f, -5);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
+ workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(
+ workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory, memoryManager);
+ return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SimplePermuteFloat32Test(
// 4, 5, 6
// 7, 8, 9
- armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::GetDataType<float>());
- armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::GetDataType<float>());
+ armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3}, armnn::DataType::Float32);
+ armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2}, armnn::DataType::Float32);
boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
{1, 2, 3,
// 12, 16
// 24, 28
- armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
- armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::GetDataType<float>());
+ armnn::TensorInfo addInputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
+ armnn::TensorInfo addOutputTensorInfo({ 1,1,2,2}, armnn::DataType::Float32);
boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
{12, 16,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdPaddingFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdSimpleNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiChannelsNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdMultiBlockNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> SpaceToBatchNdPaddingNHWCFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNHWCTest<float>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdSimpleNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdSimpleNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiChannelsNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiChannelsNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdMultiBlockNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdMultiBlockNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> SpaceToBatchNdPaddingNHWCUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingNHWCTest<uint8_t>(workloadFactory, memoryManager);
+ return SpaceToBatchNdPaddingNHWCTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
namespace {
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSlice4DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice4DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSliceSimpleStrideFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<float>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 4> StridedSliceSimpleRangeMaskFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<float>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSliceShrinkAxisMaskFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<float>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> StridedSlice3DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> StridedSlice3DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice3DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSlice2DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DTest<float>(workloadFactory, memoryManager);
+ return StridedSlice2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> StridedSlice2DReverseFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DReverseTest<float>(workloadFactory, memoryManager);
+ return StridedSlice2DReverseTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSlice4DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice4DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice4DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleStrideUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleStrideTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceSimpleStrideTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> StridedSliceSimpleRangeMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceSimpleRangeMaskTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceSimpleRangeMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSliceShrinkAxisMaskUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSliceShrinkAxisMaskTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSliceShrinkAxisMaskTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> StridedSlice3DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice3DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice3DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> StridedSlice2DReverseUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return StridedSlice2DReverseTest<uint8_t>(workloadFactory, memoryManager);
+ return StridedSlice2DReverseTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> BatchToSpaceNdNhwcUintTest2(
armnn::IWorkloadFactory& workloadFactory,
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4DTest<float>(workloadFactory, memoryManager);
+ return Debug4DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 3> Debug3DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3DTest<float>(workloadFactory, memoryManager);
+ return Debug3DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 2> Debug2DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2DTest<float>(workloadFactory, memoryManager);
+ return Debug2DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<float, 1> Debug1DFloat32Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1DTest<float>(workloadFactory, memoryManager);
+ return Debug1DTest<armnn::DataType::Float32>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> Debug4DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug4DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug4DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 3> Debug3DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug3DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug3DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 2> Debug2DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug2DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug2DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 1> Debug1DUint8Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return Debug1DTest<uint8_t>(workloadFactory, memoryManager);
+ return Debug1DTest<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager);
}
LayerTestResult<uint8_t, 4> PreCompiledConvolution2dTest(
bool biasEnabled,
const armnn::DataLayout layout);
+LayerTestResult<float, 4> CompareDepthwiseConvolution2dFloatTest(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::DataLayout layout);
+
+LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dUint8Test(
+ armnn::IWorkloadFactory& workloadFactory,
+ const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+ armnn::IWorkloadFactory& refWorkloadFactory,
+ const armnn::DataLayout layout);
+
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned numUnits = outputSize;
- armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
LayerTestResult<float, 2> ret(outputTensorInfo);
AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- armnn::TensorInfo tensorInfo4({numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo8({numUnits, 2}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::GetDataType<float>());
+ armnn::TensorInfo tensorInfo4({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo8({numUnits, 2}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::DataType::Float32);
auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
-0.34550029f, 0.04266912f, -0.15680569f,
unsigned int inputSize = 5;
unsigned numUnits = 20;
- armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
// Scratch buffer size without CIFG [batchSize, numUnits * 4]
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
LayerTestResult<float, 2> ret(outputTensorInfo);
AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
- armnn::TensorInfo tensorInfo16({outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo20({numUnits}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::GetDataType<float>());
+ armnn::TensorInfo tensorInfo16({outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20({numUnits}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::DataType::Float32);
auto inputToInputWeights =
MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f,
const unsigned int cellSize = outputSize;
// Decide the shape of all input tensors
- armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, armnn::DataType::Float32);
unsigned int scratchBufferSize = cifgEnabled ? cellSize * 3 : cellSize * 4;
- armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, armnn::GetDataType<float>());
- armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
// List of inputs
std::vector<float> inputData;
// Prepare all the weights in the descriptor for LSTM
armnn::LstmQueueDescriptor data;
- armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, armnn::GetDataType<float>());
- armnn::TensorInfo tensorInfoNumUnits({cellSize}, armnn::GetDataType<float>());
+ armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, armnn::DataType::Float32);
+ armnn::TensorInfo tensorInfoNumUnits({cellSize}, armnn::DataType::Float32);
auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
{-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
//
#pragma once
+#include "TypeUtils.hpp"
+
#include <armnn/INetwork.hpp>
#include <backendsCommon/test/CommonTestUtils.hpp>
return net;
}
-template<typename T>
+template<armnn::DataType ArmnnType>
void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
+ using T = ResolveType<ArmnnType>;
unsigned int concatAxis = 0;
const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
const TensorShape& outputShape = { 4, 3, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType>
void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
+ using T = ResolveType<ArmnnType>;
unsigned int concatAxis = 1;
const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
const TensorShape& outputShape = { 2, 6, 2, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType>
void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
+ using T = ResolveType<ArmnnType>;
unsigned int concatAxis = 2;
const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
const TensorShape& outputShape = { 2, 3, 4, 2 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
EndToEndLayerTestImpl<T>(move(net), inputTensorData, expectedOutputData, backends);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
{
using namespace armnn;
const TensorShape& outputShape = { 2, 3, 2, 4 };
// Builds up the structure of the network
- INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+ INetworkPtr net = CreateMergerNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
BOOST_TEST_CHECKPOINT("create a network");
#include <algorithm>
#include <string>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
- inputWidth, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
- outputWidth, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
+ inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);
+
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
+ outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
// channels: 2
// batch size: 2
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int channels = 2;
unsigned int batchSize = 2;
- armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
}));
}
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PadBottom = 50;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
descriptor.m_DataLayout = dataLayout;
- armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
- armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+ armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+ armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
std::vector<T> inputData(
QuantizedVector<T>(qScale, qOffset, {
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_StrideX = descriptor.m_StrideY = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 2.0f,
2.0f, 1.0f, 5.0f, 2.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f, 3.0f,
3.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_StrideX = descriptor.m_StrideY = 3;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f, 3.0f, 3.0f,
3.0f, 3.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_StrideX = descriptor.m_StrideY = 4;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f, 3.0f,
3.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_StrideX = descriptor.m_StrideY = 7;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_StrideX = descriptor.m_StrideY = 9;
descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
auto input = MakeTensor<T, 4>(inputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
}));
- armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
QuantizedVector<T>(qScale, qOffset, {
3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale = 1.0f,
int32_t qOffset = 0)
{
- armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
armnn::Pooling2dDescriptor descriptor;
descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
0.0f, 3.0f, 0.0f, 3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
// channels: 1
// batch size: 1
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
618.0f, 582.0f
};
- armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
// Scale and offset should match input - we're just calculating maximum values.
- armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
// channels: 1
// batch size: 1
//
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
10.5f,
};
- armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
// Scale and offset should match input - we're just calculating average values.
- armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
1.0f, 2.0f, -4.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
2.0f, 2.0f, 2.0f, -3.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
3.0f, 13.0f, 10.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
2.0f, 3.5f
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
9.0f, 11.0f, 12.0f, 7.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
8.0f, 1.4142f, 4.0f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
descriptor.m_PadBottom = 1;
descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
- armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
if(armnn::IsQuantizedType<T>())
1.0540f, 1.7638f, 2.5385f, 2.3570f,
}));
- return SimplePooling2dTestImpl<T>(
+ return SimplePooling2dTestImpl<ArmnnType>(
workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}
#include <algorithm>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int inputShape[] = { 2, 4 };
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
float qScale = 1.f / 256.f;
int qOffset = 0;
inputTensorInfo.SetQuantizationScale(qScale);
inputTensorInfo.SetQuantizationOffset(qOffset);
- outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+ outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
outputTensorInfo.SetQuantizationScale(qScale);
outputTensorInfo.SetQuantizationOffset(qOffset);
return ret;
}
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> CompareSoftmaxTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
unsigned int inputShape[] = { batchSize, channels };
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
float qScale = 1.f / 256.f;
int qOffset = 0;
inputTensorInfo.SetQuantizationScale(qScale);
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdSimpleTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiBlockTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {0, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdPaddingTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
desc.m_Parameters.m_BlockShape = {2, 2};
desc.m_Parameters.m_PadList = {{0, 0}, {2, 0}};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdSimpleNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdSimpleTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdSimpleTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiChannelsNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiChannelsTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdMultiChannelsTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdMultiBlockNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdMultiBlockTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdMultiBlockTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SpaceToBatchNdPaddingNHWCTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
- return SpaceToBatchNdPaddingTest<T>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
+ return SpaceToBatchNdPaddingTest<ArmnnType>(workloadFactory, memoryManager, armnn::DataLayout::NHWC);
}
#include <test/TensorHelpers.hpp>
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
// Define the tensor descriptors.
- armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+ armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType);
// Outputs of the original split.
- armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType);
// Outputs of the subsequent subtensor split.
- armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
- armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
+ armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
+ armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
// Set quantization parameters if the requested type is a quantized type.
// The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
float qScale, int32_t qOffset)
{
- const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
+ const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType);
auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
{
1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
//
#pragma once
+#include "TypeUtils.hpp"
#include "WorkloadTestUtils.hpp"
#include <armnn/ArmNN.hpp>
return ret;
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSlice4DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_End = {2, 2, 3, 1};
desc.m_Parameters.m_Stride = {1, 1, 1, 1};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSlice4DReverseTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_End = {2, -3, 3, 1};
desc.m_Parameters.m_Stride = {1, -1, 1, 1};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSliceSimpleStrideTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_End = {3, 2, 3, 1};
desc.m_Parameters.m_Stride = {2, 2, 2, 1};
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> StridedSliceSimpleRangeMaskTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_BeginMask = (1 << 4) - 1;
desc.m_Parameters.m_EndMask = (1 << 4) - 1;
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> StridedSliceShrinkAxisMaskTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_EndMask = (1 << 4) - 1;
desc.m_Parameters.m_ShrinkAxisMask = (1 << 1) | (1 << 2);
- inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> StridedSlice3DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_Stride = {2, 2, 2};
desc.m_Parameters.m_EndMask = (1 << 3) - 1;
- inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> StridedSlice3DReverseTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_End = {-4, -4, -4};
desc.m_Parameters.m_Stride = {-2, -2, -2};
- inputTensorInfo = armnn::TensorInfo(3, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(3, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(3, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(3, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> StridedSlice2DTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_Stride = {2, 2};
desc.m_Parameters.m_EndMask = (1 << 2) - 1;
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
workloadFactory, memoryManager, inputTensorInfo, outputTensorInfo, input, outputExpected, desc);
}
-template <typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2> StridedSlice2DReverseTest(
armnn::IWorkloadFactory& workloadFactory,
const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
desc.m_Parameters.m_BeginMask = (1 << 2) - 1;
desc.m_Parameters.m_EndMask = (1 << 2) - 1;
- inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
- outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+ inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+ outputTensorInfo = armnn::TensorInfo(2, outputShape, ArmnnType);
std::vector<float> input = std::vector<float>(
{
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test)
{
- MergerDim0EndToEnd<float>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<uint8_t>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test)
{
- MergerDim1EndToEnd<float>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<uint8_t>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test)
{
- MergerDim3EndToEnd<float>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<uint8_t>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_SUITE_END()
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest,
int32_t qOffset = 0;
float qScale = 0.f;
- TensorInfo inputTensorInfo({num, channels, height, width}, GetDataType<float>());
- TensorInfo outputTensorInfo({num, channels, height, width}, GetDataType<float>());
- TensorInfo tensorInfo({channels}, GetDataType<float>());
+ TensorInfo inputTensorInfo({num, channels, height, width}, DataType::Float32);
+ TensorInfo outputTensorInfo({num, channels, height, width}, DataType::Float32);
+ TensorInfo tensorInfo({channels}, DataType::Float32);
// Set quantization parameters if the requested type is a quantized type.
if(IsQuantizedType<float>())
BOOST_AUTO_TEST_SUITE_END()
-#endif //aarch64 or x86_64
\ No newline at end of file
+#endif //aarch64 or x86_64
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
{
- MergerDim0EndToEnd<float>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<uint8_t>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
{
- MergerDim1EndToEnd<float>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<uint8_t>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
{
- MergerDim3EndToEnd<float>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<uint8_t>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_SUITE_END()
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareConv2dWithReference, CompareConvolution2dTest)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NCHW)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceFloat32Nhwc,
- CompareDepthwiseConvolution2dTest<float>,
+ CompareDepthwiseConvolution2dFloatTest,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareDepthwiseConv2dWithReferenceUint8Nhwc,
- CompareDepthwiseConvolution2dTest<uint8_t>,
+ CompareDepthwiseConvolution2dUint8Test,
armnn::DataLayout::NHWC)
ARMNN_COMPARE_REF_AUTO_TEST_CASE(CompareNormalizationWithinWithReference, CompareNormalizationTest,
unsigned int outputBatchSize = inputBatchSize;
armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
- armnn::GetDataType<float>());
+ armnn::DataType::Float32);
armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
- armnn::GetDataType<float>());
+ armnn::DataType::Float32);
LayerTestResult<float, 4> result(inputTensorInfo);
const std::vector<float > expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- ArithmeticSimpleEndToEnd<float>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
const std::vector<float> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ArithmeticSimpleEndToEnd<float>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1 });
- ArithmeticSimpleEndToEnd<uint8_t>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0 });
- ArithmeticSimpleEndToEnd<uint8_t>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
const std::vector<float > expectedOutput({ 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0 });
- ArithmeticBroadcastEndToEnd<float>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
const std::vector<float> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ArithmeticBroadcastEndToEnd<float>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0 });
- ArithmeticBroadcastEndToEnd<uint8_t>(defaultBackends, LayerType::Equal, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1 });
- ArithmeticBroadcastEndToEnd<uint8_t>(defaultBackends, LayerType::Greater, expectedOutput);
+ ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
{
- MergerDim0EndToEnd<float>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
{
- MergerDim0EndToEnd<uint8_t>(defaultBackends);
+ MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
{
- MergerDim1EndToEnd<float>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
{
- MergerDim1EndToEnd<uint8_t>(defaultBackends);
+ MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
{
- MergerDim2EndToEnd<float>(defaultBackends);
+ MergerDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
{
- MergerDim2EndToEnd<uint8_t>(defaultBackends);
+ MergerDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
{
- MergerDim3EndToEnd<float>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
{
- MergerDim3EndToEnd<uint8_t>(defaultBackends);
+ MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+BOOST_AUTO_TEST_SUITE_END()