return workload;
}
+template <typename SpaceToDepthWorkload, armnn::DataType DataType>
+std::unique_ptr<SpaceToDepthWorkload> CreateSpaceToDepthWorkloadTest(armnn::IWorkloadFactory& factory,
+ armnn::Graph& graph)
+{
+ // Builds a minimal Input -> SpaceToDepth -> Output graph with block size 2,
+ // asks the factory for the corresponding workload, sanity-checks that the
+ // queue descriptor carries exactly one input and one output, and hands the
+ // workload back for backend-specific checks by the caller.
+ SpaceToDepthDescriptor desc;
+ desc.m_BlockSize = 2;
+ Layer* const layer = graph.AddLayer<SpaceToDepthLayer>(desc, "spaceToDepth");
+
+ // Creates extra layers.
+ Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+ Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+ // Connects up.
+ // Shapes assume the descriptor's default data layout: a 2x2 spatial block of
+ // one channel folds into 4 channels — TODO confirm default layout is NHWC.
+ armnn::TensorInfo inputTensorInfo({ 1, 2, 2, 1 }, DataType);
+ armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 4 }, DataType);
+
+ Connect(input, layer, inputTensorInfo);
+ Connect(layer, output, outputTensorInfo);
+
+ CreateTensorHandles(graph, factory);
+
+ // Makes the workload and checks it.
+ auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, graph, factory);
+
+ SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
+ BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+ BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+
+ return workload;
+}
+
} // Anonymous namespace
{
using namespace armnn;
- if (dataLayout == armnn::DataLayout::NCHW){
+ if (dataLayout == armnn::DataLayout::NCHW)
+ {
PermuteDataToNCHW<armnn::DataType::Float32>(backends, dataLayout, inputTensorInfo, inputData);
PermuteDataToNCHW<armnn::DataType::Float32>(backends, dataLayout, outputTensorInfo, expectedOutputData);
}
// Builds up the structure of the network
- INetworkPtr net = CreateSpaceToDepthNetwork<armnn::DataType::Float32>(inputTensorInfo.GetShape(),
- outputTensorInfo.GetShape(),
- dataLayout,
- blockSize);
+ INetworkPtr net = CreateSpaceToDepthNetwork<armnn::DataType::Float32>(
+ inputTensorInfo.GetShape(),
+ outputTensorInfo.GetShape(),
+ dataLayout,
+ blockSize);
BOOST_TEST_CHECKPOINT("Create a network");
std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
- EndToEndLayerTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(move(net),
- inputTensorData,
- expectedOutputTensorData,
- backends);
+ EndToEndLayerTestImpl<armnn::DataType::Float32, armnn::DataType::Float32>(
+ move(net),
+ inputTensorData,
+ expectedOutputTensorData,
+ backends);
+}
+
+void SpaceToDepthNHWCEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
+{
+ // Block size 2 on a single-channel 1x2x2x1 NHWC input: the whole 2x2 spatial
+ // block folds into 4 channels, so the element order is unchanged and the
+ // expected output equals the input data.
+ const unsigned int blockSize = 2;
+
+ armnn::TensorShape inputShape{1, 2, 2, 1};
+ armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+
+ armnn::TensorShape outputShape{1, 1, 1, 4};
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputData = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ });
+
+ std::vector<float> expectedOutputData = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ });
+
+ SpaceToDepthEndToEnd(defaultBackends,
+ armnn::DataLayout::NHWC,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputData,
+ expectedOutputData,
+ blockSize);
+}
+
+void SpaceToDepthNCHWEndToEndTest1(const std::vector<armnn::BackendId>& defaultBackends)
+{
+ // Same data as the NHWC variant; shapes/data are specified NHWC-style and
+ // SpaceToDepthEndToEnd permutes them to NCHW when that layout is requested.
+ const unsigned int blockSize = 2;
+
+ armnn::TensorShape inputShape{1, 2, 2, 1};
+ armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+
+ armnn::TensorShape outputShape{1, 1, 1, 4};
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputData = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ });
+
+ std::vector<float> expectedOutputData = std::vector<float>(
+ {
+ 1.0f, 2.0f, 3.0f, 4.0f
+ });
+
+ SpaceToDepthEndToEnd(defaultBackends,
+ armnn::DataLayout::NCHW,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputData,
+ expectedOutputData,
+ blockSize);
+}
+
+void SpaceToDepthNHWCEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
+{
+ // Block size 2 on a two-channel 1x2x2x2 NHWC input: all 8 values fold into
+ // 8 output channels in order, so the expected output equals the input data.
+ const unsigned int blockSize = 2;
+
+ armnn::TensorShape inputShape{1, 2, 2, 2};
+ armnn::TensorShape outputShape{1, 1, 1, 8};
+
+ // Declared input-first to match the sibling SpaceToDepth end-to-end helpers.
+ armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputData = std::vector<float>(
+ {
+ 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+ });
+
+ std::vector<float> expectedOutputData = std::vector<float>(
+ {
+ 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+ });
+
+ SpaceToDepthEndToEnd(defaultBackends,
+ armnn::DataLayout::NHWC,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputData,
+ expectedOutputData,
+ blockSize);
+}
+
+void SpaceToDepthNCHWEndToEndTest2(const std::vector<armnn::BackendId>& defaultBackends)
+{
+ // Two-channel 2x2 input, block size 2: all 8 values fold into 8 output
+ // channels, so the expected output equals the input. Shapes/data are given
+ // NHWC-style; the shared helper permutes them to NCHW for this layout.
+ const unsigned int blockSize = 2;
+
+ armnn::TensorShape inputShape{1, 2, 2, 2};
+ armnn::TensorShape outputShape{1, 1, 1, 8};
+
+ armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
+ armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
+
+ std::vector<float> inputData = std::vector<float>(
+ {
+ 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+ });
+
+ std::vector<float> expectedOutputData = std::vector<float>(
+ {
+ 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
+ });
+
+ SpaceToDepthEndToEnd(defaultBackends,
+ armnn::DataLayout::NCHW,
+ inputTensorInfo,
+ outputTensorInfo,
+ inputData,
+ expectedOutputData,
+ blockSize);
}
} // anonymous namespace
ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
}
+template <typename SpaceToDepthWorkloadType, armnn::DataType DataType>
+static void ClSpaceToDepthWorkloadTest()
+{
+ // Creates a SpaceToDepth workload through the CL factory and checks that
+ // the backing CL tensor handles have the shapes wired up by the shared
+ // CreateSpaceToDepthWorkloadTest helper.
+ // NOTE: the redundant `typename` before the non-type template parameter was
+ // dropped for consistency with RefCreateSpaceToDepthWorkloadTest.
+ Graph graph;
+ ClWorkloadFactory factory =
+ ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
+
+ auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
+
+ SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
+ auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+ auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+ BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 }));
+ BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 }));
+}
+
+// Workload-creation cases: one per CL-supported SpaceToDepth data type.
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
+{
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
+{
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float16>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
+{
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
+{
+ ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+}
+
BOOST_AUTO_TEST_SUITE_END()
#include <backendsCommon/test/ArithmeticTestImpl.hpp>
#include <backendsCommon/test/ConcatTestImpl.hpp>
#include <backendsCommon/test/DequantizeEndToEndTestImpl.hpp>
+#include <backendsCommon/test/SpaceToDepthEndToEndTestImpl.hpp>
#include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
#include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
expectedOutput);
}
+// CL end-to-end SpaceToDepth coverage: both layouts, one- and two-channel inputs,
+// delegating to the shared backend-agnostic helpers.
+BOOST_AUTO_TEST_CASE(ClSpaceToDepthNHWCEndToEndTest1)
+{
+ SpaceToDepthNHWCEndToEndTest1(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClSpaceToDepthNCHWEndToEndTest1)
+{
+ SpaceToDepthNCHWEndToEndTest1(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClSpaceToDepthNHWCEndToEndTest2)
+{
+ SpaceToDepthNHWCEndToEndTest2(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClSpaceToDepthNCHWEndToEndTest2)
+{
+ SpaceToDepthNCHWEndToEndTest2(defaultBackends);
+}
+
BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndTest)
{
Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
armnn::InvalidArgumentException);
}
+template <typename SpaceToDepthWorkloadType, armnn::DataType DataType>
+static void RefCreateSpaceToDepthWorkloadTest()
+{
+ // Creates a SpaceToDepth workload through the reference factory and checks
+ // that the workload's input/output tensor infos match the shapes wired up
+ // by the shared CreateSpaceToDepthWorkloadTest helper.
+ Graph graph;
+ RefWorkloadFactory factory;
+
+ auto workload = CreateSpaceToDepthWorkloadTest<SpaceToDepthWorkloadType, DataType>(factory, graph);
+
+ CheckInputOutput(std::move(workload),
+ TensorInfo({ 1, 2, 2, 1 }, DataType),
+ TensorInfo({ 1, 1, 1, 4 }, DataType));
+}
+
+// Workload-creation cases: one per reference-backend SpaceToDepth data type.
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
+{
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
+{
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedAsymm8>();
+}
+
+BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
+{
+ RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QuantisedSymm16>();
+}
+
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_CASE(RefSpaceToDepthNHWCEndToEndTest1)
{
- const unsigned int blockSize = 2;
-
- armnn::TensorShape inputShape{1, 2, 2, 1};
- armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
-
- armnn::TensorShape outputShape{1, 1, 1, 4};
- armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
-
- std::vector<float> inputData = std::vector<float>(
- {
- 1.0f, 2.0f, 3.0f, 4.0f
- });
-
- std::vector<float> expectedOutputData = std::vector<float>(
- {
- 1.0f, 2.0f, 3.0f, 4.0f
- });
-
- SpaceToDepthEndToEnd(defaultBackends,
- armnn::DataLayout::NHWC,
- inputTensorInfo,
- outputTensorInfo,
- inputData,
- expectedOutputData,
- blockSize);
+ // Inlined body replaced by the shared backend-agnostic helper.
+ SpaceToDepthNHWCEndToEndTest1(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSpaceToDepthNCHWEndToEndTest1)
{
- const unsigned int blockSize = 2;
+ // Inlined body replaced by the shared backend-agnostic helper.
+ SpaceToDepthNCHWEndToEndTest1(defaultBackends);
- armnn::TensorShape inputShape{1, 2, 2, 1};
- armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
-
- armnn::TensorShape outputShape{1, 1, 1, 4};
- armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
-
- std::vector<float> inputData = std::vector<float>(
- {
- 1.0f, 2.0f, 3.0f, 4.0f
- });
-
- std::vector<float> expectedOutputData = std::vector<float>(
- {
- 1.0f, 2.0f, 3.0f, 4.0f
- });
-
- SpaceToDepthEndToEnd(defaultBackends,
- armnn::DataLayout::NCHW,
- inputTensorInfo,
- outputTensorInfo,
- inputData,
- expectedOutputData,
- blockSize);
}
BOOST_AUTO_TEST_CASE(RefSpaceToDepthNHWCEndToEndTest2)
{
- const unsigned int blockSize = 2;
-
- armnn::TensorShape inputShape{1, 2, 2, 2};
- armnn::TensorShape outputShape{1, 1, 1, 8};
-
- armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
- armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
-
- std::vector<float> inputData = std::vector<float>(
- {
- 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
- });
-
- std::vector<float> expectedOutputData = std::vector<float>(
- {
- 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
- });
-
- SpaceToDepthEndToEnd(defaultBackends,
- armnn::DataLayout::NHWC,
- inputTensorInfo,
- outputTensorInfo,
- inputData,
- expectedOutputData,
- blockSize);
+ // Inlined body replaced by the shared backend-agnostic helper.
+ SpaceToDepthNHWCEndToEndTest2(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSpaceToDepthNCHWEndToEndTest2)
{
- const unsigned int blockSize = 2;
-
- armnn::TensorShape inputShape{1, 2, 2, 2};
- armnn::TensorShape outputShape{1, 1, 1, 8};
-
- armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Float32);
- armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
-
-
- std::vector<float> inputData = std::vector<float>(
- {
- 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
- });
-
- std::vector<float> expectedOutputData = std::vector<float>(
- {
- 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f
- });
-
- SpaceToDepthEndToEnd(defaultBackends,
- armnn::DataLayout::NCHW,
- inputTensorInfo,
- outputTensorInfo,
- inputData,
- expectedOutputData,
- blockSize);
+ // Inlined body replaced by the shared backend-agnostic helper.
+ SpaceToDepthNCHWEndToEndTest2(defaultBackends);
}
BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndTest)