BackendIdTests.cpp
BackendRegistryTests.cpp
BatchNormTestImpl.hpp
+ CommonTestUtils.hpp
Conv2dTestImpl.hpp
ConvertFp16ToFp32TestImpl.hpp
ConvertFp32ToFp16TestImpl.hpp
LayerTests.hpp
LstmTestImpl.hpp
NormTestImpl.hpp
+ MergerTestImpl.hpp
OptimizedNetworkTests.cpp
PermuteTestImpl.hpp
Pooling2dTestImpl.hpp
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <Graph.hpp>
+
+using namespace armnn;
+
+namespace
+{
+
+// Connects the given output slot of 'from' to the given input slot of 'to',
+// and tags the connection's output slot with the supplied tensor info.
+void Connect(IConnectableLayer* from, IConnectableLayer* to, const TensorInfo& tensorInfo,
+    unsigned int fromIndex = 0, unsigned int toIndex = 0)
+{
+    IOutputSlot& outputSlot = from->GetOutputSlot(fromIndex);
+    outputSlot.Connect(to->GetInputSlot(toIndex));
+    outputSlot.SetTensorInfo(tensorInfo);
+}
+
+}
#pragma once
#include <armnn/ArmNN.hpp>
+#include <armnn/INetwork.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <boost/test/unit_test.hpp>
+
#include <vector>
namespace
);
}
+// Runs a generic end-to-end inference test: optimizes the given network for
+// the requested backends, loads it into a freshly created runtime, feeds the
+// supplied inputs and compares the produced outputs against the expected data.
+//
+// network            - network under test (ownership is taken).
+// inputTensorData    - input data keyed by input binding id.
+// expectedOutputData - expected output data keyed by output binding id; also
+//                      determines the size of the allocated output buffers.
+// backends           - backend preference order passed to the optimizer.
+template<typename T>
+void EndToEndLayerTestImpl(INetworkPtr network,
+                           const std::map<int, std::vector<T>>& inputTensorData,
+                           const std::map<int, std::vector<T>>& expectedOutputData,
+                           std::vector<BackendId> backends)
+{
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // optimize the network
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime. Check the status so a load failure is
+    // reported here rather than as a confusing crash further down.
+    NetworkId netId;
+    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+
+    InputTensors inputTensors;
+    inputTensors.reserve(inputTensorData.size());
+    for (auto&& it : inputTensorData)
+    {
+        inputTensors.push_back({it.first,
+                                ConstTensor(runtime->GetInputTensorInfo(netId, it.first), it.second.data())});
+    }
+
+    // Allocate output storage up front so the Tensor objects point at stable memory.
+    OutputTensors outputTensors;
+    outputTensors.reserve(expectedOutputData.size());
+    std::map<int, std::vector<T>> outputStorage;
+    for (auto&& it : expectedOutputData)
+    {
+        // Construct the buffer in place instead of copying a named temporary.
+        outputStorage.emplace(it.first, std::vector<T>(it.second.size()));
+        outputTensors.push_back({it.first,
+                                 Tensor(runtime->GetOutputTensorInfo(netId, it.first),
+                                        outputStorage.at(it.first).data())});
+    }
+
+    // Does the inference, checking that it completes successfully.
+    BOOST_TEST(runtime->EnqueueWorkload(netId, inputTensors, outputTensors) == Status::Success);
+
+    // Checks the results.
+    for (auto&& it : expectedOutputData)
+    {
+        // Bind by const reference - no need to copy the output buffer to compare it.
+        const std::vector<T>& out = outputStorage.at(it.first);
+        BOOST_TEST(it.second == out);
+    }
+}
+
} // anonymous namespace
\ No newline at end of file
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/INetwork.hpp>
+
+#include <backendsCommon/test/CommonTestUtils.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+#include <vector>
+
+namespace
+{
+
+// Builds a network of N input layers feeding a single MergerLayer that
+// concatenates them along 'concatAxis', followed by one output layer.
+//
+// inputShapes - shape of each input tensor; one input layer is created per entry.
+// outputShape - shape of the concatenated output tensor.
+// concatAxis  - dimension along which the inputs are concatenated.
+// qScale      - quantization scale applied to every tensor info.
+// qOffset     - quantization offset applied to every tensor info.
+template<typename armnn::DataType DataType>
+INetworkPtr CreateMergerNetwork(const std::vector<TensorShape>& inputShapes,
+                                const TensorShape& outputShape,
+                                unsigned int concatAxis,
+                                const float qScale = 1.0f,
+                                const int32_t qOffset = 0)
+{
+    using namespace armnn;
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    // Initialize the descriptor directly rather than default-constructing and reassigning.
+    OriginsDescriptor descriptor = CreateMergerDescriptorForConcatenation(inputShapes.begin(),
+                                                                          inputShapes.end(),
+                                                                          concatAxis);
+    IConnectableLayer* merger = net->AddMergerLayer(descriptor, "merger");
+
+    // One input layer per shape; binding id i matches input index i on the merger.
+    for (unsigned int i = 0; i < inputShapes.size(); ++i)
+    {
+        TensorInfo inputTensorInfo(inputShapes[i], DataType, qScale, qOffset);
+        IConnectableLayer* input = net->AddInputLayer(boost::numeric_cast<LayerBindingId>(i));
+        Connect(input, merger, inputTensorInfo, 0, i);
+    }
+
+    TensorInfo outputTensorInfo(outputShape, DataType, qScale, qOffset);
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+    Connect(merger, output, outputTensorInfo, 0, 0);
+
+    return net;
+}
+
+// End-to-end test: concatenates two { 2, 3, 2, 2 } tensors along dimension 0
+// (batch), producing a { 4, 3, 2, 2 } output.
+template<typename T>
+void MergerDim0EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 0;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 4, 3, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+    };
+
+    // Concatenation along the outermost (batch) dimension simply stacks the
+    // two inputs back to back.
+    std::vector<T> expectedOutput{
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+    };
+
+    // Both inputs are fed the same data.
+    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }, { 1, inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+
+    // Qualify std::move explicitly instead of relying on ADL to find it.
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+// End-to-end test: concatenates two { 2, 3, 2, 2 } tensors along dimension 1
+// (channels), producing a { 2, 6, 2, 2 } output.
+template<typename T>
+void MergerDim1EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 1;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 6, 2, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+    };
+
+    // Concatenating along the channel dimension interleaves the two inputs
+    // per batch entry: batch 0 of input 0, batch 0 of input 1, then batch 1
+    // of each.
+    std::vector<T> expectedOutput{
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+    };
+
+    // Both inputs are fed the same data.
+    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }, { 1, inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+
+    // Qualify std::move explicitly instead of relying on ADL to find it.
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+// End-to-end test: concatenates two { 2, 3, 2, 2 } tensors along dimension 2
+// (height), producing a { 2, 3, 4, 2 } output.
+template<typename T>
+void MergerDim2EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 2;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 3, 4, 2 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+    };
+
+    // Concatenating along height interleaves the two inputs per channel:
+    // rows of input 0 then rows of input 1, for each channel in turn.
+    std::vector<T> expectedOutput{
+        1, 2, 3, 4, 1, 2, 3, 4,
+        5, 6, 7, 8, 5, 6, 7, 8,
+        9, 10, 11, 12, 9, 10, 11, 12,
+        1, 2, 3, 4, 1, 2, 3, 4,
+        5, 6, 7, 8, 5, 6, 7, 8,
+        9, 10, 11, 12, 9, 10, 11, 12
+    };
+
+    // Both inputs are fed the same data.
+    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }, { 1, inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+
+    // Qualify std::move explicitly instead of relying on ADL to find it.
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+// End-to-end test: concatenates two { 2, 3, 2, 2 } tensors along dimension 3
+// (width, the innermost axis), producing a { 2, 3, 2, 4 } output.
+template<typename T>
+void MergerDim3EndToEnd(const std::vector<BackendId>& backends)
+{
+    using namespace armnn;
+
+    unsigned int concatAxis = 3;
+    const std::vector<TensorShape> inputShapes{{ 2, 3, 2, 2 }, { 2, 3, 2, 2 }};
+    const TensorShape& outputShape = { 2, 3, 2, 4 };
+
+    // Builds up the structure of the network
+    INetworkPtr net = CreateMergerNetwork<GetDataType<T>()>(inputShapes, outputShape, concatAxis);
+
+    BOOST_TEST_CHECKPOINT("create a network");
+
+    // Creates structures for input & output.
+    std::vector<T> inputData{
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
+    };
+
+    // Concatenating along the innermost axis interleaves the inputs per row:
+    // each pair of elements from input 0 is followed by the matching pair
+    // from input 1.
+    std::vector<T> expectedOutput{
+        1, 2, 1, 2, 3, 4, 3, 4,
+        5, 6, 5, 6, 7, 8, 7, 8,
+        9, 10, 9, 10, 11, 12, 11, 12,
+        1, 2, 1, 2, 3, 4, 3, 4,
+        5, 6, 5, 6, 7, 8, 7, 8,
+        9, 10, 9, 10, 11, 12, 11, 12
+    };
+
+    // Both inputs are fed the same data.
+    std::map<int, std::vector<T>> inputTensorData = {{ 0, inputData }, { 1, inputData }};
+    std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
+
+    // Qualify std::move explicitly instead of relying on ADL to find it.
+    EndToEndLayerTestImpl<T>(std::move(net), inputTensorData, expectedOutputData, backends);
+}
+
+} // anonymous namespace
//
#include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(ClEndToEnd)
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::GpuAcc};
+
BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
- ConstantUsageFloat32Test(backends);
+ ConstantUsageFloat32Test(defaultBackends);
+}
+
+// Merger (concatenation) end-to-end tests for the GpuAcc backend.
+// NOTE(review): dimension 2 is not exercised here although the shared test
+// implementation provides it - confirm whether inner-axis concat is
+// supported on this backend.
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(ClMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
}
BOOST_AUTO_TEST_SUITE_END()
//
#include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(NeonEndToEnd)
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};
+
BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
- BOOST_TEST(ConstantUsageFloat32Test(backends));
+ BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
}
BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}
+// Merger (concatenation) end-to-end tests for the CpuAcc backend.
+// NOTE(review): dimension 2 is not exercised here although the shared test
+// implementation provides it - confirm whether inner-axis concat is
+// supported on this backend.
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(NeonMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+}
+
+
BOOST_AUTO_TEST_SUITE_END()
//
#include <backendsCommon/test/EndToEndTestImpl.hpp>
+#include <backendsCommon/test/MergerTestImpl.hpp>
#include <boost/test/unit_test.hpp>
BOOST_AUTO_TEST_SUITE(RefEndToEnd)
+std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};
+
BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- BOOST_TEST(ConstantUsageFloat32Test(backends));
+ BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
}
BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
{
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- BOOST_TEST(ConstantUsageUint8Test(backends));
+ BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
}
BOOST_AUTO_TEST_CASE(Unsigned8)
softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
// optimize the network
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+ IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
// Loads it into the runtime.
NetworkId netId;
add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
// optimize the network
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+ IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
// Loads it into the runtime.
NetworkId netId;
activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);
// optimize the network
- std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
- IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+ IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
// Loads it into the runtime.
NetworkId netId;
BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}
+// Merger (concatenation) end-to-end tests for the CpuRef backend.
+// All four concat axes are exercised, in float32 and uint8.
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
+{
+    MergerDim0EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
+{
+    MergerDim0EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
+{
+    MergerDim1EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
+{
+    MergerDim1EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
+{
+    MergerDim2EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
+{
+    MergerDim2EndToEnd<uint8_t>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
+{
+    MergerDim3EndToEnd<float>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
+{
+    MergerDim3EndToEnd<uint8_t>(defaultBackends);
+}
+
+
BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file