//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "armnn/IRuntime.hpp"
#include "armnnOnnxParser/IOnnxParser.hpp"
#include "test/TensorHelpers.hpp"
#include "VerificationHelpers.hpp"

#include <boost/format.hpp>

namespace armnnUtils
{
19 template<typename TParser>
20 struct ParserPrototxtFixture
22 ParserPrototxtFixture()
23 : m_Parser(TParser::Create())
24 , m_NetworkIdentifier(-1)
26 armnn::IRuntime::CreationOptions options;
27 m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuRef));
29 #if ARMCOMPUTENEON_ENABLED
30 m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::CpuAcc));
33 #if ARMCOMPUTECL_ENABLED
34 m_Runtimes.push_back(std::make_pair(armnn::IRuntime::Create(options), armnn::Compute::GpuAcc));
38 /// Parses and loads the network defined by the m_Prototext string.
40 void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName);
41 void SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
42 const std::string& inputName,
43 const std::string& outputName);
44 void Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
45 const std::vector<std::string>& requestedOutputs);
49 /// Executes the network with the given input tensor and checks the result against the given output tensor.
50 /// This overload assumes that the network has a single input and a single output.
51 template <std::size_t NumOutputDimensions>
52 void RunTest(const std::vector<float>& inputData, const std::vector<float>& expectedOutputData);
54 /// Executes the network with the given input tensors and checks the results against the given output tensors.
55 /// This overload supports multiple inputs and multiple outputs, identified by name.
56 template <std::size_t NumOutputDimensions>
57 void RunTest(const std::map<std::string, std::vector<float>>& inputData,
58 const std::map<std::string, std::vector<float>>& expectedOutputData);
60 std::string m_Prototext;
61 std::unique_ptr<TParser, void(*)(TParser* parser)> m_Parser;
62 std::vector<std::pair<armnn::IRuntimePtr, armnn::Compute>> m_Runtimes;
63 armnn::NetworkId m_NetworkIdentifier;
65 /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
66 /// so they don't need to be passed to the single-input-single-output overload of RunTest().
68 std::string m_SingleInputName;
69 std::string m_SingleOutputName;
73 template<typename TParser>
74 void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const std::string& inputName,
75 const std::string& outputName)
77 // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
78 m_SingleInputName = inputName;
79 m_SingleOutputName = outputName;
80 Setup({ }, { outputName });
83 template<typename TParser>
84 void ParserPrototxtFixture<TParser>::SetupSingleInputSingleOutput(const armnn::TensorShape& inputTensorShape,
85 const std::string& inputName,
86 const std::string& outputName)
88 // Stores the input and output name so they don't need to be passed to the single-input-single-output RunTest().
89 m_SingleInputName = inputName;
90 m_SingleOutputName = outputName;
91 Setup({ { inputName, inputTensorShape } }, { outputName });
94 template<typename TParser>
95 void ParserPrototxtFixture<TParser>::Setup(const std::map<std::string, armnn::TensorShape>& inputShapes,
96 const std::vector<std::string>& requestedOutputs)
98 for (auto&& runtime : m_Runtimes)
100 std::string errorMessage;
102 armnn::INetworkPtr network =
103 m_Parser->CreateNetworkFromString(m_Prototext.c_str(), inputShapes, requestedOutputs);
104 auto optimized = Optimize(*network,
105 { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
106 armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
107 if (ret != armnn::Status::Success)
109 throw armnn::Exception(boost::str(
110 boost::format("LoadNetwork failed with error: '%1%' %2%")
112 % CHECK_LOCATION().AsString()));
117 template<typename TParser>
118 void ParserPrototxtFixture<TParser>::Setup()
120 for (auto&& runtime : m_Runtimes)
122 std::string errorMessage;
124 armnn::INetworkPtr network =
125 m_Parser->CreateNetworkFromString(m_Prototext.c_str());
126 auto optimized = Optimize(*network,
127 { runtime.second, armnn::Compute::CpuRef }, runtime.first->GetDeviceSpec());
128 armnn::Status ret = runtime.first->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
129 if (ret != armnn::Status::Success)
131 throw armnn::Exception(boost::str(
132 boost::format("LoadNetwork failed with error: '%1%' %2%")
134 % CHECK_LOCATION().AsString()));
139 template<typename TParser>
140 template <std::size_t NumOutputDimensions>
141 void ParserPrototxtFixture<TParser>::RunTest(const std::vector<float>& inputData,
142 const std::vector<float>& expectedOutputData)
144 RunTest<NumOutputDimensions>({ { m_SingleInputName, inputData } }, { { m_SingleOutputName, expectedOutputData } });
147 template<typename TParser>
148 template <std::size_t NumOutputDimensions>
149 void ParserPrototxtFixture<TParser>::RunTest(const std::map<std::string, std::vector<float>>& inputData,
150 const std::map<std::string, std::vector<float>>& expectedOutputData)
152 for (auto&& runtime : m_Runtimes)
154 using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
156 // Sets up the armnn input tensors from the given vectors.
157 armnn::InputTensors inputTensors;
158 for (auto&& it : inputData)
160 BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(it.first);
161 inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
164 // Allocates storage for the output tensors to be written to and sets up the armnn output tensors.
165 std::map<std::string, boost::multi_array<float, NumOutputDimensions>> outputStorage;
166 armnn::OutputTensors outputTensors;
167 for (auto&& it : expectedOutputData)
169 BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
170 outputStorage.emplace(it.first, MakeTensor<float, NumOutputDimensions>(bindingInfo.second));
171 outputTensors.push_back(
172 { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
175 runtime.first->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
177 // Compares each output tensor to the expected values.
178 for (auto&& it : expectedOutputData)
180 BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(it.first);
181 if (bindingInfo.second.GetNumElements() != it.second.size())
183 throw armnn::Exception(
185 boost::format("Output tensor %1% is expected to have %2% elements. "
186 "%3% elements supplied. %4%") %
188 bindingInfo.second.GetNumElements() %
190 CHECK_LOCATION().AsString()));
192 auto outputExpected = MakeTensor<float, NumOutputDimensions>(bindingInfo.second, it.second);
193 BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
} // namespace armnnUtils