2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright (C) 2017 The Android Open Source Project
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include "TestGenerated.h"
19 #include "TestHarness.h"
21 #include <gtest/gtest.h>
32 // Systrace is not available from CTS tests due to platform layering
33 // constraints. We reuse the NNTEST_ONLY_PUBLIC_API flag, as that should also be
34 // the case for CTS (public APIs only).
35 // NNFW Fix: Always use NNTEST_ONLY_PUBLIC_API
36 //#ifndef NNTEST_ONLY_PUBLIC_API
37 //#include "Tracing.h"
// Tracing is compiled out: each macro expands to nothing, so all NNTRACE_*
// call sites below become no-ops and their arguments are never evaluated.
39 #define NNTRACE_FULL_RAW(...)
40 #define NNTRACE_APP(...)
41 #define NNTRACE_APP_SWITCH(...)
44 namespace generated_tests {
45 using namespace nnfw::rt::test_wrapper;
46 using namespace test_helper;
// Streams every operand vector of element type T held in `test` to `os`,
// one "aliased_output<idx>: [v0, v1, ...]" entry per map key.
// The unary `+` promotes narrow integer types (e.g. uint8_t) so they print
// as numbers rather than raw characters.
// NOTE(review): the template header and closing braces of this function are
// outside the visible span of this chunk.
50 void print(std::ostream& os, const std::map<int, std::vector<T>>& test) {
51 // dump T-typed inputs
52 for_each<T>(test, [&os](int idx, const std::vector<T>& f) {
53 os << " aliased_output" << idx << ": [";
54 for (size_t i = 0; i < f.size(); ++i) {
// Comma-separate all elements after the first.
55 os << (i == 0 ? "" : ", ") << +f[i];
61 // Specialized for _Float16 because it requires explicit conversion.
// Same output format as the primary template, but each _Float16 element is
// widened to float before streaming, since _Float16 has no ostream inserter.
// NOTE(review): the `template <>` header line and closing braces fall outside
// the visible span of this chunk.
63 void print<_Float16>(std::ostream& os, const std::map<int, std::vector<_Float16>>& test) {
64 for_each<_Float16>(test, [&os](int idx, const std::vector<_Float16>& f) {
65 os << " aliased_output" << idx << ": [";
66 for (size_t i = 0; i < f.size(); ++i) {
// Comma-separate all elements after the first.
67 os << (i == 0 ? "" : ", ") << +static_cast<float>(f[i]);
// Dumps every typed operand map of a MixedTyped value to `os`, one print()
// call per supported element type (used by the test-slicing dump below).
73 void printAll(std::ostream& os, const MixedTyped& test) {
74 print(os, test.float32Operands);
75 print(os, test.int32Operands);
76 print(os, test.quant8AsymmOperands);
77 print(os, test.quant16SymmOperands);
78 print(os, test.float16Operands);
79 print(os, test.bool8Operands);
80 print(os, test.quant8ChannelOperands);
81 print(os, test.quant16AsymmOperands);
82 print(os, test.quant8SymmOperands);
// Compile-time guard: fails the build if MixedTyped gains/loses a type
// without a matching print() call being added/removed above.
83 static_assert(9 == MixedTyped::kNumTypes,
84 "Number of types in MixedTyped changed, but printAll function wasn't updated");
// Compiles `model` into a Compilation object.
// When mTestCompilationCaching is set, compiles twice with the same cache
// dir/token first, so a driver that supports compilation caching gets its
// cache path exercised (second finish() should hit the cache).
// NOTE(review): the return statements of both branches are outside the
// visible span of this chunk.
88 Compilation GeneratedTests::compileModel(const Model* model) {
89 NNTRACE_APP(NNTRACE_PHASE_COMPILATION, "compileModel");
90 if (mTestCompilationCaching) {
91 // Compile the model twice with the same token, so that compilation caching will be
92 // exercised if supported by the driver.
93 Compilation compilation1(model);
94 compilation1.setCaching(mCacheDir, mToken);
95 compilation1.finish();
96 Compilation compilation2(model);
97 compilation2.setCaching(mCacheDir, mToken);
98 compilation2.finish();
// Non-caching path: single plain compilation.
101 Compilation compilation(model);
102 compilation.finish();
// Runs every example in `examples` against an already-finished `compilation`:
// binds inputs/outputs, computes, then compares results against the golden
// outputs with float tolerances. If `dumpFile` is non-empty, all outputs are
// additionally written to that file for the slicing tool.
// NOTE(review): several declarations (ofstream `s`, `exampleNo`, MixedTyped
// `test`) and closing braces sit on lines elided from this chunk.
107 void GeneratedTests::executeWithCompilation(const Model* model, Compilation* compilation,
108 std::function<bool(int)> isIgnored,
109 std::vector<MixedTypedExample>& examples,
110 std::string dumpFile) {
111 bool dumpToFile = !dumpFile.empty();
// Truncate any previous dump so each run starts from an empty file.
114 s.open(dumpFile, std::ofstream::trunc);
115 ASSERT_TRUE(s.is_open());
// Default FP32 tolerances: absolute 1e-5, relative 5 ULP of FP32
// (1.1920928955078125e-7 == 2^-23).
119 float fpAtol = 1e-5f;
120 float fpRtol = 5.0f * 1.1920928955078125e-7f;
121 for (auto& example : examples) {
122 NNTRACE_APP(NNTRACE_PHASE_EXECUTION, "executeWithCompilation example");
123 SCOPED_TRACE(exampleNo);
124 // TODO: We leave it as a copy here.
125 // Should verify if the input gets modified by the test later.
126 MixedTyped inputs = example.operands.first;
127 const MixedTyped& golden = example.operands.second;
129 // NNFW Fix: comment out using hasFloat16Inputs
130 //const bool hasFloat16Inputs = !inputs.float16Operands.empty();
131 if (model->isRelaxed()/* || hasFloat16Inputs*/) {
132 // TODO: Adjust the error limit based on testing.
133 // If in relaxed mode, set the absolute tolerance to be 5ULP of FP16.
134 fpAtol = 5.0f * 0.0009765625f;
135 // Set the relative tolerance to be 5ULP of the corresponding FP precision.
// 0.0009765625 == 2^-10, one ULP of FP16 around 1.0.
136 fpRtol = 5.0f * 0.0009765625f;
139 Execution execution(compilation);
142 NNTRACE_APP(NNTRACE_PHASE_INPUTS_AND_OUTPUTS, "executeWithCompilation example");
// Bind every typed input buffer; a zero-sized operand is passed as nullptr.
144 for_all(inputs, [&execution](int idx, const void* p, size_t s) {
145 const void* buffer = s == 0 ? nullptr : p;
146 ASSERT_EQ(Result::NO_ERROR, execution.setInput(idx, buffer, s));
149 // Go through all typed outputs
// Size the `test` output storage to match the golden shapes before binding.
150 resize_accordingly(golden, test);
151 for_all(test, [&execution](int idx, void* p, size_t s) {
152 void* buffer = s == 0 ? nullptr : p;
153 ASSERT_EQ(Result::NO_ERROR, execution.setOutput(idx, buffer, s));
// Synchronous compute; the test fails immediately on any error result.
157 Result r = execution.compute();
158 ASSERT_EQ(Result::NO_ERROR, r);
160 NNTRACE_APP(NNTRACE_PHASE_RESULTS, "executeWithCompilation example");
162 // Get output dimensions
164 test.operandDimensions, [&execution](int idx, std::vector<uint32_t>& t) {
165 ASSERT_EQ(Result::NO_ERROR, execution.getOutputOperandDimensions(idx, &t));
168 // Dump all outputs for the slicing tool
170 s << "output" << exampleNo << " = {\n";
172 // all outputs are done
176 // Filter out don't cares
// Drop operand indices the generated spec marks as ignored before comparing.
177 MixedTyped filteredGolden = filter(golden, isIgnored);
178 MixedTyped filteredTest = filter(test, isIgnored);
179 // We want "close-enough" results for float
181 compare(filteredGolden, filteredTest, fpAtol, fpRtol);
// Statistical check for ops (e.g. RANDOM_MULTINOMIAL) whose output is a
// distribution rather than an exact value.
185 if (example.expectedMultinomialDistributionTolerance > 0) {
186 expectMultinomialDistributionWithinTolerance(test, example);
// Single-threaded path: compile `model` once, then run all `examples`
// against that compilation (optionally dumping outputs to `dumpFile`).
191 void GeneratedTests::executeOnce(const Model* model, std::function<bool(int)> isIgnored,
192 std::vector<MixedTypedExample>& examples, std::string dumpFile) {
193 NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeOnce");
194 Compilation compilation = compileModel(model);
195 executeWithCompilation(model, &compilation, isIgnored, examples, dumpFile);
// Stress path: 10 threads, each compiling its OWN copy of the model and
// executing all examples (no dump file). All threads are joined before return.
// The by-reference capture is safe here because every thread is joined within
// this function's scope.
198 void GeneratedTests::executeMultithreadedOwnCompilation(const Model* model,
199 std::function<bool(int)> isIgnored,
200 std::vector<MixedTypedExample>& examples) {
201 NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedOwnCompilation");
202 SCOPED_TRACE("MultithreadedOwnCompilation");
203 std::vector<std::thread> threads;
204 for (int i = 0; i < 10; i++) {
205 threads.push_back(std::thread([&]() { executeOnce(model, isIgnored, examples, ""); }));
207 std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
// Stress path: one compilation SHARED by 10 threads that execute all examples
// concurrently (exercises driver thread-safety of a single compilation).
// All threads are joined before `compilation` goes out of scope, so the
// by-reference captures cannot dangle.
210 void GeneratedTests::executeMultithreadedSharedCompilation(
211 const Model* model, std::function<bool(int)> isIgnored,
212 std::vector<MixedTypedExample>& examples) {
213 NNTRACE_APP(NNTRACE_PHASE_OVERALL, "executeMultithreadedSharedCompilation");
214 SCOPED_TRACE("MultithreadedSharedCompilation");
215 Compilation compilation = compileModel(model);
216 std::vector<std::thread> threads;
217 for (int i = 0; i < 10; i++) {
218 threads.push_back(std::thread(
219 [&]() { executeWithCompilation(model, &compilation, isIgnored, examples, ""); }));
221 std::for_each(threads.begin(), threads.end(), [](std::thread& t) { t.join(); });
224 // Test driver for those generated from ml/nn/runtime/test/spec
// Entry point called by each generated test: builds the model via
// `createModel`, then runs it either once (default) or through both
// multithreaded stress paths when NNTEST_MULTITHREADED is defined.
// NOTE(review): the `Model model` construction and createModel(&model) call
// are on lines elided from this chunk.
225 void GeneratedTests::execute(std::function<void(Model*)> createModel,
226 std::function<bool(int)> isIgnored,
227 std::vector<MixedTypedExample>& examples,
228 [[maybe_unused]] std::string dumpFile) {
229 NNTRACE_APP(NNTRACE_PHASE_OVERALL, "execute");
233 auto executeInternal = [&model, &isIgnored, &examples,
234 this]([[maybe_unused]] std::string dumpFile) {
235 SCOPED_TRACE("TestCompilationCaching = " + std::to_string(mTestCompilationCaching));
236 #ifndef NNTEST_MULTITHREADED
237 executeOnce(&model, isIgnored, examples, dumpFile);
238 #else // defined(NNTEST_MULTITHREADED)
239 executeMultithreadedOwnCompilation(&model, isIgnored, examples);
240 executeMultithreadedSharedCompilation(&model, isIgnored, examples);
241 #endif // !defined(NNTEST_MULTITHREADED)
// Run with caching disabled only; onert does not support compilation
// caching, so the caching pass is skipped (see NNFW fix comment below).
244 mTestCompilationCaching = false;
245 // Fix for onert: Not supported feature - copmilation caching
248 executeInternal(dumpFile);
// Restore the flag so later tests observe the default state.
249 mTestCompilationCaching = true;
// Per-test setup: optionally switches the Execution compute mode to the
// parameterized value, then creates a unique temp cache directory and a
// zero-filled caching token for compileModel().
// NOTE(review): the #endif matching NNTEST_COMPUTE_MODE is on a line elided
// from this chunk.
254 void GeneratedTests::SetUp() {
255 #ifdef NNTEST_COMPUTE_MODE
// Remember the previous mode so TearDown() can restore it.
256 mOldComputeMode = Execution::setComputeMode(GetParam());
258 // Fix for onert: Fix file path for linux
259 char cacheDirTemp[] = "/tmp/TestCompilationCachingXXXXXX";
260 //char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
// mkdtemp replaces the XXXXXX suffix in place and returns nullptr on failure.
261 char* cacheDir = mkdtemp(cacheDirTemp);
262 ASSERT_NE(cacheDir, nullptr);
263 mCacheDir = cacheDir;
264 mToken = std::vector<uint8_t>(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
// Per-test teardown: restores the compute mode (when parameterized) and, if
// the test passed, recursively deletes the temp cache directory. On failure
// the directory is intentionally left behind for debugging.
// NOTE(review): the #endif matching NNTEST_COMPUTE_MODE and the closing
// braces are on lines elided from this chunk.
267 void GeneratedTests::TearDown() {
268 #ifdef NNTEST_COMPUTE_MODE
269 Execution::setComputeMode(mOldComputeMode);
271 if (!::testing::Test::HasFailure()) {
272 // TODO: Switch to std::filesystem::remove_all once libc++fs is made available in CTS.
273 // Remove the cache directory specified by path recursively.
// nftw visits children before parents (FTW_DEPTH), stays on one filesystem
// (FTW_MOUNT) and does not follow symlinks (FTW_PHYS), so remove() works
// for both files and the directories that contained them.
274 auto callback = [](const char* child, const struct stat*, int, struct FTW*) {
275 return remove(child);
277 nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
// When compute-mode parameterization is enabled, every generated test runs
// once per NNAPI compute mode (sync, async, burst); GetParam() in SetUp()
// selects the mode for each instantiation.
// NOTE(review): the matching #endif is on a line elided from this chunk.
281 #ifdef NNTEST_COMPUTE_MODE
282 INSTANTIATE_TEST_SUITE_P(ComputeMode, GeneratedTests,
283 testing::Values(Execution::ComputeMode::SYNC,
284 Execution::ComputeMode::ASYNC,
285 Execution::ComputeMode::BURST));
288 } // namespace generated_tests