/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __NNFW_API_TEST_GEN_MODEL_TEST_H__
#define __NNFW_API_TEST_GEN_MODEL_TEST_H__

#include <gtest/gtest.h>
#include <nnfw_internal.h>

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

#include "CircleGen.h"
#include "fixtures.h"
30 inline size_t sizeOfNnfwType(NNFW_TYPE type)
34 case NNFW_TYPE_TENSOR_BOOL:
35 case NNFW_TYPE_TENSOR_UINT8:
36 case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
37 case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
39 case NNFW_TYPE_TENSOR_FLOAT32:
40 case NNFW_TYPE_TENSOR_INT32:
42 case NNFW_TYPE_TENSOR_INT64:
45 throw std::runtime_error{"Invalid tensor type"};
49 // TODO Unify this with `SessionObject` in `fixtures.h`
50 struct SessionObjectGeneric
52 nnfw_session *session = nullptr;
53 std::vector<std::vector<uint8_t>> inputs;
54 std::vector<std::vector<uint8_t>> outputs;
/**
 * @brief Reference input/output data for a single inference of a test model
 */
struct TestCaseData
{
  /**
   * @brief A vector of input buffers
   */
  std::vector<std::vector<uint8_t>> inputs;

  /**
   * @brief A vector of output buffers
   */
  std::vector<std::vector<uint8_t>> outputs;

  /**
   * @brief Append vector data to inputs
   *
   * @tparam T data element type
   * @param data vector data array
   * @return TestCaseData& this object, for chaining
   */
  template <typename T> TestCaseData &addInput(const std::vector<T> &data)
  {
    addData(inputs, data);
    return *this;
  }

  /**
   * @brief Append vector data to outputs
   *
   * @tparam T data element type
   * @param data vector data array
   * @return TestCaseData& this object, for chaining
   */
  template <typename T> TestCaseData &addOutput(const std::vector<T> &data)
  {
    addData(outputs, data);
    return *this;
  }

  /**
   * @brief Call this when @c nnfw_run() for this test case is expected to be failed
   *
   * @return TestCaseData& this object, for chaining
   */
  TestCaseData &expectFailRun()
  {
    _expected_fail_run = true;
    return *this;
  }
  bool expected_fail_run() const { return _expected_fail_run; }

private:
  // Append `data` to `dest` as a new raw byte buffer
  template <typename T>
  static void addData(std::vector<std::vector<uint8_t>> &dest, const std::vector<T> &data)
  {
    size_t size = data.size() * sizeof(T);
    dest.emplace_back();
    dest.back().resize(size);
    std::memcpy(dest.back().data(), data.data(), size);
  }

  bool _expected_fail_run = false;
};

// `std::vector<bool>` is a bit-packed proxy container with no contiguous
// storage, so it cannot be memcpy'd; convert element-wise to one byte each.
template <>
inline void TestCaseData::addData<bool>(std::vector<std::vector<uint8_t>> &dest,
                                        const std::vector<bool> &data)
{
  size_t size = data.size() * sizeof(uint8_t);
  dest.emplace_back();
  dest.back().resize(size);
  std::transform(data.cbegin(), data.cend(), dest.back().data(),
                 [](bool b) { return static_cast<uint8_t>(b); });
}
128 * @brief Create a TestCaseData with a uniform type
130 * A helper function for generating test cases that has the same data type for model inputs/outputs.
132 * @tparam T Uniform tensor type
133 * @param inputs Inputs tensor buffers
134 * @param outputs Output tensor buffers
135 * @return TestCaseData Generated test case data
137 template <typename T>
138 static TestCaseData uniformTCD(const std::vector<std::vector<T>> &inputs,
139 const std::vector<std::vector<T>> &outputs)
142 for (const auto &data : inputs)
144 for (const auto &data : outputs)
150 * @brief A test configuration class
152 class GenModelTestContext
155 GenModelTestContext(CircleBuffer &&cbuf) : _cbuf{std::move(cbuf)}, _backends{"cpu"} {}
158 * @brief Return circle buffer
160 * @return CircleBuffer& the circle buffer
162 const CircleBuffer &cbuf() const { return _cbuf; }
165 * @brief Return test cases
167 * @return std::vector<TestCaseData>& the test cases
169 const std::vector<TestCaseData> &test_cases() const { return _test_cases; }
172 * @brief Return backends
174 * @return const std::vector<std::string>& the backends to be tested
176 const std::vector<std::string> &backends() const { return _backends; }
179 * @brief Return test is defined to fail on model load
181 * @return bool test is defined to fail on model load
183 bool expected_fail_model_load() const { return _expected_fail_model_load; }
186 * @brief Return test is defined to fail on compile
188 * @return bool test is defined to fail on compile
190 bool expected_fail_compile() const { return _expected_fail_compile; }
193 * @brief Set the output buffer size of specified output tensor
194 * Note that output tensor size of a model with dynamic tensor is calculated while
196 * Therefore, before runniing the model, the sufficient size of buffer should
197 * be prepared by calling this method.
198 * The size does not need to be the exact size.
200 void output_sizes(uint32_t ind, size_t size) { _output_sizes[ind] = size; }
202 size_t output_sizes(uint32_t ind) const { return _output_sizes.at(ind); }
204 bool hasOutputSizes(uint32_t ind) const { return _output_sizes.find(ind) != _output_sizes.end(); }
207 * @brief Add a test case
209 * @param tc the test case to be added
211 void addTestCase(const TestCaseData &tc) { _test_cases.emplace_back(tc); }
214 * @brief Add a test case
216 * @param tc the test case to be added
218 void setBackends(const std::vector<std::string> &backends)
222 for (auto backend : backends)
224 #ifdef TEST_ACL_BACKEND
225 if (backend == "acl_cl" || backend == "acl_neon")
227 _backends.push_back(backend);
230 if (backend == "cpu" || backend == "ruy")
232 _backends.push_back(backend);
234 #ifdef TEST_XNNPACK_BACKEND
235 if (backend == "xnnpack")
237 _backends.push_back(backend);
244 * @brief Expect failure while model load
246 void expectFailModelLoad() { _expected_fail_model_load = true; }
249 * @brief Expect failure while compiling
251 void expectFailCompile() { _expected_fail_compile = true; }
254 * @brief Expect failure while execution
256 void expectFailExecution() { _expected_fail_execution = true; }
260 std::vector<TestCaseData> _test_cases;
261 std::vector<std::string> _backends;
262 std::unordered_map<uint32_t, size_t> _output_sizes;
263 bool _expected_fail_model_load{false};
264 bool _expected_fail_compile{false};
265 bool _expected_fail_execution{false};
269 * @brief Generated Model test fixture for a one time inference
271 * This fixture is for one-time inference test with variety of generated models.
272 * It is the test maker's responsiblity to create @c _context which contains
273 * test body, which are generated circle buffer, model input data and output data and
274 * backend list to be tested.
275 * The rest(calling API functions for execution) is done by @c Setup and @c TearDown .
278 class GenModelTest : public ::testing::Test
281 void SetUp() override
285 void TearDown() override
287 for (std::string backend : _context->backends())
289 // NOTE If we can prepare many times for one model loading on same session,
290 // we can move nnfw_create_session to SetUp and
291 // nnfw_load_circle_from_buffer to outside forloop
292 NNFW_ENSURE_SUCCESS(nnfw_create_session(&_so.session));
293 auto &cbuf = _context->cbuf();
294 auto model_load_result =
295 nnfw_load_circle_from_buffer(_so.session, cbuf.buffer(), cbuf.size());
296 if (_context->expected_fail_model_load())
298 ASSERT_NE(model_load_result, NNFW_STATUS_NO_ERROR);
299 std::cerr << "Failed model loading as expected." << std::endl;
300 NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
303 NNFW_ENSURE_SUCCESS(model_load_result);
304 NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_so.session, backend.data()));
306 if (_context->expected_fail_compile())
308 ASSERT_NE(nnfw_prepare(_so.session), NNFW_STATUS_NO_ERROR);
310 NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
313 NNFW_ENSURE_SUCCESS(nnfw_prepare(_so.session));
315 // In/Out buffer settings
317 NNFW_ENSURE_SUCCESS(nnfw_input_size(_so.session, &num_inputs));
318 _so.inputs.resize(num_inputs);
319 for (uint32_t ind = 0; ind < _so.inputs.size(); ind++)
322 NNFW_ENSURE_SUCCESS(nnfw_input_tensorinfo(_so.session, ind, &ti));
323 uint64_t input_elements = num_elems(&ti);
324 _so.inputs[ind].resize(input_elements * sizeOfNnfwType(ti.dtype));
325 if (_so.inputs[ind].size() == 0)
328 ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, nullptr, 0), NNFW_STATUS_NO_ERROR);
332 ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, _so.inputs[ind].data(),
333 _so.inputs[ind].size()),
334 NNFW_STATUS_NO_ERROR);
338 uint32_t num_outputs;
339 NNFW_ENSURE_SUCCESS(nnfw_output_size(_so.session, &num_outputs));
340 _so.outputs.resize(num_outputs);
341 for (uint32_t ind = 0; ind < _so.outputs.size(); ind++)
344 NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_so.session, ind, &ti));
348 if (_context->hasOutputSizes(ind))
350 size = _context->output_sizes(ind);
354 uint64_t output_elements = num_elems(&ti);
355 size = output_elements * sizeOfNnfwType(ti.dtype);
357 _so.outputs[ind].resize(size);
360 ASSERT_EQ(nnfw_set_output(_so.session, ind, ti.dtype, _so.outputs[ind].data(),
361 _so.outputs[ind].size()),
362 NNFW_STATUS_NO_ERROR);
365 // Set input values, run, and check output values
366 for (auto &test_case : _context->test_cases())
368 auto &ref_inputs = test_case.inputs;
369 auto &ref_outputs = test_case.outputs;
370 ASSERT_EQ(_so.inputs.size(), ref_inputs.size());
371 for (uint32_t i = 0; i < _so.inputs.size(); i++)
374 ASSERT_EQ(_so.inputs[i].size(), ref_inputs[i].size());
375 memcpy(_so.inputs[i].data(), ref_inputs[i].data(), ref_inputs[i].size());
378 if (test_case.expected_fail_run())
380 ASSERT_NE(nnfw_run(_so.session), NNFW_STATUS_NO_ERROR);
384 NNFW_ENSURE_SUCCESS(nnfw_run(_so.session));
386 ASSERT_EQ(_so.outputs.size(), ref_outputs.size());
387 for (uint32_t i = 0; i < _so.outputs.size(); i++)
390 NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_so.session, i, &ti));
392 // Check output tensor values
393 auto &ref_output = ref_outputs[i];
394 auto &output = _so.outputs[i];
395 ASSERT_EQ(output.size(), ref_output.size());
399 case NNFW_TYPE_TENSOR_BOOL:
400 compareBuffersExactBool(ref_output, output, i);
402 case NNFW_TYPE_TENSOR_UINT8:
403 case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
404 compareBuffersExact<uint8_t>(ref_output, output, i);
406 case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
407 compareBuffersExact<int8_t>(ref_output, output, i);
409 case NNFW_TYPE_TENSOR_INT32:
410 compareBuffersExact<int32_t>(ref_output, output, i);
412 case NNFW_TYPE_TENSOR_FLOAT32:
413 // TODO better way for handling FP error?
414 for (uint32_t e = 0; e < ref_output.size() / sizeof(float); e++)
416 float refval = reinterpret_cast<const float *>(ref_output.data())[e];
417 float val = reinterpret_cast<const float *>(output.data())[e];
418 EXPECT_NEAR(refval, val, 0.001) << "Output #" << i << ", Element Index : " << e;
421 case NNFW_TYPE_TENSOR_INT64:
422 compareBuffersExact<int64_t>(ref_output, output, i);
425 throw std::runtime_error{"Invalid tensor type"};
427 // TODO Add shape comparison
431 NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
436 template <typename T>
437 void compareBuffersExact(const std::vector<uint8_t> &ref_buf, const std::vector<uint8_t> &act_buf,
440 for (uint32_t e = 0; e < ref_buf.size() / sizeof(T); e++)
442 T ref = reinterpret_cast<const T *>(ref_buf.data())[e];
443 T act = reinterpret_cast<const T *>(act_buf.data())[e];
444 EXPECT_EQ(ref, act) << "Output #" << index << ", Element Index : " << e;
448 void compareBuffersExactBool(const std::vector<uint8_t> &ref_buf,
449 const std::vector<uint8_t> &act_buf, uint32_t index)
451 for (uint32_t e = 0; e < ref_buf.size() / sizeof(uint8_t); e++)
453 uint8_t ref_raw = reinterpret_cast<const uint8_t *>(ref_buf.data())[e];
454 bool ref = (ref_raw != 0 ? true : false);
455 uint8_t act_raw = reinterpret_cast<const uint8_t *>(act_buf.data())[e];
456 bool act = (act_raw != 0 ? true : false);
457 EXPECT_EQ(ref, act) << "Output #" << index << ", Element Index : " << e;
462 SessionObjectGeneric _so;
463 std::unique_ptr<GenModelTestContext> _context;
466 #endif // __NNFW_API_TEST_GEN_MODEL_TEST_H__