/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>
#include <nnfw_internal.h>

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

#include "CircleGen.h"
27 inline size_t sizeOfNnfwType(NNFW_TYPE type)
31 case NNFW_TYPE_TENSOR_BOOL:
32 case NNFW_TYPE_TENSOR_UINT8:
33 case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
34 case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
36 case NNFW_TYPE_TENSOR_FLOAT32:
37 case NNFW_TYPE_TENSOR_INT32:
39 case NNFW_TYPE_TENSOR_INT64:
42 throw std::runtime_error{"Invalid tensor type"};
46 // TODO Unify this with `SessionObject` in `fixtures.h`
47 struct SessionObjectGeneric
49 nnfw_session *session = nullptr;
50 std::vector<std::vector<uint8_t>> inputs;
51 std::vector<std::vector<uint8_t>> outputs;
/**
 * @brief One test case: reference input buffers, expected output buffers, and
 *        whether @c nnfw_run() is expected to fail for this case
 */
struct TestCaseData
{
  /**
   * @brief A vector of input buffers
   */
  std::vector<std::vector<uint8_t>> inputs;

  /**
   * @brief A vector of output buffers
   */
  std::vector<std::vector<uint8_t>> outputs;

  /**
   * @brief Append vector data to inputs
   *
   * @tparam T Data type
   * @param data vector data array
   * @return TestCaseData& this object for chaining
   */
  template <typename T> TestCaseData &addInput(const std::vector<T> &data)
  {
    addData(inputs, data);
    return *this;
  }

  /**
   * @brief Append vector data to outputs
   *
   * @tparam T Data type
   * @param data vector data array
   * @return TestCaseData& this object for chaining
   */
  template <typename T> TestCaseData &addOutput(const std::vector<T> &data)
  {
    addData(outputs, data);
    return *this;
  }

  /**
   * @brief Call this when @c nnfw_run() for this test case is expected to fail
   *
   * @return TestCaseData& this object for chaining
   */
  TestCaseData &expectFailRun()
  {
    _expected_fail_run = true;
    return *this;
  }
  bool expected_fail_run() const { return _expected_fail_run; }

protected:
  // Append `data` as a new raw byte buffer at the end of `dest`
  template <typename T>
  static void addData(std::vector<std::vector<uint8_t>> &dest, const std::vector<T> &data)
  {
    size_t size = data.size() * sizeof(T);
    dest.emplace_back();
    dest.back().resize(size);
    std::memcpy(dest.back().data(), data.data(), size);
  }

private:
  bool _expected_fail_run = false;
};
114 inline void TestCaseData::addData<bool>(std::vector<std::vector<uint8_t>> &dest,
115 const std::vector<bool> &data)
117 size_t size = data.size() * sizeof(uint8_t);
119 dest.back().resize(size);
120 std::transform(data.cbegin(), data.cend(), dest.back().data(),
121 [](bool b) { return static_cast<uint8_t>(b); });
125 * @brief Create a TestCaseData with a uniform type
127 * A helper function for generating test cases that has the same data type for model inputs/outputs.
129 * @tparam T Uniform tensor type
130 * @param inputs Inputs tensor buffers
131 * @param outputs Output tensor buffers
132 * @return TestCaseData Generated test case data
134 template <typename T>
135 static TestCaseData uniformTCD(const std::vector<std::vector<T>> &inputs,
136 const std::vector<std::vector<T>> &outputs)
139 for (const auto &data : inputs)
141 for (const auto &data : outputs)
147 * @brief A test configuration class
149 class GenModelTestContext
152 GenModelTestContext(CircleBuffer &&cbuf) : _cbuf{std::move(cbuf)}, _backends{"cpu"} {}
155 * @brief Return circle buffer
157 * @return CircleBuffer& the circle buffer
159 const CircleBuffer &cbuf() const { return _cbuf; }
162 * @brief Return test cases
164 * @return std::vector<TestCaseData>& the test cases
166 const std::vector<TestCaseData> &test_cases() const { return _test_cases; }
169 * @brief Return backends
171 * @return const std::vector<std::string>& the backends to be tested
173 const std::vector<std::string> &backends() const { return _backends; }
176 * @brief Return test is defined to fail on model load
178 * @return bool test is defined to fail on model load
180 bool expected_fail_model_load() const { return _expected_fail_model_load; }
183 * @brief Return test is defined to fail on compile
185 * @return bool test is defined to fail on compile
187 bool expected_fail_compile() const { return _expected_fail_compile; }
190 * @brief Set the output buffer size of specified output tensor
191 * Note that output tensor size of a model with dynamic tensor is calculated while
193 * Therefore, before runniing the model, the sufficient size of buffer should
194 * be prepared by calling this method.
195 * The size does not need to be the exact size.
197 void output_sizes(uint32_t ind, size_t size) { _output_sizes[ind] = size; }
199 size_t output_sizes(uint32_t ind) const { return _output_sizes.at(ind); }
201 bool hasOutputSizes(uint32_t ind) const { return _output_sizes.find(ind) != _output_sizes.end(); }
204 * @brief Add a test case
206 * @param tc the test case to be added
208 void addTestCase(const TestCaseData &tc) { _test_cases.emplace_back(tc); }
211 * @brief Add a test case
213 * @param tc the test case to be added
215 void setBackends(const std::vector<std::string> &backends)
219 for (auto backend : backends)
221 #ifdef TEST_ACL_BACKEND
222 if (backend == "acl_cl" || backend == "acl_neon")
224 _backends.push_back(backend);
227 if (backend == "cpu")
229 _backends.push_back(backend);
235 * @brief Expect failure while model load
237 void expectFailModelLoad() { _expected_fail_model_load = true; }
240 * @brief Expect failure while compiling
242 void expectFailCompile() { _expected_fail_compile = true; }
246 std::vector<TestCaseData> _test_cases;
247 std::vector<std::string> _backends;
248 std::unordered_map<uint32_t, size_t> _output_sizes;
249 bool _expected_fail_model_load{false};
250 bool _expected_fail_compile{false};
254 * @brief Generated Model test fixture for a one time inference
256 * This fixture is for one-time inference test with variety of generated models.
257 * It is the test maker's responsiblity to create @c _context which contains
258 * test body, which are generated circle buffer, model input data and output data and
259 * backend list to be tested.
260 * The rest(calling API functions for execution) is done by @c Setup and @c TearDown .
263 class GenModelTest : public ::testing::Test
266 void SetUp() override
270 void TearDown() override
272 for (std::string backend : _context->backends())
274 // NOTE If we can prepare many times for one model loading on same session,
275 // we can move nnfw_create_session to SetUp and
276 // nnfw_load_circle_from_buffer to outside forloop
277 NNFW_ENSURE_SUCCESS(nnfw_create_session(&_so.session));
278 auto &cbuf = _context->cbuf();
279 auto model_load_result =
280 nnfw_load_circle_from_buffer(_so.session, cbuf.buffer(), cbuf.size());
281 if (_context->expected_fail_model_load())
283 ASSERT_NE(model_load_result, NNFW_STATUS_NO_ERROR);
284 std::cerr << "Failed model loading as expected." << std::endl;
285 NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
288 NNFW_ENSURE_SUCCESS(model_load_result);
289 NNFW_ENSURE_SUCCESS(nnfw_set_available_backends(_so.session, backend.data()));
291 if (_context->expected_fail_compile())
293 ASSERT_EQ(nnfw_prepare(_so.session), NNFW_STATUS_ERROR);
295 NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
298 NNFW_ENSURE_SUCCESS(nnfw_prepare(_so.session));
300 // In/Out buffer settings
302 NNFW_ENSURE_SUCCESS(nnfw_input_size(_so.session, &num_inputs));
303 _so.inputs.resize(num_inputs);
304 for (uint32_t ind = 0; ind < _so.inputs.size(); ind++)
307 NNFW_ENSURE_SUCCESS(nnfw_input_tensorinfo(_so.session, ind, &ti));
308 uint64_t input_elements = num_elems(&ti);
309 _so.inputs[ind].resize(input_elements * sizeOfNnfwType(ti.dtype));
310 if (_so.inputs[ind].size() == 0)
313 ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, nullptr, 0), NNFW_STATUS_NO_ERROR);
317 ASSERT_EQ(nnfw_set_input(_so.session, ind, ti.dtype, _so.inputs[ind].data(),
318 _so.inputs[ind].size()),
319 NNFW_STATUS_NO_ERROR);
323 uint32_t num_outputs;
324 NNFW_ENSURE_SUCCESS(nnfw_output_size(_so.session, &num_outputs));
325 _so.outputs.resize(num_outputs);
326 for (uint32_t ind = 0; ind < _so.outputs.size(); ind++)
329 NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_so.session, ind, &ti));
333 if (_context->hasOutputSizes(ind))
335 size = _context->output_sizes(ind);
339 uint64_t output_elements = num_elems(&ti);
340 size = output_elements * sizeOfNnfwType(ti.dtype);
342 _so.outputs[ind].resize(size);
345 ASSERT_EQ(nnfw_set_output(_so.session, ind, ti.dtype, _so.outputs[ind].data(),
346 _so.outputs[ind].size()),
347 NNFW_STATUS_NO_ERROR);
350 // Set input values, run, and check output values
351 for (auto &test_case : _context->test_cases())
353 auto &ref_inputs = test_case.inputs;
354 auto &ref_outputs = test_case.outputs;
355 ASSERT_EQ(_so.inputs.size(), ref_inputs.size());
356 for (uint32_t i = 0; i < _so.inputs.size(); i++)
359 ASSERT_EQ(_so.inputs[i].size(), ref_inputs[i].size());
360 memcpy(_so.inputs[i].data(), ref_inputs[i].data(), ref_inputs[i].size());
363 if (test_case.expected_fail_run())
365 ASSERT_EQ(nnfw_run(_so.session), NNFW_STATUS_ERROR);
369 NNFW_ENSURE_SUCCESS(nnfw_run(_so.session));
371 ASSERT_EQ(_so.outputs.size(), ref_outputs.size());
372 for (uint32_t i = 0; i < _so.outputs.size(); i++)
375 NNFW_ENSURE_SUCCESS(nnfw_output_tensorinfo(_so.session, i, &ti));
377 // Check output tensor values
378 auto &ref_output = ref_outputs[i];
379 auto &output = _so.outputs[i];
380 ASSERT_EQ(output.size(), ref_output.size());
384 case NNFW_TYPE_TENSOR_BOOL:
385 compareBuffersExactBool(ref_output, output, i);
387 case NNFW_TYPE_TENSOR_UINT8:
388 case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
389 compareBuffersExact<uint8_t>(ref_output, output, i);
391 case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
392 compareBuffersExact<int8_t>(ref_output, output, i);
394 case NNFW_TYPE_TENSOR_INT32:
395 compareBuffersExact<int32_t>(ref_output, output, i);
397 case NNFW_TYPE_TENSOR_FLOAT32:
398 // TODO better way for handling FP error?
399 for (uint32_t e = 0; e < ref_output.size() / sizeof(float); e++)
401 float refval = reinterpret_cast<const float *>(ref_output.data())[e];
402 float val = reinterpret_cast<const float *>(output.data())[e];
403 EXPECT_NEAR(refval, val, 0.001) << "Output #" << i << ", Element Index : " << e;
406 case NNFW_TYPE_TENSOR_INT64:
407 compareBuffersExact<int64_t>(ref_output, output, i);
410 throw std::runtime_error{"Invalid tensor type"};
412 // TODO Add shape comparison
416 NNFW_ENSURE_SUCCESS(nnfw_close_session(_so.session));
421 template <typename T>
422 void compareBuffersExact(const std::vector<uint8_t> &ref_buf, const std::vector<uint8_t> &act_buf,
425 for (uint32_t e = 0; e < ref_buf.size() / sizeof(T); e++)
427 T ref = reinterpret_cast<const T *>(ref_buf.data())[e];
428 T act = reinterpret_cast<const T *>(act_buf.data())[e];
429 EXPECT_EQ(ref, act) << "Output #" << index << ", Element Index : " << e;
433 void compareBuffersExactBool(const std::vector<uint8_t> &ref_buf,
434 const std::vector<uint8_t> &act_buf, uint32_t index)
436 for (uint32_t e = 0; e < ref_buf.size() / sizeof(uint8_t); e++)
438 uint8_t ref_raw = reinterpret_cast<const uint8_t *>(ref_buf.data())[e];
439 bool ref = (ref_raw != 0 ? true : false);
440 uint8_t act_raw = reinterpret_cast<const uint8_t *>(act_buf.data())[e];
441 bool act = (act_raw != 0 ? true : false);
442 EXPECT_EQ(ref, act) << "Output #" << index << ", Element Index : " << e;
447 SessionObjectGeneric _so;
448 std::unique_ptr<GenModelTestContext> _context;