/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "gtest/gtest.h"
19 #include "tflite/ext/kernels/register.h"
20 #include "tensorflow/lite/model.h"
21 #include "tensorflow/lite/builtin_op_data.h"
24 #include "misc/environment.h"
26 #include "tflite/Diff.h"
27 #include "tflite/Quantization.h"
28 #include "tflite/interp/FunctionBuilder.h"
36 using namespace tflite;
37 using namespace nnfw::tflite;
// Allocate uninitialized storage for one T with malloc.
//
// Rationale: Interpreter::AddNodeWithParameters() takes ownership of its
// builtin-params struct and releases it with free(), so the struct must come
// from malloc (not new / make_unique).
//
// @return pointer to malloc'd (uninitialized) storage for a T, or nullptr on
//         allocation failure
template <typename T> T *make_malloc(void)
{
  // static_cast is the correct named cast for void* -> T*; reinterpret_cast
  // is unnecessary here since malloc returns void*.
  return static_cast<T *>(malloc(sizeof(T)));
}
// Randomized smoke test for an 8-bit asymmetric-quantized FULLY_CONNECTED op:
// builds a tiny four-tensor T/F Lite graph via the `setup` lambda below and
// hands it to RandomTestRunner (which presumably compares the NNAPI execution
// against the reference interpreter -- runner source not visible here;
// confirm).
//
// NOTE(review): this view of the file appears truncated (several braces and
// statements are missing between visible lines); the comments below describe
// only the code that is visible.
TEST(NNAPI_Quickcheck_fully_connected_1, simple_test)
// VERBOSE / TOLERANCE environment variables override the defaults at runtime
nnfw::misc::env::IntAccessor("VERBOSE").access(verbose);
nnfw::misc::env::IntAccessor("TOLERANCE").access(tolerance);
// Seed from the wall clock by default; the SEED environment variable pins it
// so a failing run can be reproduced exactly
int SEED = std::chrono::system_clock::now().time_since_epoch().count();
nnfw::misc::env::IntAccessor("SEED").access(SEED);
// Each INT_VALUE(NAME, VALUE) entry in the .lst file expands to an IntVar
// named NAME_Value carrying that test dimension (presumably env-overridable;
// IntVar declaration not visible here)
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
#include "fully_connected_quan_1.lst"
// Input feature map dimensions (the input tensor below is 1 x IFM_H x IFM_W x IFM_C)
const int32_t IFM_C = IFM_C_Value();
const int32_t IFM_H = IFM_H_Value();
const int32_t IFM_W = IFM_W_Value();
// Weight matrix: KER_H output units, each consuming the flattened input
// (KER_W = total input element count)
const int32_t KER_H = KER_H_Value();
const int32_t KER_W = IFM_C_Value() * IFM_H_Value() * IFM_W_Value();
// One output value per output unit
const int32_t OUT_LEN = KER_H;
// Initialize random number generator
std::minstd_rand random(SEED);
std::cout << "Configurations:" << std::endl;
#define PRINT_NEWLINE() \
std::cout << std::endl; \
#define PRINT_VALUE(value) \
std::cout << " " << #value << ": " << (value) << std::endl; \
// Configure Kernel Data
// NOTE(review): kernel_size is not a compile-time constant, so the array
// below is a variable-length array -- a compiler extension, not standard C++
const uint32_t kernel_size = KER_H * KER_W;
float kernel_data[kernel_size] = {
// Fill kernel data with random data
// NOTE(review): std::normal_distribution takes (mean, stddev); (-1.0f, +1.0f)
// is mean -1 / stddev 1, not a [-1, +1] range -- confirm this is intended
std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
// NOTE(review): `off` advances twice per iteration (++off in the header AND
// off++ in the body), so only every other element is overwritten -- looks
// like a bug; compare against the sibling quickcheck tests
for (uint32_t off = 0; off < kernel_size; ++off)
kernel_data[off++] = kernel_dist(random);
// Configure Bias Data
const auto bias_size = KER_H;
int32_t bias_data[bias_size] = {
// Fill bias data with random data
// (distribution is again mean -1 / stddev 1 -- see the kernel note above;
// the cast truncates toward zero, so most biases land in {-2, -1, 0})
std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
for (uint32_t off = 0; off < bias_size; ++off)
bias_data[off] = static_cast<int32_t>(bias_dist(random));
// Build the four-tensor FULLY_CONNECTED graph on the given interpreter
auto setup = [&](Interpreter &interp) {
// Comment from 'context.h'
// Parameters for asymmetric quantization. Quantized values can be converted
// back to float using:
// real_value = scale * (quantized_value - zero_point);
// Q: Is this necessary?
TfLiteQuantizationParams quantization = make_default_quantization();
// FLOAT_NEAREST_TO_1 is defined elsewhere (not visible here); presumably the
// largest float below 1.0 so dequantized values stay in range -- confirm
quantization.scale = FLOAT_NEAREST_TO_1;
quantization.zero_point = 0;
// On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
interp.AddTensors(4);
// Tensor #0: the output vector (one uint8 per output unit)
interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
{1 /*N*/, KER_H} /* dims */, quantization);
// Tensor #1: the NHWC input feature map
interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
{1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
// NOTE kernel_data & bias_data should live longer than interpreter!
// NOTE(review): kernel_data holds floats, but this tensor is declared
// kTfLiteUInt8 and registered with kernel_size * sizeof(uint8_t) bytes --
// the float bytes are reinterpreted as uint8 and only the first quarter of
// the buffer is covered; confirm this byte-level aliasing is intended
interp.SetTensorParametersReadOnly(
2, kTfLiteUInt8 /* type */, "filter" /* name */, {KER_H, KER_W} /* dims */, quantization,
reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(uint8_t));
// Tensor #3: int32 bias, one per output unit
interp.SetTensorParametersReadOnly(
3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
reinterpret_cast<const char *>(bias_data), bias_size * sizeof(int32_t));
// Add Fully Connected Node
// NOTE AddNodeWithParameters take the ownership of param, and deallocate it with free
// So, param should be allocated with malloc
auto param = make_malloc<TfLiteFullyConnectedParams>();
// Fused ReLU activation on the FC output
param->activation = kTfLiteActRelu;
// Run Convolution and store its result into Tensor #0
// - Read IFM from Tensor #1
// - Read Filter from Tensor #2,
// - Read Bias from Tensor #3
interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
BuiltinOpResolver().FindOp(BuiltinOperator_FULLY_CONNECTED, 1));
// Set Tensor #1 as Input #0, and Tensor #0 as Output #0
interp.SetInputs({1});
interp.SetOutputs({0});
// Wrap the graph builder and run the randomized comparison with the
// configured verbosity / tolerance; `res` is presumably checked by an
// EXPECT below (past the end of this view)
const nnfw::tflite::FunctionBuilder builder(setup);
RandomTestParam param;
param.verbose = verbose;
param.tolerance = tolerance;
int res = RandomTestRunner{SEED, param}.run(builder);