2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "gtest/gtest.h"
19 #include "tflite/ext/kernels/register.h"
20 #include "tensorflow/lite/model.h"
21 #include "tensorflow/lite/builtin_op_data.h"
25 #include "misc/environment.h"
27 #include "tflite/Diff.h"
28 #include "tflite/Quantization.h"
29 #include "tflite/interp/FunctionBuilder.h"
37 using namespace tflite;
38 using namespace nnfw::tflite;
// Randomized single-op quick-check: builds a one-node DEPTHWISE_CONV_2D
// T/F Lite interpreter (via the `setup` lambda below) and hands it to
// RandomTestRunner, which feeds random input and compares execution paths.
// Runtime knobs come from the environment: VERBOSE, TOLERANCE and SEED.
40 TEST(NNAPI_Quickcheck_dconv_1, simple_test)
// Pull optional overrides from environment variables; the variables keep
// their current values when the corresponding env var is unset.
45 nnfw::misc::env::IntAccessor("VERBOSE").access(verbose);
46 nnfw::misc::env::IntAccessor("TOLERANCE").access(tolerance);
// Default seed is wall-clock based, so each run differs unless SEED is set.
49 int SEED = std::chrono::system_clock::now().time_since_epoch().count();
51 nnfw::misc::env::IntAccessor("SEED").access(SEED);
// Each INT_VALUE(NAME, VALUE) entry in dconv_1.lst expands to an IntVar
// named NAME_Value (presumably env-overridable like the accessors above —
// confirm against IntVar's definition).
53 #define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
54 #include "dconv_1.lst"
// Snapshot the configured shape parameters as plain constants.
57 const int32_t STRIDE_H = STRIDE_H_Value();
58 const int32_t STRIDE_W = STRIDE_W_Value();
60 const int32_t IFM_C = IFM_C_Value();
61 const int32_t IFM_H = IFM_H_Value();
62 const int32_t IFM_W = IFM_W_Value();
64 const int32_t KER_C = KER_C_Value();
65 const int32_t KER_H = KER_H_Value();
66 const int32_t KER_W = KER_W_Value();
// Output shape follows the no-padding (VALID) formula, matching the
// kTfLitePaddingValid setting applied to the node below.
68 const int32_t OFM_C = KER_C;
69 const int32_t OFM_H = (IFM_H - KER_H) / STRIDE_H + 1;
70 const int32_t OFM_W = (IFM_W - KER_W) / STRIDE_W + 1;
72 const int32_t MULTIPLIER = MULTIPLIER_Value();
74 // Initialize random number generator
75 std::minstd_rand random(SEED);
// Echo the effective configuration so failing seeds can be reproduced.
77 std::cout << "Configurations:" << std::endl;
78 #define PRINT_NEWLINE() \
80 std::cout << std::endl; \
82 #define PRINT_VALUE(value) \
84 std::cout << " " << #value << ": " << (value) << std::endl; \
99 PRINT_VALUE(STRIDE_H);
100 PRINT_VALUE(STRIDE_W);
103 PRINT_VALUE(MULTIPLIER);
// Depthwise conv invariant: output channels = input channels * multiplier.
107 assert(MULTIPLIER * IFM_C == KER_C);
109 // Configure Kernel Data
110 const uint32_t kernel_size = KER_C * KER_H * KER_W;
// NOTE(review): runtime-sized stack array is a VLA — a compiler extension,
// not standard C++. Consider std::vector<float> (lifetime note below still
// applies: the buffer must outlive the interpreter).
111 float kernel_data[kernel_size] = {
115 // Fill kernel data with random data
// NOTE(review): normal_distribution args are (mean, stddev), so this is
// mean = -1.0, stddev = +1.0 — confirm a symmetric [-1, +1] range was not
// the actual intent.
117 std::normal_distribution<float> kernel_dist(-1.0f, +1.0f);
119 for (uint32_t off = 0; off < kernel_size; ++off)
121 kernel_data[off] = kernel_dist(random);
125 // Configure Bias Data
// One bias value per output channel (KER_C).
126 const auto bias_size = KER_C;
127 float bias_data[bias_size] = {
131 // Fill bias data with random data
// NOTE(review): same (mean, stddev) question as kernel_dist above.
133 std::normal_distribution<float> bias_dist(-1.0f, +1.0f);
135 for (uint32_t off = 0; off < bias_size; ++off)
137 bias_data[off] = bias_dist(random);
// Interpreter construction callback handed to FunctionBuilder below.
// Tensor layout: #0 = output, #1 = input, #2 = filter, #3 = bias.
141 auto setup = [&](Interpreter &interp) {
142 // Comment from 'context.h'
144 // Parameters for asymmetric quantization. Quantized values can be converted
145 // back to float using:
146 // real_value = scale * (quantized_value - zero_point);
148 // Q: Is this necessary?
149 TfLiteQuantizationParams quantization = make_default_quantization();
151 // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
152 interp.AddTensors(4);
// Tensor #0: float32 output, NHWC shape {1, OFM_H, OFM_W, OFM_C}.
155 interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
156 {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
// Tensor #1: float32 input, NHWC shape {1, IFM_H, IFM_W, IFM_C}.
159 interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
160 {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
162 // NOTE kernel_data & bias_data should live longer than interpreter!
// Tensor #2: read-only filter backed directly by kernel_data (no copy).
163 interp.SetTensorParametersReadOnly(
164 2, kTfLiteFloat32 /* type */, "filter" /* name */, {1, KER_H, KER_W, KER_C} /* dims */,
165 quantization, reinterpret_cast<const char *>(kernel_data), kernel_size * sizeof(float));
// Tensor #3: read-only bias backed directly by bias_data (no copy).
167 interp.SetTensorParametersReadOnly(
168 3, kTfLiteFloat32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization,
169 reinterpret_cast<const char *>(bias_data), bias_size * sizeof(float));
171 // Add Convolution Node
173 // NOTE AddNodeWithParameters take the ownership of param, and deallocate it with free
174 // So, param should be allocated with malloc
175 auto param = make_alloc<TfLiteDepthwiseConvParams>();
// VALID padding here matches the OFM_H/OFM_W formula computed above.
177 param->padding = kTfLitePaddingValid;
178 param->stride_width = STRIDE_W;
179 param->stride_height = STRIDE_H;
180 param->depth_multiplier = MULTIPLIER;
// Fused ReLU activation is applied to the conv output.
181 param->activation = kTfLiteActRelu;
183 // Run Convolution and store its result into Tensor #0
184 // - Read IFM from Tensor #1
185 // - Read Filter from Tensor #2,
186 // - Read Bias from Tensor #3
187 interp.AddNodeWithParameters({1, 2, 3}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
188 BuiltinOpResolver().FindOp(BuiltinOperator_DEPTHWISE_CONV_2D, 1));
190 // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
191 interp.SetInputs({1});
192 interp.SetOutputs({0});
// Wrap the setup lambda so RandomTestRunner can build fresh interpreters.
195 const nnfw::tflite::FunctionBuilder builder(setup);
197 RandomTestParam param;
199 param.verbose = verbose;
200 param.tolerance = tolerance;
// Run the randomized comparison; `res` is presumably checked against 0
// further down (end of test body is outside this view).
202 int res = RandomTestRunner{SEED, param}.run(builder);