/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "gtest/gtest.h"
19 #include "tflite/ext/kernels/register.h"
20 #include "tensorflow/lite/model.h"
21 #include "tensorflow/lite/builtin_op_data.h"
25 #include "misc/environment.h"
27 #include "tflite/Diff.h"
28 #include "tflite/Quantization.h"
29 #include "tflite/interp/FunctionBuilder.h"
34 using namespace tflite;
35 using namespace nnfw::tflite;
37 TEST(NNAPI_Quickcheck_max_pool_1, simple_test)
40 int SEED = std::chrono::system_clock::now().time_since_epoch().count();
42 nnfw::misc::env::IntAccessor("SEED").access(SEED);
44 // Set random test parameters
48 nnfw::misc::env::IntAccessor("VERBOSE").access(verbose);
49 nnfw::misc::env::IntAccessor("TOLERANCE").access(tolerance);
51 #define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
52 #include "max_pool_1.lst"
55 const TfLitePadding PADDING_TYPE = static_cast<TfLitePadding>(PADDING_TYPE_Value());
57 const int32_t IFM_C = IFM_C_Value();
58 const int32_t IFM_H = IFM_H_Value();
59 const int32_t IFM_W = IFM_W_Value();
61 const int32_t KER_H = KER_H_Value();
62 const int32_t KER_W = KER_W_Value();
64 const int32_t OFM_C = IFM_C;
65 const int32_t OFM_H = OFM_H_Value();
66 const int32_t OFM_W = OFM_W_Value();
68 assert((OFM_H >= (IFM_H - KER_H)));
69 assert((OFM_W >= (IFM_W - KER_W)));
70 assert((kTfLitePaddingSame == PADDING_TYPE) || (kTfLitePaddingValid == PADDING_TYPE));
72 std::cout << "Configurations:" << std::endl;
73 #define PRINT_NEWLINE() \
75 std::cout << std::endl; \
77 #define PRINT_VALUE(value) \
79 std::cout << " " << #value << ": " << (value) << std::endl; \
84 PRINT_VALUE(PADDING_TYPE);
102 auto setup = [&](Interpreter &interp) {
103 // Comment from 'context.h'
105 // Parameters for asymmetric quantization. Quantized values can be converted
106 // back to float using:
107 // real_value = scale * (quantized_value - zero_point);
109 // Q: Is this necessary?
110 TfLiteQuantizationParams quantization = make_default_quantization();
112 // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
113 interp.AddTensors(2);
116 interp.SetTensorParametersReadWrite(0, kTfLiteFloat32 /* type */, "output" /* name */,
117 {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
120 interp.SetTensorParametersReadWrite(1, kTfLiteFloat32 /* type */, "input" /* name */,
121 {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
123 // Add Max Pooling Node
125 // NOTE AddNodeWithParameters take the ownership of param, and deallocate it with free
126 // So, param should be allocated with malloc
127 auto param = make_alloc<TfLitePoolParams>();
129 param->padding = PADDING_TYPE;
130 param->stride_width = 1;
131 param->stride_height = 1;
132 param->filter_width = KER_W;
133 param->filter_height = KER_H;
134 param->activation = kTfLiteActNone;
136 // Run Convolution and store its result into Tensor #0
137 // - Read IFM from Tensor #1
138 interp.AddNodeWithParameters({1}, {0}, nullptr, 0, reinterpret_cast<void *>(param),
139 BuiltinOpResolver().FindOp(BuiltinOperator_MAX_POOL_2D, 1));
141 // Set Tensor #1 as Input #0, and Tensor #0 as Output #0
142 interp.SetInputs({1});
143 interp.SetOutputs({0});
146 const nnfw::tflite::FunctionBuilder builder(setup);
148 RandomTestParam param;
150 param.verbose = verbose;
151 param.tolerance = tolerance;
153 int res = RandomTestRunner{SEED, param}.run(builder);