From 606057e7ca9318a95349e464e78c43fbb3fabadc Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=9E=A5=EC=A7=80=EC=84=AD/=EB=8F=99=EC=9E=91=EC=A0=9C?= =?utf8?q?=EC=96=B4Lab=28SR=29/Engineer/=EC=82=BC=EC=84=B1=EC=A0=84?= =?utf8?q?=EC=9E=90?= Date: Mon, 9 Jul 2018 14:43:23 +0900 Subject: [PATCH] Change the scale value of unit test related to the convolution. (#1887) This commit changes the scale value of unit tests related to the convolution. - Change the scale value of output operand to 1.0f. - Change the scale value of input and kernel operand to 0.02f. - Change the scale value of bias to the value of multiplying input scale and kernel scale. Signed-off-by: jiseob.jang --- tools/nnapi_unittests/tests/conv_quan_1.cpp | 6 +++--- tools/nnapi_unittests/tests/dconv_quan_1.cpp | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/nnapi_unittests/tests/conv_quan_1.cpp b/tools/nnapi_unittests/tests/conv_quan_1.cpp index 444fa5f..dd8d01e 100644 --- a/tools/nnapi_unittests/tests/conv_quan_1.cpp +++ b/tools/nnapi_unittests/tests/conv_quan_1.cpp @@ -147,20 +147,19 @@ TEST(NNAPI_Unittest_conv_1, simple_test) // Parameters for asymmetric quantization. Quantized values can be converted // back to float using: // real_value = scale * (quantized_value - zero_point); - // - // Q: Is this necessary? 
TfLiteQuantizationParams quantization; - quantization.scale = FLOAT_NEAREST_TO_1; quantization.zero_point = 0; // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N) interp.AddTensors(5); // Configure OFM + quantization.scale = 1.0f; interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */, {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization); // Configure IFM + quantization.scale = 0.02f; interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */, {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization); @@ -169,6 +168,7 @@ TEST(NNAPI_Unittest_conv_1, simple_test) 2, kTfLiteUInt8 /* type */, "filter" /* name */, {KER_N, KER_H, KER_W, KER_C} /* dims */, quantization, reinterpret_cast(kernel_data), kernel_size * sizeof(uint8_t)); + quantization.scale *= quantization.scale; interp.SetTensorParametersReadOnly( 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization, reinterpret_cast(bias_data), bias_size * sizeof(int32_t)); diff --git a/tools/nnapi_unittests/tests/dconv_quan_1.cpp b/tools/nnapi_unittests/tests/dconv_quan_1.cpp index a59ac5c..5a22591 100644 --- a/tools/nnapi_unittests/tests/dconv_quan_1.cpp +++ b/tools/nnapi_unittests/tests/dconv_quan_1.cpp @@ -144,20 +144,19 @@ TEST(NNAPI_Unittest_dconv_1, simple_test) // Parameters for asymmetric quantization. Quantized values can be converted // back to float using: // real_value = scale * (quantized_value - zero_point); - // - // Q: Is this necessary? 
TfLiteQuantizationParams quantization; - quantization.scale = FLOAT_NEAREST_TO_1; quantization.zero_point = 0; // On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N) interp.AddTensors(4); // Configure OFM + quantization.scale = 1.0f; interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */, {1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization); // Configure IFM + quantization.scale = 0.02f; interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */, {1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization); @@ -166,6 +165,7 @@ TEST(NNAPI_Unittest_dconv_1, simple_test) 2, kTfLiteUInt8 /* type */, "filter" /* name */, {1, KER_H, KER_W, KER_C} /* dims */, quantization, reinterpret_cast(kernel_data), kernel_size * sizeof(uint8_t)); + quantization.scale *= quantization.scale; interp.SetTensorParametersReadOnly( 3, kTfLiteInt32 /* type */, "bias" /* name */, {bias_size} /* dims */, quantization, reinterpret_cast(bias_data), bias_size * sizeof(int32_t)); -- 2.7.4