This commit fixes wrong inclusions and scale values in nnapi_unittests.
List of ops with fixed scale values:
- add
- conv
- dconv
- mul
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
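
The corrected scales all follow one rule: choose the output scale so that the
op's worst-case result, with every uint8 input tensor at scale 1.0f and
zero_point 0, still quantizes into [0, 255]. Below is a minimal standalone
sketch of that arithmetic; the KER_* constants are illustrative placeholders,
not values taken from the .lst files.

#include <cstdint>
#include <iostream>
#include <limits>

int main() {
  const float q_max = std::numeric_limits<uint8_t>::max(); // 255
  const float ifm_scale = 1.0f;                            // input scale used by the tests
  const float ker_scale = 1.0f;                            // kernel scale used by the tests

  // add: the worst-case sum of two scale-1.0f inputs is 255 + 255,
  // so output scale 2.0f maps it back into uint8 range.
  const float add_scale = 2.0f * ifm_scale;

  // conv/dconv: each output element accumulates KER_C * KER_H * KER_W
  // products, each bounded by (255 * ifm_scale) * (255 * ker_scale).
  const int KER_C = 3, KER_H = 3, KER_W = 3; // illustrative kernel shape
  const float conv_scale = (KER_C * KER_H * KER_W) * q_max * ifm_scale * ker_scale;

  // mul: one product per element, bounded by (255 * s1) * (255 * s2).
  const float mul_scale = q_max * ifm_scale * ker_scale;

  std::cout << "add scale:  " << add_scale << "\n"   // 2
            << "conv scale: " << conv_scale << "\n"  // 6885 for a 3x3x3 kernel
            << "mul scale:  " << mul_scale << "\n";  // 255
}

With scale-1.0f inputs this reduces to 2.0f for add, (KER_C * KER_H * KER_W) * 255
for conv/dconv, and 255 for mul, which are exactly the values the hunks below set.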
nnfw::util::env::IntAccessor("SEED").access(SEED);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "add_1.lst"
+#include "add_quan_1.lst"
#undef INT_VALUE
const int32_t LEFT_N = LEFT_N_Value();
//
// Q: Is this necessary?
TfLiteQuantizationParams quantization;
- quantization.scale = 1.0f;
quantization.zero_point = 0;
// On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
interp.AddTensors(3);
// Configure output
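+ // Sum of two scale-1.0f uint8 inputs can reach 2 * 255; output scale 2.0f keeps that representable.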
+ quantization.scale = 2.0f;
interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
{OFM_N, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
// Configure input(s)
+ quantization.scale = 1.0f;
interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "left" /* name */,
{LEFT_N, LEFT_H, LEFT_W, LEFT_C} /* dims */, quantization);
nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "avg_pool_1.lst"
+#include "avg_pool_quan_1.lst"
#undef INT_VALUE
const int32_t IFM_C = IFM_C_Value();
quantization.scale = 1.0f;
quantization.zero_point = 0;
- quantization.scale = 1;
- quantization.zero_point = 0;
-
// On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
interp.AddTensors(2);
nnfw::util::env::IntAccessor("SEED").access(SEED);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "cast_1.lst"
+#include "cast_q_to_f_1.lst"
#undef INT_VALUE
const int32_t IFM_N = IFM_N_Value();
nnfw::util::env::IntAccessor("SEED").access(SEED);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "concat_1.lst"
+#include "concat_quan_1.lst"
#undef INT_VALUE
// TODO Allow users to set concat axis!
nnfw::util::env::IntAccessor("SEED").access(SEED);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "conv_1.lst"
+#include "conv_quan_1.lst"
#undef INT_VALUE
const int32_t STRIDE_H = STRIDE_H_Value();
interp.AddTensors(5);
// Configure OFM
- quantization.scale = 1.0f;
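+ // Worst case: KER_C * KER_H * KER_W products of two scale-1.0f uint8 values, each up to 255 * 255.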
+ float max_scale = (KER_C * KER_H * KER_W) *
+     std::numeric_limits<uint8_t>::max(); // * IFM_scale(1.0f) * kernel_scale(1.0f)
+ quantization.scale = max_scale;
interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
{1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
// Configure IFM
- quantization.scale = 0.02f;
+ quantization.scale = 1.0f;
interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
{1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
nnfw::util::env::IntAccessor("SEED").access(SEED);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "dconv_1.lst"
+#include "dconv_quan_1.lst"
#undef INT_VALUE
const int32_t STRIDE_H = STRIDE_H_Value();
interp.AddTensors(4);
// Configure OFM
- quantization.scale = 1.0f;
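+ // Upper bound: 1 * KER_C * KER_H * KER_W products of scale-1.0f uint8 values, each up to 255 * 255.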
+ float max_scale = (1 * KER_C * KER_H * KER_W) *
+ std::numeric_limits<uint8_t>::max(); // * IFM_scale(1.0f) * kernel_scale(1.0f)
+ quantization.scale = max_scale;
interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
{1 /*N*/, OFM_H, OFM_W, OFM_C} /* dims */, quantization);
// Configure IFM
- quantization.scale = 0.02f;
+ quantization.scale = 1.0f;
interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "input" /* name */,
{1 /*N*/, IFM_H, IFM_W, IFM_C} /* dims */, quantization);
nnfw::util::env::IntAccessor("SEED").access(SEED);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "conv_1.lst"
+#include "fully_connected_quan_1.lst"
#undef INT_VALUE
const int32_t IFM_C = IFM_C_Value();
const int32_t IFM_H = IFM_H_Value();
const int32_t IFM_W = IFM_W_Value();
- const int32_t KER_H = KER_N_Value();
+ const int32_t KER_H = KER_H_Value();
const int32_t KER_W = IFM_C_Value() * IFM_H_Value() * IFM_W_Value();
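+ // FC flattens the IFM, so the kernel width is the full IFM element count.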
const int32_t OUT_LEN = KER_H;
nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "max_pool_1.lst"
+#include "max_pool_quan_1.lst"
#undef INT_VALUE
const TfLitePadding PADDING_TYPE = static_cast<TfLitePadding>(PADDING_TYPE_Value());
//
// Q: Is this necessary?
TfLiteQuantizationParams quantization;
- quantization.scale = FLOAT_NEAREST_TO_1;
quantization.zero_point = 0;
// On AddTensors(N) call, T/F Lite interpreter creates N tensors whose index is [0 ~ N)
interp.AddTensors(3);
// Configure output
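+ // Product of two scale-1.0f uint8 inputs can reach 255 * 255; output scale 255 keeps that representable.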
+ float max_scale =
+ std::numeric_limits<uint8_t>::max(); // * input1_scale(1.0f) * input2_scale(1.0f)
+ quantization.scale = max_scale;
interp.SetTensorParametersReadWrite(0, kTfLiteUInt8 /* type */, "output" /* name */,
{OFM_1D, OFM_2D, OFM_3D} /* dims */, quantization);
// Configure input(s)
+ quantization.scale = 1.0f;
interp.SetTensorParametersReadWrite(1, kTfLiteUInt8 /* type */, "left" /* name */,
{LEFT_1D, LEFT_2D, LEFT_3D} /* dims */, quantization);
nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "relu6_1.lst"
+#include "relu6_quan_1.lst"
#undef INT_VALUE
const int32_t IFM_H = IFM_H_Value();
nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "relu_1.lst"
+#include "relu_quan_1.lst"
#undef INT_VALUE
const int32_t IFM_H = IFM_H_Value();
nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "max_pool_1.lst"
+#include "reshape_quan_1.lst"
#undef INT_VALUE
const int32_t IFM_C = IFM_C_Value();
nnfw::util::env::IntAccessor("TOLERANCE").access(tolerance);
#define INT_VALUE(NAME, VALUE) IntVar NAME##_Value(#NAME, VALUE);
-#include "softmax_1.lst"
+#include "softmax_quan_1.lst"
#undef INT_VALUE
const int32_t IFM_C = 1;