[Topk_v2 Op] support other operation types (#1314)
author최성진/동작제어Lab(SR)/Principal Engineer/삼성전자 <lotieye.choi@samsung.com>
Wed, 30 May 2018 11:06:45 +0000 (20:06 +0900)
committer이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Wed, 30 May 2018 11:06:45 +0000 (20:06 +0900)
* [Topk_v2 Op] add uint8 types

This commit adds other types.
-uint8

Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
* [Topk_v2 Op] add int32 operation types

This commit adds other types.
-int32

Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
* [Topk_v2 Op] add a comment that explains the topk_v2 operation

This commit adds comments.
- TopK_v2 of NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, and TENSOR_INT32, while TFLite additionally supports kTfLiteInt64.

Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
* [Topk_v2 Op] add 2D test cases for other types

This commit adds 2D test cases for other types (int32, uint8)

Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
16 files changed:
runtimes/nn/common/OperationsUtils.cpp
runtimes/nn/common/operations/Topk_v2.cpp
runtimes/nn/common/operations/internal/optimized/topk_v2.h
runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
runtimes/tests/neural_networks_test/generated/examples/topk_v2_1D_int32.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/topk_v2_1D_uint8.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/topk_v2_2D_int32.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/topk_v2_2D_uint8.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/topk_v2_1D_int32.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/topk_v2_1D_uint8.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/topk_v2_2D_int32.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/topk_v2_2D_uint8.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/Ex/topk_v2_1D_int32.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/Ex/topk_v2_1D_uint8.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/Ex/topk_v2_2D_int32.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/Ex/topk_v2_2D_uint8.mod.py [new file with mode: 0644]

index a1584ad..ae6eb3a 100644 (file)
@@ -665,10 +665,12 @@ bool topk_v2Prepare(const Shape &inputShape, const int32_t k, Shape *outputIndex
   // k shoud be larger than zero.
   NN_OPS_CHECK(k > 0);
 
-  // TODO: other dimension
+  // NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, TENSOR_INT32.
+  // While TFLite supports kTfLiteFloat32, kTfLiteUInt8, kTfLiteInt32, as well as kTfLiteInt64.
   switch (inputShape.type)
   {
     case OperandType::TENSOR_FLOAT32:
+    case OperandType::TENSOR_QUANT8_ASYMM:
     case OperandType::TENSOR_INT32:
     {
       // Fully supported by optimized_ops::TopK.
index c2fb9a0..5bd7f94 100644 (file)
@@ -41,13 +41,26 @@ bool topk_v2Generic(const uint8_t *inputData, const Shape &inputShape, const int
     num_rows *= inputShape.dimensions[i];
   }
 
-  // TODO: other types
+  // NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, TENSOR_INT32.
+  // While TFLite supports kTfLiteFloat32, kTfLiteUInt8, kTfLiteInt32, as well as kTfLiteInt64.
   if (inputShape.type == OperandType::TENSOR_FLOAT32)
   {
     optimized_ops::TopK(row_size, num_rows, reinterpret_cast<const float *>(inputData), k,
                         reinterpret_cast<int32 *>(outputIndexes),
                         reinterpret_cast<float *>(outputValues));
   }
+  else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM)
+  {
+    optimized_ops::TopK(row_size, num_rows, reinterpret_cast<const uint8_t *>(inputData), k,
+                        reinterpret_cast<int32 *>(outputIndexes),
+                        reinterpret_cast<uint8_t *>(outputValues));
+  }
+  else if (inputShape.type == OperandType::TENSOR_INT32)
+  {
+    optimized_ops::TopK(row_size, num_rows, reinterpret_cast<const int32 *>(inputData), k,
+                        reinterpret_cast<int32 *>(outputIndexes),
+                        reinterpret_cast<int32 *>(outputValues));
+  }
   else
   {
     LOG(ERROR) << "Unsupported data type";
index 3d29cc4..8036dfd 100644 (file)
@@ -28,6 +28,10 @@ namespace rt
 {
 namespace optimized_ops
 {
+// The following code is implemented and modified while referring to the TFLite topk_v2.cc file.
+// TopK_v2 of NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, and TENSOR_INT32, unlike
+// TFLite.
+// (TFLite additionally supports kTfLiteInt64.)
 
 // The class that collects top indexes of k values. Based on template
 // tensorflow::gtl::TopN<> but, for optimization,
index d15766e..9b1bc60 100644 (file)
@@ -2269,6 +2269,62 @@ TEST_F(GeneratedTests, cast_ex_int32_to_float32) {
             cast_ex_int32_to_float32::examples);
 }
 
+namespace topk_v2_1D_int32 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_1D_int32 test
+#include "generated/examples/topk_v2_1D_int32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_1D_int32.model.cpp"
+} // namespace topk_v2_1D_int32
+TEST_F(GeneratedTests, topk_v2_1D_int32) {
+    execute(topk_v2_1D_int32::CreateModel,
+            topk_v2_1D_int32::is_ignored,
+            topk_v2_1D_int32::examples);
+}
+
+namespace topk_v2_1D_uint8 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_1D_uint8 test
+#include "generated/examples/topk_v2_1D_uint8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_1D_uint8.model.cpp"
+} // namespace topk_v2_1D_uint8
+TEST_F(GeneratedTests, topk_v2_1D_uint8) {
+    execute(topk_v2_1D_uint8::CreateModel,
+            topk_v2_1D_uint8::is_ignored,
+            topk_v2_1D_uint8::examples);
+}
+
+namespace topk_v2_2D_int32 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_2D_int32 test
+#include "generated/examples/topk_v2_2D_int32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_2D_int32.model.cpp"
+} // namespace topk_v2_2D_int32
+TEST_F(GeneratedTests, topk_v2_2D_int32) {
+    execute(topk_v2_2D_int32::CreateModel,
+            topk_v2_2D_int32::is_ignored,
+            topk_v2_2D_int32::examples);
+}
+
+namespace topk_v2_2D_uint8 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_2D_uint8 test
+#include "generated/examples/topk_v2_2D_uint8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_2D_uint8.model.cpp"
+} // namespace topk_v2_2D_uint8
+TEST_F(GeneratedTests, topk_v2_2D_uint8) {
+    execute(topk_v2_2D_uint8::CreateModel,
+            topk_v2_2D_uint8::is_ignored,
+            topk_v2_2D_uint8::examples);
+}
+
 namespace gather_2D_float {
 std::vector<MixedTypedExample> examples = {
 // Generated gather_2D_float test
diff --git a/runtimes/tests/neural_networks_test/generated/examples/topk_v2_1D_int32.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/topk_v2_1D_int32.example.cpp
new file mode 100644 (file)
index 0000000..bb62a91
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: topk_v2_1D_int32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {40000, 41000, 50000, 60000}}},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 2}}, {1, {60000, 50000}}},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/topk_v2_1D_uint8.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/topk_v2_1D_uint8.example.cpp
new file mode 100644 (file)
index 0000000..74e4aec
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: topk_v2_1D_uint8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 2}}},
+  // int -> QUANT8_ASYMM map
+  {{1, {6, 5}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/topk_v2_2D_int32.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/topk_v2_2D_int32.example.cpp
new file mode 100644 (file)
index 0000000..9c413e4
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: topk_v2_2D_int32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {40000, 41000, 50000, 60000, 70000, 80000, 90000, 79000, 170000, 180000, 190000, 110000}}},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 2, 2, 1, 2, 1}}, {1, {60000, 50000, 90000, 80000, 190000, 180000}}},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/topk_v2_2D_uint8.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/topk_v2_2D_uint8.example.cpp
new file mode 100644 (file)
index 0000000..f6a05a7
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: topk_v2_2D_uint8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 4, 5, 6, 7, 8, 9, 1, 2, 18, 19, 11}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {3, 2, 2, 1, 2, 1}}},
+  // int -> QUANT8_ASYMM map
+  {{1, {6, 5, 9, 8, 19, 18}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/models/topk_v2_1D_int32.model.cpp b/runtimes/tests/neural_networks_test/generated/models/topk_v2_1D_int32.model.cpp
new file mode 100644 (file)
index 0000000..5d11fed
--- /dev/null
@@ -0,0 +1,25 @@
+// Generated file (from: topk_v2_1D_int32.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto k = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t k_init[] = {2};
+  model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2, op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/topk_v2_1D_uint8.model.cpp b/runtimes/tests/neural_networks_test/generated/models/topk_v2_1D_uint8.model.cpp
new file mode 100644 (file)
index 0000000..02e7d2a
--- /dev/null
@@ -0,0 +1,26 @@
+// Generated file (from: topk_v2_1D_uint8.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto k = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t k_init[] = {2};
+  model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2, op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/topk_v2_2D_int32.model.cpp b/runtimes/tests/neural_networks_test/generated/models/topk_v2_2D_int32.model.cpp
new file mode 100644 (file)
index 0000000..36e137e
--- /dev/null
@@ -0,0 +1,25 @@
+// Generated file (from: topk_v2_2D_int32.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {3,2});
+  OperandType type0(Type::TENSOR_INT32, {3,4});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto k = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t k_init[] = {2};
+  model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2, op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/topk_v2_2D_uint8.model.cpp b/runtimes/tests/neural_networks_test/generated/models/topk_v2_2D_uint8.model.cpp
new file mode 100644 (file)
index 0000000..d763067
--- /dev/null
@@ -0,0 +1,26 @@
+// Generated file (from: topk_v2_2D_uint8.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_INT32, {3,2});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3,2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3,4});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto k = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t k_init[] = {2};
+  model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2, op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_1D_int32.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_1D_int32.mod.py
new file mode 100644 (file)
index 0000000..79d5f47
--- /dev/null
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{4}") # a vector of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{2}") # indexes of output
+i3 = Output("op3", "TENSOR_INT32", "{2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [40000, 41000, 50000, 60000]}
+
+output0 = {i2: # output 0
+           [3, 2],
+           i3: # output 1
+           [60000, 50000]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_1D_uint8.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_1D_uint8.mod.py
new file mode 100644 (file)
index 0000000..d2c6c12
--- /dev/null
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{4}") # a vector of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{2}") # indexes of output
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [3, 4, 5, 6]}
+
+output0 = {i2: # output 0
+           [3, 2],
+           i3: # output 1
+           [6, 5]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_2D_int32.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_2D_int32.mod.py
new file mode 100644 (file)
index 0000000..d688927
--- /dev/null
@@ -0,0 +1,25 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{3,4}") # a vector of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{3,2}") # indexes of output
+i3 = Output("op3", "TENSOR_INT32", "{3,2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [40000, 41000, 50000, 60000,
+          70000, 80000, 90000, 79000,
+          170000, 180000, 190000, 110000]}
+
+output0 = {i2: # output 0
+           [3, 2,
+           2, 1,
+           2, 1],
+           i3: # output 1
+           [60000, 50000,
+           90000, 80000,
+           190000, 180000]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_2D_uint8.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/topk_v2_2D_uint8.mod.py
new file mode 100644 (file)
index 0000000..a6e0efc
--- /dev/null
@@ -0,0 +1,25 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3,4}") # a vector of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{3,2}") # indexes of output
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{3,2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [3, 4, 5, 6,
+          7, 8, 9, 1,
+          2, 18, 19, 11]}
+
+output0 = {i2: # output 0
+           [3, 2,
+           2, 1,
+           2, 1],
+           i3: # output 1
+           [6, 5,
+           9, 8,
+           19, 18]}
+
+# Instantiate an example
+Example((input0, output0))