// k shoud be larger than zero.
NN_OPS_CHECK(k > 0);
- // TODO: other dimension
+ // NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, and TENSOR_INT32,
+ // while TFLite supports kTfLiteFloat32, kTfLiteUInt8, kTfLiteInt32, as well as kTfLiteInt64.
switch (inputShape.type)
{
case OperandType::TENSOR_FLOAT32:
+ case OperandType::TENSOR_QUANT8_ASYMM:
case OperandType::TENSOR_INT32:
{
// Fully supported by optimized_ops::TopK.
num_rows *= inputShape.dimensions[i];
}
- // TODO: other types
+ // NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, and TENSOR_INT32,
+ // while TFLite supports kTfLiteFloat32, kTfLiteUInt8, kTfLiteInt32, as well as kTfLiteInt64.
if (inputShape.type == OperandType::TENSOR_FLOAT32)
{
optimized_ops::TopK(row_size, num_rows, reinterpret_cast<const float *>(inputData), k,
reinterpret_cast<int32 *>(outputIndexes),
reinterpret_cast<float *>(outputValues));
}
+ else if (inputShape.type == OperandType::TENSOR_QUANT8_ASYMM)
+ {
+ optimized_ops::TopK(row_size, num_rows, reinterpret_cast<const uint8_t *>(inputData), k,
+ reinterpret_cast<int32 *>(outputIndexes),
+ reinterpret_cast<uint8_t *>(outputValues));
+ }
+ else if (inputShape.type == OperandType::TENSOR_INT32)
+ {
+ optimized_ops::TopK(row_size, num_rows, reinterpret_cast<const int32 *>(inputData), k,
+ reinterpret_cast<int32 *>(outputIndexes),
+ reinterpret_cast<int32 *>(outputValues));
+ }
else
{
LOG(ERROR) << "Unsupported data type";
{
namespace optimized_ops
{
+// The following code is implemented and modified with reference to the TFLite topk_v2.cc file.
+// Unlike TFLite, TopK_v2 of NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, and
+// TENSOR_INT32.
+// (TFLite additionally supports kTfLiteInt64.)
// The class that collects top indexes of k values. Based on template
// tensorflow::gtl::TopN<> but, for optimization,
cast_ex_int32_to_float32::examples);
}
+namespace topk_v2_1D_int32 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_1D_int32 test
+#include "generated/examples/topk_v2_1D_int32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_1D_int32.model.cpp"
+} // namespace topk_v2_1D_int32
+TEST_F(GeneratedTests, topk_v2_1D_int32) {
+ execute(topk_v2_1D_int32::CreateModel,
+ topk_v2_1D_int32::is_ignored,
+ topk_v2_1D_int32::examples);
+}
+
+namespace topk_v2_1D_uint8 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_1D_uint8 test
+#include "generated/examples/topk_v2_1D_uint8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_1D_uint8.model.cpp"
+} // namespace topk_v2_1D_uint8
+TEST_F(GeneratedTests, topk_v2_1D_uint8) {
+ execute(topk_v2_1D_uint8::CreateModel,
+ topk_v2_1D_uint8::is_ignored,
+ topk_v2_1D_uint8::examples);
+}
+
+namespace topk_v2_2D_int32 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_2D_int32 test
+#include "generated/examples/topk_v2_2D_int32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_2D_int32.model.cpp"
+} // namespace topk_v2_2D_int32
+TEST_F(GeneratedTests, topk_v2_2D_int32) {
+ execute(topk_v2_2D_int32::CreateModel,
+ topk_v2_2D_int32::is_ignored,
+ topk_v2_2D_int32::examples);
+}
+
+namespace topk_v2_2D_uint8 {
+std::vector<MixedTypedExample> examples = {
+// Generated topk_v2_2D_uint8 test
+#include "generated/examples/topk_v2_2D_uint8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/topk_v2_2D_uint8.model.cpp"
+} // namespace topk_v2_2D_uint8
+TEST_F(GeneratedTests, topk_v2_2D_uint8) {
+ execute(topk_v2_2D_uint8::CreateModel,
+ topk_v2_2D_uint8::is_ignored,
+ topk_v2_2D_uint8::examples);
+}
+
namespace gather_2D_float {
std::vector<MixedTypedExample> examples = {
// Generated gather_2D_float test
--- /dev/null
+// Generated file (from: topk_v2_1D_int32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {40000, 41000, 50000, 60000}}},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {3, 2}}, {1, {60000, 50000}}},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: topk_v2_1D_uint8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {3, 2}}},
+ // int -> QUANT8_ASYMM map
+ {{1, {6, 5}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: topk_v2_2D_int32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {40000, 41000, 50000, 60000, 70000, 80000, 90000, 79000, 170000, 180000, 190000, 110000}}},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {3, 2, 2, 1, 2, 1}}, {1, {60000, 50000, 90000, 80000, 190000, 180000}}},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: topk_v2_2D_uint8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {3, 4, 5, 6, 7, 8, 9, 1, 2, 18, 19, 11}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {3, 2, 2, 1, 2, 1}}},
+ // int -> QUANT8_ASYMM map
+ {{1, {6, 5, 9, 8, 19, 18}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: topk_v2_1D_int32.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type0(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto k = model->addOperand(&type1);
+ auto op2 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t k_init[] = {2};
+ model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+ model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op2, op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: topk_v2_1D_uint8.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {2});
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2});
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto k = model->addOperand(&type1);
+ auto op2 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t k_init[] = {2};
+ model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+ model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op2, op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: topk_v2_2D_int32.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {3,2});
+ OperandType type0(Type::TENSOR_INT32, {3,4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto k = model->addOperand(&type1);
+ auto op2 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t k_init[] = {2};
+ model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+ model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op2, op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: topk_v2_2D_uint8.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::INT32, {});
+ OperandType type2(Type::TENSOR_INT32, {3,2});
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3,2});
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3,4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto k = model->addOperand(&type1);
+ auto op2 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t k_init[] = {2};
+ model->setOperandValue(k, k_init, sizeof(int32_t) * 1);
+ model->addOperationEx(ANEURALNETWORKS_TOPK_V2_EX, {op1, k}, {op2, op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op2, op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{4}") # a vector of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{2}") # indexes of output
+i3 = Output("op3", "TENSOR_INT32", "{2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [40000, 41000, 50000, 60000]}
+
+output0 = {i2: # output 0
+ [3, 2],
+ i3: # output 1
+ [60000, 50000]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{4}") # a vector of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{2}") # indexes of output
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [3, 4, 5, 6]}
+
+output0 = {i2: # output 0
+ [3, 2],
+ i3: # output 1
+ [6, 5]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{3,4}") # a 2-D matrix of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{3,2}") # indexes of output
+i3 = Output("op3", "TENSOR_INT32", "{3,2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [40000, 41000, 50000, 60000,
+ 70000, 80000, 90000, 79000,
+ 170000, 180000, 190000, 110000]}
+
+output0 = {i2: # output 0
+ [3, 2,
+ 2, 1,
+ 2, 1],
+ i3: # output 1
+ [60000, 50000,
+ 90000, 80000,
+ 190000, 180000]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{3,4}") # a 2-D matrix of input
+k = Int32Scalar("k", 2)
+i2 = Output("op2", "TENSOR_INT32", "{3,2}") # indexes of output
+i3 = Output("op3", "TENSOR_QUANT8_ASYMM", "{3,2}") # values of output
+model = model.Operation("TOPK_V2_EX", i1, k).To([i2, i3])
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [3, 4, 5, 6,
+ 7, 8, 9, 1,
+ 2, 18, 19, 11]}
+
+output0 = {i2: # output 0
+ [3, 2,
+ 2, 1,
+ 2, 1],
+ i3: # output 1
+ [6, 5,
+ 9, 8,
+ 19, 18]}
+
+# Instantiate an example
+Example((input0, output0))