dequantize::examples);
}
+namespace embedding_lookup_2d_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated embedding_lookup_2d_nnfw test
+#include "generated/examples/embedding_lookup_2d_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/embedding_lookup_2d_nnfw.model.cpp"
+} // namespace embedding_lookup_2d_nnfw
+// Runs the generated 2-D embedding-lookup model against its generated
+// example data (both produced from embedding_lookup_2d_nnfw.mod.py).
+TEST_F(GeneratedTests, embedding_lookup_2d_nnfw) {
+ execute(embedding_lookup_2d_nnfw::CreateModel,
+ embedding_lookup_2d_nnfw::is_ignored,
+ embedding_lookup_2d_nnfw::examples);
+}
+
+namespace embedding_lookup_4d_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated embedding_lookup_4d_nnfw test
+#include "generated/examples/embedding_lookup_4d_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/embedding_lookup_4d_nnfw.model.cpp"
+} // namespace embedding_lookup_4d_nnfw
+// Runs the generated 4-D embedding-lookup model against its generated
+// example data (both produced from embedding_lookup_4d_nnfw.mod.py).
+TEST_F(GeneratedTests, embedding_lookup_4d_nnfw) {
+ execute(embedding_lookup_4d_nnfw::CreateModel,
+ embedding_lookup_4d_nnfw::is_ignored,
+ embedding_lookup_4d_nnfw::examples);
+}
+
namespace embedding_lookup {
std::vector<MixedTypedExample> examples = {
// Generated embedding_lookup test
--- /dev/null
+// Generated file (from: embedding_lookup_2d_nnfw.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Input 1 ("value"): the 3x2 lookup table; row r holds {r.0, r.1}.
+ {{1, {0.0f, 0.1f, 1.0f, 1.1f, 2.0f, 2.1f}}},
+ // int -> INT32 map
+ // Input 0 ("index"): select table rows 1, 0, 2 in that order.
+ {{0, {1, 0, 2}}},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Output 0: rows 1, 0, 2 of the table, concatenated.
+ {{0, {1.0f, 1.1f, 0.0f, 0.1f, 2.0f, 2.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: embedding_lookup_4d_nnfw.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ // Input 0 ("index"): select slices 4, 0, 2 of the value table.
+ // Input 1 ("value"): 5x2x4x2 table filled with 0..79, so slice n
+ // occupies the 16 values [16n, 16n+16).
+ {{0, {4, 0, 2}}, {1, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}}},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ // Output 0: slices 4, 0, 2 of the table, concatenated.
+ {{0, {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}}},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: embedding_lookup_2d_nnfw.mod.py). Do not edit
+// Builds the 2-D embedding-lookup test model: "index" (3 INT32 values)
+// selects rows of "value" (3x2 FLOAT32). The output reuses type1 because
+// lookups == rows here, so the result is also 3x2 FLOAT32.
+void CreateModel(Model *model) {
+ OperandType type1(Type::TENSOR_FLOAT32, {3, 2});
+ OperandType type0(Type::TENSOR_INT32, {3});
+ // Phase 1, operands
+ auto index = model->addOperand(&type0);
+ auto value = model->addOperand(&type1);
+ auto output = model->addOperand(&type1);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_EMBEDDING_LOOKUP, {index, value}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {index, value},
+ {output});
+ assert(model->isValid());
+}
+
+// True if output element i should be excluded from result comparison;
+// the generated ignore set is empty, so nothing is skipped for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: embedding_lookup_4d_nnfw.mod.py). Do not edit
+// Builds the 4-D embedding-lookup test model: "index" (3 INT32 values)
+// selects 2x4x2 slices along the first dimension of "value"
+// (5x2x4x2 INT32), producing a 3x2x4x2 INT32 output.
+void CreateModel(Model *model) {
+ OperandType type2(Type::TENSOR_INT32, {3, 2, 4, 2});
+ OperandType type0(Type::TENSOR_INT32, {3});
+ OperandType type1(Type::TENSOR_INT32, {5, 2, 4, 2});
+ // Phase 1, operands
+ auto index = model->addOperand(&type0);
+ auto value = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_EMBEDDING_LOOKUP, {index, value}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {index, value},
+ {output});
+ assert(model->isValid());
+}
+
+// True if output element i should be excluded from result comparison;
+// the generated ignore set is empty, so nothing is skipped for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
GeneratedTests.depth_to_space_float_3
GeneratedTests.depth_to_space_quant8_1
GeneratedTests.depth_to_space_quant8_2
-GeneratedTests.embedding_lookup
GeneratedTests.hashtable_lookup_float
GeneratedTests.hashtable_lookup_quant8
GeneratedTests.l2_normalization
GeneratedTests.depthwise_conv
GeneratedTests.dequantize
GeneratedTests.embedding_lookup
+GeneratedTests.embedding_lookup_2d_nnfw
+GeneratedTests.embedding_lookup_4d_nnfw
GeneratedTests.floor_
GeneratedTests.hashtable_lookup*
GeneratedTests.l2_normalization*
GeneratedTests.depth_to_space_float_3
GeneratedTests.depth_to_space_quant8_1
GeneratedTests.depth_to_space_quant8_2
-GeneratedTests.embedding_lookup
GeneratedTests.hashtable_lookup_float
GeneratedTests.hashtable_lookup_quant8
GeneratedTests.l2_normalization
--- /dev/null
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+lookups = 3
+rows = 3
+columns = 2
+
+actual_values = [x for x in range(rows * columns)]
+for i in range(rows):
+ for j in range(columns):
+ actual_values[(i * columns + j)] = i + j / 10.
+
+model = Model()
+index = Input("index", "TENSOR_INT32", "{%d}"%lookups)
+value = Input("value", "TENSOR_FLOAT32", "{%d, %d}" % (rows, columns))
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (lookups, columns))
+model = model.Operation("EMBEDDING_LOOKUP", index, value).To(output)
+
+input0 = {index: [1, 0, 2],
+ value: actual_values}
+
+output0 = {output:
+ [
+ 1.0, 1.1, # Row 1
+ 0.0, 0.1, # Row 0
+ 2.0, 2.1, # Row 2
+ ]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+lookups = 3
+N = 5
+H = 2
+W = 4
+C = 2
+
+actual_values = [x for x in range(N * H * W * C)]
+
+model = Model()
+index = Input("index", "TENSOR_INT32", "{%d}"%lookups)
+value = Input("value", "TENSOR_INT32", "{%d, %d, %d, %d}" % (N, H, W, C))
+output = Output("output", "TENSOR_INT32", "{%d, %d, %d, %d}" % (lookups, H, W, C))
+model = model.Operation("EMBEDDING_LOOKUP", index, value).To(output)
+
+input0 = {index: [4, 0, 2],
+ value: actual_values}
+
+output0 = {output:
+ [
+ 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, # Row 4
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, # Row 0
+ 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, # Row 2
+ ]}
+
+# Instantiate an example
+Example((input0, output0))