From 7431f97800756af752b049523fb86bbac4803ee2 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=9E=A5=EC=A7=80=EC=84=AD/=EB=8F=99=EC=9E=91=EC=A0=9C?=
 =?utf8?q?=EC=96=B4Lab=28SR=29/Engineer/=EC=82=BC=EC=84=B1=EC=A0=84?=
 =?utf8?q?=EC=9E=90?=
Date: Wed, 23 May 2018 10:01:59 +0900
Subject: [PATCH] Add CAST_EX operation tests and support for testing extended
 APIs (#1279)

This commit adds tests for the CAST_EX operation and the support needed to
test the extended APIs.

- Open the way to test extended operators.
- Add tests for the CAST_EX operation.

Signed-off-by: jiseob.jang
---
 externals/nnapi_test_generator/test_generator.py  |  8 ++++++-
 runtimes/tests/include/NeuralNetworksWrapper.h    | 11 +++++++++
 .../generated/all_generated_tests.cpp             | 28 ++++++++++++++++++++++
 .../examples/cast_ex_float32_to_int32.example.cpp | 22 +++++++++++++++++
 .../examples/cast_ex_int32_to_float32.example.cpp | 22 +++++++++++++++++
 .../models/cast_ex_float32_to_int32.model.cpp     | 24 +++++++++++++++++++
 .../models/cast_ex_int32_to_float32.model.cpp     | 24 +++++++++++++++++++
 .../specs/Ex/cast_ex_float32_to_int32.mod.py      | 16 +++++++++++++
 .../specs/Ex/cast_ex_int32_to_float32.mod.py      | 16 +++++++++++++
 .../neural_networks_test/specs/generate_test.sh   |  1 +
 10 files changed, 171 insertions(+), 1 deletion(-)
 create mode 100644 runtimes/tests/neural_networks_test/generated/examples/cast_ex_float32_to_int32.example.cpp
 create mode 100644 runtimes/tests/neural_networks_test/generated/examples/cast_ex_int32_to_float32.example.cpp
 create mode 100644 runtimes/tests/neural_networks_test/generated/models/cast_ex_float32_to_int32.model.cpp
 create mode 100644 runtimes/tests/neural_networks_test/generated/models/cast_ex_int32_to_float32.model.cpp
 create mode 100644 runtimes/tests/neural_networks_test/specs/Ex/cast_ex_float32_to_int32.mod.py
 create mode 100644 runtimes/tests/neural_networks_test/specs/Ex/cast_ex_int32_to_float32.mod.py

diff --git a/externals/nnapi_test_generator/test_generator.py b/externals/nnapi_test_generator/test_generator.py
index 07b503b..922ef77 100755
--- a/externals/nnapi_test_generator/test_generator.py
+++ b/externals/nnapi_test_generator/test_generator.py
@@ -1,5 +1,6 @@
 #!/usr/bin/python3
 
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 # Copyright 2017, The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -30,6 +31,7 @@ import struct
 import sys
 import contextlib
 import pprint
+import re
 
 @contextlib.contextmanager
 def smart_open(filename=None):
@@ -410,7 +412,11 @@ class Operation(Definitions, Uses, Traversable):
   def Definition(self):
     inputs = Operand.print_operands(self.ins);
     outputs = Operand.print_operands(self.outs);
-    return "model->addOperation(ANEURALNETWORKS_"+self.optype+", " + \
+    if re.search('_EX$', self.optype):
+      return "model->addOperationEx(ANEURALNETWORKS_"+self.optype+", " + \
+        "{"+", ".join(inputs)+"}, {" + ", ".join(outputs) + "});"
+    else:
+      return "model->addOperation(ANEURALNETWORKS_"+self.optype+", " + \
         "{"+", ".join(inputs)+"}, {" + ", ".join(outputs) + "});"
 
   # Get Python-ish dump for the op
diff --git a/runtimes/tests/include/NeuralNetworksWrapper.h b/runtimes/tests/include/NeuralNetworksWrapper.h
index c0db78d..5662d6f 100644
--- a/runtimes/tests/include/NeuralNetworksWrapper.h
+++ b/runtimes/tests/include/NeuralNetworksWrapper.h
@@ -21,6 +21,7 @@
 #define __NNFW_RT_NEURAL_NETWORKS_WRAPPER_H__
 
 #include "NeuralNetworks.h"
+#include "NeuralNetworksEx.h"
 
 #include <math.h>
 #include <vector>
@@ -169,6 +170,16 @@ public:
             mValid = false;
         }
     }
+
+    void addOperationEx(ANeuralNetworksOperationTypeEx type, const std::vector<uint32_t>& inputs,
+                        const std::vector<uint32_t>& outputs) {
+        if (ANeuralNetworksModel_addOperationEx(mModel, type, static_cast<uint32_t>(inputs.size()),
+                                                inputs.data(), static_cast<uint32_t>(outputs.size()),
+                                                outputs.data()) != ANEURALNETWORKS_NO_ERROR) {
+            mValid = false;
+        }
+    }
+
     void identifyInputsAndOutputs(const std::vector<uint32_t>& inputs,
                                   const std::vector<uint32_t>& outputs) {
         if (ANeuralNetworksModel_identifyInputsAndOutputs(
diff --git a/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp b/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
index f9d5af2..9c735d0 100644
--- a/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
+++ b/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
@@ -2226,3 +2226,31 @@ TEST_F(GeneratedTests, sub) {
             sub::is_ignored,
             sub::examples);
 }
+
+namespace cast_ex_float32_to_int32 {
+std::vector<MixedTypedExample> examples = {
+// Generated cast_ex_float32_to_int32 test
+#include "generated/examples/cast_ex_float32_to_int32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/cast_ex_float32_to_int32.model.cpp"
+} // namespace cast_ex_float32_to_int32
+TEST_F(GeneratedTests, cast_ex_float32_to_int32) {
+    execute(cast_ex_float32_to_int32::CreateModel,
+            cast_ex_float32_to_int32::is_ignored,
+            cast_ex_float32_to_int32::examples);
+}
+
+namespace cast_ex_int32_to_float32 {
+std::vector<MixedTypedExample> examples = {
+// Generated cast_ex_int32_to_float32 test
+#include "generated/examples/cast_ex_int32_to_float32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/cast_ex_int32_to_float32.model.cpp"
+} // namespace cast_ex_int32_to_float32
+TEST_F(GeneratedTests, cast_ex_int32_to_float32) {
+    execute(cast_ex_int32_to_float32::CreateModel,
+            cast_ex_int32_to_float32::is_ignored,
+            cast_ex_int32_to_float32::examples);
+}
diff --git a/runtimes/tests/neural_networks_test/generated/examples/cast_ex_float32_to_int32.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/cast_ex_float32_to_int32.example.cpp
new file mode 100644
index 0000000..bf931ad
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/generated/examples/cast_ex_float32_to_int32.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: cast_ex_float32_to_int32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {100.0f, 20.0f, 3.0f, 0.4f, 0.999f, 1.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {100, 20, 3, 0, 0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/cast_ex_int32_to_float32.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/cast_ex_int32_to_float32.example.cpp
new file mode 100644
index 0000000..7a076e1
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/generated/examples/cast_ex_int32_to_float32.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: cast_ex_int32_to_float32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {100, 200, 300, 400, 500, 600}}},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/models/cast_ex_float32_to_int32.model.cpp b/runtimes/tests/neural_networks_test/generated/models/cast_ex_float32_to_int32.model.cpp
new file mode 100644
index 0000000..00a9385
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/generated/models/cast_ex_float32_to_int32.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: cast_ex_float32_to_int32.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type2(Type::TENSOR_INT32, {2, 3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_CAST_EX, {op1, act}, {op2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/cast_ex_int32_to_float32.model.cpp b/runtimes/tests/neural_networks_test/generated/models/cast_ex_int32_to_float32.model.cpp
new file mode 100644
index 0000000..3f9715f
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/generated/models/cast_ex_int32_to_float32.model.cpp
@@ -0,0 +1,24 @@
+// Generated file (from: cast_ex_int32_to_float32.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type0(Type::TENSOR_INT32, {2, 3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_CAST_EX, {op1, act}, {op2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_float32_to_int32.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_float32_to_int32.mod.py
new file mode 100644
index 0000000..102dcdf
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_float32_to_int32.mod.py
@@ -0,0 +1,16 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 3}")
+act = Int32Scalar("act", 0) # an int32_t scalar fuse_activation
+i2 = Output("op2", "TENSOR_INT32", "{2, 3}")
+model = model.Operation("CAST_EX", i1, act).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [100.0, 20.0, 3.0, 0.4, 0.999, 1.1]}
+
+output0 = {i2: # output 0
+           [100, 20, 3, 0, 0, 1]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_int32_to_float32.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_int32_to_float32.mod.py
new file mode 100644
index 0000000..b20b4ae
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_int32_to_float32.mod.py
@@ -0,0 +1,16 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{2, 3}")
+act = Int32Scalar("act", 0) # an int32_t scalar fuse_activation
+i2 = Output("op2", "TENSOR_FLOAT32", "{2, 3}")
+model = model.Operation("CAST_EX", i1, act).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [100, 200, 300, 400, 500, 600]}
+
+output0 = {i2: # output 0
+           [100.0, 200.0, 300.0, 400.0, 500.0, 600.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/generate_test.sh b/runtimes/tests/neural_networks_test/specs/generate_test.sh
index f63aa58..c0676cf 100755
--- a/runtimes/tests/neural_networks_test/specs/generate_test.sh
+++ b/runtimes/tests/neural_networks_test/specs/generate_test.sh
@@ -17,6 +17,7 @@
 NNAPI_VERSION="
 V1_0
 V1_1
+Ex
 "
 
 # Process one test spec, and optionally provide the log file argument
-- 
2.7.4
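For reference, this is how a follow-up extended-operator test would plug into
the flow opened by this patch: with "Ex" added to NNAPI_VERSION, running
generate_test.sh picks up any *.mod.py spec placed under
runtimes/tests/neural_networks_test/specs/Ex/, and test_generator.py now emits
model->addOperationEx(...) for any operator whose name ends in "_EX". The spec
below is only a minimal sketch of that convention; the operator name ABS_EX,
its single input, and the shapes are hypothetical and are not added by this
patch.

    # model
    model = Model()
    i1 = Input("op1", "TENSOR_FLOAT32", "{1, 4}")   # hypothetical shape
    i2 = Output("op2", "TENSOR_FLOAT32", "{1, 4}")
    # ABS_EX is a placeholder name; the "_EX" suffix routes it to addOperationEx
    model = model.Operation("ABS_EX", i1).To(i2)

    # Example 1. Input in operand 0,
    input0 = {i1: # input 0
              [-1.0, 2.0, -3.0, 4.0]}
    output0 = {i2: # output 0
               [1.0, 2.0, 3.0, 4.0]}

    # Instantiate an example
    Example((input0, output0))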