This commit adds tests for the CAST_EX operation and support for testing the extended APIs.
- Open the way to test extended operators.
- Add tests for the CAST_EX operation.
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
#!/usr/bin/python3
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
import sys
import contextlib
import pprint
+import re
@contextlib.contextmanager
def smart_open(filename=None):
def Definition(self):
    """Render the C++ statement that adds this operation to the model.

    Extended (NNFW vendor-specific) operations — those whose type name
    ends in ``_EX`` — must go through the ``addOperationEx`` wrapper;
    all stock NNAPI operations use ``addOperation``.

    Returns:
        str: one C++ statement, e.g.
        ``model->addOperationEx(ANEURALNETWORKS_CAST_EX, {0, 1}, {2});``
    """
    inputs = Operand.print_operands(self.ins)
    outputs = Operand.print_operands(self.outs)
    # A plain suffix check is clearer (and cheaper) than re.search('_EX$', ...).
    method = "addOperationEx" if self.optype.endswith("_EX") else "addOperation"
    return "model->" + method + "(ANEURALNETWORKS_" + self.optype + ", " + \
        "{" + ", ".join(inputs) + "}, {" + ", ".join(outputs) + "});"
# Get Python-ish dump for the op
#define __NNFW_RT_NEURAL_NETWORKS_WRAPPER_H__
#include "NeuralNetworks.h"
+#include "NeuralNetworksEx.h"
#include <math.h>
#include <vector>
mValid = false;
}
}
+
  // Adds an NNFW extended (vendor-specific) operation to the model.
  // Mirrors addOperation() above, but routes through the
  // ANeuralNetworksModel_addOperationEx entry point declared in
  // NeuralNetworksEx.h. On failure the wrapper marks the model
  // invalid (checked later via isValid()) rather than reporting
  // the error immediately.
  void addOperationEx(ANeuralNetworksOperationTypeEx type, const std::vector<uint32_t>& inputs,
                      const std::vector<uint32_t>& outputs) {
    if (ANeuralNetworksModel_addOperationEx(mModel, type, static_cast<uint32_t>(inputs.size()),
                                            inputs.data(), static_cast<uint32_t>(outputs.size()),
                                            outputs.data()) != ANEURALNETWORKS_NO_ERROR) {
      mValid = false;
    }
  }
+
void identifyInputsAndOutputs(const std::vector<uint32_t>& inputs,
const std::vector<uint32_t>& outputs) {
if (ANeuralNetworksModel_identifyInputsAndOutputs(
sub::is_ignored,
sub::examples);
}
+
// Wiring for the generated CAST_EX (FLOAT32 -> INT32) test: example data
// and the model constructor are textually included from generated sources,
// then run through the common GeneratedTests::execute harness.
namespace cast_ex_float32_to_int32 {
std::vector<MixedTypedExample> examples = {
// Generated cast_ex_float32_to_int32 test
#include "generated/examples/cast_ex_float32_to_int32.example.cpp"
};
// Generated model constructor
#include "generated/models/cast_ex_float32_to_int32.model.cpp"
} // namespace cast_ex_float32_to_int32
TEST_F(GeneratedTests, cast_ex_float32_to_int32) {
  execute(cast_ex_float32_to_int32::CreateModel,
          cast_ex_float32_to_int32::is_ignored,
          cast_ex_float32_to_int32::examples);
}
+
// Wiring for the generated CAST_EX (INT32 -> FLOAT32) test: example data
// and the model constructor are textually included from generated sources,
// then run through the common GeneratedTests::execute harness.
namespace cast_ex_int32_to_float32 {
std::vector<MixedTypedExample> examples = {
// Generated cast_ex_int32_to_float32 test
#include "generated/examples/cast_ex_int32_to_float32.example.cpp"
};
// Generated model constructor
#include "generated/models/cast_ex_int32_to_float32.model.cpp"
} // namespace cast_ex_int32_to_float32
TEST_F(GeneratedTests, cast_ex_int32_to_float32) {
  execute(cast_ex_int32_to_float32::CreateModel,
          cast_ex_int32_to_float32::is_ignored,
          cast_ex_int32_to_float32::examples);
}
--- /dev/null
+// Generated file (from: cast_ex_float32_to_int32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {100.0f, 20.0f, 3.0f, 0.4f, 0.999f, 1.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {100, 20, 3, 0, 0, 1}}},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: cast_ex_int32_to_float32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {{0, {100, 200, 300, 400, 500, 600}}},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: cast_ex_float32_to_int32.mod.py). Do not edit
// Builds the test model: CAST_EX from a {2, 3} FLOAT32 tensor (op1) to a
// {2, 3} INT32 tensor (op2). An INT32 scalar operand (act, constant 0) is
// passed as the operation's second input — NOTE(review): looks like a
// fuse_activation-style slot inherited from the generator template;
// confirm against the CAST_EX kernel's expected input signature.
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type2(Type::TENSOR_INT32, {2, 3});
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto act = model->addOperand(&type1);
  auto op2 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  model->addOperationEx(ANEURALNETWORKS_CAST_EX, {op1, act}, {op2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op2});
  assert(model->isValid());
}
+
// Returns true when example index `i` must be skipped by the harness.
// The generated skip set is empty, so every example runs.
bool is_ignored(int i) {
  static const std::set<int> ignore = {};
  return ignore.count(i) > 0;
}
--- /dev/null
+// Generated file (from: cast_ex_int32_to_float32.mod.py). Do not edit
// Builds the test model: CAST_EX from a {2, 3} INT32 tensor (op1) to a
// {2, 3} FLOAT32 tensor (op2). An INT32 scalar operand (act, constant 0)
// is passed as the operation's second input — NOTE(review): looks like a
// fuse_activation-style slot inherited from the generator template;
// confirm against the CAST_EX kernel's expected input signature.
void CreateModel(Model *model) {
  OperandType type1(Type::INT32, {});
  OperandType type2(Type::TENSOR_FLOAT32, {2, 3});
  OperandType type0(Type::TENSOR_INT32, {2, 3});
  // Phase 1, operands
  auto op1 = model->addOperand(&type0);
  auto act = model->addOperand(&type1);
  auto op2 = model->addOperand(&type2);
  // Phase 2, operations
  static int32_t act_init[] = {0};
  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
  model->addOperationEx(ANEURALNETWORKS_CAST_EX, {op1, act}, {op2});
  // Phase 3, inputs and outputs
  model->identifyInputsAndOutputs(
    {op1},
    {op2});
  assert(model->isValid());
}
+
// Reports whether example `i` is on the skip list; the generated
// list is empty, so this always answers false.
bool is_ignored(int i) {
  static const std::set<int> skipped{};
  return skipped.find(i) != skipped.cend();
}
--- /dev/null
# model
# Spec for the generated CAST_EX test: cast a {2, 3} FLOAT32 tensor to INT32.
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{2, 3}")
act = Int32Scalar("act", 0) # int32_t scalar passed as CAST_EX's 2nd input
                            # (0 here; presumably a fuse_activation slot -- confirm)
i2 = Output("op2", "TENSOR_INT32", "{2, 3}")
model = model.Operation("CAST_EX", i1, act).To(i2)

# Example 1. Input in operand 0,
# Expected outputs truncate toward zero: 0.4 -> 0, 0.999 -> 0, 1.1 -> 1.
input0 = {i1: # input 0
          [100.0, 20.0, 3.0, 0.4, 0.999, 1.1]}

output0 = {i2: # output 0
          [100, 20, 3, 0, 0, 1]}

# Instantiate an example
Example((input0, output0))
--- /dev/null
# model
# Spec for the generated CAST_EX test: cast a {2, 3} INT32 tensor to FLOAT32.
model = Model()
i1 = Input("op1", "TENSOR_INT32", "{2, 3}")
act = Int32Scalar("act", 0) # int32_t scalar passed as CAST_EX's 2nd input
                            # (0 here; presumably a fuse_activation slot -- confirm)
i2 = Output("op2", "TENSOR_FLOAT32", "{2, 3}")
model = model.Operation("CAST_EX", i1, act).To(i2)

# Example 1. Input in operand 0,
# Every INT32 value maps to the exactly-representable FLOAT32 value.
input0 = {i1: # input 0
          [100, 200, 300, 400, 500, 600]}

output0 = {i2: # output 0
          [100.0, 200.0, 300.0, 400.0, 500.0, 600.0]}

# Instantiate an example
Example((input0, output0))
# NNAPI spec generations whose test specs get processed. "Ex" selects the
# NNFW extended (vendor) operator specs such as CAST_EX.
NNAPI_VERSION="
V1_0
V1_1
Ex
"
# Process one test spec, and optionally provide the log file argument