# NN-API Test Generator
-Original code is at https://android.googlesource.com/platform/frameworks/ml/+/ee61649/nn/tools/test_generator/
+Original code is at https://android.googlesource.com/platform/frameworks/ml/+/refs/tags/android-10.0.0_r2/nn/tools/test_generator/
### Fix for neurun
- `nn/runtime/test/specs/` => `tests/nnapi/specs/`
- $ANDROID_BUILD_TOP/frameworks/ml/nn/runtime/test/specs => $NNAS_PROJECT_PATH/tests/nnapi/specs
- Rebuild with mm afterwards => Rebuild afterwards (mm is not supported)
+- Remove script and directories
+ - `slicing.py`
+ - `tests/`
+ - `include/` (`TestHarness.h` is in `tests/nnapi/include`)
---
This is useful when only a subset of variations has a different version.
-### Creating negative tests
-
-Negative test, also known as validation test, is a testing method that supplies invalid model or request, and expects the target framework or driver to fail gracefully. You can use `ExpectFailure` to tag a example as invalid.
-
-```Python
-Example.ExpectFailure()
-```
-
### A Complete Example
```Python
$NNAS_PROJECT_PATH/tests/nnapi/specs/generate_vts_test.sh
```
-It will read and generate all CTS/VTS unit tests based on spec files in `tests/nnapi/specs/V1_*/*` if needed. CTS test generator is able to identify which spec files are modified since last generation and only regenerate those files to reduce compilation time. To force a regeneration, use `-f` flag. The VTS test generator will regenerate tests targeting the latest HAL version by default. Pass the `all` positional argument to override.
+It will read and generate all CTS/VTS unit tests based on spec files in `tests/nnapi/specs/V1_*/*` if needed. CTS test generator is able to identify which spec files are modified since last generation and only regenerate those files to reduce compilation time. To force a regeneration, use `-f` flag.
+
+```
+$NNAS_PROJECT_PATH/tests/nnapi/specs/generate_test.sh -f
+```
+
+If you only want to regenerate a certain set of files, simply append the file names to the end of the command, and optionally, use `-f` flag.
+
+```
+$NNAS_PROJECT_PATH/tests/nnapi/specs/generate_test.sh -f file1.mod.py file2.mod.py ...
+```
-Rebuild afterwards.
+Rebuild with mm afterwards.
parser.add_argument(
"-t", "--test", help="the output test file/directory", default="-")
parser.add_argument(
+ "-c", "--cts", help="the CTS TestGeneratedOneFile.cpp", default="-")
+ parser.add_argument(
"-f", "--force", help="force to regenerate all spec files", action="store_true")
+ # for slicing tool
+ parser.add_argument(
+ "-l", "--log", help="the optional log file", default="")
args = parser.parse_args()
- tg.FileNames.InitializeFileLists(args.spec, args.model, args.example, args.test)
+ tg.FileNames.InitializeFileLists(
+ args.spec, args.model, args.example, args.test, args.cts, args.log)
Configuration.force_regenerate = args.force
def NeedRegenerate():
# Write headers for generated files, which are boilerplate codes only related to filenames
def InitializeFiles(model_fd, example_fd, test_fd):
+ fileHeader = "// clang-format off\n// Generated file (from: {spec_file}). Do not edit"
+ testFileHeader = """\
+#include "../../TestGenerated.h"\n
+namespace {spec_name} {{
+// Generated {spec_name} test
+#include "{example_file}"
+// Generated model constructor
+#include "{model_file}"
+}} // namespace {spec_name}\n"""
+ # This regex is to remove prefix and get relative path for #include
+ pathRegex = r".*((frameworks/ml/nn/(runtime/test/)?)|(vendor/google/[a-z]*/test/))"
specFileBase = os.path.basename(tg.FileNames.specFile)
- fileHeader = """\
-// Generated from {spec_file}
-// DO NOT EDIT
-// clang-format off
-#include "{header}"
-"""
- print(fileHeader.format(spec_file=specFileBase, header="TestGenerated.h"), file=test_fd)
- print(fileHeader.format(spec_file=specFileBase, header="TestGenerated.h"), file=model_fd)
- print(fileHeader.format(spec_file=specFileBase, header="TestHarness.h"), file=example_fd)
+ print(fileHeader.format(spec_file=specFileBase), file=model_fd)
+ print(fileHeader.format(spec_file=specFileBase), file=example_fd)
+ print(fileHeader.format(spec_file=specFileBase), file=test_fd)
+ print(testFileHeader.format(
+ model_file=re.sub(pathRegex, "", tg.FileNames.modelFile),
+ example_file=re.sub(pathRegex, "", tg.FileNames.exampleFile),
+ spec_name=tg.FileNames.specName), file=test_fd)
# Dump is_ignored function for IgnoredOutput
def DumpCtsIsIgnored(model, model_fd):
isIgnoredTemplate = """\
-bool {is_ignored_name}(int i) {{
+inline bool {is_ignored_name}(int i) {{
static std::set<int> ignore = {{{ignored_index}}};
- return ignore.find(i) != ignore.end();
-}}
-"""
+ return ignore.find(i) != ignore.end();\n}}\n"""
print(isIgnoredTemplate.format(
ignored_index=tg.GetJointStr(model.GetIgnoredOutputs(), method=lambda x: str(x.index)),
is_ignored_name=str(model.isIgnoredFunctionName)), file=model_fd)
assert model.compiled
if model.dumped:
return
- namespace = "generated_tests::{spec_name}".format(spec_name=tg.FileNames.specName)
- print("namespace {namespace} {{\n".format(namespace=namespace), file=model_fd)
print("void %s(Model *model) {"%(model.createFunctionName), file=model_fd)
# Phase 0: types
typeDef = "OperandType %s(Type::%s, %s, %s, %d);"%(
t, t.type, t.GetDimensionsString(), tg.PrettyPrintAsFloat(t.scale), t.zeroPoint)
else:
- assert t.type == "TENSOR_QUANT8_SYMM_PER_CHANNEL", "Unexpected model configuration. " \
- "Extra params are currently expected for " \
- "TENSOR_QUANT8_SYMM_PER_CHANNEL operand type. "
- assert t.scale == 0.0 and t.zeroPoint == 0, "Scale and zero point are always zero for " \
- "TENSOR_QUANT8_SYMM_PER_CHANNEL operands"
- typeDef = "OperandType %s(Type::%s, %s, %s);"%(
- t, t.type, t.GetDimensionsString(), t.extraParams.GetConstructor())
+ typeDef = "OperandType %s(Type::%s, %s, %s, %d, %s);"%(
+ t, t.type, t.GetDimensionsString(), tg.PrettyPrintAsFloat(t.scale), t.zeroPoint,
+ t.extraParams.GetConstructor())
IndentedPrint(typeDef, file=model_fd)
print (" assert(model->isValid());", file=model_fd)
print ("}\n", file=model_fd)
DumpCtsIsIgnored(model, model_fd)
- print("}} // namespace {namespace}".format(namespace=namespace), file=model_fd)
model.dumped = True
def DumpMixedType(operands, feedDict):
# Dump Example file for Cts tests
def DumpCtsExample(example, example_fd):
- namespace = "generated_tests::{spec_name}".format(spec_name=tg.FileNames.specName)
- print("namespace {namespace} {{\n".format(namespace=namespace), file=example_fd)
- print("std::vector<::test_helper::MixedTypedExample>& get_%s() {" % (example.examplesName), file=example_fd)
- print("static std::vector<::test_helper::MixedTypedExample> %s = {" % (example.examplesName), file=example_fd)
+ print("std::vector<MixedTypedExample>& get_%s() {" % (example.examplesName), file=example_fd)
+ print("static std::vector<MixedTypedExample> %s = {" % (example.examplesName), file=example_fd)
for inputFeedDict, outputFeedDict in example.feedDicts:
print ('// Begin of an example', file = example_fd)
print ('{\n.operands = {', file = example_fd)
print ('}, // End of an example', file = example_fd)
print("};", file=example_fd)
print("return %s;" % (example.examplesName), file=example_fd)
- print("};", file=example_fd)
- print("\n}} // namespace {namespace}".format(namespace=namespace), file=example_fd)
+ print("};\n", file=example_fd)
# Dump Test file for Cts tests
def DumpCtsTest(example, test_fd):
- namespace = "generated_tests::{spec_name}".format(spec_name=tg.FileNames.specName)
testTemplate = """\
-namespace {namespace} {{
-
-void {create_model_name}(Model *model);
-bool {is_ignored_name}(int);
-std::vector<::test_helper::MixedTypedExample>& get_{examples_name}();
-
TEST_F({test_case_name}, {test_name}) {{
- execute({create_model_name},
- {is_ignored_name},
- get_{examples_name}());
-}}
-
-}} // namespace {namespace}
-"""
- if example.model.version is not None and not example.expectFailure:
+ execute({namespace}::{create_model_name},
+ {namespace}::{is_ignored_name},
+ {namespace}::get_{examples_name}(){log_file});\n}}\n"""
+ if example.model.version is not None:
testTemplate += """\
TEST_AVAILABLE_SINCE({version}, {test_name}, {namespace}::{create_model_name})\n"""
-
- if example.expectFailure:
- testCaseName = "GeneratedValidationTests"
- elif example.model.hasDynamicOutputShape:
- testCaseName = "DynamicOutputShapeTest"
- else:
- testCaseName = "GeneratedTests"
-
print(testTemplate.format(
- test_case_name=testCaseName,
+ test_case_name="DynamicOutputShapeTest" if example.model.hasDynamicOutputShape \
+ else "GeneratedTests",
test_name=str(example.testName),
- namespace=namespace,
+ namespace=tg.FileNames.specName,
create_model_name=str(example.model.createFunctionName),
is_ignored_name=str(example.model.isIgnoredFunctionName),
examples_name=str(example.examplesName),
- version=example.model.version), file=test_fd)
+ version=example.model.version,
+ log_file=tg.FileNames.logFile), file=test_fd)
if __name__ == '__main__':
ParseCmdLine()
while tg.FileNames.NextFile():
if Configuration.force_regenerate or NeedRegenerate():
- print("Generating CTS tests from spec %s" % tg.FileNames.specFile, file=sys.stderr)
+ print("Generating test(s) from spec: %s" % tg.FileNames.specFile, file=sys.stderr)
exec(open(tg.FileNames.specFile, "r").read())
+ print("Output CTS model: %s" % tg.FileNames.modelFile, file=sys.stderr)
+ print("Output example:%s" % tg.FileNames.exampleFile, file=sys.stderr)
+ print("Output CTS test: %s" % tg.FileNames.testFile, file=sys.stderr)
with SmartOpen(tg.FileNames.modelFile) as model_fd, \
SmartOpen(tg.FileNames.exampleFile) as example_fd, \
SmartOpen(tg.FileNames.testFile) as test_fd:
DumpTest=DumpCtsTest, test_fd=test_fd)
else:
print("Skip file: %s" % tg.FileNames.specFile, file=sys.stderr)
+ with SmartOpen(tg.FileNames.ctsFile, mode="a") as cts_fd:
+ print("#include \"../generated/tests/%s.cpp\""%os.path.basename(tg.FileNames.specFile),
+ file=cts_fd)
--- /dev/null
+#!/usr/bin/python3
+
+# Copyright 2019, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Spec Visualizer
+
+Visualize python spec file for test generator.
+
+Modified from TFLite graph visualizer -- instead of flatbuffer, takes spec file as input.
+(https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/tools/visualize.py)
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import argparse
+import fnmatch
+import json
+import math
+import os
+import re
+import sys
+import traceback
+
+# Stuff from test generator
+import test_generator as tg
+from test_generator import ActivationConverter
+from test_generator import BoolScalar
+from test_generator import Configuration
+from test_generator import DataTypeConverter
+from test_generator import DataLayoutConverter
+from test_generator import Example
+from test_generator import Float32Scalar
+from test_generator import Float32Vector
+from test_generator import GetJointStr
+from test_generator import IgnoredOutput
+from test_generator import Input
+from test_generator import Int32Scalar
+from test_generator import Int32Vector
+from test_generator import Internal
+from test_generator import Model
+from test_generator import Operand
+from test_generator import Output
+from test_generator import Parameter
+from test_generator import ParameterAsInputConverter
+from test_generator import RelaxedModeConverter
+from test_generator import SmartOpen
+
+# A CSS description for making the visualizer
+_CSS = """
+<html>
+<head>
+<style>
+body {font-family: sans-serif; background-color: #ffaa00;}
+table {background-color: #eeccaa;}
+th {background-color: black; color: white;}
+h1 {
+ background-color: ffaa00;
+ padding:5px;
+ color: black;
+}
+
+div {
+ border-radius: 5px;
+ background-color: #ffeecc;
+ padding:5px;
+ margin:5px;
+}
+
+.tooltip {color: blue;}
+.tooltip .tooltipcontent {
+ visibility: hidden;
+ color: black;
+ background-color: yellow;
+ padding: 5px;
+ border-radius: 4px;
+ position: absolute;
+ z-index: 1;
+}
+.tooltip:hover .tooltipcontent {
+ visibility: visible;
+}
+
+.edges line {
+ stroke: #333333;
+}
+
+.nodes text {
+ color: black;
+ pointer-events: none;
+ font-family: sans-serif;
+ font-size: 11px;
+}
+</style>
+
+<script src="https://d3js.org/d3.v4.min.js"></script>
+
+</head>
+<body>
+"""
+
+_D3_HTML_TEMPLATE = """
+ <script>
+ // Build graph data
+ var graph = %s;
+
+ var svg = d3.select("#subgraph_%s");
+ var width = svg.attr("width");
+ var height = svg.attr("height");
+ var color = d3.scaleOrdinal(d3.schemeCategory20);
+
+ var simulation = d3.forceSimulation()
+ .force("link", d3.forceLink().id(function(d) {return d.id;}))
+ .force("charge", d3.forceManyBody())
+ .force("center", d3.forceCenter(0.5 * width, 0.5 * height));
+
+
+ function buildGraph() {
+ var edge = svg.append("g").attr("class", "edges").selectAll("line")
+ .data(graph.edges).enter().append("line")
+ // Make the node group
+ var node = svg.selectAll(".nodes")
+ .data(graph.nodes)
+ .enter().append("g")
+ .attr("class", "nodes")
+ .call(d3.drag()
+ .on("start", function(d) {
+ if(!d3.event.active) simulation.alphaTarget(1.0).restart();
+ d.fx = d.x;d.fy = d.y;
+ })
+ .on("drag", function(d) {
+ d.fx = d3.event.x; d.fy = d3.event.y;
+ })
+ .on("end", function(d) {
+ if (!d3.event.active) simulation.alphaTarget(0);
+ d.fx = d.fy = null;
+ }));
+ // Within the group, draw a circle for the node position and text
+ // on the side.
+ node.append("circle")
+ .attr("r", "5px")
+ .attr("fill", function(d) { return color(d.group); })
+ node.append("text")
+ .attr("dx", 8).attr("dy", 5).text(function(d) { return d.name; });
+ // Setup force parameters and update position callback
+ simulation.nodes(graph.nodes).on("tick", forceSimulationUpdated);
+ simulation.force("link").links(graph.edges);
+
+ function forceSimulationUpdated() {
+ // Update edges.
+ edge.attr("x1", function(d) {return d.source.x;})
+ .attr("y1", function(d) {return d.source.y;})
+ .attr("x2", function(d) {return d.target.x;})
+ .attr("y2", function(d) {return d.target.y;});
+ // Update node positions
+ node.attr("transform", function(d) { return "translate(" + d.x + "," + d.y + ")"; });
+ }
+ }
+ buildGraph()
+</script>
+"""
+
+class OpCodeMapper(object):
+  """Maps an opcode index to an op name."""
+
+  def __init__(self, data):
+    # `data` is a model dict with an "operator_codes" list; each entry is
+    # expected to carry the op name under "builtin_code".
+    self.code_to_name = {}
+    for idx, d in enumerate(data["operator_codes"]):
+      self.code_to_name[idx] = d["builtin_code"]
+
+  def __call__(self, x):
+    # Render "<name> (opcode=<x>)"; indices never registered in __init__
+    # render as "<UNKNOWN>".
+    if x not in self.code_to_name:
+      s = "<UNKNOWN>"
+    else:
+      s = self.code_to_name[x]
+    return "%s (opcode=%d)" % (s, x)
+
+
+class DataSizeMapper(object):
+  """For buffers, report the number of bytes."""
+
+  def __call__(self, x):
+    # `x` is a sized buffer-like object, or None when the buffer is absent.
+    if x is not None:
+      return "%d bytes" % len(x)
+    else:
+      return "--"
+
+
+class TensorMapper(object):
+  """Maps a list of tensor indices to a tooltip hoverable indicator of more."""
+
+  def __init__(self, subgraph_data):
+    # `subgraph_data` is a graph dict with an "operands" list of per-operand
+    # dicts (keys "name", "type", and optionally "dimensions").
+    self.data = subgraph_data
+
+  def __call__(self, x):
+    # Render the index list `x` as HTML: repr(x) plus a hover tooltip that
+    # expands each index into "<idx> <name> <type> <dimensions>".
+    html = ""
+    html += "<span class='tooltip'><span class='tooltipcontent'>"
+    for i in x:
+      tensor = self.data["operands"][i]
+      html += str(i) + " "
+      html += tensor["name"] + " "
+      html += str(tensor["type"]) + " "
+      html += (repr(tensor["dimensions"]) if "dimensions" in tensor else "[]") + "<br>"
+    html += "</span>"
+    html += repr(x)
+    html += "</span>"
+    return html
+
+def GenerateGraph(g):
+  """Produces the HTML required to have a d3 visualization of the dag."""
+
+# def TensorName(idx):
+#   return "t%d" % idx
+
+  def OpName(idx):
+    # d3 node id for operation #idx.
+    return "o%d" % idx
+
+  edges = []
+  nodes = []
+  first = {}        # tensor name -> (x, y) assigned at its first appearance
+  pixel_mult = 50   # node spacing, in pixels
+  for op_index, op in enumerate(g["operations"]):
+    for tensor in op["inputs"]:
+      # NOTE(review): the membership test uses the tensor *object* while the
+      # stored key is str(tensor); these only agree if the operand object
+      # hashes/compares like its string form — otherwise the guard never
+      # fires and later occurrences overwrite the position. TODO confirm
+      # against test_generator's Operand class.
+      if tensor not in first:
+        first[str(tensor)] = (
+            op_index * pixel_mult,
+            len(first) * pixel_mult - pixel_mult / 2)
+      edges.append({
+          "source": str(tensor),
+          "target": OpName(op_index)
+      })
+    for tensor in op["outputs"]:
+      edges.append({
+          "target": str(tensor),
+          "source": OpName(op_index)
+      })
+    # One node per operation (color group 2), laid out top-to-bottom.
+    nodes.append({
+        "id": OpName(op_index),
+        "name": op["opcode"],
+        "group": 2,
+        "x": pixel_mult,
+        "y": op_index * pixel_mult
+    })
+  for tensor_index, tensor in enumerate(g["operands"]):
+    # NOTE(review): values stored in `first` are (x, y) tuples, yet initial_y
+    # is used below as a scalar "y" coordinate — presumably the d3 force
+    # simulation overrides it; verify this matches the upstream visualizer.
+    initial_y = (
+        first[tensor["name"]] if tensor["name"] in first else len(g["operations"]))
+
+    # One node per operand (color group 1).
+    nodes.append({
+        "id": tensor["name"],
+        "name": "%s (%d)" % (tensor["name"], tensor_index),
+        "group": 1,
+        "x": 2,
+        "y": initial_y
+    })
+  graph_str = json.dumps({"nodes": nodes, "edges": edges})
+
+  # Splice the JSON graph and the target <svg> id into the d3 script template.
+  html = _D3_HTML_TEMPLATE % (graph_str, g["name"])
+  return html
+
+def GenerateTableHtml(items, keys_to_print, display_index=True):
+  """Given a list of object values and keys to print, make an HTML table.
+
+  Args:
+    items: Items to print an array of dicts.
+    keys_to_print: (key, display_fn). `key` is a key in the object. i.e.
+      items[0][key] should exist. display_fn is the mapping function on display.
+      i.e. the displayed html cell will have the string returned by
+      `mapping_fn(items[0][key])`.
+    display_index: add a column which is the index of each row in `items`.
+  Returns:
+    An html table.
+  """
+  html = ""
+  # Print the list of items
+  # NOTE(review): "<table><tr>" immediately followed by another "<tr>" emits a
+  # stray empty row; browsers tolerate it (same quirk as the upstream TFLite
+  # visualizer this was copied from).
+  html += "<table><tr>\n"
+  html += "<tr>\n"
+  if display_index:
+    html += "<th>index</th>"
+  for h, mapper in keys_to_print:
+    html += "<th>%s</th>" % h
+  html += "</tr>\n"
+  for idx, tensor in enumerate(items):
+    html += "<tr>\n"
+    if display_index:
+      html += "<td>%d</td>" % idx
+    # print tensor.keys()
+    for h, mapper in keys_to_print:
+      # Missing keys render as the mapped value of None (e.g. "--").
+      val = tensor[h] if h in tensor else None
+      val = val if mapper is None else mapper(val)
+      html += "<td>%s</td>\n" % val
+
+    html += "</tr>\n"
+  html += "</table>\n"
+  return html
+
+
+def CreateHtmlFile(g, fd):
+  """Append an HTML section describing graph dict `g` to file object `fd`.
+
+  Unlike the upstream TFLite visualizer this was adapted from, the input is
+  the already-flattened graph dict built by VisualizeModel, not a .tflite
+  file. Emits configuration, inputs/outputs, operand and operation tables,
+  followed by the d3 visual graph.
+  """
+  html = ""
+
+  # Subgraph local specs on what to display
+  html += "<div class='subgraph'>"
+  tensor_mapper = lambda l: ", ".join(str(op) for op in l)
+  op_keys_to_display = [("opcode", None), ("inputs", tensor_mapper), ("outputs", tensor_mapper)]
+  tensor_keys_to_display = [("name", None), ("type", None), ("dimensions", None), ("scale", None),
+                            ("zero_point", None), ("lifetime", None)]
+  html += "<h2>%s</h2>\n" % g["name"]
+
+  # Configurations.
+  html += "<h3>Configurations</h3>\n"
+  html += GenerateTableHtml(
+      [g["options"]], [(k, None) for k in g["options"].keys()], display_index=False)
+
+  # Inputs and outputs.
+  html += "<h3>Inputs/Outputs</h3>\n"
+  html += GenerateTableHtml(
+      [{
+          "inputs": g["inputs"],
+          "outputs": g["outputs"]
+      }], [("inputs", tensor_mapper), ("outputs", tensor_mapper)],
+      display_index=False)
+
+  # Print the operands.
+  html += "<h3>Operands</h3>\n"
+  html += GenerateTableHtml(g["operands"], tensor_keys_to_display)
+
+  # Print the operations.
+  html += "<h3>Operations</h3>\n"
+  html += GenerateTableHtml(g["operations"], op_keys_to_display)
+
+  # Visual graph: svg height scales with the op count; width is clamped to
+  # the [200, 1600] pixel range.
+  html += "<h3>Visual Graph</h3>\n"
+  html += "<svg id='subgraph_%s' width='%d' height='%d'></svg>\n"%(
+      g["name"], max(min(len(g["operations"])*100, 1600), 200), len(g["operations"])*100)
+  html += GenerateGraph(g)
+  html += "</div>"
+
+  fd.write(html)
+
+def InitializeHtml(fd):
+  # Emit the CSS/JS boilerplate (_CSS opens <html>/<body>) and a page title
+  # naming the current spec.
+  html = ""
+  html += _CSS
+  html += "<h1>%s</h1>"%(tg.FileNames.specName)
+  fd.write(html)
+
+def FinalizeHtml(fd):
+  # Close the <body>/<html> tags opened via _CSS in InitializeHtml.
+  fd.write("</body></html>\n")
+
+def VisualizeModel(example, fd):
+  """Flatten one Example variation into a graph dict and append its HTML.
+
+  Relies on the module-level `varName` (bound in the __main__ block) to
+  optionally skip variations whose test name does not fnmatch the pattern.
+  Passed as DumpExample to Example.DumpAllExamples.
+  """
+  if varName is not None and not fnmatch.fnmatch(str(example.testName), varName):
+    print(" Skip variation %s" % example.testName)
+    return
+  print(" Visualizing variation %s" % example.testName)
+  model = example.model
+  g = {}
+  # Top-level flags shown in the "Configurations" table.
+  g["options"] = {"relaxed": str(model.isRelaxed), "useSHM": str(tg.Configuration.useSHM())}
+  g["name"] = str(example.testName)
+  g["inputs"] = model.GetInputs()
+  g["outputs"] = model.GetOutputs()
+  # Flatten operands/operations into the plain dicts CreateHtmlFile expects.
+  g["operands"] = [{
+      "name": str(op), "type": op.type.type, "dimensions": op.type.GetDimensionsString(),
+      "scale": op.type.scale, "zero_point": op.type.zeroPoint, "lifetime": op.lifetime
+  } for op in model.operands]
+  g["operations"] = [{
+      "inputs": op.ins, "outputs": op.outs, "opcode": op.optype
+  } for op in model.operations]
+  CreateHtmlFile(g, fd)
+
+# Take a model from command line
+def ParseCmdLine():
+  """Parse argv; return (abs spec path, variation pattern or None, abs out path).
+
+  Also primes tg.FileNames with dummy ("-") output targets so that
+  NextFile() populates specName, which InitializeHtml uses for the title.
+  """
+  parser = argparse.ArgumentParser()
+  parser.add_argument("spec", help="the spec file")
+  parser.add_argument(
+      "-v", "--variation", help="the target variation name/pattern", default=None)
+  parser.add_argument(
+      "-o", "--out", help="the output html path", default="out.html")
+  args = parser.parse_args()
+  tg.FileNames.InitializeFileLists(
+      args.spec, "-", "-", "-", "-", "-")
+  tg.FileNames.NextFile()
+  return os.path.abspath(args.spec), args.variation, os.path.abspath(args.out)
+
+if __name__ == '__main__':
+  # `varName` is read as a module-level global by VisualizeModel.
+  specFile, varName, outFile = ParseCmdLine()
+  print("Visualizing from spec: %s" % specFile)
+  # The spec file is trusted Python source: executing it registers the
+  # Models/Examples with the test_generator module. NOTE(review): exec on an
+  # untrusted path would be arbitrary code execution — acceptable here only
+  # because specs live in-tree.
+  exec(open(specFile, "r").read())
+  with SmartOpen(outFile) as fd:
+    InitializeHtml(fd)
+    # Only DumpExample is supplied — each example variation is visualized;
+    # model/test dumping is disabled.
+    Example.DumpAllExamples(
+        DumpModel=None, model_fd=None,
+        DumpExample=VisualizeModel, example_fd=fd,
+        DumpTest=None, test_fd=None)
+    FinalizeHtml(fd)
+  print("Output HTML file: %s" % outFile)
+
Contain classes definition and utilify functions for compiling models and
examples into NDK-based CTS and VTS unit tests.
-Used by cts_generator.py, vts_generator.py, and spec_visualizer.py
+Used by cts_generator.py, vts_generator.py, and slicing.py
"""
from __future__ import absolute_import
def IsBool(self):
return self.GetCppTypeString() == "bool8"
- def IsScalar(self):
- return not self.type.startswith("TENSOR_")
-
def GetElementByteSize(self):
cppTypeString = self.GetCppTypeString()
if cppTypeString in ["uint8_t", "int8_t", "bool8"]:
def GetSignatureTuple(self):
return (self.type, self.dimensions, self.scale, self.zeroPoint)
+    # For backward-compatibility with slicing.py
+    def GetRawShape(self):
+        # Spec-style shape string: dimensions alone for non-quantized types
+        # (scale and zero point both zero), otherwise dimensions joined with
+        # scale and zero point.
+        if self.scale == 0 and self.zeroPoint == 0:
+            return self.GetDimensionsString()
+        else:
+            return GetJointStr([self.GetDimensionsString(), self.scale, self.zeroPoint])
+
def ToUnspecifiedDim(self):
return Type.GetType(self.type, [0] * len(self.dimensions), self.scale, self.zeroPoint)
self.SetValue(value)
self.dimensions = self.type.dimensions
self.lifetime = "TEMPORARY_VARIABLE"
- self.model_index = None
self.ins = []
self.outs = []
self.outs = list(outs)
return self
+    # For backward-compatibility with slicing.py
+    # Get Python-ish dump for the op
+    def PyDefinition(self):
+        # Renders this operation as spec-file Python source, e.g.
+        #   Operation("CONV_2D", i1, f1, b1).To(o1)
+        py_op_string = """Operation("{optype}", {inputs}).To({outputs})"""
+        inputs = [str(x) for x in self.ins]
+        inputs = ", ".join(inputs)
+        # The slicing tool only supports single-output operations; note that
+        # zero outputs would still hit an IndexError on the next line.
+        assert len(self.outs) <= 1
+        outputs = str(self.outs[0])
+        ops = {"optype": self.optype, "inputs": inputs, "outputs": outputs}
+        return py_op_string.format(**ops)
+
# Main interface
class Model:
models = list()
self.operands[self.operands.index(t)] = t
return self
- def SetOperandIndex(self):
+ def SetInputAndOutputIndex(self):
for ind, i in enumerate(self.GetInputs()):
i.index = ind
for ind, o in enumerate(self.GetOutputs()):
o.index = ind
- for ind, op in enumerate(self.operands):
- op.model_index = ind
return self
def SetOperandInsAndOuts(self):
def Compile(self):
if self.compiled:
return self
- self.SetOperandIndex()
+ self.SetInputAndOutputIndex()
self.SetOperandInsAndOuts()
self.TopologicalSort()
self.SetOutputUnspecified()
self.model = Model.models[-1] if model is None else model
self.name = name
self.expectedMultinomialDistributionTolerance = None
- self.expectFailure = False
self.feedDicts = []
for feedDict in args:
if type(feedDict) is tuple or type(feedDict) is list:
# If set to greater than zero, the input is compared as log-probabilities
# to the output and must be within this tolerance to pass.
def WithMultinomialDistributionTolerance(self, expectedTolerance):
- assert self.expectFailure is False
self.expectedMultinomialDistributionTolerance = expectedTolerance
return self
- # Specifies that this example is expected to fail during compilation or execution.
- def ExpectFailure(self):
- assert self.expectedMultinomialDistributionTolerance is None
- self.expectFailure = True
- return self
+ # For backward-compatibility with slicing.py
+ # Similar to dump_dict, but in python. Used by the slicing tool
+ # if referenced is not None, only print operands that are present there
+ @staticmethod
+ def py_dump_dict(d, referenced):
+ ret = []
+ for k, v in d.items():
+ if referenced != None and k not in referenced:
+ continue
+ key = str(k)
+ init = pprint.pformat(v)
+ ret.append("%s: %s" % (key, init))
+ return ", ".join(ret)
+
+ # For backward-compatibility with slicing.py
+ # similar to dump, but in python. Used by the slicing tool
+ # if referenced is not None, only print operands that are present there
+ @staticmethod
+ def py_dump(example_file, override, referenced):
+ Example.CombineAllExamples()
+ if len(Example.examples[0].feedDicts) > 0:
+ example_no = 0
+ example_template = """\
+input{no} = {{{inputs}}}
+# Only executed during data collection phase
+if collecting_data is True:
+ Example((input{no}, {{{outputs}}}))
+"""
+ for i, o in Example.examples[0].feedDicts:
+ print ('# Begin of an example', file = example_file)
+ inputs = Example.py_dump_dict(i, referenced)
+ output_list = []
+ for k, v in override.items():
+ output_list.append("%s: [0] * %d" % (k, v))
+ outputs = ",".join(output_list)
+
+ # TODO: handle >1 outputs
+ for k, v in o.items():
+ assert k.index == 0
+ example_contents = {
+ 'no': example_no,
+ 'inputs': inputs,
+ 'outputs': outputs
+ }
+ print (example_template.format(**example_contents), file = example_file)
class FileNames:
specFiles = []
modelFile = ""
exampleFile = ""
testFile = ""
+ ctsFile = ""
+ logFile = ""
version = ""
fileIndex = 0
@staticmethod
- def InitializeFileLists(spec, model, example, test):
+ def InitializeFileLists(spec, model, example, test, cts="-", log=""):
# get all spec files and target files
if os.path.isfile(spec):
FileNames.specFiles = [os.path.abspath(spec)]
FileNames.modelFiles = FileNames.ParseTargetFiles(model, ".model.cpp")
FileNames.exampleFiles = FileNames.ParseTargetFiles(example, ".example.cpp")
FileNames.testFiles = FileNames.ParseTargetFiles(test, ".mod.py.cpp")
+ FileNames.ctsFile = os.path.abspath(cts) if cts != "-" else "-"
+ FileNames.logFile = ", \"%s\""%log if log != "" else ""
@staticmethod
def ParseTargetFiles(arg, ext):
import os
import re
import struct
+import sys
import contextlib
import pprint
from test_generator import SymmPerChannelQuantParams
# Dumping methods that shared with CTS generator
+from cts_generator import DumpCtsExample
from cts_generator import DumpCtsIsIgnored
-
-# TODO: Make this part of tg.Configuration?
-target_hal_version = None
-
-
# Take a model from command line
def ParseCmdLine():
parser = argparse.ArgumentParser()
parser.add_argument(
"-m", "--model", help="the output model file", default="-")
parser.add_argument(
- "-t", "--test", help="the output test file", default="-")
+ "-e", "--example", help="the output example file", default="-")
parser.add_argument(
- "--target_hal_version",
- help="the HAL version of the output",
- required=True,
- choices=["V1_0", "V1_1", "V1_2"])
+ "-t", "--test", help="the output test file", default="-")
args = parser.parse_args()
- example = "-" # VTS generator does not generate examples. See cts_generator.py.
tg.FileNames.InitializeFileLists(
- args.spec, args.model, example, args.test)
- global target_hal_version
- target_hal_version = args.target_hal_version
+ args.spec, args.model, args.example, args.test)
# Generate operands in VTS format
def generate_vts_operands(model):
// Allocate segment of android shared memory, wrapped in hidl_memory.
// This object will be automatically freed when sharedMemory is destroyed.
- hidl_memory sharedMemory = ::android::nn::allocateSharedMemory(sizeof(data));
+ hidl_memory sharedMemory = allocateSharedMemory(sizeof(data));
// Mmap ashmem into usable address and hold it within the mappedMemory object.
// MappedMemory will automatically munmap the memory when it is destroyed.
- sp<::android::hidl::memory::V1_0::IMemory> mappedMemory = mapMemory(sharedMemory);
+ sp<IMemory> mappedMemory = mapMemory(sharedMemory);
if (mappedMemory != nullptr) {{
// Retrieve the mmapped pointer.
}}
"""
model_dict = {
- "hal_version": target_hal_version,
"create_test_model_name": str(model.createTestFunctionName),
"operations": generate_vts_operations(model),
"operand_decls": generate_vts_operands(model),
print(model_fmt.format(**model_dict), file = model_file)
def generate_vts(model, model_file):
- assert model.compiled
- # Do not generate DynamicOutputShapeTest for pre-1.2 VTS.
- if model.hasDynamicOutputShape and target_hal_version < "V1_2":
- return
- namespace = "android::hardware::neuralnetworks::{hal_version}::generated_tests::{spec_name}".format(spec_name=tg.FileNames.specName, hal_version=target_hal_version)
- print("namespace {namespace} {{\n".format(namespace=namespace), file=model_file)
- generate_vts_model(model, model_file)
- DumpCtsIsIgnored(model, model_file)
- print("}} // namespace {namespace}".format(namespace=namespace), file=model_file)
+ assert model.compiled
+ generate_vts_model(model, model_file)
+ DumpCtsIsIgnored(model, model_file)
def generate_vts_test(example, test_file):
- # Do not generate DynamicOutputShapeTest for pre-1.2 VTS.
- if example.model.hasDynamicOutputShape and target_hal_version < "V1_2":
- return
-
- generated_vts_namespace = "android::hardware::neuralnetworks::{hal_version}::generated_tests::{spec_name}".format(spec_name=tg.FileNames.specName, hal_version=target_hal_version)
- generated_cts_namespace = "generated_tests::{spec_name}".format(spec_name=tg.FileNames.specName)
testTemplate = """\
-namespace {generated_cts_namespace} {{
-
-std::vector<::test_helper::MixedTypedExample>& get_{examples_name}();
-
-}} // namespace {generated_cts_namespace}
-
-namespace {generated_vts_namespace} {{
-
-Model {create_model_name}();
-bool {is_ignored_name}(int);
-"""
-
- if not example.expectFailure:
- testTemplate += """
TEST_F({test_case_name}, {test_name}) {{
- Execute(device,
- {create_model_name},
- {is_ignored_name},
- ::{generated_cts_namespace}::get_{examples_name}(){test_dynamic_output_shape});
-}}
-"""
+ generated_tests::Execute(device,
+ {namespace}::{create_model_name},
+ {namespace}::{is_ignored_name},
+ {namespace}::get_{examples_name}(){test_dynamic_output_shape});\n}}
- testTemplate += """
TEST_F(ValidationTest, {test_name}) {{
- const Model model = {create_model_name}();
- const std::vector<Request> requests = createRequests(::{generated_cts_namespace}::get_{examples_name}());
+ const Model model = {namespace}::{create_model_name}();
+ const std::vector<Request> requests = createRequests({namespace}::get_{examples_name}());
validateEverything(model, requests);
-}}
-
-}} // namespace {generated_vts_namespace}
+}}\n
"""
-
+ if example.model.hasDynamicOutputShape:
+ print("#ifdef NN_TEST_DYNAMIC_OUTPUT_SHAPE", file=test_fd)
print(testTemplate.format(
test_case_name="DynamicOutputShapeTest" if example.model.hasDynamicOutputShape \
else "NeuralnetworksHidlTest",
test_name=str(example.testName),
- generated_vts_namespace=generated_vts_namespace,
- generated_cts_namespace=generated_cts_namespace,
- hal_version=target_hal_version,
+ namespace=tg.FileNames.specName,
create_model_name=str(example.model.createTestFunctionName),
is_ignored_name=str(example.model.isIgnoredFunctionName),
examples_name=str(example.examplesName),
- test_dynamic_output_shape=", true" if example.model.hasDynamicOutputShape else "",
- validation_method="validateFailure" if example.expectFailure else "validateEverything",
+ test_dynamic_output_shape=", true" if example.model.hasDynamicOutputShape else ""
), file=test_fd)
-
-def InitializeFiles(model_fd, test_fd):
+ if example.model.hasDynamicOutputShape:
+ print("#endif", file=test_fd)
+
+def InitializeFiles(model_fd, example_fd, test_fd):
+ fileHeader = "// clang-format off\n// Generated file (from: {spec_file}). Do not edit"
+ testFileHeader = """\
+// Generated from: {spec_file}.
+namespace {spec_name} {{
+// Generated {spec_name} test
+#include "{example_file}"
+// Generated model constructor
+#include "{model_file}"
+}} // namespace {spec_name}\n"""
+ # This regex is to remove prefix and get relative path for #include
+ pathRegex = r".*frameworks/ml/nn/(runtime/test/generated/)?"
specFileBase = os.path.basename(tg.FileNames.specFile)
- fileHeader = """\
-// Generated from {spec_file}
-// DO NOT EDIT
-// clang-format off
-#include "GeneratedTests.h"
-""".format(spec_file=specFileBase)
- print(fileHeader, file=model_fd)
- print(fileHeader, file=test_fd)
+ print(fileHeader.format(spec_file=specFileBase), file=model_fd)
+ print(fileHeader.format(spec_file=specFileBase), file=example_fd)
+ print(testFileHeader.format(
+ spec_file=specFileBase,
+ model_file=re.sub(pathRegex, "", tg.FileNames.modelFile),
+ example_file=re.sub(pathRegex, "", tg.FileNames.exampleFile),
+ spec_name=tg.FileNames.specName), file=test_fd)
if __name__ == "__main__":
ParseCmdLine()
while tg.FileNames.NextFile():
- print("Generating VTS tests from %s" % tg.FileNames.specFile)
+ print("Generating test(s) from spec: %s" % tg.FileNames.specFile, file=sys.stderr)
exec (open(tg.FileNames.specFile, "r").read())
+ print("Output VTS model: %s" % tg.FileNames.modelFile, file=sys.stderr)
+ print("Output example:" + tg.FileNames.exampleFile, file=sys.stderr)
with SmartOpen(tg.FileNames.modelFile) as model_fd, \
- SmartOpen(tg.FileNames.testFile) as test_fd:
- InitializeFiles(model_fd, test_fd)
+ SmartOpen(tg.FileNames.exampleFile) as example_fd, \
+ SmartOpen(tg.FileNames.testFile, mode="a") as test_fd:
+ InitializeFiles(model_fd, example_fd, test_fd)
Example.DumpAllExamples(
DumpModel=generate_vts, model_fd=model_fd,
+ DumpExample=DumpCtsExample, example_fd=example_fd,
DumpTest=generate_vts_test, test_fd=test_fd)
+++ /dev/null
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Header-only library for various helpers of test harness
- * See frameworks/ml/nn/runtime/test/TestGenerated.cpp for how this is used.
- */
-#ifndef ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
-#define ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
-
-#include <gmock/gmock-matchers.h>
-#include <gtest/gtest.h>
-
-#include <cmath>
-#include <functional>
-#include <map>
-#include <tuple>
-#include <vector>
-
-namespace test_helper {
-
-constexpr const size_t gMaximumNumberOfErrorMessages = 10;
-
-// TODO: Figure out the build dependency to make including "CpuOperationUtils.h" work.
-inline void convertFloat16ToFloat32(const _Float16* input, std::vector<float>* output) {
- for (size_t i = 0; i < output->size(); ++i) {
- (*output)[i] = static_cast<float>(input[i]);
- }
-}
-
-// This class is a workaround for two issues our code relies on:
-// 1. sizeof(bool) is implementation defined.
-// 2. vector<bool> does not allow direct pointer access via the data() method.
-class bool8 {
- public:
- bool8() : mValue() {}
- /* implicit */ bool8(bool value) : mValue(value) {}
- inline operator bool() const { return mValue != 0; }
-
- private:
- uint8_t mValue;
-};
-
-static_assert(sizeof(bool8) == 1, "size of bool8 must be 8 bits");
-
-typedef std::map<int, std::vector<uint32_t>> OperandDimensions;
-typedef std::map<int, std::vector<float>> Float32Operands;
-typedef std::map<int, std::vector<int32_t>> Int32Operands;
-typedef std::map<int, std::vector<uint8_t>> Quant8AsymmOperands;
-typedef std::map<int, std::vector<int16_t>> Quant16SymmOperands;
-typedef std::map<int, std::vector<_Float16>> Float16Operands;
-typedef std::map<int, std::vector<bool8>> Bool8Operands;
-typedef std::map<int, std::vector<int8_t>> Quant8ChannelOperands;
-typedef std::map<int, std::vector<uint16_t>> Quant16AsymmOperands;
-typedef std::map<int, std::vector<int8_t>> Quant8SymmOperands;
-struct MixedTyped {
- static constexpr size_t kNumTypes = 9;
- OperandDimensions operandDimensions;
- Float32Operands float32Operands;
- Int32Operands int32Operands;
- Quant8AsymmOperands quant8AsymmOperands;
- Quant16SymmOperands quant16SymmOperands;
- Float16Operands float16Operands;
- Bool8Operands bool8Operands;
- Quant8ChannelOperands quant8ChannelOperands;
- Quant16AsymmOperands quant16AsymmOperands;
- Quant8SymmOperands quant8SymmOperands;
-};
-typedef std::pair<MixedTyped, MixedTyped> MixedTypedExampleType;
-
-// Mixed-typed examples
-typedef struct {
- MixedTypedExampleType operands;
- // Specifies the RANDOM_MULTINOMIAL distribution tolerance.
- // If set to greater than zero, the input is compared as log-probabilities
- // to the output and must be within this tolerance to pass.
- float expectedMultinomialDistributionTolerance = 0.0;
-} MixedTypedExample;
-
-// Go through all index-value pairs of a given input type
-template <typename T>
-inline void for_each(const std::map<int, std::vector<T>>& idx_and_data,
- std::function<void(int, const std::vector<T>&)> execute) {
- for (auto& i : idx_and_data) {
- execute(i.first, i.second);
- }
-}
-
-// non-const variant of for_each
-template <typename T>
-inline void for_each(std::map<int, std::vector<T>>& idx_and_data,
- std::function<void(int, std::vector<T>&)> execute) {
- for (auto& i : idx_and_data) {
- execute(i.first, i.second);
- }
-}
-
-// Go through all index-value pairs of a given input type
-template <typename T>
-inline void for_each(const std::map<int, std::vector<T>>& golden,
- std::map<int, std::vector<T>>& test,
- std::function<void(int, const std::vector<T>&, std::vector<T>&)> execute) {
- for_each<T>(golden, [&test, &execute](int index, const std::vector<T>& g) {
- auto& t = test[index];
- execute(index, g, t);
- });
-}
-
-// Go through all index-value pairs of a given input type
-template <typename T>
-inline void for_each(
- const std::map<int, std::vector<T>>& golden, const std::map<int, std::vector<T>>& test,
- std::function<void(int, const std::vector<T>&, const std::vector<T>&)> execute) {
- for_each<T>(golden, [&test, &execute](int index, const std::vector<T>& g) {
- auto t = test.find(index);
- ASSERT_NE(t, test.end());
- execute(index, g, t->second);
- });
-}
-
-// internal helper for for_all
-template <typename T>
-inline void for_all_internal(std::map<int, std::vector<T>>& idx_and_data,
- std::function<void(int, void*, size_t)> execute_this) {
- for_each<T>(idx_and_data, [&execute_this](int idx, std::vector<T>& m) {
- execute_this(idx, static_cast<void*>(m.data()), m.size() * sizeof(T));
- });
-}
-
-// Go through all index-value pairs of all input types
-// expects a functor that takes (int index, void *raw data, size_t sz)
-inline void for_all(MixedTyped& idx_and_data,
- std::function<void(int, void*, size_t)> execute_this) {
- for_all_internal(idx_and_data.float32Operands, execute_this);
- for_all_internal(idx_and_data.int32Operands, execute_this);
- for_all_internal(idx_and_data.quant8AsymmOperands, execute_this);
- for_all_internal(idx_and_data.quant16SymmOperands, execute_this);
- for_all_internal(idx_and_data.float16Operands, execute_this);
- for_all_internal(idx_and_data.bool8Operands, execute_this);
- for_all_internal(idx_and_data.quant8ChannelOperands, execute_this);
- for_all_internal(idx_and_data.quant16AsymmOperands, execute_this);
- for_all_internal(idx_and_data.quant8SymmOperands, execute_this);
- static_assert(9 == MixedTyped::kNumTypes,
- "Number of types in MixedTyped changed, but for_all function wasn't updated");
-}
-
-// Const variant of internal helper for for_all
-template <typename T>
-inline void for_all_internal(const std::map<int, std::vector<T>>& idx_and_data,
- std::function<void(int, const void*, size_t)> execute_this) {
- for_each<T>(idx_and_data, [&execute_this](int idx, const std::vector<T>& m) {
- execute_this(idx, static_cast<const void*>(m.data()), m.size() * sizeof(T));
- });
-}
-
-// Go through all index-value pairs (const variant)
-// expects a functor that takes (int index, const void *raw data, size_t sz)
-inline void for_all(const MixedTyped& idx_and_data,
- std::function<void(int, const void*, size_t)> execute_this) {
- for_all_internal(idx_and_data.float32Operands, execute_this);
- for_all_internal(idx_and_data.int32Operands, execute_this);
- for_all_internal(idx_and_data.quant8AsymmOperands, execute_this);
- for_all_internal(idx_and_data.quant16SymmOperands, execute_this);
- for_all_internal(idx_and_data.float16Operands, execute_this);
- for_all_internal(idx_and_data.bool8Operands, execute_this);
- for_all_internal(idx_and_data.quant8ChannelOperands, execute_this);
- for_all_internal(idx_and_data.quant16AsymmOperands, execute_this);
- for_all_internal(idx_and_data.quant8SymmOperands, execute_this);
- static_assert(
- 9 == MixedTyped::kNumTypes,
- "Number of types in MixedTyped changed, but const for_all function wasn't updated");
-}
-
-// Helper template - resize test output per golden
-template <typename T>
-inline void resize_accordingly_(const std::map<int, std::vector<T>>& golden,
- std::map<int, std::vector<T>>& test) {
- for_each<T>(golden, test,
- [](int, const std::vector<T>& g, std::vector<T>& t) { t.resize(g.size()); });
-}
-
-template <>
-inline void resize_accordingly_<uint32_t>(const OperandDimensions& golden,
- OperandDimensions& test) {
- for_each<uint32_t>(
- golden, test,
- [](int, const std::vector<uint32_t>& g, std::vector<uint32_t>& t) { t = g; });
-}
-
-inline void resize_accordingly(const MixedTyped& golden, MixedTyped& test) {
- resize_accordingly_(golden.operandDimensions, test.operandDimensions);
- resize_accordingly_(golden.float32Operands, test.float32Operands);
- resize_accordingly_(golden.int32Operands, test.int32Operands);
- resize_accordingly_(golden.quant8AsymmOperands, test.quant8AsymmOperands);
- resize_accordingly_(golden.quant16SymmOperands, test.quant16SymmOperands);
- resize_accordingly_(golden.float16Operands, test.float16Operands);
- resize_accordingly_(golden.bool8Operands, test.bool8Operands);
- resize_accordingly_(golden.quant8ChannelOperands, test.quant8ChannelOperands);
- resize_accordingly_(golden.quant16AsymmOperands, test.quant16AsymmOperands);
- resize_accordingly_(golden.quant8SymmOperands, test.quant8SymmOperands);
- static_assert(9 == MixedTyped::kNumTypes,
- "Number of types in MixedTyped changed, but resize_accordingly function wasn't "
- "updated");
-}
-
-template <typename T>
-void filter_internal(const std::map<int, std::vector<T>>& golden,
- std::map<int, std::vector<T>>* filtered, std::function<bool(int)> is_ignored) {
- for_each<T>(golden, [filtered, &is_ignored](int index, const std::vector<T>& m) {
- auto& g = *filtered;
- if (!is_ignored(index)) g[index] = m;
- });
-}
-
-inline MixedTyped filter(const MixedTyped& golden, std::function<bool(int)> is_ignored) {
- MixedTyped filtered;
- filter_internal(golden.operandDimensions, &filtered.operandDimensions, is_ignored);
- filter_internal(golden.float32Operands, &filtered.float32Operands, is_ignored);
- filter_internal(golden.int32Operands, &filtered.int32Operands, is_ignored);
- filter_internal(golden.quant8AsymmOperands, &filtered.quant8AsymmOperands, is_ignored);
- filter_internal(golden.quant16SymmOperands, &filtered.quant16SymmOperands, is_ignored);
- filter_internal(golden.float16Operands, &filtered.float16Operands, is_ignored);
- filter_internal(golden.bool8Operands, &filtered.bool8Operands, is_ignored);
- filter_internal(golden.quant8ChannelOperands, &filtered.quant8ChannelOperands, is_ignored);
- filter_internal(golden.quant16AsymmOperands, &filtered.quant16AsymmOperands, is_ignored);
- filter_internal(golden.quant8SymmOperands, &filtered.quant8SymmOperands, is_ignored);
- static_assert(9 == MixedTyped::kNumTypes,
- "Number of types in MixedTyped changed, but compare function wasn't updated");
- return filtered;
-}
-
-// Compare results
-template <typename T>
-void compare_(const std::map<int, std::vector<T>>& golden,
- const std::map<int, std::vector<T>>& test, std::function<void(T, T)> cmp) {
- for_each<T>(golden, test, [&cmp](int index, const std::vector<T>& g, const std::vector<T>& t) {
- for (unsigned int i = 0; i < g.size(); i++) {
- SCOPED_TRACE(testing::Message()
- << "When comparing output " << index << " element " << i);
- cmp(g[i], t[i]);
- }
- });
-}
-
-// TODO: Allow passing accuracy criteria from spec.
-// Currently we only need relaxed accuracy criteria on mobilenet tests, so we return the quant8
-// tolerance simply based on the current test name.
-inline int getQuant8AllowedError() {
- const ::testing::TestInfo* const testInfo =
- ::testing::UnitTest::GetInstance()->current_test_info();
- const std::string testCaseName = testInfo->test_case_name();
- const std::string testName = testInfo->name();
- // We relax the quant8 precision for all tests with mobilenet:
- // - CTS/VTS GeneratedTest and DynamicOutputShapeTest with mobilenet
- // - VTS CompilationCachingTest and CompilationCachingSecurityTest except for TOCTOU tests
- if (testName.find("mobilenet") != std::string::npos ||
- (testCaseName.find("CompilationCaching") != std::string::npos &&
- testName.find("TOCTOU") == std::string::npos)) {
- return 2;
- } else {
- return 1;
- }
-}
-
-inline void compare(const MixedTyped& golden, const MixedTyped& test, float fpAtol = 1e-5f,
- float fpRtol = 1e-5f) {
- int quant8AllowedError = getQuant8AllowedError();
- for_each<uint32_t>(
- golden.operandDimensions, test.operandDimensions,
- [](int index, const std::vector<uint32_t>& g, const std::vector<uint32_t>& t) {
- SCOPED_TRACE(testing::Message()
- << "When comparing dimensions for output " << index);
- EXPECT_EQ(g, t);
- });
- size_t totalNumberOfErrors = 0;
- compare_<float>(golden.float32Operands, test.float32Operands,
- [&totalNumberOfErrors, fpAtol, fpRtol](float expected, float actual) {
- // Compute the range based on both absolute tolerance and relative tolerance
- float fpRange = fpAtol + fpRtol * std::abs(expected);
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(expected, actual, fpRange);
- }
- if (std::abs(expected - actual) > fpRange) {
- totalNumberOfErrors++;
- }
- });
- compare_<int32_t>(golden.int32Operands, test.int32Operands,
- [&totalNumberOfErrors](int32_t expected, int32_t actual) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_EQ(expected, actual);
- }
- if (expected != actual) {
- totalNumberOfErrors++;
- }
- });
- compare_<uint8_t>(golden.quant8AsymmOperands, test.quant8AsymmOperands,
- [&totalNumberOfErrors, quant8AllowedError](uint8_t expected, uint8_t actual) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(expected, actual, quant8AllowedError);
- }
- if (std::abs(expected - actual) > quant8AllowedError) {
- totalNumberOfErrors++;
- }
- });
- compare_<int16_t>(golden.quant16SymmOperands, test.quant16SymmOperands,
- [&totalNumberOfErrors](int16_t expected, int16_t actual) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(expected, actual, 1);
- }
- if (std::abs(expected - actual) > 1) {
- totalNumberOfErrors++;
- }
- });
- compare_<_Float16>(golden.float16Operands, test.float16Operands,
- [&totalNumberOfErrors, fpAtol, fpRtol](_Float16 expected, _Float16 actual) {
- // Compute the range based on both absolute tolerance and relative
- // tolerance
- float fpRange = fpAtol + fpRtol * std::abs(static_cast<float>(expected));
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(expected, actual, fpRange);
- }
- if (std::abs(static_cast<float>(expected - actual)) > fpRange) {
- totalNumberOfErrors++;
- }
- });
- compare_<bool8>(golden.bool8Operands, test.bool8Operands,
- [&totalNumberOfErrors](bool expected, bool actual) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_EQ(expected, actual);
- }
- if (expected != actual) {
- totalNumberOfErrors++;
- }
- });
- compare_<int8_t>(golden.quant8ChannelOperands, test.quant8ChannelOperands,
- [&totalNumberOfErrors, &quant8AllowedError](int8_t expected, int8_t actual) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(expected, actual, quant8AllowedError);
- }
- if (std::abs(static_cast<int>(expected) - static_cast<int>(actual)) >
- quant8AllowedError) {
- totalNumberOfErrors++;
- }
- });
- compare_<uint16_t>(golden.quant16AsymmOperands, test.quant16AsymmOperands,
- [&totalNumberOfErrors](int16_t expected, int16_t actual) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(expected, actual, 1);
- }
- if (std::abs(expected - actual) > 1) {
- totalNumberOfErrors++;
- }
- });
- compare_<int8_t>(golden.quant8SymmOperands, test.quant8SymmOperands,
- [&totalNumberOfErrors, quant8AllowedError](int8_t expected, int8_t actual) {
- if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
- EXPECT_NEAR(expected, actual, quant8AllowedError);
- }
- if (std::abs(static_cast<int>(expected) - static_cast<int>(actual)) >
- quant8AllowedError) {
- totalNumberOfErrors++;
- }
- });
-
- static_assert(9 == MixedTyped::kNumTypes,
- "Number of types in MixedTyped changed, but compare function wasn't updated");
- EXPECT_EQ(size_t{0}, totalNumberOfErrors);
-}
-
-// Calculates the expected probability from the unnormalized log-probability of
-// each class in the input and compares it to the actual ocurrence of that class
-// in the output.
-inline void expectMultinomialDistributionWithinTolerance(const MixedTyped& test,
- const MixedTypedExample& example) {
- // TODO: These should be parameters but aren't currently preserved in the example.
- const int kBatchSize = 1;
- const int kNumClasses = 1024;
- const int kNumSamples = 128;
-
- std::vector<int32_t> output = test.int32Operands.at(0);
- std::vector<int> class_counts;
- class_counts.resize(kNumClasses);
- for (int index : output) {
- class_counts[index]++;
- }
- std::vector<float> input;
- Float32Operands float32Operands = example.operands.first.float32Operands;
- if (!float32Operands.empty()) {
- input = example.operands.first.float32Operands.at(0);
- } else {
- std::vector<_Float16> inputFloat16 = example.operands.first.float16Operands.at(0);
- input.resize(inputFloat16.size());
- convertFloat16ToFloat32(inputFloat16.data(), &input);
- }
- for (int b = 0; b < kBatchSize; ++b) {
- float probability_sum = 0;
- const int batch_index = kBatchSize * b;
- for (int i = 0; i < kNumClasses; ++i) {
- probability_sum += expf(input[batch_index + i]);
- }
- for (int i = 0; i < kNumClasses; ++i) {
- float probability =
- static_cast<float>(class_counts[i]) / static_cast<float>(kNumSamples);
- float probability_expected = expf(input[batch_index + i]) / probability_sum;
- EXPECT_THAT(probability,
- ::testing::FloatNear(probability_expected,
- example.expectedMultinomialDistributionTolerance));
- }
- }
-}
-
-}; // namespace test_helper
-
-#endif // ANDROID_FRAMEWORKS_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
+++ /dev/null
-#!/usr/bin/python3
-
-# Copyright 2019, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Spec Visualizer
-
-Visualize python spec file for test generator.
-Invoked by ml/nn/runtime/test/specs/visualize_spec.sh;
-See that script for details on how this script is used.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-import argparse
-import json
-import os
-import sys
-from string import Template
-
-# Stuff from test generator
-import test_generator as tg
-from test_generator import ActivationConverter
-from test_generator import BoolScalar
-from test_generator import Configuration
-from test_generator import DataTypeConverter
-from test_generator import DataLayoutConverter
-from test_generator import Example
-from test_generator import Float16Scalar
-from test_generator import Float32Scalar
-from test_generator import Float32Vector
-from test_generator import GetJointStr
-from test_generator import IgnoredOutput
-from test_generator import Input
-from test_generator import Int32Scalar
-from test_generator import Int32Vector
-from test_generator import Internal
-from test_generator import Model
-from test_generator import Operand
-from test_generator import Output
-from test_generator import Parameter
-from test_generator import ParameterAsInputConverter
-from test_generator import RelaxedModeConverter
-from test_generator import SymmPerChannelQuantParams
-
-
-TEMPLATE_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "spec_viz_template.html")
-global_graphs = dict()
-
-
-def FormatArray(data, is_scalar=False):
- if is_scalar:
- assert len(data) == 1
- return str(data[0])
- else:
- return "[%s]" % (", ".join(str(i) for i in data))
-
-
-def FormatDict(data):
- return "<br/>".join("<b>%s:</b> %s"%(k.capitalize(), v) for k, v in data.items())
-
-
-def GetOperandInfo(op):
- op_info = {"lifetime": op.lifetime, "type": op.type.type}
-
- if not op.type.IsScalar():
- op_info["dimensions"] = FormatArray(op.type.dimensions)
-
- if op.type.scale != 0:
- op_info["scale"] = op.type.scale
- op_info["zero point"] = op.type.zeroPoint
- if op.type.type == "TENSOR_QUANT8_SYMM_PER_CHANNEL":
- op_info["scale"] = FormatArray(op.type.extraParams.scales)
- op_info["channel dim"] = op.type.extraParams.channelDim
-
- return op_info
-
-
-def FormatOperand(op):
- # All keys and values in op_info will appear in the tooltip. We only display the operand data
- # if the length is less than 10. This should be convenient enough for most parameters.
- op_info = GetOperandInfo(op)
- if isinstance(op, Parameter) and len(op.value) <= 10:
- op_info["data"] = FormatArray(op.value, op.type.IsScalar())
-
- template = "<span class='tooltip'><span class='tooltipcontent'>{tooltip_content}</span><a href=\"{inpage_link}\">{op_name}</a></span>"
- return template.format(
- op_name=str(op),
- tooltip_content=FormatDict(op_info),
- inpage_link="#details-operands-%d" % (op.model_index),
- )
-
-
-def GetSubgraph(example):
- """Produces the nodes and edges information for d3 visualization."""
-
- node_index_map = {}
- topological_order = []
-
- def AddToTopologicalOrder(op):
- if op not in node_index_map:
- node_index_map[op] = len(topological_order)
- topological_order.append(op)
-
- # Get the topological order, both operands and operations are treated the same.
- # Given that the example.model.operations is already topologically sorted, here we simply
- # iterate through and insert inputs and outputs.
- for op in example.model.operations:
- for i in op.ins:
- AddToTopologicalOrder(i)
- AddToTopologicalOrder(op)
- for o in op.outs:
- AddToTopologicalOrder(o)
-
- # Assign layers to the nodes.
- layers = {}
- for node in topological_order:
- layers[node] = max([layers[i] for i in node.ins], default=-1) + 1
- for node in reversed(topological_order):
- layers[node] = min([layers[o] for o in node.outs], default=layers[node]+1) - 1
- num_layers = max(layers.values()) + 1
-
- # Assign coordinates to the nodes. Nodes are equally spaced.
- CoordX = lambda index: (index + 0.5) * 200 # 200px spacing horizontally
- CoordY = lambda index: (index + 0.5) * 100 # 100px spacing vertically
- coords = {}
- layer_cnt = [0] * num_layers
- for node in topological_order:
- coords[node] = (CoordX(layer_cnt[layers[node]]), CoordY(layers[node]))
- layer_cnt[layers[node]] += 1
-
- # Create edges and nodes dictionaries for d3 visualization.
- OpName = lambda idx: "operation%d" % idx
- edges = []
- nodes = []
- for ind, op in enumerate(example.model.operations):
- for tensor in op.ins:
- edges.append({
- "source": str(tensor),
- "target": OpName(ind)
- })
- for tensor in op.outs:
- edges.append({
- "target": str(tensor),
- "source": OpName(ind)
- })
- nodes.append({
- "index": ind,
- "id": OpName(ind),
- "name": op.optype,
- "group": 2,
- "x": coords[op][0],
- "y": coords[op][1],
- })
-
- for ind, op in enumerate(example.model.operands):
- nodes.append({
- "index": ind,
- "id": str(op),
- "name": str(op),
- "group": 1,
- "x": coords[op][0],
- "y": coords[op][1],
- })
-
- return {"nodes": nodes, "edges": edges}
-
-
-# The following Get**Info methods will each return a list of dictionaries,
-# whose content will appear in the tables and sidebar views.
-def GetConfigurationsInfo(example):
- return [{
- "relaxed": str(example.model.isRelaxed),
- "use shared memory": str(tg.Configuration.useSHM()),
- "expect failure": str(example.expectFailure),
- }]
-
-
-def GetOperandsInfo(example):
- ret = []
- for index, op in enumerate(example.model.operands):
- ret.append({
- "index": index,
- "name": str(op),
- "group": "operand"
- })
- ret[-1].update(GetOperandInfo(op))
- if isinstance(op, Parameter):
- ret[-1]["data"] = FormatArray(op.value, op.type.IsScalar())
- elif isinstance(op, Input):
- ret[-1]["data"] = FormatArray(example.feedDicts[0][0][op], op.type.IsScalar())
- elif isinstance(op, Output) and not isinstance(op, IgnoredOutput):
- ret[-1]["data"] = FormatArray(example.feedDicts[0][1][op], op.type.IsScalar())
- return ret
-
-
-def GetOperationsInfo(example):
- return [{
- "index": index,
- "name": op.optype,
- "group": "operation",
- "opcode": op.optype,
- "inputs": ", ".join(FormatOperand(i) for i in op.ins),
- "outputs": ", ".join(FormatOperand(o) for o in op.outs),
- } for index,op in enumerate(example.model.operations)]
-
-
-# TODO: Remove the unused fd from the parameter.
-def ProcessExample(example, fd):
- """Process an example and save the information into the global dictionary global_graphs."""
-
- global global_graphs
- print(" Processing variation %s" % example.testName)
- global_graphs[str(example.testName)] = {
- "subgraph": GetSubgraph(example),
- "details": {
- "configurations": GetConfigurationsInfo(example),
- "operands": GetOperandsInfo(example),
- "operations": GetOperationsInfo(example)
- }
- }
-
-
-def DumpHtml(spec_file, out_file):
- """Dump the final HTML file by replacing entries from a template file."""
-
- with open(TEMPLATE_FILE, "r") as template_fd:
- html_template = template_fd.read()
-
- with open(out_file, "w") as out_fd:
- out_fd.write(Template(html_template).substitute(
- spec_name=os.path.basename(spec_file),
- graph_dump=json.dumps(global_graphs),
- ))
-
-
-def ParseCmdLine():
- parser = argparse.ArgumentParser()
- parser.add_argument("spec", help="the spec file")
- parser.add_argument("-o", "--out", help="the output html path", default="out.html")
- args = parser.parse_args()
- tg.FileNames.InitializeFileLists(args.spec, "-", "-", "-")
- tg.FileNames.NextFile()
- return os.path.abspath(args.spec), os.path.abspath(args.out)
-
-
-if __name__ == '__main__':
- spec_file, out_file = ParseCmdLine()
- print("Visualizing from spec: %s" % spec_file)
- exec(open(spec_file, "r").read())
- Example.DumpAllExamples(DumpExample=ProcessExample, example_fd=0)
- DumpHtml(spec_file, out_file)
- print("Output HTML file: %s" % out_file)
-
+++ /dev/null
-<!DOCTYPE html>
-<html>
-<head>
- <title>$spec_name</title>
- <style>
- body {
- font-family: "Roboto", sans-serif;
- margin: 0;
- height: 100%;
- background-color: rgb(61, 65, 77);
- }
-
- #main {
- width: 62%;
- transition: 0.5s;
- }
-
- #main h1 {
- padding: 20px;
- color: #eee;
- font-size: 24px;
- }
-
- .subgraph h3 {
- text-transform: capitalize;
- }
-
- .subgraph {
- padding: 20px;
- margin: 20px;
- border-radius: 10px;
- background-color: #fff;
- }
-
- .subgraph table {
- border-collapse: collapse;
- border-spacing: 0;
- }
-
- .subgraph thead {
- background-color: rgb(61, 65, 77);
- color: white;
- text-transform: capitalize;
- }
-
- .subgraph tbody tr:nth-child(odd) {
- background-color: #f2f2f2;
- }
-
- .subgraph tbody tr:hover {
- background-color: #d8d8d8;
- }
-
- .subgraph td {
- border: 1px solid #ddd;
- padding: 8px;
- }
-
- .subgraph select {
- font-weight: bold;
- text-transform: uppercase;
- font-size: 18px;
- color: black;
- }
-
- .subgraph svg {
- background: white;
- border: 1px solid #ccc;
- }
-
- .subgraph .edges line {
- stroke: #333;
- }
-
- .subgraph .nodes text {
- color: black;
- pointer-events: none;
- font-family: sans-serif;
- font-size: 11px;
- }
-
- #sidebar {
- height: 100%;
- width: 38%;
- position: fixed;
- z-index: 1;
- top: 0;
- right: 0;
- background-color: #eee;
- overflow-x: hidden;
- transition: 0.5s;
- border-left: 1px solid #ccc;
- }
-
- #sidebar #sidebar-main {
- padding: 50px;
- }
-
- #sidebar h1 {
- margin-top: 6px;
- margin-bottom: 24px;
- font-weight: bold;
- font-size: 18px;
- text-transform: uppercase;
- }
-
- #sidebar .subtitle {
- margin-bottom: 6px;
- border-bottom: 1px solid #ccc;
- padding-bottom: 4px;
- font-weight: bold;
- font-size: 12px;
- text-transform: uppercase;
- color: #555;
- }
-
- #sidebar .property {
- display: block;
- margin-bottom: 16px;
- }
-
- #sidebar .property_title {
- float: left;
- width: 80px;
- margin-top: 0;
- padding-top: 10px;
- font-weight: bold;
- font-size: 12px;
- text-transform: uppercase;
- color: #555;
- }
-
- #sidebar .property_text {
- margin-top: 8px;
- margin-left: 100px;
- border: 1px solid #ccc;
- border-radius: 2px;
- padding: 8px;
- font-size: 14px;
- background-color: #fff;
- }
-
- #sidebar .closebtn {
- position: absolute;
- top: 0;
- right: 25px;
- font-size: 36px;
- margin-left: 50px;
- text-decoration: none;
- color: #555;
- }
-
- .tooltip {
- color: blue;
- }
-
- .tooltip .tooltipcontent {
- visibility: hidden;
- color: black;
- background-color: #eee;
- margin-top: 18px;
- padding: 5px;
- border: 1px solid #ccc;
- border-radius: 4px;
- position: absolute;
- z-index: 1;
- }
-
- .tooltip:hover .tooltipcontent {
- visibility: visible;
- }
- </style>
- <link href="https://fonts.googleapis.com/css?family=Roboto&display=swap" rel="stylesheet" />
- <script src="https://d3js.org/d3.v4.min.js"></script>
- <script>
- graphs = $graph_dump;
- </script>
-</head>
-
-<body>
- <div id="main">
- <h1>$spec_name</h1>
- <div class="subgraph" id="main-subgraph">
- <label for="main-selector">Choose a subgraph: </label>
- <select id="main-selector" onchange="renderSubgraph(this.value)"></select>
- <div id="main-tables"></div>
- <h3>Visual Graph</h3>
- <svg id="subgraph-svg" width="100%" height="720"></svg>
- </div>
- </div>
-
- <div id="sidebar">
- <div id="sidebar-main">
- </div>
- </div>
-
- <script>
- // Render the sidebar view of a given node object.
- // The node must have "name" and "group" fields available.
- function renderSidebar(node) {
- var sidebar = document.getElementById("sidebar-main");
- sidebar.innerHTML = "";
- if (node == null) return;
-
- // Sidebar subtitle -- text taken from node.group.
- var subtitle = document.createElement("p");
- subtitle.classList.add("subtitle");
- subtitle.innerHTML = node.group;
- sidebar.appendChild(subtitle);
-
- // Sidebar title -- text taken from node.name.
- var title = document.createElement("h1");
- title.innerHTML = node.name;
- sidebar.appendChild(title);
-
- // List all the other fields in sidebar.
- var ignoredFields = ["name", "group"];
- for (var property in node) {
- if (ignoredFields.includes(property)) continue;
-
- var propertyTitle = document.createElement("h2");
- propertyTitle.classList.add("property_title");
- propertyTitle.innerHTML = property;
-
- var propertyText = document.createElement("p");
- propertyText.classList.add("property_text");
- propertyText.innerHTML = node[property];
-
- var propertyDiv = document.createElement("div");
- propertyDiv.classList.add("property");
- propertyDiv.appendChild(propertyTitle);
- propertyDiv.appendChild(propertyText);
- sidebar.appendChild(propertyDiv);
- }
- }
-
- // Render the SVG DAG visualization, from TFLite graph visualizer.
- // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/tools/visualize.py
- //
- // The node coordiates are pre-calculated from the python visualizer.
- function renderSvg(subgraph) {
- var data = graphs[subgraph]["subgraph"];
- var svg = d3.select("#subgraph-svg");
- svg.selectAll("*").remove();
- var width = svg.attr("width");
- var height = svg.attr("height");
- // Make the graph scrollable.
- svg = svg.call(d3.zoom().on("zoom", function () {
- svg.attr("transform", d3.event.transform);
- })).append("g");
- var color = d3.scaleOrdinal(d3.schemeDark2);
- var simulation = d3.forceSimulation()
- .force("link", d3.forceLink().id(function (d) { return d.id; }))
- .force("charge", d3.forceManyBody())
- .force("center", d3.forceCenter(0.5 * width, 0.5 * height));
- var edge = svg.append("g").attr("class", "edges").selectAll("line")
- .data(data.edges).enter().append("path").attr("stroke", "black").attr("fill", "none")
- // Make the node group
- var node = svg.selectAll(".nodes")
- .data(data.nodes)
- .enter().append("g")
- .attr("x", function (d) { return d.x })
- .attr("y", function (d) { return d.y })
- .attr("transform", function (d) {
- return "translate( " + d.x + ", " + d.y + ")"
- })
- .attr("class", "nodes")
- .call(d3.drag()
- .on("start", function (d) {
- if (!d3.event.active) simulation.alphaTarget(1.0).restart();
- d.fx = d.x; d.fy = d.y;
- })
- .on("drag", function (d) {
- d.fx = d3.event.x; d.fy = d3.event.y;
- })
- .on("end", function (d) {
- if (!d3.event.active) simulation.alphaTarget(0);
- d.fx = d.fy = null;
- }));
- // Within the group, draw a box for the node position and text
- // on the side.
- var node_width = 150;
- var node_height = 30;
- node.append("rect")
- .attr("r", "5px")
- .attr("width", function (d) { return d.group == 1 ? node_width : node_width + 50; })
- .attr("height", node_height)
- .attr("rx", function (d) { return d.group == 1 ? 1 : 10; })
- .attr("stroke", "#000000")
- .attr("fill", function (d) { return d.group == 1 ? "#dddddd" : "#000000"; })
- .attr("onclick", function (d) {
- return "renderSidebar(graphs." + subgraph + ".details." +
- (d.group == 1 ? "operands" : "operations") + "[" +
- d.index.toString() + "])";
- });
- node.append("text")
- .text(function (d) { return d.name; })
- .attr("x", 5)
- .attr("y", 20)
- .attr("fill", function (d) { return d.group == 1 ? "#000000" : "#eeeeee"; })
- // Setup force parameters and update position callback
- var node = svg.selectAll(".nodes")
- .data(data.nodes);
- // Bind the links
- var name_to_g = {}
- node.each(function (data, index, nodes) {
- name_to_g[data.id] = this;
- });
- function proc(w, t) {
- return parseInt(w.getAttribute(t));
- }
- edge.attr("d", function (d) {
- function lerp(t, a, b) {
- return (1.0 - t) * a + t * b;
- }
- var x1 = proc(name_to_g[d.source], "x") + node_width / 2;
- var y1 = proc(name_to_g[d.source], "y") + node_height;
- var x2 = proc(name_to_g[d.target], "x") + node_width / 2;
- var y2 = proc(name_to_g[d.target], "y");
- var s = "M " + x1 + " " + y1
- + " C " + x1 + " " + lerp(.5, y1, y2)
- + " " + x2 + " " + lerp(.5, y1, y2)
- + " " + x2 + " " + y2
- return s;
- });
- }
-
- // Open a new window and present the full text data.
- function showFullData(data) {
- window.open().document.write(data);
- }
-
- // Renders a single table.
- function renderTable(title, data, headers) {
- var parent = document.getElementById("main-tables");
-
- // Create heading.
- var heading = document.createElement("h3");
- heading.innerHTML = title;
- parent.appendChild(heading);
-
- // Filter out headers that do not appear in any data element.
- headers = headers.filter(function (key) {
- return data.some(function (elem) { return key in elem; });
- });
-
- // Render the table headers.
- var table = document.createElement("table");
- let header = table.createTHead().insertRow();
- for (let key of headers) { header.insertCell().innerHTML = key; }
-
- // Render the table body.
- // Since the "data" field could be very large, we omit the full content and
- // append a "View Full" button to the end.
- var omittableFields = ["data"];
- let body = table.createTBody();
- for (const [index, elem] of data.entries()) {
- let row = body.insertRow();
- row.id = "details-" + title.toLowerCase() + "-" + index.toString();
-
- for (let key of headers) {
- var cell = row.insertCell();
- var data = key in elem ? elem[key] : "-";
- if (omittableFields.includes(key) && data.length > 100) {
- // If the data exceeds the length limit, only print the first 80 and
- // the last 20 characters.
- data = data.substring(0, 80) + " ... " +
- data.substring(data.length - 20, data.length) + " ";
- cell.innerHTML = data;
-
- // Append a "View Full" button to the end.
- var href = document.createElement("a");
- href.innerHTML = "View Full";
- href.href = "javascript:void(0)";
- href.onclick = function () { showFullData(elem[key]); };
- cell.appendChild(href);
- } else {
- cell.innerHTML = data;
- }
- }
- }
- parent.appendChild(table);
- }
-
- function renderTables(subgraph) {
- document.getElementById("main-tables").innerHTML = "";
- renderTable("Configurations", graphs[subgraph].details.configurations, [
- "relaxed",
- "use shared memory",
- "expect failure"
- ]);
- renderTable("Operands", graphs[subgraph].details.operands, [
- "index",
- "name",
- "type",
- "dimensions",
- "scale",
- "zero point",
- "channel dim",
- "lifetime",
- "data"
- ]);
- renderTable("Operations", graphs[subgraph].details.operations, [
- "index",
- "opcode",
- "inputs",
- "outputs"
- ]);
- }
-
- // Re-render all the information related to a subgraph.
- // Invoked everytime when the main-selector changes.
- function renderSubgraph(subgraph) {
- renderTables(subgraph);
- renderSvg(subgraph);
- renderSidebar(null); // Clear sidebar.
- }
-
- // Renders the main-selector and the first subgraph choice in the main-selector.
- // Only invoked once when the page gets loaded the first time.
- function renderMain() {
- var selector = document.getElementById("main-selector");
- var first = true;
- for (var subgraph in graphs) {
- var option = document.createElement("option");
- option.value = subgraph;
- option.text = subgraph;
- selector.appendChild(option);
- if (first) {
- first = false;
- renderSubgraph(subgraph);
- }
- }
- }
- renderMain();
- </script>
-</body>
-</html>
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-
-n_batch = 1
-n_input = 2
-# n_cell and n_output have the same size when there is no projection.
-n_cell = 4
-n_output = 4
-
-input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input))
-
-input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-
-recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-
-cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32", "{0}")
-cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32", "{0}")
-cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32", "{0}")
-
-input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-
-projection_weights = Input("projection_weights", "TENSOR_FLOAT32", "{0,0}")
-projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
-
-output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
-
-activation_param = Int32Scalar("activation_param", 4) # Tanh
-cell_clip_param = Float32Scalar("cell_clip_param", 0.)
-proj_clip_param = Float32Scalar("proj_clip_param", 0.)
-
-scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4)))
-output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
-output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-
-model = model.Operation("LSTM",
- input,
-
- input_to_input_weights,
- input_to_forget_weights,
- input_to_cell_weights,
- input_to_output_weights,
-
- recurrent_to_input_weights,
- recurrent_to_forget_weights,
- recurrent_to_cell_weights,
- recurrent_to_output_weights,
-
- cell_to_input_weights,
- cell_to_forget_weights,
- cell_to_output_weights,
-
- input_gate_bias,
- forget_gate_bias,
- cell_gate_bias,
- output_gate_bias,
-
- projection_weights,
- projection_bias,
-
- output_state_in,
- cell_state_in,
-
- activation_param,
- cell_clip_param,
- proj_clip_param
-).To([scratch_buffer, output_state_out, cell_state_out, output])
-model = model.RelaxedExecution(True)
-
-# Example 1. Input in operand 0,
-input0 = {input_to_input_weights: [-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524],
- input_to_forget_weights: [0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212],
- input_to_cell_weights: [-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778],
- input_to_output_weights: [-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578],
-
- input_gate_bias: [0.,0.,0.,0.],
- forget_gate_bias: [1.,1.,1.,1.],
- cell_gate_bias: [0.,0.,0.,0.],
- output_gate_bias: [0.,0.,0.,0.],
-
- recurrent_to_input_weights: [
- -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
- -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
- -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296],
-
- recurrent_to_cell_weights: [
- -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
- -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
- -0.46367589, 0.26016325, -0.03894562, -0.16368064],
-
- recurrent_to_forget_weights: [
- -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
- -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
- 0.28053468, 0.01560611, -0.20127171, -0.01140004],
-
- recurrent_to_output_weights: [
- 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
- 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
- -0.51818722, -0.15390486, 0.0468148, 0.39922136],
-
- cell_to_input_weights: [],
- cell_to_forget_weights: [],
- cell_to_output_weights: [],
-
- projection_weights: [],
- projection_bias: [],
-}
-
-test_input = [2., 3.]
-output_state = [0, 0, 0, 0]
-cell_state = [0, 0, 0, 0]
-golden_output = [-0.02973187, 0.1229473, 0.20885126, -0.15358765,]
-output0 = {
- scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
- cell_state_out: [ -0.145439, 0.157475, 0.293663, -0.277353 ],
- output_state_out: [ -0.0297319, 0.122947, 0.208851, -0.153588 ],
- output: golden_output
-}
-input0[input] = test_input
-input0[output_state_in] = output_state
-input0[cell_state_in] = cell_state
-Example((input0, output0))
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: lstm_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: lstm_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: lstm_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace lstm_float {
-// Generated lstm_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace lstm_float
-
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {4, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {4, 4});
- OperandType type3(Type::TENSOR_FLOAT32, {0});
- OperandType type4(Type::TENSOR_FLOAT32, {4});
- OperandType type5(Type::TENSOR_FLOAT32, {0, 0});
- OperandType type6(Type::TENSOR_FLOAT32, {1, 4});
- OperandType type7(Type::INT32, {});
- OperandType type8(Type::FLOAT32, {});
- OperandType type9(Type::TENSOR_FLOAT32, {1, 16});
- // Phase 1, operands
- auto input = model->addOperand(&type0);
- auto input_to_input_weights = model->addOperand(&type1);
- auto input_to_forget_weights = model->addOperand(&type1);
- auto input_to_cell_weights = model->addOperand(&type1);
- auto input_to_output_weights = model->addOperand(&type1);
- auto recurrent_to_intput_weights = model->addOperand(&type2);
- auto recurrent_to_forget_weights = model->addOperand(&type2);
- auto recurrent_to_cell_weights = model->addOperand(&type2);
- auto recurrent_to_output_weights = model->addOperand(&type2);
- auto cell_to_input_weights = model->addOperand(&type3);
- auto cell_to_forget_weights = model->addOperand(&type3);
- auto cell_to_output_weights = model->addOperand(&type3);
- auto input_gate_bias = model->addOperand(&type4);
- auto forget_gate_bias = model->addOperand(&type4);
- auto cell_gate_bias = model->addOperand(&type4);
- auto output_gate_bias = model->addOperand(&type4);
- auto projection_weights = model->addOperand(&type5);
- auto projection_bias = model->addOperand(&type3);
- auto output_state_in = model->addOperand(&type6);
- auto cell_state_in = model->addOperand(&type6);
- auto activation_param = model->addOperand(&type7);
- auto cell_clip_param = model->addOperand(&type8);
- auto proj_clip_param = model->addOperand(&type8);
- auto scratch_buffer = model->addOperand(&type9);
- auto output_state_out = model->addOperand(&type6);
- auto cell_state_out = model->addOperand(&type6);
- auto output = model->addOperand(&type6);
- // Phase 2, operations
- static int32_t activation_param_init[] = {4};
- model->setOperandValue(activation_param, activation_param_init, sizeof(int32_t) * 1);
- static float cell_clip_param_init[] = {0.0f};
- model->setOperandValue(cell_clip_param, cell_clip_param_init, sizeof(float) * 1);
- static float proj_clip_param_init[] = {0.0f};
- model->setOperandValue(proj_clip_param, proj_clip_param_init, sizeof(float) * 1);
- model->addOperation(ANEURALNETWORKS_LSTM, {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in, activation_param, cell_clip_param, proj_clip_param}, {scratch_buffer, output_state_out, cell_state_out, output});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_intput_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, output_state_in, cell_state_in},
- {scratch_buffer, output_state_out, cell_state_out, output});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {0};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {2.0f, 3.0f}}, {1, {-0.45018822f, -0.02338299f, -0.0870589f, -0.34550029f, 0.04266912f, -0.15680569f, -0.34856534f, 0.43890524f}}, {2, {0.09701663f, 0.20334584f, -0.50592935f, -0.31343272f, -0.40032279f, 0.44781327f, 0.01387155f, -0.35593212f}}, {3, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f, -0.20583314f, 0.44344562f, 0.22077113f, -0.29909778f}}, {4, {-0.25065863f, -0.28290087f, 0.04613829f, 0.40525138f, 0.44272184f, 0.03897077f, -0.1556896f, 0.19487578f}}, {5, {-0.0063535f, -0.2042388f, 0.31454784f, -0.35746509f, 0.28902304f, 0.08183324f, -0.16555229f, 0.02286911f, -0.13566875f, 0.03034258f, 0.48091322f, -0.12528998f, 0.24077177f, -0.51332325f, -0.33502164f, 0.10629296f}}, {6, {-0.48684245f, -0.06655136f, 0.42224967f, 0.2112639f, 0.27654213f, 0.20864892f, -0.07646349f, 0.45877004f, 0.00141793f, -0.14609534f, 0.36447752f, 0.09196436f, 0.28053468f, 0.01560611f, -0.20127171f, -0.01140004f}}, {7, {-0.3407414f, 0.24443203f, -0.2078532f, 0.26320225f, 0.05695659f, -0.00123841f, -0.4744786f, -0.35869038f, -0.06418842f, -0.13502428f, -0.501764f, 0.22830659f, -0.46367589f, 0.26016325f, -0.03894562f, -0.16368064f}}, {8, {0.43385774f, -0.17194885f, 0.2718237f, 0.09215671f, 0.24107647f, -0.39835793f, 0.18212086f, 0.01301402f, 0.48572797f, -0.50656658f, 0.20047462f, -0.20607421f, -0.51818722f, -0.15390486f, 0.0468148f, 0.39922136f}}, {9, {}}, {10, {}}, {11, {}}, {12, {0.0f, 0.0f, 0.0f, 0.0f}}, {13, {1.0f, 1.0f, 1.0f, 1.0f}}, {14, {0.0f, 0.0f, 0.0f, 0.0f}}, {15, {0.0f, 0.0f, 0.0f, 0.0f}}, {16, {}}, {17, {}}, {18, {0.0f, 0.0f, 0.0f, 0.0f}}, {19, {0.0f, 0.0f, 0.0f, 0.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {-0.0297319f, 0.122947f, 0.208851f, -0.153588f}}, {2, {-0.145439f, 0.157475f, 0.293663f, -0.277353f}}, {3, {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, lstm_float) {
- execute(lstm_float::CreateModel,
- lstm_float::is_ignored,
- lstm_float::examples);
-}
-
-#include "../generated/tests/lstm_float.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
-pad0 = Int32Scalar("pad0", 0)
-act = Int32Scalar("act", 0)
-stride = Int32Scalar("stride", 1)
-cm = Int32Scalar("channelMultiplier", 1)
-output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1,1,1,2}, 1.f, 0")
-
-model = model.Operation("DEPTHWISE_CONV_2D",
- i1, f1, b1,
- pad0, pad0, pad0, pad0,
- stride, stride,
- cm, act).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [4, 16, 4, 32, 4, 64, 4, 128],
- f1:
- [2, 4, 2, 0, 2, 2, 2, 0],
- b1:
- [0, 0]}
-# (i1 (depthconv) f1)
-output0 = {output: # output 0
- [8, 48]}
-
-# Instantiate an example
-Example((input0, output0))
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
-// clang-format off
-// Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
-// clang-format off
-// Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace depthwise_conv2d_quant8 {
-// Generated depthwise_conv2d_quant8 test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace depthwise_conv2d_quant8
-
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 0);
- OperandType type1(Type::TENSOR_INT32, {2}, 0.25f, 0);
- OperandType type2(Type::INT32, {});
- OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 1, 1, 2}, 1.0f, 0);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type0);
- auto op3 = model->addOperand(&type1);
- auto pad0 = model->addOperand(&type2);
- auto stride = model->addOperand(&type2);
- auto channelMultiplier = model->addOperand(&type2);
- auto act = model->addOperand(&type2);
- auto op4 = model->addOperand(&type3);
- // Phase 2, operations
- static int32_t pad0_init[] = {0};
- model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
- static int32_t stride_init[] = {1};
- model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
- static int32_t channelMultiplier_init[] = {1};
- model->setOperandValue(channelMultiplier, channelMultiplier_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_DEPTHWISE_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, channelMultiplier, act}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2, op3},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {{2, {0, 0}}},
- // int -> QUANT8_ASYMM map
- {{0, {4, 16, 4, 32, 4, 64, 4, 128}}, {1, {2, 4, 2, 0, 2, 2, 2, 0}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {8, 48}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, depthwise_conv2d_quant8) {
- execute(depthwise_conv2d_quant8::CreateModel,
- depthwise_conv2d_quant8::is_ignored,
- depthwise_conv2d_quant8::examples);
-}
-
-#include "../generated/tests/depthwise_conv2d_quant8.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
-pad0 = Int32Scalar("pad0", 0)
-act = Int32Scalar("act", 0)
-stride = Int32Scalar("stride", 1)
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
-model = model.RelaxedExecution(True)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0]}
-
-output0 = {output: # output 0
- [.875, .875, .875, .875]}
-
-# Instantiate an example
-Example((input0, output0))
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 1});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto pad0 = model->addOperand(&type3);
- auto stride = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto op4 = model->addOperand(&type1);
- // Phase 2, operations
- static float op2_init[] = {0.25f, 0.25f, 0.25f, 0.25f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 4);
- static float op3_init[] = {0.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t pad0_init[] = {0};
- model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
- static int32_t stride_init[] = {1};
- model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.875f, 0.875f, 0.875f, 0.875f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float) {
- execute(conv_float::CreateModel,
- conv_float::is_ignored,
- conv_float::examples);
-}
-
-#include "../generated/tests/conv_float.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-i0 = Input("i0", ("TENSOR_FLOAT32", [2, 2])) # input 0
-
-o1 = Output("o1", ("TENSOR_FLOAT32", [1, 2])) # output for model1
-o2 = Output("o2", ("TENSOR_FLOAT32", [2, 1])) # output for model2
-o3 = Output("o3", ("TENSOR_FLOAT32", [1])) # output for model3
-
-model1 = Model().Operation("MEAN", i0, [0], 1).To(o1) # along axis 0, keep_dim=True
-model2 = Model().Operation("MEAN", i0, [1], 1).To(o2) # along axis 1, keep_dim=True
-model3 = Model().Operation("MEAN", i0, [0, 1], 0).To(o3) # along both axis, keep_dim=False
-
-inputs1 = {i0: [1, 2, 3, 4]}
-outputs11 = {o1: [4, 6]}
-outputs12 = {o2: [3, 7]}
-outputs13 = {o3: [10]}
-
-inputs2 = {i0: [-1, -2, -3, -4]}
-outputs21 = {o1: [-4, -6]}
-outputs22 = {o2: [-3, -7]}
-outputs23 = {o3: [-10]}
-
-Example((inputs1, outputs11), model=model1)
-Example((inputs1, outputs12), model=model2)
-Example((inputs1, outputs13), model=model3)
-
-Example((inputs2, outputs21), model=model1)
-Example((inputs2, outputs22), model=model2)
-Example((inputs2, outputs23), model=model3)
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: mean_implicit.mod.py). Do not edit
-// clang-format off
-// Generated file (from: mean_implicit.mod.py). Do not edit
-// clang-format off
-// Generated file (from: mean_implicit.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace mean_implicit {
-// Generated mean_implicit test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace mean_implicit
-
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {2, 1});
- OperandType type3(Type::TENSOR_FLOAT32, {1});
- OperandType type4(Type::TENSOR_INT32, {1});
- OperandType type5(Type::INT32, {});
- OperandType type6(Type::TENSOR_INT32, {2});
- // Phase 1, operands
- auto i0 = model->addOperand(&type0);
- auto param = model->addOperand(&type4);
- auto param1 = model->addOperand(&type5);
- auto o1 = model->addOperand(&type1);
- // Phase 2, operations
- static int32_t param_init[] = {0};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_MEAN, {i0, param, param1}, {o1});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {i0},
- {o1});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {4.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-1.0f, -2.0f, -3.0f, -4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-4.0f, -6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, mean_implicit) {
- execute(mean_implicit::CreateModel,
- mean_implicit::is_ignored,
- mean_implicit::examples);
-}
-
-void CreateModel_2(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {2, 1});
- OperandType type3(Type::TENSOR_FLOAT32, {1});
- OperandType type4(Type::TENSOR_INT32, {1});
- OperandType type5(Type::INT32, {});
- OperandType type6(Type::TENSOR_INT32, {2});
- // Phase 1, operands
- auto i0 = model->addOperand(&type0);
- auto param2 = model->addOperand(&type4);
- auto param3 = model->addOperand(&type5);
- auto o2 = model->addOperand(&type2);
- // Phase 2, operations
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t param3_init[] = {1};
- model->setOperandValue(param3, param3_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_MEAN, {i0, param2, param3}, {o2});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {i0},
- {o2});
- assert(model->isValid());
-}
-
-bool is_ignored_2(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_2 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {3.0f, 7.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-1.0f, -2.0f, -3.0f, -4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-3.0f, -7.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, mean_implicit_2) {
- execute(mean_implicit::CreateModel_2,
- mean_implicit::is_ignored_2,
- mean_implicit::examples_2);
-}
-
-void CreateModel_3(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {2, 1});
- OperandType type3(Type::TENSOR_FLOAT32, {1});
- OperandType type4(Type::TENSOR_INT32, {1});
- OperandType type5(Type::INT32, {});
- OperandType type6(Type::TENSOR_INT32, {2});
- // Phase 1, operands
- auto i0 = model->addOperand(&type0);
- auto param4 = model->addOperand(&type6);
- auto param5 = model->addOperand(&type5);
- auto o3 = model->addOperand(&type3);
- // Phase 2, operations
- static int32_t param4_init[] = {0, 1};
- model->setOperandValue(param4, param4_init, sizeof(int32_t) * 2);
- static int32_t param5_init[] = {0};
- model->setOperandValue(param5, param5_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_MEAN, {i0, param4, param5}, {o3});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {i0},
- {o3});
- assert(model->isValid());
-}
-
-bool is_ignored_3(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_3 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {10.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-1.0f, -2.0f, -3.0f, -4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-10.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, mean_implicit_3) {
- execute(mean_implicit::CreateModel_3,
- mean_implicit::is_ignored_3,
- mean_implicit::examples_3);
-}
-
-#include "../generated/tests/mean_implicit.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-act = Int32Scalar("act", 0) # None activation
-layout = Int32Scalar("layout", 0) # NHWC
-
-model = model.Operation("CONV_2D", i1, f1, b1, 1, 1, 1, act, layout).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4, 5, 6, 7, 8]}
-
-output0 = {output: # output 0
- [204, 120, 94, 104, 70, 164, 23, 112]}
-
-quant8 = DataTypeConverter().Identify({
- i1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
- f1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
- b1: ("TENSOR_INT32", 0.125, 0),
- output: ("TENSOR_QUANT8_ASYMM", 2, 100)
-})
-
-# Instantiate an example
-Example(
- (input0, output0)
-).AddVariations(
- ("NCHW", [i1, f1, output], [layout])
-).AddVariations(
- ("relu", [output], [act]),
- ("relu6", [output], [act]),
- includeDefault=False
-).AddVariations(
- ("as_input", [f1])
-).AddVariations(
- "relaxed", quant8
-)
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-void CreateModel_relu(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu) {
- execute(conv_float::CreateModel_relu,
- conv_float::is_ignored_relu,
- conv_float::examples_relu);
-}
-
-void CreateModel_relu_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_relaxed) {
- execute(conv_float::CreateModel_relu_relaxed,
- conv_float::is_ignored_relu_relaxed,
- conv_float::examples_relu_relaxed);
-}
-
-void CreateModel_relu_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_quant8) {
- execute(conv_float::CreateModel_relu_quant8,
- conv_float::is_ignored_relu_quant8,
- conv_float::examples_relu_quant8);
-}
-
-void CreateModel_relu_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_weight_as_input) {
- execute(conv_float::CreateModel_relu_weight_as_input,
- conv_float::is_ignored_relu_weight_as_input,
- conv_float::examples_relu_weight_as_input);
-}
-
-void CreateModel_relu_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_relu_weight_as_input_relaxed,
- conv_float::is_ignored_relu_weight_as_input_relaxed,
- conv_float::examples_relu_weight_as_input_relaxed);
-}
-
-void CreateModel_relu_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_weight_as_input_quant8) {
- execute(conv_float::CreateModel_relu_weight_as_input_quant8,
- conv_float::is_ignored_relu_weight_as_input_quant8,
- conv_float::examples_relu_weight_as_input_quant8);
-}
-
-void CreateModel_relu6(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6) {
- execute(conv_float::CreateModel_relu6,
- conv_float::is_ignored_relu6,
- conv_float::examples_relu6);
-}
-
-void CreateModel_relu6_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_relaxed) {
- execute(conv_float::CreateModel_relu6_relaxed,
- conv_float::is_ignored_relu6_relaxed,
- conv_float::examples_relu6_relaxed);
-}
-
-void CreateModel_relu6_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_quant8) {
- execute(conv_float::CreateModel_relu6_quant8,
- conv_float::is_ignored_relu6_quant8,
- conv_float::examples_relu6_quant8);
-}
-
-void CreateModel_relu6_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_weight_as_input) {
- execute(conv_float::CreateModel_relu6_weight_as_input,
- conv_float::is_ignored_relu6_weight_as_input,
- conv_float::examples_relu6_weight_as_input);
-}
-
-void CreateModel_relu6_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_relu6_weight_as_input_relaxed,
- conv_float::examples_relu6_weight_as_input_relaxed);
-}
-
-void CreateModel_relu6_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_weight_as_input_quant8) {
- execute(conv_float::CreateModel_relu6_weight_as_input_quant8,
- conv_float::is_ignored_relu6_weight_as_input_quant8,
- conv_float::examples_relu6_weight_as_input_quant8);
-}
-
-void CreateModel_nchw_relu(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu) {
- execute(conv_float::CreateModel_nchw_relu,
- conv_float::is_ignored_nchw_relu,
- conv_float::examples_nchw_relu);
-}
-
-void CreateModel_nchw_relu_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_relaxed) {
- execute(conv_float::CreateModel_nchw_relu_relaxed,
- conv_float::is_ignored_nchw_relu_relaxed,
- conv_float::examples_nchw_relu_relaxed);
-}
-
-void CreateModel_nchw_relu_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_quant8) {
- execute(conv_float::CreateModel_nchw_relu_quant8,
- conv_float::is_ignored_nchw_relu_quant8,
- conv_float::examples_nchw_relu_quant8);
-}
-
-void CreateModel_nchw_relu_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_weight_as_input) {
- execute(conv_float::CreateModel_nchw_relu_weight_as_input,
- conv_float::is_ignored_nchw_relu_weight_as_input,
- conv_float::examples_nchw_relu_weight_as_input);
-}
-
-void CreateModel_nchw_relu_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_nchw_relu_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu_weight_as_input_relaxed,
- conv_float::examples_nchw_relu_weight_as_input_relaxed);
-}
-
-void CreateModel_nchw_relu_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_weight_as_input_quant8) {
- execute(conv_float::CreateModel_nchw_relu_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu_weight_as_input_quant8,
- conv_float::examples_nchw_relu_weight_as_input_quant8);
-}
-
-void CreateModel_nchw_relu6(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6) {
- execute(conv_float::CreateModel_nchw_relu6,
- conv_float::is_ignored_nchw_relu6,
- conv_float::examples_nchw_relu6);
-}
-
-void CreateModel_nchw_relu6_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_relaxed) {
- execute(conv_float::CreateModel_nchw_relu6_relaxed,
- conv_float::is_ignored_nchw_relu6_relaxed,
- conv_float::examples_nchw_relu6_relaxed);
-}
-
-void CreateModel_nchw_relu6_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_quant8) {
- execute(conv_float::CreateModel_nchw_relu6_quant8,
- conv_float::is_ignored_nchw_relu6_quant8,
- conv_float::examples_nchw_relu6_quant8);
-}
-
-void CreateModel_nchw_relu6_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_weight_as_input) {
- execute(conv_float::CreateModel_nchw_relu6_weight_as_input,
- conv_float::is_ignored_nchw_relu6_weight_as_input,
- conv_float::examples_nchw_relu6_weight_as_input);
-}
-
-void CreateModel_nchw_relu6_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_nchw_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu6_weight_as_input_relaxed,
- conv_float::examples_nchw_relu6_weight_as_input_relaxed);
-}
-
-void CreateModel_nchw_relu6_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_weight_as_input_quant8) {
- execute(conv_float::CreateModel_nchw_relu6_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu6_weight_as_input_quant8,
- conv_float::examples_nchw_relu6_weight_as_input_quant8);
-}
-
-#include "../generated/tests/conv_float.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-
-i0 = Input("i0", ("TENSOR_FLOAT32", [2])) # input 0
-i1 = Input("i1", ("TENSOR_FLOAT32", [2])) # input 0
-i2 = Input("i2", ("TENSOR_FLOAT32", [2])) # input 0
-i3 = Input("i3", ("TENSOR_FLOAT32", [2])) # input 0
-i4 = Input("i4", ("TENSOR_FLOAT32", [2])) # input 0
-i5 = Input("i5", ("TENSOR_FLOAT32", [2])) # input 0
-i6 = Input("i6", ("TENSOR_FLOAT32", [2])) # input 0
-i7 = Input("i7", ("TENSOR_FLOAT32", [2])) # input 0
-i8 = Input("i8", ("TENSOR_FLOAT32", [2])) # input 0
-
-t0 = Internal("t0", ("TENSOR_FLOAT32", [2]))
-t1 = Internal("t1", ("TENSOR_FLOAT32", [2]))
-t2 = Internal("t2", ("TENSOR_FLOAT32", [2]))
-t3 = Internal("t3", ("TENSOR_FLOAT32", [2]))
-t4 = Internal("t4", ("TENSOR_FLOAT32", [2]))
-t5 = Internal("t5", ("TENSOR_FLOAT32", [2]))
-t6 = Internal("t6", ("TENSOR_FLOAT32", [2]))
-
-o0 = Output("o0", ("TENSOR_FLOAT32", [2]))
-o1 = Output("o1", ("TENSOR_FLOAT32", [2]))
-o2 = Output("o2", ("TENSOR_FLOAT32", [2]))
-
-p0 = Parameter("p0", ("TENSOR_FLOAT32", [2]), [0.0, 1.0])
-act = Int32Scalar("act", 0)
-
-model.Operation("ADD", o0, o1, act).To(o2)
-model.Operation("ADD", p0, t5, act).To(t6)
-model.Operation("ADD", i2, t0, act).To(t1)
-model.Operation("ADD", i6, p0, act).To(t5)
-model.Operation("ADD", i0, i1, act).To(t0)
-model.Operation("ADD", t1, t3, act).To(t4)
-model.Operation("ADD", t2, i5, act).To(t3)
-model.Operation("ADD", t4, t6, act).To(o0)
-model.Operation("ADD", i3, i4, act).To(t2)
-model.Operation("ADD", i7, i8, act).To(o1)
-
-inputs = {
- i0: [0, 0],
- i1: [0, 0],
- i2: [0, 0],
- i3: [0, 0],
- i4: [0, 0],
- i5: [0, 0],
- i6: [0, 0],
- i7: [0, 0],
- i8: [0, 0]
-}
-
-outputs = {
- o0: [0, 2],
- o1: [0, 0],
- o2: [0, 2]
-}
-
-Example((inputs, outputs))
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: add_internal.mod.py). Do not edit
-// clang-format off
-// Generated file (from: add_internal.mod.py). Do not edit
-// clang-format off
-// Generated file (from: add_internal.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace add_internal {
-// Generated add_internal test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace add_internal
-
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {2});
- OperandType type1(Type::INT32, {});
- // Phase 1, operands
- auto o0 = model->addOperand(&type0);
- auto o1 = model->addOperand(&type0);
- auto act = model->addOperand(&type1);
- auto o2 = model->addOperand(&type0);
- auto p0 = model->addOperand(&type0);
- auto t5 = model->addOperand(&type0);
- auto t6 = model->addOperand(&type0);
- auto i2 = model->addOperand(&type0);
- auto t0 = model->addOperand(&type0);
- auto t1 = model->addOperand(&type0);
- auto i6 = model->addOperand(&type0);
- auto i0 = model->addOperand(&type0);
- auto i1 = model->addOperand(&type0);
- auto t3 = model->addOperand(&type0);
- auto t4 = model->addOperand(&type0);
- auto t2 = model->addOperand(&type0);
- auto i5 = model->addOperand(&type0);
- auto i3 = model->addOperand(&type0);
- auto i4 = model->addOperand(&type0);
- auto i7 = model->addOperand(&type0);
- auto i8 = model->addOperand(&type0);
- // Phase 2, operations
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static float p0_init[] = {0.0f, 1.0f};
- model->setOperandValue(p0, p0_init, sizeof(float) * 2);
- model->addOperation(ANEURALNETWORKS_ADD, {i6, p0, act}, {t5});
- model->addOperation(ANEURALNETWORKS_ADD, {p0, t5, act}, {t6});
- model->addOperation(ANEURALNETWORKS_ADD, {i0, i1, act}, {t0});
- model->addOperation(ANEURALNETWORKS_ADD, {i2, t0, act}, {t1});
- model->addOperation(ANEURALNETWORKS_ADD, {i3, i4, act}, {t2});
- model->addOperation(ANEURALNETWORKS_ADD, {t2, i5, act}, {t3});
- model->addOperation(ANEURALNETWORKS_ADD, {t1, t3, act}, {t4});
- model->addOperation(ANEURALNETWORKS_ADD, {t4, t6, act}, {o0});
- model->addOperation(ANEURALNETWORKS_ADD, {i7, i8, act}, {o1});
- model->addOperation(ANEURALNETWORKS_ADD, {o0, o1, act}, {o2});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {i2, i6, i0, i1, i5, i3, i4, i7, i8},
- {o0, o1, o2});
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.0f, 0.0f}}, {1, {0.0f, 0.0f}}, {2, {0.0f, 0.0f}}, {3, {0.0f, 0.0f}}, {4, {0.0f, 0.0f}}, {5, {0.0f, 0.0f}}, {6, {0.0f, 0.0f}}, {7, {0.0f, 0.0f}}, {8, {0.0f, 0.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.0f, 2.0f}}, {1, {0.0f, 0.0f}}, {2, {0.0f, 2.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, add_internal) {
- execute(add_internal::CreateModel,
- add_internal::is_ignored,
- add_internal::examples);
-}
-
-#include "../generated/tests/add_internal.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model("model_name")
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-act = Int32Scalar("act", 0) # None activation
-layout = Int32Scalar("layout", 0) # NHWC
-pad = Int32Scalar("param", 1)
-stride0 = Int32Scalar("param1", 1)
-stride1 = Int32Scalar("param2", 1)
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad, stride0, stride1, act, layout).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4, 5, 6, 7, 8]}
-
-output0 = {output: # output 0
- [204, 120, 94, 104, 70, 164, 23, 112]}
-
-quant8 = DataTypeConverter(name="quantized").Identify({
- i1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
- f1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
- b1: ("TENSOR_INT32", 0.125, 0),
- output: ("TENSOR_QUANT8_ASYMM", 2, 100)
-})
-nchw = DataLayoutConverter("NCHW", name="nchw_layout").Identify([i1, f1, output], [layout])
-relu = ActivationConverter("relu", name="act").Identify([output], [act])
-relu6 = ActivationConverter("relu6").Identify([output], [act])
-weight_as_input = ParameterAsInputConverter(name="w_as_input").Identify([f1])
-relax = RelaxedModeConverter(True, name="float_relaxed")
-
-# Instantiate an example
-# Will produce cartesian product of
-# [nhwc, nchw_layout] * [act, relu6] * [w_as_param, w_as_input] * [float, float_relaxed, quantized]
-# 24 variations
-Example(
- (input0, output0), name="example_name"
-).AddVariations(
- nchw, defaultName="nhwc"
-).AddVariations(
- relu, relu6, includeDefault=False
-).AddVariations(
- weight_as_input, defaultName="w_as_param"
-).AddVariations(
- relax, quant8, defaultName="float"
-)
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-void CreateModel_model_name_nhwc_act_w_as_param_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_act_w_as_param_float) {
- execute(conv_float::CreateModel_model_name_nhwc_act_w_as_param_float,
- conv_float::is_ignored_model_name_nhwc_act_w_as_param_float,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_param_float);
-}
-
-void CreateModel_model_name_nhwc_act_w_as_param_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_act_w_as_param_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nhwc_act_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_act_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_param_float_relaxed);
-}
-
-void CreateModel_model_name_nhwc_act_w_as_param_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_act_w_as_param_quantized) {
- execute(conv_float::CreateModel_model_name_nhwc_act_w_as_param_quantized,
- conv_float::is_ignored_model_name_nhwc_act_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_param_quantized);
-}
-
-void CreateModel_model_name_nhwc_act_w_as_input_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_act_w_as_input_float) {
- execute(conv_float::CreateModel_model_name_nhwc_act_w_as_input_float,
- conv_float::is_ignored_model_name_nhwc_act_w_as_input_float,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_input_float);
-}
-
-void CreateModel_model_name_nhwc_act_w_as_input_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_act_w_as_input_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nhwc_act_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_act_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_input_float_relaxed);
-}
-
-void CreateModel_model_name_nhwc_act_w_as_input_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_act_w_as_input_quantized) {
- execute(conv_float::CreateModel_model_name_nhwc_act_w_as_input_quantized,
- conv_float::is_ignored_model_name_nhwc_act_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_input_quantized);
-}
-
-void CreateModel_model_name_nhwc_relu6_w_as_param_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_relu6_w_as_param_float) {
- execute(conv_float::CreateModel_model_name_nhwc_relu6_w_as_param_float,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_param_float,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_param_float);
-}
-
-void CreateModel_model_name_nhwc_relu6_w_as_param_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_relu6_w_as_param_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nhwc_relu6_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_param_float_relaxed);
-}
-
-void CreateModel_model_name_nhwc_relu6_w_as_param_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_relu6_w_as_param_quantized) {
- execute(conv_float::CreateModel_model_name_nhwc_relu6_w_as_param_quantized,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_param_quantized);
-}
-
-void CreateModel_model_name_nhwc_relu6_w_as_input_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_relu6_w_as_input_float) {
- execute(conv_float::CreateModel_model_name_nhwc_relu6_w_as_input_float,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_input_float,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_input_float);
-}
-
-void CreateModel_model_name_nhwc_relu6_w_as_input_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_relu6_w_as_input_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nhwc_relu6_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_input_float_relaxed);
-}
-
-void CreateModel_model_name_nhwc_relu6_w_as_input_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nhwc_relu6_w_as_input_quantized) {
- execute(conv_float::CreateModel_model_name_nhwc_relu6_w_as_input_quantized,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_input_quantized);
-}
-
-void CreateModel_model_name_nchw_layout_act_w_as_param_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_act_w_as_param_float) {
- execute(conv_float::CreateModel_model_name_nchw_layout_act_w_as_param_float,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_param_float,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_param_float);
-}
-
-void CreateModel_model_name_nchw_layout_act_w_as_param_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_act_w_as_param_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nchw_layout_act_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_param_float_relaxed);
-}
-
-void CreateModel_model_name_nchw_layout_act_w_as_param_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_act_w_as_param_quantized) {
- execute(conv_float::CreateModel_model_name_nchw_layout_act_w_as_param_quantized,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_param_quantized);
-}
-
-void CreateModel_model_name_nchw_layout_act_w_as_input_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_act_w_as_input_float) {
- execute(conv_float::CreateModel_model_name_nchw_layout_act_w_as_input_float,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_input_float,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_input_float);
-}
-
-void CreateModel_model_name_nchw_layout_act_w_as_input_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_act_w_as_input_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nchw_layout_act_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_input_float_relaxed);
-}
-
-void CreateModel_model_name_nchw_layout_act_w_as_input_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_act_w_as_input_quantized) {
- execute(conv_float::CreateModel_model_name_nchw_layout_act_w_as_input_quantized,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_input_quantized);
-}
-
-void CreateModel_model_name_nchw_layout_relu6_w_as_param_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_relu6_w_as_param_float) {
- execute(conv_float::CreateModel_model_name_nchw_layout_relu6_w_as_param_float,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_param_float,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_param_float);
-}
-
-void CreateModel_model_name_nchw_layout_relu6_w_as_param_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_relu6_w_as_param_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nchw_layout_relu6_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_param_float_relaxed);
-}
-
-void CreateModel_model_name_nchw_layout_relu6_w_as_param_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_relu6_w_as_param_quantized) {
- execute(conv_float::CreateModel_model_name_nchw_layout_relu6_w_as_param_quantized,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_param_quantized);
-}
-
-void CreateModel_model_name_nchw_layout_relu6_w_as_input_float(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_relu6_w_as_input_float) {
- execute(conv_float::CreateModel_model_name_nchw_layout_relu6_w_as_input_float,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_input_float,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_input_float);
-}
-
-void CreateModel_model_name_nchw_layout_relu6_w_as_input_float_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_relu6_w_as_input_float_relaxed) {
- execute(conv_float::CreateModel_model_name_nchw_layout_relu6_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_input_float_relaxed);
-}
-
-void CreateModel_model_name_nchw_layout_relu6_w_as_input_quantized(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_model_name_example_name_nchw_layout_relu6_w_as_input_quantized) {
- execute(conv_float::CreateModel_model_name_nchw_layout_relu6_w_as_input_quantized,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_input_quantized);
-}
-
-#include "../generated/tests/conv_float.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
-pad0 = Int32Scalar("pad0", 0)
-act = Int32Scalar("act", 0)
-stride = Int32Scalar("stride", 1)
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
-model = model.RelaxedExecution(True)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0]}
-
-output0 = {output: # output 0
- [.875, .875, .875, .875]}
-
-# Instantiate an example
-Example((input0, output0))
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_quant8.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_quant8.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_quant8.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace conv_quant8 {
-// Generated conv_quant8 test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_quant8
-
-void CreateModel(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 3, 1});
- OperandType type1(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto pad0 = model->addOperand(&type3);
- auto stride = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto op4 = model->addOperand(&type1);
- // Phase 2, operations
- static float op2_init[] = {0.25f, 0.25f, 0.25f, 0.25f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 4);
- static float op3_init[] = {0.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t pad0_init[] = {0};
- model->setOperandValue(pad0, pad0_init, sizeof(int32_t) * 1);
- static int32_t stride_init[] = {1};
- model->setOperandValue(stride, stride_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.875f, 0.875f, 0.875f, 0.875f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_quant8) {
- execute(conv_quant8::CreateModel,
- conv_quant8::is_ignored,
- conv_quant8::examples);
-}
-
-#include "../generated/tests/conv_quant8.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-act = Int32Scalar("act", 0) # None activation
-layout = Int32Scalar("layout", 0) # NHWC
-pad = Int32Scalar("param", 1)
-stride0 = Int32Scalar("param1", 1)
-stride1 = Int32Scalar("param2", 1)
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad, stride0, stride1, act, layout).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4, 5, 6, 7, 8]}
-
-output0 = {output: # output 0
- [204, 120, 94, 104, 70, 164, 23, 112]}
-
-quant8 = DataTypeConverter().Identify({
- i1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
- f1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
- b1: ("TENSOR_INT32", 0.125, 0),
- output: ("TENSOR_QUANT8_ASYMM", 2, 100)
-})
-
-# Instantiate an example
-Example((input0, output0)).AddNchw(i1, f1, output, layout).AddAllActivations(
- output, act).AddInput(f1).AddVariations(RelaxedModeConverter(True), quant8)
+++ /dev/null
-Output CTS model: -
-Output example:-
-Output CTS test: -
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-#include "../../TestGenerated.h"
-
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-void CreateModel_none(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_none(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_none) {
- execute(conv_float::CreateModel_none,
- conv_float::is_ignored_none,
- conv_float::examples_none);
-}
-
-void CreateModel_none_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_none_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_none_relaxed) {
- execute(conv_float::CreateModel_none_relaxed,
- conv_float::is_ignored_none_relaxed,
- conv_float::examples_none_relaxed);
-}
-
-void CreateModel_none_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_none_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_none_quant8) {
- execute(conv_float::CreateModel_none_quant8,
- conv_float::is_ignored_none_quant8,
- conv_float::examples_none_quant8);
-}
-
-void CreateModel_none_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_none_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_none_weight_as_input) {
- execute(conv_float::CreateModel_none_weight_as_input,
- conv_float::is_ignored_none_weight_as_input,
- conv_float::examples_none_weight_as_input);
-}
-
-void CreateModel_none_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_none_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_none_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_none_weight_as_input_relaxed,
- conv_float::is_ignored_none_weight_as_input_relaxed,
- conv_float::examples_none_weight_as_input_relaxed);
-}
-
-void CreateModel_none_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_none_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_none_weight_as_input_quant8) {
- execute(conv_float::CreateModel_none_weight_as_input_quant8,
- conv_float::is_ignored_none_weight_as_input_quant8,
- conv_float::examples_none_weight_as_input_quant8);
-}
-
-void CreateModel_relu(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu) {
- execute(conv_float::CreateModel_relu,
- conv_float::is_ignored_relu,
- conv_float::examples_relu);
-}
-
-void CreateModel_relu_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_relaxed) {
- execute(conv_float::CreateModel_relu_relaxed,
- conv_float::is_ignored_relu_relaxed,
- conv_float::examples_relu_relaxed);
-}
-
-void CreateModel_relu_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_quant8) {
- execute(conv_float::CreateModel_relu_quant8,
- conv_float::is_ignored_relu_quant8,
- conv_float::examples_relu_quant8);
-}
-
-void CreateModel_relu_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_weight_as_input) {
- execute(conv_float::CreateModel_relu_weight_as_input,
- conv_float::is_ignored_relu_weight_as_input,
- conv_float::examples_relu_weight_as_input);
-}
-
-void CreateModel_relu_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_relu_weight_as_input_relaxed,
- conv_float::is_ignored_relu_weight_as_input_relaxed,
- conv_float::examples_relu_weight_as_input_relaxed);
-}
-
-void CreateModel_relu_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu_weight_as_input_quant8) {
- execute(conv_float::CreateModel_relu_weight_as_input_quant8,
- conv_float::is_ignored_relu_weight_as_input_quant8,
- conv_float::examples_relu_weight_as_input_quant8);
-}
-
-void CreateModel_relu1(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu1(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu1) {
- execute(conv_float::CreateModel_relu1,
- conv_float::is_ignored_relu1,
- conv_float::examples_relu1);
-}
-
-void CreateModel_relu1_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu1_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu1_relaxed) {
- execute(conv_float::CreateModel_relu1_relaxed,
- conv_float::is_ignored_relu1_relaxed,
- conv_float::examples_relu1_relaxed);
-}
-
-void CreateModel_relu1_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu1_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu1_quant8) {
- execute(conv_float::CreateModel_relu1_quant8,
- conv_float::is_ignored_relu1_quant8,
- conv_float::examples_relu1_quant8);
-}
-
-void CreateModel_relu1_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu1_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu1_weight_as_input) {
- execute(conv_float::CreateModel_relu1_weight_as_input,
- conv_float::is_ignored_relu1_weight_as_input,
- conv_float::examples_relu1_weight_as_input);
-}
-
-void CreateModel_relu1_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu1_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu1_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_relu1_weight_as_input_relaxed,
- conv_float::is_ignored_relu1_weight_as_input_relaxed,
- conv_float::examples_relu1_weight_as_input_relaxed);
-}
-
-void CreateModel_relu1_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu1_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu1_weight_as_input_quant8) {
- execute(conv_float::CreateModel_relu1_weight_as_input_quant8,
- conv_float::is_ignored_relu1_weight_as_input_quant8,
- conv_float::examples_relu1_weight_as_input_quant8);
-}
-
-void CreateModel_relu6(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6) {
- execute(conv_float::CreateModel_relu6,
- conv_float::is_ignored_relu6,
- conv_float::examples_relu6);
-}
-
-void CreateModel_relu6_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_relaxed) {
- execute(conv_float::CreateModel_relu6_relaxed,
- conv_float::is_ignored_relu6_relaxed,
- conv_float::examples_relu6_relaxed);
-}
-
-void CreateModel_relu6_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_quant8) {
- execute(conv_float::CreateModel_relu6_quant8,
- conv_float::is_ignored_relu6_quant8,
- conv_float::examples_relu6_quant8);
-}
-
-void CreateModel_relu6_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_weight_as_input) {
- execute(conv_float::CreateModel_relu6_weight_as_input,
- conv_float::is_ignored_relu6_weight_as_input,
- conv_float::examples_relu6_weight_as_input);
-}
-
-void CreateModel_relu6_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_relu6_weight_as_input_relaxed,
- conv_float::examples_relu6_weight_as_input_relaxed);
-}
-
-void CreateModel_relu6_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {0};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_relu6_weight_as_input_quant8) {
- execute(conv_float::CreateModel_relu6_weight_as_input_quant8,
- conv_float::is_ignored_relu6_weight_as_input_quant8,
- conv_float::examples_relu6_weight_as_input_quant8);
-}
-
-void CreateModel_nchw_none(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_none(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_none) {
- execute(conv_float::CreateModel_nchw_none,
- conv_float::is_ignored_nchw_none,
- conv_float::examples_nchw_none);
-}
-
-void CreateModel_nchw_none_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_none_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_none_relaxed) {
- execute(conv_float::CreateModel_nchw_none_relaxed,
- conv_float::is_ignored_nchw_none_relaxed,
- conv_float::examples_nchw_none_relaxed);
-}
-
-void CreateModel_nchw_none_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_none_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_none_quant8) {
- execute(conv_float::CreateModel_nchw_none_quant8,
- conv_float::is_ignored_nchw_none_quant8,
- conv_float::examples_nchw_none_quant8);
-}
-
-void CreateModel_nchw_none_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_none_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_none_weight_as_input) {
- execute(conv_float::CreateModel_nchw_none_weight_as_input,
- conv_float::is_ignored_nchw_none_weight_as_input,
- conv_float::examples_nchw_none_weight_as_input);
-}
-
-void CreateModel_nchw_none_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_none_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_none_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_nchw_none_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_none_weight_as_input_relaxed,
- conv_float::examples_nchw_none_weight_as_input_relaxed);
-}
-
-void CreateModel_nchw_none_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {0};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_none_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_none_weight_as_input_quant8) {
- execute(conv_float::CreateModel_nchw_none_weight_as_input_quant8,
- conv_float::is_ignored_nchw_none_weight_as_input_quant8,
- conv_float::examples_nchw_none_weight_as_input_quant8);
-}
-
-void CreateModel_nchw_relu(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu) {
- execute(conv_float::CreateModel_nchw_relu,
- conv_float::is_ignored_nchw_relu,
- conv_float::examples_nchw_relu);
-}
-
-void CreateModel_nchw_relu_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_relaxed) {
- execute(conv_float::CreateModel_nchw_relu_relaxed,
- conv_float::is_ignored_nchw_relu_relaxed,
- conv_float::examples_nchw_relu_relaxed);
-}
-
-void CreateModel_nchw_relu_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_quant8) {
- execute(conv_float::CreateModel_nchw_relu_quant8,
- conv_float::is_ignored_nchw_relu_quant8,
- conv_float::examples_nchw_relu_quant8);
-}
-
-void CreateModel_nchw_relu_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_weight_as_input) {
- execute(conv_float::CreateModel_nchw_relu_weight_as_input,
- conv_float::is_ignored_nchw_relu_weight_as_input,
- conv_float::examples_nchw_relu_weight_as_input);
-}
-
-void CreateModel_nchw_relu_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_nchw_relu_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu_weight_as_input_relaxed,
- conv_float::examples_nchw_relu_weight_as_input_relaxed);
-}
-
-void CreateModel_nchw_relu_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {1};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu_weight_as_input_quant8) {
- execute(conv_float::CreateModel_nchw_relu_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu_weight_as_input_quant8,
- conv_float::examples_nchw_relu_weight_as_input_quant8);
-}
-
-void CreateModel_nchw_relu1(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu1(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu1) {
- execute(conv_float::CreateModel_nchw_relu1,
- conv_float::is_ignored_nchw_relu1,
- conv_float::examples_nchw_relu1);
-}
-
-void CreateModel_nchw_relu1_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu1_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu1_relaxed) {
- execute(conv_float::CreateModel_nchw_relu1_relaxed,
- conv_float::is_ignored_nchw_relu1_relaxed,
- conv_float::examples_nchw_relu1_relaxed);
-}
-
-void CreateModel_nchw_relu1_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu1_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu1_quant8) {
- execute(conv_float::CreateModel_nchw_relu1_quant8,
- conv_float::is_ignored_nchw_relu1_quant8,
- conv_float::examples_nchw_relu1_quant8);
-}
-
-void CreateModel_nchw_relu1_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu1_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu1_weight_as_input) {
- execute(conv_float::CreateModel_nchw_relu1_weight_as_input,
- conv_float::is_ignored_nchw_relu1_weight_as_input,
- conv_float::examples_nchw_relu1_weight_as_input);
-}
-
-void CreateModel_nchw_relu1_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu1_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu1_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_nchw_relu1_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu1_weight_as_input_relaxed,
- conv_float::examples_nchw_relu1_weight_as_input_relaxed);
-}
-
-void CreateModel_nchw_relu1_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {2};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu1_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu1_weight_as_input_quant8) {
- execute(conv_float::CreateModel_nchw_relu1_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu1_weight_as_input_quant8,
- conv_float::examples_nchw_relu1_weight_as_input_quant8);
-}
-
-void CreateModel_nchw_relu6(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6) {
- execute(conv_float::CreateModel_nchw_relu6,
- conv_float::is_ignored_nchw_relu6,
- conv_float::examples_nchw_relu6);
-}
-
-void CreateModel_nchw_relu6_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op2_init[] = {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f};
- model->setOperandValue(op2, op2_init, sizeof(float) * 16);
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_relaxed) {
- execute(conv_float::CreateModel_nchw_relu6_relaxed,
- conv_float::is_ignored_nchw_relu6_relaxed,
- conv_float::examples_nchw_relu6_relaxed);
-}
-
-void CreateModel_nchw_relu6_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static uint8_t op2_init[] = {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132};
- model->setOperandValue(op2, op2_init, sizeof(uint8_t) * 16);
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_quant8) {
- execute(conv_float::CreateModel_nchw_relu6_quant8,
- conv_float::is_ignored_nchw_relu6_quant8,
- conv_float::examples_nchw_relu6_quant8);
-}
-
-void CreateModel_nchw_relu6_weight_as_input(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_weight_as_input) {
- execute(conv_float::CreateModel_nchw_relu6_weight_as_input,
- conv_float::is_ignored_nchw_relu6_weight_as_input,
- conv_float::examples_nchw_relu6_weight_as_input);
-}
-
-void CreateModel_nchw_relu6_weight_as_input_relaxed(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type0);
- auto op2 = model->addOperand(&type1);
- auto op3 = model->addOperand(&type2);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type0);
- // Phase 2, operations
- static float op3_init[] = {-200.0f};
- model->setOperandValue(op3, op3_init, sizeof(float) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- // Phase 4: set relaxed execution
- model->relaxComputationFloat32toFloat16(true);
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_weight_as_input_relaxed) {
- execute(conv_float::CreateModel_nchw_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu6_weight_as_input_relaxed,
- conv_float::examples_nchw_relu6_weight_as_input_relaxed);
-}
-
-void CreateModel_nchw_relu6_weight_as_input_quant8(Model *model) {
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
- OperandType type1(Type::TENSOR_FLOAT32, {2, 2, 2, 2});
- OperandType type2(Type::TENSOR_FLOAT32, {1});
- OperandType type3(Type::INT32, {});
- OperandType type4(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 0.5f, 128);
- OperandType type5(Type::TENSOR_QUANT8_ASYMM, {2, 2, 2, 2}, 0.25f, 128);
- OperandType type6(Type::TENSOR_INT32, {1}, 0.125f, 0);
- OperandType type7(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 2}, 2.0f, 100);
- // Phase 1, operands
- auto op1 = model->addOperand(&type4);
- auto op2 = model->addOperand(&type5);
- auto op3 = model->addOperand(&type6);
- auto param = model->addOperand(&type3);
- auto param1 = model->addOperand(&type3);
- auto param2 = model->addOperand(&type3);
- auto act = model->addOperand(&type3);
- auto layout = model->addOperand(&type3);
- auto op4 = model->addOperand(&type7);
- // Phase 2, operations
- static int32_t op3_init[] = {-1600};
- model->setOperandValue(op3, op3_init, sizeof(int32_t) * 1);
- static int32_t param_init[] = {1};
- model->setOperandValue(param, param_init, sizeof(int32_t) * 1);
- static int32_t param1_init[] = {1};
- model->setOperandValue(param1, param1_init, sizeof(int32_t) * 1);
- static int32_t param2_init[] = {1};
- model->setOperandValue(param2, param2_init, sizeof(int32_t) * 1);
- static int32_t act_init[] = {3};
- model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- static int32_t layout_init[] = {1};
- model->setOperandValue(layout, layout_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_CONV_2D, {op1, op2, op3, param, param1, param2, act, layout}, {op4});
- // Phase 3, inputs and outputs
- model->identifyInputsAndOutputs(
- {op1, op2},
- {op4});
- assert(model->isValid());
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(GeneratedTests, conv_float_nchw_relu6_weight_as_input_quant8) {
- execute(conv_float::CreateModel_nchw_relu6_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu6_weight_as_input_quant8,
- conv_float::examples_nchw_relu6_weight_as_input_quant8);
-}
-
-#include "../generated/tests/conv_float.mod.py.cpp"
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-
-n_batch = 1
-n_input = 2
-# n_cell and n_output have the same size when there is no projection.
-n_cell = 4
-n_output = 4
-
-input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_input))
-
-input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_input))
-
-recurrent_to_input_weights = Input("recurrent_to_intput_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_forget_weights = Input("recurrent_to_forget_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-recurrent_to_output_weights = Input("recurrent_to_output_weights", "TENSOR_FLOAT32", "{%d, %d}" % (n_cell, n_output))
-
-cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32", "{0}")
-cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32", "{0}")
-cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32", "{0}")
-
-input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32", "{%d}"%(n_cell))
-
-projection_weights = Input("projection_weights", "TENSOR_FLOAT32", "{0,0}")
-projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
-
-output_state_in = Input("output_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
-
-activation_param = Int32Scalar("activation_param", 4) # Tanh
-cell_clip_param = Float32Scalar("cell_clip_param", 0.)
-proj_clip_param = Float32Scalar("proj_clip_param", 0.)
-
-scratch_buffer = IgnoredOutput("scratch_buffer", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, (n_cell * 4)))
-output_state_out = Output("output_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-cell_state_out = Output("cell_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_cell))
-output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (n_batch, n_output))
-
-model = model.Operation("LSTM",
- input,
-
- input_to_input_weights,
- input_to_forget_weights,
- input_to_cell_weights,
- input_to_output_weights,
-
- recurrent_to_input_weights,
- recurrent_to_forget_weights,
- recurrent_to_cell_weights,
- recurrent_to_output_weights,
-
- cell_to_input_weights,
- cell_to_forget_weights,
- cell_to_output_weights,
-
- input_gate_bias,
- forget_gate_bias,
- cell_gate_bias,
- output_gate_bias,
-
- projection_weights,
- projection_bias,
-
- output_state_in,
- cell_state_in,
-
- activation_param,
- cell_clip_param,
- proj_clip_param
-).To([scratch_buffer, output_state_out, cell_state_out, output])
-model = model.RelaxedExecution(True)
-
-# Example 1. Input in operand 0,
-input0 = {input_to_input_weights: [-0.45018822, -0.02338299, -0.0870589, -0.34550029, 0.04266912, -0.15680569, -0.34856534, 0.43890524],
- input_to_forget_weights: [0.09701663, 0.20334584, -0.50592935, -0.31343272, -0.40032279, 0.44781327, 0.01387155, -0.35593212],
- input_to_cell_weights: [-0.50013041, 0.1370284, 0.11810488, 0.2013163, -0.20583314, 0.44344562, 0.22077113, -0.29909778],
- input_to_output_weights: [-0.25065863, -0.28290087, 0.04613829, 0.40525138, 0.44272184, 0.03897077, -0.1556896, 0.19487578],
-
- input_gate_bias: [0.,0.,0.,0.],
- forget_gate_bias: [1.,1.,1.,1.],
- cell_gate_bias: [0.,0.,0.,0.],
- output_gate_bias: [0.,0.,0.,0.],
-
- recurrent_to_input_weights: [
- -0.0063535, -0.2042388, 0.31454784, -0.35746509, 0.28902304, 0.08183324,
- -0.16555229, 0.02286911, -0.13566875, 0.03034258, 0.48091322,
- -0.12528998, 0.24077177, -0.51332325, -0.33502164, 0.10629296],
-
- recurrent_to_cell_weights: [
- -0.3407414, 0.24443203, -0.2078532, 0.26320225, 0.05695659, -0.00123841,
- -0.4744786, -0.35869038, -0.06418842, -0.13502428, -0.501764, 0.22830659,
- -0.46367589, 0.26016325, -0.03894562, -0.16368064],
-
- recurrent_to_forget_weights: [
- -0.48684245, -0.06655136, 0.42224967, 0.2112639, 0.27654213, 0.20864892,
- -0.07646349, 0.45877004, 0.00141793, -0.14609534, 0.36447752, 0.09196436,
- 0.28053468, 0.01560611, -0.20127171, -0.01140004],
-
- recurrent_to_output_weights: [
- 0.43385774, -0.17194885, 0.2718237, 0.09215671, 0.24107647, -0.39835793,
- 0.18212086, 0.01301402, 0.48572797, -0.50656658, 0.20047462, -0.20607421,
- -0.51818722, -0.15390486, 0.0468148, 0.39922136],
-
- cell_to_input_weights: [],
- cell_to_forget_weights: [],
- cell_to_output_weights: [],
-
- projection_weights: [],
- projection_bias: [],
-}
-
-test_input = [2., 3.]
-output_state = [0, 0, 0, 0]
-cell_state = [0, 0, 0, 0]
-golden_output = [-0.02973187, 0.1229473, 0.20885126, -0.15358765,]
-output0 = {
- scratch_buffer: [ 0 for x in range(n_batch * n_cell * 4) ],
- cell_state_out: [ -0.145439, 0.157475, 0.293663, -0.277353 ],
- output_state_out: [ -0.0297319, 0.122947, 0.208851, -0.153588 ],
- output: golden_output
-}
-input0[input] = test_input
-input0[output_state_in] = output_state
-input0[cell_state_in] = cell_state
-Example((input0, output0))
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: lstm_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: lstm_float.mod.py). Do not edit
-// Generated from: lstm_float.mod.py.
-namespace lstm_float {
-// Generated lstm_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace lstm_float
-
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4, 4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {0},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {0},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {0},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {0, 0},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {0},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 4},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::FLOAT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::FLOAT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 16},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 4},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 4},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 4},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::LSTM,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22},
- .outputs = {23, 24, 25, 26},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19};
- const std::vector<uint32_t> outputIndexes = {23, 24, 25, 26};
- std::vector<uint8_t> operandValues = {
- 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {0};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {2.0f, 3.0f}}, {1, {-0.45018822f, -0.02338299f, -0.0870589f, -0.34550029f, 0.04266912f, -0.15680569f, -0.34856534f, 0.43890524f}}, {2, {0.09701663f, 0.20334584f, -0.50592935f, -0.31343272f, -0.40032279f, 0.44781327f, 0.01387155f, -0.35593212f}}, {3, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f, -0.20583314f, 0.44344562f, 0.22077113f, -0.29909778f}}, {4, {-0.25065863f, -0.28290087f, 0.04613829f, 0.40525138f, 0.44272184f, 0.03897077f, -0.1556896f, 0.19487578f}}, {5, {-0.0063535f, -0.2042388f, 0.31454784f, -0.35746509f, 0.28902304f, 0.08183324f, -0.16555229f, 0.02286911f, -0.13566875f, 0.03034258f, 0.48091322f, -0.12528998f, 0.24077177f, -0.51332325f, -0.33502164f, 0.10629296f}}, {6, {-0.48684245f, -0.06655136f, 0.42224967f, 0.2112639f, 0.27654213f, 0.20864892f, -0.07646349f, 0.45877004f, 0.00141793f, -0.14609534f, 0.36447752f, 0.09196436f, 0.28053468f, 0.01560611f, -0.20127171f, -0.01140004f}}, {7, {-0.3407414f, 0.24443203f, -0.2078532f, 0.26320225f, 0.05695659f, -0.00123841f, -0.4744786f, -0.35869038f, -0.06418842f, -0.13502428f, -0.501764f, 0.22830659f, -0.46367589f, 0.26016325f, -0.03894562f, -0.16368064f}}, {8, {0.43385774f, -0.17194885f, 0.2718237f, 0.09215671f, 0.24107647f, -0.39835793f, 0.18212086f, 0.01301402f, 0.48572797f, -0.50656658f, 0.20047462f, -0.20607421f, -0.51818722f, -0.15390486f, 0.0468148f, 0.39922136f}}, {9, {}}, {10, {}}, {11, {}}, {12, {0.0f, 0.0f, 0.0f, 0.0f}}, {13, {1.0f, 1.0f, 1.0f, 1.0f}}, {14, {0.0f, 0.0f, 0.0f, 0.0f}}, {15, {0.0f, 0.0f, 0.0f, 0.0f}}, {16, {}}, {17, {}}, {18, {0.0f, 0.0f, 0.0f, 0.0f}}, {19, {0.0f, 0.0f, 0.0f, 0.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}, {1, {-0.0297319f, 0.122947f, 0.208851f, -0.153588f}}, {2, {-0.145439f, 0.157475f, 0.293663f, -0.277353f}}, {3, {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, lstm_float) {
- generated_tests::Execute(device,
- lstm_float::createTestModel,
- lstm_float::is_ignored,
- lstm_float::examples);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-f1 = Input("op2", "TENSOR_QUANT8_ASYMM", "{1, 2, 2, 2}, 0.5f, 0")
-b1 = Input("op3", "TENSOR_INT32", "{2}, 0.25f, 0")
-pad0 = Int32Scalar("pad0", 0)
-act = Int32Scalar("act", 0)
-stride = Int32Scalar("stride", 1)
-cm = Int32Scalar("channelMultiplier", 1)
-output = Output("op4", "TENSOR_QUANT8_ASYMM", "{1,1,1,2}, 1.f, 0")
-
-model = model.Operation("DEPTHWISE_CONV_2D",
- i1, f1, b1,
- pad0, pad0, pad0, pad0,
- stride, stride,
- cm, act).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [4, 16, 4, 32, 4, 64, 4, 128],
- f1:
- [2, 4, 2, 0, 2, 2, 2, 0],
- b1:
- [0, 0]}
-# (i1 (depthconv) f1)
-output0 = {output: # output 0
- [8, 48]}
-
-# Instantiate an example
-Example((input0, output0))
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
-// clang-format off
-// Generated file (from: depthwise_conv2d_quant8.mod.py). Do not edit
-// Generated from: depthwise_conv2d_quant8.mod.py.
-namespace depthwise_conv2d_quant8 {
-// Generated depthwise_conv2d_quant8 test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace depthwise_conv2d_quant8
-
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 4,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 2,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 1, 1, 2},
- .numberOfConsumers = 0,
- .scale = 1.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::DEPTHWISE_CONV_2D,
- .inputs = {0, 1, 2, 3, 3, 3, 3, 4, 4, 5, 6},
- .outputs = {7},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1, 2};
- const std::vector<uint32_t> outputIndexes = {7};
- std::vector<uint8_t> operandValues = {
- 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {{2, {0, 0}}},
- // int -> QUANT8_ASYMM map
- {{0, {4, 16, 4, 32, 4, 64, 4, 128}}, {1, {2, 4, 2, 0, 2, 2, 2, 0}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {8, 48}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, depthwise_conv2d_quant8) {
- generated_tests::Execute(device,
- depthwise_conv2d_quant8::createTestModel,
- depthwise_conv2d_quant8::is_ignored,
- depthwise_conv2d_quant8::examples);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
-pad0 = Int32Scalar("pad0", 0)
-act = Int32Scalar("act", 0)
-stride = Int32Scalar("stride", 1)
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
-model = model.RelaxedExecution(True)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0]}
-
-output0 = {output: # output 0
- [.875, .875, .875, .875]}
-
-# Instantiate an example
-Example((input0, output0))
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// Generated from: conv_float.mod.py.
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 3, 3, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 4,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 2,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 3, 3, 3, 4, 4, 5},
- .outputs = {6},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {6};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.875f, 0.875f, 0.875f, 0.875f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel,
- conv_float::is_ignored,
- conv_float::examples);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-i0 = Input("i0", ("TENSOR_FLOAT32", [2, 2])) # input 0
-
-o1 = Output("o1", ("TENSOR_FLOAT32", [1, 2])) # output for model1
-o2 = Output("o2", ("TENSOR_FLOAT32", [2, 1])) # output for model2
-o3 = Output("o3", ("TENSOR_FLOAT32", [1])) # output for model3
-
-model1 = Model().Operation("MEAN", i0, [0], 1).To(o1) # along axis 0, keep_dim=True
-model2 = Model().Operation("MEAN", i0, [1], 1).To(o2) # along axis 1, keep_dim=True
-model3 = Model().Operation("MEAN", i0, [0, 1], 0).To(o3) # along both axis, keep_dim=False
-
-inputs1 = {i0: [1, 2, 3, 4]}
-outputs11 = {o1: [4, 6]}
-outputs12 = {o2: [3, 7]}
-outputs13 = {o3: [10]}
-
-inputs2 = {i0: [-1, -2, -3, -4]}
-outputs21 = {o1: [-4, -6]}
-outputs22 = {o2: [-3, -7]}
-outputs23 = {o3: [-10]}
-
-Example((inputs1, outputs11), model=model1)
-Example((inputs1, outputs12), model=model2)
-Example((inputs1, outputs13), model=model3)
-
-Example((inputs2, outputs21), model=model1)
-Example((inputs2, outputs22), model=model2)
-Example((inputs2, outputs23), model=model3)
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: mean_implicit.mod.py). Do not edit
-// clang-format off
-// Generated file (from: mean_implicit.mod.py). Do not edit
-// Generated from: mean_implicit.mod.py.
-namespace mean_implicit {
-// Generated mean_implicit test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace mean_implicit
-
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2},
- .numberOfConsumers = 3,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::MEAN,
- .inputs = {0, 1, 2},
- .outputs = {3},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {3};
- std::vector<uint8_t> operandValues = {
- 0, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {4.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-1.0f, -2.0f, -3.0f, -4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-4.0f, -6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, mean_implicit) {
- generated_tests::Execute(device,
- mean_implicit::createTestModel,
- mean_implicit::is_ignored,
- mean_implicit::examples);
-}
-
-// Create the model
-Model createTestModel_2() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2},
- .numberOfConsumers = 3,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 1},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::MEAN,
- .inputs = {0, 1, 2},
- .outputs = {3},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {3};
- std::vector<uint8_t> operandValues = {
- 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_2(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_2 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {3.0f, 7.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-1.0f, -2.0f, -3.0f, -4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-3.0f, -7.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, mean_implicit_2) {
- generated_tests::Execute(device,
- mean_implicit::createTestModel_2,
- mean_implicit::is_ignored_2,
- mean_implicit::examples_2);
-}
-
-// Create the model
-Model createTestModel_3() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2},
- .numberOfConsumers = 3,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 8},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::MEAN,
- .inputs = {0, 1, 2},
- .outputs = {3},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {3};
- std::vector<uint8_t> operandValues = {
- 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_3(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_3 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {10.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-1.0f, -2.0f, -3.0f, -4.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {-10.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, mean_implicit_3) {
- generated_tests::Execute(device,
- mean_implicit::createTestModel_3,
- mean_implicit::is_ignored_3,
- mean_implicit::examples_3);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-act = Int32Scalar("act", 0) # None activation
-layout = Int32Scalar("layout", 0) # NHWC
-
-model = model.Operation("CONV_2D", i1, f1, b1, 1, 1, 1, act, layout).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4, 5, 6, 7, 8]}
-
-output0 = {output: # output 0
- [204, 120, 94, 104, 70, 164, 23, 112]}
-
-quant8 = DataTypeConverter().Identify({
- i1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
- f1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
- b1: ("TENSOR_INT32", 0.125, 0),
- output: ("TENSOR_QUANT8_ASYMM", 2, 100)
-})
-
-# Instantiate an example
-Example(
- (input0, output0)
-).AddVariations(
- ("NCHW", [i1, f1, output], [layout])
-).AddVariations(
- ("relu", [output], [act]),
- ("relu6", [output], [act]),
- includeDefault=False
-).AddVariations(
- ("as_input", [f1])
-).AddVariations(
- "relaxed", quant8
-)
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// Generated from: conv_float.mod.py.
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-// Create the model
-Model createTestModel_relu() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu,
- conv_float::is_ignored_relu,
- conv_float::examples_relu);
-}
-
-// Create the model
-Model createTestModel_relu_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_relaxed,
- conv_float::is_ignored_relu_relaxed,
- conv_float::examples_relu_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_quant8,
- conv_float::is_ignored_relu_quant8,
- conv_float::examples_relu_quant8);
-}
-
-// Create the model
-Model createTestModel_relu_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_weight_as_input,
- conv_float::is_ignored_relu_weight_as_input,
- conv_float::examples_relu_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_relu_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_weight_as_input_relaxed,
- conv_float::is_ignored_relu_weight_as_input_relaxed,
- conv_float::examples_relu_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_weight_as_input_quant8,
- conv_float::is_ignored_relu_weight_as_input_quant8,
- conv_float::examples_relu_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_relu6() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6,
- conv_float::is_ignored_relu6,
- conv_float::examples_relu6);
-}
-
-// Create the model
-Model createTestModel_relu6_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_relaxed,
- conv_float::is_ignored_relu6_relaxed,
- conv_float::examples_relu6_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu6_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_quant8,
- conv_float::is_ignored_relu6_quant8,
- conv_float::examples_relu6_quant8);
-}
-
-// Create the model
-Model createTestModel_relu6_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_weight_as_input,
- conv_float::is_ignored_relu6_weight_as_input,
- conv_float::examples_relu6_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_relu6_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_relu6_weight_as_input_relaxed,
- conv_float::examples_relu6_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu6_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_weight_as_input_quant8,
- conv_float::is_ignored_relu6_weight_as_input_quant8,
- conv_float::examples_relu6_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu,
- conv_float::is_ignored_nchw_relu,
- conv_float::examples_nchw_relu);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_relaxed,
- conv_float::is_ignored_nchw_relu_relaxed,
- conv_float::examples_nchw_relu_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_quant8,
- conv_float::is_ignored_nchw_relu_quant8,
- conv_float::examples_nchw_relu_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_weight_as_input,
- conv_float::is_ignored_nchw_relu_weight_as_input,
- conv_float::examples_nchw_relu_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu_weight_as_input_relaxed,
- conv_float::examples_nchw_relu_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu_weight_as_input_quant8,
- conv_float::examples_nchw_relu_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6,
- conv_float::is_ignored_nchw_relu6,
- conv_float::examples_nchw_relu6);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_relaxed,
- conv_float::is_ignored_nchw_relu6_relaxed,
- conv_float::examples_nchw_relu6_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_quant8,
- conv_float::is_ignored_nchw_relu6_quant8,
- conv_float::examples_nchw_relu6_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_weight_as_input,
- conv_float::is_ignored_nchw_relu6_weight_as_input,
- conv_float::examples_nchw_relu6_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu6_weight_as_input_relaxed,
- conv_float::examples_nchw_relu6_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu6_weight_as_input_quant8,
- conv_float::examples_nchw_relu6_weight_as_input_quant8);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-
-i0 = Input("i0", ("TENSOR_FLOAT32", [2])) # input 0
-i1 = Input("i1", ("TENSOR_FLOAT32", [2])) # input 0
-i2 = Input("i2", ("TENSOR_FLOAT32", [2])) # input 0
-i3 = Input("i3", ("TENSOR_FLOAT32", [2])) # input 0
-i4 = Input("i4", ("TENSOR_FLOAT32", [2])) # input 0
-i5 = Input("i5", ("TENSOR_FLOAT32", [2])) # input 0
-i6 = Input("i6", ("TENSOR_FLOAT32", [2])) # input 0
-i7 = Input("i7", ("TENSOR_FLOAT32", [2])) # input 0
-i8 = Input("i8", ("TENSOR_FLOAT32", [2])) # input 0
-
-t0 = Internal("t0", ("TENSOR_FLOAT32", [2]))
-t1 = Internal("t1", ("TENSOR_FLOAT32", [2]))
-t2 = Internal("t2", ("TENSOR_FLOAT32", [2]))
-t3 = Internal("t3", ("TENSOR_FLOAT32", [2]))
-t4 = Internal("t4", ("TENSOR_FLOAT32", [2]))
-t5 = Internal("t5", ("TENSOR_FLOAT32", [2]))
-t6 = Internal("t6", ("TENSOR_FLOAT32", [2]))
-
-o0 = Output("o0", ("TENSOR_FLOAT32", [2]))
-o1 = Output("o1", ("TENSOR_FLOAT32", [2]))
-o2 = Output("o2", ("TENSOR_FLOAT32", [2]))
-
-p0 = Parameter("p0", ("TENSOR_FLOAT32", [2]), [0.0, 1.0])
-act = Int32Scalar("act", 0)
-
-model.Operation("ADD", o0, o1, act).To(o2)
-model.Operation("ADD", p0, t5, act).To(t6)
-model.Operation("ADD", i2, t0, act).To(t1)
-model.Operation("ADD", i6, p0, act).To(t5)
-model.Operation("ADD", i0, i1, act).To(t0)
-model.Operation("ADD", t1, t3, act).To(t4)
-model.Operation("ADD", t2, i5, act).To(t3)
-model.Operation("ADD", t4, t6, act).To(o0)
-model.Operation("ADD", i3, i4, act).To(t2)
-model.Operation("ADD", i7, i8, act).To(o1)
-
-inputs = {
- i0: [0, 0],
- i1: [0, 0],
- i2: [0, 0],
- i3: [0, 0],
- i4: [0, 0],
- i5: [0, 0],
- i6: [0, 0],
- i7: [0, 0],
- i8: [0, 0]
-}
-
-outputs = {
- o0: [0, 2],
- o1: [0, 0],
- o2: [0, 2]
-}
-
-Example((inputs, outputs))
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: add_internal.mod.py). Do not edit
-// clang-format off
-// Generated file (from: add_internal.mod.py). Do not edit
-// Generated from: add_internal.mod.py.
-namespace add_internal {
-// Generated add_internal test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace add_internal
-
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 10,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 2,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 8},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::ADD,
- .inputs = {10, 4, 2},
- .outputs = {5},
- },
- {
- .type = OperationType::ADD,
- .inputs = {4, 5, 2},
- .outputs = {6},
- },
- {
- .type = OperationType::ADD,
- .inputs = {11, 12, 2},
- .outputs = {8},
- },
- {
- .type = OperationType::ADD,
- .inputs = {7, 8, 2},
- .outputs = {9},
- },
- {
- .type = OperationType::ADD,
- .inputs = {17, 18, 2},
- .outputs = {15},
- },
- {
- .type = OperationType::ADD,
- .inputs = {15, 16, 2},
- .outputs = {13},
- },
- {
- .type = OperationType::ADD,
- .inputs = {9, 13, 2},
- .outputs = {14},
- },
- {
- .type = OperationType::ADD,
- .inputs = {14, 6, 2},
- .outputs = {0},
- },
- {
- .type = OperationType::ADD,
- .inputs = {19, 20, 2},
- .outputs = {1},
- },
- {
- .type = OperationType::ADD,
- .inputs = {0, 1, 2},
- .outputs = {3},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {7, 10, 11, 12, 16, 17, 18, 19, 20};
- const std::vector<uint32_t> outputIndexes = {0, 1, 3};
- std::vector<uint8_t> operandValues = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 63
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.0f, 0.0f}}, {1, {0.0f, 0.0f}}, {2, {0.0f, 0.0f}}, {3, {0.0f, 0.0f}}, {4, {0.0f, 0.0f}}, {5, {0.0f, 0.0f}}, {6, {0.0f, 0.0f}}, {7, {0.0f, 0.0f}}, {8, {0.0f, 0.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.0f, 2.0f}}, {1, {0.0f, 0.0f}}, {2, {0.0f, 2.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, add_internal) {
- generated_tests::Execute(device,
- add_internal::createTestModel,
- add_internal::is_ignored,
- add_internal::examples);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model("model_name")
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-act = Int32Scalar("act", 0) # None activation
-layout = Int32Scalar("layout", 0) # NHWC
-pad = Int32Scalar("param", 1)
-stride0 = Int32Scalar("param1", 1)
-stride1 = Int32Scalar("param2", 1)
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad, stride0, stride1, act, layout).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4, 5, 6, 7, 8]}
-
-output0 = {output: # output 0
- [204, 120, 94, 104, 70, 164, 23, 112]}
-
-quant8 = DataTypeConverter(name="quantized").Identify({
- i1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
- f1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
- b1: ("TENSOR_INT32", 0.125, 0),
- output: ("TENSOR_QUANT8_ASYMM", 2, 100)
-})
-nchw = DataLayoutConverter("NCHW", name="nchw_layout").Identify([i1, f1, output], [layout])
-relu = ActivationConverter("relu", name="act").Identify([output], [act])
-relu6 = ActivationConverter("relu6").Identify([output], [act])
-weight_as_input = ParameterAsInputConverter(name="w_as_input").Identify([f1])
-relax = RelaxedModeConverter(True, name="float_relaxed")
-
-# Instantiate an example
-# Will produce cartesian product of
-# [nhwc, nchw_layout] * [act, relu6] * [w_as_param, w_as_input] * [float, float_relaxed, quantized]
-# 24 variations
-Example(
- (input0, output0), name="example_name"
-).AddVariations(
- nchw, defaultName="nhwc"
-).AddVariations(
- relu, relu6, includeDefault=False
-).AddVariations(
- weight_as_input, defaultName="w_as_param"
-).AddVariations(
- relax, quant8, defaultName="float"
-)
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// Generated from: conv_float.mod.py.
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-// Create the model
-Model createTestModel_model_name_nhwc_act_w_as_param_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_act_w_as_param_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_act_w_as_param_float,
- conv_float::is_ignored_model_name_nhwc_act_w_as_param_float,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_param_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_act_w_as_param_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_act_w_as_param_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_act_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_act_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_param_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_act_w_as_param_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_act_w_as_param_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_act_w_as_param_quantized,
- conv_float::is_ignored_model_name_nhwc_act_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_param_quantized);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_act_w_as_input_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_act_w_as_input_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_act_w_as_input_float,
- conv_float::is_ignored_model_name_nhwc_act_w_as_input_float,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_input_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_act_w_as_input_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_act_w_as_input_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_act_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_act_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_input_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_act_w_as_input_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_act_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_act_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_act_w_as_input_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_act_w_as_input_quantized,
- conv_float::is_ignored_model_name_nhwc_act_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nhwc_act_w_as_input_quantized);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_relu6_w_as_param_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_relu6_w_as_param_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_relu6_w_as_param_float,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_param_float,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_param_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_relu6_w_as_param_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_relu6_w_as_param_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_relu6_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_param_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_relu6_w_as_param_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_relu6_w_as_param_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_relu6_w_as_param_quantized,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_param_quantized);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_relu6_w_as_input_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_relu6_w_as_input_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_relu6_w_as_input_float,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_input_float,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_input_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_relu6_w_as_input_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_relu6_w_as_input_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_relu6_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_input_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nhwc_relu6_w_as_input_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nhwc_relu6_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nhwc_relu6_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nhwc_relu6_w_as_input_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nhwc_relu6_w_as_input_quantized,
- conv_float::is_ignored_model_name_nhwc_relu6_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nhwc_relu6_w_as_input_quantized);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_act_w_as_param_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_act_w_as_param_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_act_w_as_param_float,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_param_float,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_param_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_act_w_as_param_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_act_w_as_param_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_act_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_param_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_act_w_as_param_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_act_w_as_param_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_act_w_as_param_quantized,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_param_quantized);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_act_w_as_input_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_act_w_as_input_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_act_w_as_input_float,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_input_float,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_input_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_act_w_as_input_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_act_w_as_input_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_act_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_input_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_act_w_as_input_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_act_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_act_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_act_w_as_input_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_act_w_as_input_quantized,
- conv_float::is_ignored_model_name_nchw_layout_act_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_act_w_as_input_quantized);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_relu6_w_as_param_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_param_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_param_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_relu6_w_as_param_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_relu6_w_as_param_float,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_param_float,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_param_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_relu6_w_as_param_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_param_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_param_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_relu6_w_as_param_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_relu6_w_as_param_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_param_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_param_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_relu6_w_as_param_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_param_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_param_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_relu6_w_as_param_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_relu6_w_as_param_quantized,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_param_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_param_quantized);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_relu6_w_as_input_float() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_input_float(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_input_float = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_relu6_w_as_input_float) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_relu6_w_as_input_float,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_input_float,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_input_float);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_relu6_w_as_input_float_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_input_float_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_input_float_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_relu6_w_as_input_float_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_relu6_w_as_input_float_relaxed,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_input_float_relaxed,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_input_float_relaxed);
-}
-
-// Create the model
-Model createTestModel_model_name_nchw_layout_relu6_w_as_input_quantized() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_model_name_nchw_layout_relu6_w_as_input_quantized(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_model_name_example_name_nchw_layout_relu6_w_as_input_quantized = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_model_name_example_name_nchw_layout_relu6_w_as_input_quantized) {
- generated_tests::Execute(device,
- conv_float::createTestModel_model_name_nchw_layout_relu6_w_as_input_quantized,
- conv_float::is_ignored_model_name_nchw_layout_relu6_w_as_input_quantized,
- conv_float::examples_model_name_example_name_nchw_layout_relu6_w_as_input_quantized);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 1}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{1, 2, 2, 1}", [.25, .25, .25, .25])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [0])
-pad0 = Int32Scalar("pad0", 0)
-act = Int32Scalar("act", 0)
-stride = Int32Scalar("stride", 1)
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad0, pad0, pad0, pad0, stride, stride, act).To(output)
-model = model.RelaxedExecution(True)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1.0, 1.0, 1.0, 1.0, 0.5, 1.0, 1.0, 1.0, 1.0]}
-
-output0 = {output: # output 0
- [.875, .875, .875, .875]}
-
-# Instantiate an example
-Example((input0, output0))
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_quant8.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_quant8.mod.py). Do not edit
-// Generated from: conv_quant8.mod.py.
-namespace conv_quant8 {
-// Generated conv_quant8 test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_quant8
-
-// Create the model
-Model createTestModel() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 3, 3, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 4,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 2,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 1},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 3, 3, 3, 4, 4, 5},
- .outputs = {6},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {6};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 128, 62, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {0.875f, 0.875f, 0.875f, 0.875f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_quant8) {
- generated_tests::Execute(device,
- conv_quant8::createTestModel,
- conv_quant8::is_ignored,
- conv_quant8::examples);
-}
-
+++ /dev/null
-# Copyright 2018, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-f1 = Parameter("op2", "TENSOR_FLOAT32", "{2, 2, 2, 2}", [1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1])
-b1 = Parameter("op3", "TENSOR_FLOAT32", "{1}", [-200])
-output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
-act = Int32Scalar("act", 0) # None activation
-layout = Int32Scalar("layout", 0) # NHWC
-pad = Int32Scalar("param", 1)
-stride0 = Int32Scalar("param1", 1)
-stride1 = Int32Scalar("param2", 1)
-
-model = model.Operation("CONV_2D", i1, f1, b1, pad, stride0, stride1, act, layout).To(output)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4, 5, 6, 7, 8]}
-
-output0 = {output: # output 0
- [204, 120, 94, 104, 70, 164, 23, 112]}
-
-quant8 = DataTypeConverter().Identify({
- i1: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
- f1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
- b1: ("TENSOR_INT32", 0.125, 0),
- output: ("TENSOR_QUANT8_ASYMM", 2, 100)
-})
-relu = ActivationConverter("relu").Identify([output], [act])
-relu6 = ActivationConverter("relu6").Identify([output], [act])
-
-# Instantiate an example
-Example((input0, output0)).AddNchw(i1, f1, output, layout).AddAllActivations(
- output, act).AddInput(f1).AddVariations(RelaxedModeConverter(True), quant8)
+++ /dev/null
-Output VTS model: -
-Output example:-
+++ /dev/null
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// clang-format off
-// Generated file (from: conv_float.mod.py). Do not edit
-// Generated from: conv_float.mod.py.
-namespace conv_float {
-// Generated conv_float test
-#include "-"
-// Generated model constructor
-#include "-"
-} // namespace conv_float
-
-// Create the model
-Model createTestModel_none() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_none(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_none) {
- generated_tests::Execute(device,
- conv_float::createTestModel_none,
- conv_float::is_ignored_none,
- conv_float::examples_none);
-}
-
-// Create the model
-Model createTestModel_none_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_none_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_none_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_none_relaxed,
- conv_float::is_ignored_none_relaxed,
- conv_float::examples_none_relaxed);
-}
-
-// Create the model
-Model createTestModel_none_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_none_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_none_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_none_quant8,
- conv_float::is_ignored_none_quant8,
- conv_float::examples_none_quant8);
-}
-
-// Create the model
-Model createTestModel_none_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_none_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_none_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_none_weight_as_input,
- conv_float::is_ignored_none_weight_as_input,
- conv_float::examples_none_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_none_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_none_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_none_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_none_weight_as_input_relaxed,
- conv_float::is_ignored_none_weight_as_input_relaxed,
- conv_float::examples_none_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_none_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_none_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_none_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_none_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_none_weight_as_input_quant8,
- conv_float::is_ignored_none_weight_as_input_quant8,
- conv_float::examples_none_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_relu() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu,
- conv_float::is_ignored_relu,
- conv_float::examples_relu);
-}
-
-// Create the model
-Model createTestModel_relu_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_relaxed,
- conv_float::is_ignored_relu_relaxed,
- conv_float::examples_relu_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_quant8,
- conv_float::is_ignored_relu_quant8,
- conv_float::examples_relu_quant8);
-}
-
-// Create the model
-Model createTestModel_relu_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_weight_as_input,
- conv_float::is_ignored_relu_weight_as_input,
- conv_float::examples_relu_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_relu_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 120.0f, 94.0f, 104.0f, 70.0f, 164.0f, 23.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_weight_as_input_relaxed,
- conv_float::is_ignored_relu_weight_as_input_relaxed,
- conv_float::examples_relu_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 160, 147, 152, 135, 182, 112, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu_weight_as_input_quant8,
- conv_float::is_ignored_relu_weight_as_input_quant8,
- conv_float::examples_relu_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_relu1() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu1(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu1) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu1,
- conv_float::is_ignored_relu1,
- conv_float::examples_relu1);
-}
-
-// Create the model
-Model createTestModel_relu1_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu1_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu1_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu1_relaxed,
- conv_float::is_ignored_relu1_relaxed,
- conv_float::examples_relu1_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu1_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu1_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu1_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu1_quant8,
- conv_float::is_ignored_relu1_quant8,
- conv_float::examples_relu1_quant8);
-}
-
-// Create the model
-Model createTestModel_relu1_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu1_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu1_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu1_weight_as_input,
- conv_float::is_ignored_relu1_weight_as_input,
- conv_float::examples_relu1_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_relu1_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu1_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu1_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu1_weight_as_input_relaxed,
- conv_float::is_ignored_relu1_weight_as_input_relaxed,
- conv_float::examples_relu1_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu1_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu1_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu1_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu1_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu1_weight_as_input_quant8,
- conv_float::is_ignored_relu1_weight_as_input_quant8,
- conv_float::examples_relu1_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_relu6() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6,
- conv_float::is_ignored_relu6,
- conv_float::examples_relu6);
-}
-
-// Create the model
-Model createTestModel_relu6_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, 128, 64, 0, 0, 160, 64, 0, 0, 192, 64, 0, 0, 224, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 224, 64, 0, 0, 192, 64, 0, 0, 160, 64, 0, 0, 128, 64, 0, 0, 64, 64, 0, 0, 0, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_relaxed,
- conv_float::is_ignored_relu6_relaxed,
- conv_float::examples_relu6_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu6_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_quant8,
- conv_float::is_ignored_relu6_quant8,
- conv_float::examples_relu6_quant8);
-}
-
-// Create the model
-Model createTestModel_relu6_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_weight_as_input,
- conv_float::is_ignored_relu6_weight_as_input,
- conv_float::examples_relu6_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_relu6_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f}}, {1, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_relu6_weight_as_input_relaxed,
- conv_float::examples_relu6_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_relu6_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 132, 134, 136, 138, 140, 142, 144}}, {1, {132, 136, 140, 144, 148, 152, 156, 160, 160, 156, 152, 148, 144, 140, 136, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_relu6_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_relu6_weight_as_input_quant8,
- conv_float::is_ignored_relu6_weight_as_input_quant8,
- conv_float::examples_relu6_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_none() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_none(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_none) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_none,
- conv_float::is_ignored_nchw_none,
- conv_float::examples_nchw_none);
-}
-
-// Create the model
-Model createTestModel_nchw_none_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_none_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_none_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_none_relaxed,
- conv_float::is_ignored_nchw_none_relaxed,
- conv_float::examples_nchw_none_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_none_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_none_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_none_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_none_quant8,
- conv_float::is_ignored_nchw_none_quant8,
- conv_float::examples_nchw_none_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_none_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_none_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_none_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_none_weight_as_input,
- conv_float::is_ignored_nchw_none_weight_as_input,
- conv_float::examples_nchw_none_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_nchw_none_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_none_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_none_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_none_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_none_weight_as_input_relaxed,
- conv_float::examples_nchw_none_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_none_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_none_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_none_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_none_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_none_weight_as_input_quant8,
- conv_float::is_ignored_nchw_none_weight_as_input_quant8,
- conv_float::examples_nchw_none_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu,
- conv_float::is_ignored_nchw_relu,
- conv_float::examples_nchw_relu);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_relaxed,
- conv_float::is_ignored_nchw_relu_relaxed,
- conv_float::examples_nchw_relu_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_quant8,
- conv_float::is_ignored_nchw_relu_quant8,
- conv_float::examples_nchw_relu_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_weight_as_input,
- conv_float::is_ignored_nchw_relu_weight_as_input,
- conv_float::examples_nchw_relu_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {204.0f, 94.0f, 70.0f, 23.0f, 120.0f, 104.0f, 164.0f, 112.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu_weight_as_input_relaxed,
- conv_float::examples_nchw_relu_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {202, 147, 135, 112, 160, 152, 182, 156}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu_weight_as_input_quant8,
- conv_float::examples_nchw_relu_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu1() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu1(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu1) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu1,
- conv_float::is_ignored_nchw_relu1,
- conv_float::examples_nchw_relu1);
-}
-
-// Create the model
-Model createTestModel_nchw_relu1_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu1_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu1_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu1_relaxed,
- conv_float::is_ignored_nchw_relu1_relaxed,
- conv_float::examples_nchw_relu1_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu1_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu1_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu1_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu1_quant8,
- conv_float::is_ignored_nchw_relu1_quant8,
- conv_float::examples_nchw_relu1_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu1_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu1_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu1_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu1_weight_as_input,
- conv_float::is_ignored_nchw_relu1_weight_as_input,
- conv_float::examples_nchw_relu1_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_nchw_relu1_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu1_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu1_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu1_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu1_weight_as_input_relaxed,
- conv_float::examples_nchw_relu1_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu1_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu1_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu1_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {100, 100, 100, 100, 100, 100, 100, 100}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu1_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu1_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu1_weight_as_input_quant8,
- conv_float::examples_nchw_relu1_weight_as_input_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6,
- conv_float::is_ignored_nchw_relu6,
- conv_float::examples_nchw_relu6);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 64},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 64, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 68, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 72, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 76, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 80, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 84, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 128, 63, 0, 0, 64, 64, 0, 0, 160, 64, 0, 0, 224, 64, 0, 0, 0, 64, 0, 0, 128, 64, 0, 0, 192, 64, 0, 0, 0, 65, 0, 0, 0, 65, 0, 0, 192, 64, 0, 0, 128, 64, 0, 0, 0, 64, 0, 0, 224, 64, 0, 0, 160, 64, 0, 0, 64, 64, 0, 0, 128, 63, 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu6_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_relaxed,
- conv_float::is_ignored_nchw_relu6_relaxed,
- conv_float::examples_nchw_relu6_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 16},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 24, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 28, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 32, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 36, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132, 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_quant8,
- conv_float::is_ignored_nchw_relu6_quant8,
- conv_float::examples_nchw_relu6_quant8);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_weight_as_input() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6_weight_as_input(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_weight_as_input) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_weight_as_input,
- conv_float::is_ignored_nchw_relu6_weight_as_input,
- conv_float::examples_nchw_relu6_weight_as_input);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_weight_as_input_relaxed() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_FLOAT32,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 0, 0, 72, 195, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- .relaxComputationFloat32toFloat16 = true,
- };
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_relaxed(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_relaxed = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f}}, {1, {1.0f, 3.0f, 5.0f, 7.0f, 2.0f, 4.0f, 6.0f, 8.0f, 8.0f, 6.0f, 4.0f, 2.0f, 7.0f, 5.0f, 3.0f, 1.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {{0, {6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_weight_as_input_relaxed) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_weight_as_input_relaxed,
- conv_float::is_ignored_nchw_relu6_weight_as_input_relaxed,
- conv_float::examples_nchw_relu6_weight_as_input_relaxed);
-}
-
-// Create the model
-Model createTestModel_nchw_relu6_weight_as_input_quant8() {
- const std::vector<Operand> operands = {
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 1,
- .scale = 0.5f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {2, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 0.25f,
- .zeroPoint = 128,
- .lifetime = OperandLifeTime::MODEL_INPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- },
- {
- .type = OperandType::TENSOR_INT32,
- .dimensions = {1},
- .numberOfConsumers = 1,
- .scale = 0.125f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 0, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 4, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 8, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 12, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 16, .length = 4},
- },
- {
- .type = OperandType::INT32,
- .dimensions = {},
- .numberOfConsumers = 1,
- .scale = 0.0f,
- .zeroPoint = 0,
- .lifetime = OperandLifeTime::CONSTANT_COPY,
- .location = {.poolIndex = 0, .offset = 20, .length = 4},
- },
- {
- .type = OperandType::TENSOR_QUANT8_ASYMM,
- .dimensions = {1, 2, 2, 2},
- .numberOfConsumers = 0,
- .scale = 2.0f,
- .zeroPoint = 100,
- .lifetime = OperandLifeTime::MODEL_OUTPUT,
- .location = {.poolIndex = 0, .offset = 0, .length = 0},
- }
- };
-
- const std::vector<Operation> operations = {
- {
- .type = OperationType::CONV_2D,
- .inputs = {0, 1, 2, 3, 4, 5, 6, 7},
- .outputs = {8},
- }
- };
-
- const std::vector<uint32_t> inputIndexes = {0, 1};
- const std::vector<uint32_t> outputIndexes = {8};
- std::vector<uint8_t> operandValues = {
- 192, 249, 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0
- };
- const std::vector<hidl_memory> pools = {};
-
- return {
- .operands = operands,
- .operations = operations,
- .inputIndexes = inputIndexes,
- .outputIndexes = outputIndexes,
- .operandValues = operandValues,
- .pools = pools,
- };
-}
-
-bool is_ignored_nchw_relu6_weight_as_input_quant8(int i) {
- static std::set<int> ignore = {};
- return ignore.find(i) != ignore.end();
-}
-
-std::vector<MixedTypedExample> examples_nchw_relu6_weight_as_input_quant8 = {
-// Begin of an example
-{
-//Input(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {130, 134, 138, 142, 132, 136, 140, 144}}, {1, {132, 140, 148, 156, 136, 144, 152, 160, 160, 152, 144, 136, 156, 148, 140, 132}}}
-},
-//Output(s)
-{ // See tools/test_generator/include/TestHarness.h:MixedTyped
- // int -> FLOAT32 map
- {},
- // int -> INT32 map
- {},
- // int -> QUANT8_ASYMM map
- {{0, {103, 103, 103, 103, 103, 103, 103, 103}}}
-}
-}, // End of an example
-};
-
-TEST_F(NeuralnetworksHidlTest, conv_float_nchw_relu6_weight_as_input_quant8) {
- generated_tests::Execute(device,
- conv_float::createTestModel_nchw_relu6_weight_as_input_quant8,
- conv_float::is_ignored_nchw_relu6_weight_as_input_quant8,
- conv_float::examples_nchw_relu6_weight_as_input_quant8);
-}
-
+++ /dev/null
-#!/usr/bin/python3
-
-# Copyright 2017, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""NN Model Test Compiler Test.
-
-Runs subdirectories of tests for the test generator/compiler.
-"""
-
-import filecmp
-import glob
-import os
-import re
-import shutil
-import subprocess
-import sys
-import unittest
-
-
-__author__ = 'Android'
-
-
-DOTTED_LINE = '................'
-
-class OrigFile:
- OrigDir = None
-
-class TestGeneratorTests(unittest.TestCase):
- """Class to contain all the unittest test cases.
-
- Tests will be dynamically added to this class as methods.
- No static tests, so this class is initially empty.
-
- """
- pass
-
-
-def GenerateTests(dir_name):
- """Creates a test method that can be added as method to GenerateTests."""
- cwd = os.getcwd()
- def Test(self):
- os.chdir(cwd)
- ExecTest(dir_name, self)
- return Test
-
-
-def AddUnitTests(test_dirs):
- """Adds a test to Tests for each directory in test_dirs."""
-
- for t in test_dirs:
- # Must start with 'test_' according to unittest
- test_name = 'test_%s' % t
- test = GenerateTests(t)
- # Add test as method to TestGeneratorTests with test_name as method name
- setattr(TestGeneratorTests, test_name, test)
-
-
-class Options(object):
- verbose = 0
- cleanup = 1
- update_cts = 0
- zero_return = 0
-
-
-def CompareFiles(actual, expect):
- """Compares actual and expect for equality."""
- if not os.path.isfile(actual):
- if Options.verbose:
- print ('Could not find %s' % actual)
- return False
- if not os.path.isfile(expect):
- if Options.verbose:
- print ('Could not find %s' % expect)
- return False
-
- return filecmp.cmp(actual, expect, False)
-
-
-def CopyIfDifferent(src, dst):
- """Updates dst if it is different from src."""
- if not CompareFiles(src, dst):
- if Options.verbose:
- print ('Copying from %s to %s' % (src, dst))
- shutil.copyfile(src, dst)
-
-
-def GetCommandLineArgs(filename):
- """Extracts command line arguments from first comment line in a file."""
- f = open(filename, 'r')
- line = f.readline()
- f.close()
- if line[0] == '/' and line[1] == '/':
- return line[2:].strip()
- else:
- return ''
-
-
-def ReadFileToStr(filename):
- """Returns contents of file as a str."""
- with open(filename, 'r') as f:
- return f.read()
-
-
-def ReportIfDifferFromExpected(tests, name, file1, file2):
- """Fails tests if file1 and file2 differ."""
- if not CompareFiles(file1, file2):
- if Options.verbose:
- err_message = ('%s is different:\n'
- 'expected:\n%s\n%s%s\n\n'
- 'actual:\n%s\n%s%s\n') % (
- name,
- DOTTED_LINE, ReadFileToStr(file1), DOTTED_LINE,
- DOTTED_LINE, ReadFileToStr(file2), DOTTED_LINE)
- else:
- err_message = '%s is different' % name
- tests.fail(err_message)
-
-
-def GetRSFiles():
- """Returns a list of files in cwd with extension '.rs' or '.fs'."""
- rs_files = glob.glob('*.mod.py')
- rs_files.sort()
- return rs_files
-
-
-def GetOutDir():
- return os.path.abspath(os.path.join(OrigFile.OrigDir, "../"))
-
-
-# Declare/define cache variable for GetOutDir to cache results
-# This way we only need to call subprocesses once to get the directory
-GetOutDir.cache = None
-
-
-def CreateCmd(run_vts):
- """Creates the test command to run for the current test."""
- cmd_string = ('%s/%s_generator.py'
- ) % (GetOutDir(), "cts" if not run_vts else "vts")
- base_args = cmd_string.split()
- rs_files = GetRSFiles()
-
- # Extra command line arguments can be placed as // comments at the start of
- # any .rs file. We automatically bundle up all of these extra args and invoke
- # llvm-rs-cc with them.
- extra_args_str = ''
- for rs_file in rs_files:
- extra_args_str += GetCommandLineArgs(rs_file)
- extra_args = extra_args_str.split()
-
- args = base_args + extra_args + rs_files
- return args
-
-def Cleanup():
- """Cleans up the cwd of any tmp files created in current test."""
- try:
- os.remove('stdout.txt')
- os.remove('stderr.txt')
- shutil.rmtree('tmp/')
- except OSError:
- pass
-
-
-def CheckTestResult(dir_name, subprocess_ret, tests, args):
- """Checks the result of the subprocess command to see if it passed/failed.
-
- If dir_name starts with 'F_', then subprocess is expected to fail.
- If it instead succeeded, then this test is failed.
- Vice versa with a dir_name starting with 'P_'.
-
- Args:
- dir_name: name of current directory/test name
- subprocess_ret: return code of subprocess
- tests: unittest, call tests.fail(reason) when failure
- args: the arguments for the command that was run
- """
- if dir_name[0:2] == 'F_':
- if subprocess_ret == 0:
- if Options.verbose:
- err_message = ('Command (%s) passed on invalid input\n'
- 'stdout:\n%s\n%s%s\n') % (
- ' '.join(args),
- DOTTED_LINE, ReadFileToStr('stdout.txt'), DOTTED_LINE
- )
- else:
- err_message = 'Command passed on invalid input'
- tests.fail(err_message)
- elif dir_name[0:2] == 'P_':
- if subprocess_ret != 0:
- if Options.verbose:
- err_message = ('Command (%s) failed on valid input\n'
- 'stderr:\n%s\n%s%s\n') % (
- ' '.join(args),
- DOTTED_LINE, ReadFileToStr('stderr.txt'), DOTTED_LINE
- )
- else:
- err_message = 'Command failed on valid input'
- tests.fail(err_message)
- else:
- tests.fail('Invalid test name: ' + dir_name +
- ', should start with F_ or P_')
-
-
-
-def ExecTest(dir_name, tests):
- """Executes an test generator test from dir_name."""
-
- os.chdir(dir_name)
- stdout_file = open('stdout.txt', 'w+')
- stderr_file = open('stderr.txt', 'w+')
- run_vts = (dir_name[2:5] == 'vts')
- args = CreateCmd(run_vts)
-
- if Options.verbose > 1:
- print ('Executing:', ' '.join(args))
-
- # Execute the command and check the resulting shell return value.
- # All tests that are expected to FAIL have directory names that
- # start with 'F_'. Other tests that are expected to PASS have
- # directory names that start with 'P_'.
- ret = 0
- try:
- ret = subprocess.call(args, stdout=stdout_file, stderr=stderr_file)
- except OSError:
- tests.fail('subprocess.call failed: ' + ' '.join(args))
-
- stdout_file.close()
- stderr_file.close()
-
- CheckTestResult(dir_name, ret, tests, args)
-
- ReportIfDifferFromExpected(tests, 'stdout', 'stdout.txt.expect', 'stdout.txt')
- ReportIfDifferFromExpected(tests, 'stderr', 'stderr.txt.expect', 'stderr.txt')
-
- if Options.cleanup:
- Cleanup()
-
-
-def Usage():
- """Print out usage information."""
- print ('Usage: %s [OPTION]... [TESTNAME]...'
- 'Renderscript Compiler Test Harness\n'
- 'Runs TESTNAMEs (all tests by default)\n'
- 'Available Options:\n'
- ' -h, --help Help message\n'
- ' -n, --no-cleanup Don\'t clean up after running tests\n'
- ' -v, --verbose Verbose output. Enter multiple -v to get more verbose.\n'
- ' -z, --zero-return Return 0 as exit code no matter if tests fail. Required for TreeHugger.\n'
- ) % (sys.argv[0]),
- return
-
-
-def main():
- """Runs the unittest suite.
-
- Parses command line arguments, adds test directories as tests.
-
- Returns:
- 0 if '-z' flag is set.
- Else unittest.main() returns with its own error code.
- """
-
- OrigFile.OrigDir = os.path.dirname(os.path.abspath(__file__))
- # Chdir to the directory this file is in since tests are in this directory
- os.chdir(OrigFile.OrigDir)
- files = []
- for arg in sys.argv[1:]:
- if arg in ('-h', '--help'):
- Usage()
- return 0
- elif arg in ('-n', '--no-cleanup'):
- Options.cleanup = 0
- elif arg in ('-u', '--update-cts'):
- Options.update_cts = 1
- elif arg in ('-v', '--verbose'):
- Options.verbose += 1
- elif arg in ('-z', '--zero-return'):
- Options.zero_return = 1
- else:
- # Test list to run
- if os.path.isdir(arg):
- files.append(arg)
- else:
- print >> sys.stderr, 'Invalid test or option: %s' % arg
- return 1
-
- if not files:
- file_names = os.listdir('.')
- # Test names must start with 'F_' or 'P_'
- # 'F_' tests are expected to fail
- # 'P_' tests are expected to pass
- for f in file_names:
- if os.path.isdir(f) and (f[0:2] == 'F_' or f[0:2] == 'P_'):
- files.append(f)
- files.sort()
-
- AddUnitTests(files)
-
- # verbosity=2 is necessary for PythonUnitTestRunner to parse the results
- # Otherwise verbosity does not matter
- # If Options.zero_return is set, do not let unittest.main() exit
- # This is necessary in TreeHugger to distinguish between failing tests and
- # failing to execute the python script
- # If Options.zero_return is not set, let unittest.main() exit
- # In this case it will return a non-zero code if any tests fail
- unittest_exit = Options.zero_return == 0
- unittest.main(verbosity=2,
- argv=[sys.argv[0]] + ['TestGeneratorTests'],
- exit=unittest_exit)
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main())
-