From 1d74fcc07abd136937263b0d4e3c58916350d1b3 Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Fri, 25 Jan 2019 14:34:07 +0300
Subject: [PATCH] [nnc] Delete interpreter system tests (#2927)

Delete deprecated interpreter system tests.

Signed-off-by: Sergei Barannikov
---
 contrib/nnc/tests/CMakeLists.txt                   |   1 -
 contrib/nnc/tests/interpreter/CMakeLists.txt       |  39 ---
 contrib/nnc/tests/interpreter/gen/gen_test_data.py | 369 ---------------------
 contrib/nnc/tests/interpreter/gen/op_info.fbs      |  52 ---
 .../tests/interpreter/gen/opinfo/OperatorInfo.py   | 142 --------
 .../interpreter/gen/opinfo/OperatorInfoList.py     |  44 ---
 .../tests/interpreter/gen/opinfo/OperatorType.py   |  16 -
 .../nnc/tests/interpreter/gen/opinfo/PadType.py    |   8 -
 .../nnc/tests/interpreter/gen/opinfo/PoolType.py   |   8 -
 contrib/nnc/tests/interpreter/gen/opinfo/Shape.py  |  46 ---
 contrib/nnc/tests/interpreter/gen/opinfo/Tensor.py |  58 ----
 .../nnc/tests/interpreter/gen/opinfo/__init__.py   |   0
 contrib/nnc/tests/interpreter/gen/requirements.txt |   3 -
 contrib/nnc/tests/interpreter/gen/run_flatc.sh     |   1 -
 contrib/nnc/tests/interpreter/gen/run_gen.sh       |   1 -
 contrib/nnc/tests/interpreter/graph_creator.cpp    | 160 ---------
 contrib/nnc/tests/interpreter/graph_creator.h      |  24 --
 contrib/nnc/tests/interpreter/main.cpp             |  54 ---
 contrib/nnc/tests/interpreter/op_info_util.cpp     | 144 --------
 contrib/nnc/tests/interpreter/op_info_util.h       |  40 ---
 contrib/nnc/tests/interpreter/op_test.cpp          |  59 ----
 .../interpreter/test_data/test_description.txt     |  46 ---
 22 files changed, 1315 deletions(-)
 delete mode 100644 contrib/nnc/tests/interpreter/CMakeLists.txt
 delete mode 100644 contrib/nnc/tests/interpreter/gen/gen_test_data.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/op_info.fbs
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfo.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfoList.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/OperatorType.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/PadType.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/PoolType.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/Shape.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/Tensor.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/opinfo/__init__.py
 delete mode 100644 contrib/nnc/tests/interpreter/gen/requirements.txt
 delete mode 100755 contrib/nnc/tests/interpreter/gen/run_flatc.sh
 delete mode 100755 contrib/nnc/tests/interpreter/gen/run_gen.sh
 delete mode 100644 contrib/nnc/tests/interpreter/graph_creator.cpp
 delete mode 100644 contrib/nnc/tests/interpreter/graph_creator.h
 delete mode 100644 contrib/nnc/tests/interpreter/main.cpp
 delete mode 100644 contrib/nnc/tests/interpreter/op_info_util.cpp
 delete mode 100644 contrib/nnc/tests/interpreter/op_info_util.h
 delete mode 100644 contrib/nnc/tests/interpreter/op_test.cpp
 delete mode 100644 contrib/nnc/tests/interpreter/test_data/test_description.txt

diff --git a/contrib/nnc/tests/CMakeLists.txt b/contrib/nnc/tests/CMakeLists.txt
index 2bf6d7e..c244ec7 100644
--- a/contrib/nnc/tests/CMakeLists.txt
+++ b/contrib/nnc/tests/CMakeLists.txt
@@ -1,4 +1,3 @@
 add_subdirectory(import)
 add_subdirectory(soft_backend)
-add_subdirectory(interpreter)
 add_subdirectory(acl_soft_backend)
diff --git a/contrib/nnc/tests/interpreter/CMakeLists.txt b/contrib/nnc/tests/interpreter/CMakeLists.txt
deleted file mode 100644
index edb7352..0000000
--- a/contrib/nnc/tests/interpreter/CMakeLists.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-nncc_find_package(FlatBuffers QUIET)
-if (NOT FlatBuffers_FOUND)
-  message(STATUS "FlatBuffers_FOUND is not set")
-  return()
-endif()
-
-if (NOT TARGET gtest)
-  message(STATUS "gtest is not available, we can't build this target properly if unit tests are disabled")
-  return()
-endif()
-
-# Compile flatbuffers schemas
-# Produces FB_GEN_SOURCES and FB_GEN_INCLUDE_DIRS variables
-set(GENERATED_OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
-FlatBuffers_Generate(FB_GEN
-                     ${GENERATED_OUTPUT_DIR}
-                     ${CMAKE_CURRENT_SOURCE_DIR}/gen
-                     op_info.fbs)
-
-file(GLOB SOURCES ./*.cpp)
-file(GLOB HEADERS ./*.h)
-
-add_executable(interpreter_op_test ${SOURCES} ${HEADERS} ${FB_GEN_SOURCES} ${OPTIONS_SRC})
-
-file(GLOB TEST_DATA ${CMAKE_CURRENT_SOURCE_DIR}/test_data/*_data.fb)
-foreach(TEST_DATA_ITEM ${TEST_DATA})
-  get_filename_component(TEST_NAME ${TEST_DATA_ITEM} NAME_WE)
-  string(REPLACE "_data" "" TEST_NAME ${TEST_NAME})
-
-  add_test(NAME interpreter_${TEST_NAME}_test
-           COMMAND interpreter_op_test ${TEST_DATA_ITEM}
-           WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
-endforeach()
-
-target_include_directories(interpreter_op_test PRIVATE ${FB_GEN_INCLUDE_DIRS})
-
-target_link_libraries(interpreter_op_test PRIVATE gtest flatbuffers
-                      nnc_core nnc_interpreter
-                      nn_import_common)
diff --git a/contrib/nnc/tests/interpreter/gen/gen_test_data.py b/contrib/nnc/tests/interpreter/gen/gen_test_data.py
deleted file mode 100644
index 29b51ca..0000000
--- a/contrib/nnc/tests/interpreter/gen/gen_test_data.py
+++ /dev/null
@@ -1,369 +0,0 @@
-import flatbuffers
-import numpy as np
-import tensorflow as tf
-
-import re
-import os.path as path
-import argparse
-from collections import defaultdict
-
-from opinfo import OperatorInfoList
-from opinfo import OperatorInfo
-from opinfo import OperatorType
-from opinfo import Tensor
-from opinfo import Shape
-from opinfo import PadType
-from opinfo import PoolType
-
-# 'axis' for CAPPED_RELU is not an error, it just denotes a numeric parameter.
-OP_FORMATS = { - 'FULLY_CONNECTED': (), - 'CONV_2D': ('padType', 'shapes'), - 'DEPTHWISE_CONV_2D': ('padType', 'shapes'), - 'POOL_2D': ('padType', 'poolType', 'shapes'), - 'CONCATENATION': ('axis',), - 'RESHAPE': ('shapes',), - 'RELU': (), - 'CAPPED_RELU': ('axis',), - 'BIAS_ADD': (), - 'SOFTMAX': ('axis',) -} - - -class OpInfoSerializer: - def __init__(self): - self.builder = flatbuffers.Builder(1024*1024) - self.dataMap = {'int': self.builder.PrependInt32, - 'float': self.builder.PrependFloat32, - 'obj': self.builder.PrependUOffsetTRelative} - - @staticmethod - def vector_start_method(mdl, fld): - method_name = mdl.__name__.split('.')[-1] + 'Start' + fld.capitalize() + 'Vector' - return getattr(mdl, method_name) - - def make_vector(self, mdl, fld, data_type, data, data_size): - if data_type == 'float': - print('Serializing vector {}.{} of {} floats'.format(mdl.__name__, fld, data_size)) - - add_vector_item = self.dataMap[data_type] - self.vector_start_method(mdl, fld)(self.builder, data_size) - for item in reversed(data): - add_vector_item(item) - return self.builder.EndVector(data_size) - - def make_tensor_vector(self, tensors, field_name): - if tensors is None: - return None - tensors = [self.make_tensor(tensor) for tensor in tensors] - return self.make_vector(OperatorInfo, field_name, 'obj', tensors, len(tensors)) - - def make_shape_vector(self, shapes): - if shapes is None: - return None - shapes = [self.make_shape(shape) for shape in shapes] - return self.make_vector(OperatorInfo, 'shapes', 'obj', shapes, len(shapes)) - - def make_op_info_vector(self, providers): - op_infos = [self.make_operator_info(p) for p in providers] - return self.make_vector(OperatorInfoList, 'infos', 'obj', op_infos, len(op_infos)) - - def make_shape(self, shape): - dims = self.make_vector(Shape, 'dims', 'int', shape, len(shape)) - - Shape.ShapeStart(self.builder) - Shape.ShapeAddDims(self.builder, dims) - return Shape.ShapeEnd(self.builder) - - def make_tensor(self, tensor): - shape = self.make_shape(tensor.shape) - - flat_tensor = tensor.flatten() - data = self.make_vector(Tensor, 'data', 'float', flat_tensor, tensor.size) - - Tensor.TensorStart(self.builder) - Tensor.TensorAddShape(self.builder, shape) - Tensor.TensorAddData(self.builder, data) - return Tensor.TensorEnd(self.builder) - - def add_optional(self, method, value): - if value is not None: - method(self.builder, value) - - def add_optional_enum(self, method, cls, value): - if value is not None: - method(self.builder, getattr(cls, value)) - - def make_operator_info(self, provider): - inputs = self.make_tensor_vector(provider.get_inputs(), 'inputs') - kernels = self.make_tensor_vector(provider.get_kernels(), 'kernels') - results = self.make_tensor_vector(provider.get_results(), 'results') - - op = getattr(OperatorType.OperatorType, provider.get_op()) - shapes = self.make_shape_vector(provider.get_shapes()) - - OperatorInfo.OperatorInfoStart(self.builder) - OperatorInfo.OperatorInfoAddOp(self.builder, op) - OperatorInfo.OperatorInfoAddInputs(self.builder, inputs) - OperatorInfo.OperatorInfoAddResults(self.builder, results) - - self.add_optional(OperatorInfo.OperatorInfoAddKernels, kernels) - self.add_optional_enum(OperatorInfo.OperatorInfoAddPadType, PadType.PadType, provider.get_pad_type()) - self.add_optional_enum(OperatorInfo.OperatorInfoAddPoolType, PoolType.PoolType, provider.get_pool_type()) - self.add_optional(OperatorInfo.OperatorInfoAddShapes, shapes) - self.add_optional(OperatorInfo.OperatorInfoAddAxis, provider.get_axis()) - return 
OperatorInfo.OperatorInfoEnd(self.builder) - - def serialize(self, providers): - op_infos = self.make_op_info_vector(providers) - - OperatorInfoList.OperatorInfoListStart(self.builder) - OperatorInfoList.OperatorInfoListAddInfos(self.builder, op_infos) - info_list = OperatorInfoList.OperatorInfoListEnd(self.builder) - - self.builder.Finish(info_list) - return self.builder.Output() - - def save_to_file(self, providers, filename): - result_data = self.serialize(providers) - with open(filename, 'wb') as f: - f.write(result_data) - - -class OpInfoProvider: - sess = tf.InteractiveSession() - - def __init__(self, op_type): - self.kernel_gen_method = 'RANDOM' - self.input_gen_method = 'RANDOM' - - self.op = op_type - self.input_shapes = None - self.kernel_shapes = None - - self.padType = None - self.poolType = None - self.shapes = None - self.axis = None - - self._inputs = None - self._kernels = None - self._results = None - - def __repr__(self): - return '{}: input {}'.format(self.op, self.input_shapes) - - @staticmethod - def gen_tensor(shape, method): - if method == 'RANDOM': - return np.random.rand(*shape).astype(np.float32) * 10 - 5 - else: - raise Exception("So far only RANDOM tensor generation method is supported") - - def get_op(self): - return self.op - - def get_pad_type(self): - return self.padType - - def get_pool_type(self): - return self.poolType + 'POOL' if self.poolType else self.poolType - - def get_shapes(self): - if self.op in ('CONV_2D', 'DEPTHWISE_CONV_2D', 'POOL_2D'): - # Current NN interpreter implementation requires that strides and pooling kernels are 3d - [h, w, c] - return [shape + [1] for shape in self.shapes] - else: - return self.shapes - - def get_axis(self): - return self.axis - - def get_inputs(self): - if self._inputs is not None: - return self._inputs - - self._inputs = [self.gen_tensor(shape, self.input_gen_method) for shape in self.input_shapes] - return self._inputs - - def get_kernels(self): - if self.kernel_shapes is None: - return None - - if self._kernels is not None: - return self._kernels - - self._kernels = [self.gen_tensor(shape, self.kernel_gen_method) for shape in self.kernel_shapes] - return self._kernels - - def get_results(self): - if self._results is not None: - return self._results - - self.get_inputs() - self.get_kernels() - - self._results = getattr(OpInfoProvider, 'get_{}_result'.format(self.op.lower()))(self) - return self._results - - def get_fully_connected_result(self): - x = self._inputs[0] - kernel = self._kernels[0] - return [tf.matmul(x, kernel).eval()] - - def get_conv_2d_result(self): - x = self._inputs[0] - kernel = self._kernels[0] - strides = [1] + self.shapes[0] + [1] - - net = tf.nn.conv2d(tf.expand_dims(x, 0), kernel, strides, self.padType) - return [tf.squeeze(net, axis=0).eval()] - - def get_depthwise_conv_2d_result(self): - x = self._inputs[0] - kernel = self._kernels[0] - strides = [1] + self.shapes[0] + [1] - - net = tf.nn.depthwise_conv2d(tf.expand_dims(x, 0), kernel, strides, self.padType) - return [tf.squeeze(net, axis=0).eval()] - - def get_pool_2d_result(self): - x = self._inputs[0] - net = tf.nn.pool(tf.expand_dims(x, 0), self.shapes[0], self.poolType, self.padType, strides=self.shapes[1]) - return [tf.squeeze(net, axis=0).eval()] - - def get_concatenation_result(self): - return [tf.concat(self._inputs, self.axis).eval()] - - def get_reshape_result(self): - return [tf.reshape(self._inputs[0], self.shapes[0]).eval()] - - def get_relu_result(self): - return [tf.nn.relu(self._inputs[0]).eval()] - - def 
get_capped_relu_result(self): - return [tf.maximum(0.0, tf.minimum(self._inputs[0], self.axis)).eval()] - - def get_bias_add_result(self): - return [tf.add(self._inputs[0], self._inputs[1]).eval()] - - def get_softmax_result(self): - return [tf.nn.softmax(self._inputs[0], self.axis).eval()] - - -class OpInfoParser: - # This regex just selects spaces that are not inside []. - _split_regex = re.compile(r'\s+(?!(?:[^\[\]]*\[[^\[\]]*\])*[^\[\]]*\])') - - def __init__(self, op_type): - self.op = op_type - - @staticmethod - def info_split(info_string): - return re.split(OpInfoParser._split_regex, info_string) - - @staticmethod - def get_shape_list(shape_string): - shape_string = re.sub(r'\s*', '', shape_string) - shape_string = re.sub(r'\]\[', ' ', shape_string) - if shape_string.startswith('[['): - shape_string = shape_string[1:-1] - shape_list = shape_string.split(' ') - return [[int(dim.strip()) for dim in shape.strip('[]').split(',')] for shape in shape_list] - - def create_provider(self, info_string): - op_format = OP_FORMATS[self.op] - info_items = self.info_split(info_string) - - provider = OpInfoProvider(self.op) - - provider.input_shapes = self.get_shape_list(info_items[0]) - for i, op_attr in enumerate(op_format): - if op_attr == 'kernels': - provider.kernel_shapes = self.get_shape_list(info_items[i + 1]) - elif op_attr == 'padType': - provider.padType = info_items[i + 1] - elif op_attr == 'poolType': - provider.poolType = info_items[i + 1] - elif op_attr == 'axis': - provider.axis = int(info_items[i + 1]) - elif op_attr == 'shapes': - provider.shapes = self.get_shape_list(info_items[i + 1]) - else: - raise Exception('Encountered unknown op attr type') - - return provider - - -def preprocess(filename): - with open(filename, 'r') as f: - lines = f.readlines() - - return [stripped - for stripped in (line.strip() for line in lines) - if stripped != '' and not stripped.startswith('#')] - - -def get_opwise_opinfo_lines(lines): - opwise_opinfo = defaultdict(lambda: []) - current_op = None - for line in lines: - if line in OP_FORMATS: - current_op = line - else: - opwise_opinfo[current_op].append(line) - - if None in opwise_opinfo.keys(): - raise Exception('Operator info description file doesn\'t start with an operator name.') - - return opwise_opinfo - - -def prepare_save_paths(given_path): - given_path = path.abspath(given_path) - dirname = path.dirname(given_path) - basename = path.basename(given_path) - - if not path.exists(dirname): - raise Exception('Indicated path for saving result does not exist.') - - if basename == '': - basename = 'result.fb' - - return dirname, basename - - -def main(): - parser = argparse.ArgumentParser(description='Generate Flatbuffers files containing NN operator' - 'info needed for testing.') - parser.add_argument('input', type=str) - parser.add_argument('-o', '--output', type=str, default='./result.fb', - help='output file path; if used with "-b" will use the name of the file as a postfix') - parser.add_argument('-b', '--bulk', action='store_true', - help='save result to a single file (default is to save a file for each operation)') - - args = parser.parse_args() - - dirname, basename = prepare_save_paths(args.output) - - raw = preprocess(args.input) - opwise_raw = get_opwise_opinfo_lines(raw) - - opwise_data_providers = dict() - for op_type in opwise_raw: - parser = OpInfoParser(op_type) - opwise_data_providers[op_type] = [parser.create_provider(info_string) for info_string in opwise_raw[op_type]] - - if not args.bulk: - for op_type in opwise_data_providers: 
- serializer = OpInfoSerializer() - path_to_save = path.join(dirname, '{}_{}'.format(op_type.lower(), basename)) - serializer.save_to_file(opwise_data_providers[op_type], path_to_save) - else: - all_providers = [provider for op_type in opwise_data_providers for provider in opwise_data_providers[op_type]] - serializer = OpInfoSerializer() - serializer.save_to_file(all_providers, path.join(dirname, basename)) - - -if __name__ == '__main__': - main() diff --git a/contrib/nnc/tests/interpreter/gen/op_info.fbs b/contrib/nnc/tests/interpreter/gen/op_info.fbs deleted file mode 100644 index 8be43fd..0000000 --- a/contrib/nnc/tests/interpreter/gen/op_info.fbs +++ /dev/null @@ -1,52 +0,0 @@ -namespace opinfo; - -file_extension "opinfo"; - -enum OperatorType : byte { - FULLY_CONNECTED = 0, - CONV_2D, - DEPTHWISE_CONV_2D, - POOL_2D, - RESHAPE, - CONCATENATION, - RELU, - CAPPED_RELU, - BIAS_ADD, - SOFTMAX, -} - -enum PadType : byte { - SAME, - VALID -} - -enum PoolType : byte { - MAXPOOL, - AVGPOOL -} - -table Shape { - dims:[int]; -} - -table Tensor { - shape:Shape; - data:[float]; -} - -table OperatorInfo { - op:OperatorType; - inputs:[Tensor]; - kernels:[Tensor]; - results:[Tensor]; - padType:PadType; - poolType:PoolType; - shapes:[Shape]; - axis:int; -} - -table OperatorInfoList { - infos:[OperatorInfo]; -} - -root_type OperatorInfoList; diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfo.py b/contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfo.py deleted file mode 100644 index f79cf65..0000000 --- a/contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfo.py +++ /dev/null @@ -1,142 +0,0 @@ -# automatically generated by the FlatBuffers compiler, do not modify - -# namespace: opinfo - -import flatbuffers - -class OperatorInfo(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsOperatorInfo(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = OperatorInfo() - x.Init(buf, n + offset) - return x - - # OperatorInfo - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # OperatorInfo - def Op(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - - # OperatorInfo - def Inputs(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 - x = self._tab.Indirect(x) - from .Tensor import Tensor - obj = Tensor() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # OperatorInfo - def InputsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # OperatorInfo - def Kernels(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 - x = self._tab.Indirect(x) - from .Tensor import Tensor - obj = Tensor() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # OperatorInfo - def KernelsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # OperatorInfo - def Results(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) - if o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 
4 - x = self._tab.Indirect(x) - from .Tensor import Tensor - obj = Tensor() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # OperatorInfo - def ResultsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # OperatorInfo - def PadType(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - - # OperatorInfo - def PoolType(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) - return 0 - - # OperatorInfo - def Shapes(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) - if o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 - x = self._tab.Indirect(x) - from .Shape import Shape - obj = Shape() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # OperatorInfo - def ShapesLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - - # OperatorInfo - def Axis(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18)) - if o != 0: - return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) - return 0 - -def OperatorInfoStart(builder): builder.StartObject(8) -def OperatorInfoAddOp(builder, op): builder.PrependInt8Slot(0, op, 0) -def OperatorInfoAddInputs(builder, inputs): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(inputs), 0) -def OperatorInfoStartInputsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def OperatorInfoAddKernels(builder, kernels): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(kernels), 0) -def OperatorInfoStartKernelsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def OperatorInfoAddResults(builder, results): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(results), 0) -def OperatorInfoStartResultsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def OperatorInfoAddPadType(builder, padType): builder.PrependInt8Slot(4, padType, 0) -def OperatorInfoAddPoolType(builder, poolType): builder.PrependInt8Slot(5, poolType, 0) -def OperatorInfoAddShapes(builder, shapes): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(shapes), 0) -def OperatorInfoStartShapesVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def OperatorInfoAddAxis(builder, axis): builder.PrependInt32Slot(7, axis, 0) -def OperatorInfoEnd(builder): return builder.EndObject() diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfoList.py b/contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfoList.py deleted file mode 100644 index 3e41fc4..0000000 --- a/contrib/nnc/tests/interpreter/gen/opinfo/OperatorInfoList.py +++ /dev/null @@ -1,44 +0,0 @@ -# automatically generated by the FlatBuffers compiler, do not modify - -# namespace: opinfo - -import flatbuffers - -class OperatorInfoList(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsOperatorInfoList(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = OperatorInfoList() - x.Init(buf, n + offset) - return x - - 
# OperatorInfoList - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # OperatorInfoList - def Infos(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - x = self._tab.Vector(o) - x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 - x = self._tab.Indirect(x) - from .OperatorInfo import OperatorInfo - obj = OperatorInfo() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # OperatorInfoList - def InfosLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - -def OperatorInfoListStart(builder): builder.StartObject(1) -def OperatorInfoListAddInfos(builder, infos): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(infos), 0) -def OperatorInfoListStartInfosVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def OperatorInfoListEnd(builder): return builder.EndObject() diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/OperatorType.py b/contrib/nnc/tests/interpreter/gen/opinfo/OperatorType.py deleted file mode 100644 index 5fc3f79..0000000 --- a/contrib/nnc/tests/interpreter/gen/opinfo/OperatorType.py +++ /dev/null @@ -1,16 +0,0 @@ -# automatically generated by the FlatBuffers compiler, do not modify - -# namespace: opinfo - -class OperatorType(object): - FULLY_CONNECTED = 0 - CONV_2D = 1 - DEPTHWISE_CONV_2D = 2 - POOL_2D = 3 - RESHAPE = 4 - CONCATENATION = 5 - RELU = 6 - CAPPED_RELU = 7 - BIAS_ADD = 8 - SOFTMAX = 9 - diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/PadType.py b/contrib/nnc/tests/interpreter/gen/opinfo/PadType.py deleted file mode 100644 index b147853..0000000 --- a/contrib/nnc/tests/interpreter/gen/opinfo/PadType.py +++ /dev/null @@ -1,8 +0,0 @@ -# automatically generated by the FlatBuffers compiler, do not modify - -# namespace: opinfo - -class PadType(object): - SAME = 0 - VALID = 1 - diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/PoolType.py b/contrib/nnc/tests/interpreter/gen/opinfo/PoolType.py deleted file mode 100644 index c6178ca..0000000 --- a/contrib/nnc/tests/interpreter/gen/opinfo/PoolType.py +++ /dev/null @@ -1,8 +0,0 @@ -# automatically generated by the FlatBuffers compiler, do not modify - -# namespace: opinfo - -class PoolType(object): - MAXPOOL = 0 - AVGPOOL = 1 - diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/Shape.py b/contrib/nnc/tests/interpreter/gen/opinfo/Shape.py deleted file mode 100644 index 827a316..0000000 --- a/contrib/nnc/tests/interpreter/gen/opinfo/Shape.py +++ /dev/null @@ -1,46 +0,0 @@ -# automatically generated by the FlatBuffers compiler, do not modify - -# namespace: opinfo - -import flatbuffers - -class Shape(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsShape(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = Shape() - x.Init(buf, n + offset) - return x - - # Shape - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # Shape - def Dims(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # Shape - def DimsAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) - return 0 - - # 
Shape - def DimsLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - -def ShapeStart(builder): builder.StartObject(1) -def ShapeAddDims(builder, dims): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(dims), 0) -def ShapeStartDimsVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def ShapeEnd(builder): return builder.EndObject() diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/Tensor.py b/contrib/nnc/tests/interpreter/gen/opinfo/Tensor.py deleted file mode 100644 index aa85ccf..0000000 --- a/contrib/nnc/tests/interpreter/gen/opinfo/Tensor.py +++ /dev/null @@ -1,58 +0,0 @@ -# automatically generated by the FlatBuffers compiler, do not modify - -# namespace: opinfo - -import flatbuffers - -class Tensor(object): - __slots__ = ['_tab'] - - @classmethod - def GetRootAsTensor(cls, buf, offset): - n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) - x = Tensor() - x.Init(buf, n + offset) - return x - - # Tensor - def Init(self, buf, pos): - self._tab = flatbuffers.table.Table(buf, pos) - - # Tensor - def Shape(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) - if o != 0: - x = self._tab.Indirect(o + self._tab.Pos) - from .Shape import Shape - obj = Shape() - obj.Init(self._tab.Bytes, x) - return obj - return None - - # Tensor - def Data(self, j): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - a = self._tab.Vector(o) - return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) - return 0 - - # Tensor - def DataAsNumpy(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) - return 0 - - # Tensor - def DataLength(self): - o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) - if o != 0: - return self._tab.VectorLen(o) - return 0 - -def TensorStart(builder): builder.StartObject(2) -def TensorAddShape(builder, shape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shape), 0) -def TensorAddData(builder, data): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(data), 0) -def TensorStartDataVector(builder, numElems): return builder.StartVector(4, numElems, 4) -def TensorEnd(builder): return builder.EndObject() diff --git a/contrib/nnc/tests/interpreter/gen/opinfo/__init__.py b/contrib/nnc/tests/interpreter/gen/opinfo/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/contrib/nnc/tests/interpreter/gen/requirements.txt b/contrib/nnc/tests/interpreter/gen/requirements.txt deleted file mode 100644 index e6da856..0000000 --- a/contrib/nnc/tests/interpreter/gen/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flatbuffers==2015.12.22.1 -numpy==1.14.3 -tensorflow==1.9.0 \ No newline at end of file diff --git a/contrib/nnc/tests/interpreter/gen/run_flatc.sh b/contrib/nnc/tests/interpreter/gen/run_flatc.sh deleted file mode 100755 index e53d815..0000000 --- a/contrib/nnc/tests/interpreter/gen/run_flatc.sh +++ /dev/null @@ -1 +0,0 @@ -flatc -p op_info.fbs diff --git a/contrib/nnc/tests/interpreter/gen/run_gen.sh b/contrib/nnc/tests/interpreter/gen/run_gen.sh deleted file mode 100755 index e68a8d5..0000000 --- a/contrib/nnc/tests/interpreter/gen/run_gen.sh +++ /dev/null @@ -1 +0,0 @@ 
-python3 gen_test_data.py -o ../test_data/data.fb ../test_data/test_description.txt diff --git a/contrib/nnc/tests/interpreter/graph_creator.cpp b/contrib/nnc/tests/interpreter/graph_creator.cpp deleted file mode 100644 index f210547..0000000 --- a/contrib/nnc/tests/interpreter/graph_creator.cpp +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include "core/modelIR/operations/BiasAddOp.h" -#include "core/modelIR/operations/CappedReluOp.h" -#include "core/modelIR/operations/ConcatOp.h" -#include "core/modelIR/operations/Conv2DOp.h" -#include "core/modelIR/operations/DepthwiseConv2DOp.h" -#include "core/modelIR/operations/FullyConnectedOp.h" -#include "core/modelIR/operations/GemmOp.h" -#include "core/modelIR/operations/InputOp.h" -#include "core/modelIR/operations/PoolOp.h" -#include "core/modelIR/operations/ReluOp.h" -#include "core/modelIR/operations/ReshapeOp.h" -#include "core/modelIR/operations/SoftmaxOp.h" - -#include "passes/common_frontend/shape_helper.h" - -#include "op_info_generated.h" -#include "graph_creator.h" -#include "op_info_util.h" - -using namespace nnc; -using namespace nnc::mir; - -static Operation* createFullyConnected(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs[0], inputs[1]); -} - -static Operation* createConv2D(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs[0], inputs[1], getShapeParam(opInfo, 0), - std::vector{0, 0}, std::vector{0, 0}); -} - -static Operation* createDepthwiseConv2D(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs[0], inputs[1], - getShapeParam(opInfo, 0), std::vector{0, 0}, - std::vector{0, 0}); -} - -static Operation* createPool(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs[0], getPoolingType(opInfo), - getShapeParam(opInfo, 0), getShapeParam(opInfo, 1), - std::vector{0, 0}, std::vector{0, 0}, - ops::PoolOp::BorderType::ZEROFILLED); -} - -static Operation* createConcatenation(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs, getAxis(opInfo)); -} - -static Operation* createReshape(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs[0], getShapeParam(opInfo, 0)); -} - -static Operation* createReLU(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - (void)opInfo; - return g->create("y", inputs[0]); -} - -static Operation* createCappedReLU(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs[0], getAxis(opInfo)); -} - -static Operation* createSoftmax(std::unique_ptr& g, - const 
std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - return g->create("y", inputs[0], getAxis(opInfo)); -} - -static Operation* createBiasAdd(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - (void)opInfo; - return g->create("y", inputs[0], inputs[1]); -} - -static Operation* createOp(std::unique_ptr& g, - const std::vector& inputs, - const opinfo::OperatorInfo* opInfo) { - switch (opInfo->op()) { - case opinfo::OperatorType_FULLY_CONNECTED: - return createFullyConnected(g, inputs, opInfo); - case opinfo::OperatorType_CONV_2D: - return createConv2D(g, inputs, opInfo); - case opinfo::OperatorType_DEPTHWISE_CONV_2D: - return createDepthwiseConv2D(g, inputs, opInfo); - case opinfo::OperatorType_POOL_2D: - return createPool(g, inputs, opInfo); - case opinfo::OperatorType_CONCATENATION: - return createConcatenation(g, inputs, opInfo); - case opinfo::OperatorType_RESHAPE: - return createReshape(g, inputs, opInfo); - case opinfo::OperatorType_RELU: - return createReLU(g, inputs, opInfo); - case opinfo::OperatorType_SOFTMAX: - return createSoftmax(g, inputs, opInfo); - case opinfo::OperatorType_CAPPED_RELU: - return createCappedReLU(g, inputs, opInfo); - case opinfo::OperatorType_BIAS_ADD: - return createBiasAdd(g, inputs, opInfo); - default: - assert(false); - } -} - -std::unique_ptr make_graph(const opinfo::OperatorInfo* opInfo) { - // Create graph - std::unique_ptr g(new Graph()); - std::vector inputs; - - // Create inputs - for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i) { - auto inputShapeIter = opInfo->inputs()->Get(i)->shape()->dims(); - Shape inputShape = ShapeHelper::createShape(*inputShapeIter, inputShapeIter->size()); - auto inputOp = g->create("x" + std::to_string(i), inputShape); - - inputs.push_back(inputOp->getOutput(0)); - } - - // Create operation node - auto opNode = createOp(g, inputs, opInfo); - - // Mark outputs - g->markOutput(opNode); - - return g; -} diff --git a/contrib/nnc/tests/interpreter/graph_creator.h b/contrib/nnc/tests/interpreter/graph_creator.h deleted file mode 100644 index a0bc109..0000000 --- a/contrib/nnc/tests/interpreter/graph_creator.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H -#define NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H - -#include "core/modelIR/Graph.h" - -std::unique_ptr make_graph(const opinfo::OperatorInfo* opInfo); - -#endif // NNC_INTERPRETER_OP_TEST_GRAPH_CREATOR_H diff --git a/contrib/nnc/tests/interpreter/main.cpp b/contrib/nnc/tests/interpreter/main.cpp deleted file mode 100644 index 32d09c6..0000000 --- a/contrib/nnc/tests/interpreter/main.cpp +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include - -#include "gtest/gtest.h" -#include "op_info_generated.h" - -using namespace opinfo; - -std::string opInfoBuf; -const OperatorInfoList* list; - -static void readOpInfo(const std::string& filename) -{ - using istrbufiter = std::istreambuf_iterator; - - std::ifstream ifs(filename, std::ios::binary); - - if (ifs.fail()) - { - std::cout << "Problem with opening " << filename << std::endl; - exit(1); - } - - opInfoBuf.assign((istrbufiter(ifs)), istrbufiter()); -} - -int main(int argc, char *argv[]) -{ - if (argc <= 1) - return 1; - - readOpInfo(std::string(argv[1])); - list = GetOperatorInfoList(reinterpret_cast(opInfoBuf.c_str())); - - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/contrib/nnc/tests/interpreter/op_info_util.cpp b/contrib/nnc/tests/interpreter/op_info_util.cpp deleted file mode 100644 index 0de6d45..0000000 --- a/contrib/nnc/tests/interpreter/op_info_util.cpp +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include - -#include "gtest/gtest.h" - -#include "core/modelIR/Tensor.h" -#include "core/modelIR/ShapeRange.h" -#include "core/modelIR/Shape.h" -#include "op_info_util.h" - -using namespace nnc; -using namespace nnc::mir; - -std::shared_ptr getTensor(const opinfo::Tensor* t) { - Shape shape = ShapeHelper::createShape(*t->shape()->dims(), t->shape()->dims()->size()); - return std::make_shared(DTYPE::FLOAT32, shape, t->data()->Data()); -} - -ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo) -{ - switch (opInfo->poolType()) - { - case opinfo::PoolType_MAXPOOL: - return ops::PoolOp::PoolingType::MAX; - case opinfo::PoolType_AVGPOOL: - return ops::PoolOp::PoolingType::AVG; - default: - assert(false); - } -} - -Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n) -{ - auto shapeIter = opInfo->shapes()->Get(n)->dims(); - return ShapeHelper::createShape(*shapeIter, shapeIter->size()); -} - -int getAxis(const opinfo::OperatorInfo* opInfo) -{ - return opInfo->axis(); -} - -/** @brief Utility function for printing tensors, can be used for debugging. - */ -__attribute__ ((unused)) void printTensor(const TensorVariant& lhs) -{ - using nnc::mir::ShapeRange; - using nnc::mir::Tensor; - - Tensor accessor(lhs); - - for(auto& idx : ShapeRange(accessor.getShape())) - { - float val = accessor.at(idx); - std::cout << val << std::endl; - } -} - -/** @brief Custom float comparator. 
- * It is supposed to be equivalent to GTest's ASSERT_FLOAT_EQ when allowedUlpsDiff is 4. - * Reminder: if the integer representations of two same-sign floats are subtracted then - * the absolute value of the result is equal to one plus the number of representable floats - * between them. This difference tells us how many ULPs the numbers differ by. - * @usage This function only works if float implementation conforms to IEEE-754. - */ -static inline ::testing::AssertionResult areFloatsEqual(float f1, float f2, int allowedUlpsDiff) -{ - auto intRepr1 = *reinterpret_cast(&f1); - auto intRepr2 = *reinterpret_cast(&f2); - - if ((intRepr1 < 0) != (intRepr2 < 0)) - { - if (f1 == f2) // Checking for +0 and -0 - return ::testing::AssertionSuccess(); - else - return ::testing::AssertionFailure() << "Different signs"; - } - - auto ulpsDiff = std::abs(intRepr1 - intRepr2); - - if (ulpsDiff <= allowedUlpsDiff) - return ::testing::AssertionSuccess(); - else - return ::testing::AssertionFailure() << "ULP difference is " << ulpsDiff; -} - -void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs) -{ - using nnc::mir::ShapeRange; - using nnc::mir::Tensor; - - const int GTEST_FLOAT_EQ_ULP = 4; - - Tensor lhsAccessor(lhs); - Tensor rhsAccessor(rhs); - - ASSERT_EQ(lhsAccessor.getShape(), rhsAccessor.getShape()); - - for(auto& idx : ShapeRange(lhsAccessor.getShape())) - { - ASSERT_TRUE(areFloatsEqual(lhsAccessor.at(idx), rhsAccessor.at(idx), GTEST_FLOAT_EQ_ULP)); - } -} - -// Having to put print operator to the same namespace as Shape so that it can be found -namespace nnc -{ -namespace mir -{ -namespace tensor -{ - -std::ostream &operator<<(std::ostream &os, const Shape &sh) -{ - os << "Shape("; - for (int32_t i = 0; i < sh.rank(); ++i) - { - if (i != 0) - os << ", "; - os << sh.dim(i); - } - os << ")"; - return os; -} - -} // namespace tensor -} // namespace mir -} // namespace nnc diff --git a/contrib/nnc/tests/interpreter/op_info_util.h b/contrib/nnc/tests/interpreter/op_info_util.h deleted file mode 100644 index bdadf95..0000000 --- a/contrib/nnc/tests/interpreter/op_info_util.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef NNC_INTERPRETER_OP_TEST_UTIL_H -#define NNC_INTERPRETER_OP_TEST_UTIL_H - -#include -#include - -#include "core/modelIR/TensorVariant.h" -#include "core/modelIR/operations/CommonProps.h" -#include "core/modelIR/operations/PoolOp.h" - -#include "op_info_generated.h" -#include "passes/common_frontend/shape_helper.h" -#include "graph_creator.h" - - -std::shared_ptr getTensor(const opinfo::Tensor* t); -nnc::mir::ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo); -nnc::mir::Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n); -int getAxis(const opinfo::OperatorInfo* opInfo); - -__attribute__ ((unused)) void printTensor(const nnc::mir::TensorVariant& lhs); -void assertTensorEq(const nnc::mir::TensorVariant &lhs, const nnc::mir::TensorVariant &rhs); - -#endif // NNC_INTERPRETER_OP_TEST_UTIL_H diff --git a/contrib/nnc/tests/interpreter/op_test.cpp b/contrib/nnc/tests/interpreter/op_test.cpp deleted file mode 100644 index 0c98844..0000000 --- a/contrib/nnc/tests/interpreter/op_test.cpp +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include - -#include "gtest/gtest.h" -#include "op_info_generated.h" - -#include "passes/interpreter/Interpreter.h" -#include "core/modelIR/Graph.h" -#include "op_info_util.h" -#include "graph_creator.h" - -using namespace opinfo; -using namespace nnc; -using namespace nnc::mir; - -extern std::string opInfoBuf; -extern const OperatorInfoList* list; - -class InterpTestFixture : public ::testing::TestWithParam {}; - -TEST_P(InterpTestFixture, InterpTest) -{ - const OperatorInfo* opInfo = GetParam(); - std::unique_ptr g = make_graph(opInfo); - - mir::NNInterpreter interpreter; - - for (unsigned int i = 0; i < opInfo->inputs()->size(); ++i) - { - interpreter.setInput("x" + std::to_string(i), *getTensor(opInfo->inputs()->Get(i))); - } - - g->accept(&interpreter); - - // TODO: Get and check equality for multiple outputs and results. - auto res = interpreter.getResult(g->getOutput("y"))[0]; - - assertTensorEq(res, *getTensor(opInfo->results()->Get(0))); -} - -INSTANTIATE_TEST_CASE_P(InterpTestSuite, InterpTestFixture, - ::testing::ValuesIn(list->infos()->begin(), list->infos()->end())); diff --git a/contrib/nnc/tests/interpreter/test_data/test_description.txt b/contrib/nnc/tests/interpreter/test_data/test_description.txt deleted file mode 100644 index d2f2b2d..0000000 --- a/contrib/nnc/tests/interpreter/test_data/test_description.txt +++ /dev/null @@ -1,46 +0,0 @@ -# Planned features: -# [x, y, z]= should denote the type of value generation. -# For example: [2, 3, 4]=RANDOM, [3, 4, 5]=NEAR_ZERO etc. 
-
-FULLY_CONNECTED
-[[3, 3] [3, 3]]
-[[5, 10] [10, 3]]
-
-CONV_2D
-# input shape: [height, width, in_channels]
-# kernel shape: [height, width, out_channels]
-# padding type: (VALID | SAME)
-# strides: [h_stride, w_stride]
-[[5, 5, 1] [3, 3, 1, 1]] VALID [1, 1]
-[[64, 64, 4] [3, 3, 4, 2]] VALID [1, 1]
-
-DEPTHWISE_CONV_2D
-[[5, 5, 10] [3, 3, 10, 1]] VALID [1, 1]
-[[20, 20, 8] [3, 1, 8, 2]] SAME [2, 2]
-
-POOL_2D
-# input shape: [height, width, in_channels]
-# padding type: (VALID | SAME)
-# pooling type: (MAX | AVG)
-# window shape [height, width], strides [h_stride, w_stride]
-[5, 5, 1] VALID MAX [[3, 3] [1, 1]]
-[64, 64, 4] VALID MAX [[3, 3] [1, 1]]
-[32, 32, 4] VALID AVG [[5, 5] [2, 2]]
-
-CONCATENATION
-[[4, 5, 2] [4, 5, 5] [4, 5, 10]] 2
-
-RESHAPE
-[3, 4, 5] [4, 5, 3]
-
-RELU
-[5, 5, 23]
-
-CAPPED_RELU
-[4, 4, 4] 2
-
-BIAS_ADD
-[[6, 7, 8] [8]]
-
-SOFTMAX
-[6, 5, 20] 0
-- 
2.7.4
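
For reference, the removed suite was data-driven: test cases were listed in test_data/test_description.txt, gen_test_data.py computed reference results with TensorFlow and serialized them into FlatBuffers *_data.fb files, and the deleted CMakeLists.txt registered one interpreter_<op>_test per data file. A minimal sketch of that workflow, assembled from the deleted run_flatc.sh, run_gen.sh and add_test() calls; the final ctest invocation is an assumption about typical usage, not something the patch itself contains:

    # in contrib/nnc/tests/interpreter/gen/
    flatc -p op_info.fbs        # generate the opinfo Python bindings for the schema
    python3 gen_test_data.py -o ../test_data/data.fb ../test_data/test_description.txt

    # after configuring and building the tests, each test_data/*_data.fb file
    # is exposed as a ctest case named interpreter_<op>_test
    ctest -R interpreter_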