Remove loco-exporter (#6419)
author박종현/On-Device Lab(SR)/Staff Engineer/삼성전자 <jh1302.park@samsung.com>
Fri, 9 Aug 2019 09:59:06 +0000 (18:59 +0900)
committerAlexander Efimov/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Fri, 9 Aug 2019 09:59:06 +0000 (12:59 +0300)
loco-exporter had been kept around for Circle, but the Circle exporter is
likely to follow the design of the (revised) exo-tflite instead.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
16 files changed:
compiler/loco-exporter/CMakeLists.txt [deleted file]
compiler/loco-exporter/include/LocoExporter.h [deleted file]
compiler/loco-exporter/requires.cmake [deleted file]
compiler/loco-exporter/schema/schema.fbs [deleted file]
compiler/loco-exporter/src/Exporter.test.cpp [deleted file]
compiler/loco-exporter/src/LocoExporter.cpp [deleted file]
compiler/loco-exporter/src/LocoExporterImpl.cpp [deleted file]
compiler/loco-exporter/src/LocoExporterImpl.h [deleted file]
compiler/loco-exporter/src/LocoExporterUtils.cpp [deleted file]
compiler/loco-exporter/src/LocoExporterUtils.h [deleted file]
compiler/loco-exporter/src/OperationExporter.cpp [deleted file]
compiler/loco-exporter/src/OperationExporter.h [deleted file]
compiler/loco-exporter/src/TensorExporter.cpp [deleted file]
compiler/loco-exporter/src/TensorExporter.h [deleted file]
compiler/loco-exporter/src/TypeInference.cpp [deleted file]
compiler/loco-exporter/src/TypeInference.h [deleted file]

diff --git a/compiler/loco-exporter/CMakeLists.txt b/compiler/loco-exporter/CMakeLists.txt
deleted file mode 100644 (file)
index cfa91cf..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-###################
-# SCHEMA          #
-###################
-nncc_find_package(FlatBuffers QUIET)
-
-if (NOT FlatBuffers_FOUND)
-  message(WARNING "FlatBuffers not found, loco exporter will not be built")
-  return()
-endif ()
-
-FlatBuffers_Target(loco_exporter_flatbuffers
-        OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated/schema"
-        SCHEMA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/schema"
-        SCHEMA_FILES schema.fbs)
-
-###################
-# LOCO EXPORTER   #
-###################
-file(GLOB_RECURSE SOURCES "src/*.cpp")
-file(GLOB_RECURSE TESTS "src/*.test.cpp")
-list(REMOVE_ITEM SOURCES ${TESTS})
-
-add_library(loco_exporter SHARED ${SOURCES})
-target_include_directories(loco_exporter PUBLIC include)
-target_include_directories(loco_exporter PRIVATE src)
-target_link_libraries(loco_exporter PUBLIC loco_exporter_flatbuffers)
-target_link_libraries(loco_exporter PUBLIC loco)
-target_link_libraries(loco_exporter PRIVATE stdex)
-# Let's apply nncc common compile options
-#
-# NOTE This will enable strict compilation (warnings as error).
-#      Please refer to the top-level CMakeLists.txt for details
-target_link_libraries(loco_exporter PRIVATE nncc_common)
-
-if (NOT ENABLE_TEST)
-  return()
-endif (NOT ENABLE_TEST)
-
-# Google Test is mandatory for internal testing
-nncc_find_package(GTest REQUIRED)
-
-GTest_AddTest(loco_exporter_test ${TESTS})
-target_include_directories(loco_exporter_test PRIVATE src)
-target_link_libraries(loco_exporter_test stdex)
-target_link_libraries(loco_exporter_test loco_exporter)
-
-add_test(loco_exporter_test loco_exporter_test)
diff --git a/compiler/loco-exporter/include/LocoExporter.h b/compiler/loco-exporter/include/LocoExporter.h
deleted file mode 100644 (file)
index 07c3613..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __LOCO_EXPORTER_H__
-#define __LOCO_EXPORTER_H__
-
-#include "loco.h"
-
-#include <memory>
-
-namespace loco_exporter
-{
-
-class ExporterImpl;
-
-/**
- * Container for passing serialized buffer without copying it
- * Appropriate for range based loops
- */
-class BufferView
-{
-public:
-  BufferView(const char *ptr, size_t size) : _ptr(ptr), _size(size) {}
-
-  const char *begin() const { return _ptr; }
-  const char *end() const { return _ptr + _size; }
-  size_t size() const { return _size; }
-private:
-  const char *_ptr;
-  size_t _size;
-};
-
-/**
- * Usage of this class:
- *
- *   loco::Graph *g = acquireGraph();
- *
- *   Exporter e(g);
- *   e.dumpToFile("model.tflite");
- *
- * simplified version:
- *
- *   Exporter(g).dumpToFile("model.tflite");
- *
- * If dumping into file is not an option
- * it is possible to gather raw buffer data for further processing:
- *
- *   std::vector<char> other_buffer;
- *
- *   Exporter e(g);
- *   BufferView buf = e.getBuffer();
- *
- *   // note that `e` should be preserved, `buf` contains pointer to exporter internal buffer
- *
- *   for (char byte: buf)
- *   {
- *     other_buffer.push_back(byte);
- *   }
- *
- *   e.clear(); // memory could be freed if buffer is not needed anymore
- *
- */
-class Exporter
-{
-public:
-  explicit Exporter(loco::Graph *graph);
-  ~Exporter();
-
-  /**
-   *   Clears internal state, free memory
-   */
-  void clear();
-
-  /** @brief write to a file
-   *  @param path path to file where to write data
-   *  @throws any file related exceptions
-   */
-  void dumpToFile(const char *path) const;
-
-  /**
-   * @return buffer data, ownership is not transferred
-   */
-  BufferView getBuffer() const;
-
-private:
-  std::unique_ptr<ExporterImpl> _impl;
-};
-
-} // namespace exporter
-
-#endif // __LOCO_EXPORTER_H__
diff --git a/compiler/loco-exporter/requires.cmake b/compiler/loco-exporter/requires.cmake
deleted file mode 100644 (file)
index 44f6870..0000000
+++ /dev/null
@@ -1 +0,0 @@
-require("loco")
diff --git a/compiler/loco-exporter/schema/schema.fbs b/compiler/loco-exporter/schema/schema.fbs
deleted file mode 100644 (file)
index 980f13b..0000000
+++ /dev/null
@@ -1,794 +0,0 @@
-// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Revision History
-// Version 0: Initial version.
-// Version 1: Add subgraphs to schema.
-// Version 2: Rename operators to conform to NN API.
-// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
-
-namespace tflite;
-
-// This corresponds to the version.
-file_identifier "TFL3";
-// File extension of any written files.
-file_extension "tflite";
-
-// IMPORTANT: All new members of tables, enums and unions must be added at the
-// end to ensure backwards compatibility.
-
-// The type of data stored in a tensor.
-enum TensorType : byte {
-  FLOAT32 = 0,
-  FLOAT16 = 1,
-  INT32 = 2,
-  UINT8 = 3,
-  INT64 = 4,
-  STRING = 5,
-  BOOL = 6,
-  INT16 = 7,
-  COMPLEX64 = 8,
-  INT8 = 9,
-}
-
-// Custom quantization parameters for experimenting with new quantization
-// techniques.
-table CustomQuantization {
-  custom:[ubyte] (force_align: 16);
-}
-
-// Represents a specific quantization technique's parameters.
-union QuantizationDetails {
-  CustomQuantization,
-}
-
-// Parameters for converting a quantized tensor back to float.
-table QuantizationParameters {
-  // These four parameters are the asymmetric linear quantization parameters.
-  // Given a quantized value q, the corresponding float value f should be:
-  //   f = scale * (q - zero_point)
-  // For other quantization types, the QuantizationDetails below is used.
-  min:[float];  // For importing back into tensorflow.
-  max:[float];  // For importing back into tensorflow.
-  scale:[float];  // For dequantizing the tensor's values.
-  zero_point:[long];
-
-  // If this is not none, the quantization parameters above are ignored and the
-  // value of the QuantizationDetails union below should be used.
-  details:QuantizationDetails;
-}
-
-table Tensor {
-  // The tensor shape. The meaning of each entry is operator-specific but
-  // builtin ops use: [batch size, height, width, number of channels] (That's
-  // Tensorflow's NHWC).
-  shape:[int];
-  type:TensorType;
-  // An index that refers to the buffers table at the root of the model. Or,
-  // if there is no data buffer associated (i.e. intermediate results), then
-  // this is 0 (which refers to an always existent empty buffer).
-  //
-  // The data_buffer itself is an opaque container, with the assumption that the
-  // target device is little-endian. In addition, all builtin operators assume
-  // the memory is ordered such that if `shape` is [4, 3, 2], then index
-  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
-  buffer:uint;
-  name:string;  // For debugging and importing back into tensorflow.
-  quantization:QuantizationParameters;  // Optional.
-
-  is_variable:bool = false;
-}
-
-// A list of builtin operators. Builtin operators are slightly faster than custom
-// ones, but not by much. Moreover, while custom operators accept an opaque
-// object containing configuration parameters, builtins have a predetermined
-// set of acceptable options.
-enum BuiltinOperator : byte {
-  ADD = 0,
-  AVERAGE_POOL_2D = 1,
-  CONCATENATION = 2,
-  CONV_2D = 3,
-  DEPTHWISE_CONV_2D = 4,
-  // DEPTH_TO_SPACE = 5,
-  DEQUANTIZE = 6,
-  EMBEDDING_LOOKUP = 7,
-  FLOOR = 8,
-  FULLY_CONNECTED = 9,
-  HASHTABLE_LOOKUP = 10,
-  L2_NORMALIZATION = 11,
-  L2_POOL_2D = 12,
-  LOCAL_RESPONSE_NORMALIZATION = 13,
-  LOGISTIC = 14,
-  LSH_PROJECTION = 15,
-  LSTM = 16,
-  MAX_POOL_2D = 17,
-  MUL = 18,
-  RELU = 19,
-  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
-  // since different model developers use RELU1 in different ways. Never
-  // create another op called RELU1.
-  RELU_N1_TO_1 = 20,
-  RELU6 = 21,
-  RESHAPE = 22,
-  RESIZE_BILINEAR = 23,
-  RNN = 24,
-  SOFTMAX = 25,
-  SPACE_TO_DEPTH = 26,
-  SVDF = 27,
-  TANH = 28,
-  // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS
-  CONCAT_EMBEDDINGS = 29,
-  SKIP_GRAM = 30,
-  CALL = 31,
-  CUSTOM = 32,
-  EMBEDDING_LOOKUP_SPARSE = 33,
-  PAD = 34,
-  UNIDIRECTIONAL_SEQUENCE_RNN = 35,
-  GATHER = 36,
-  BATCH_TO_SPACE_ND = 37,
-  SPACE_TO_BATCH_ND = 38,
-  TRANSPOSE = 39,
-  MEAN = 40,
-  SUB = 41,
-  DIV = 42,
-  SQUEEZE = 43,
-  UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
-  STRIDED_SLICE = 45,
-  BIDIRECTIONAL_SEQUENCE_RNN = 46,
-  EXP = 47,
-  TOPK_V2 = 48,
-  SPLIT = 49,
-  LOG_SOFTMAX = 50,
-  // DELEGATE is a special op type for the operations which are delegated to
-  // other backends.
-  // WARNING: Experimental interface, subject to change
-  DELEGATE = 51,
-  BIDIRECTIONAL_SEQUENCE_LSTM = 52,
-  CAST = 53,
-  PRELU = 54,
-  MAXIMUM = 55,
-  ARG_MAX = 56,
-  MINIMUM = 57,
-  LESS = 58,
-  NEG = 59,
-  PADV2 = 60,
-  GREATER = 61,
-  GREATER_EQUAL = 62,
-  LESS_EQUAL = 63,
-  SELECT = 64,
-  SLICE = 65,
-  SIN = 66,
-  TRANSPOSE_CONV = 67,
-  SPARSE_TO_DENSE = 68,
-  TILE = 69,
-  EXPAND_DIMS = 70,
-  EQUAL = 71,
-  NOT_EQUAL = 72,
-  LOG = 73,
-  SUM = 74,
-  SQRT = 75,
-  RSQRT = 76,
-  SHAPE = 77,
-  POW = 78,
-  ARG_MIN = 79,
-  FAKE_QUANT = 80,
-  REDUCE_PROD = 81,
-  REDUCE_MAX = 82,
-  PACK = 83,
-  LOGICAL_OR = 84,
-  ONE_HOT = 85,
-  LOGICAL_AND = 86,
-  LOGICAL_NOT = 87,
-  UNPACK = 88,
-  REDUCE_MIN = 89,
-  FLOOR_DIV = 90,
-  REDUCE_ANY = 91,
-  SQUARE = 92,
-  ZEROS_LIKE = 93,
-  FILL = 94,
-  FLOOR_MOD = 95,
-  RANGE = 96,
-  RESIZE_NEAREST_NEIGHBOR = 97,
-  LEAKY_RELU = 98,
-  SQUARED_DIFFERENCE = 99,
-  MIRROR_PAD = 100,
-  ABS = 101,
-  SPLIT_V = 102,
-}
-
-// Options for the builtin operators.
-union BuiltinOptions {
-  Conv2DOptions,
-  DepthwiseConv2DOptions,
-  ConcatEmbeddingsOptions,
-  LSHProjectionOptions,
-  Pool2DOptions,
-  SVDFOptions,
-  RNNOptions,
-  FullyConnectedOptions,
-  SoftmaxOptions,
-  ConcatenationOptions,
-  AddOptions,
-  L2NormOptions,
-  LocalResponseNormalizationOptions,
-  LSTMOptions,
-  ResizeBilinearOptions,
-  CallOptions,
-  ReshapeOptions,
-  SkipGramOptions,
-  SpaceToDepthOptions,
-  EmbeddingLookupSparseOptions,
-  MulOptions,
-  PadOptions,
-  GatherOptions,
-  BatchToSpaceNDOptions,
-  SpaceToBatchNDOptions,
-  TransposeOptions,
-  ReducerOptions,
-  SubOptions,
-  DivOptions,
-  SqueezeOptions,
-  SequenceRNNOptions,
-  StridedSliceOptions,
-  ExpOptions,
-  TopKV2Options,
-  SplitOptions,
-  LogSoftmaxOptions,
-  CastOptions,
-  DequantizeOptions,
-  MaximumMinimumOptions,
-  ArgMaxOptions,
-  LessOptions,
-  NegOptions,
-  PadV2Options,
-  GreaterOptions,
-  GreaterEqualOptions,
-  LessEqualOptions,
-  SelectOptions,
-  SliceOptions,
-  TransposeConvOptions,
-  SparseToDenseOptions,
-  TileOptions,
-  ExpandDimsOptions,
-  EqualOptions,
-  NotEqualOptions,
-  ShapeOptions,
-  PowOptions,
-  ArgMinOptions,
-  FakeQuantOptions,
-  PackOptions,
-  LogicalOrOptions,
-  OneHotOptions,
-  LogicalAndOptions,
-  LogicalNotOptions,
-  UnpackOptions,
-  FloorDivOptions,
-  SquareOptions,
-  ZerosLikeOptions,
-  FillOptions,
-  BidirectionalSequenceLSTMOptions,
-  BidirectionalSequenceRNNOptions,
-  UnidirectionalSequenceLSTMOptions,
-  FloorModOptions,
-  RangeOptions,
-  ResizeNearestNeighborOptions,
-  LeakyReluOptions,
-  SquaredDifferenceOptions,
-  MirrorPadOptions,
-  AbsOptions,
-  SplitVOptions,
-}
-
-enum Padding : byte { SAME, VALID }
-
-enum ActivationFunctionType : byte {
-  NONE = 0,
-  RELU = 1,
-  RELU_N1_TO_1 = 2,
-  RELU6 = 3,
-  TANH = 4,
-  SIGN_BIT = 5,
-}
-
-table Conv2DOptions {
-  padding:Padding;
-  stride_w:int;
-  stride_h:int;
-  fused_activation_function:ActivationFunctionType;
-  dilation_w_factor:int = 1;
-  dilation_h_factor:int = 1;
-}
-
-table Pool2DOptions {
-  padding:Padding;
-  stride_w:int;
-  stride_h:int;
-  filter_width:int;
-  filter_height:int;
-  fused_activation_function:ActivationFunctionType;
-}
-
-table DepthwiseConv2DOptions {
-  // Parameters for DepthwiseConv version 1 or above.
-  padding:Padding;
-  stride_w:int;
-  stride_h:int;
-  depth_multiplier:int;
-  fused_activation_function:ActivationFunctionType;
-  // Parameters for DepthwiseConv version 2 or above.
-  dilation_w_factor:int = 1;
-  dilation_h_factor:int = 1;
-}
-
-table ConcatEmbeddingsOptions {
-  num_channels:int;
-  num_columns_per_channel:[int];
-  embedding_dim_per_channel:[int]; // This could be inferred from parameters.
-}
-
-enum LSHProjectionType: byte {
-  UNKNOWN = 0,
-  SPARSE = 1,
-  DENSE = 2,
-}
-
-table LSHProjectionOptions {
-  type: LSHProjectionType;
-}
-
-table SVDFOptions {
-  rank:int;
-  fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow RNNCell.
-table RNNOptions {
-  fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow dynamic_rnn with RNNCell.
-table SequenceRNNOptions {
-  time_major:bool;
-  fused_activation_function:ActivationFunctionType;
-}
-
-// An implementation of TensorFlow bidrectional_dynamic_rnn with RNNCell.
-table BidirectionalSequenceRNNOptions {
-  time_major:bool;
-  fused_activation_function:ActivationFunctionType;
-  merge_outputs: bool;
-}
-
-enum FullyConnectedOptionsWeightsFormat: byte {
-  DEFAULT = 0,
-  SHUFFLED4x16INT8 = 1,
-}
-
-// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
-table FullyConnectedOptions {
-  // Parameters for FullyConnected version 1 or above.
-  fused_activation_function:ActivationFunctionType;
-
-  // Parameters for FullyConnected version 2 or above.
-  weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
-}
-
-table SoftmaxOptions {
-  beta: float;
-}
-
-// An implementation of TensorFlow concat.
-table ConcatenationOptions {
-  axis:int;
-  fused_activation_function:ActivationFunctionType;
-}
-
-table AddOptions {
-  fused_activation_function:ActivationFunctionType;
-}
-
-table MulOptions {
-  fused_activation_function:ActivationFunctionType;
-}
-
-table L2NormOptions {
-  fused_activation_function:ActivationFunctionType;
-}
-
-table LocalResponseNormalizationOptions {
-  radius:int;
-  bias:float;
-  alpha:float;
-  beta:float;
-}
-
-enum LSTMKernelType : byte {
-  // Full LSTM kernel which supports peephole and projection.
-  FULL = 0,
-  // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
-  BASIC = 1,
-}
-
-// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
-table LSTMOptions {
-  // Parameters for LSTM version 1 or above.
-  fused_activation_function:ActivationFunctionType;
-  cell_clip: float; // Optional, 0.0 means no clipping
-  proj_clip: float; // Optional, 0.0 means no clipping
-
-  // Parameters for LSTM version 2 or above.
-  // Basic kernel is only supported in version 2 or above.
-  kernel_type: LSTMKernelType = FULL;
-}
-
-// An implementation of TensorFlow dynamic_rnn with LSTMCell.
-table UnidirectionalSequenceLSTMOptions {
-  fused_activation_function:ActivationFunctionType;
-  cell_clip: float; // Optional, 0.0 means no clipping
-  proj_clip: float; // Optional, 0.0 means no clipping
-
-  // If true then first dimension is sequence, otherwise batch.
-  time_major:bool;
-}
-
-table BidirectionalSequenceLSTMOptions {
-  fused_activation_function:ActivationFunctionType;
-  cell_clip: float; // Optional, 0.0 means no clipping
-  proj_clip: float; // Optional, 0.0 means no clipping
-
-  // If true, store the outputs of both directions into the first output.
-  merge_outputs: bool;
-}
-
-table ResizeBilinearOptions {
-  new_height: int (deprecated);
-  new_width: int (deprecated);
-  align_corners: bool;
-}
-
-table ResizeNearestNeighborOptions {
-  align_corners: bool;
-}
-
-// A call operation options
-table CallOptions {
-  // The subgraph index that needs to be called.
-  subgraph:uint;
-}
-
-table PadOptions {
-}
-
-table PadV2Options {
-}
-
-table ReshapeOptions {
-  new_shape:[int];
-}
-
-table SpaceToBatchNDOptions {
-}
-
-table BatchToSpaceNDOptions {
-}
-
-table SkipGramOptions {
-  ngram_size: int;
-  max_skip_size: int;
-  include_all_ngrams: bool;
-}
-
-table SpaceToDepthOptions {
-  block_size: int;
-}
-
-table SubOptions {
-  fused_activation_function:ActivationFunctionType;
-}
-
-table DivOptions {
-  fused_activation_function:ActivationFunctionType;
-}
-
-table TopKV2Options {
-}
-
-enum CombinerType : byte {
-  SUM = 0,
-  MEAN = 1,
-  SQRTN = 2,
-}
-
-table EmbeddingLookupSparseOptions {
-  combiner:CombinerType;
-}
-
-table GatherOptions {
-  axis: int;
-}
-
-table TransposeOptions {
-}
-
-table ExpOptions {
-}
-
-table ReducerOptions {
-  keep_dims: bool;
-}
-
-table SqueezeOptions {
-  squeeze_dims:[int];
-}
-
-table SplitOptions {
-  num_splits: int;
-}
-
-table SplitVOptions {
-  num_splits: int;
-}
-
-table StridedSliceOptions {
-  begin_mask: int;
-  end_mask: int;
-  ellipsis_mask: int;
-  new_axis_mask: int;
-  shrink_axis_mask: int;
-}
-
-table LogSoftmaxOptions {
-}
-
-table CastOptions {
-  in_data_type: TensorType;
-  out_data_type: TensorType;
-}
-
-table DequantizeOptions {
-}
-
-table MaximumMinimumOptions {
-}
-
-table TileOptions {
-}
-
-table ArgMaxOptions {
-  output_type : TensorType;
-}
-
-table ArgMinOptions {
-  output_type : TensorType;
-}
-
-table GreaterOptions {
-}
-
-table GreaterEqualOptions {
-}
-
-table LessOptions {
-}
-
-table LessEqualOptions {
-}
-
-table NegOptions {
-}
-
-table SelectOptions {
-}
-
-table SliceOptions {
-}
-
-table TransposeConvOptions {
-  padding:Padding;
-  stride_w:int;
-  stride_h:int;
-}
-
-table ExpandDimsOptions {
-}
-
-table SparseToDenseOptions {
-  validate_indices:bool;
-}
-
-table EqualOptions {
-}
-
-table NotEqualOptions {
-}
-
-table ShapeOptions {
-  // Optional output type of the operation (int32 or int64). Defaults to int32.
-  out_type : TensorType;
-}
-
-table PowOptions {
-}
-
-table FakeQuantOptions {
-  // Parameters supported by version 1:
-  min:float;
-  max:float;
-  num_bits:int;
-
-  // Parameters supported by version 2:
-  narrow_range:bool;
-}
-
-table PackOptions {
-  values_count:int;
-  axis:int;
-}
-
-table LogicalOrOptions {
-}
-
-table OneHotOptions {
-  axis:int;
-}
-
-table AbsOptions {
-}
-
-
-table LogicalAndOptions {
-}
-
-table LogicalNotOptions {
-}
-
-table UnpackOptions {
-  num:int;
-  axis:int;
-}
-
-table FloorDivOptions {
-}
-
-table SquareOptions {
-}
-
-table ZerosLikeOptions {
-}
-
-table FillOptions {
-}
-
-table FloorModOptions {
-}
-
-table RangeOptions {
-}
-
-table LeakyReluOptions {
-  alpha:float;
-}
-
-table SquaredDifferenceOptions {
-}
-
-enum MirrorPadMode : byte {
-  // Doesn't include borders.
-  REFLECT = 0,
-  // Includes borders.
-  SYMMETRIC = 1,
-}
-
-table MirrorPadOptions {
-  mode:MirrorPadMode;
-}
-
-// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
-// builtin, or a string if the operator is custom.
-table OperatorCode {
-  builtin_code:BuiltinOperator;
-  custom_code:string;
-
-  // The version of the operator. The version need to be bumped whenever new
-  // parameters are introduced into an op.
-  version:int = 1;
-}
-
-enum CustomOptionsFormat : byte {
-  FLEXBUFFERS = 0,
-}
-
-// An operator takes tensors as inputs and outputs. The type of operation being
-// performed is determined by an index into the list of valid OperatorCodes,
-// while the specifics of each operations is configured using builtin_options
-// or custom_options.
-table Operator {
-  // Index into the operator_codes array. Using an integer here avoids
-  // complicate map lookups.
-  opcode_index:uint;
-
-  // Optional input and output tensors are indicated by -1.
-  inputs:[int];
-  outputs:[int];
-
-  builtin_options:BuiltinOptions;
-  custom_options:[ubyte];
-  custom_options_format:CustomOptionsFormat;
-
-  // A list of booleans indicating the input tensors which are being mutated by
-  // this operator.(e.g. used by RNN and LSTM).
-  // For example, if the "inputs" array refers to 5 tensors and the second and
-  // fifth are mutable variables, then this list will contain
-  // [false, true, false, false, true].
-  //
-  // If the list is empty, no variable is mutated in this operator.
-  // The list either has the same length as `inputs`, or is empty.
-  mutating_variable_inputs:[bool];
-}
-
-// The root type, defining a subgraph, which typically represents an entire
-// model.
-table SubGraph {
-  // A list of all tensors used in this subgraph.
-  tensors:[Tensor];
-
-  // Indices of the tensors that are inputs into this subgraph. Note this is
-  // the list of non-static tensors that feed into the subgraph for inference.
-  inputs:[int];
-
-  // Indices of the tensors that are outputs out of this subgraph. Note this is
-  // the list of output tensors that are considered the product of the
-  // subgraph's inference.
-  outputs:[int];
-
-  // All operators, in execution order.
-  operators:[Operator];
-
-  // Name of this subgraph (used for debugging).
-  name:string;
-}
-
-// Table of raw data buffers (used for constant tensors). Referenced by tensors
-// by index. The generous alignment accommodates mmap-friendly data structures.
-table Buffer {
-  data:[ubyte] (force_align: 16);
-}
-
-table Model {
-  // Version of the schema.
-  version:uint;
-
-  // A list of all operator codes used in this model. This is
-  // kept in order because operators carry an index into this
-  // vector.
-  operator_codes:[OperatorCode];
-
-  // All the subgraphs of the model. The 0th is assumed to be the main
-  // model.
-  subgraphs:[SubGraph];
-
-  // A description of the model.
-  description:string;
-
-  // Buffers of the model.
-  // Note the 0th entry of this array must be an empty buffer (sentinel).
-  // This is a convention so that tensors without a buffer can provide 0 as
-  // their buffer.
-  buffers:[Buffer];
-
-  // Metadata about the model.  Indirects into the existings buffers list.
-  metadata_buffer:[int];
-}
-
-root_type Model;
diff --git a/compiler/loco-exporter/src/Exporter.test.cpp b/compiler/loco-exporter/src/Exporter.test.cpp
deleted file mode 100644 (file)
index 1e46e3e..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LocoExporter.h"
-#include "loco/IR/PermutingCodec.h"
-
-#include <stdex/Memory.h>
-#include <gtest/gtest.h>
-
-using stdex::make_unique;
-
-class TestLocoExporterModels : public ::testing::Test
-{
-public:
-  template <typename T> uint32_t setSampleShape(T *op)
-  {
-    const uint32_t n = 1;
-    const uint32_t h = 100;
-    const uint32_t w = 100;
-    const uint32_t c = 3;
-    op->rank(4);
-    op->dim(0).set(n);
-    op->dim(1).set(c);
-    op->dim(2).set(h);
-    op->dim(3).set(w);
-    return n * h * w * c;
-  }
-
-  loco::Pull *pullLayer()
-  {
-    loco::Pull *pull = _graph.nodes()->create<loco::Pull>();
-
-    auto graph_input = _graph.inputs()->create();
-    graph_input->name("graph_input");
-    loco::link(graph_input, pull);
-
-    pull->dtype(loco::DataType::FLOAT32);
-    setSampleShape(pull);
-    return pull;
-  }
-
-  loco::ConstGen *constLayer()
-  {
-    loco::ConstGen *cst = _graph.nodes()->create<loco::ConstGen>();
-    cst->dtype(loco::DataType::FLOAT32);
-
-    const auto size = setSampleShape(cst);
-    cst->size<loco::DataType::FLOAT32>(size);
-    // fill cst layer with some data
-    for (uint32_t i = 0; i < size; ++i)
-      cst->at<loco::DataType::FLOAT32>(i) = i;
-    return cst;
-  }
-
-  loco::Push *pushLayer(loco::Node *input)
-  {
-    loco::Push *push = _graph.nodes()->create<loco::Push>();
-
-    auto graph_output = _graph.outputs()->create();
-    graph_output->name("graph_output");
-    loco::link(graph_output, push);
-
-    push->from(input);
-    return push;
-  }
-
-  loco::ReLU *reluLayer(loco::Node *input)
-  {
-    loco::ReLU *relu = _graph.nodes()->create<loco::ReLU>();
-    relu->input(input);
-    return relu;
-  }
-
-  loco::MaxPool2D *validPoolLayer(loco::Node *input_fm)
-  {
-    loco::MaxPool2D *max_pool = _graph.nodes()->create<loco::MaxPool2D>();
-    auto &window = *max_pool->window();
-    window.vertical(2);
-    window.horizontal(3);
-    auto &strides = *max_pool->stride();
-    strides.vertical(4);
-    strides.horizontal(5);
-    max_pool->ifm(input_fm);
-    return max_pool;
-  }
-
-  loco::FeatureEncode *featureEncodeLayer(loco::Node *input)
-  {
-    loco::FeatureEncode *encode_layer = _graph.nodes()->create<loco::FeatureEncode>();
-    auto encoder = make_unique<loco::PermutingEncoder<loco::Domain::Feature>>();
-    (*encoder->perm())[loco::FeatureAxis::Count] = 0;
-    (*encoder->perm())[loco::FeatureAxis::Depth] = 1;
-    (*encoder->perm())[loco::FeatureAxis::Height] = 2;
-    (*encoder->perm())[loco::FeatureAxis::Width] = 3;
-    encode_layer->encoder(std::move(encoder));
-    encode_layer->input(input);
-    return encode_layer;
-  }
-
-  loco::FeatureDecode *featureDecodeLayer(loco::Node *input)
-  {
-    loco::FeatureDecode *decode_layer = _graph.nodes()->create<loco::FeatureDecode>();
-    auto decoder = make_unique<loco::PermutingDecoder<loco::Domain::Feature>>();
-    (*decoder->perm())[loco::FeatureAxis::Count] = 0;
-    (*decoder->perm())[loco::FeatureAxis::Depth] = 1;
-    (*decoder->perm())[loco::FeatureAxis::Height] = 2;
-    (*decoder->perm())[loco::FeatureAxis::Width] = 3;
-    decode_layer->decoder(std::move(decoder));
-    decode_layer->input(input);
-    return decode_layer;
-  }
-
-  loco::Graph &getGraph() { return _graph; }
-
-private:
-  loco::Graph _graph;
-};
-
-TEST_F(TestLocoExporterModels, MaxPool2D)
-{
-  // Create graph with one input, one output and single maxpool operation
-  loco::Pull *pull = pullLayer();
-  loco::FeatureEncode *encode = featureEncodeLayer(pull);
-  loco::MaxPool2D *max_pool = validPoolLayer(encode);
-  loco::FeatureDecode *decode = featureDecodeLayer(max_pool);
-  loco::Push *push = pushLayer(decode);
-  (void)push;
-
-  loco_exporter::Exporter e(&getGraph());
-  e.dumpToFile("maxpool2d.tflite");
-
-  ASSERT_TRUE(true);
-}
-
-TEST_F(TestLocoExporterModels, Const)
-{
-  // Create graph with single constGen operation that goes straight to output of net
-  loco::ConstGen *cst = constLayer();
-  loco::Push *push = pushLayer(cst);
-  (void)push;
-
-  loco_exporter::Exporter e(&getGraph());
-  e.dumpToFile("const.tflite");
-
-  ASSERT_TRUE(true);
-}
-
-TEST_F(TestLocoExporterModels, PoolWithActivation)
-{
-  // Create graph with one input, one output and single maxpool operation with relu
-  loco::Pull *pull = pullLayer();
-  loco::FeatureEncode *encode = featureEncodeLayer(pull);
-  loco::MaxPool2D *max_pool = validPoolLayer(encode);
-  loco::FeatureDecode *decode = featureDecodeLayer(max_pool);
-  loco::ReLU *relu = reluLayer(decode);
-  loco::Push *push = pushLayer(relu);
-  (void)push;
-
-  loco_exporter::Exporter e(&getGraph());
-  e.dumpToFile("maxpool_activation.tflite");
-
-  ASSERT_TRUE(true);
-}
-
-int main(int argc, char **argv)
-{
-  testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
-}
diff --git a/compiler/loco-exporter/src/LocoExporter.cpp b/compiler/loco-exporter/src/LocoExporter.cpp
deleted file mode 100644 (file)
index ce62275..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LocoExporter.h"
-
-#include "LocoExporterImpl.h"
-
-#include "stdex/Memory.h"
-
-#include <fstream>
-
-namespace loco_exporter
-{
-
-Exporter::Exporter(loco::Graph *graph) : _impl(stdex::make_unique<ExporterImpl>(graph))
-{
-  // NOTHING TO DO
-}
-
-Exporter::~Exporter() = default;
-
-void Exporter::clear() { _impl.reset(nullptr); }
-
-void Exporter::dumpToFile(const char *path) const
-{
-  const char *ptr = _impl->getBufferPointer();
-  const size_t size = _impl->getBufferSize();
-  assert(ptr && "graph is not serialized for some reason");
-  std::ofstream file(path);
-  file.write(ptr, size);
-}
-
-BufferView Exporter::getBuffer() const
-{
-  const char *ptr = _impl->getBufferPointer();
-  const size_t size = _impl->getBufferSize();
-  return {ptr, size};
-}
-
-} // namespace loco_exporter
diff --git a/compiler/loco-exporter/src/LocoExporterImpl.cpp b/compiler/loco-exporter/src/LocoExporterImpl.cpp
deleted file mode 100644 (file)
index 9719ad7..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LocoExporterImpl.h"
-
-#include "TensorExporter.h"
-#include "OperationExporter.h"
-#include "LocoExporterUtils.h"
-
-#include <unordered_map>
-#include <string>
-
-namespace loco_exporter
-{
-using namespace tflite;
-using namespace flatbuffers;
-
-ExporterImpl::ExporterImpl(loco::Graph *graph) { exportGraph(graph); }
-
-Offset<Vector<Offset<OperatorCode>>>
-encodeOperatorCodes(FlatBufferBuilder &builder, std::unordered_map<OpCode, uint32_t> &opcodes)
-{
-  std::vector<Offset<OperatorCode>> operator_codes_vec(opcodes.size());
-  for (auto it : opcodes)
-  {
-    uint32_t idx = it.second;
-    operator_codes_vec[idx] = CreateOperatorCode(builder, it.first.opcode);
-  }
-  return builder.CreateVector(operator_codes_vec);
-}
-
-flatbuffers::Offset<tflite::SubGraph> ExporterImpl::exportSubgraph(SerializedModelData &gd)
-{
-  auto tensors = _builder.CreateVector(gd._tensors);
-  auto inputs = _builder.CreateVector(gd._inputs);
-  auto outputs = _builder.CreateVector(gd._outputs);
-  auto operators = _builder.CreateVector(gd._operators);
-  auto subgraph = CreateSubGraph(_builder, tensors, inputs, outputs, operators);
-  return subgraph;
-}
-
-void ExporterImpl::exportGraph(loco::Graph *graph)
-{
-  _builder.Clear();
-
-  SerializedModelData gd;
-
-  // This version is taken from comment in fbs
-  constexpr uint32_t version = 3;
-
-  registerGraphIOName(graph, gd);
-
-  // parse graph into SerializedModelData structure
-  exportOpDefinedTensors(graph->nodes(), _builder, gd);
-
-  exportNodes(graph->nodes(), _builder, gd);
-
-  // excode operator codes
-  auto operator_codes = encodeOperatorCodes(_builder, gd._operator_codes);
-
-  // Subgraphs
-  Offset<SubGraph> subgraph = exportSubgraph(gd);
-  auto subgraphs = _builder.CreateVector(std::vector<Offset<SubGraph>>{subgraph});
-
-  // Description
-  std::string description_str = "nnpackage";
-  auto description = _builder.CreateString(description_str);
-
-  // create array of buffers
-  auto buffers = _builder.CreateVector(gd._buffers);
-
-  // empty metadata
-  std::vector<int> metadata_buffer_vec;
-  auto metadata_buffer = _builder.CreateVector(metadata_buffer_vec);
-
-  // Model
-  auto model_offset = CreateModel(_builder, version, operator_codes, subgraphs, description,
-                                  buffers, metadata_buffer);
-  FinishModelBuffer(_builder, model_offset);
-}
-
-const char *ExporterImpl::getBufferPointer() const
-{
-  return reinterpret_cast<const char *>(_builder.GetBufferPointer());
-}
-
-size_t ExporterImpl::getBufferSize() const { return _builder.GetSize(); }
-
-} // namespace loco_exporter
diff --git a/compiler/loco-exporter/src/LocoExporterImpl.h b/compiler/loco-exporter/src/LocoExporterImpl.h
deleted file mode 100644 (file)
index fe9c273..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __LOCO_EXPORTER_IMPL_H__
-#define __LOCO_EXPORTER_IMPL_H__
-
-#include "schema_generated.h"
-
-#include "loco.h"
-
-namespace loco_exporter
-{
-
-struct SerializedModelData;
-
-/**
- * internal implementation of interface exporter class
- */
-class ExporterImpl
-{
-public:
-  ExporterImpl() = delete;
-  ~ExporterImpl() = default;
-
-  explicit ExporterImpl(loco::Graph *graph);
-
-  /**
-   * @return pointer to buffer with serialized graph
-   */
-  const char *getBufferPointer() const;
-
-  /**
-   * @return size of buffer with serialized graph
-   */
-  size_t getBufferSize() const;
-
-private:
-  /**
-   * @brief create Subgraph using data stored in SerializedModelData
-   * @param gd information about serializer parts of model
-   * @return offset in buffer corresponding to serialized subgraph
-   */
-  flatbuffers::Offset<tflite::SubGraph> exportSubgraph(SerializedModelData &gd);
-
-  /**
-   * @brief root function that writes graph into internal buffer
-   * @param graph
-   */
-  void exportGraph(loco::Graph *graph);
-
-private:
-  flatbuffers::FlatBufferBuilder _builder;
-};
-
-} // namespace loco_exporter
-
-#endif //_LOCO_EXPORTER_IMPL_H__
diff --git a/compiler/loco-exporter/src/LocoExporterUtils.cpp b/compiler/loco-exporter/src/LocoExporterUtils.cpp
deleted file mode 100644 (file)
index 32f2d94..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LocoExporterUtils.h"
-
-namespace loco_exporter
-{
-
-uint32_t SerializedModelData::registerBuiltinOpcode(tflite::BuiltinOperator builtin_code)
-{
-  auto it = _operator_codes.find(OpCode{builtin_code});
-  if (it != _operator_codes.end())
-  {
-    return it->second;
-  }
-  auto idx = static_cast<uint32_t>(_operator_codes.size());
-  _operator_codes.emplace(OpCode{builtin_code}, idx);
-  return idx;
-}
-
-tflite::Padding getOpPadding(const loco::Pad<2> *pad)
-{
-  // VALID padding
-  if (pad->top() == 0 && pad->bottom() == 0 && pad->left() == 0 && pad->right() == 0)
-    return tflite::Padding_VALID;
-
-  // SAME padding
-  if ((pad->top() <= pad->bottom()) && (pad->bottom() <= pad->top() + 1) &&
-      (pad->left() <= pad->right()) && (pad->right() <= pad->left() + 1))
-    return tflite::Padding_SAME;
-
-  throw std::runtime_error("NYI for custom PAD");
-}
-
-void registerGraphIOName(loco::Graph *graph, SerializedModelData &gd)
-{
-  for (uint32_t in = 0; in < graph->inputs()->size(); ++in)
-  {
-    auto pull = graph->inputs()->at(in)->node();
-    auto name = graph->inputs()->at(in)->name();
-
-    gd._pull_to_name[pull] = name;
-  }
-  for (uint32_t out = 0; out < graph->outputs()->size(); ++out)
-  {
-    auto push = graph->outputs()->at(out)->node();
-    auto name = graph->outputs()->at(out)->name();
-
-    gd._push_to_name[push] = name;
-  }
-}
-
-} // namepsace loco_exporter
diff --git a/compiler/loco-exporter/src/LocoExporterUtils.h b/compiler/loco-exporter/src/LocoExporterUtils.h
deleted file mode 100644 (file)
index c403219..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __LOCO_EXPORTER_UTILS_H__
-#define __LOCO_EXPORTER_UTILS_H__
-
-#include "schema_generated.h"
-#include "loco.h"
-
-#include "loco/IR/PermutingCodec.h"
-
-#include <unordered_map>
-
-namespace loco_exporter
-{
-
-struct OpCode
-{
-  tflite::BuiltinOperator opcode;
-
-  bool operator==(const OpCode &rhs) const { return opcode == rhs.opcode; }
-};
-
-} // namespace loco_exporter
-
-namespace std
-{
-
-template <> struct hash<loco_exporter::OpCode>
-{
-  size_t operator()(const loco_exporter::OpCode &x) const { return hash<int>()(x.opcode); }
-};
-
-} // namespace std
-
-namespace loco_exporter
-{
-
-struct ShapeDescription
-{
-  std::vector<int32_t> _dims;
-  bool _rank_known;
-};
-
-// Prerequisites for tflite::Model object creation
-struct SerializedModelData final
-{
-  SerializedModelData() = default;
-  SerializedModelData(const SerializedModelData &) = delete;
-
-  std::unordered_map<OpCode, uint32_t> _operator_codes;
-  std::vector<flatbuffers::Offset<tflite::Operator>> _operators;
-  std::vector<flatbuffers::Offset<tflite::Tensor>> _tensors;
-  std::vector<flatbuffers::Offset<tflite::Buffer>> _buffers;
-  std::vector<int32_t> _inputs;
-  std::vector<int32_t> _outputs;
-  std::unordered_map<loco::Node *, int32_t> _node_to_tensor_id;
-
-  // Data for type and shape inference
-  std::unordered_map<loco::Node *, tflite::TensorType> _node_to_type;
-  std::unordered_map<loco::Node *, ShapeDescription> _node_to_shape;
-
-  // Graph input and output names
-  std::unordered_map<loco::Pull *, std::string> _pull_to_name;
-  std::unordered_map<loco::Push *, std::string> _push_to_name;
-
-  /**
-   * @brief if opcode is not registered in table of opcodes add it
-   * @param builtin_code
-   * @return idx of opcode in table of opcodes (see schema)
-   */
-  uint32_t registerBuiltinOpcode(tflite::BuiltinOperator builtin_code);
-};
-
-template <typename Permutation> inline bool isNHWC(Permutation *perm);
-
-template <> inline bool isNHWC(loco::Permutation<loco::Domain::Feature> *perm)
-{
-  return perm->axis(loco::FeatureAxis::Count) == 0 && perm->axis(loco::FeatureAxis::Height) == 1 &&
-         perm->axis(loco::FeatureAxis::Width) == 2 && perm->axis(loco::FeatureAxis::Depth) == 3;
-}
-
-template <> inline bool isNHWC(loco::Permutation<loco::Domain::Filter> *perm)
-{
-  return perm->axis(loco::FilterAxis::Count) == 0 && perm->axis(loco::FilterAxis::Height) == 1 &&
-         perm->axis(loco::FilterAxis::Width) == 2 && perm->axis(loco::FilterAxis::Depth) == 3;
-}
-
-tflite::Padding getOpPadding(const loco::Pad<2> *pad);
-
-/// @brief Register graph input and output names to SerializedModelData
-void registerGraphIOName(loco::Graph *graph, SerializedModelData &gd);
-
-} // namespace loco_exporter
-
-#endif // __LOCO_EXPORTER_UTILS_H__
diff --git a/compiler/loco-exporter/src/OperationExporter.cpp b/compiler/loco-exporter/src/OperationExporter.cpp
deleted file mode 100644 (file)
index 64d9f9d..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "OperationExporter.h"
-#include "LocoExporterUtils.h"
-
-namespace loco_exporter
-{
-
-using namespace flatbuffers;
-using namespace tflite;
-
-namespace
-{
-
-void exportRelu(loco::ReLU *node, FlatBufferBuilder &builder, SerializedModelData &gd)
-{
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_RELU);
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->input()]};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs);
-  gd._operators.push_back(op_offset);
-}
-
-void exportMaxPool2D(loco::MaxPool2D *node, FlatBufferBuilder &builder, SerializedModelData &gd)
-{
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_MAX_POOL_2D);
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->ifm()]};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-  tflite::Padding padding = getOpPadding(node->pad());
-  auto options = CreatePool2DOptions(builder, padding, node->stride()->horizontal(),
-                                     node->stride()->vertical(), node->window()->horizontal(),
-                                     node->window()->vertical());
-  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
-                                  tflite::BuiltinOptions_Pool2DOptions, options.Union());
-  gd._operators.push_back(op_offset);
-}
-
-void exportAvgPool2D(loco::AvgPool2D *node, FlatBufferBuilder &builder, SerializedModelData &gd)
-{
-  // TFlite only support Valid convention of average pooling
-  assert(node->convention() == loco::AvgPool2D::Convention::Valid);
-
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_AVERAGE_POOL_2D);
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->ifm()]};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-  tflite::Padding padding = getOpPadding(node->pad());
-  auto options = CreatePool2DOptions(builder, padding, node->stride()->horizontal(),
-                                     node->stride()->vertical(), node->window()->horizontal(),
-                                     node->window()->vertical());
-  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
-                                  tflite::BuiltinOptions_Pool2DOptions, options.Union());
-  gd._operators.push_back(op_offset);
-}
-
-void exportConv2D(loco::Conv2D *node, FlatBufferBuilder &builder, SerializedModelData &gd)
-{
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_CONV_2D);
-
-  // Third input of CONV_2D of tflite should be bias. We will make (and register to gd) dummy zero
-  // bias. Bias would be rank 1, have size of output kernel count, and have all zero values, i.e.
-  // zero bias.
-  auto *ker = dynamic_cast<loco::FilterEncode *>(node->ker());
-  assert(ker);
-  int32_t bias_vec_size = gd._node_to_shape[ker]._dims[0]; // output kernel count
-
-  auto bias_vec_shape_offset = builder.CreateVector(std::vector<int32_t>{bias_vec_size});
-  size_t raw_bias_vec_size = bias_vec_size * sizeof(int32_t);
-
-  std::vector<float> bias_vec_data(bias_vec_size); // initialized as zero vector
-
-  auto bias_vec_offset =
-      builder.CreateVector(reinterpret_cast<uint8_t *>(bias_vec_data.data()), raw_bias_vec_size);
-
-  auto bias_buffer_offset = CreateBuffer(builder, bias_vec_offset);
-
-  const auto bias_buffer_id = static_cast<uint32_t>(gd._buffers.size());
-
-  gd._buffers.push_back(bias_buffer_offset);
-
-  auto bias_tensor_offset =
-      CreateTensor(builder, bias_vec_shape_offset, TensorType_FLOAT32, bias_buffer_id);
-
-  auto bias_tensor_id = static_cast<int32_t>(gd._tensors.size());
-  gd._tensors.push_back(bias_tensor_offset);
-
-  // Make input, output and options for operator
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->ifm()],
-                                  gd._node_to_tensor_id[node->ker()], bias_tensor_id};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-  tflite::Padding padding = getOpPadding(node->pad());
-  auto options = CreateConv2DOptions(builder, padding, node->stride()->horizontal(),
-                                     node->stride()->vertical());
-
-  // Make CONV_2D operator
-  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
-                                  tflite::BuiltinOptions_Conv2DOptions, options.Union());
-  gd._operators.push_back(op_offset);
-}
-
-/// @brief Export given node into identity, i.e. CONCATENATION with one input
-template <typename NodeT>
-void exportIdentity(NodeT *node, FlatBufferBuilder &builder, SerializedModelData &gd)
-{
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_CONCATENATION);
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->arg(0)]};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-  auto options = CreateConcatenationOptions(builder); // use dummy 0 axis and NONE activation
-  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
-                                  tflite::BuiltinOptions_ConcatenationOptions, options.Union());
-
-  gd._operators.push_back(op_offset);
-}
-
-/// @brief Export loco nodes as TRANSPOSE
-void exportAsTranspose(loco::Node *node, FlatBufferBuilder &builder,
-                       std::vector<int32_t> &perm_vec_data, SerializedModelData &gd)
-{
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_TRANSPOSE);
-
-  auto options = CreateTransposeOptions(builder);
-
-  // Create constant tensor with perm vector
-  constexpr int perm_vec_size = 4;
-  assert(perm_vec_data.size() == perm_vec_size);
-  auto perm_vec_shape_offset = builder.CreateVector(std::vector<int32_t>{perm_vec_size});
-  constexpr size_t raw_perm_vec_size = perm_vec_size * sizeof(int32_t);
-
-  auto perm_vec_offset =
-      builder.CreateVector(reinterpret_cast<uint8_t *>(perm_vec_data.data()), raw_perm_vec_size);
-
-  auto perm_buffer_offset = CreateBuffer(builder, perm_vec_offset);
-
-  const auto perm_buffer_id = static_cast<uint32_t>(gd._buffers.size());
-
-  gd._buffers.push_back(perm_buffer_offset);
-
-  auto perm_tensor_offset =
-      CreateTensor(builder, perm_vec_shape_offset, TensorType_INT32, perm_buffer_id);
-
-  auto perm_tensor_id = static_cast<int32_t>(gd._tensors.size());
-  gd._tensors.push_back(perm_tensor_offset);
-
-  // Create permutation node
-
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->arg(0)], perm_tensor_id};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[node]};
-
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-
-  constexpr auto options_type = tflite::BuiltinOptions::BuiltinOptions_TransposeOptions;
-
-  auto transpose_offset =
-      CreateOperator(builder, op_idx, inputs, outputs, options_type, options.Union());
-  gd._operators.push_back(transpose_offset);
-}
-
-void exportFeatureEncode(loco::FeatureEncode *node, FlatBufferBuilder &builder,
-                         SerializedModelData &gd)
-{
-  auto encoder = dynamic_cast<loco::PermutingEncoder<loco::Domain::Feature> *>(node->encoder());
-  auto perm = encoder->perm();
-
-  if (isNHWC(perm))
-  {
-    // Note that tflite represents feature as NHWC
-    exportIdentity(node, builder, gd);
-  }
-  else
-  {
-    std::vector<int32_t> perm_vec_data(4);
-    perm_vec_data[0] = perm->axis(loco::FeatureAxis::Count);
-    perm_vec_data[1] = perm->axis(loco::FeatureAxis::Height);
-    perm_vec_data[2] = perm->axis(loco::FeatureAxis::Width);
-    perm_vec_data[3] = perm->axis(loco::FeatureAxis::Depth);
-
-    exportAsTranspose(node, builder, perm_vec_data, gd);
-  }
-}
-
-void exportFeatureDecode(loco::FeatureDecode *node, FlatBufferBuilder &builder,
-                         SerializedModelData &gd)
-{
-  auto decoder = dynamic_cast<loco::PermutingDecoder<loco::Domain::Feature> *>(node->decoder());
-  auto perm = decoder->perm();
-
-  if (isNHWC(perm))
-  {
-    // Note that tflite represents feature as NHWC
-    exportIdentity(node, builder, gd);
-  }
-  else
-  {
-    std::vector<int32_t> perm_vec_data(4);
-    perm_vec_data[perm->axis(loco::FeatureAxis::Count)] = 0;
-    perm_vec_data[perm->axis(loco::FeatureAxis::Height)] = 1;
-    perm_vec_data[perm->axis(loco::FeatureAxis::Width)] = 2;
-    perm_vec_data[perm->axis(loco::FeatureAxis::Depth)] = 3;
-
-    exportAsTranspose(node, builder, perm_vec_data, gd);
-  }
-}
-
-void exportFilterEncode(loco::FilterEncode *node, FlatBufferBuilder &builder,
-                        SerializedModelData &gd)
-{
-  auto encoder = dynamic_cast<loco::PermutingEncoder<loco::Domain::Filter> *>(node->encoder());
-  auto perm = encoder->perm();
-
-  if (isNHWC(perm))
-  {
-    // Note that tflite represents filter as NHWC
-    exportIdentity(node, builder, gd);
-  }
-  else
-  {
-    std::vector<int32_t> perm_vec_data(4);
-    // NOTE In tflite, all tensors means NHWC, so 0 = N, 1 = H, 2 = W, 3 = C
-    perm_vec_data[0] = perm->axis(loco::FilterAxis::Count);
-    perm_vec_data[1] = perm->axis(loco::FilterAxis::Height);
-    perm_vec_data[2] = perm->axis(loco::FilterAxis::Width);
-    perm_vec_data[3] = perm->axis(loco::FilterAxis::Depth);
-
-    exportAsTranspose(node, builder, perm_vec_data, gd);
-  }
-}
-
-void exportBiasAdd(loco::BiasAdd<loco::Domain::Tensor> *node, FlatBufferBuilder &builder,
-                   SerializedModelData &gd)
-{
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_ADD);
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->value()],
-                                  gd._node_to_tensor_id[node->bias()]};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-  auto options = CreateAddOptions(builder); // dummy option
-  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
-                                  tflite::BuiltinOptions_AddOptions, options.Union());
-  gd._operators.push_back(op_offset);
-}
-
-/// @brief Export CONCATENATION of **TWO** tensors only
-void exportConcat(loco::TensorConcat *node, FlatBufferBuilder &builder, SerializedModelData &gd)
-{
-  uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_CONCATENATION);
-  std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->lhs()],
-                                  gd._node_to_tensor_id[node->rhs()]};
-  std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
-  auto inputs = builder.CreateVector(inputs_vec);
-  auto outputs = builder.CreateVector(outputs_vec);
-  auto options = CreateConcatenationOptions(builder, node->axis());
-  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
-                                  tflite::BuiltinOptions_ConcatenationOptions, options.Union());
-
-  gd._operators.push_back(op_offset);
-}
-
-void exportNode(loco::Node *node, flatbuffers::FlatBufferBuilder &builder,
-                SerializedModelData &data)
-{
-  if (auto *relu = dynamic_cast<loco::ReLU *>(node))
-  {
-    exportRelu(relu, builder, data);
-  }
-  else if (dynamic_cast<loco::Pull *>(node))
-  {
-    data._inputs.push_back(data._node_to_tensor_id[node]);
-  }
-  else if (dynamic_cast<loco::Push *>(node))
-  {
-    data._outputs.push_back(data._node_to_tensor_id[node->arg(0)]);
-  }
-  else if (auto *encode = dynamic_cast<loco::FeatureEncode *>(node))
-  {
-    exportFeatureEncode(encode, builder, data);
-  }
-  else if (auto *decode = dynamic_cast<loco::FeatureDecode *>(node))
-  {
-    exportFeatureDecode(decode, builder, data);
-  }
-  else if (auto *encode = dynamic_cast<loco::FilterEncode *>(node))
-  {
-    exportFilterEncode(encode, builder, data);
-  }
-  else if (dynamic_cast<loco::ConstGen *>(node))
-  {
-    // skip, everything is done in exportOpDefinedTensors
-  }
-  else if (auto *max_pool = dynamic_cast<loco::MaxPool2D *>(node))
-  {
-    exportMaxPool2D(max_pool, builder, data);
-  }
-  else if (auto *avg_pool = dynamic_cast<loco::AvgPool2D *>(node))
-  {
-    exportAvgPool2D(avg_pool, builder, data);
-  }
-  else if (auto *conv2d = dynamic_cast<loco::Conv2D *>(node))
-  {
-    exportConv2D(conv2d, builder, data);
-  }
-  else if (auto *tconcat = dynamic_cast<loco::TensorConcat *>(node))
-  {
-    exportConcat(tconcat, builder, data);
-  }
-  else if (auto *encode = dynamic_cast<loco::BiasEncode *>(node))
-  {
-    exportIdentity(encode, builder, data);
-  }
-  else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
-  {
-    exportBiasAdd(biasadd, builder, data);
-  }
-  else
-  {
-    assert(false && "unsupported node found");
-  }
-}
-
-} // namespace
-
-void exportNodes(loco::Graph::NodeContext *nodes, FlatBufferBuilder &builder,
-                 SerializedModelData &gd)
-{
-  for (uint32_t node_id = 0; node_id < nodes->size(); node_id++)
-  {
-    exportNode(nodes->at(node_id), builder, gd);
-  }
-}
-
-} // namespace loco_exporter
diff --git a/compiler/loco-exporter/src/OperationExporter.h b/compiler/loco-exporter/src/OperationExporter.h
deleted file mode 100644 (file)
index a260f75..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __LOCO_EXPORTER_OPERATION_EXPORTER_H__
-#define __LOCO_EXPORTER_OPERATION_EXPORTER_H__
-
-#include "LocoExporterUtils.h"
-#include "loco/IR/Graph.h"
-
-namespace loco_exporter
-{
-
-/**
- * @brief create Operators corresponding to model nodes
- * @param nodes container with nodes
- * @param gd information about serializer parts of model
- */
-void exportNodes(loco::Graph::NodeContext *nodes, flatbuffers::FlatBufferBuilder &builder,
-                 SerializedModelData &gd);
-
-} // namespace loco_exporter
-
-#endif //__LOCO_EXPORTER_OPERATION_EXPORTER_H__
diff --git a/compiler/loco-exporter/src/TensorExporter.cpp b/compiler/loco-exporter/src/TensorExporter.cpp
deleted file mode 100644 (file)
index 9cef73a..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TensorExporter.h"
-#include "TypeInference.h"
-
-#include "loco/IR/Algorithm.h"
-
-namespace loco_exporter
-{
-using namespace tflite;
-using namespace flatbuffers;
-
-namespace
-{
-
-flatbuffers::Offset<Vector<int32_t>> encodeShape(FlatBufferBuilder &builder,
-                                                 const ShapeDescription &shape)
-{
-  assert(shape._rank_known && "unknown number of dimensions is not supported");
-  return builder.CreateVector(shape._dims);
-}
-
-template <typename NodeT>
-flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, NodeT *)
-{
-  return CreateBuffer(builder);
-}
-
-template <>
-flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, loco::ConstGen *c)
-{
-  assert(c->dtype() == loco::DataType::FLOAT32);
-  std::vector<float> raw_data;
-  const uint32_t size = c->size<loco::DataType::FLOAT32>();
-  raw_data.reserve(size);
-  for (uint32_t i = 0; i < size; ++i)
-  {
-    raw_data.push_back(c->at<loco::DataType::FLOAT32>(i));
-  }
-  const size_t raw_size = size * sizeof(float);
-  auto array_offset = builder.CreateVector(reinterpret_cast<uint8_t *>(raw_data.data()), raw_size);
-  return CreateBuffer(builder, array_offset);
-}
-
-} // namespace
-
-template <typename NodeT>
-void exportOpDefinedTensor(NodeT *node, FlatBufferBuilder &builder, SerializedModelData &gd)
-{
-  // Create and register output tensor shape
-  ShapeDescription shape_description = getOpResultShape(node, gd);
-  gd._node_to_shape[node] = shape_description;
-  auto shape_offset = encodeShape(builder, shape_description);
-
-  // encode and register output tensor type
-  auto tensor_type = getOpResultType(node, gd);
-  gd._node_to_type[node] = tensor_type;
-
-  // encode and register output tensor buffer
-  auto buffer = encodeOpBuffer(builder, node);
-  auto buffer_id = static_cast<uint32_t>(gd._buffers.size());
-  gd._buffers.push_back(buffer);
-
-  // encode and register tensor itself using attributes from previous steps
-  auto tensor_id = static_cast<uint32_t>(gd._tensors.size());
-
-  std::string name;
-  // if current node is input
-  if (auto pull = dynamic_cast<loco::Pull *>(node))
-  {
-    name = gd._pull_to_name[pull];
-  }
-  // if next node is output
-  else if (auto push = dynamic_cast<loco::Push *>(*loco::succs(node).begin()))
-  {
-    name = gd._push_to_name[push];
-  }
-  else
-  {
-    name = "t_" + std::to_string(tensor_id);
-  }
-  auto name_offset = builder.CreateString(name);
-  auto tensor_offset = CreateTensor(builder, shape_offset, tensor_type, buffer_id, name_offset,
-                                    /*quantization*/ 0, /*is_variable*/ false);
-  gd._node_to_tensor_id[node] = tensor_id;
-  gd._tensors.push_back(tensor_offset);
-}
-
-void exportOpDefinedTensors(loco::Graph::NodeContext *nodes, FlatBufferBuilder &builder,
-                            SerializedModelData &gd)
-{
-  // find entrances of graph
-  std::vector<loco::Node *> roots;
-  for (uint32_t node_id = 0; node_id < nodes->size(); ++node_id)
-  {
-    loco::Node *node = nodes->at(node_id);
-    if (dynamic_cast<loco::Push *>(node))
-    {
-      roots.push_back(node);
-    }
-  }
-
-  // Operations should be traversed in RPO because during processing of current operation
-  // we need to know all attributes of previous operations,
-  // like shape, type,tensor id related with previous operation
-  auto sequence = loco::postorder_traversal(roots);
-  for (loco::Node *node : sequence)
-  {
-    if (auto *pull = dynamic_cast<loco::Pull *>(node))
-    {
-      // Create tensor for input node
-      exportOpDefinedTensor(pull, builder, gd);
-    }
-    else if (dynamic_cast<loco::Push *>(node))
-    {
-      // Do nothing for exit node
-    }
-    else if (auto *cst = dynamic_cast<loco::ConstGen *>(node))
-    {
-      // Create tensor filled with constant data
-      exportOpDefinedTensor(cst, builder, gd);
-    }
-    else if (auto *encode = dynamic_cast<loco::FeatureEncode *>(node))
-    {
-      exportOpDefinedTensor(encode, builder, gd);
-    }
-    else if (auto *decode = dynamic_cast<loco::FeatureDecode *>(node))
-    {
-      exportOpDefinedTensor(decode, builder, gd);
-    }
-    else if (auto *encode = dynamic_cast<loco::FilterEncode *>(node))
-    {
-      exportOpDefinedTensor(encode, builder, gd);
-    }
-    else if (auto *max_pool = dynamic_cast<loco::MaxPool2D *>(node))
-    {
-      exportOpDefinedTensor(max_pool, builder, gd);
-    }
-    else if (auto *avg_pool = dynamic_cast<loco::AvgPool2D *>(node))
-    {
-      exportOpDefinedTensor(avg_pool, builder, gd);
-    }
-    else if (auto *conv2d = dynamic_cast<loco::Conv2D *>(node))
-    {
-      exportOpDefinedTensor(conv2d, builder, gd);
-    }
-    else if (auto *relu = dynamic_cast<loco::ReLU *>(node))
-    {
-      exportOpDefinedTensor(relu, builder, gd);
-    }
-    else if (auto *tconcat = dynamic_cast<loco::TensorConcat *>(node))
-    {
-      exportOpDefinedTensor(tconcat, builder, gd);
-    }
-    else if (auto *encode = dynamic_cast<loco::BiasEncode *>(node))
-    {
-      exportOpDefinedTensor(encode, builder, gd);
-    }
-    else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
-    {
-      exportOpDefinedTensor(biasadd, builder, gd);
-    }
-    else
-    {
-      assert(false && "unsupported node type");
-    }
-  }
-}
-
-} // namespace loco_exporter
diff --git a/compiler/loco-exporter/src/TensorExporter.h b/compiler/loco-exporter/src/TensorExporter.h
deleted file mode 100644 (file)
index 8ee7dbe..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __LOCO_EXPORTER_TENSOR_EXPORTER_H__
-#define __LOCO_EXPORTER_TENSOR_EXPORTER_H__
-
-#include "LocoExporterUtils.h"
-
-#include "loco/IR/Graph.h"
-
-#include "flatbuffers/flatbuffers.h"
-
-namespace loco_exporter
-{
-
-/**
- * @brief create Tensors corresponding to results of all nodes in graph
- * @param nodes list of nodes in computational graph
- * @param gd information about serialized parts of model
- */
-void exportOpDefinedTensors(loco::Graph::NodeContext *nodes,
-                            flatbuffers::FlatBufferBuilder &builder, SerializedModelData &gd);
-}
-
-#endif //__LOCO_EXPORTER_TENSOR_EXPORTER_H__
diff --git a/compiler/loco-exporter/src/TypeInference.cpp b/compiler/loco-exporter/src/TypeInference.cpp
deleted file mode 100644 (file)
index c83294a..0000000
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "TypeInference.h"
-
-#include "schema_generated.h"
-
-#include <type_traits>
-
-namespace loco_exporter
-{
-
-namespace
-{
-
-tflite::TensorType translateLocoTypeToTFLite(loco::DataType dtype)
-{
-  switch (dtype)
-  {
-    case loco::DataType::U8:
-      return tflite::TensorType_UINT8;
-    //  case loco::DataType::U16: unsupported
-    //  case loco::DataType::U32: unsupported
-    //  case loco::DataType::U64: unsupported
-    case loco::DataType::S8:
-      return tflite::TensorType_INT8;
-    case loco::DataType::S16:
-      return tflite::TensorType_INT16;
-    case loco::DataType::S32:
-      return tflite::TensorType_INT32;
-    case loco::DataType::S64:
-      return tflite::TensorType_INT64;
-    case loco::DataType::FLOAT16:
-      return tflite::TensorType_FLOAT16;
-    case loco::DataType::FLOAT32:
-      return tflite::TensorType_FLOAT32;
-    //  case loco::DataType::FLOAT64: unsupported
-    default:
-      assert(false && "unsupported data type");
-  }
-}
-
-template <typename T, typename If = typename std::enable_if<std::is_integral<T>::value, int>::type>
-T ceil_div(T dividend, T divisor)
-{
-  assert(dividend > 0 && divisor > 0 && "this implementations is for positive numbers only");
-  return (dividend + divisor - 1) / divisor;
-}
-
-} // namespace
-
-tflite::TensorType getOpResultType(loco::ConstGen *node, SerializedModelData &)
-{
-  return translateLocoTypeToTFLite(node->dtype());
-}
-
-tflite::TensorType getOpResultType(loco::Pull *node, SerializedModelData &)
-{
-  return translateLocoTypeToTFLite(node->dtype());
-}
-
-tflite::TensorType getOpResultType(loco::ReLU *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->input()];
-}
-
-tflite::TensorType getOpResultType(loco::MaxPool2D *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->ifm()];
-}
-
-tflite::TensorType getOpResultType(loco::AvgPool2D *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->ifm()];
-}
-
-tflite::TensorType getOpResultType(loco::Conv2D *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->ifm()];
-}
-
-tflite::TensorType getOpResultType(loco::FeatureEncode *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->input()];
-}
-
-tflite::TensorType getOpResultType(loco::FeatureDecode *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->input()];
-}
-
-tflite::TensorType getOpResultType(loco::FilterEncode *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->input()];
-}
-
-tflite::TensorType getOpResultType(loco::TensorConcat *node, SerializedModelData &gd)
-{
-  tflite::TensorType lhs_type = gd._node_to_type[node->lhs()];
-  tflite::TensorType rhs_type = gd._node_to_type[node->rhs()];
-
-  // TODO support heterogenous type combination
-  assert(lhs_type == rhs_type);
-
-  return lhs_type;
-}
-
-tflite::TensorType getOpResultType(loco::BiasEncode *node, SerializedModelData &gd)
-{
-  return gd._node_to_type[node->input()];
-}
-
-tflite::TensorType getOpResultType(loco::BiasAdd<loco::Domain::Tensor> *node,
-                                   SerializedModelData &gd)
-{
-  tflite::TensorType value_type = gd._node_to_type[node->value()];
-  tflite::TensorType bias_type = gd._node_to_type[node->bias()];
-
-  // TODO support heterogenous type combination
-  assert(value_type == bias_type);
-
-  return value_type;
-}
-
-int32_t decodeShapeDimension(const loco::Dimension &dim)
-{
-  if (!dim.known())
-    return -1;
-  return dim.value();
-}
-
-loco::Dimension encodeShapeDimension(const int32_t &value)
-{
-  if (value == -1)
-    return loco::Dimension();
-  return {static_cast<uint32_t>(value)};
-}
-
-ShapeDescription getOpResultShape(loco::Pull *node, SerializedModelData &)
-{
-  ShapeDescription shape;
-  shape._rank_known = true;
-  shape._dims.reserve(node->rank());
-  for (uint32_t i = 0; i < node->rank(); ++i)
-  {
-    shape._dims.push_back(decodeShapeDimension(node->dim(i)));
-  }
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::ConstGen *node, SerializedModelData &)
-{
-  ShapeDescription shape;
-  shape._rank_known = true;
-  shape._dims.reserve(node->rank());
-  for (uint32_t i = 0; i < node->rank(); ++i)
-  {
-    shape._dims.push_back(decodeShapeDimension(node->dim(i)));
-  }
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::MaxPool2D *node, SerializedModelData &gd)
-{
-  loco::Node *pred = node->ifm();
-  const ShapeDescription &pred_shape = gd._node_to_shape[pred];
-  if (!pred_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-  ShapeDescription shape;
-  shape._rank_known = true;
-  shape._dims.resize(4);
-  shape._dims[0] = pred_shape._dims[0];
-  shape._dims[3] = pred_shape._dims[3];
-  tflite::Padding padding = getOpPadding(node->pad());
-  switch (padding)
-  {
-    case tflite::Padding_SAME:
-    {
-      auto height = static_cast<uint32_t>(pred_shape._dims[1]);
-      auto width = static_cast<uint32_t>(pred_shape._dims[2]);
-
-      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
-
-      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
-      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
-      break;
-    }
-    case tflite::Padding_VALID:
-    {
-      auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
-      auto padded_w =
-          static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
-
-      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
-
-      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
-      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
-      break;
-    }
-    default:
-      assert(false && "unknown padding type");
-  }
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::AvgPool2D *node, SerializedModelData &gd)
-{
-  const ShapeDescription &ifm_shape = gd._node_to_shape[node->ifm()];
-  assert(ifm_shape._rank_known);
-
-  ShapeDescription shape;
-  shape._rank_known = true;
-  shape._dims.resize(4);
-  shape._dims[0] = ifm_shape._dims[0]; // copy batch
-  shape._dims[3] = ifm_shape._dims[3]; // copy channel
-
-  tflite::Padding padding = getOpPadding(node->pad());
-  switch (padding)
-  {
-    case tflite::Padding_SAME:
-    {
-      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
-      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
-
-      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
-
-      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
-      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
-      break;
-    }
-    case tflite::Padding_VALID:
-    {
-      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
-      auto padded_w =
-          static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
-
-      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
-
-      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
-      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
-      break;
-    }
-    default:
-      assert(false && "unknown padding type");
-  }
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::Conv2D *node, SerializedModelData &gd)
-{
-  loco::Node *ifm = node->ifm();
-  const ShapeDescription &ifm_shape = gd._node_to_shape[ifm];
-  if (!ifm_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-
-  auto *ker = dynamic_cast<loco::FilterEncode *>(node->ker());
-  assert(ker);
-  const ShapeDescription &ker_shape = gd._node_to_shape[ker];
-  if (!ker_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-
-  ShapeDescription shape;
-  shape._rank_known = true;
-  shape._dims.resize(4);
-  shape._dims[0] = ifm_shape._dims[0];
-  shape._dims[3] = ker_shape._dims[0];
-  tflite::Padding padding = getOpPadding(node->pad());
-  switch (padding)
-  {
-    case tflite::Padding_SAME:
-    {
-      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
-      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
-
-      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
-      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
-
-      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
-      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
-      break;
-    }
-    case tflite::Padding_VALID:
-    {
-      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
-      auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
-
-      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
-      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
-
-      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
-      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
-      break;
-    }
-    default:
-      assert(false && "unknown padding type");
-  }
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::ReLU *node, SerializedModelData &gd)
-{
-  return gd._node_to_shape[node->input()];
-}
-
-ShapeDescription getOpResultShape(loco::FeatureEncode *node, SerializedModelData &gd)
-{
-  const ShapeDescription &pred_shape = gd._node_to_shape[node->input()];
-  if (!pred_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-  ShapeDescription shape;
-  shape._rank_known = true;
-  loco::TensorShape tensor_shape;
-  uint32_t num_dims = pred_shape._dims.size();
-  tensor_shape.rank(num_dims);
-  for (uint32_t i = 0; i < num_dims; ++i)
-  {
-    tensor_shape.dim(i) = encodeShapeDimension(pred_shape._dims[i]);
-  }
-  loco::FeatureShape feature_shape = node->encoder()->shape(tensor_shape);
-  shape._dims.resize(4);
-  shape._dims[0] = decodeShapeDimension(feature_shape.count());
-  shape._dims[1] = decodeShapeDimension(feature_shape.height());
-  shape._dims[2] = decodeShapeDimension(feature_shape.width());
-  shape._dims[3] = decodeShapeDimension(feature_shape.depth());
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::FeatureDecode *node, SerializedModelData &gd)
-{
-  const ShapeDescription &pred_shape = gd._node_to_shape[node->input()];
-  if (!pred_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-  ShapeDescription shape;
-  shape._rank_known = true;
-  loco::FeatureShape feature_shape;
-  feature_shape.count() = encodeShapeDimension(pred_shape._dims[0]);
-  feature_shape.height() = encodeShapeDimension(pred_shape._dims[1]);
-  feature_shape.width() = encodeShapeDimension(pred_shape._dims[2]);
-  feature_shape.depth() = encodeShapeDimension(pred_shape._dims[3]);
-  loco::TensorShape tensor_shape = node->decoder()->shape(feature_shape);
-  shape._dims.resize(4);
-  for (uint32_t i = 0; i < 4; ++i)
-  {
-    shape._dims[i] = decodeShapeDimension(tensor_shape.dim(i));
-  }
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::FilterEncode *node, SerializedModelData &gd)
-{
-  const ShapeDescription &input_shape = gd._node_to_shape[node->input()];
-  if (!input_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-  ShapeDescription shape;
-  shape._rank_known = true;
-  loco::TensorShape tensor_shape;
-  uint32_t num_dims = input_shape._dims.size();
-  tensor_shape.rank(num_dims);
-  for (uint32_t i = 0; i < num_dims; ++i)
-  {
-    tensor_shape.dim(i) = encodeShapeDimension(input_shape._dims[i]);
-  }
-  loco::FilterShape filter_shape = node->encoder()->shape(tensor_shape);
-  shape._dims.resize(4);
-  shape._dims[0] = decodeShapeDimension(filter_shape.count());
-  shape._dims[1] = decodeShapeDimension(filter_shape.height());
-  shape._dims[2] = decodeShapeDimension(filter_shape.width());
-  shape._dims[3] = decodeShapeDimension(filter_shape.depth());
-  return shape;
-}
-
-ShapeDescription getOpResultShape(loco::TensorConcat *node, SerializedModelData &gd)
-{
-  const ShapeDescription &lhs_shape = gd._node_to_shape[node->lhs()];
-  if (!lhs_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-
-  const ShapeDescription &rhs_shape = gd._node_to_shape[node->rhs()];
-  if (!rhs_shape._rank_known)
-  {
-    // return unknown shape
-    return {};
-  }
-
-  ShapeDescription ret;
-
-  assert(lhs_shape._dims.size() == rhs_shape._dims.size());
-  ret._dims.resize(lhs_shape._dims.size());
-
-  uint32_t axis = node->axis();
-
-  for (uint32_t i = 0; i < lhs_shape._dims.size(); ++i)
-  {
-    if (i == axis)
-    {
-      ret._dims[i] = lhs_shape._dims[i] + rhs_shape._dims[i];
-    }
-    else
-    {
-      assert(lhs_shape._dims[i] == rhs_shape._dims[i]);
-      ret._dims[i] = lhs_shape._dims[i];
-    }
-  }
-  ret._rank_known = true;
-
-  return ret;
-}
-
-ShapeDescription getOpResultShape(loco::BiasEncode *node, SerializedModelData &gd)
-{
-  const ShapeDescription &input_shape = gd._node_to_shape[node->input()];
-
-  // Bias should be rank 1
-  assert(input_shape._dims.size() == 1);
-
-  return input_shape;
-}
-
-ShapeDescription getOpResultShape(loco::BiasAdd<loco::Domain::Tensor> *node,
-                                  SerializedModelData &gd)
-{
-  const ShapeDescription &value_shape = gd._node_to_shape[node->value()];
-  const ShapeDescription &bias_shape = gd._node_to_shape[node->bias()];
-
-  // For TFlite, only supports last bias add axis. Unless, broadcasting is not performed as
-  // expected.
-  assert(node->axis() == value_shape._dims.size() - 1);
-
-  // Bias should be rank 1
-  assert(bias_shape._dims.size() == 1);
-
-  // Channel count coherency for proper broadcast
-  assert(bias_shape._dims[0] == value_shape._dims[node->axis()]);
-
-  return value_shape;
-}
-
-} // namespace loco_exporter
diff --git a/compiler/loco-exporter/src/TypeInference.h b/compiler/loco-exporter/src/TypeInference.h
deleted file mode 100644 (file)
index b87aeb0..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "LocoExporterUtils.h"
-#include "loco/IR/Nodes.h"
-
-#ifndef __LOCO_EXPORTER_TYPEINFERENCE_H__
-#define __LOCO_EXPORTER_TYPEINFERENCE_H__
-
-namespace loco_exporter
-{
-
-// Tensor type inference functions
-
-tflite::TensorType getOpResultType(loco::ConstGen *node, SerializedModelData &);
-
-tflite::TensorType getOpResultType(loco::Pull *node, SerializedModelData &);
-
-tflite::TensorType getOpResultType(loco::ReLU *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::MaxPool2D *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::AvgPool2D *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::Conv2D *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::FeatureEncode *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::FeatureDecode *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::FilterEncode *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::TensorConcat *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::BiasEncode *node, SerializedModelData &gd);
-
-tflite::TensorType getOpResultType(loco::BiasAdd<loco::Domain::Tensor> *node,
-                                   SerializedModelData &gd);
-
-// Shape inference functions
-
-ShapeDescription getOpResultShape(loco::Pull *node, SerializedModelData &);
-
-ShapeDescription getOpResultShape(loco::ConstGen *node, SerializedModelData &);
-
-ShapeDescription getOpResultShape(loco::MaxPool2D *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::AvgPool2D *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::Conv2D *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::ReLU *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::FeatureEncode *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::FeatureDecode *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::FilterEncode *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::TensorConcat *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::BiasEncode *node, SerializedModelData &gd);
-
-ShapeDescription getOpResultShape(loco::BiasAdd<loco::Domain::Tensor> *node,
-                                  SerializedModelData &gd);
-}
-
-#endif //__LOCO_EXPORTER_TYPEINFERENCE_H__