[FlatBuffer] Add nntrainer_schema.fbs
authorDongHak Park <donghak.park@samsung.com>
Tue, 14 Feb 2023 10:12:24 +0000 (19:12 +0900)
committerJijoong Moon <jijoong.moon@samsung.com>
Fri, 17 Mar 2023 11:00:02 +0000 (20:00 +0900)
Add nntrainer_schema.fbs
- This schema is for FlatBuffer export
- It contains only Fully-Connected Layer options
- It contains TensorFlow Lite's schema

Modify meson.build
- For compiling nntrainer_schema_generated.h

This will be updated for more layers and operators

Signed-off-by: DongHak Park <donghak.park@samsung.com>
nntrainer/compiler/meson.build
nntrainer/compiler/nntrainer_schema.fbs [new file with mode: 0644]

index d880a95..72f5362 100644 (file)
@@ -37,6 +37,24 @@ if get_option('enable-tflite-interpreter')
     'tflite_interpreter.cpp',
     'tflite_opnode.cpp'
   ]
+
+  if not flatc_prog.found()
+    error('flatc executable not found')
+  endif
+  flat_header2 = custom_target('nntrainer-schema',
+                               input: 'nntrainer_schema.fbs',
+                               output: 'nntrainer_schema_generated.h',
+                               command: [flatc_prog, '-o', '@OUTDIR@', '-c', '@INPUT@'])
+
+  nntrainer_inc_abs += meson.current_build_dir()
+  nntrainer_inc += include_directories('.')
+
+  flat_header2_dep = declare_dependency(sources : flat_header2)
+
+  nntrainer_base_deps += flat_header2_dep
+  compiler_sources += [
+    'flatbuffer_interpreter.cpp'
+  ]
 endif
 
 foreach s : compiler_sources
diff --git a/nntrainer/compiler/nntrainer_schema.fbs b/nntrainer/compiler/nntrainer_schema.fbs
new file mode 100644 (file)
index 0000000..55fc145
--- /dev/null
@@ -0,0 +1,341 @@
+namespace nntr;
+
+file_extension "nntr";
+
+enum TensorType : byte {
+     FLOAT32 = 0,
+     FLOAT16 = 1,
+}
+
+enum BuiltinOperator : int32 {
+      ADD = 0,
+      AVERAGE_POOL_2D = 1,
+      CONCATENATION = 2,
+      CONV_2D = 3,
+      DEPTHWISE_CONV_2D = 4,
+      DEPTH_TO_SPACE = 5,
+      DEQUANTIZE = 6,
+      EMBEDDING_LOOKUP = 7,
+      FLOOR = 8,
+      FULLY_CONNECTED = 9,
+      HASHTABLE_LOOKUP = 10,
+      L2_NORMALIZATION = 11,
+      L2_POOL_2D = 12,
+      LOCAL_RESPONSE_NORMALIZATION = 13,
+      LOGISTIC = 14,
+      LSH_PROJECTION = 15,
+      LSTM = 16,
+      MAX_POOL_2D = 17,
+      MUL = 18,
+      RELU = 19,
+      RELU_N1_TO_1 = 20,
+      RELU6 = 21,
+      RESHAPE = 22,
+      RESIZE_BILINEAR = 23,
+      RNN = 24,
+      SOFTMAX = 25,
+      SPACE_TO_DEPTH = 26,
+      SVDF = 27,
+      TANH = 28,
+      CONCAT_EMBEDDINGS = 29,
+      SKIP_GRAM = 30,
+      CALL = 31,
+      CUSTOM = 32,
+      EMBEDDING_LOOKUP_SPARSE = 33,
+      PAD = 34,
+      UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+      GATHER = 36,
+      BATCH_TO_SPACE_ND = 37,
+      SPACE_TO_BATCH_ND = 38,
+      TRANSPOSE = 39,
+      MEAN = 40,
+      SUB = 41,
+      DIV = 42,
+      SQUEEZE = 43,
+      UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+      STRIDED_SLICE = 45,
+      BIDIRECTIONAL_SEQUENCE_RNN = 46,
+      EXP = 47,
+      TOPK_V2 = 48,
+      SPLIT = 49,
+      LOG_SOFTMAX = 50,
+      DELEGATE = 51,
+      BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+      CAST = 53,
+      PRELU = 54,
+      MAXIMUM = 55,
+      ARG_MAX = 56,
+      MINIMUM = 57,
+      LESS = 58,
+      NEG = 59,
+      PADV2 = 60,
+      GREATER = 61,
+      GREATER_EQUAL = 62,
+      LESS_EQUAL = 63,
+      SELECT = 64,
+      SLICE = 65,
+      SIN = 66,
+      TRANSPOSE_CONV = 67,
+      SPARSE_TO_DENSE = 68,
+      TILE = 69,
+      EXPAND_DIMS = 70,
+      EQUAL = 71,
+      NOT_EQUAL = 72,
+      LOG = 73,
+      SUM = 74,
+      SQRT = 75,
+      RSQRT = 76,
+      SHAPE = 77,
+      POW = 78,
+      ARG_MIN = 79,
+      FAKE_QUANT = 80,
+      REDUCE_PROD = 81,
+      REDUCE_MAX = 82,
+      PACK = 83,
+      LOGICAL_OR = 84,
+      ONE_HOT = 85,
+      LOGICAL_AND = 86,
+      LOGICAL_NOT = 87,
+      UNPACK = 88,
+      REDUCE_MIN = 89,
+      FLOOR_DIV = 90,
+      REDUCE_ANY = 91,
+      SQUARE = 92,
+      ZEROS_LIKE = 93,
+      FILL = 94,
+      FLOOR_MOD = 95,
+      RANGE = 96,
+      RESIZE_NEAREST_NEIGHBOR = 97,
+      LEAKY_RELU = 98,
+      SQUARED_DIFFERENCE = 99,
+      MIRROR_PAD = 100,
+      ABS = 101,
+      SPLIT_V = 102,
+      UNIQUE = 103,
+      CEIL = 104,
+      REVERSE_V2 = 105,
+      ADD_N = 106,
+      GATHER_ND = 107,
+      COS = 108,
+      WHERE = 109,
+      RANK = 110,
+      ELU = 111,
+      REVERSE_SEQUENCE = 112,
+      MATRIX_DIAG = 113,
+      QUANTIZE = 114,
+      MATRIX_SET_DIAG = 115,
+      ROUND = 116,
+      HARD_SWISH = 117,
+      IF = 118,
+      WHILE = 119,
+      NON_MAX_SUPPRESSION_V4 = 120,
+      NON_MAX_SUPPRESSION_V5 = 121,
+      SCATTER_ND = 122,
+      SELECT_V2 = 123,
+      DENSIFY = 124,
+      SEGMENT_SUM = 125,
+      BATCH_MATMUL = 126,
+      PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
+      CUMSUM = 128,
+      CALL_ONCE = 129,
+      BROADCAST_TO = 130,
+      RFFT2D = 131,
+      CONV_3D = 132,
+      IMAG = 133,
+      REAL = 134,
+      COMPLEX_ABS = 135,
+      HASHTABLE = 136,
+      HASHTABLE_FIND = 137,
+      HASHTABLE_IMPORT = 138,
+      HASHTABLE_SIZE = 139,
+      REDUCE_ALL = 140,
+      CONV_3D_TRANSPOSE = 141,
+      VAR_HANDLE = 142,
+      READ_VARIABLE = 143,
+      ASSIGN_VARIABLE = 144,
+      BROADCAST_ARGS = 145,
+      RANDOM_STANDARD_NORMAL = 146,
+      BUCKETIZE = 147,
+      RANDOM_UNIFORM = 148,
+      MULTINOMIAL = 149,
+      GELU = 150,
+      DYNAMIC_UPDATE_SLICE = 151,
+      RELU_0_TO_1 = 152,
+      UNSORTED_SEGMENT_PROD = 153,
+      UNSORTED_SEGMENT_MAX = 154,
+      UNSORTED_SEGMENT_SUM = 155,
+      ATAN2 = 156,
+      UNSORTED_SEGMENT_MIN = 157,
+      SIGN = 158
+}
+
+union BuiltinOptions {
+      FullyConnectedOptions,
+}
+
+enum InitializerType : byte{
+     ZEROS = 0,
+     ONES = 1,
+     LECUN_NORMAL = 2,
+     LECUN_UNIFORM = 3,
+     XAVIER_NORMAL = 4,
+     XAVIER_UNIFORM = 5,
+     HE_NORMAL = 6,
+     HE_UNIFORM = 7,
+     NONE = 8,
+}
+
+enum ActivationType : byte{
+     TANH = 0,
+     SIGMOID = 1,
+     SOFTMAX = 2,
+     RELU = 3,
+     LEAKY_RELU = 4,
+     NONE = 5,
+}
+
+//Tensor
+table Tensor {
+      type:TensorType;
+      dim:[int];
+      name:string;
+      buffer:uint;
+}
+
+//Tensor Mapping : name - index
+table TensorMap {
+      name:string;
+      index:uint;
+}
+
+
+//Buffer
+table Buffer{
+      data:[ubyte];
+}
+
+
+//Layers
+enum LayerTypes : int32 {
+     FULLY_CONNECTED = 0,
+}
+
+union LayerOptions {
+      FullyConnectedOptions,
+}
+
+table FullyConnectedOptions {
+      unit:uint;
+      weight_initializer:InitializerType;
+      bias_initializer:InitializerType;
+}
+
+table Layers {
+      type:LayerTypes;
+      name:string;
+      options:LayerOptions;
+      input_layers:[string];
+      input_shape:[int];
+      activation:ActivationType;
+      weignts:[Tensor];
+      input_tensors:[Tensor];
+      output_tensors:[Tensor];
+}
+
+
+//Learning Rate
+enum LRSchedulerType : int32 {
+     CONSTANT = 0,
+     EXPONENTIAL = 1,
+     STEP = 2,
+}
+
+union LROptions {
+      ConstantLROptions,
+      ExponentialLROptions,
+      StepLROptions,
+}
+
+table ConstantLROptions {
+      learning_rate:float;
+}
+
+table ExponentialLROptions {
+      learning_rate:float;
+}
+
+table StepLROptions {
+      learning_rate:float;
+}
+
+table LRScheduler {
+      type:LRSchedulerType;
+      options:LROptions;
+}
+
+//Optimizer 
+enum OptimizerType : int32 {
+     SGD = 0,
+     ADAM = 1,
+}
+
+union OptimizerOptions {
+      SGDOptimizerOptions,
+      AdamOptimizerOptions,
+}
+
+table SGDOptimizerOptions {
+      
+}
+
+table AdamOptimizerOptions {
+      beta1:float;
+      beta2:float;
+      epsilon:float;
+}
+
+table Optimizer {
+      type:OptimizerType;
+      options:OptimizerOptions;
+}
+
+enum LossType : int32 {
+     MSE = 0,
+     CROSS = 1,
+}
+
+union LossOptions {
+      MSELossOptions,
+      CrossLossOptions,
+}
+
+table MSELossOptions {
+}
+
+table CrossLossOptions {
+}
+
+table Loss {
+      type:LossType;
+      options:LossOptions;
+}
+
+table NetworkGraph {
+      name:string;
+      input_tensors:[int];
+      output_tensors:[int];
+      layers:[Layers];
+}
+
+
+table Model {
+      name:string;
+      epochs:uint;
+      batch_size:uint;
+      optimizer:Optimizer;
+      learning_rate_scheduler:LRScheduler;
+      loss:Loss;      
+      network_graph:[NetworkGraph];
+}
+
+root_type Model;