// Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Version 0: Initial version.
// Version 1: Add subgraphs to schema.
// Version 2: Rename operators to conform to NN API.
// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
// Version 3a: Add new builtin op code field. Has backward compatibility with
//             version 3.
// Version 3b: Rename fields in SignatureDef. Has backward compatibility with
//             version 3 and 3a.

// Change namespace to onert_tflite
namespace onert_tflite;

// This corresponds to the version.
file_identifier "TFL3";
// File extension of any written files.
file_extension "tflite";
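
// Illustrative only, not part of the schema: in FlatBuffers, the 4-byte
// file_identifier above is stored at byte offset 4 of a serialized model, so
// a quick sanity check in Python (the file name is made up for the example)
// would be:
//
//   with open("model.tflite", "rb") as f:
//       buf = f.read()
//   assert buf[4:8] == b"TFL3"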

// IMPORTANT: All new members of tables, enums and unions must be added at the
// end to ensure backwards compatibility.

// The type of data stored in a tensor.
enum TensorType : byte {
  // Experimental: Resource and variant types are experimental and subject
  // to change. Do not implement custom kernels using resource & variant types.

// Custom quantization parameters for experimenting with new quantization
// techniques.
table CustomQuantization {
  custom:[ubyte] (force_align: 16);

// Represents a specific quantization technique's parameters.
union QuantizationDetails {

// Parameters for converting a quantized tensor back to float.
table QuantizationParameters {
  // These four parameters are the asymmetric linear quantization parameters.
  // Given a quantized value q, the corresponding float value f should be:
  //   f = scale * (q - zero_point)
  // For other quantization types, the QuantizationDetails below is used.
  min:[float];  // For importing back into tensorflow.
  max:[float];  // For importing back into tensorflow.
  scale:[float];  // For dequantizing the tensor's values.

  // If this is not none, the other quantization parameters (i.e. min, max,
  // scale, zero_point fields above) are ignored and the value of the
  // QuantizationDetails union should be used.
  details:QuantizationDetails;

  // Specifies the dimension of the Tensor's shape that the scales and
  // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
  // with quantization params:
  //   scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantized_dimension=1
  // will be quantized across the second dimension of t.
  //   t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
  //   t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
  //   t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
  quantized_dimension:int;
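
  // Illustrative only, not part of the schema: a minimal Python sketch of the
  // dequantization rule above, for both per-tensor and per-axis parameters
  // (the tensor contents here are made up for the example).
  //
  //   import numpy as np
  //
  //   def dequantize_per_tensor(q, scale, zero_point):
  //       # f = scale * (q - zero_point)
  //       return scale * (q.astype(np.float32) - zero_point)
  //
  //   def dequantize_per_axis(q, scale, zero_point, quantized_dimension):
  //       # Broadcast scale/zero_point along the quantized dimension.
  //       shape = [1] * q.ndim
  //       shape[quantized_dimension] = -1
  //       s = np.asarray(scale, np.float32).reshape(shape)
  //       z = np.asarray(zero_point, np.float32).reshape(shape)
  //       return s * (q.astype(np.float32) - z)
  //
  //   q = np.ones((4, 3, 2, 1), np.int8)
  //   f = dequantize_per_axis(q, [1.0, 2.0, 3.0], [1, 2, 3], 1)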

// We use a modification of the TACO format.
// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
//
// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
// potentially with a k-dimensional block (0 <= k <= n) with dims
// (dn, ..., dn+k-1), the format needs to specify:
//   1. In what order to traverse these dimensions. For example, to store a 2-D
//      matrix in row major order, the traversal order would be (d0, d1),
//      whereas to store it in column major order, the traversal order would be
//      (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
//      could be (d0, d1, d2, d3).
//   2. How each block dimension in (dn, ..., dn+k-1) maps to the original
//      tensor dimension in (d0, ..., dn-1).
//   3. In the traversal order defined above, the format (dense vs. sparse) and
//      index metadata for each dimension. For a dense dimension, this is just
//      the size of that dimension. For a sparse dimension, it's the same as
//      the compressed index defined in the Compressed Sparse Row (CSR) format.
//      (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)

// The storage type for a dimension. Currently we support:
//   1. DENSE: each coordinate in this dimension is stored implicitly.
//   2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
//      compression technique is the same as what CSR uses.
// More types like a sparse dimension with a different compression technique
// could be added to the list in the future.
enum DimensionType : byte {
  DENSE = 0,
  SPARSE_CSR = 1,

table Uint16Vector {
  values:[ushort] (force_align: 4);

table Uint8Vector {
  values:[ubyte] (force_align: 4);

// Variable-typed buffer to store the index metadata for a sparse dimension.
// The widest type is Int32 instead of UInt32 because a tensor's shape is an
// int32 vector. We don't want the per-dimensional index to overflow that
// range.
union SparseIndexVector {

table DimensionMetadata {
  // Whether a dimension is dense or sparse.
  format:DimensionType;
  // Index metadata used for a dimension.
  //   - If format is DimensionType.DENSE then we use the dense_size field to
  //     store the size of that dimension. Each index in that dimension is
  //     stored implicitly.
  //   - If format is DimensionType.SPARSE_CSR then we use array_segments and
  //     array_indices to encode that dimension. array_segments represents how
  //     to segment the indices array; each segment corresponds to one element
  //     in the previous dimension. array_indices represents the index of the
  //     non-zero elements within this dimension (as those in the CSR matrix
  //     format, where the first array is row pointers and the second array is
  //     column indices).
  dense_size:int;
  array_segments:SparseIndexVector;
  array_indices:SparseIndexVector;

// Parameters to encode a sparse TfLite tensor.
table SparsityParameters {
  // The traversal order of the dimensions defined in the `shape` field of the
  // conceptual dense tensor. For an n-dimensional tensor with dims (d0, d1,
  // ..., dn-1):
  //   - if not block sparse, the traversal_order is just a permutation of (d0,
  //     ..., dn-1). For example, a 2-D matrix stored in row-major order would
  //     have traversal_order = (d0, d1).
  //   - if block sparse with a k-dimensional block (0 <= k <= n), the
  //     traversal_order has n + k elements. The first n elements are still a
  //     permutation of (d0, ..., dn-1). The last k elements are a permutation
  //     of (dn, ..., dn+k-1), defining how to traverse a block internally. For
  //     example, a 2-D matrix with 2-D blocks, both stored in row-major order
  //     would have traversal_order = (d0, d1, d2, d3).
  traversal_order:[int];

  // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
  // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
  // tensor dimension in (d0, ..., dn-1).
  // It's stored in the order of (dn, ..., dn+k-1).
  // If not block-sparse, this field is NULL.
  block_map:[int];

  // In the traversal order defined above, the metadata needed for
  // each dimension to locate the non-zero values in the original dense tensor.
  // The size of the dim_metadata array = the size of the traversal_order array.
  dim_metadata:[DimensionMetadata];
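
  // Illustrative only, not part of the schema: a minimal Python sketch that
  // derives the CSR-style metadata described above for a small 2-D matrix
  // with traversal_order = (d0, d1), d0 DENSE and d1 SPARSE_CSR (the matrix
  // itself is made up for the example).
  //
  //   dense = [[1, 0, 0],
  //            [0, 0, 2],
  //            [0, 0, 0]]
  //
  //   dim0_dense_size = len(dense)  # DENSE dimension: just its size, 3
  //   array_segments = [0]          # "row pointer" array
  //   array_indices = []            # column indices of the non-zero elements
  //   values = []
  //   for row in dense:
  //       for col, v in enumerate(row):
  //           if v != 0:
  //               array_indices.append(col)
  //               values.append(v)
  //       array_segments.append(len(array_indices))
  //   # array_segments == [0, 1, 2, 2], array_indices == [0, 2],
  //   # values == [1, 2]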

table Tensor {
  // The tensor shape. The meaning of each entry is operator-specific but
  // builtin ops use: [batch size, height, width, number of channels] (That's
  // Tensorflow's NHWC).
  shape:[int];

  // An index that refers to the buffers table at the root of the model. Or,
  // if there is no data buffer associated (i.e. intermediate results), then
  // this is 0 (which refers to an always existent empty buffer).
  //
  // The data_buffer itself is an opaque container, with the assumption that
  // the target device is little-endian. In addition, all builtin operators
  // assume the memory is ordered such that if `shape` is [4, 3, 2], then index
  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
  buffer:uint;
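
  // Illustrative only, not part of the schema: the row-major mapping above,
  // written out in Python (the strides are computed from the shape, so this
  // generalizes to any rank).
  //
  //   def flat_index(idx, shape):
  //       flat, stride = 0, 1
  //       for i, d in zip(reversed(idx), reversed(shape)):
  //           flat += i * stride
  //           stride *= d
  //       return flat
  //
  //   assert flat_index((1, 2, 1), (4, 3, 2)) == 1*3*2 + 2*2 + 1  # == 11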

  name:string;  // For debugging and importing back into tensorflow.
  quantization:QuantizationParameters;  // Optional.
  is_variable:bool = false;

  // Parameters to encode a sparse tensor. See the example in
  // tensorflow/lite/testdata/sparse_tensor.json.
  sparsity:SparsityParameters;  // Optional.

  // Encodes `shape` with unknown dimensions. Unknown dimensions are
  // represented with -1.
  shape_signature:[int];  // Optional.

  // If false, the rank (i.e. the number of tensor dimensions) is unknown and
  // "shape" must be [].
  has_rank: bool = false;
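
  // Illustrative only, not part of the schema: for a model whose input has a
  // dynamic batch dimension, one would typically see something like
  //
  //   shape           = [1, 224, 224, 3]   # concrete default shape
  //   shape_signature = [-1, 224, 224, 3]  # -1 marks the unknown dimension
  //
  // while a tensor of unknown rank would have has_rank = false and shape = [].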

// A list of builtin operators. Builtin operators are slightly faster than
// custom ones, but not by much. Moreover, while custom operators accept an
// opaque object containing configuration parameters, builtins have a
// predetermined set of acceptable options.
enum BuiltinOperator : int32 {
  DEPTHWISE_CONV_2D = 4,
  EMBEDDING_LOOKUP = 7,
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.
  RESIZE_BILINEAR = 23,
  CONCAT_EMBEDDINGS = 29,
  EMBEDDING_LOOKUP_SPARSE = 33,
  UNIDIRECTIONAL_SEQUENCE_RNN = 35,
  BATCH_TO_SPACE_ND = 37,
  SPACE_TO_BATCH_ND = 38,
  UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
  BIDIRECTIONAL_SEQUENCE_RNN = 46,
  // DELEGATE is a special op type for the operations which are delegated to
  // other backends.
  // WARNING: Experimental interface, subject to change.
  DELEGATE = 51,
  BIDIRECTIONAL_SEQUENCE_LSTM = 52,
  SPARSE_TO_DENSE = 68,
  RESIZE_NEAREST_NEIGHBOR = 97,
  SQUARED_DIFFERENCE = 99,
  REVERSE_SEQUENCE = 112,
  MATRIX_SET_DIAG = 115,
  NON_MAX_SUPPRESSION_V4 = 120,
  NON_MAX_SUPPRESSION_V5 = 121,
  PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
  HASHTABLE_FIND = 137,
  HASHTABLE_IMPORT = 138,
  HASHTABLE_SIZE = 139,
  CONV_3D_TRANSPOSE = 141,
  ASSIGN_VARIABLE = 144,
  BROADCAST_ARGS = 145,
  RANDOM_STANDARD_NORMAL = 146,
  RANDOM_UNIFORM = 148,
  DYNAMIC_UPDATE_SLICE = 151,
  UNSORTED_SEGMENT_PROD = 153,
  UNSORTED_SEGMENT_MAX = 154,
  UNSORTED_SEGMENT_SUM = 155,
// LINT.ThenChange(nnapi_linter/linter.proto)

// Options for the builtin operators.
union BuiltinOptions {
  DepthwiseConv2DOptions,
  ConcatEmbeddingsOptions,
  LSHProjectionOptions,
  FullyConnectedOptions,
  ConcatenationOptions,
  LocalResponseNormalizationOptions,
  ResizeBilinearOptions,
  EmbeddingLookupSparseOptions,
  BatchToSpaceNDOptions,
  SpaceToBatchNDOptions,
  MaximumMinimumOptions,
  TransposeConvOptions,
  SparseToDenseOptions,
  BidirectionalSequenceLSTMOptions,
  BidirectionalSequenceRNNOptions,
  UnidirectionalSequenceLSTMOptions,
  ResizeNearestNeighborOptions,
  SquaredDifferenceOptions,
  ReverseSequenceOptions,
  MatrixSetDiagOptions,
  NonMaxSuppressionV4Options,
  NonMaxSuppressionV5Options,
  HashtableFindOptions,
  HashtableImportOptions,
  HashtableSizeOptions,
  AssignVariableOptions,
  DynamicUpdateSliceOptions,
  UnsortedSegmentProdOptions,
  UnsortedSegmentMaxOptions,
  UnsortedSegmentSumOptions,

enum Padding : byte { SAME, VALID }
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

enum ActivationFunctionType : byte {
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

table Conv2DOptions {
  fused_activation_function:ActivationFunctionType;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

// Options for both Conv3D and Conv3DTranspose.
table Conv3DOptions {
  fused_activation_function:ActivationFunctionType;
  dilation_d_factor:int = 1;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

table Pool2DOptions {
  fused_activation_function:ActivationFunctionType;

table DepthwiseConv2DOptions {
  // Parameters for DepthwiseConv version 1 or above.
  // `depth_multiplier` is redundant. It's used by CPU kernels in
  // TensorFlow 2.0 or below, but ignored in versions above.
  // See comments in lite/c/builtin_op_data.h for more details.
  depth_multiplier:int;
  fused_activation_function:ActivationFunctionType;
  // Parameters for DepthwiseConv version 2 or above.
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

table ConcatEmbeddingsOptions {
  num_columns_per_channel:[int];
  embedding_dim_per_channel:[int];  // This could be inferred from parameters.

enum LSHProjectionType: byte {

table LSHProjectionOptions {
  type: LSHProjectionType;

table SVDFOptions {
  fused_activation_function:ActivationFunctionType;
  // For weights-only quantization, use asymmetric quantization for
  // non-constant inputs at evaluation time.
  asymmetric_quantize_inputs:bool;

// An implementation of TensorFlow RNNCell.
table RNNOptions {
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;

// An implementation of TensorFlow dynamic_rnn with RNNCell.
table SequenceRNNOptions {
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;

// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
table BidirectionalSequenceRNNOptions {
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;

enum FullyConnectedOptionsWeightsFormat: byte {
  DEFAULT = 0,
  SHUFFLED4x16INT8 = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
table FullyConnectedOptions {
  // Parameters for FullyConnected version 1 or above.
  fused_activation_function:ActivationFunctionType;

  // Parameters for FullyConnected version 2 or above.
  weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;

  // Parameters for FullyConnected version 5 or above.
  // If set to true, then the number of dimensions is preserved. Furthermore,
  // all but the last dimension of the input and output shapes will be equal.
  keep_num_dims: bool;

  // Parameters for FullyConnected version 7 or above.
  // If set to true, then weights-only op will use asymmetric quantization for
  // inputs.
  asymmetric_quantize_inputs: bool;

table SoftmaxOptions {

// An implementation of TensorFlow concat.
table ConcatenationOptions {
  fused_activation_function:ActivationFunctionType;

table AddOptions {
  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 3.
  pot_scale_int16:bool = true;

table MulOptions {
  fused_activation_function:ActivationFunctionType;

table L2NormOptions {
  // This field is currently ignored in the L2 Norm Op.
  fused_activation_function:ActivationFunctionType;

table LocalResponseNormalizationOptions {

enum LSTMKernelType : byte {
  // Full LSTM kernel which supports peephole and projection.
  FULL = 0,
  // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
  BASIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell.
table LSTMOptions {
  // Parameters for LSTM version 1 or above.
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // Parameters for LSTM version 2 or above.
  // Basic kernel is only supported in version 2 or above.
  kernel_type: LSTMKernelType = FULL;

  // Parameters for LSTM version 4 or above.
  asymmetric_quantize_inputs: bool;

// An implementation of TensorFlow dynamic_rnn with LSTMCell.
table UnidirectionalSequenceLSTMOptions {
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // If true then first dimension is sequence, otherwise batch.
  time_major:bool;

  // Parameter for Unidirectional Sequence LSTM version 4.
  asymmetric_quantize_inputs:bool;

table BidirectionalSequenceLSTMOptions {
  // Parameters supported by version 1:
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // If true, store the outputs of both directions into the first output.
  merge_outputs: bool;

  // Parameters supported by version 2:
  // If true then first dimension is sequence, otherwise batch.
  // Version 1 implementations assumed time_major to be true, so this default
  // value should never change.
  time_major: bool = true;

  // Parameters for version 3 or above.
  asymmetric_quantize_inputs:bool;

table ResizeBilinearOptions {
  new_height: int (deprecated);
  new_width: int (deprecated);
  half_pixel_centers: bool;

table ResizeNearestNeighborOptions {
  half_pixel_centers: bool;

// Options for a call operation.
table CallOptions {
  // The subgraph index that needs to be called.
  subgraph:uint;

table ReshapeOptions {

table SpaceToBatchNDOptions {

table BatchToSpaceNDOptions {

table SkipGramOptions {
  include_all_ngrams: bool;

table SpaceToDepthOptions {

table DepthToSpaceOptions {

table SubOptions {
  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 5.
  pot_scale_int16:bool = true;

table DivOptions {
  fused_activation_function:ActivationFunctionType;

table TopKV2Options {

enum CombinerType : byte {

table EmbeddingLookupSparseOptions {
  combiner:CombinerType;

table GatherOptions {
  // Parameters for Gather version 5 or above.
  batch_dims: int = 0;

table TransposeOptions {

table ReducerOptions {

table SqueezeOptions {

table SplitVOptions {

table StridedSliceOptions {
  shrink_axis_mask: int;

table LogSoftmaxOptions {

table CastOptions {
  in_data_type: TensorType;
  out_data_type: TensorType;

table DequantizeOptions {

table MaximumMinimumOptions {

table ArgMaxOptions {
  output_type : TensorType;

table ArgMinOptions {
  output_type : TensorType;

table GreaterOptions {

table GreaterEqualOptions {

table LessEqualOptions {

table SelectOptions {

table TransposeConvOptions {

table ExpandDimsOptions {

table SparseToDenseOptions {
  validate_indices:bool;

table NotEqualOptions {

table ShapeOptions {
  // Optional output type of the operation (int32 or int64). Defaults to int32.
  out_type : TensorType;

table FakeQuantOptions {
  // Parameters supported by version 1:
  // Parameters supported by version 2:

table LogicalOrOptions {

table OneHotOptions {

table HardSwishOptions {

table LogicalAndOptions {

table LogicalNotOptions {

table UnpackOptions {

table FloorDivOptions {

table SquareOptions {

table ZerosLikeOptions {

table FloorModOptions {

table LeakyReluOptions {

table SquaredDifferenceOptions {

enum MirrorPadMode : byte {
  // Doesn't include borders.
  REFLECT = 0,
  // Includes borders.
  SYMMETRIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

table MirrorPadOptions {

table UniqueOptions {
  idx_out_type:TensorType = INT32;

table ReverseV2Options {

table GatherNdOptions {

table WhereOptions {

table ReverseSequenceOptions {

table MatrixDiagOptions {

table QuantizeOptions {

table MatrixSetDiagOptions {

table IfOptions {
  then_subgraph_index:int;
  else_subgraph_index:int;

table CallOnceOptions {
  init_subgraph_index:int;

table WhileOptions {
  cond_subgraph_index:int;
  body_subgraph_index:int;

table NonMaxSuppressionV4Options {

table NonMaxSuppressionV5Options {

table ScatterNdOptions {

table SelectV2Options {

table DensifyOptions {

table SegmentSumOptions {

table BatchMatMulOptions {
  // Parameters for BatchMatMul version 4 or above.
  // If set to true, then weights-only op will use asymmetric quantization for
  // inputs.
  asymmetric_quantize_inputs: bool;

table CumsumOptions {

table BroadcastToOptions {

table Rfft2dOptions {

table HashtableOptions {
  // The identity of hash tables. This identity will be used across different
  // subgraphs in the same interpreter instance.
  table_id:int;
  key_dtype:TensorType;
  value_dtype:TensorType;

table HashtableFindOptions {

table HashtableImportOptions {

table HashtableSizeOptions {

table VarHandleOptions {

table ReadVariableOptions {

table AssignVariableOptions {

table RandomOptions {

table BucketizeOptions {
  boundaries: [float];  // The bucket boundaries.

table DynamicUpdateSliceOptions {

table UnsortedSegmentProdOptions {

table UnsortedSegmentMaxOptions {

table UnsortedSegmentSumOptions {

table ATan2Options {

// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
  // This field is for backward compatibility. This field will be used when
  // the value of the extended builtin_code field is less than
  // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  deprecated_builtin_code:byte;
  custom_code:string;

  // The version of the operator. The version needs to be bumped whenever new
  // parameters are introduced into an op.
  version:int = 1;

  // This field is introduced for resolving op builtin code shortage problem
  // (the original BuiltinOperator enum field was represented as a byte).
  // This field will be used when the value of the extended builtin_code field
  // is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  builtin_code:BuiltinOperator;
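
  // Illustrative only, not part of the schema: a minimal Python sketch of how
  // a reader can resolve the effective builtin code across old and new
  // producers, mirroring the two comments above (TFLite's own helper behaves
  // like the max of the two fields).
  //
  //   def get_builtin_code(deprecated_builtin_code, builtin_code):
  //       # Old producers only populate deprecated_builtin_code (< 127);
  //       # newer ops live in builtin_code and the deprecated field is
  //       # pinned at PLACEHOLDER_FOR_GREATER_OP_CODES (127).
  //       return max(deprecated_builtin_code, builtin_code)
  //
  //   assert get_builtin_code(4, 0) == 4        # DEPTHWISE_CONV_2D, old style
  //   assert get_builtin_code(127, 141) == 141  # CONV_3D_TRANSPOSE, extended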

enum CustomOptionsFormat : byte {
  FLEXBUFFERS = 0,
}

// An operator takes tensors as inputs and outputs. The type of operation being
// performed is determined by an index into the list of valid OperatorCodes,
// while the specifics of each operation are configured using builtin_options
// or custom_options.
table Operator {
  // Index into the operator_codes array. Using an integer here avoids
  // complicated map lookups.
  opcode_index:uint;

  // Optional inputs are indicated by -1.
  inputs:[int];
  outputs:[int];

  builtin_options:BuiltinOptions;
  custom_options:[ubyte];
  custom_options_format:CustomOptionsFormat;

  // A list of booleans indicating the input tensors which are being mutated
  // by this operator (e.g. used by RNN and LSTM).
  // For example, if the "inputs" array refers to 5 tensors and the second and
  // fifth are mutable variables, then this list will contain
  // [false, true, false, false, true].
  //
  // If the list is empty, no variable is mutated in this operator.
  // The list either has the same length as `inputs`, or is empty.
  mutating_variable_inputs:[bool];

  // A list of indices to the subgraph's "tensors" that are internal to an Op.
  // Internal tensors are those that do not flow in or out of the operation,
  // but instead are part of internal computation. As such, the operation's
  // implementation may manage its memory more efficiently. They are needed
  // however (i.e. not just an implementation detail) since they are part of
  // the computation, which may require relevant metadata such as quantization
  // parameters.
  intermediates:[int];

// The root type, defining a subgraph, which typically represents an entire
// model.
table SubGraph {
  // A list of all tensors used in this subgraph.
  tensors:[Tensor];

  // Indices of the tensors that are inputs into this subgraph. Note this is
  // the list of non-static tensors that feed into the subgraph for inference.
  inputs:[int];

  // Indices of the tensors that are outputs out of this subgraph. Note this is
  // the list of output tensors that are considered the product of the
  // subgraph's inference.
  outputs:[int];

  // All operators, in execution order.
  operators:[Operator];

  // Name of this subgraph (used for debugging).
  name:string;

// Table of raw data buffers (used for constant tensors). Referenced by tensors
// by index. The generous alignment accommodates mmap-friendly data structures.
table Buffer {
  data:[ubyte] (force_align: 16);

table Metadata {
  // A human readable string to uniquely identify a Metadata.
  name:string;
  // An index to the buffers table.
  buffer:uint;

// Map from an alias name of tensor to tensor index in the graph.
// This is used in SignatureDef.
table TensorMap {
  // Represents the alias to use for this tensor.
  name:string;
  // The actual tensor index in the primary graph, that 'name' corresponds to.
  tensor_index:uint;

// This corresponds to SignatureDef in TensorFlow SavedModel.
// The SignatureDef will be part of the SavedModel provided for conversion.
table SignatureDef {
  // Named inputs for this signature.
  inputs:[TensorMap];

  // Named outputs for this signature.
  outputs:[TensorMap];

  // Key value which was in the TensorFlow SavedModel SignatureDef map.
  signature_key:string;

  // Model tag, deprecated.
  deprecated_tag:string (deprecated);

  // Index of the subgraph that corresponds to the exported method.
  subgraph_index:uint;

table Model {
  // Version of the schema.
  version:uint;

  // A list of all operator codes used in this model. This is
  // kept in order because operators carry an index into this
  // vector.
  operator_codes:[OperatorCode];

  // All the subgraphs of the model. The 0th is assumed to be the main
  // model.
  subgraphs:[SubGraph];

  // A description of the model.
  description:string;

  // Buffers of the model.
  // Note the 0th entry of this array must be an empty buffer (sentinel).
  // This is a convention so that tensors without a buffer can provide 0 as
  // their buffer.
  buffers:[Buffer];

  // Metadata about the model. Indirects into the existing buffers list.
  // Deprecated, prefer to use metadata field.
  metadata_buffer:[int];

  // Metadata about the model.
  metadata:[Metadata];

  // Optional SignatureDefs for the model.
  signature_defs:[SignatureDef];
}

root_type Model;