// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Version 0: Initial version.
// Version 1: Add subgraphs to schema.
// Version 2: Rename operators to conform to NN API.
// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
// Version 3a: Add new builtin op code field. Has backward compatibility with
//             version 3.
// Version 3b: Rename fields in SignatureDef. Has backward compatibility with
//             version 3 and 3a.
// This corresponds to the version.
file_identifier "TFL3";
// File extension of any written files.
file_extension "tflite";

// IMPORTANT: All new members of tables, enums and unions must be added at the
// end to ensure backwards compatibility.
// The type of data stored in a tensor.
enum TensorType : byte {
  // Experimental: Resource and variant types are experimental and subject
  // to change. Do not implement custom kernels using resource & variant types
  // now.
// Custom quantization parameters for experimenting with new quantization
// techniques.
table CustomQuantization {
  custom:[ubyte] (force_align: 16);
// Represents a specific quantization technique's parameters.
union QuantizationDetails {
// Parameters for converting a quantized tensor back to float.
table QuantizationParameters {
  // These four parameters are the asymmetric linear quantization parameters.
  // Given a quantized value q, the corresponding float value f should be:
  //   f = scale * (q - zero_point)
  // For other quantization types, the QuantizationDetails below is used.
  min:[float];  // For importing back into tensorflow.
  max:[float];  // For importing back into tensorflow.
  scale:[float];  // For dequantizing the tensor's values.
  zero_point:[long];

  // If this is not none, the other quantization parameters (i.e. min, max,
  // scale, zero_point fields above) are ignored and the value of the
  // QuantizationDetails union should be used.
  details:QuantizationDetails;

  // Specifies the dimension of the Tensor's shape that the scales and
  // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
  // with quantization params:
  //   scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
  // will be quantized across the second dimension of t.
  //   t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
  //   t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
  //   t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
  quantized_dimension:int;
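
// Illustrative sketch (not part of the schema): per-axis dequantization as
// described above, in Python with NumPy. `q` is the raw quantized array;
// `scale`, `zero_point` and `quantized_dimension` mirror the fields in this
// table.
//
//   import numpy as np
//
//   def dequantize(q, scale, zero_point, quantized_dimension):
//       # Reshape scale/zero_point so they broadcast along the quantized axis.
//       shape = [1] * q.ndim
//       shape[quantized_dimension] = -1
//       s = np.asarray(scale, dtype=np.float32).reshape(shape)
//       z = np.asarray(zero_point, dtype=np.int64).reshape(shape)
//       return s * (q.astype(np.int64) - z)  # f = scale * (q - zero_point)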
// Sparse tensors.
// We use a modification of the TACO format.
// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
//
// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
// potentially with a k-dimensional block (0 <= k <= n) with dims
// (dn, ..., dn+k-1), the format needs to specify:
//   1. In what order to traverse these dimensions. For example, to store a 2-D
//      matrix in row major order, the traversal order would be (d0, d1),
//      whereas to store it in column major order, the traversal order would be
//      (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
//      could be (d0, d1, d2, d3).
//   2. How each block dimension in (dn, ..., dn+k-1) maps to the original
//      tensor dimension in (d0, ..., dn-1).
//   3. In the traversal order defined above, the format (dense vs. sparse) and
//      index metadata for each dimension. For a dense dimension, this is just
//      the size of that dimension. For a sparse dimension, it's the same as
//      the compressed index defined in the Compressed Sparse Row (CSR) format.
//      (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)

// The storage type for a dimension. Currently we support:
//   1. DENSE: each coordinate in this dimension is stored implicitly.
//   2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
//      compression technique is the same as what CSR uses.
// More types like a sparse dimension with a different compression technique
// could be added to the list in the future.
enum DimensionType : byte {
table Uint16Vector {
  values:[ushort] (force_align: 4);
}

table Uint8Vector {
  values:[ubyte] (force_align: 4);
}

// Variable-typed buffer to store the index metadata for a sparse dimension.
// The widest type is Int32 instead of UInt32 because a tensor's shape is an
// int32 vector. We don't want the per-dimensional index to overflow that
// range.
union SparseIndexVector {
table DimensionMetadata {
  // Whether a dimension is dense or sparse.
  format:DimensionType;
  // Index metadata used for a dimension.
  //   - If format is DimensionType.DENSE then we use the dense_size field to
  //     store the size of that dimension. Each index in that dimension is
  //     stored implicitly.
  //   - If format is DimensionType.SPARSE_CSR then we use array_segments and
  //     array_indices to encode that dimension. array_segments represents how
  //     to segment the indices array, each segment corresponds to one element
  //     in the previous dimension. array_indices represents the index of the
  //     non-zero elements within this dimension (as those in the CSR matrix
  //     format, where the first array is row pointers and the second array is
  //     column indices).
  dense_size:long;
  array_segments:SparseIndexVector;
  array_indices:SparseIndexVector;
// Parameters to encode a sparse TfLite tensor.
table SparsityParameters {
  // The traversal order of the dimensions defined in the `shape` field of the
  // conceptual dense tensor. For an n-dimensional tensor with dims (d0, d1,
  // ..., dn-1):
  //   - if not block sparse, the traversal_order is just a permutation of (d0,
  //     ..., dn-1). For example, a 2-D matrix stored in row-major order would
  //     have traversal_order = (d0, d1).
  //   - if block sparse with a k-dimensional block (0 <= k <= n), the
  //     traversal_order has n + k elements. The first n elements are still a
  //     permutation of (d0, ..., dn-1). The last k elements are a permutation
  //     of (dn, ..., dn+k-1), defining how to traverse a block internally. For
  //     example, a 2-D matrix with 2-D blocks, both stored in row-major order
  //     would have traversal_order = (d0, d1, d2, d3).
  traversal_order:[int];

  // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
  // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
  // tensor dimension in (d0, ..., dn-1).
  // It's stored in the order of (dn, ..., dn+k-1).
  // If not block-sparse, this field is NULL.
  block_map:[int];

  // In the traversal order defined above, the metadata needed for
  // each dimension to locate the non-zero values in the original dense tensor.
  // The size of the dim_metadata array = the size of the traversal_order
  // array.
  dim_metadata:[DimensionMetadata];
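
// Illustrative example (not part of the schema): encoding the 2-D matrix
//   [[1, 0, 0],
//    [0, 2, 3]]
// with traversal_order = (d0, d1), d0 DENSE and d1 SPARSE_CSR gives:
//   dim_metadata[0]: format=DENSE, dense_size=2
//   dim_metadata[1]: format=SPARSE_CSR,
//                    array_segments=[0, 1, 3]  (row pointers)
//                    array_indices=[0, 1, 2]   (column indices)
// and the tensor's data buffer then stores only the non-zero values [1, 2, 3].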
table Tensor {
  // The tensor shape. The meaning of each entry is operator-specific but
  // builtin ops use: [batch size, height, width, number of channels] (That's
  // TensorFlow's NHWC).
  shape:[int];

  // An index that refers to the buffers table at the root of the model. Or,
  // if there is no data buffer associated (i.e. intermediate results), then
  // this is 0 (which refers to an always existent empty buffer).
  //
  // The data_buffer itself is an opaque container, with the assumption that
  // the target device is little-endian. In addition, all builtin operators
  // assume the memory is ordered such that if `shape` is [4, 3, 2], then index
  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
  buffer:uint;
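
  // Illustrative sketch (not part of the schema): the row-major offset rule
  // above as a small Python helper.
  //
  //   def flat_offset(index, shape):
  //       # For shape [4, 3, 2] and index [i, j, k]: i*3*2 + j*2 + k.
  //       offset = 0
  //       for idx, dim in zip(index, shape):
  //           offset = offset * dim + idx
  //       return offset
  //
  //   assert flat_offset([1, 2, 1], [4, 3, 2]) == 1*3*2 + 2*2 + 1  # == 11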
  name:string;  // For debugging and importing back into tensorflow.
  quantization:QuantizationParameters;  // Optional.

  is_variable:bool = false;

  // Parameters to encode a sparse tensor. See the example in
  // tensorflow/lite/testdata/sparse_tensor.json.
  sparsity:SparsityParameters;  // Optional.

  // Encodes `shape` with unknown dimensions. Unknown dimensions are
  // represented with -1.
  shape_signature:[int];  // Optional.
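
  // For example, a batch-dynamic input of 224x224 RGB images (an illustrative
  // shape, not mandated by the schema) would have
  // shape_signature = [-1, 224, 224, 3].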
  // If false, the rank or the number of tensor dimensions is unknown.
  // If false, "shape" must be [].
  has_rank: bool = false;
// A list of builtin operators. Builtin operators are slightly faster than
// custom ones, but not by much. Moreover, while custom operators accept an
// opaque object containing configuration parameters, builtins have a
// predetermined set of acceptable options.
enum BuiltinOperator : int32 {
  DEPTHWISE_CONV_2D = 4,
  EMBEDDING_LOOKUP = 7,
  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,
  LOCAL_RESPONSE_NORMALIZATION = 13,
  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.
  RESIZE_BILINEAR = 23,
  CONCAT_EMBEDDINGS = 29,
  EMBEDDING_LOOKUP_SPARSE = 33,
  UNIDIRECTIONAL_SEQUENCE_RNN = 35,
  BATCH_TO_SPACE_ND = 37,
  SPACE_TO_BATCH_ND = 38,
  UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
  BIDIRECTIONAL_SEQUENCE_RNN = 46,
  // DELEGATE is a special op type for the operations which are delegated to
  // other backends.
  // WARNING: Experimental interface, subject to change.
  DELEGATE = 51,
  BIDIRECTIONAL_SEQUENCE_LSTM = 52,
  SPARSE_TO_DENSE = 68,
  RESIZE_NEAREST_NEIGHBOR = 97,
  SQUARED_DIFFERENCE = 99,
  REVERSE_SEQUENCE = 112,
  MATRIX_SET_DIAG = 115,
  NON_MAX_SUPPRESSION_V4 = 120,
  NON_MAX_SUPPRESSION_V5 = 121,
  PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
  HASHTABLE_FIND = 137,
  HASHTABLE_IMPORT = 138,
  HASHTABLE_SIZE = 139,
  CONV_3D_TRANSPOSE = 141,
  ASSIGN_VARIABLE = 144,
  BROADCAST_ARGS = 145,
  RANDOM_STANDARD_NORMAL = 146,
  RANDOM_UNIFORM = 148,
  DYNAMIC_UPDATE_SLICE = 151,
  UNSORTED_SEGMENT_PROD = 153,
  UNSORTED_SEGMENT_MAX = 154,
  UNSORTED_SEGMENT_SUM = 155,
// LINT.ThenChange(nnapi_linter/linter.proto)
// Options for the builtin operators.
union BuiltinOptions {
  DepthwiseConv2DOptions,
  ConcatEmbeddingsOptions,
  LSHProjectionOptions,
  FullyConnectedOptions,
  ConcatenationOptions,
  LocalResponseNormalizationOptions,
  ResizeBilinearOptions,
  EmbeddingLookupSparseOptions,
  BatchToSpaceNDOptions,
  SpaceToBatchNDOptions,
  MaximumMinimumOptions,
  TransposeConvOptions,
  SparseToDenseOptions,
  BidirectionalSequenceLSTMOptions,
  BidirectionalSequenceRNNOptions,
  UnidirectionalSequenceLSTMOptions,
  ResizeNearestNeighborOptions,
  SquaredDifferenceOptions,
  ReverseSequenceOptions,
  MatrixSetDiagOptions,
  NonMaxSuppressionV4Options,
  NonMaxSuppressionV5Options,
  HashtableFindOptions,
  HashtableImportOptions,
  HashtableSizeOptions,
  AssignVariableOptions,
  DynamicUpdateSliceOptions,
  UnsortedSegmentProdOptions,
  UnsortedSegmentMaxOptions,
  UnsortedSegmentSumOptions,
enum Padding : byte { SAME, VALID }
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)

enum ActivationFunctionType : byte {
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
table Conv2DOptions {
  fused_activation_function:ActivationFunctionType;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

// Options for both Conv3D and Conv3DTranspose.
table Conv3DOptions {
  fused_activation_function:ActivationFunctionType;
  dilation_d_factor:int = 1;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

table Pool2DOptions {
  fused_activation_function:ActivationFunctionType;
table DepthwiseConv2DOptions {
  // Parameters for DepthwiseConv version 1 or above.
  // `depth_multiplier` is redundant. It's used by CPU kernels in
  // TensorFlow 2.0 or below, but ignored in versions above.
  // See comments in lite/c/builtin_op_data.h for more details.
  depth_multiplier:int;
  fused_activation_function:ActivationFunctionType;
  // Parameters for DepthwiseConv version 2 or above.
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;
table ConcatEmbeddingsOptions {
  num_columns_per_channel:[int];
  embedding_dim_per_channel:[int];  // This could be inferred from parameters.

enum LSHProjectionType: byte {

table LSHProjectionOptions {
  type: LSHProjectionType;

  fused_activation_function:ActivationFunctionType;
  // For weights-only quantization, use asymmetric quantization for
  // non-constant inputs at evaluation time.
  asymmetric_quantize_inputs:bool;
// An implementation of TensorFlow RNNCell.
table RNNOptions {
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;

// An implementation of TensorFlow dynamic_rnn with RNNCell.
table SequenceRNNOptions {
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;
// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
table BidirectionalSequenceRNNOptions {
  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;
enum FullyConnectedOptionsWeightsFormat: byte {
  SHUFFLED4x16INT8 = 1,
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
table FullyConnectedOptions {
  // Parameters for FullyConnected version 1 or above.
  fused_activation_function:ActivationFunctionType;

  // Parameters for FullyConnected version 2 or above.
  weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;

  // Parameters for FullyConnected version 5 or above.
  // If set to true, then the number of dimensions is preserved. Furthermore,
  // all but the last dimension of the input and output shapes will be equal.
  keep_num_dims: bool;

  // Parameters for FullyConnected version 7 or above.
  // If set to true, then weights-only op will use asymmetric quantization for
  // inputs.
  asymmetric_quantize_inputs: bool;
table SoftmaxOptions {

// An implementation of TensorFlow concat.
table ConcatenationOptions {
  fused_activation_function:ActivationFunctionType;

  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 3.
  pot_scale_int16:bool = true;

  fused_activation_function:ActivationFunctionType;

table L2NormOptions {
  // This field is currently ignored in the L2 Norm Op.
  fused_activation_function:ActivationFunctionType;
table LocalResponseNormalizationOptions {

enum LSTMKernelType : byte {
  // Full LSTM kernel which supports peephole and projection.
  FULL = 0,
  // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
  BASIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell.
table LSTMOptions {
  // Parameters for LSTM version 1 or above.
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // Parameters for LSTM version 2 or above.
  // Basic kernel is only supported in version 2 or above.
  kernel_type: LSTMKernelType = FULL;

  // Parameters for LSTM version 4 or above.
  asymmetric_quantize_inputs: bool;
// An implementation of TensorFlow dynamic_rnn with LSTMCell.
table UnidirectionalSequenceLSTMOptions {
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // If true, the first dimension is sequence, otherwise batch.
  time_major:bool;

  // Parameter for Unidirectional Sequence LSTM version 4.
  asymmetric_quantize_inputs:bool;
table BidirectionalSequenceLSTMOptions {
  // Parameters supported by version 1:
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // If true, store the outputs of both directions into the first output.
  merge_outputs: bool;

  // Parameters supported by version 2:
  // If true, the first dimension is sequence, otherwise batch.
  // Version 1 implementations assumed time_major to be true, so this default
  // value should never change.
  time_major: bool = true;

  // Parameters for version 3 or above.
  asymmetric_quantize_inputs:bool;
table ResizeBilinearOptions {
  new_height: int (deprecated);
  new_width: int (deprecated);
  half_pixel_centers: bool;

table ResizeNearestNeighborOptions {
  half_pixel_centers: bool;
// Options for a call operation.
table CallOptions {
  // The subgraph index that needs to be called.
  subgraph:uint;
table ReshapeOptions {

table SpaceToBatchNDOptions {

table BatchToSpaceNDOptions {

table SkipGramOptions {
  include_all_ngrams: bool;

table SpaceToDepthOptions {

table DepthToSpaceOptions {

  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 5.
  pot_scale_int16:bool = true;
  fused_activation_function:ActivationFunctionType;

table TopKV2Options {

enum CombinerType : byte {

table EmbeddingLookupSparseOptions {
  combiner:CombinerType;

table GatherOptions {
  // Parameters for Gather version 5 or above.
  batch_dims: int = 0;

table TransposeOptions {

table ReducerOptions {

table SqueezeOptions {

table SplitVOptions {

table StridedSliceOptions {
  shrink_axis_mask: int;
table LogSoftmaxOptions {

table CastOptions {
  in_data_type: TensorType;
  out_data_type: TensorType;

table DequantizeOptions {

table MaximumMinimumOptions {

table ArgMaxOptions {
  output_type : TensorType;

table ArgMinOptions {
  output_type : TensorType;

table GreaterOptions {

table GreaterEqualOptions {

table LessEqualOptions {

table SelectOptions {

table TransposeConvOptions {

table ExpandDimsOptions {

table SparseToDenseOptions {
  validate_indices:bool;

table NotEqualOptions {

table ShapeOptions {
  // Optional output type of the operation (int32 or int64). Defaults to int32.
  out_type : TensorType;
table FakeQuantOptions {
  // Parameters supported by version 1:

  // Parameters supported by version 2:
table LogicalOrOptions {

table OneHotOptions {

table HardSwishOptions {

table LogicalAndOptions {

table LogicalNotOptions {

table UnpackOptions {

table FloorDivOptions {

table SquareOptions {

table ZerosLikeOptions {

table FloorModOptions {

table LeakyReluOptions {

table SquaredDifferenceOptions {
enum MirrorPadMode : byte {
  // Doesn't include borders.
  REFLECT = 0,
  // Includes borders.
  SYMMETRIC = 1,
}
// LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
table MirrorPadOptions {

table UniqueOptions {
  idx_out_type:TensorType = INT32;

table ReverseV2Options {

table GatherNdOptions {

table WhereOptions {

table ReverseSequenceOptions {

table MatrixDiagOptions {

table QuantizeOptions {

table MatrixSetDiagOptions {

table IfOptions {
  then_subgraph_index:int;
  else_subgraph_index:int;
table CallOnceOptions {
  init_subgraph_index:int;

table WhileOptions {
  cond_subgraph_index:int;
  body_subgraph_index:int;

table NonMaxSuppressionV4Options {

table NonMaxSuppressionV5Options {

table ScatterNdOptions {

table SelectV2Options {

table DensifyOptions {

table SegmentSumOptions {
table BatchMatMulOptions {
  // Parameters for BatchMatMul version 4 or above.
  // If set to true, then weights-only op will use asymmetric quantization for
  // inputs.
  asymmetric_quantize_inputs: bool;
table CumsumOptions {

table BroadcastToOptions {

table Rfft2dOptions {

table HashtableOptions {
  // The identity of hash tables. This identity will be used across different
  // subgraphs in the same interpreter instance.
  table_id:int;
  key_dtype:TensorType;
  value_dtype:TensorType;

table HashtableFindOptions {

table HashtableImportOptions {

table HashtableSizeOptions {

table VarHandleOptions {

table ReadVariableOptions {

table AssignVariableOptions {

table RandomOptions {

table BucketizeOptions {
  boundaries: [float];  // The bucket boundaries.

table DynamicUpdateSliceOptions {

table UnsortedSegmentProdOptions {

table UnsortedSegmentMaxOptions {

table UnsortedSegmentSumOptions {

table ATan2Options {
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
  // This field is for backward compatibility. This field will be used when
  // the value of the extended builtin_code field is less than
  // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  deprecated_builtin_code:byte;
  custom_code:string;

  // The version of the operator. The version needs to be bumped whenever new
  // parameters are introduced into an op.
  version:int = 1;

  // This field is introduced for resolving the op builtin code shortage
  // problem (the original BuiltinOperator enum field was represented as a
  // byte). This field will be used when the value of the extended builtin_code
  // field is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  builtin_code:BuiltinOperator;
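
// Illustrative sketch (not part of the schema): a reader can recover the
// effective builtin code from the two fields above, e.g. in Python:
//
//   def effective_builtin_code(op_code):
//       # Large codes only fit in builtin_code; small ones may appear in
//       # either field, so taking the max resolves both conventions.
//       return max(op_code.builtin_code, op_code.deprecated_builtin_code)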
enum CustomOptionsFormat : byte {

// An operator takes tensors as inputs and outputs. The type of operation being
// performed is determined by an index into the list of valid OperatorCodes,
// while the specifics of each operation are configured using builtin_options
// or custom_options.
table Operator {
  // Index into the operator_codes array. Using an integer here avoids
  // complicated map lookups.
  opcode_index:uint;

  // Optional inputs are indicated by -1.
  inputs:[int];
  outputs:[int];

  builtin_options:BuiltinOptions;
  custom_options:[ubyte];
  custom_options_format:CustomOptionsFormat;
  // A list of booleans indicating the input tensors which are being mutated by
  // this operator (e.g. used by RNN and LSTM).
  // For example, if the "inputs" array refers to 5 tensors and the second and
  // fifth are mutable variables, then this list will contain
  // [false, true, false, false, true].
  //
  // If the list is empty, no variable is mutated in this operator.
  // The list either has the same length as `inputs`, or is empty.
  mutating_variable_inputs:[bool];
  // A list of indices to the subgraph's "tensors" that are internal to an Op.
  // Internal tensors are those that do not flow in or out of the operation,
  // but instead are part of internal computation. As such, the operation's
  // implementation may manage its memory more efficiently. They are needed
  // however (i.e. not just an implementation detail) since they are part of
  // the computation, which may require relevant metadata such as quantization
  // parameters.
  intermediates:[int];
// The root type, defining a subgraph, which typically represents an entire
// model.
table SubGraph {
  // A list of all tensors used in this subgraph.
  tensors:[Tensor];

  // Indices of the tensors that are inputs into this subgraph. Note this is
  // the list of non-static tensors that feed into the subgraph for inference.
  inputs:[int];

  // Indices of the tensors that are outputs out of this subgraph. Note this is
  // the list of output tensors that are considered the product of the
  // subgraph's inference.
  outputs:[int];

  // All operators, in execution order.
  operators:[Operator];

  // Name of this subgraph (used for debugging).
  name:string;
// Table of raw data buffers (used for constant tensors). Referenced by tensors
// by index. The generous alignment accommodates mmap-friendly data structures.
table Buffer {
  data:[ubyte] (force_align: 16);
table Metadata {
  // A human readable string to uniquely identify a Metadata.
  name:string;
  // An index to the buffers table.
  buffer:uint;
// Map from an alias name of tensor to tensor index in the graph.
// This is used in Signature def.
table TensorMap {
  // Represents the alias to use for this tensor.
  name:string;

  // The actual tensor index in the primary graph, that 'name' corresponds to.
  tensor_index:uint;
// This corresponds to SignatureDef in TensorFlow SavedModel.
// The SignatureDef will be part of the SavedModel provided for conversion.
table SignatureDef {
  // Named inputs for this signature.
  inputs:[TensorMap];

  // Named outputs for this signature.
  outputs:[TensorMap];

  // Key value which was in the TensorFlow SavedModel SignatureDef map.
  signature_key:string;

  // Model tag, deprecated.
  deprecated_tag:string (deprecated);

  // Index of the subgraph that corresponds to the exported method.
  subgraph_index:uint;
table Model {
  // Version of the schema.
  version:uint;

  // A list of all operator codes used in this model. This is
  // kept in order because operators carry an index into this
  // vector.
  operator_codes:[OperatorCode];

  // All the subgraphs of the model. The 0th is assumed to be the main
  // model.
  subgraphs:[SubGraph];

  // A description of the model.
  description:string;

  // Buffers of the model.
  // Note the 0th entry of this array must be an empty buffer (sentinel).
  // This is a convention so that tensors without a buffer can provide 0 as
  // their buffer.
  buffers:[Buffer];

  // Metadata about the model. Indirects into the existing buffers list.
  // Deprecated, prefer to use metadata field.
  metadata_buffer:[int];

  // Metadata about the model.
  metadata:[Metadata];

  // Optional SignatureDefs for the model.
  signature_defs:[SignatureDef];
}

root_type Model;
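
// Illustrative sketch (not part of the schema): loading a .tflite file with
// the FlatBuffers Python runtime, assuming bindings generated from this
// schema via `flatc --python` into a `tflite` package.
//
//   from tflite.Model import Model
//
//   with open("model.tflite", "rb") as f:
//       buf = bytearray(f.read())
//   model = Model.GetRootAsModel(buf, 0)
//   print(model.Version())                     # schema version
//   print(model.Subgraphs(0).TensorsLength())  # tensors in the main subgraph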