1 // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
16 // Version 0: Initial version.
17 // Version 1: Add subgraphs to schema.
18 // Version 2: Rename operators to conform to NN API.
19 // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
20 // Version 3a: Add new builtin op code field. Has backward compatibility with
22 // Version 3b: Rename fields in SignatureDef. Has backward compatibility with
27 // This corresponds to the version.
28 file_identifier "TFL3";
29 // File extension of any written files.
30 file_extension "tflite";
32 // IMPORTANT: All new members of tables, enums and unions must be added at the
33 // end to ensure backwards compatibility.
35 // The type of data stored in a tensor.
36 enum TensorType : byte {
50 // Experimental: Resource and variant types are experimental and subject
51 // to change. Do not implement custom kernels using resource & variant types
60 // Custom quantization parameters for experimenting with new quantization
62 table CustomQuantization {
63 custom:[ubyte] (force_align: 16);
66 // Represents a specific quantization technique's parameters.
67 union QuantizationDetails {
71 // Parameters for converting a quantized tensor back to float.
72 table QuantizationParameters {
73 // These four parameters are the asymmetric linear quantization parameters.
74 // Given a quantized value q, the corresponding float value f should be:
75 // f = scale * (q - zero_point)
76 // For other quantization types, the QuantizationDetails below is used.
77 min:[float]; // For importing back into tensorflow.
78 max:[float]; // For importing back into tensorflow.
79 scale:[float]; // For dequantizing the tensor's values.
82 // If this is not none, the other quantization parameters (i.e. min, max,
83 // scale, zero_point fields above) are ignored and the value of the
84 // QuantizationDetails union should be used.
85 details:QuantizationDetails;
87 // Specifies the dimension of the Tensor's shape that the scales and
88 // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
89 // with quantization params:
90 // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
91 // will be quantized across the second dimension of t.
92 // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
93 // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
94 // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
95 quantized_dimension:int;
99 // We use a modification of the TACO format.
100 // Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
102 // To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
103 // potentially with a k-dimensional block (0 <= k <= n) with dims
104 // (dn, ..., dn+k-1), the format needs to specify:
105 // 1. In what order to traverse these dimensions. For example, to store a 2-D
106 // matrix in row major order, the traversal order would be (d0, d1),
107 // whereas to store it in column major order, the traversal order would be
108 // (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
109 // could be (d0, d1, d2, d3).
110 // 2. How each block dimension in (dn, ..., dn+k-1) maps to the original
111 // tensor dimension in (d0, ..., dn-1).
112 // 3. In the traversal order defined above, the format (dense vs. sparse) and
113 // index metadata for each dimension. For a dense dimension, this is just
114 // the size of that dimension. For a sparse dimension, it's the same as
115 // the compressed index defined in the Compressed Sparse Row (CSR) format.
116 // (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
118 // The storage type for a dimension. Currently we support:
119 // 1. DENSE: each coordinate in this dimension is stored implicitly.
120 // 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
121 // compression technique is the same as what CSR uses.
122 // More types like a sparse dimension with a different compression technique
123 // could be added to the list in the future.
124 enum DimensionType : byte {
134 values:[ushort] (force_align: 4);
138 values:[ubyte] (force_align: 4);
141 // Variable-typed buffer to store the index metadata for a sparse dimension.
142 // The widest type is Int32 instead of UInt32 because tensor's shape is a int32
143 // vector. We don't want the per-dimensional index to overflow that range.
144 union SparseIndexVector {
150 table DimensionMetadata {
151 // Whether a dimension is dense or sparse.
152 format:DimensionType;
153 // Index metadata used for a dimension.
154 // - If format is DimensionType.DENSE then we use the dense_size field to
155 // store the size of that dimension. Each index in that dimension is
156 // stored implicitly.
157 // - If format is DimensionType.SPARSE_CSR then we use array_segments and
158 // array_indices to encode that dimension. array_segments represents how
159 // to segment the indices array, each segment corresponds to one element
160 // in the previous dimension. array_indices represents the index of the
161 // non-zero elements within this dimension (as those in the CSR matrix
162 // format, where the first array is row pointers and the second array is
165 array_segments:SparseIndexVector;
166 array_indices:SparseIndexVector;
169 // Parameters to encode a sparse TfLite tensor.
170 table SparsityParameters {
171 // The traversal order of the dimensions defined in the `shape` field of the
172 // conceptual dense tensor. For an n-dimensional tensor with dims (d0, d1,
174 // - if not block sparse, the traversal_order is just a permutation of (d0,
175 // ..., dn-1). For example, a 2-D matrix stored in row-major order would
176 // have traversal_order = (d0, d1).
177 // - if block sparse with a k-dimensional block (0 <= k <= n), the
178 // traversal_order has n + k elements. The first n elements are still a
179 // permutation of (d0, ..., dn-1). The last k elements are a permutation
180 // of (dn, ..., dn+k-1), defining how to traverse a block internally. For
181 // example, a 2-D matrix with 2-D blocks, both stored in row-major order
182 // would have traversal_order = (d0, d1, d2, d3).
183 traversal_order:[int];
184 // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
185 // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
186 // tensor dimension in (d0, ..., dn).
187 // It's stored in the order of (dn, ..., dn+k-1).
188 // If not block-sparse, this field is NULL.
190 // In the traversal order defined above, the metadata needed for
191 // each dimension to locate the non-zero values in the original dense tensor.
192 // The size of the dim_metadata array = the size of the traversal_order array
194 dim_metadata:[DimensionMetadata];
197 // The nested tensor type for VARIANT type.
198 table VariantSubType {
202 // If false, the rank or the number of tensor dimensions is unknown.
203 // If false, "shape" must be [].
204 has_rank: bool = false;
208 // The tensor shape. The meaning of each entry is operator-specific but
209 // builtin ops use: [batch size, height, width, number of channels] (That's
210 // Tensorflow's NHWC).
213 // An index that refers to the buffers table at the root of the model. Or,
214 // if there is no data buffer associated (i.e. intermediate results), then
215 // this is 0 (which refers to an always existent empty buffer).
217 // The data_buffer itself is an opaque container, with the assumption that the
218 // target device is little-endian. In addition, all builtin operators assume
219 // the memory is ordered such that if `shape` is [4, 3, 2], then index
220 // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
222 name:string; // For debugging and importing back into tensorflow.
223 quantization:QuantizationParameters; // Optional.
225 is_variable:bool = false;
227 // Parameters to encode a sparse tensor. See the example in
228 // tensorflow/lite/testdata/sparse_tensor.json.
229 sparsity:SparsityParameters; // Optional.
231 // Encodes `shape` with unknown dimensions. Unknown dimensions are
232 // represented with -1.
233 shape_signature:[int]; // Optional.
235 // If false, the rank or the number of tensor dimensions is unknown.
236 // If false, "shape" must be [].
237 has_rank: bool = false;
239 // The nested Tensor types for VARIANT type. This is always empty for
240 // non-VARIANT types. This is optional because the nested type can be omitted.
241 // Currently only 1 subtype is supported. The field is defined as an array for
242 // flexibility of supporting multiple subtypes in the future.
243 variant_tensors:[VariantSubType];
246 // A list of builtin operators. Builtin operators are slightly faster than custom
247 // ones, but not by much. Moreover, while custom operators accept an opaque
248 // object containing configuration parameters, builtins have a predetermined
249 // set of acceptable options.
251 enum BuiltinOperator : int32 {
256 DEPTHWISE_CONV_2D = 4,
259 EMBEDDING_LOOKUP = 7,
262 HASHTABLE_LOOKUP = 10,
263 L2_NORMALIZATION = 11,
265 LOCAL_RESPONSE_NORMALIZATION = 13,
272 // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
273 // since different model developers use RELU1 in different ways. Never
274 // create another op called RELU1.
278 RESIZE_BILINEAR = 23,
284 CONCAT_EMBEDDINGS = 29,
288 EMBEDDING_LOOKUP_SPARSE = 33,
290 UNIDIRECTIONAL_SEQUENCE_RNN = 35,
292 BATCH_TO_SPACE_ND = 37,
293 SPACE_TO_BATCH_ND = 38,
299 UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
301 BIDIRECTIONAL_SEQUENCE_RNN = 46,
306 // DELEGATE is a special op type for the operations which are delegated to
308 // WARNING: Experimental interface, subject to change
310 BIDIRECTIONAL_SEQUENCE_LSTM = 52,
326 SPARSE_TO_DENSE = 68,
355 RESIZE_NEAREST_NEIGHBOR = 97,
357 SQUARED_DIFFERENCE = 99,
370 REVERSE_SEQUENCE = 112,
373 MATRIX_SET_DIAG = 115,
378 NON_MAX_SUPPRESSION_V4 = 120,
379 NON_MAX_SUPPRESSION_V5 = 121,
385 PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
395 HASHTABLE_FIND = 137,
396 HASHTABLE_IMPORT = 138,
397 HASHTABLE_SIZE = 139,
399 CONV_3D_TRANSPOSE = 141,
402 ASSIGN_VARIABLE = 144,
403 BROADCAST_ARGS = 145,
404 RANDOM_STANDARD_NORMAL = 146,
406 RANDOM_UNIFORM = 148,
409 DYNAMIC_UPDATE_SLICE = 151,
411 UNSORTED_SEGMENT_PROD = 153,
412 UNSORTED_SEGMENT_MAX = 154,
413 UNSORTED_SEGMENT_SUM = 155,
415 UNSORTED_SEGMENT_MIN = 157,
418 // LINT.ThenChange(nnapi_linter/linter.proto)
420 // Options for the builtin operators.
421 union BuiltinOptions {
423 DepthwiseConv2DOptions,
424 ConcatEmbeddingsOptions,
425 LSHProjectionOptions,
429 FullyConnectedOptions,
431 ConcatenationOptions,
434 LocalResponseNormalizationOptions,
436 ResizeBilinearOptions,
441 EmbeddingLookupSparseOptions,
445 BatchToSpaceNDOptions,
446 SpaceToBatchNDOptions,
460 MaximumMinimumOptions,
470 TransposeConvOptions,
471 SparseToDenseOptions,
490 BidirectionalSequenceLSTMOptions,
491 BidirectionalSequenceRNNOptions,
492 UnidirectionalSequenceLSTMOptions,
495 ResizeNearestNeighborOptions,
497 SquaredDifferenceOptions,
508 ReverseSequenceOptions,
511 MatrixSetDiagOptions,
516 NonMaxSuppressionV4Options,
517 NonMaxSuppressionV5Options,
529 HashtableFindOptions,
530 HashtableImportOptions,
531 HashtableSizeOptions,
534 AssignVariableOptions,
538 DynamicUpdateSliceOptions,
539 UnsortedSegmentProdOptions,
540 UnsortedSegmentMaxOptions,
541 UnsortedSegmentMinOptions,
542 UnsortedSegmentSumOptions,
548 enum Padding : byte { SAME, VALID }
549 // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
552 enum ActivationFunctionType : byte {
560 // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
562 table Conv2DOptions {
566 fused_activation_function:ActivationFunctionType;
567 dilation_w_factor:int = 1;
568 dilation_h_factor:int = 1;
571 // Options for both Conv3D and Conv3DTranspose.
572 table Conv3DOptions {
577 fused_activation_function:ActivationFunctionType;
578 dilation_d_factor:int = 1;
579 dilation_w_factor:int = 1;
580 dilation_h_factor:int = 1;
583 table Pool2DOptions {
589 fused_activation_function:ActivationFunctionType;
592 table DepthwiseConv2DOptions {
593 // Parameters for DepthwiseConv version 1 or above.
597 // `depth_multiplier` is redundant. It's used by CPU kernels in
598 // TensorFlow 2.0 or below, but ignored in versions above.
599 // See comments in lite/c/builtin_op_data.h for more details.
600 depth_multiplier:int;
601 fused_activation_function:ActivationFunctionType;
602 // Parameters for DepthwiseConv version 2 or above.
603 dilation_w_factor:int = 1;
604 dilation_h_factor:int = 1;
607 table ConcatEmbeddingsOptions {
609 num_columns_per_channel:[int];
610 embedding_dim_per_channel:[int]; // This could be inferred from parameters.
613 enum LSHProjectionType: byte {
619 table LSHProjectionOptions {
620 type: LSHProjectionType;
625 fused_activation_function:ActivationFunctionType;
626 // For weights-only quantization, use asymmetric quantization for non
627 // constant inputs at evaluation time.
628 asymmetric_quantize_inputs:bool;
631 // An implementation of TensorFlow RNNCell.
633 fused_activation_function:ActivationFunctionType;
634 asymmetric_quantize_inputs:bool;
637 // An implementation of TensorFlow dynamic_rnn with RNNCell.
638 table SequenceRNNOptions {
640 fused_activation_function:ActivationFunctionType;
641 asymmetric_quantize_inputs:bool;
644 // An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
645 table BidirectionalSequenceRNNOptions {
647 fused_activation_function:ActivationFunctionType;
649 asymmetric_quantize_inputs:bool;
653 enum FullyConnectedOptionsWeightsFormat: byte {
655 SHUFFLED4x16INT8 = 1,
657 // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
659 // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
660 table FullyConnectedOptions {
661 // Parameters for FullyConnected version 1 or above.
662 fused_activation_function:ActivationFunctionType;
664 // Parameters for FullyConnected version 2 or above.
665 weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
667 // Parameters for FullyConnected version 5 or above.
668 // If set to true, then the number of dimensions is preserved. Furthermore,
669 // all but the last dimension of the input and output shapes will be equal.
672 // Parameters for FullyConnected version 7 or above.
673 // If set to true, then weights-only op will use asymmetric quantization for
675 asymmetric_quantize_inputs: bool;
678 table SoftmaxOptions {
682 // An implementation of TensorFlow concat.
683 table ConcatenationOptions {
685 fused_activation_function:ActivationFunctionType;
689 fused_activation_function:ActivationFunctionType;
690 // Parameters supported by version 3.
691 pot_scale_int16:bool = true;
695 fused_activation_function:ActivationFunctionType;
698 table L2NormOptions {
699 // This field is currently ignored in the L2 Norm Op.
700 fused_activation_function:ActivationFunctionType;
703 table LocalResponseNormalizationOptions {
711 enum LSTMKernelType : byte {
712 // Full LSTM kernel which supports peephole and projection.
714 // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
717 // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
719 // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
721 // Parameters for LSTM version 1 or above.
722 fused_activation_function:ActivationFunctionType;
723 cell_clip: float; // Optional, 0.0 means no clipping
724 proj_clip: float; // Optional, 0.0 means no clipping
726 // Parameters for LSTM version 2 or above.
727 // Basic kernel is only supported in version 2 or above.
728 kernel_type: LSTMKernelType = FULL;
730 // Parameters for LSTM version 4 or above.
731 asymmetric_quantize_inputs: bool;
734 // An implementation of TensorFlow dynamic_rnn with LSTMCell.
735 table UnidirectionalSequenceLSTMOptions {
736 fused_activation_function:ActivationFunctionType;
737 cell_clip: float; // Optional, 0.0 means no clipping
738 proj_clip: float; // Optional, 0.0 means no clipping
740 // If true then first dimension is sequence, otherwise batch.
743 // Parameter for Unidirectional Sequence LSTM version 3.
744 asymmetric_quantize_inputs:bool;
746 // Parameter for unidirectional sequence RNN version 4.
747 diagonal_recurrent_tensors:bool;
750 table BidirectionalSequenceLSTMOptions {
751 // Parameters supported by version 1:
752 fused_activation_function:ActivationFunctionType;
753 cell_clip: float; // Optional, 0.0 means no clipping
754 proj_clip: float; // Optional, 0.0 means no clipping
756 // If true, store the outputs of both directions into the first output.
759 // Parameters supported by version 2:
760 // If true then first dimension is sequence, otherwise batch.
761 // Version 1 implementations assumed time_major to be true, so this default
762 // value should never change.
763 time_major: bool = true;
765 // Parameters for version 3 or above.
766 asymmetric_quantize_inputs:bool;
769 table ResizeBilinearOptions {
770 new_height: int (deprecated);
771 new_width: int (deprecated);
773 half_pixel_centers: bool;
776 table ResizeNearestNeighborOptions {
778 half_pixel_centers: bool;
781 // A call operation options
783 // The subgraph index that needs to be called.
793 table ReshapeOptions {
797 table SpaceToBatchNDOptions {
800 table BatchToSpaceNDOptions {
803 table SkipGramOptions {
806 include_all_ngrams: bool;
809 table SpaceToDepthOptions {
813 table DepthToSpaceOptions {
818 fused_activation_function:ActivationFunctionType;
819 // Parameters supported by version 5
820 pot_scale_int16:bool = true;
824 fused_activation_function:ActivationFunctionType;
827 table TopKV2Options {
830 enum CombinerType : byte {
836 table EmbeddingLookupSparseOptions {
837 combiner:CombinerType;
840 table GatherOptions {
842 // Parameters for Gather version 5 or above.
846 table TransposeOptions {
855 table ReducerOptions {
859 table SqueezeOptions {
867 table SplitVOptions {
871 table StridedSliceOptions {
876 shrink_axis_mask: int;
879 table LogSoftmaxOptions {
883 in_data_type: TensorType;
884 out_data_type: TensorType;
887 table DequantizeOptions {
890 table MaximumMinimumOptions {
896 table ArgMaxOptions {
897 output_type : TensorType;
900 table ArgMinOptions {
901 output_type : TensorType;
904 table GreaterOptions {
907 table GreaterEqualOptions {
913 table LessEqualOptions {
919 table SelectOptions {
925 table TransposeConvOptions {
926 // Parameters supported by version 1, 2, 3:
931 // Parameters supported by version 4:
932 fused_activation_function:ActivationFunctionType = NONE;
935 table ExpandDimsOptions {
938 table SparseToDenseOptions {
939 validate_indices:bool;
945 table NotEqualOptions {
949 // Optional output type of the operation (int32 or int64). Defaults to int32.
950 out_type : TensorType;
959 table FakeQuantOptions {
960 // Parameters supported by version 1:
965 // Parameters supported by version 2:
974 table LogicalOrOptions {
977 table OneHotOptions {
985 table HardSwishOptions {
988 table LogicalAndOptions {
991 table LogicalNotOptions {
994 table UnpackOptions {
999 table FloorDivOptions {
1002 table SquareOptions {
1005 table ZerosLikeOptions {
1011 table FloorModOptions {
1014 table RangeOptions {
1017 table LeakyReluOptions {
1021 table SquaredDifferenceOptions {
1025 enum MirrorPadMode : byte {
1026 // Doesn't include borders.
1028 // Includes borders.
1031 // LINT.ThenChange(//tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td)
1033 table MirrorPadOptions {
1037 table UniqueOptions {
1038 idx_out_type:TensorType = INT32;
1041 table ReverseV2Options {
1047 table GatherNdOptions {
1050 table WhereOptions {
1053 table ReverseSequenceOptions {
1058 table MatrixDiagOptions {
1061 table QuantizeOptions {
1064 table MatrixSetDiagOptions {
1068 then_subgraph_index:int;
1069 else_subgraph_index:int;
1072 table CallOnceOptions {
1073 init_subgraph_index:int;
1076 table WhileOptions {
1077 cond_subgraph_index:int;
1078 body_subgraph_index:int;
1081 table NonMaxSuppressionV4Options {
1084 table NonMaxSuppressionV5Options {
1087 table ScatterNdOptions {
1090 table SelectV2Options {
1093 table DensifyOptions {
1096 table SegmentSumOptions {
1099 table BatchMatMulOptions {
1102 // Parameters for BatchMatMul version 4 or above.
1103 // If set to true, then weights-only op will use asymmetric quantization for
1105 asymmetric_quantize_inputs: bool;
1108 table CumsumOptions {
1113 table BroadcastToOptions {
1116 table Rfft2dOptions {
1119 table HashtableOptions {
1120 // The identity of hash tables. This identity will be used across different
1121 // subgraphs in the same interpreter instance.
1123 key_dtype:TensorType;
1124 value_dtype:TensorType;
1127 table HashtableFindOptions {
1130 table HashtableImportOptions {
1133 table HashtableSizeOptions {
1136 table VarHandleOptions {
1141 table ReadVariableOptions {
1144 table AssignVariableOptions {
1147 table RandomOptions {
1152 table BucketizeOptions {
1153 boundaries: [float]; // The bucket boundaries.
1160 table DynamicUpdateSliceOptions {
1163 table UnsortedSegmentProdOptions {
1166 table UnsortedSegmentMaxOptions {
1169 table UnsortedSegmentSumOptions {
1172 table ATan2Options {
1175 table UnsortedSegmentMinOptions{
1182 // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
1183 // builtin, or a string if the operator is custom.
1184 table OperatorCode {
1185 // This field is for backward compatibility. This field will be used when
1186 // the value of the extended builtin_code field is less than
1187 // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
1188 deprecated_builtin_code:byte;
1191 // The version of the operator. The version needs to be bumped whenever new
1192 // parameters are introduced into an op.
1195 // This field is introduced for resolving op builtin code shortage problem
1196 // (the original BuiltinOperator enum field was represented as a byte).
1197 // This field will be used when the value of the extended builtin_code field
1198 // is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
1199 builtin_code:BuiltinOperator;
1202 enum CustomOptionsFormat : byte {
1206 // An operator takes tensors as inputs and outputs. The type of operation being
1207 // performed is determined by an index into the list of valid OperatorCodes,
1208 // while the specifics of each operation is configured using builtin_options
1209 // or custom_options.
1211 // Index into the operator_codes array. Using an integer here avoids
1212 // complicated map lookups.
1215 // Optional input are indicated by -1.
1219 builtin_options:BuiltinOptions;
1220 custom_options:[ubyte];
1221 custom_options_format:CustomOptionsFormat;
1223 // A list of booleans indicating the input tensors which are being mutated by
1224 // this operator.(e.g. used by RNN and LSTM).
1225 // For example, if the "inputs" array refers to 5 tensors and the second and
1226 // fifth are mutable variables, then this list will contain
1227 // [false, true, false, false, true].
1229 // If the list is empty, no variable is mutated in this operator.
1230 // The list either has the same length as `inputs`, or is empty.
1231 mutating_variable_inputs:[bool];
1233 // A list of indices to the subgraph's "tensors" that are internal to an Op.
1234 // Internal tensors are those that do not flow in or out of the operation,
1235 // but instead are part of internal computation. As such, the operation's
1236 // implementation may manage its memory more efficiently. They are needed
1237 // however (i.e. not just an implementation detail) since they are part of the
1238 // computation, which may require relevant metadata such as quantization
1240 intermediates:[int];
1243 // The root type, defining a subgraph, which typically represents an entire
1246 // A list of all tensors used in this subgraph.
1249 // Indices of the tensors that are inputs into this subgraph. Note this is
1250 // the list of non-static tensors that feed into the subgraph for inference.
1253 // Indices of the tensors that are outputs out of this subgraph. Note this is
1254 // the list of output tensors that are considered the product of the
1255 // subgraph's inference.
1258 // All operators, in execution order.
1259 operators:[Operator];
1261 // Name of this subgraph (used for debugging).
1265 // Table of raw data buffers (used for constant tensors). Referenced by tensors
1266 // by index. The generous alignment accommodates mmap-friendly data structures.
1268 data:[ubyte] (force_align: 16);
1272 // A human readable string to uniquely identify a Metadata.
1274 // An index to the buffers table.
1278 // Map from an alias name of tensor to tensor index in the graph.
1279 // This is used in Signature def.
1281 // Represents the alias to use for this tensor.
1284 // The actual tensor index in the primary graph, that 'name' corresponds to.
1288 // This corresponds to SignatureDef in Tensorflow SavedModel.
1289 // The SignatureDef will be part of the SavedModel provided for conversion.
1290 table SignatureDef {
1291 // Named inputs for this signature.
1294 // Named outputs for this signature.
1295 outputs:[TensorMap];
1297 // Key value which was in the Tensorflow SavedModel SignatureDef map.
1298 signature_key:string;
1300 // Model tag, deprecated.
1301 deprecated_tag:string (deprecated);
1303 // Index of the subgraph that corresponds to the exported method.
1304 subgraph_index:uint;
1308 // Version of the schema.
1311 // A list of all operator codes used in this model. This is
1312 // kept in order because operators carry an index into this
1314 operator_codes:[OperatorCode];
1316 // All the subgraphs of the model. The 0th is assumed to be the main
1318 subgraphs:[SubGraph];
1320 // A description of the model.
1323 // Buffers of the model.
1324 // Note the 0th entry of this array must be an empty buffer (sentinel).
1325 // This is a convention so that tensors without a buffer can provide 0 as
1329 // Metadata about the model. Indirects into the existing buffers list.
1330 // Deprecated, prefer to use metadata field.
1331 metadata_buffer:[int];
1333 // Metadata about the model.
1334 metadata:[Metadata];
1336 // Optional SignatureDefs for the model.
1337 signature_defs:[SignatureDef];