// Copyright (c) 2019~2023 Samsung Electronics Co., Ltd. All Rights Reserved
// Copyright 2017 The TensorFlow Authors. All Rights Reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//    http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Version Major.Minor
// Major version is the schema version.
// We keep the schema version if it is compatible.
// Minor version is for human communication.
// It will not be stored in the circle model.
// Version 0.0: Initial version. Based on TensorFlow Lite v1.13.1 schema.
// Version 0.1: Based on TF v2.2-rc2 + more (from TensorFlow `56d281c`):
//              `BATCH_MATMUL` operator, `FLOAT64` tensor type,
//              `asymmetric_quantize_inputs` for several operator options.
// Version 0.2: BCQ_GATHER and BCQ_FULLY_CONNECTED are added.
// Version 0.3: SHUFFLED16x1FLOAT32 is added.
// Version 0.4: Base up to TensorFlow Lite v2.7.0 schema.
// Version 0.5: Base up to TensorFlow Lite v2.10.1 schema.

// This corresponds to the version.
file_identifier "CIR0";
// File extension of any written files.
file_extension "circle";
// IMPORTANT: All new members of tables, enums and unions must be added at the
// end to ensure backwards compatibility.
// The type of data stored in a tensor.
enum TensorType : byte {

  // Experimental: Resource and variant types are experimental and subject to
  // change. Do not implement custom kernels using resource & variant types.
// Custom quantization parameters for experimenting with new quantization
// techniques.
table CustomQuantization {
  custom:[ubyte] (force_align: 16);
// Represents a specific quantization technique's parameters.
union QuantizationDetails {
// Parameters for converting a quantized tensor back to float.
table QuantizationParameters {
  // These four parameters are the asymmetric linear quantization parameters.
  // Given a quantized value q, the corresponding float value f should be:
  //   f = scale * (q - zero_point)
  // For other quantization types, the QuantizationDetails below is used.
  min:[float];  // For importing back into tensorflow.
  max:[float];  // For importing back into tensorflow.
  scale:[float];  // For dequantizing the tensor's values.

  // If this is not none, the other quantization parameters (i.e. min, max,
  // scale, zero_point fields above) are ignored and the value of the
  // QuantizationDetails union should be used.
  details:QuantizationDetails;

  // Specifies the dimension of the Tensor's shape that the scales and
  // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
  // with quantization params:
  //   scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
  // will be quantized across the second dimension of t:
  //   t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
  //   t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
  //   t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
  quantized_dimension:int;
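
  // A minimal illustration of the per-axis formula above (an added NumPy
  // sketch with made-up values, not part of the schema; assumes
  // quantized_dimension=1 as in the example):
  //
  //   import numpy as np
  //   q = np.zeros((4, 3, 2, 1), dtype=np.int8)  # quantized values
  //   scale = np.array([1.0, 2.0, 3.0])
  //   zero_point = np.array([1, 2, 3])
  //   # Broadcast the per-axis params along quantized_dimension (axis 1).
  //   s = scale.reshape(1, 3, 1, 1)
  //   z = zero_point.reshape(1, 3, 1, 1)
  //   f = s * (q.astype(np.float32) - z)  # f = scale * (q - zero_point)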
// We use a modification of the TACO format.
// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
//
// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
// potentially with a k-dimensional block (0 <= k <= n) with dims
// (dn, ..., dn+k-1), the format needs to specify:
//   1. In what order to traverse these dimensions. For example, to store a 2-D
//      matrix in row major order, the traversal order would be (d0, d1),
//      whereas to store it in column major order, the traversal order would be
//      (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
//      could be (d0, d1, d2, d3).
//   2. How each block dimension in (dn, ..., dn+k-1) maps to the original
//      tensor dimension in (d0, ..., dn-1).
//   3. In the traversal order defined above, the format (dense vs. sparse) and
//      index metadata for each dimension. For a dense dimension, this is just
//      the size of that dimension. For a sparse dimension, it's the same as
//      the compressed index defined in the Compressed Sparse Row (CSR) format.
//      (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
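//
// A concrete illustration of point 3 (an added example, not part of the
// upstream comment): the 2-D matrix
//   [[1, 0, 0, 0],
//    [0, 0, 2, 3]]
// traversed in row-major order (d0, d1) with d0 DENSE and d1 SPARSE_CSR
// stores
//   d0: dense size = 2
//   d1: segments = [0, 1, 3], indices = [0, 2, 3]
// against the values array [1, 2, 3]; row i's non-zero columns are
// indices[segments[i] .. segments[i+1]].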
// The storage type for a dimension. Currently we support:
//   1. DENSE: each coordinate in this dimension is stored implicitly.
//   2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
//      compression technique is the same as what CSR uses.
// More types like a sparse dimension with a different compression technique
// could be added to the list in the future.
enum DimensionType : byte {

  values:[ushort] (force_align: 4);

  values:[ubyte] (force_align: 4);
// Variable-typed buffer to store the index metadata for a sparse dimension.
// The widest type is Int32 instead of UInt32 because a tensor's shape is an
// int32 vector. We don't want the per-dimensional index to overflow that
// range.
union SparseIndexVector {
table DimensionMetadata {
  // Whether a dimension is dense or sparse.
  format:DimensionType;
  // Index metadata used for a dimension.
  //   - If format is DimensionType.DENSE then we use the dense_size field to
  //     store the size of that dimension. Each index in that dimension is
  //     stored implicitly.
  //   - If format is DimensionType.SPARSE_CSR then we use array_segments and
  //     array_indices to encode that dimension. array_segments represents how
  //     to segment the indices array; each segment corresponds to one element
  //     in the previous dimension. array_indices represents the index of the
  //     non-zero elements within this dimension (as those in the CSR matrix
  //     format, where the first array is row pointers and the second array is
  //     column indices).
  array_segments:SparseIndexVector;
  array_indices:SparseIndexVector;
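
  // A decode sketch for one SPARSE_CSR dimension (an added Python-like
  // illustration; visit() is a hypothetical callback, not part of the schema):
  //
  //   for i in range(len(array_segments) - 1):       # element i of previous dim
  //       for k in range(array_segments[i], array_segments[i + 1]):
  //           visit(i, array_indices[k], values[k])  # coordinates -> stored value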
// Parameters to encode a sparse TfLite tensor.
table SparsityParameters {
  // The traversal order of the dimensions defined in the `shape` field of the
  // conceptual dense tensor. For an n-dimensional tensor with dims
  // (d0, d1, ..., dn-1):
  //   - if not block sparse, the traversal_order is just a permutation of (d0,
  //     ..., dn-1). For example, a 2-D matrix stored in row-major order would
  //     have traversal_order = (d0, d1).
  //   - if block sparse with a k-dimensional block (0 <= k <= n), the
  //     traversal_order has n + k elements. The first n elements are still a
  //     permutation of (d0, ..., dn-1). The last k elements are a permutation
  //     of (dn, ..., dn+k-1), defining how to traverse a block internally. For
  //     example, a 2-D matrix with 2-D blocks, both stored in row-major order,
  //     would have traversal_order = (d0, d1, d2, d3).
  traversal_order:[int];

  // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
  // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
  // tensor dimension in (d0, ..., dn-1).
  // It's stored in the order of (dn, ..., dn+k-1).
  // If not block-sparse, this field is NULL.

  // In the traversal order defined above, the metadata needed for
  // each dimension to locate the non-zero values in the original dense tensor.
  // The size of the dim_metadata array = the size of the traversal_order
  // array.
  dim_metadata:[DimensionMetadata];
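
  // An added block-sparse illustration (not part of the upstream comment): a
  // 4x4 matrix split into 2x2 blocks has shape = [4, 4], block dims
  // (d2, d3) = (2, 2), traversal_order = [0, 1, 2, 3], and a block map of
  // [0, 1], i.e. d2 subdivides d0 and d3 subdivides d1; element (i, j) of the
  // dense matrix then lives in block (i / 2, j / 2) at offset (i % 2, j % 2).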
  // The tensor shape. The meaning of each entry is operator-specific but
  // builtin ops use: [batch size, height, width, number of channels] (That's
  // TensorFlow's NHWC).

  // An index that refers to the buffers table at the root of the model. Or,
  // if there is no data buffer associated (i.e. intermediate results), then
  // this is 0 (which refers to an always existent empty buffer).
  //
  // The data_buffer itself is an opaque container, with the assumption that
  // the target device is little-endian. In addition, all builtin operators
  // assume the memory is ordered such that if `shape` is [4, 3, 2], then index
  // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
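  //
  // An added worked example of that row-major rule: with shape [4, 3, 2],
  //   offset(i, j, k) = (i * 3 + j) * 2 + k
  // so index [1, 2, 1] maps to data_buffer[(1*3 + 2)*2 + 1] = data_buffer[11].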

  name:string;  // For debugging and importing back into tensorflow.
  quantization:QuantizationParameters;  // Optional.

  is_variable:bool = false;

  // Parameters to encode a sparse tensor. See the example in
  // tensorflow/lite/testdata/sparse_tensor.json.
  sparsity:SparsityParameters;  // Optional.

  // Encodes `shape` with unknown dimensions. Unknown dimensions are
  // represented with -1.
  shape_signature:[int];  // Optional.
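
  // An added illustration (made-up dims): a dynamic-batch tensor could carry
  // a concrete shape = [1, 128, 768] together with
  // shape_signature = [-1, 128, 768], the -1 marking the batch dimension as
  // unknown.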
  // If false, the rank or the number of tensor dimensions is unknown.
  // If false, "shape" must be [].
  has_rank: bool = false;
// A list of builtin operators. Builtin operators are slightly faster than
// custom ones, but not by much. Moreover, while custom operators accept an
// opaque object containing configuration parameters, builtins have a
// predetermined set of acceptable options.

enum BuiltinOperator : int32 {
  BCQ_FULLY_CONNECTED = -3,

  DEPTHWISE_CONV_2D = 4,

  EMBEDDING_LOOKUP = 7,

  HASHTABLE_LOOKUP = 10,
  L2_NORMALIZATION = 11,

  LOCAL_RESPONSE_NORMALIZATION = 13,

  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
  // since different model developers use RELU1 in different ways. Never
  // create another op called RELU1.

  RESIZE_BILINEAR = 23,

  CONCAT_EMBEDDINGS = 29,

  EMBEDDING_LOOKUP_SPARSE = 33,

  UNIDIRECTIONAL_SEQUENCE_RNN = 35,

  BATCH_TO_SPACE_ND = 37,
  SPACE_TO_BATCH_ND = 38,

  UNIDIRECTIONAL_SEQUENCE_LSTM = 44,

  BIDIRECTIONAL_SEQUENCE_RNN = 46,

  // DELEGATE is a special op type for the operations which are delegated to
  // other backends.
  // WARNING: Experimental interface, subject to change.

  BIDIRECTIONAL_SEQUENCE_LSTM = 52,

  SPARSE_TO_DENSE = 68,

  RESIZE_NEAREST_NEIGHBOR = 97,

  SQUARED_DIFFERENCE = 99,

  REVERSE_SEQUENCE = 112,

  MATRIX_SET_DIAG = 115,

  NON_MAX_SUPPRESSION_V4 = 120,
  NON_MAX_SUPPRESSION_V5 = 121,

  PLACEHOLDER_FOR_GREATER_OP_CODES = 127,

  HASHTABLE_FIND = 137,
  HASHTABLE_IMPORT = 138,
  HASHTABLE_SIZE = 139,

  CONV_3D_TRANSPOSE = 141,

  ASSIGN_VARIABLE = 144,
  BROADCAST_ARGS = 145,
  RANDOM_STANDARD_NORMAL = 146,

  RANDOM_UNIFORM = 148,

  DYNAMIC_UPDATE_SLICE = 151,

  UNSORTED_SEGMENT_PROD = 153,
  UNSORTED_SEGMENT_MAX = 154,
  UNSORTED_SEGMENT_SUM = 155,

// LINT.ThenChange(nnapi_linter/linter.proto)

// Options for the builtin operators.
union BuiltinOptions {

  DepthwiseConv2DOptions,
  ConcatEmbeddingsOptions,
  LSHProjectionOptions,

  FullyConnectedOptions,

  ConcatenationOptions,

  LocalResponseNormalizationOptions,

  ResizeBilinearOptions,

  EmbeddingLookupSparseOptions,

  BatchToSpaceNDOptions,
  SpaceToBatchNDOptions,

  MaximumMinimumOptions,

  TransposeConvOptions,
  SparseToDenseOptions,

  BidirectionalSequenceLSTMOptions,
  BidirectionalSequenceRNNOptions,
  UnidirectionalSequenceLSTMOptions,

  ResizeNearestNeighborOptions,

  SquaredDifferenceOptions,

  ReverseSequenceOptions,

  MatrixSetDiagOptions,

  NonMaxSuppressionV4Options,
  NonMaxSuppressionV5Options,

  HashtableFindOptions,
  HashtableImportOptions,
  HashtableSizeOptions,

  AssignVariableOptions,

  DynamicUpdateSliceOptions,
  UnsortedSegmentProdOptions,
  UnsortedSegmentMaxOptions,
  UnsortedSegmentSumOptions,

  BCQGatherOptions = 252,
  BCQFullyConnectedOptions = 253,
  InstanceNormOptions = 254,

enum Padding : byte { SAME, VALID }

enum ActivationFunctionType : byte {

table Conv2DOptions {

  fused_activation_function:ActivationFunctionType;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

// Options for both Conv3D and Conv3DTranspose.
table Conv3DOptions {

  fused_activation_function:ActivationFunctionType;
  dilation_d_factor:int = 1;
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

table Pool2DOptions {

  fused_activation_function:ActivationFunctionType;

table DepthwiseConv2DOptions {
  // Parameters for DepthwiseConv version 1 or above.

  // `depth_multiplier` is redundant. It's used by CPU kernels in
  // TensorFlow 2.0 or below, but ignored in versions above.
  // See comments in lite/c/builtin_op_data.h for more details.
  depth_multiplier:int;
  fused_activation_function:ActivationFunctionType;
  // Parameters for DepthwiseConv version 2 or above.
  dilation_w_factor:int = 1;
  dilation_h_factor:int = 1;

table ConcatEmbeddingsOptions {

  num_columns_per_channel:[int];
  embedding_dim_per_channel:[int];  // This could be inferred from parameters.

enum LSHProjectionType: byte {

table LSHProjectionOptions {
  type: LSHProjectionType;

  fused_activation_function:ActivationFunctionType;
  // For weights-only quantization, use asymmetric quantization for
  // non-constant inputs at evaluation time.
  asymmetric_quantize_inputs:bool;

// An implementation of TensorFlow RNNCell.

  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;

// An implementation of TensorFlow dynamic_rnn with RNNCell.
table SequenceRNNOptions {

  fused_activation_function:ActivationFunctionType;
  asymmetric_quantize_inputs:bool;

// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
table BidirectionalSequenceRNNOptions {

  fused_activation_function:ActivationFunctionType;

  asymmetric_quantize_inputs:bool;

enum FullyConnectedOptionsWeightsFormat: byte {

  SHUFFLED4x16INT8 = 1,
  SHUFFLED16x1FLOAT32 = 127

// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
table FullyConnectedOptions {
  // Parameters for FullyConnected version 1 or above.
  fused_activation_function:ActivationFunctionType;

  // Parameters for FullyConnected version 2 or above.
  weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;

  // Parameters for FullyConnected version 5 or above.
  // If set to true, then the number of dimensions is preserved. Furthermore,
  // all but the last dimension of the input and output shapes will be equal.
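  //
  // An added shape illustration (made-up dims): with this flag set, an input
  // of shape [2, 3, 16] and 8 output units yield an output of shape [2, 3, 8];
  // with it unset, the leading dimensions collapse and the output is [6, 8].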

  // Parameters for FullyConnected version 7 or above.
  // If set to true, then a weights-only op will use asymmetric quantization
  // for inputs.
  asymmetric_quantize_inputs: bool;

table SoftmaxOptions {

// An implementation of TensorFlow concat.
table ConcatenationOptions {

  fused_activation_function:ActivationFunctionType;

  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 3.
  pot_scale_int16:bool = true;

  fused_activation_function:ActivationFunctionType;

table L2NormOptions {
  // This field is currently ignored in the L2 Norm Op.
  fused_activation_function:ActivationFunctionType;

table LocalResponseNormalizationOptions {

enum LSTMKernelType : byte {
  // Full LSTM kernel which supports peephole and projection.

  // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.

// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell

  // Parameters for LSTM version 1 or above.
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // Parameters for LSTM version 2 or above.
  // Basic kernel is only supported in version 2 or above.
  kernel_type: LSTMKernelType = FULL;

  // Parameters for LSTM version 4 or above.
  asymmetric_quantize_inputs: bool;

// An implementation of TensorFlow dynamic_rnn with LSTMCell.
table UnidirectionalSequenceLSTMOptions {
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // If true, then the first dimension is the sequence; otherwise it is the
  // batch.

  // Parameter for Unidirectional Sequence LSTM version 4.
  asymmetric_quantize_inputs:bool;

table BidirectionalSequenceLSTMOptions {
  // Parameters supported by version 1:
  fused_activation_function:ActivationFunctionType;
  cell_clip: float;  // Optional, 0.0 means no clipping
  proj_clip: float;  // Optional, 0.0 means no clipping

  // If true, store the outputs of both directions into the first output.

  // Parameters supported by version 2:
  // If true, then the first dimension is the sequence; otherwise it is the
  // batch.
  // Version 1 implementations assumed time_major to be true, so this default
  // value should never change.
  time_major: bool = true;

  // Parameters for version 3 or above.
  asymmetric_quantize_inputs:bool;

table ResizeBilinearOptions {
  new_height: int (deprecated);
  new_width: int (deprecated);

  half_pixel_centers: bool;

table ResizeNearestNeighborOptions {

  half_pixel_centers: bool;

// Options for a call operation.

  // The subgraph index that needs to be called.

table ReshapeOptions {

table SpaceToBatchNDOptions {

table BatchToSpaceNDOptions {

table SkipGramOptions {

  include_all_ngrams: bool;

table SpaceToDepthOptions {

table DepthToSpaceOptions {

  fused_activation_function:ActivationFunctionType;
  // Parameters supported by version 5.
  pot_scale_int16:bool = true;

  fused_activation_function:ActivationFunctionType;

table TopKV2Options {

enum CombinerType : byte {

table EmbeddingLookupSparseOptions {
  combiner:CombinerType;

table GatherOptions {

  // Parameters for Gather version 5 or above.

table TransposeOptions {

table ReducerOptions {

table SqueezeOptions {

table SplitVOptions {

table StridedSliceOptions {

  shrink_axis_mask: int;

table LogSoftmaxOptions {

  in_data_type: TensorType;
  out_data_type: TensorType;

table DequantizeOptions {

table MaximumMinimumOptions {

table ArgMaxOptions {
  output_type : TensorType;

table ArgMinOptions {
  output_type : TensorType;

table GreaterOptions {

table GreaterEqualOptions {

table LessEqualOptions {

table SelectOptions {

table TransposeConvOptions {

table ExpandDimsOptions {

table SparseToDenseOptions {
  validate_indices:bool;

table NotEqualOptions {

  // Optional output type of the operation (int32 or int64). Defaults to int32.
  out_type : TensorType;

table FakeQuantOptions {
  // Parameters supported by version 1:

  // Parameters supported by version 2:

table LogicalOrOptions {

table OneHotOptions {

table HardSwishOptions {

table LogicalAndOptions {

table LogicalNotOptions {

table UnpackOptions {

table FloorDivOptions {

table SquareOptions {

table ZerosLikeOptions {

table FloorModOptions {

table LeakyReluOptions {

table SquaredDifferenceOptions {

enum MirrorPadMode : byte {
  // Doesn't include borders.

  // Includes borders.

table MirrorPadOptions {

table UniqueOptions {
  idx_out_type:TensorType = INT32;

table ReverseV2Options {

table GatherNdOptions {

table WhereOptions {

table ReverseSequenceOptions {

table MatrixDiagOptions {

table QuantizeOptions {

table MatrixSetDiagOptions {

  then_subgraph_index:int;
  else_subgraph_index:int;

table CallOnceOptions {
  init_subgraph_index:int;

table WhileOptions {
  cond_subgraph_index:int;
  body_subgraph_index:int;

table NonMaxSuppressionV4Options {

table NonMaxSuppressionV5Options {

table ScatterNdOptions {

table SelectV2Options {

table DensifyOptions {

table SegmentSumOptions {

table BatchMatMulOptions {

  // Parameters for BatchMatMul version 4 or above.
  // If set to true, then a weights-only op will use asymmetric quantization
  // for inputs.
  asymmetric_quantize_inputs: bool;

table CumsumOptions {

table BroadcastToOptions {

table Rfft2dOptions {

table HashtableOptions {
  // The identity of hash tables. This identity will be used across different
  // subgraphs in the same interpreter instance.

  key_dtype:TensorType;
  value_dtype:TensorType;

table HashtableFindOptions {

table HashtableImportOptions {

table HashtableSizeOptions {

table VarHandleOptions {

table ReadVariableOptions {

table AssignVariableOptions {

table RandomOptions {

table BucketizeOptions {
  boundaries: [float];  // The bucket boundaries.

table DynamicUpdateSliceOptions {

table UnsortedSegmentProdOptions {

table UnsortedSegmentMaxOptions {

table UnsortedSegmentSumOptions {

table ATan2Options {

table BCQGatherOptions {
  input_hidden_size: int;

table BCQFullyConnectedOptions {
  weights_hidden_size: int;
  fused_activation_function:ActivationFunctionType;

table InstanceNormOptions {

  fused_activation_function:ActivationFunctionType;

// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
  // This field is for backward compatibility. This field will be used when
  // the value of the extended builtin_code field is less than
  // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  deprecated_builtin_code:byte;

  // The version of the operator. The version needs to be bumped whenever new
  // parameters are introduced into an op.

  // This field is introduced for resolving the op builtin code shortage
  // problem (the original BuiltinOperator enum field was represented as a
  // byte). This field will be used when the value of the extended builtin_code
  // field is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
  builtin_code:BuiltinOperator;
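
  // An added resolution sketch (Python-like pseudocode mirroring TensorFlow
  // Lite's schema utilities; the helper name is illustrative, not part of
  // this schema):
  //
  //   def get_builtin_code(op_code):
  //       # Codes above the placeholder only fit in the extended field, so
  //       # the larger of the two values is the effective operator code.
  //       return max(op_code.builtin_code, op_code.deprecated_builtin_code)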

enum CustomOptionsFormat : byte {

enum DataFormat : byte {
  // For 2D data, NHWC (batch, height, width, channels).
  // For 3D data, NDHWC (batch, depth, height, width, channels).

  // For 2D data, NCHW (batch, channels, height, width).
  // For 3D data, NCDHW (batch, channels, depth, height, width).

// An operator takes tensors as inputs and outputs. The type of operation being
// performed is determined by an index into the list of valid OperatorCodes,
// while the specifics of each operation are configured using builtin_options
// or custom_options.

  // Index into the operator_codes array. Using an integer here avoids
  // complicated map lookups.

  // Optional inputs are indicated by -1.

  builtin_options:BuiltinOptions;
  custom_options:[ubyte];
  custom_options_format:CustomOptionsFormat;

  // A list of booleans indicating the input tensors which are being mutated
  // by this operator (e.g. used by RNN and LSTM).
  // For example, if the "inputs" array refers to 5 tensors and the second and
  // fifth are mutable variables, then this list will contain
  // [false, true, false, false, true].
  //
  // If the list is empty, no variable is mutated in this operator.
  // The list either has the same length as `inputs`, or is empty.
  mutating_variable_inputs:[bool];

  // A list of indices to the subgraph's "tensors" that are internal to an Op.
  // Internal tensors are those that do not flow in or out of the operation,
  // but instead are part of internal computation. As such, the operation's
  // implementation may manage its memory more efficiently. They are needed
  // however (i.e. not just an implementation detail) since they are part of
  // the computation, which may require relevant metadata such as quantization
  // parameters.
  intermediates:[int];

// The root type, defining a subgraph, which typically represents an entire
// model.

  // A list of all tensors used in this subgraph.

  // Indices of the tensors that are inputs into this subgraph. Note this is
  // the list of non-static tensors that feed into the subgraph for inference.

  // Indices of the tensors that are outputs out of this subgraph. Note this is
  // the list of output tensors that are considered the product of the
  // subgraph's inference.

  // All operators, in execution order.
  operators:[Operator];

  // Name of this subgraph (used for debugging).

  // Data format for input/output of SubGraph.
  data_format: DataFormat;

// Table of raw data buffers (used for constant tensors). Referenced by tensors
// by index. The generous alignment accommodates mmap-friendly data structures.

  data:[ubyte] (force_align: 16);

  // A human-readable string to uniquely identify a Metadata.

  // An index to the buffers table.

// Map from an alias name of a tensor to the tensor index in the graph.
// This is used in the SignatureDef.

  // Represents the alias to use for this tensor.

  // The actual tensor index in the primary graph that 'name' corresponds to.

// This corresponds to SignatureDef in TensorFlow SavedModel.
// The SignatureDef will be part of the SavedModel provided for conversion.
table SignatureDef {
  // Named inputs for this signature.

  // Named outputs for this signature.
  outputs:[TensorMap];

  // Key value which was in the TensorFlow SavedModel SignatureDef map.
  signature_key:string;

  // Model tag, deprecated.
  deprecated_tag:string (deprecated);

  // Index of the subgraph that corresponds to the exported method.
  subgraph_index:uint;

  // Version of the schema.

  // A list of all operator codes used in this model. This is
  // kept in order because operators carry an index into this
  // vector.
  operator_codes:[OperatorCode];

  // All the subgraphs of the model. The 0th is assumed to be the main
  // model.
  subgraphs:[SubGraph];

  // A description of the model.

  // Buffers of the model.
  // Note the 0th entry of this array must be an empty buffer (sentinel).
  // This is a convention so that tensors without a buffer can provide 0 as
  // their buffer.

  // Metadata about the model. Indirects into the existing buffers list.
  // Deprecated, prefer to use metadata field.
  metadata_buffer:[int];

  // Metadata about the model.
  metadata:[Metadata];

  // Optional SignatureDefs for the model.
  signature_defs:[SignatureDef];
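
// An added end-to-end read sketch (illustrative; assumes Python classes
// generated from this schema with `flatc --python`, and the `circle` module
// name is hypothetical):
//
//   import flatbuffers
//   from circle.Model import Model
//   buf = open("model.circle", "rb").read()
//   model = Model.GetRootAsModel(buf, 0)
//   print(model.Version(), model.SubgraphsLength())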