1 // Copyright 2017 The TensorFlow Authors. All Rights Reserved.
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
7 // http://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
16 // Version 0: Initial version.
17 // Version 1: Add subgraphs to schema.
18 // Version 2: Rename operators to conform to NN API.
19 // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
20 // Version 3a: Add new builtin op code field. Has backward compatibility with
25 // This corresponds to the version.
26 file_identifier "TFL3";
27 // File extension of any written files.
28 file_extension "tflite";
30 // IMPORTANT: All new members of tables, enums and unions must be added at the
31 // end to ensure backwards compatibility.
33 // The type of data stored in a tensor.
34 enum TensorType : byte {
48 // Experimental: Resource and variant types are experimental and subject
49 // to change. Do not implement custom kernels using resource & variant types
56 // Custom quantization parameters for experimenting with new quantization
58 table CustomQuantization {
59 custom:[ubyte] (force_align: 16);
62 // Represents a specific quantization technique's parameters.
63 union QuantizationDetails {
67 // Parameters for converting a quantized tensor back to float.
68 table QuantizationParameters {
69 // These four parameters are the asymmetric linear quantization parameters.
70 // Given a quantized value q, the corresponding float value f should be:
71 // f = scale * (q - zero_point)
72 // For other quantization types, the QuantizationDetails below is used.
73 min:[float]; // For importing back into tensorflow.
74 max:[float]; // For importing back into tensorflow.
75 scale:[float]; // For dequantizing the tensor's values.
78 // If this is not none, the other quantization parameters (i.e. min, max,
79 // scale, zero_point fields above) are ignored and the value of the
80 // QuantizationDetails union should be used.
81 details:QuantizationDetails;
83 // Specifies the dimension of the Tensor's shape that the scales and
84 // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1]
85 // with quantization params:
86 // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1
87 // will be quantized across the second dimension of t.
88 // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1
89 // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2
90 // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3
91 quantized_dimension:int;
95 // We use a modification of the TACO format.
96 // Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
98 // To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
99 // potentially with a k-dimensional block (0 <= k <= n) with dims
100 // (dn, ..., dn+k-1), the format needs to specify:
101 // 1. In what order to traverse these dimensions. For example, to store a 2-D
102 // matrix in row major order, the traversal order would be (d0, d1),
103 // whereas to store it in column major order, the traversal order would be
104 // (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
105 // could be (d0, d1, d2, d3).
106 // 2. How each block dimension in (dn, ..., dn+k-1) maps to the original
107 // tensor dimension in (d0, ..., dn-1).
108 // 3. In the traversal order defined above, the format (dense vs. sparse) and
109 // index metadata for each dimension. For a dense dimension, this is just
110 // the size of that dimension. For a sparse dimension, it's the same as
111 // the compressed index defined in the Compressed Sparse Row (CSR) format.
112 // (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
114 // The storage type for a dimension. Currently we support:
115 // 1. DENSE: each coordinate in this dimension is stored implicitly.
116 // 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
117 // compression technique is the same as what CSR uses.
118 // More types like a sparse dimension with a different compression technique
119 // could be added to the list in the future.
120 enum DimensionType : byte {
130 values:[ushort] (force_align: 4);
134 values:[ubyte] (force_align: 4);
137 // Variable-typed buffer to store the index metadata for a sparse dimension.
138 // The widest type is Int32 instead of UInt32 because tensor's shape is a int32
139 // vector. We don't want the per-dimensional index to overflow that range.
140 union SparseIndexVector {
146 table DimensionMetadata {
147 // Whether a dimension is dense or sparse.
148 format:DimensionType;
149 // Index metadata used for a dimension.
150 // - If format is DimensionType.DENSE then we use the dense_size field to
151 // store the size of that dimension. Each index in that dimension is
152 // stored implicitly.
153 // - If format is DimensionType.SPARSE_CSR then we use array_segments and
154 // array_indices to encode that dimension. array_segments represents how
155 // to segment the indices array, each segment corresponds to one element
156 // in the previous dimension. array_indices represents the index of the
157 // non-zero elements within this dimension (as those in the CSR matrix
158 // format, where the first array is row pointers and the second array is
161 array_segments:SparseIndexVector;
162 array_indices:SparseIndexVector;
165 // Parameters to encode a sparse TfLite tensor.
166 table SparsityParameters {
167 // The traversal order of the dimensions defined in the `shape` field of the
168 // conceptual dense tensor. For an n-dimensional tensor with dims (d0, d1,
170 // - if not block sparse, the traversal_order is just a permutation of (d0,
171 // ..., dn-1). For example, a 2-D matrix stored in row-major order would
172 // have traversal_order = (d0, d1).
173 // - if block sparse with a k-dimensional block (0 <= k <= n), the
174 // traversal_order has n + k elements. The first n elements are still a
175 // permutation of (d0, ..., dn-1). The last k elements are a permutation
176 // of (dn, ..., dn+k-1), defining how to traverse a block internally. For
177 // example, a 2-D matrix with 2-D blocks, both stored in row-major order
178 // would have traversal_order = (d0, d1, d2, d3).
179 traversal_order:[int];
180 // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
181 // stores how a block dimension in (dn, ..., dn+k-1) maps to the original
182 // tensor dimension in (d0, ..., dn-1).
183 // It's stored in the order of (dn, ..., dn+k-1).
184 // If not block-sparse, this field is NULL.
186 // In the traversal order defined above, the metadata needed for
187 // each dimension to locate the non-zero values in the original dense tensor.
188 // The size of the dim_metadata array = the size of the traversal_order array
190 dim_metadata:[DimensionMetadata];
194 // The tensor shape. The meaning of each entry is operator-specific but
195 // builtin ops use: [batch size, height, width, number of channels] (That's
196 // Tensorflow's NHWC).
199 // An index that refers to the buffers table at the root of the model. Or,
200 // if there is no data buffer associated (i.e. intermediate results), then
201 // this is 0 (which refers to an always existent empty buffer).
203 // The data_buffer itself is an opaque container, with the assumption that the
204 // target device is little-endian. In addition, all builtin operators assume
205 // the memory is ordered such that if `shape` is [4, 3, 2], then index
206 // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
208 name:string; // For debugging and importing back into tensorflow.
209 quantization:QuantizationParameters; // Optional.
211 is_variable:bool = false;
213 // Parameters to encode a sparse tensor. See the example in
214 // tensorflow/lite/testdata/sparse_tensor.json.
215 sparsity:SparsityParameters; // Optional.
217 // Encodes `shape` with unknown dimensions. Unknown dimensions are
218 // represented with -1.
219 shape_signature:[int]; // Optional.
222 // A list of builtin operators. Builtin operators are slightly faster than custom
223 // ones, but not by much. Moreover, while custom operators accept an opaque
224 // object containing configuration parameters, builtins have a predetermined
225 // set of acceptable options.
227 enum BuiltinOperator : int32 {
232 DEPTHWISE_CONV_2D = 4,
235 EMBEDDING_LOOKUP = 7,
238 HASHTABLE_LOOKUP = 10,
239 L2_NORMALIZATION = 11,
241 LOCAL_RESPONSE_NORMALIZATION = 13,
248 // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
249 // since different model developers use RELU1 in different ways. Never
250 // create another op called RELU1.
254 RESIZE_BILINEAR = 23,
260 CONCAT_EMBEDDINGS = 29,
264 EMBEDDING_LOOKUP_SPARSE = 33,
266 UNIDIRECTIONAL_SEQUENCE_RNN = 35,
268 BATCH_TO_SPACE_ND = 37,
269 SPACE_TO_BATCH_ND = 38,
275 UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
277 BIDIRECTIONAL_SEQUENCE_RNN = 46,
282 // DELEGATE is a special op type for the operations which are delegated to
284 // WARNING: Experimental interface, subject to change
286 BIDIRECTIONAL_SEQUENCE_LSTM = 52,
302 SPARSE_TO_DENSE = 68,
331 RESIZE_NEAREST_NEIGHBOR = 97,
333 SQUARED_DIFFERENCE = 99,
346 REVERSE_SEQUENCE = 112,
349 MATRIX_SET_DIAG = 115,
354 NON_MAX_SUPPRESSION_V4 = 120,
355 NON_MAX_SUPPRESSION_V5 = 121,
361 PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
371 HASHTABLE_FIND = 137,
372 HASHTABLE_IMPORT = 138,
373 HASHTABLE_SIZE = 139,
375 CONV_3D_TRANSPOSE = 141,
378 ASSIGN_VARIABLE = 144,
380 // LINT.ThenChange(nnapi_linter/linter.proto)
382 // Options for the builtin operators.
383 union BuiltinOptions {
385 DepthwiseConv2DOptions,
386 ConcatEmbeddingsOptions,
387 LSHProjectionOptions,
391 FullyConnectedOptions,
393 ConcatenationOptions,
396 LocalResponseNormalizationOptions,
398 ResizeBilinearOptions,
403 EmbeddingLookupSparseOptions,
407 BatchToSpaceNDOptions,
408 SpaceToBatchNDOptions,
422 MaximumMinimumOptions,
432 TransposeConvOptions,
433 SparseToDenseOptions,
452 BidirectionalSequenceLSTMOptions,
453 BidirectionalSequenceRNNOptions,
454 UnidirectionalSequenceLSTMOptions,
457 ResizeNearestNeighborOptions,
459 SquaredDifferenceOptions,
470 ReverseSequenceOptions,
473 MatrixSetDiagOptions,
478 NonMaxSuppressionV4Options,
479 NonMaxSuppressionV5Options,
491 HashtableFindOptions,
492 HashtableImportOptions,
493 HashtableSizeOptions,
496 AssignVariableOptions,
499 enum Padding : byte { SAME, VALID }
501 enum ActivationFunctionType : byte {
510 table Conv2DOptions {
514 fused_activation_function:ActivationFunctionType;
515 dilation_w_factor:int = 1;
516 dilation_h_factor:int = 1;
519 // Options for both Conv3D and Conv3DTranspose.
520 table Conv3DOptions {
525 fused_activation_function:ActivationFunctionType;
526 dilation_d_factor:int = 1;
527 dilation_w_factor:int = 1;
528 dilation_h_factor:int = 1;
531 table Pool2DOptions {
537 fused_activation_function:ActivationFunctionType;
540 table DepthwiseConv2DOptions {
541 // Parameters for DepthwiseConv version 1 or above.
545 // `depth_multiplier` is redundant. It's used by CPU kernels in
546 // TensorFlow 2.0 or below, but ignored in versions above.
547 // See comments in lite/c/builtin_op_data.h for more details.
548 depth_multiplier:int;
549 fused_activation_function:ActivationFunctionType;
550 // Parameters for DepthwiseConv version 2 or above.
551 dilation_w_factor:int = 1;
552 dilation_h_factor:int = 1;
555 table ConcatEmbeddingsOptions {
557 num_columns_per_channel:[int];
558 embedding_dim_per_channel:[int]; // This could be inferred from parameters.
561 enum LSHProjectionType: byte {
567 table LSHProjectionOptions {
568 type: LSHProjectionType;
573 fused_activation_function:ActivationFunctionType;
574 // For weights-only quantization, use asymmetric quantization for non
575 // constant inputs at evaluation time.
576 asymmetric_quantize_inputs:bool;
579 // An implementation of TensorFlow RNNCell.
581 fused_activation_function:ActivationFunctionType;
582 asymmetric_quantize_inputs:bool;
585 // An implementation of TensorFlow dynamic_rnn with RNNCell.
586 table SequenceRNNOptions {
588 fused_activation_function:ActivationFunctionType;
589 asymmetric_quantize_inputs:bool;
592 // An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
593 table BidirectionalSequenceRNNOptions {
595 fused_activation_function:ActivationFunctionType;
597 asymmetric_quantize_inputs:bool;
600 enum FullyConnectedOptionsWeightsFormat: byte {
602 SHUFFLED4x16INT8 = 1,
605 // An implementation of TensorFlow fully_connected (a.k.a Dense) layer.
606 table FullyConnectedOptions {
607 // Parameters for FullyConnected version 1 or above.
608 fused_activation_function:ActivationFunctionType;
610 // Parameters for FullyConnected version 2 or above.
611 weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
613 // Parameters for FullyConnected version 5 or above.
614 // If set to true, then the number of dimension is preserved. Furthermore,
615 // all but the last dimension of the input and output shapes will be equal.
618 // Parameters for FullyConnected version 7 or above.
619 // If set to true, then weights-only op will use asymmetric quantization for
621 asymmetric_quantize_inputs: bool;
624 table SoftmaxOptions {
628 // An implementation of TensorFlow concat.
629 table ConcatenationOptions {
631 fused_activation_function:ActivationFunctionType;
635 fused_activation_function:ActivationFunctionType;
636 // Parameters supported by version 3.
637 pot_scale_int16:bool = true;
641 fused_activation_function:ActivationFunctionType;
644 table L2NormOptions {
645 // This field is currently ignored in the L2 Norm Op.
646 fused_activation_function:ActivationFunctionType;
649 table LocalResponseNormalizationOptions {
656 enum LSTMKernelType : byte {
657 // Full LSTM kernel which supports peephole and projection.
659 // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
663 // An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
665 // Parameters for LSTM version 1 or above.
666 fused_activation_function:ActivationFunctionType;
667 cell_clip: float; // Optional, 0.0 means no clipping
668 proj_clip: float; // Optional, 0.0 means no clipping
670 // Parameters for LSTM version 2 or above.
671 // Basic kernel is only supported in version 2 or above.
672 kernel_type: LSTMKernelType = FULL;
674 // Parameters for LSTM version 4 or above.
675 asymmetric_quantize_inputs: bool;
678 // An implementation of TensorFlow dynamic_rnn with LSTMCell.
679 table UnidirectionalSequenceLSTMOptions {
680 fused_activation_function:ActivationFunctionType;
681 cell_clip: float; // Optional, 0.0 means no clipping
682 proj_clip: float; // Optional, 0.0 means no clipping
684 // If true then first dimension is sequence, otherwise batch.
687 // Parameter for Unidirectional Sequence LSTM version 4.
688 asymmetric_quantize_inputs:bool;
691 table BidirectionalSequenceLSTMOptions {
692 // Parameters supported by version 1:
693 fused_activation_function:ActivationFunctionType;
694 cell_clip: float; // Optional, 0.0 means no clipping
695 proj_clip: float; // Optional, 0.0 means no clipping
697 // If true, store the outputs of both directions into the first output.
700 // Parameters supported by version 2:
701 // If true then first dimension is sequence, otherwise batch.
702 // Version 1 implementations assumed time_major to be true, so this default
703 // value should never change.
704 time_major: bool = true;
706 // Parameters for version 3 or above.
707 asymmetric_quantize_inputs:bool;
710 table ResizeBilinearOptions {
711 new_height: int (deprecated);
712 new_width: int (deprecated);
714 half_pixel_centers: bool;
717 table ResizeNearestNeighborOptions {
719 half_pixel_centers: bool;
722 // A call operation options
724 // The subgraph index that needs to be called.
734 table ReshapeOptions {
738 table SpaceToBatchNDOptions {
741 table BatchToSpaceNDOptions {
744 table SkipGramOptions {
747 include_all_ngrams: bool;
750 table SpaceToDepthOptions {
754 table DepthToSpaceOptions {
759 fused_activation_function:ActivationFunctionType;
760 // Parameters supported by version 5
761 pot_scale_int16:bool = true;
765 fused_activation_function:ActivationFunctionType;
768 table TopKV2Options {
771 enum CombinerType : byte {
777 table EmbeddingLookupSparseOptions {
778 combiner:CombinerType;
781 table GatherOptions {
783 // Parameters for Gather version 5 or above.
787 table TransposeOptions {
796 table ReducerOptions {
800 table SqueezeOptions {
808 table SplitVOptions {
812 table StridedSliceOptions {
817 shrink_axis_mask: int;
820 table LogSoftmaxOptions {
824 in_data_type: TensorType;
825 out_data_type: TensorType;
828 table DequantizeOptions {
831 table MaximumMinimumOptions {
837 table ArgMaxOptions {
838 output_type : TensorType;
841 table ArgMinOptions {
842 output_type : TensorType;
845 table GreaterOptions {
848 table GreaterEqualOptions {
854 table LessEqualOptions {
860 table SelectOptions {
866 table TransposeConvOptions {
872 table ExpandDimsOptions {
875 table SparseToDenseOptions {
876 validate_indices:bool;
882 table NotEqualOptions {
886 // Optional output type of the operation (int32 or int64). Defaults to int32.
887 out_type : TensorType;
896 table FakeQuantOptions {
897 // Parameters supported by version 1:
902 // Parameters supported by version 2:
911 table LogicalOrOptions {
914 table OneHotOptions {
922 table HardSwishOptions {
925 table LogicalAndOptions {
928 table LogicalNotOptions {
931 table UnpackOptions {
936 table FloorDivOptions {
939 table SquareOptions {
942 table ZerosLikeOptions {
948 table FloorModOptions {
954 table LeakyReluOptions {
958 table SquaredDifferenceOptions {
961 enum MirrorPadMode : byte {
962 // Doesn't include borders.
968 table MirrorPadOptions {
972 table UniqueOptions {
973 idx_out_type:TensorType = INT32;
976 table ReverseV2Options {
982 table GatherNdOptions {
988 table ReverseSequenceOptions {
993 table MatrixDiagOptions {
996 table QuantizeOptions {
999 table MatrixSetDiagOptions {
1003 then_subgraph_index:int;
1004 else_subgraph_index:int;
1007 table CallOnceOptions {
1008 init_subgraph_index:int;
1011 table WhileOptions {
1012 cond_subgraph_index:int;
1013 body_subgraph_index:int;
1016 table NonMaxSuppressionV4Options {
1019 table NonMaxSuppressionV5Options {
1022 table ScatterNdOptions {
1025 table SelectV2Options {
1028 table DensifyOptions {
1031 table SegmentSumOptions {
1034 table BatchMatMulOptions {
1037 // Parameters for BatchMatMul version 4 or above.
1038 // If set to true, then weights-only op will use asymmetric quantization for
1040 asymmetric_quantize_inputs: bool;
1043 table CumsumOptions {
1048 table BroadcastToOptions {
1051 table Rfft2dOptions {
1054 table HashtableOptions {
1055 // The identity of hash tables. This identity will be used across different
1056 // subgraphs in the same interpreter instance.
1058 key_dtype:TensorType;
1059 value_dtype:TensorType;
1062 table HashtableFindOptions {
1065 table HashtableImportOptions {
1068 table HashtableSizeOptions {
1071 table VarHandleOptions {
1076 table ReadVariableOptions {
1079 table AssignVariableOptions {
1082 // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
1083 // builtin, or a string if the operator is custom.
1084 table OperatorCode {
1085 // This field is for backward compatibility. This field will be used when
1086 // the value of the extended builtin_code field is less than
1087 // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
1088 deprecated_builtin_code:byte;
1091 // The version of the operator. The version need to be bumped whenever new
1092 // parameters are introduced into an op.
1095 // This field is introduced for resolving op builtin code shortage problem
1096 // (the original BuiltinOperator enum field was represented as a byte).
1097 // This field will be used when the value of the extended builtin_code field
1098 // is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
1099 builtin_code:BuiltinOperator;
1102 enum CustomOptionsFormat : byte {
1106 // An operator takes tensors as inputs and outputs. The type of operation being
1107 // performed is determined by an index into the list of valid OperatorCodes,
1108 // while the specifics of each operations is configured using builtin_options
1109 // or custom_options.
1111 // Index into the operator_codes array. Using an integer here avoids
1112 // complicated map lookups.
1115 // Optional input are indicated by -1.
1119 builtin_options:BuiltinOptions;
1120 custom_options:[ubyte];
1121 custom_options_format:CustomOptionsFormat;
1123 // A list of booleans indicating the input tensors which are being mutated by
1124 // this operator.(e.g. used by RNN and LSTM).
1125 // For example, if the "inputs" array refers to 5 tensors and the second and
1126 // fifth are mutable variables, then this list will contain
1127 // [false, true, false, false, true].
1129 // If the list is empty, no variable is mutated in this operator.
1130 // The list either has the same length as `inputs`, or is empty.
1131 mutating_variable_inputs:[bool];
1133 // A list of indices to the subgraph's "tensors" that are internal to an Op.
1134 // Internal tensors are those that do not flow in or out of the operation,
1135 // but instead are part of internal computation. As such, the operation's
1136 // implementation may manage its memory more efficiently. They are needed
1137 // however (i.e. not just an implementation detail) since they are part of the
1138 // computation, which may require relevant metadata such as quantization
1140 intermediates:[int];
1143 // The root type, defining a subgraph, which typically represents an entire
1146 // A list of all tensors used in this subgraph.
1149 // Indices of the tensors that are inputs into this subgraph. Note this is
1150 // the list of non-static tensors that feed into the subgraph for inference.
1153 // Indices of the tensors that are outputs out of this subgraph. Note this is
1154 // the list of output tensors that are considered the product of the
1155 // subgraph's inference.
1158 // All operators, in execution order.
1159 operators:[Operator];
1161 // Name of this subgraph (used for debugging).
1165 // Table of raw data buffers (used for constant tensors). Referenced by tensors
1166 // by index. The generous alignment accommodates mmap-friendly data structures.
1168 data:[ubyte] (force_align: 16);
1172 // A human readable string to uniquely identify a Metadata.
1174 // An index to the buffers table.
1178 // Map from an alias name of tensor to tensor index in the graph.
1179 // This is used in Signature def.
1181 // Represents the alias to use for this tensor.
1184 // The actual tensor index in the primary graph, that 'name' corresponds to.
1188 // This corresponds to SignatureDef in Tensorflow SavedModel.
1189 // The SignatureDef will be part of the SavedModel provided for conversion.
1190 table SignatureDef {
1191 // Named inputs for this signature.
1194 // Named outputs for this signature.
1195 outputs:[TensorMap];
1197 // Exported method name for this signature.
1200 // Key value which was in the Tensorflow SavedModel SignatureDef map.
1203 // Subgraph index of the exported method.
1204 subgraph_index:uint;
1208 // Version of the schema.
1211 // A list of all operator codes used in this model. This is
1212 // kept in order because operators carry an index into this
1214 operator_codes:[OperatorCode];
1216 // All the subgraphs of the model. The 0th is assumed to be the main
1218 subgraphs:[SubGraph];
1220 // A description of the model.
1223 // Buffers of the model.
1224 // Note the 0th entry of this array must be an empty buffer (sentinel).
1225 // This is a convention so that tensors without a buffer can provide 0 as
1229 // Metadata about the model. Indirects into the existing buffers list.
1230 // Deprecated, prefer to use metadata field.
1231 metadata_buffer:[int];
1233 // Metadata about the model.
1234 metadata:[Metadata];
1236 // Optional SignatureDefs for the model.
1237 signature_defs:[SignatureDef];