/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __NNFW_CKER_TYPES_H__
#define __NNFW_CKER_TYPES_H__

#include <cassert>
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>

// Keeps a value "used" in release builds, where assert() compiles away.
#ifndef UNUSED_RELEASE
#define UNUSED_RELEASE(a) (void)(a)
#endif

namespace nnfw
{
namespace cker
{

enum class FusedActivationFunctionType
{
  kNone = 0,
  kRelu6 = 1,
  kRelu1 = 2,
  kRelu = 3,
};

enum class PaddingType
{
  kNone = 0,
  kSame = 1,
  kValid = 2,
};

enum class BinaryArithmeticOpType
{
  ADD = 0,
  SUB = 1,
  MUL = 2,
  DIV = 3,
  POW = 4,
};

enum class ComparisonOpType
{
  Equal,
  NotEqual,
  Greater,
  GreaterEqual,
  Less,
  LessEqual
};

struct PaddingValues
{
  int16_t width;
  int16_t height;
};

enum class BroadcastableOpCategory : uint8_t
{
  kNone,
  kNonBroadcast,              // Matching input shapes.
  kFirstInputBroadcastsFast,  // Fivefold nested loops.
  kSecondInputBroadcastsFast, // Fivefold nested loops.
  kGenericBroadcast,          // Fall-back.
};

struct PoolParams
{
  FusedActivationFunctionType activation;
  PaddingType padding_type;
  PaddingValues padding_values;
  int stride_height;
  int stride_width;
  int filter_height;
  int filter_width;
  // uint8, etc, activation params.
  int32_t quantized_activation_min;
  int32_t quantized_activation_max;
  // float activation params.
  float float_activation_min;
  float float_activation_max;
};

struct SoftmaxParams
{
  // beta is not really used (not a Tensorflow parameter) and not implemented
  // for LogSoftmax.
  double beta;
  int axis;
  // uint8 inference params. Used even when beta defaults to 1.0.
  int32_t input_multiplier;
  int32_t input_left_shift;
  // Reverse scaling is only used by LogSoftmax.
  int32_t reverse_scaling_divisor;
  int32_t reverse_scaling_right_shift;
  int diff_min;
};

struct PackParams
{
  int8_t axis;
  // zeropoint and scale were only used to implement PackWithScaling in the legacy code of
  // tensorflow.
  // const int32_t* input_zeropoint;
  // const float* input_scale;
  uint16_t inputs_count;
  // int32_t output_zeropoint;
  // float output_scale;
};

struct ConvParams
{
  PaddingType padding_type;
  PaddingValues padding_values;
  // TODO(starka): This was just "stride", so check that width+height is OK.
  int16_t stride_width;
  int16_t stride_height;
  int16_t dilation_width_factor;
  int16_t dilation_height_factor;
  // uint8_t inference params.
  // TODO(b/65838351): Use smaller types if appropriate.
  int32_t input_offset;
  int32_t weights_offset;
  int32_t output_offset;
  int32_t output_multiplier;
  int output_shift;
  // uint8_t, etc, activation params.
  int32_t quantized_activation_min;
  int32_t quantized_activation_max;
  // float activation params.
  float float_activation_min;
  float float_activation_max;
  bool is_replaced_weights{false};
};

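// Illustrative sketch (not part of cker's API; all concrete values are
// assumptions): filling ConvParams for a float 3x3 convolution with stride 1
// and SAME padding of 1. DepthwiseConvParams and TransposeConvParams below
// follow the same pattern.
inline ConvParams MakeExampleFloatConvParams()
{
  ConvParams op_params{};
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = 1;
  op_params.padding_values.height = 1;
  op_params.stride_width = 1;
  op_params.stride_height = 1;
  op_params.dilation_width_factor = 1;
  op_params.dilation_height_factor = 1;
  // Float path: the quantization offsets/multipliers stay zero, and the
  // activation clamp is left wide open (no fused activation).
  op_params.float_activation_min = std::numeric_limits<float>::lowest();
  op_params.float_activation_max = std::numeric_limits<float>::max();
  return op_params;
}
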
struct ComparisonParams
{
  ComparisonOpType type;
  int left_shift;
  int input1_shift;
  int input2_shift;
  int32_t input1_offset;
  int32_t input1_multiplier;
  int32_t input2_offset;
  int32_t input2_multiplier;
  bool is_broadcast;
};

struct BinaryArithmeticOpParam
{
  // Shape dependent / common to data / op types.
  BroadcastableOpCategory broadcast_category;
  // uint8 inference params.
  int32_t input1_offset;
  int32_t input2_offset;
  int32_t output_offset;
  int32_t output_multiplier;
  int32_t output_shift;
  // Add / Sub, not Mul, uint8 inference params.
  int32_t left_shift;
  int32_t input1_multiplier;
  int32_t input1_shift;
  int32_t input2_multiplier;
  int32_t input2_shift;
  // uint8, etc, activation params.
  int32_t quantized_activation_min;
  int32_t quantized_activation_max;
  // float activation params.
  float float_activation_min;
  float float_activation_max;

  // Processed output dimensions.
  // Let input "a" be the one that broadcasts in the faster-changing dimension.
  // Then, after coalescing, for shapes {a0, a1, a2, a3, a4} and
  // {b0, b1, b2, b3, b4},
  // broadcast_shape[4] = b0 = a0.
  // broadcast_shape[3] = b1; a1 = 1.
  // broadcast_shape[2] = b2 = a2.
  // broadcast_shape[1] = a3; b3 = 1.
  // broadcast_shape[0] = b4 = a4.
  int broadcast_shape[5] = {};
};

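// Worked example for broadcast_shape (illustrative; one self-consistent
// assignment under the equations above, assuming {a0, ..., a4} lists the
// fastest-changing coalesced dimension first): adding input1 of shape {2, 3}
// and input2 of shape {2, 1}, input2 is "a", since its single column value is
// re-read while the fastest (column) index moves. Coalescing gives
// a = {1, 1, 2, 1, 1} and b = {1, 3, 2, 1, 1}, hence
//   broadcast_shape[4] = 1, [3] = 3, [2] = 2, [1] = 1, [0] = 1,
// with broadcast_category = kSecondInputBroadcastsFast.
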
struct TransposeParams
{
  int8_t perm_count;
  int32_t perm[4];
};

struct ConcatenationParams
{
  int8_t axis;
  const int32_t *input_zeropoint;
  const float *input_scale;
  uint16_t inputs_count;
  int32_t output_zeropoint;
  float output_scale;
};

struct DepthwiseConvParams
{
  PaddingType padding_type;
  PaddingValues padding_values;
  int16_t stride_width;
  int16_t stride_height;
  int16_t dilation_width_factor;
  int16_t dilation_height_factor;
  int16_t depth_multiplier;
  // uint8 inference params.
  // TODO(b/65838351): Use smaller types if appropriate.
  int32_t input_offset;
  int32_t weights_offset;
  int32_t output_offset;
  int32_t output_multiplier;
  int output_shift;
  // uint8, etc, activation params.
  int32_t quantized_activation_min;
  int32_t quantized_activation_max;
  // float activation params.
  float float_activation_min;
  float float_activation_max;
};

struct FullyConnectedParams
{
  FusedActivationFunctionType activation{FusedActivationFunctionType::kNone};
  // uint8 inference params.
  // TODO(b/65838351): Use smaller types if appropriate.
  int32_t input_offset;
  int32_t weights_offset;
  float weights_scale;
  int32_t output_offset;
  int32_t output_multiplier;
  int output_shift;
  // uint8, etc, activation params.
  int32_t quantized_activation_min;
  int32_t quantized_activation_max;
  // float activation params.
  float float_activation_min;
  float float_activation_max;
  // FullyConnectedWeightsFormat weights_format;
};

struct InstanceNormParams
{
  float epsilon;
  float float_activation_min;
  float float_activation_max;
};

struct TransposeConvParams
{
  PaddingType padding_type;
  PaddingValues padding_values;
  // TODO(starka): This was just "stride", so check that width+height is OK.
  int16_t stride_width;
  int16_t stride_height;
  int16_t dilation_width_factor;
  int16_t dilation_height_factor;
  // uint8_t inference params.
  // TODO(b/65838351): Use smaller types if appropriate.
  int32_t input_offset;
  int32_t weights_offset;
  int32_t output_offset;
  int32_t output_multiplier;
  int output_shift;
  // uint8_t, etc, activation params.
  int32_t quantized_activation_min;
  int32_t quantized_activation_max;
  // float activation params.
  float float_activation_min;
  float float_activation_max;
};

struct StridedSliceParams
{
  int8_t start_indices_count;
  int16_t start_indices[4];
  int8_t stop_indices_count;
  int16_t stop_indices[4];
  int8_t strides_count;
  int16_t strides[4];

  int16_t begin_mask;
  int16_t ellipsis_mask;
  int16_t end_mask;
  int16_t new_axis_mask;
  int16_t shrink_axis_mask;
};

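// The masks follow TensorFlow's StridedSlice semantics: bit i of a mask
// applies to dimension i. Illustrative example: slicing x[1, :] of a 2-D
// tensor uses start_indices = {1, 0}, stop_indices = {2, 0},
// strides = {1, 1}, begin_mask = end_mask = 0b10 (dimension 1 ignores its
// start/stop and takes the full range), and shrink_axis_mask = 0b01
// (dimension 0 collapses away).
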
struct FusedBatchNormParams
{
  bool is_training;
  std::string data_format; // UNKNOWN(0), NHWC(1), NCHW(2)
  float epsilon;
};

struct SpaceToBatchParams
{
  // "Zero" padding for uint8 means padding with the output offset.
  int32_t output_offset;
};

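// For instance (illustrative values): with a uint8 output quantized as
// real = scale * (q - zero_point) and output_offset (the zero point) = 128,
// padded regions are filled with q = 128, i.e. the quantized encoding of 0.0,
// rather than the raw byte 0.
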
enum class Order
{
  kColMajor,
  kRowMajor
};

// MatrixParams encapsulates the parameters that Gemm needs about each
// matrix, besides the buffer data pointer.
// Compare to ruy::Matrix, which also encapsulates the data pointer.
// Rationale for leaving the data pointer out of here: doing so
// requires complicated const-correctness mechanics. See
// ruy::ConstCheckingPtr.
template <typename Scalar> struct MatrixParams
{
  // Storage layout order. For now we only do plain linear non-strided
  // layout. It would be easy to support a stride if needed.
  Order order = Order::kColMajor;
  // Number of rows of the matrix.
  int rows = 0;
  // Number of columns of the matrix.
  int cols = 0;
  // The zero_point, i.e. which Scalar value is to be interpreted as zero.
  // When Scalar is floating-point, this must be 0.
  Scalar zero_point = 0;
  // Indicate whether the underlying data will remain unchanged for
  // some period of time. Defaults to false, but should be set to true
  // for unchanging data (e.g. weights buffers in many cases).
  bool cacheable = false;
};

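// Illustrative sketch (not part of cker's API; the sizes are assumptions):
// MatrixParams describing a constant 64x32 column-major float weights matrix.
inline MatrixParams<float> MakeExampleWeightsMatrixParams()
{
  MatrixParams<float> weights_params;
  weights_params.order = Order::kColMajor;
  weights_params.rows = 64;
  weights_params.cols = 32;
  weights_params.zero_point = 0;   // must stay 0 for floating-point Scalar
  weights_params.cacheable = true; // weights are unchanging across calls
  return weights_params;
}
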
// Enumeration of broad categories of Gemm.
//
// The primary reason for this to exist is to allow Gemm to compile
// only uniform-quantized or only per-channel-quantized code paths.
// This is unneeded with ruy as the back-end, as this is only a runtime
// difference in ruy, but with gemmlowp these really are separate code
// paths and templatizing in a QuantizationFlavor is necessary to avoid
// compiling unused gemmlowp code. Indeed, TFLite currently uses
// uint8 with uniform quantization and int8 with per-channel quantization,
// and does not use uint8 with per-channel. We want to avoid compiling
// the gemmlowp uint8 per-channel path when gemmlowp is the back-end.
//
// It's possible to drop this in the future if gemmlowp goes away and no
// other then-relevant backend library handles quantized paths in a way that
// requires knowing this at compile-time.
enum class QuantizationFlavor
{
  // Floating-point Gemm: the accumulators are not multiplied by any
  // 'multiplier'.
  kFloatingPoint,
  // Quantized Gemm using a single multiplier for all accumulators.
  kIntegerWithUniformMultiplier,
  // Quantized Gemm using separate multipliers for accumulators of each
  // row of the destination matrix. This is what is called 'per-channel'
  // in GemmParams. Here we use the more specific 'per-row' terminology
  // to allow for the possibility of 'per-column' in the future, and to
  // allow for that to be a separate code path in some back-end such as
  // gemmlowp.
  kIntegerWithPerRowMultiplier
};

// Additional parameters that Gemm needs, beyond what falls into
// the MatrixParams that it takes. Compare to ruy::Spec.
//
// Decoupling AccumScalar from DstScalar (rather than deducing it from the
// other) is useful future-proofing. Think of a float16 path using float32 accum.
//
// QuantizationFlavor is passed here even though it's technically not used
// in this class. This is so that we retain the ability in the future to
// specialize this class for quantization flavor, and this allows for
// Gemm to be templatized in quantization_flavor via the GemmParams that it
// takes, allowing for automatic template parameter deduction to take place,
// so that most call sites don't need to specify a QuantizationFlavor
// (only those that need per-channel quantization do).
template <typename AccumScalar, typename DstScalar,
          QuantizationFlavor quantization_flavor =
            std::is_floating_point<AccumScalar>::value
              ? QuantizationFlavor::kFloatingPoint
              : QuantizationFlavor::kIntegerWithUniformMultiplier>
struct GemmParams
{
  // Only for non-floating-point cases. The fixed-point part (i.e. the mantissa)
  // of the multiplier by which accumulators are multiplied before being cast
  // to the destination type.
  AccumScalar multiplier_fixedpoint = 0;
  // Only for non-floating-point cases. The exponent part of the aforementioned
  // multiplier.
  int multiplier_exponent = 0;
  // Per-channel variant of multiplier_fixedpoint. If not nullptr, this must
  // point to a buffer of as many values as there are rows in the destination
  // matrix. Each row of the destination matrix will use the corresponding
  // buffer element instead of multiplier_fixedpoint.
  const AccumScalar *multiplier_fixedpoint_perchannel = nullptr;
  // Per-channel variant of multiplier_exponent. If not nullptr, this must
  // point to a buffer of as many values as there are rows in the destination
  // matrix. Each row of the destination matrix will use the corresponding
  // buffer element instead of multiplier_exponent.
  //
  // Either none or both of multiplier_exponent_perchannel and
  // multiplier_fixedpoint_perchannel must be nullptr.
  const int *multiplier_exponent_perchannel = nullptr;
  // The bias vector data, if not null.
  const AccumScalar *bias = nullptr;
  // min clamp bound of destination values.
  DstScalar clamp_min = std::is_floating_point<DstScalar>::value
                          ? -std::numeric_limits<DstScalar>::infinity()
                          : std::numeric_limits<DstScalar>::lowest();
  // max clamp bound of destination values.
  DstScalar clamp_max = std::is_floating_point<DstScalar>::value
                          ? std::numeric_limits<DstScalar>::infinity()
                          : std::numeric_limits<DstScalar>::max();
};

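// Illustrative sketch (not part of cker's API; all concrete values are
// assumptions): GemmParams for a uint8 Gemm with int32 accumulators and a
// uniform rescaling multiplier multiplier_fixedpoint * 2^multiplier_exponent.
inline GemmParams<int32_t, uint8_t> MakeExampleUint8GemmParams(const int32_t *bias_data)
{
  GemmParams<int32_t, uint8_t> gemm_params;
  gemm_params.multiplier_fixedpoint = 1 << 30; // Q31 mantissa, i.e. 0.5
  gemm_params.multiplier_exponent = -1;        // overall scale 0.5 * 2^-1 = 0.25
  gemm_params.bias = bias_data;                // one int32_t per destination row
  gemm_params.clamp_min = 0;                   // full uint8 range
  gemm_params.clamp_max = 255;
  return gemm_params;
}
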
// Validates self-consistency of GemmParams.
template <typename AccumScalar, typename DstScalar, QuantizationFlavor quantization_flavor>
void ValidateGemmParams(const GemmParams<AccumScalar, DstScalar, quantization_flavor> &params)
{
  // Guard consistency of the quantized multiplier fields.
  if (quantization_flavor == QuantizationFlavor::kFloatingPoint)
  {
    assert(!params.multiplier_fixedpoint);
    assert(!params.multiplier_exponent);
    assert(!params.multiplier_fixedpoint_perchannel);
    assert(!params.multiplier_exponent_perchannel);
  }
  else if (quantization_flavor == QuantizationFlavor::kIntegerWithUniformMultiplier &&
           !std::is_same<DstScalar, int32_t>::value)
  {
    assert(params.multiplier_fixedpoint);
    // Nothing to check about multiplier_exponent.
    assert(!params.multiplier_fixedpoint_perchannel);
    assert(!params.multiplier_exponent_perchannel);
  }
  else if (quantization_flavor == QuantizationFlavor::kIntegerWithPerRowMultiplier &&
           !std::is_same<DstScalar, int32_t>::value)
  {
    assert(!params.multiplier_fixedpoint);
    assert(!params.multiplier_exponent);
    assert(params.multiplier_fixedpoint_perchannel);
    assert(params.multiplier_exponent_perchannel);
  }
  else
  {
    // For the raw-accumulator case (DstScalar == int32_t), make sure none of
    // the quantization params are set.
    assert(!params.multiplier_fixedpoint);
    assert(!params.multiplier_exponent);
    assert(!params.multiplier_fixedpoint_perchannel);
    assert(!params.multiplier_exponent_perchannel);
  }
  UNUSED_RELEASE(params);
}

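// Illustrative usage (assumed values): a float Gemm leaves every quantized
// multiplier field at its default, so validation passes; the clamp bounds
// here model a fused ReLU6.
inline void ExampleValidateFloatGemmParams()
{
  GemmParams<float, float> float_params;
  float_params.clamp_min = 0.0f;
  float_params.clamp_max = 6.0f;
  ValidateGemmParams(float_params);
}
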
} // namespace cker
} // namespace nnfw

#endif // __NNFW_CKER_TYPES_H__