From 856b8e5a78a3cef48c3cebfd4d9f35555864cc01 Mon Sep 17 00:00:00 2001
From: 오형석/On-Device Lab(SR)/Staff Engineer/삼성전자
Any NNAPI function can return any result code, including result codes not
+ * currently documented. Any value other than {@link ANEURALNETWORKS_NO_ERROR}
+ * indicates a failure of some kind.
+ *
+ * Additional information about the nature of a failure can be obtained from
+ * the device log after enabling NNAPI debugging by setting the debug.nn.vlog
+ * property to 1, e.g., by calling "adb shell setprop debug.nn.vlog 1".
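As a minimal illustration of the paragraph above, a sketch of checking an NNAPI result code in C; the wrapper name and log tag are hypothetical, and ANeuralNetworksModel_create stands in for any NNAPI call.

#include <android/NeuralNetworks.h>
#include <android/log.h>

/* Treat anything other than ANEURALNETWORKS_NO_ERROR as a failure and log it;
 * further details appear in the device log once debug.nn.vlog is set to 1. */
static int create_model_checked(ANeuralNetworksModel** model) {
    int status = ANeuralNetworksModel_create(model);
    if (status != ANEURALNETWORKS_NO_ERROR) {
        __android_log_print(ANDROID_LOG_ERROR, "nnapi-demo",
                            "ANeuralNetworksModel_create failed: %d", status);
    }
    return status;
}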
@@ -955,25 +1341,50 @@ typedef enum {
* * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
* that values are bound within [-cell_clip, cell_clip]. If set to 0.0
* then clipping is disabled.
+ * Until API level 29 this scalar must be of type {@link
+ * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input
+ * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
+ * ANEURALNETWORKS_FLOAT16}.
* * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * Until API level 29 this scalar must be of type {@link
+ * ANEURALNETWORKS_FLOAT32}. Since API level 29, if all the input
+ * tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32}, this
+ * scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
+ * ANEURALNETWORKS_FLOAT16}.
+ * Since API level 29 there are additional inputs to this op:
+ * * 23:The input layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 24:The forget layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 25:The cell layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 26:The output layer normalization weights.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
*
* Outputs:
* * 0: The scratch buffer.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, num_units * 4] with CIFG, or
- * [batch_size, num_units * 3] without CIFG.
+ * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
+ * [batch_size, num_units * 4] without CIFG.
* * 1: The output state (out) (\f$h_t\f$).
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, output_size].
+ * A 2-D tensor of shape [batch_size, output_size].
* * 2: The cell state (out) (\f$C_t\f$).
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, num_units].
* * 3: The output (\f$o_t\f$).
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, output_size]. This is effectively the same as the
-    *      current “output state (out)” value.
+ * A 2-D tensor of shape [batch_size, output_size]. This is effectively
+    *      the same as the current “output state (out)” value.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_LSTM = 16,
@@ -985,20 +1396,27 @@ typedef enum {
*
* The values in the output tensor are computed as:
*
- * output[batch, row, col, channel] =
- * max_{i, j} (input[batch, row + i, col + j, channel])
+ * output[b, i, j, channel] =
+ * max_{di, dj} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj, channel]
+ * )
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
*
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
 *      the left, in the “width” dimension.
* * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
@@ -1018,10 +1436,14 @@ typedef enum {
* * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
* {@link FuseCode} values. Specifies the activation to
* invoke on the result.
+ * * 10: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since API level 29.
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* {@link PaddingCode} values.
@@ -1036,10 +1458,17 @@ typedef enum {
* * 6: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
* {@link FuseCode} values. Specifies the activation to
* invoke on the result.
+ * * 7: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since API level 29.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_MAX_POOL_2D = 17,
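A minimal reference loop for the MAX_POOL_2D formula above, assuming float data, NHWC layout, explicit padding already applied and no fused activation; all names are illustrative and this is only a sketch, not the NNAPI implementation.

#include <math.h>

static void max_pool_2d_nhwc(const float* in, float* out,
                             int batches, int in_h, int in_w, int depth,
                             int out_h, int out_w,
                             int filter_h, int filter_w,
                             int stride_h, int stride_w) {
    for (int b = 0; b < batches; ++b)
        for (int i = 0; i < out_h; ++i)
            for (int j = 0; j < out_w; ++j)
                for (int c = 0; c < depth; ++c) {
                    /* output[b, i, j, c] = max over the filter window of
                     * input[b, stride_h * i + di, stride_w * j + dj, c] */
                    float best = -INFINITY;
                    for (int di = 0; di < filter_h; ++di)
                        for (int dj = 0; dj < filter_w; ++dj) {
                            int y = stride_h * i + di;
                            int x = stride_w * j + dj;
                            float v = in[((b * in_h + y) * in_w + x) * depth + c];
                            if (v > best) best = v;
                        }
                    out[((b * out_h + i) * out_w + j) * depth + c] = best;
                }
}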
@@ -1058,7 +1487,12 @@ typedef enum {
* of the input operands. It starts with the trailing dimensions, and works
* its way forward.
*
+ * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
@@ -1077,6 +1511,8 @@ typedef enum {
* For output tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
* the following condition must be satisfied:
* output_scale > input1_scale * input2_scale.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_MUL = 18,
@@ -1088,16 +1524,22 @@ typedef enum {
* output = max(0, input)
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_RELU = 19,
@@ -1109,16 +1551,22 @@ typedef enum {
* output = min(1.f, max(-1.f, input))
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
- * * 0: The output tensor of same shape as input0.
+ * * 0: The output tensor of the same shape as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_RELU1 = 20,
@@ -1130,16 +1578,22 @@ typedef enum {
* output = min(6, max(0, input))
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_RELU6 = 21,
@@ -1150,6 +1604,7 @@ typedef enum {
* tensor, but with a newly specified shape.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
@@ -1161,8 +1616,17 @@ typedef enum {
* shape of the output tensor. The number of elements implied by shape
* must be the same as the number of elements in the input tensor.
*
+ * If one component of shape is the special value -1, the size of that
+ * dimension is computed so that the total size remains constant. In
+ * particular, a shape of [-1] flattens into 1-D. At most one component
+ * of shape can be -1.
+ *
* Outputs:
* * 0: The output tensor, of shape specified by the input shape.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_RESHAPE = 22,
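A small sketch of how the single -1 shape component described above can be resolved so that the element count is preserved; the helper name and error convention are illustrative.

#include <stdint.h>

/* Returns 0 on success, -1 if the requested shape is inconsistent or has
 * more than one -1 entry. */
static int resolve_reshape_shape(const int32_t* shape, int rank,
                                 int64_t input_elements, int32_t* resolved) {
    int64_t known = 1;
    int wildcard = -1;
    for (int i = 0; i < rank; ++i) {
        if (shape[i] == -1) {
            if (wildcard >= 0) return -1;   /* at most one component may be -1 */
            wildcard = i;
        } else {
            known *= shape[i];
        }
        resolved[i] = shape[i];
    }
    if (wildcard >= 0) {
        if (known == 0 || input_elements % known != 0) return -1;
        resolved[wildcard] = (int32_t)(input_elements / known);  /* inferred size */
    } else if (known != input_elements) {
        return -1;
    }
    return 0;
}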
@@ -1174,21 +1638,54 @@ typedef enum {
* same as corner pixels of input.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
*
- * Inputs:
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input.
+ * the input. Since API level 29, zero batches is supported for this
+ * tensor.
* * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
- * height of the output tensor.
- * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
* width of the output tensor.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since API level 29.
+ *
+ * Inputs (resizing by scale, since API level 29):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
+ * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
+ * {@link ANEURALNETWORKS_FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
+ * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
+ * {@link ANEURALNETWORKS_FLOAT32} otherwise.
+ * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_RESIZE_BILINEAR = 23,
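For the "resizing by scale" variant above, a tiny sketch of the output size computation; names are illustrative.

#include <math.h>

/* new_height = floor(height * height_scale), new_width = floor(width * width_scale).
 * For example, a 224x224 input with both scales set to 0.5f yields 112x112. */
static void resize_bilinear_output_size(int height, int width,
                                        float height_scale, float width_scale,
                                        int* new_height, int* new_width) {
    *new_height = (int)floorf(height * height_scale);
    *new_width  = (int)floorf(width * width_scale);
}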
@@ -1209,27 +1706,26 @@ typedef enum {
 * argument (if not “NONE”).
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
*
+ * The input tensors must all be the same type.
+ *
* Inputs:
* * 0: input.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32} of shape
-    *      [batch_size, input_size], where “batch_size” corresponds to the
-    *      batching dimension, and “input_size” is the size of the input.
+    *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+    *      corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
* * 1: weights.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
-    *      [num_units, input_size], where “num_units” corresponds to the
-    *      number of units.
+    *      A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
* * 2: recurrent_weights.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [num_units, num_units], with columns corresponding to the weights
- * from each unit.
+ * A 2-D tensor of shape [num_units, num_units], with columns
+ * corresponding to the weights from each unit.
* * 3: bias.
- * A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 4: hidden state (in).
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, num_units].
* * 5: fused_activation_function.
* An optional {@link FuseCode} value indicating the
 *      activation function. If “NONE” is specified then it results in a
@@ -1237,13 +1733,13 @@ typedef enum {
*
* Outputs:
* * 0: hidden state (out).
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, num_units].
*
* * 1: output.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, num_units]. This is effectively the same as the
- * current state value.
+ * A 2-D tensor of shape [batch_size, num_units]. This is effectively
+ * the same as the current state value.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_RNN = 24,
@@ -1258,21 +1754,38 @@ typedef enum {
* exp((input[batch, i] - max(input[batch, :])) * beta) /
* sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
*
+ * For input tensor with rank other than 2, the activation will be applied
+ * independently on each 1-D slice along specified dimension.
+ *
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 2 or 4.
+ * Supported tensor rank: up to 4.
+ * Tensors with rank other than 2 or 4 are only supported since API level 29.
*
* Inputs:
- * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
- * * 1: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the positive
- * scaling factor for the exponent, beta.
+ * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. Since
+ * API level 29, this tensor may be zero-sized.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta. If input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the scalar must be of
+ * {@link ANEURALNETWORKS_FLOAT32}. If input0 is of {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT16}, then the scalar must be of {@link
+ * ANEURALNETWORKS_FLOAT16}.
+ * * 2: An optional {@link ANEURALNETWORKS_INT32} scalar, default to -1,
+ * specifying the dimension the activation would be performed on.
+ * Negative index is used to specify axis from the end (e.g. -1 for
+ * the last axis). Must be in the range [-n, n).
+ * Available since API level 29.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
* For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 256 and the zeroPoint must be 0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_SOFTMAX = 25,
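A minimal sketch of the softmax formula above applied to one 1-D slice along the chosen axis, assuming float data; the operation repeats this independently for every slice, and the names are illustrative.

#include <math.h>

static void softmax_slice(const float* in, float* out, int n, float beta) {
    float max_val = in[0];
    for (int k = 1; k < n; ++k)
        if (in[k] > max_val) max_val = in[k];
    float sum = 0.0f;
    for (int k = 0; k < n; ++k) {
        /* exp((input[k] - max(input)) * beta) */
        out[k] = expf((in[k] - max_val) * beta);
        sum += out[k];
    }
    for (int k = 0; k < n; ++k)
        out[k] /= sum;   /* normalize so the slice sums to 1 */
}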
@@ -1291,10 +1804,14 @@ typedef enum {
* The input tensor's height and width must be divisible by block_size.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
*
* Inputs:
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
@@ -1302,10 +1819,17 @@ typedef enum {
* * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the block_size.
* block_size must be >=1 and block_size must be a divisor of both the
* input height and width.
+ * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since API level 29.
*
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, height/block_size,
* width/block_size, depth_in*block_size*block_size].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_SPACE_TO_DEPTH = 26,
@@ -1349,27 +1873,26 @@ typedef enum {
* the filters.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
*
+ * All input tensors must be the same type.
+ *
* Inputs:
* * 0: input.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
-    *      [batch_size, input_size], where “batch_size” corresponds to the
-    *      batching dimension, and “input_size” is the size of the input.
+    *      A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+    *      corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
* * 1: weights_feature.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
-    *      [num_units, input_size], where “num_units” corresponds to the
-    *      number of units.
+    *      A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
* * 2: weights_time.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
-    *      [num_units, memory_size], where “memory_size” corresponds to the
-    *      fixed-size of the memory.
+    *      A 2-D tensor of shape [num_units, memory_size], where “memory_size”
+ * corresponds to the fixed-size of the memory.
* * 3: bias.
- * An optional 1-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32},
- * of shape [num_units].
+ * An optional 1-D tensor of shape [num_units].
* * 4: state (in).
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, (memory_size - 1) * num_units * rank].
+ * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
* * 5: rank.
* The rank of the SVD approximation.
* * 6: fused_activation_function.
@@ -1379,11 +1902,13 @@ typedef enum {
*
* Outputs:
* * 0: state (out).
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
+ * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
* [batch_size, (memory_size - 1) * num_units * rank].
* * 1: output.
- * A 2-D tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of the same {@link OperandCode} as the inputs, with shape
+ * [batch_size, num_units].
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_SVDF = 27,
@@ -1395,19 +1920,27 @@ typedef enum {
* output = tanh(input)
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input.
+ * * 0: A tensor, specifying the input. Since API level 29, this tensor may
+ * be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
+ * For {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * the scale must be 1.f / 128 and the zeroPoint must be 128.
+ *
+ * Available since API level 27.
*/
ANEURALNETWORKS_TANH = 28,
- // TODO: make the description easier to understand.
+ // Operations below are available since API level 28.
+
/**
* BatchToSpace for N-dimensional tensors.
*
@@ -1419,19 +1952,30 @@ typedef enum {
* This is the reverse of SpaceToBatch.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be reshaped
* * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the block
* sizes for each spatial dimension of the input tensor. All values
* must be >= 1.
+ * * 2: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since API level 29.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_BATCH_TO_SPACE_ND = 29,
@@ -1455,7 +1999,12 @@ typedef enum {
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of the corresponding input dimensions is zero.
+ *
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
*
* Supported tensor rank: up to 4
@@ -1470,6 +2019,8 @@ typedef enum {
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_DIV = 30,
@@ -1481,10 +2032,8 @@ typedef enum {
* in axis. If keep_dims is true, the reduced dimensions are retained with
* length 1.
*
- * If dimensions to reduce have no entries, all dimensions are reduced, and
- * a tensor with a single element is returned.
- *
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
@@ -1493,24 +2042,36 @@ typedef enum {
* Inputs:
* * 0: A tensor, specifying the input.
* * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
- * to reduce. If None (the default), reduces all dimensions. Must be in
- * the range [-rank(input_tensor), rank(input_tensor)).
+ * to reduce. Must be in the range
+ * [-rank(input_tensor), rank(input_tensor)).
+ *
+ * NOTE: When the operation was introduced, the documentation
+ * incorrectly stated that if dimensions were empty, the operation
+ * would reduce across all dimensions. This behavior was never
+ * implemented.
+ *
* * 2: An {@link ANEURALNETWORKS_INT32} scalar, keep_dims. If positive,
* retains reduced dimensions with length 1.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ *      the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_MEAN = 31,
/**
- * Pads a tensor.
+ * Pads a tensor with zeros.
*
* This operation pads a tensor according to the specified paddings.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
- * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (full support since API
+ * level 29, see the output section)
*
* Supported tensor rank: up to 4
*
@@ -1532,10 +2093,17 @@ typedef enum {
* of the padding:
* output0.dimension[i] =
* padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * NOTE: Before API level 29, the pad value for
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
+ * Since API level 29, the pad value is always the logical zero.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_PAD = 32,
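A small sketch of the output-shape rule above, assuming the paddings tensor is stored row-major as {rank, 2}; names are illustrative.

#include <stdint.h>

/* output0.dimension[i] = padding[i, 0] + input0.dimension[i] + padding[i, 1] */
static void pad_output_shape(const uint32_t* in_dims, int rank,
                             const int32_t* paddings, uint32_t* out_dims) {
    for (int i = 0; i < rank; ++i) {
        out_dims[i] = (uint32_t)(paddings[2 * i] +
                                 (int32_t)in_dims[i] +
                                 paddings[2 * i + 1]);
    }
}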
- // TODO: make the description easier to understand.
/**
* SpaceToBatch for N-Dimensional tensors.
*
@@ -1548,10 +2116,15 @@ typedef enum {
* dimensions of the input are optionally zero padded according to paddings.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
- * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (full support since API
+ * level 29, see the output section)
*
- * Supported tensor rank: 4
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
*
* Inputs:
* * 0: An n-D tensor, specifying the input.
@@ -1560,14 +2133,26 @@ typedef enum {
* must be >= 1.
* * 2: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
* for each spatial dimension of the input tensor. All values must be
- * >= 0. The shape of the tensor must be {rank(input0), 2}.
+ * >= 0. The shape of the tensor must be {M, 2}, where M is the number
+ * of spatial dimensions.
* padding[i, 0] specifies the number of element to be padded in the
* front of dimension i.
* padding[i, 1] specifies the number of element to be padded after the
* end of dimension i.
+ * * 3: An optional {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ * Available since API level 29.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * NOTE: Before API level 29, the pad value for
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
+ * Since API level 29, the pad value is always the logical zero.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_SPACE_TO_BATCH_ND = 33,
@@ -1580,6 +2165,7 @@ typedef enum {
* dimensions by specifying the axes (input1).
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
@@ -1597,6 +2183,10 @@ typedef enum {
* * 0: A tensor of the same {@link OperandCode} as input0. Contains the
* same data as input, but has one or more dimensions of size 1
* removed.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_SQUEEZE = 34,
@@ -1610,6 +2200,7 @@ typedef enum {
* reverse slice.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
@@ -1617,28 +2208,34 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be sliced.
- * * 1: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of
- * the dimensions of the input tensor to be sliced. The length must be
- * of rank(input0).
- * * 2: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of
- * the dimensions of the input tensor to be sliced. The length must be
- * of rank(input0).
- * * 3: A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of
- * the dimensions of the input tensor to be sliced. The length must be
- * of rank(input0).
- * * 4: An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit
+ * * 1: begin, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
+ * starts of the dimensions of the input tensor to be sliced. The
+ * length must be of rank(input0).
+ * * 2: end, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
+ * ends of the dimensions of the input tensor to be sliced. The length
+ * must be of rank(input0).
+ * * 3: strides, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The
+ * strides of the dimensions of the input tensor to be sliced. The
+ * length must be of rank(input0). The entries must be non-zero.
+ * * 4: begin_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit
* of begin_mask is set, begin[i] is ignored and the fullest possible
* range in that dimension is used instead.
- * * 5: An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of
+ * * 5: end_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the ith bit of
* end_mask is set, end[i] is ignored and the fullest possible range in
* that dimension is used instead.
- * * 6: An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32
- * mask. If the ith bit of shrink_axis_mask is set, it implies that the
- * ith specification shrinks the dimensionality by 1. A slice of size 1
- * starting from begin[i] in the dimension must be preserved.
+ * * 6: shrink_axis_mask, an {@link ANEURALNETWORKS_INT32} scalar. If the
+ * ith bit of shrink_axis_mask is set, the ith dimension specification
+ * shrinks the dimensionality by 1, taking on the value at index
+ * begin[i]. In this case, the ith specification must define a
+ * slice of size 1, e.g. begin[i] = x, end[i] = x + 1.
*
* Outputs:
- * * 0: A tensor of the same {@link OperandCode} as input0.
+ * * 0: A tensor of the same {@link OperandCode} as input0 and rank (n - k),
+ * where k is the number of bits set in shrink_axis_mask.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_STRIDED_SLICE = 35,
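A 1-D sketch of how begin, end, stride and the begin/end masks interact, assuming a positive stride and in-range indices; it ignores shrink_axis_mask and is only illustrative.

/* Returns the number of elements copied into `out`. */
static int strided_slice_1d(const float* in, int size, float* out,
                            int begin, int end, int stride,
                            int begin_mask_bit, int end_mask_bit) {
    if (begin_mask_bit) begin = 0;     /* ith bit of begin_mask set: widest start */
    if (end_mask_bit)   end   = size;  /* ith bit of end_mask set: widest end */
    int count = 0;
    for (int idx = begin; idx < end; idx += stride)
        out[count++] = in[idx];
    return count;
}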
@@ -1662,8 +2259,14 @@ typedef enum {
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
+ * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * dimension is only compatible with 0 or 1. The size of the output
+ * dimension is zero if either of corresponding input dimension is zero.
+ *
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} (since API level 29)
*
* Supported tensor rank: up to 4
*
@@ -1677,6 +2280,10 @@ typedef enum {
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_SUB = 36,
@@ -1690,6 +2297,7 @@ typedef enum {
* regular matrix transpose on 2-D input Tensors.
*
* Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
* * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
* * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
*
@@ -1697,64 +2305,2601 @@ typedef enum {
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be transposed.
+ * Since API level 29, this tensor may be zero-sized.
* * 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
* the permutation of the dimensions of the input tensor.
*
* Outputs:
* * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 28.
*/
ANEURALNETWORKS_TRANSPOSE = 37,
-} OperationCode;
-/**
- * Fused activation function types.
- *
- */
-typedef enum {
- /** NO fused activation function. */
- ANEURALNETWORKS_FUSED_NONE = 0,
- /** Fused ReLU activation function. */
- ANEURALNETWORKS_FUSED_RELU = 1,
- /** Fused ReLU1 activation function. */
- ANEURALNETWORKS_FUSED_RELU1 = 2,
- /** Fused ReLU6 activation function. */
- ANEURALNETWORKS_FUSED_RELU6 = 3,
-} FuseCode;
+ // Operations below are available since API level 29.
-/**
- * Implicit padding algorithms.
- *
- */
-typedef enum {
/**
- * SAME padding.
- * Padding on both ends are the "same":
- * padding_to_beginning = total_padding / 2
- * padding_to_end = (total_padding + 1)/2.
- * i.e., for even number of padding, padding to both ends are exactly
- * the same; for odd number of padding, padding to the ending is bigger
- * than the padding to the beginning by 1.
+ * Computes the absolute value of a tensor, element-wise.
*
- * total_padding is a function of input, stride and filter size.
- * It could be computed as follows:
- * out_size = (input + stride - 1) / stride;
- * needed_input = (out_size - 1) * stride + filter_size
- * total_padding = max(0, needed_input - output_size)
- * The computation is the same for the horizontal and vertical directions.
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
*/
- ANEURALNETWORKS_PADDING_SAME = 1,
+ ANEURALNETWORKS_ABS = 38,
/**
- * VALID padding.
- * No padding. When the input size is not evenly divisible by
- * the filter size, the input at the end that could not fill
- * the whole filter tile will simply be ignored.
- */
- ANEURALNETWORKS_PADDING_VALID = 2,
-} PaddingCode;
-
+ * Returns the index of the largest element along an axis.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
+ *
+ * Available since API level 29.
+ */
+ // There is no underscore in ARG_MAX to avoid name conflict with
+ // the macro defined in libc/kernel/uapi/linux/limits.h.
+ ANEURALNETWORKS_ARGMAX = 39,
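A tiny sketch of the reduction above for one 1-D slice along the chosen axis, together with the negative-axis normalization; names are illustrative.

/* e.g. axis -1 on a rank-n tensor refers to dimension n - 1 */
static int normalize_axis(int axis, int rank) {
    return axis < 0 ? axis + rank : axis;
}

/* Index of the largest element in one slice along the reduced axis. */
static int argmax_slice(const float* values, int n) {
    int best = 0;
    for (int k = 1; k < n; ++k)
        if (values[k] > values[best]) best = k;
    return best;
}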
+
+ /**
+ * Returns the index of the smallest element along an axis.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor specifying the input. Must be non-empty.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: An (n - 1)-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_ARGMIN = 40, // See ARGMAX for naming discussion.
+
+ /**
+ * Transform axis-aligned bounding box proposals using bounding box deltas.
+ *
+ * Given the positions of bounding box proposals and the corresponding
+ * bounding box deltas for each class, return the refined bounding box
+     * regions. The resulting bounding boxes are clipped against the edges of
+ * the image.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, 4], specifying the locations of the
+ * bounding box proposals, each line with format [x1, y1, x2, y2].
+ * For tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
+ * the zeroPoint must be 0 and the scale must be 0.125. Zero num_rois
+ * is supported for this tensor.
+ * * 1: A 2-D Tensor of shape [num_rois, num_classes * 4], specifying the
+ * bounding box delta for each region of interest and each class. The
+ * bounding box deltas are organized in the following order
+     *      [dx, dy, dw, dh], where dx and dy are the relative correction factors
+     *      for the center position of the bounding box with respect to the width
+     *      and height, and dw and dh are the log-scale relative correction factors
+ * for the width and height. For input0 of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, this tensor should be
+ * of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}. Zero num_rois is
+ * supported for this tensor.
+ * * 2: An 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the information of
+ * each image in the batch, each line with format
+ * [image_height, image_width].
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0, with shape
+ * [num_rois, num_classes * 4], specifying the coordinates of each
+ * output bounding box for each class, with format [x1, y1, x2, y2].
+ * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM = 41,
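A hedged sketch of the box decoding implied by the description above: dx/dy shift the center relative to the width/height, dw/dh rescale the box in log space, and the result is clipped to the image. The exact arithmetic of the NNAPI implementation may differ, and all names here are illustrative.

#include <math.h>

static void decode_box(const float box[4],    /* [x1, y1, x2, y2] */
                       const float delta[4],  /* [dx, dy, dw, dh] */
                       float image_h, float image_w, float out[4]) {
    float w  = box[2] - box[0];
    float h  = box[3] - box[1];
    float cx = box[0] + 0.5f * w;
    float cy = box[1] + 0.5f * h;
    float ncx = cx + delta[0] * w;          /* center moved by dx * width  */
    float ncy = cy + delta[1] * h;          /* center moved by dy * height */
    float nw  = w * expf(delta[2]);         /* log-scale width correction  */
    float nh  = h * expf(delta[3]);         /* log-scale height correction */
    out[0] = fmaxf(0.0f, fminf(image_w, ncx - 0.5f * nw));
    out[1] = fmaxf(0.0f, fminf(image_h, ncy - 0.5f * nh));
    out[2] = fmaxf(0.0f, fminf(image_w, ncx + 0.5f * nw));
    out[3] = fmaxf(0.0f, fminf(image_h, ncy + 0.5f * nh));
}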
+
+ /**
+ * Performs a forward LSTM on the input followed by a backward LSTM.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ *
+ * Inputs:
+ * * 0: The input.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where "max_time" is the number of timesteps (sequence length),
+ * "batch_size" corresponds to the batching dimension, and
+ * "input_size" is the size of the input.
+ * * 1: The forward input-to-input weights. Optional.
+     *      A 2-D tensor of shape [fw_num_units, input_size], where “fw_num_units”
+ * corresponds to the number of forward cell units.
+ * * 2: The forward input-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 3: The forward input-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 4: The forward input-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 5: The forward recurrent-to-input weights. Optional.
+     *      A 2-D tensor of shape [fw_num_units, fw_output_size], where “fw_output_size”
+     *      corresponds to either the number of cell units (i.e., fw_num_units),
+     *      or the second dimension of the “fw_projection_weights”, if defined.
+ * * 6: The forward recurrent-to-forget weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 7: The forward recurrent-to-cell weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 8: The forward recurrent-to-output weights.
+ * A 2-D tensor of shape [fw_num_units, fw_output_size].
+ * * 9: The forward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 10: The forward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 11: The forward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 12: The forward input gate bias. Optional.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 13: The forward forget gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 14: The forward cell gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 15: The forward output gate bias.
+ * A 1-D tensor of shape [fw_num_units].
+ * * 16: The forward projection weights. Optional.
+ * A 2-D tensor of shape [fw_output_size, fw_num_units].
+ * * 17: The forward projection bias. Optional.
+ * A 1-D tensor of shape [fw_output_size].
+ * * 18: The backward input-to-input weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, input_size], where “bw_num_units”
+ * corresponds to the number of backward cell units.
+ * * 19: The backward input-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 20: The backward input-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 21: The backward input-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 22: The backward recurrent-to-input weights. Optional.
+     *       A 2-D tensor of shape [bw_num_units, bw_output_size], where “bw_output_size”
+     *       corresponds to either the number of cell units (i.e., “bw_num_units”),
+     *       or the second dimension of the “bw_projection_weights”, if defined.
+ * * 23: The backward recurrent-to-forget weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 24: The backward recurrent-to-cell weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 25: The backward recurrent-to-output weights.
+ * A 2-D tensor of shape [bw_num_units, bw_output_size].
+ * * 26: The backward cell-to-input weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 27: The backward cell-to-forget weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 28: The backward cell-to-output weights. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 29: The backward input gate bias. Optional.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 30: The backward forget gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 31: The backward cell gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 32: The backward output gate bias.
+ * A 1-D tensor of shape [bw_num_units].
+ * * 33: The backward projection weights. Optional.
+ * A 2-D tensor of shape [bw_output_size, bw_num_units].
+ * * 34: The backward projection bias. Optional.
+ * A 1-D tensor of shape [bw_output_size].
+ * * 35: The forward input activation state.
+ * A 2-D tensor of shape [batch_size, bw_output_size].
+ * * 36: The forward input cell state.
+ * A 2-D tensor of shape [batch_size, bw_num_units].
+ * * 37: The backward input activation state.
+ * A 2-D tensor of shape [batch_size, bw_output_size].
+ * * 38: The backward input cell state.
+ * A 2-D tensor of shape [batch_size, bw_num_units].
+ * * 39: The auxiliary input. Optional.
+     *       A 3-D tensor of shape [max_time, batch_size, input_size], where “batch_size”
+     *       corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
+ * * 40: The forward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 41: The forward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 42: The forward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 43: The forward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [fw_num_units, input_size].
+ * * 44: The backward auxiliary input-to-input weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 45: The backward auxiliary input-to-forget weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 46: The backward auxiliary input-to-cell weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 47: The backward auxiliary input-to-output weights. Optional.
+ * A 2-D tensor of shape [bw_num_units, input_size].
+ * * 48: The activation function.
+ * A value indicating the activation function:
+ *
+ *
+ * * 49: The clipping threshold for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
+ * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
+ * ANEURALNETWORKS_FLOAT16}.
+ * * 50: The clipping threshold for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * If all the input tensors have type {@link ANEURALNETWORKS_TENSOR_FLOAT32},
+ * this scalar must be of the type {@link ANEURALNETWORKS_FLOAT32},
+ * otherwise if all the input tensors have the type {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT16}, this scalar must be of type {@link
+ * ANEURALNETWORKS_FLOAT16}.
+ * * 51: merge_outputs
+ * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
+ * from forward and backward cells should be merged.
+ * * 52: time_major
+ * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
+ * of input and output tensors.
+ * * 53: The forward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 54: The forward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 55: The forward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 56: The forward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [fw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ * * 57: The backward input layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 58: The backward forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 59: The backward cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 60: The backward output layer normalization weights. Optional.
+ * A 1-D tensor of shape [bw_num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The forward output.
+ * A 3-D tensor of shape:
+ * If time-major and not merge_outputs:
+ * [max_time, batch_size, fw_output_size]
+ * If time-major and merge_outputs:
+ * [max_time, batch_size, fw_output_size + bw_output_size]
+ * If batch-major and not merge_outputs:
+ * [batch_size, max_time, fw_output_size]
+ * If batch-major and merge_outputs:
+ * [batch_size, max_time, fw_output_size + bw_output_size]
+ * * 1: The backward output. Unused if merge_outputs is true.
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, bw_output_size]
+ * If batch-major: [batch_size, max_time, bw_output_size]
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM = 42,
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs in forward and backward directions.
+ *
+ * This Op unrolls the input along the sequence dimension, and implements
+ * the following operation for each element in the sequence s =
+ * 1...sequence_length:
+     *   fw_outputs[s] = fw_state = activation(inputs[s] * fw_input_weights’ +
+     *          fw_state * fw_recurrent_weights’ + fw_bias)
+ *
+ * And for each element in sequence t = sequence_length : 1
+     *   bw_outputs[t] = bw_state = activation(inputs[t] * bw_input_weights’ +
+     *          bw_state * bw_recurrent_weights’ + bw_bias)
+ *
+ * Where:
+     * * “{fw,bw}_input_weights” is a weight matrix that multiplies the inputs;
+     * * “{fw,bw}_recurrent_weights” is a weight matrix that multiplies the
+     *   current “state” which itself is the output from the previous time step
+     *   computation;
+     * * “{fw,bw}_bias” is a bias vector (added to each output vector in the
+     *   batch);
+     * * “activation” is the function passed as the “fused_activation_function”
+     *   argument (if not “NONE”).
+ *
+ * The op also supports an auxiliary input. Regular cell feeds one input
+ * into the two RNN cells in the following way:
+ *
+ * INPUT (INPUT_REVERSED)
+ * | |
+ * ---------------------
+ * | FW_RNN BW_RNN |
+ * ---------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+ * An op with an auxiliary input takes two inputs and feeds them into the
+ * RNN cells in the following way:
+ *
+ * AUX_INPUT (AUX_INPUT_REVERSED)
+ * | |
+ * INPUT | (INPUT_R'D.)|
+ * | | | |
+ * -----------------------
+ * | \ / \ / |
+ * | FW_RNN BW_RNN |
+ * -----------------------
+ * | |
+ * FW_OUT BW_OUT
+ *
+     * When this op is stacked on top of itself, this arrangement allows both
+     * the forward and backward outputs of the previous cell to be connected
+     * to the inputs of the next cell.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to true, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: fwWeights.
+ * A 2-D tensor of shape [fwNumUnits, inputSize].
+ * * 2: fwRecurrentWeights.
+ * A 2-D tensor of shape [fwNumUnits, fwNumUnits].
+ * * 3: fwBias.
+ * A 1-D tensor of shape [fwNumUnits].
+ * * 4: fwHiddenState.
+ * A 2-D tensor of shape [batchSize, fwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: bwWeights.
+ * A 2-D tensor of shape [bwNumUnits, inputSize].
+ * * 6: bwRecurrentWeights.
+ * A 2-D tensor of shape [bwNumUnits, bwNumUnits].
+ * * 7: bwBias.
+ * A 1-D tensor of shape [bwNumUnits].
+ * * 8: bwHiddenState
+ * A 2-D tensor of shape [batchSize, bwNumUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 9: auxInput.
+ * A 3-D tensor. The shape is the same as of the input 0.
+ * * 10:fwAuxWeights.
+ * A 2-D tensor of shape [fwNumUnits, inputSize].
+ * * 11:bwAuxWeights.
+ * A 2-D tensor of shape [bwNumUnits, inputSize].
+ * * 12:fusedActivationFunction.
+ * A {@link FuseCode} value indicating the activation function. If
+     *      “NONE” is specified then it results in a linear activation.
+ * * 13:timeMajor
+ * An {@link ANEURALNETWORKS_BOOL} scalar specifying the shape format
+ * of input and output tensors.
+ * * 14:mergeOutputs
+ * An {@link ANEURALNETWORKS_BOOL} scalar specifying if the outputs
+ * from forward and backward cells are separate (if set to false) or
+ * concatenated (if set to true).
+ * Outputs:
+ * * 0: fwOutput.
+ * A 3-D tensor. The first two dimensions of the shape are defined by
+ * the input 6 (timeMajor) and the third dimension is defined by the
+ * input 14 (mergeOutputs). If timeMajor is set to true, then the first
+ * two dimensions are [maxTime, batchSize], otherwise they are set to
+ * [batchSize, maxTime]. If mergeOutputs is set to true, then the third
+ * dimension is equal to (fwNumUnits + bwNumUnits), otherwise it is set
+ * to fwNumUnits.
+ * * 1: bwOutput.
+ * A 3-D tensor. If the input 14 (mergeOutputs) is set to true, then
+ * this tensor is not produced. The shape is defined by the input 6
+ * (timeMajor). If it is set to true, then the shape is set to
+ * [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
+ * [batchSize, maxTime, bwNumUnits].
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN = 43,
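A minimal sketch of a single unrolled step of the recurrence above (one direction only), assuming row-major float weights; names are illustrative.

/* state/output = activation(input * W’ + state * R’ + bias) for one time step. */
static void rnn_step(const float* input, int input_size,
                     float* state, int num_units,
                     const float* weights,            /* [num_units x input_size] */
                     const float* recurrent_weights,  /* [num_units x num_units]  */
                     const float* bias,               /* [num_units] */
                     float (*activation)(float),
                     float* output /* [num_units] */) {
    for (int u = 0; u < num_units; ++u) {
        float acc = bias[u];
        for (int i = 0; i < input_size; ++i)
            acc += input[i] * weights[u * input_size + i];
        for (int j = 0; j < num_units; ++j)
            acc += state[j] * recurrent_weights[u * num_units + j];
        output[u] = activation(acc);
    }
    for (int u = 0; u < num_units; ++u)
        state[u] = output[u];   /* the output becomes the next step's state */
}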
+
+ /**
+ * Greedily selects a subset of bounding boxes in descending order of score.
+ *
+     * This op applies the NMS algorithm to each class. In each loop of execution,
+ * the box with maximum score gets selected and removed from the pending set.
+ * The scores of the rest of boxes are lowered according to the
+ * intersection-over-union (IOU) overlapping with the previously selected
+ * boxes and a specified NMS kernel method. Any boxes with score less
+ * than a threshold are removed from the pending set.
+ *
+ * Three NMS kernels are supported:
+ * * Hard: score_new = score_old * (1 if IoU < threshold else 0)
+ * * Linear: score_new = score_old * (1 if IoU < threshold else 1 - IoU)
+ * * Gaussian: score_new = score_old * exp(- IoU^2 / sigma)
+ *
+     * Axis-aligned bounding boxes are represented by their upper-left corner
+ * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+ * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 2-D Tensor of shape [num_rois, num_classes], specifying the score
+ * of each bounding box proposal. The boxes are grouped by batches in the
+ * first dimension. Zero num_rois is supported for this tensor.
+ * * 1: A 2-D Tensor specifying the bounding boxes of shape
+ * [num_rois, num_classes * 4], organized in the order [x1, y1, x2, y2].
+ * The boxes are grouped by batches in the first dimension. The sequential
+ * order of the boxes corresponds with input0. For input0 of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint of 0 and
+ * scale of 0.125. Zero num_rois is supported for this tensor.
+ * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, score_threshold. Boxes
+ * with scores lower than the threshold are filtered before sending
+ * to the NMS algorithm.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
+ * number of selected bounding boxes for each image. Set to a negative
+ * value for unlimited number of output bounding boxes.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the NMS
+ * kernel method, options are 0:hard, 1:linear, 2:gaussian.
+ * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
+ * threshold in hard and linear NMS kernel. This field is ignored if
+ * gaussian kernel is selected.
+ * * 7: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the sigma in
+ * gaussian NMS kernel. This field is ignored if gaussian kernel is
+ * not selected.
+ * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, nms_score_threshold.
+ * Boxes with scores lower than the threshold are dropped during the
+ * score updating phase in soft NMS.
+ *
+ * Outputs:
+ * * 0: A 1-D Tensor of the same {@link OperandCode} as input0, with shape
+ * [num_output_rois], specifying the score of each output box. The boxes
+ * are grouped by batches, but the sequential order in each batch is not
+ * guaranteed. For type of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * the scale and zero point must be the same as input0.
+ * * 1: A 2-D Tensor of the same {@link OperandCode} as input1, with shape
+ * [num_output_rois, 4], specifying the coordinates of each
+ * output bounding box with the same format as input1. The sequential
+ * order of the boxes corresponds with output0. For type of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the scale must be
+ * 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the class of each output box. The
+ * sequential order of the boxes corresponds with output0.
+ * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_BOX_WITH_NMS_LIMIT = 44,
+
+ /**
+ * Casts a tensor to a new type.
+ *
+ * This operation ignores the scale and zeroPoint of quantized tensors,
+ * e.g. it treats a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} input
+ * as a tensor of uint8 values.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: A tensor with the same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_CAST = 45,
+
+ /**
+ * Shuffle the channels of the input tensor.
+ *
+ * Given an input tensor and an integer value num_groups, CHANNEL_SHUFFLE
+ * divides the channel dimension into num_groups groups, and reorganizes the
+ * channels by grouping channels with the same index in each group.
+ *
+ * Along the channel dimension, the output is calculated using this formula:
+ *
+ * output_channel[k * num_groups + g] = input_channel[g * group_size + k]
+ *
+ * where group_size = num_channels / num_groups
+ *
+ * The number of channels must be divisible by num_groups.
+ *
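+ * As a minimal sketch (assuming the shuffled axis is the innermost dimension
+ * of a row-major buffer; the function name is illustrative), the formula
+ * above amounts to:
+ *
+ *     // Shuffles the num_channels values of one spatial position.
+ *     static void channel_shuffle_1d(const float* in, float* out,
+ *                                    int num_channels, int num_groups) {
+ *         int group_size = num_channels / num_groups;  // must divide evenly
+ *         for (int g = 0; g < num_groups; ++g)
+ *             for (int k = 0; k < group_size; ++k)
+ *                 out[k * num_groups + g] = in[g * group_size + k];
+ *     }
+ *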
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be shuffled.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
+ * groups.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the dimension
+ * channel shuffle would be performed on. Negative index is used to
+ * specify axis from the end (e.g. -1 for the last axis). Must be in
+ * the range [-n, n).
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_CHANNEL_SHUFFLE = 46,
+
+ /**
+ * Apply postprocessing steps to bounding box detections.
+ *
+ * Bounding box detections are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 3-D Tensor of shape [batches, num_anchors, num_classes], specifying
+ * the score of each anchor with each class. Class 0 for each
+ * [batches, num_anchors, 0] is background and will be ignored.
+ * * 1: A 3-D Tensor of shape [batches, num_anchors, length_box_encoding], with
+ * the first four values in length_box_encoding specifying the bounding
+ * box deltas. The box deltas are encoded in the order of [dy, dx, dh, dw],
+ * where dy and dx are the linear-scale relative correction factors for the
+ * center position of the bounding box with respect to the width and height,
+ * dh and dw are the log-scale relative correction factors for the width and
+ * height. All the entries in length_box_encoding beyond the first four
+ * values are ignored in this operation.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [ctr_y, ctr_x, h, w], where ctr_y and
+ * ctr_x are the center position of the box, and h and w are the height
+ * and the width.
+ * * 3: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
+ * factor for dy in bounding box deltas.
+ * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
+ * factor for dx in bounding box deltas.
+ * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
+ * factor for dh in bounding box deltas.
+ * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the scaling
+ * factor for dw in bounding box deltas.
+ * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to use the regular
+ * multi-class NMS algorithm that does NMS separately for each class,
+ * set to false for a faster algorithm that only does a single NMS
+ * using the highest class score.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, max_num_detections, specifying
+ * the maximum number of boxes for the output. Boxes with the lowest
+ * scores are discarded to meet the limit.
+ * * 9: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
+ * set to false, specifying the maximum number of classes per detection.
+ * * 10: An {@link ANEURALNETWORKS_INT32} scalar, only used when input7 is
+ * set to true, specifying the maximum number of detections when
+ * applying NMS algorithm for each single class.
+ * * 11: A scalar, score_threshold. Boxes with scores lower than the
+ * threshold are filtered before sending to the NMS algorithm. The
+ * scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of
+ * {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link
+ * ANEURALNETWORKS_FLOAT32} if input0 is of {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT32}.
+ * * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
+ * must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is of {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link
+ * ANEURALNETWORKS_FLOAT32} if input0 is of {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT32}.
+ * * 13: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to include
+ * background class in the list of label map for the output, set
+ * to false to not include the background. When the background
+ * class is included, it has label 0 and the output classes start
+ * at 1 in the label map, otherwise, the output classes start at 0.
+ *
+ * Outputs:
+ * * 0: A 2-D tensor of the same {@link OperandCode} as input0, with shape
+ * [batches, max_num_detections], specifying the score of each output
+ * detection.
+ * * 1: A 3-D tensor of shape [batches, max_num_detections, 4], specifying the
+ * coordinates of each output bounding box, with format
+ * [y1, x1, y2, x2].
+ * * 2: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [batches, max_num_detections], specifying the class label for each
+ * output detection.
+ * * 3: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape [batches],
+ * specifying the number of valid output detections for each batch.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_DETECTION_POSTPROCESSING = 47,
+
+ /**
+ * For input tensors x and y, computes x == y elementwise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_EQUAL = 48,
+
+ /**
+ * Computes exponential of x element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_EXP = 49,
+
+ /**
+ * Inserts a dimension of 1 into a tensor's shape.
+ *
+ * Given a tensor input, this operation inserts a dimension of 1 at the
+ * given dimension index of input's shape. The dimension index starts at
+ * zero; if you specify a negative dimension index, it is counted backward
+ * from the end.
+ *
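+ * For example, expanding a [3, 4] tensor at dimension index 1 (or -2)
+ * produces a [3, 1, 4] tensor. A minimal sketch of the shape computation,
+ * with an illustrative function name:
+ *
+ *     // Writes the n + 1 output dimensions; axis may be negative.
+ *     static void expand_dims_shape(const int* dims, int n, int axis,
+ *                                   int* out_dims) {
+ *         if (axis < 0) axis += n + 1;                 // e.g. -1 becomes n
+ *         for (int i = 0; i < axis; ++i) out_dims[i] = dims[i];
+ *         out_dims[axis] = 1;
+ *         for (int i = axis; i < n; ++i) out_dims[i + 1] = dims[i];
+ *     }
+ *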
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the dimension
+ * index to expand. Must be in the range [-(n + 1), (n + 1)).
+ *
+ * Outputs:
+ * * 0: An (n + 1)-D tensor with the same {@link OperandCode} and data as
+ * input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_EXPAND_DIMS = 50,
+
+ /**
+ * Gathers values along an axis.
+ *
+ * Produces an output tensor with shape
+ * input0.dimension[:axis] + indices.dimension + input0.dimension[axis + 1:]
+ * where:
+ * # Vector indices (output is rank(input0)).
+ * output[a_0, ..., a_n, i, b_0, ..., b_n] =
+ * input0[a_0, ..., a_n, indices[i], b_0, ..., b_n]
+ *
+ * # Higher rank indices (output is rank(input0) + rank(indices) - 1).
+ * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
+ * input0[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
+ *
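+ * As an illustration of the vector-indices case above (axis = 0 on a 2-D,
+ * row-major input; names are illustrative):
+ *
+ *     // in: [rows, row_len], indices: [num_indices],
+ *     // out: [num_indices, row_len].
+ *     static void gather_rows(const float* in, int row_len,
+ *                             const int* indices, int num_indices,
+ *                             float* out) {
+ *         for (int i = 0; i < num_indices; ++i)
+ *             for (int j = 0; j < row_len; ++j)
+ *                 out[i * row_len + j] = in[indices[i] * row_len + j];
+ *     }
+ *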
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor from which to gather values.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis.
+ * Negative index is used to specify axis from the end
+ * (e.g. -1 for the last axis). Must be in the range [-n, n).
+ * * 2: A k-D tensor {@link ANEURALNETWORKS_TENSOR_INT32} of indices.
+ * The values must be in the bounds of the corresponding dimensions
+ * of input0.
+ *
+ * Outputs:
+ * * 0: An (n + k - 1)-D tensor with the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_GATHER = 51,
+
+ /**
+ * Generate axis-aligned bounding box proposals.
+ *
+ * Bounding box proposals are generated by applying transformation on a set
+ * of predefined anchors with the bounding box deltas from bounding box
+ * regression. A final step of hard NMS is applied to limit the number of
+ * returned boxes.
+ *
+ * Axis-aligned bounding boxes are represented by their upper-left corner
+ * coordinate (x1,y1) and lower-right corner coordinate (x2,y2). A valid
+ * bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor specifying the score of each anchor at each
+ * location. With "NHWC" data layout, the tensor shape is
+ * [batches, height, width, num_anchors]. With "NCHW" data layout,
+ * the tensor shape is [batches, num_anchors, height, width].
+ * * 1: A 4-D Tensor specifying the bounding box deltas. With "NHWC" data
+ * layout, the tensor shape is [batches, height, width, num_anchors * 4].
+ * With "NCHW" data layout, the tensor shape is
+ * [batches, num_anchors * 4, height, width]. The box deltas are encoded
+ * in the order of [dx, dy, dw, dh], where dx and dy are the linear-scale
+ * relative correction factors for the center position of the bounding box
+ * with respect to the width and height, dw and dh are the log-scale
+ * relative correction factors for the width and height. The last
+ * dimension is the channel dimension.
+ * * 2: A 2-D Tensor of shape [num_anchors, 4], specifying the shape of each
+ * predefined anchor, with format [x1, y1, x2, y2]. For input0 of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should be of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with scale of 0.125.
+ * * 3: A 2-D Tensor of shape [batches, 2], specifying the size of
+ * each image in the batch, with format [image_height, image_width].
+ * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this
+ * tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}, with
+ * scale of 0.125.
+ * * 4: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
+ * number of boxes before going into the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+ * a non-positive value for an unlimited number.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the maximum
+ * number of boxes returning from the hard NMS algorithm. Boxes
+ * with the lowest scores are discarded to meet the limit. Set to
+ * a non-positive value for an unlimited number.
+ * * 8: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the IoU
+ * threshold for hard NMS.
+ * * 9: An {@link ANEURALNETWORKS_FLOAT32} scalar, min_size. Boxes with
+ * height or width lower than the absolute threshold are filtered out.
+ * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and input1. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0, of shape
+ * [num_output_rois], specifying the score of each output box.
+ * The boxes are grouped by batches, but the sequential order in
+ * each batch is not guaranteed. For type of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the scale and zero
+ * point must be the same as input0.
+ * * 1: A tensor of the same {@link OperandCode} as input3, of shape
+ * [num_output_rois, 4], specifying the coordinates of each output
+ * bounding box for each class, with format [x1, y1, x2, y2].
+ * The sequential order of the boxes corresponds with output0.
+ * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_output_rois], specifying the batch index of each box. Boxes
+ * with the same batch index are grouped together.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_GENERATE_PROPOSALS = 52,
+
+ /**
+ * For input tensors x and y, computes x > y elementwise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_GREATER = 53,
+
+ /**
+ * For input tensors x and y, computes x >= y elementwise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_GREATER_EQUAL = 54,
+
+ /**
+ * Performs a grouped 2-D convolution operation.
+ *
+ * Given an input tensor of shape [batches, height, width, depth_in] and a
+ * filter tensor of shape [depth_out, filter_height, filter_width, depth_group]
+ * containing depth_out convolutional filters of depth depth_group, GROUPED_CONV
+ * applies a group of different filters to each input channel group, then
+ * concatenates the results together.
+ *
+ * Specifically, the input channels are divided into num_groups groups, each with
+ * depth depth_group, i.e. depth_in = num_groups * depth_group. The convolutional
+ * filters are also divided into num_groups groups, i.e. depth_out is divisible
+ * by num_groups. GROUPED_CONV applies each group of filters to the corresponding
+ * input channel group, and the results are concatenated together.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, i, j, g * channel_multiplier + q] =
+ * sum_{di, dj, dk} (
+ * input[b, strides[1] * i + di, strides[2] * j + dj,
+ * g * depth_group + dk] *
+ * filter[g * channel_multiplier + q, di, dj, dk]
+ * ) + bias[channel]
+ *
+ * where channel_multiplier = depth_out / num_groups
+ *
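+ * As an informal restatement of that formula (NHWC layout, no padding, no
+ * activation; all names are illustrative), one output value could be
+ * computed as:
+ *
+ *     // Output value at [b, i, j, g * channel_multiplier + q].
+ *     static float grouped_conv_at(const float* input, const float* filter,
+ *                                  const float* bias,
+ *                                  int in_h, int in_w, int depth_in,
+ *                                  int filter_h, int filter_w, int depth_group,
+ *                                  int stride_h, int stride_w,
+ *                                  int channel_multiplier,
+ *                                  int b, int i, int j, int g, int q) {
+ *         int out_c = g * channel_multiplier + q;
+ *         float acc = bias[out_c];
+ *         for (int di = 0; di < filter_h; ++di)
+ *             for (int dj = 0; dj < filter_w; ++dj)
+ *                 for (int dk = 0; dk < depth_group; ++dk)
+ *                     acc += input[((b * in_h + stride_h * i + di) * in_w
+ *                                   + stride_w * j + dj) * depth_in
+ *                                  + g * depth_group + dk]
+ *                            * filter[((out_c * filter_h + di) * filter_w + dj)
+ *                                     * depth_group + dk];
+ *         return acc;
+ *     }
+ *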
+ * Supported tensor {@link OperandCode} configurations:
+ * * 16 bit floating point:
+ * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (channelDim at
+ * {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
+ * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
+ * groups.
+ * * 10: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
+ * * 11: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input, where depth_in = num_groups * depth_group.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_group], specifying
+ * the filter, where depth_out must be divisible by num_groups. For
+ * tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (channelDim at
+ * {@link ANeuralNetworksSymmPerChannelQuantParams}) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
+ * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same
+ * type. For filter tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * the bias should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint
+ * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
+ * of {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * should be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
+ * groups.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_GROUPED_CONV_2D = 55,
+
+ /**
+ * Localize the maximum keypoints from heatmaps.
+ *
+ * This operation approximates the accurate maximum keypoint scores and
+ * indices after bicubic upscaling by using Taylor expansion up to the
+ * quadratic term.
+ *
+ * The bounding box is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A valid bounding box should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D Tensor of shape
+ * [num_boxes, heatmap_size, heatmap_size, num_keypoints],
+ * specifying the heatmaps, the height and width of heatmaps should
+ * be the same, and must be greater than or equal to 2.
+ * * 1: A 2-D Tensor of shape [num_boxes, 4], specifying the bounding boxes,
+ * each with format [x1, y1, x2, y2]. For input0 of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, this tensor should
+ * be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, with zeroPoint
+ * of 0 and scale of 0.125.
+ * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0, with shape
+ * [num_boxes, num_keypoints], specifying score of the keypoints.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from input0 scale and zeroPoint.
+ * * 1: A tensor of the same {@link OperandCode} as input1, with shape
+ * [num_boxes, num_keypoints, 2], specifying the location of
+ * the keypoints, the second dimension is organized as
+ * [keypoint_x, keypoint_y].
+ * For type of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT = 56,
+
+ /**
+ * Applies instance normalization to the input tensor.
+ *
+ * The values in the output tensor are computed as:
+ *
+ * output[b, h, w, c] =
+ * (input[b, h, w, c] - mean[b, c]) * gamma /
+ * sqrt(var[b, c] + epsilon) + beta
+ *
+ * Where the mean and variance are computed across the spatial dimensions:
+ *
+ * mean[b, c] =
+ * sum_{h, w}(input[b, h, w, c]) / sum(1)
+ *
+ * var[b, c] =
+ * sum_{h, w}(pow(input[b, h, w, c] - mean[b, c], 2)) / sum(1)
+ *
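+ * A minimal sketch for one (batch, channel) slice of an NHWC tensor, with
+ * data pointing at element [b, 0, 0, c] and stride equal to the number of
+ * channels (names are illustrative):
+ *
+ *     #include <math.h>
+ *
+ *     static void instance_norm_slice(float* data, int height, int width,
+ *                                     int stride, float gamma, float beta,
+ *                                     float epsilon) {
+ *         int n = height * width;
+ *         float mean = 0.0f, var = 0.0f;
+ *         for (int i = 0; i < n; ++i) mean += data[i * stride];
+ *         mean /= n;
+ *         for (int i = 0; i < n; ++i) {
+ *             float d = data[i * stride] - mean;
+ *             var += d * d;
+ *         }
+ *         var /= n;
+ *         for (int i = 0; i < n; ++i)
+ *             data[i * stride] = (data[i * stride] - mean) * gamma
+ *                                / sqrtf(var + epsilon) + beta;
+ *     }
+ *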
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be normalized.
+ * * 1: A scalar, specifying gamma, the scale applied to the normalized
+ * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
+ * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link
+ * ANEURALNETWORKS_FLOAT32} if input0 is of {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT32}.
+ * * 2: A scalar, specifying beta, the offset applied to the normalized
+ * tensor. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
+ * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link
+ * ANEURALNETWORKS_FLOAT32} if input0 is of {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT32}.
+ * * 3: A scalar, specifying epsilon, the small value added to variance to
+ * avoid dividing by zero. The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if
+ * input0 is of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of {@link
+ * ANEURALNETWORKS_FLOAT32} if input0 is of {@link
+ * ANEURALNETWORKS_TENSOR_FLOAT32}.
+ * * 4: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} and same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_INSTANCE_NORMALIZATION = 57,
+
+ /**
+ * For input tensors x and y, computes x < y elementwise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_LESS = 58,
+
+ /**
+ * For input tensors x and y, computes x <= y elementwise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_LESS_EQUAL = 59,
+
+ /**
+ * Computes natural logarithm of x element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_LOG = 60,
+
+ /**
+ * Returns the truth value of x AND y element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_LOGICAL_AND = 61,
+
+ /**
+ * Computes the truth value of NOT x element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_LOGICAL_NOT = 62,
+
+ /**
+ * Returns the truth value of x OR y element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ * * 1: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8} and dimensions
+ * compatible with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_LOGICAL_OR = 63,
+
+ /**
+ * Computes the log softmax activations given logits.
+ *
+ * The output is calculated using this formula:
+ *
+ * output = logits * beta - log(reduce_sum(exp(logits * beta), axis))
+ *
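+ * A naive 1-D sketch of that formula (reducing over the whole vector; a real
+ * implementation would subtract the maximum before exponentiating for
+ * numerical stability; names are illustrative):
+ *
+ *     #include <math.h>
+ *
+ *     static void log_softmax_1d(const float* in, float* out, int n,
+ *                                float beta) {
+ *         float sum = 0.0f;
+ *         for (int j = 0; j < n; ++j) sum += expf(in[j] * beta);
+ *         float log_sum = logf(sum);
+ *         for (int i = 0; i < n; ++i) out[i] = in[i] * beta - log_sum;
+ *     }
+ *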
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor specifying the input logits.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the beta
+ * value must be of {@link ANEURALNETWORKS_FLOAT16}.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the beta
+ * value must be of {@link ANEURALNETWORKS_FLOAT32}.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis to
+ * reduce across. Negative index is used to specify axis from the
+ * end (e.g. -1 for the last axis). Must be in the range [-n, n).
+ *
+ * Outputs:
+ * * 0: The output tensor of the same {@link OperandCode} and shape as
+ * input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_LOG_SOFTMAX = 64,
+
+ /**
+ * Returns the element-wise maximum of two tensors.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
+ * with input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_MAXIMUM = 65,
+
+ /**
+ * Returns the element-wise minimum of two tensors.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and compatible dimensions
+ * with input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_MINIMUM = 66,
+
+ /**
+ * Computes numerical negative value element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_NEG = 67,
+
+ /**
+ * For input tensors x and y, computes x != y elementwise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * This operation supports broadcasting.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ * * 1: A tensor of the same {@link OperandCode} and dimensions compatible
+ * with input0.
+ *
+ * Outputs:
+ * * 0: A tensor of {@link ANEURALNETWORKS_TENSOR_BOOL8}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_NOT_EQUAL = 68,
+
+ /**
+ * Pads a tensor with the given constant value according to the specified
+ * paddings.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor, specifying the tensor to be padded.
+ * * 1: A 2-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the paddings
+ * for each spatial dimension of the input tensor. The shape of the
+ * tensor must be {rank(input0), 2}.
+ * padding[i, 0] specifies the number of elements to be padded in the
+ * front of dimension i.
+ * padding[i, 1] specifies the number of elements to be padded after
+ * the end of dimension i.
+ * * 2: A scalar specifying the value to use for padding input0.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the
+ * pad value must be of {@link ANEURALNETWORKS_FLOAT16}.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_FLOAT32}, the
+ * pad value must be of {@link ANEURALNETWORKS_FLOAT32}.
+ * For input tensor of {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * the pad value must be of {@link ANEURALNETWORKS_INT32}. The
+ * scale and zeroPoint are assumed to be the same as in input0.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0. The
+ * output tensor has the same rank as input0, and each
+ * dimension of the output tensor has the same size as the
+ * corresponding dimension of the input tensor plus the size
+ * of the padding:
+ * output0.dimension[i] =
+ * padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_PAD_V2 = 69,
+
+ /**
+ * Computes the power of one value to another.
+ *
+ * Given a tensor base and a tensor exponent, this operation computes
+ * base^exponent elementwise.
+ *
+ * This operation supports broadcasting. The size of the output is the
+ * maximum size along each dimension of the input operands. It starts with
+ * the trailing dimensions, and works its way forward.
+ *
+ * For example:
+ * base.dimension = {4, 1, 2}
+ * exponent.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
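+ * The broadcast rule above can be sketched as follows (dimensions are aligned
+ * from the trailing end and each aligned pair must be equal or contain a 1;
+ * names are illustrative):
+ *
+ *     static void broadcast_shape(const int* a, int rank_a,
+ *                                 const int* b, int rank_b, int* out) {
+ *         int rank_out = rank_a > rank_b ? rank_a : rank_b;
+ *         for (int i = 0; i < rank_out; ++i) {
+ *             int da = i < rank_a ? a[rank_a - 1 - i] : 1;
+ *             int db = i < rank_b ? b[rank_b - 1 - i] : 1;
+ *             out[rank_out - 1 - i] = da > db ? da : db;
+ *         }
+ *     }
+ *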
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor specifying the base.
+ * * 1: A tensor specifying the exponent.
+ *
+ * Outputs:
+ * * 0: An output tensor.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_POW = 70,
+
+ /**
+ * Parametric Rectified Linear Unit.
+ *
+ * It follows: f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha
+ * is a learned array with the same {@link OperandCode} and compatible
+ * dimensions as input x.
+ *
+ * Two dimensions are compatible when:
+ * 1. they are equal, or
+ * 2. one of them is 1
+ *
+ * The size of the output is the maximum size along each dimension of the
+ * input operands. It starts with the trailing dimensions, and works its way
+ * forward.
+ *
+ * Example:
+ * input.dimension = {4, 1, 2}
+ * alpha.dimension = {5, 4, 3, 1}
+ * output.dimension = {5, 4, 3, 2}
+ *
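+ * Ignoring broadcasting, the element-wise rule is simply (illustrative
+ * names, alpha already expanded to the input shape):
+ *
+ *     static void prelu(const float* x, const float* alpha, float* out,
+ *                       int n) {
+ *         for (int i = 0; i < n; ++i)
+ *             out[i] = x[i] >= 0.0f ? x[i] : alpha[i] * x[i];
+ *     }
+ *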
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, specifying the input.
+ * * 1: A tensor of the same {@link OperandCode}, and compatible dimensions
+ * as input0, specifying the alpha.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_PRELU = 71,
+
+ /**
+ * Quantizes the input tensor.
+ *
+ * The formula is:
+ *
+ * output = max(0, min(255, round(input / scale) + zeroPoint))
+ *
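+ * A scalar sketch of that formula with illustrative names; the rounding
+ * mode shown (round half away from zero via roundf) is an assumption here,
+ * not a statement about what a driver must do:
+ *
+ *     #include <math.h>
+ *     #include <stdint.h>
+ *
+ *     static uint8_t quantize_value(float input, float scale, int zero_point) {
+ *         float q = roundf(input / scale) + (float) zero_point;
+ *         if (q < 0.0f) q = 0.0f;
+ *         if (q > 255.0f) q = 255.0f;
+ *         return (uint8_t) q;
+ *     }
+ *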
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor, may be zero-sized.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0, but with
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_QUANTIZE = 72,
+
+ /**
+ * A version of quantized LSTM, using 16 bit quantization for internal
+ * state.
+ *
+ * There is no projection layer, so cell state size is equal to the output
+ * size.
+ *
+ * Inputs:
+ * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, inputSize] specifying the input to the LSTM
+ * cell. Tensor is quantized with a fixed quantization range of
+ * [-1, 127/128] (scale = 1/128, zeroPoint = 128).
+ * * 1: The input-to-input weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-input part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 2: The input-to-forget weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-forget part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 3: The input-to-cell weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-cell part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 4: The input-to-output weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, inputSize] specifying input-to-output part of
+ * weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 5: The recurrent-to-input weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-input part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 6: The recurrent-to-forget weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-forget
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 7: The recurrent-to-cell weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-cell part
+ * of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 8: The recurrent-to-output weights.
+ * A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [outputSize, outputSize] specifying recurrent-to-output
+ * part of weights for fully-connected layer inside the LSTM cell.
+ * Quantization zero point and scale must be the same across all the
+ * weights.
+ * * 9: The input gate bias.
+ * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 10:The forget gate bias.
+ * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 11:The cell bias.
+ * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 12:The output gate bias.
+ * A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} and shape
+ * [outputSize] specifying the bias for the fully-connected layer
+ * inside the LSTM cell. Bias is quantized with scale being a product
+ * of input and weights scales and zeroPoint equal to 0.
+ * * 13: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] specifying the cell state from the
+ * previous time step of the LSTM cell. It is quantized using a
+ * quantization range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 /
+ * 32768, zeroPoint = 0).
+ * * 14: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, outputSize] specifying the output of the LSTM
+ * cell from previous time-step. Tensor is quantized with a fixed
+ * quantization range of [-1, 127/128] (scale = 1/128, zeroPoint =
+ * 128).
+ *
+ * Outputs:
+ * * 0: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT16_SYMM}
+ * and shape [numBatches, outputSize] which contains a cell state from
+ * the current time step. Tensor is quantized using a quantization
+ * range of [-2^4, 2^4 * 32767/32768] (scale = 2^4 / 32768, zeroPoint =
+ * 0).
+ * * 1: A 2-D tensor of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ * and shape [numBatches, outputSize] which contains the output value.
+ * Tensor is quantized with a fixed quantization range of [-1, 127/128]
+ * (scale = 1/128, zeroPoint = 128).
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_QUANTIZED_16BIT_LSTM = 73,
+
+ /**
+ * Draws samples from a multinomial distribution.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Inputs:
+ * * 0: A 2-D tensor with shape [batches, classes], specifying the
+ * unnormalized log-probabilities for all classes.
+ * * 1: A scalar {@link ANEURALNETWORKS_INT32}, specifying the number of
+ * independent samples to draw for each row slice.
+ * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape [2],
+ * specifying seeds used to initialize the random distribution.
+ * Outputs:
+ * * 0: A 2-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor with shape
+ * [batches, samples], containing the drawn samples.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_RANDOM_MULTINOMIAL = 74,
+
+ /**
+ * Reduces a tensor by computing the "logical and" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_REDUCE_ALL = 75,
+
+ /**
+ * Reduces a tensor by computing the "logical or" of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_BOOL8}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_REDUCE_ANY = 76,
+
+ /**
+ * Reduces a tensor by computing the maximum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_REDUCE_MAX = 77,
+
+ /**
+ * Reduces a tensor by computing the minimum of elements along given
+ * dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_REDUCE_MIN = 78,
+
+ /**
+ * Reduces a tensor by multiplying elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_REDUCE_PROD = 79,
+
+ /**
+ * Reduces a tensor by summing elements along given dimensions.
+ *
+ * If keep_dims is true, the reduced dimensions are
+ * retained with length 1. Otherwise, the rank of the tensor is reduced by
+ * 1 for each entry in dimensions.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: up to 4
+ *
+ * Inputs:
+ * * 0: An n-D tensor.
+ * * 1: A 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}. The dimensions
+ * to reduce. Dimension values must be in the range [-n, n).
+ * * 2: An {@link ANEURALNETWORKS_BOOL} scalar, keep_dims. If true,
+ * retains reduced dimensions with length 1.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_REDUCE_SUM = 80,
+
+ /**
+ * Select and scale the feature map of each region of interest to a unified
+ * output size by average pooling sampling points from bilinear interpolation.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A spatial scaling factor is applied to map into feature map coordinate.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * No rounding is applied in this operation. The sampling points are uniformly
+ * distributed in the pooling bin and their values are calculated by bilinear
+ * interpolation.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} (since API level 29)
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125. Zero num_rois is
+ * supported for this tensor.
+ * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together. Zero num_rois is
+ * supported for this tensor.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
+ * sampling points in height dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_height/out_height).
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
+ * sampling points in width dimension used to compute the output.
+ * Set to 0 for adaptive value of ceil(roi_width/out_width).
+ * * 9: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_ROI_ALIGN = 81,
+
+ /**
+ * Select and scale the feature map of each region of interest to a unified
+ * output size by max-pooling.
+ *
+ * The region of interest is represented by its upper-left corner coordinate
+ * (x1,y1) and lower-right corner coordinate (x2,y2) in the original image.
+ * A spatial scaling factor is applied to map into feature map coordinate.
+ * A valid region of interest should satisfy x1 <= x2 and y1 <= y2.
+ *
+ * Rounding is applied in this operation to ensure integer boundary for
+ * regions of interest and pooling bins.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Inputs:
+ * * 0: A 4-D tensor, specifying the feature map.
+ * * 1: A 2-D Tensor of shape [num_rois, 4], specifying the locations of
+ * the regions of interest, each line with format [x1, y1, x2, y2].
+ * For input0 of type {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM},
+ * this tensor should be of {@link ANEURALNETWORKS_TENSOR_QUANT16_ASYMM},
+ * with zeroPoint of 0 and scale of 0.125.
+ * * 2: A 1-D {@link ANEURALNETWORKS_TENSOR_INT32} tensor, of shape
+ * [num_rois], specifying the batch index of each box. Boxes with
+ * the same batch index are grouped together.
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 5: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
+ * from the height of original image to the height of feature map.
+ * * 6: An {@link ANEURALNETWORKS_FLOAT32} scalar, specifying the ratio
+ * from the width of original image to the width of feature map.
+ * * 7: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: A tensor of the same {@link OperandCode} as input0. The output
+ * shape is [num_rois, out_height, out_width, depth].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_ROI_POOLING = 82,
+
+ /**
+ * Computes reciprocal of square root of x element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_RSQRT = 83,
+
+ /**
+ * Uses a tensor of booleans c and input tensors x and y to select values
+ * elementwise from both input tensors (a short model-building sketch follows
+ * this entry):
+ *
+ * o[i] = c[i] ? x[i] : y[i].
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: A tensor of type {@link ANEURALNETWORKS_TENSOR_BOOL8} acting as a
+ * mask that chooses, based on the value at each element, whether the
+ * corresponding element in the output should be taken from input1 (if
+ * true) or input2 (if false).
+ * * 1: An input tensor of the same shape as input0.
+ * * 2: An input tensor of the same shape and type as input1.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input1 scale and zeroPoint.
+ *
+ * Outputs:
+ * * 0: A tensor of the same type and shape as input1 and input2.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_SELECT = 84,
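
To make the operation-table format concrete, here is a hedged sketch of wiring SELECT into a model with the model-building calls declared later in this header. The 1-D shape of 4 elements, the helper name build_select_model, and the omission of error checking are illustrative assumptions.

    #include <android/NeuralNetworks.h>

    /* Builds a tiny model computing out[i] = mask[i] ? a[i] : b[i] with
     * ANEURALNETWORKS_SELECT. Shapes and the lack of error checking are
     * illustrative only. */
    static ANeuralNetworksModel* build_select_model(void) {
        ANeuralNetworksModel* model = NULL;
        ANeuralNetworksModel_create(&model);

        const uint32_t dims[1] = {4};
        ANeuralNetworksOperandType maskType = {
            .type = ANEURALNETWORKS_TENSOR_BOOL8, .dimensionCount = 1,
            .dimensions = dims, .scale = 0.0f, .zeroPoint = 0};
        ANeuralNetworksOperandType dataType = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1,
            .dimensions = dims, .scale = 0.0f, .zeroPoint = 0};

        ANeuralNetworksModel_addOperand(model, &maskType);  /* operand 0: mask */
        ANeuralNetworksModel_addOperand(model, &dataType);  /* operand 1: a    */
        ANeuralNetworksModel_addOperand(model, &dataType);  /* operand 2: b    */
        ANeuralNetworksModel_addOperand(model, &dataType);  /* operand 3: out  */

        const uint32_t inputs[3] = {0, 1, 2};
        const uint32_t outputs[1] = {3};
        ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_SELECT,
                                          3, inputs, 1, outputs);
        ANeuralNetworksModel_identifyInputsAndOutputs(model, 3, inputs,
                                                      1, outputs);
        ANeuralNetworksModel_finish(model);
        return model;
    }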
+
+ /**
+ * Computes sin of x element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_SIN = 85,
+
+ /**
+ * Extracts a slice of specified size from the input tensor starting at a
+ * specified location.
+ *
+ * The starting location is specified as a 1-D tensor containing offsets
+ * for each dimension. The size is specified as a 1-D tensor containing
+ * either the size of the slice along the corresponding dimension or -1. In
+ * the latter case, all the remaining elements in that dimension are included
+ * in the slice.
+ *
+ * The sum of a begin offset and the corresponding slice size must not exceed
+ * the size of that dimension (see the short sketch after this entry).
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to take slice from, may be zero-sized.
+ * * 1: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
+ * the beginning indices of the slice in each dimension.
+ * * 2: A 1-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32} specifying
+ * the size of the slice in each dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input containing the slice.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * its scale and zeroPoint must be the same as the input0 scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_SLICE = 86,
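
A minimal sketch of how the begin/size inputs described above resolve to an output shape, including the -1 convention for "all remaining elements". The helper name and its error convention are illustrative, not part of NNAPI.

    #include <stdint.h>

    /* Resolves the SLICE output extent for each dimension. Returns 0 on success,
     * -1 if a slice would run past the end of its dimension. Illustrative only. */
    static int resolve_slice_shape(uint32_t rank, const uint32_t* in_dims,
                                   const int32_t* begin, const int32_t* size,
                                   uint32_t* out_dims) {
        for (uint32_t d = 0; d < rank; ++d) {
            const int32_t extent =
                (size[d] == -1) ? (int32_t)in_dims[d] - begin[d] : size[d];
            if (begin[d] < 0 || extent < 0 ||
                begin[d] + extent > (int32_t)in_dims[d]) {
                return -1;  /* begin + size must not exceed the dimension. */
            }
            out_dims[d] = (uint32_t)extent;
        }
        return 0;
    }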
+
+ /**
+ * Splits a tensor along a given axis into num_splits subtensors.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: An n-D tensor to split.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar specifying the axis along
+ * which to split.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar indicating the number of
+ * splits along given axis. Must evenly divide axis size.
+ *
+ * Outputs:
+ * * 0 ~ (num_splits - 1): Resulting subtensors.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_SPLIT = 87,
+
+ /**
+ * Computes square root of x element-wise.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: from 1.
+ *
+ * Inputs:
+ * * 0: A tensor.
+ *
+ * Outputs:
+ * * 0: The output tensor of same shape as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_SQRT = 88,
+
+ /**
+ * Constructs a tensor by tiling a given tensor.
+ *
+ * This operation creates a new tensor by replicating `input` `multiples`
+ * times. The output tensor's i-th dimension has `input.dims(i) * multiples[i]`
+ * elements, and the values of `input` are replicated `multiples[i]` times
+ * along the i-th dimension.
+ * For example, tiling `[a b c d]` by `[2]` produces `[a b c d a b c d]`.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: multiples, a 1-D tensor of {@link ANEURALNETWORKS_TENSOR_INT32}.
+ * The length of multiples must be n.
+ *
+ * Outputs:
+ * * 0: A tiled tensor of the same {@link OperandCode} and rank as `input`.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_TILE = 89,
+
+ /**
+ * Finds values and indices of the k largest entries for the last dimension.
+ *
+ * Resulting values in each dimension are sorted in descending order. If
+ * two values are equal, the one with larger index appears first.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_INT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: from 1
+ *
+ * Inputs:
+ * * 0: input, an n-D tensor specifying the input.
+ * * 1: k, an {@link ANEURALNETWORKS_INT32} scalar, specifying the number of
+ * top elements to look for along the last dimension.
+ *
+ * Outputs:
+ * * 0: An n-D tensor of the same type as the input, containing the k
+ * largest elements along each last dimensional slice.
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ * * 1: An n-D tensor of type {@link ANEURALNETWORKS_TENSOR_INT32}
+ * containing the indices of values within the last dimension of input.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_TOPK_V2 = 90,
+
+ /**
+ * Performs the transpose of 2-D convolution operation.
+ *
+ * This operation is sometimes called "deconvolution" after Deconvolutional
+ * Networks, but is actually the transpose (gradient) of
+ * {@link ANEURALNETWORKS_CONV_2D} rather than an actual deconvolution.
+ *
+ * The output dimensions are functions of the filter dimensions, stride, and
+ * padding.
+ *
+ * Supported tensor {@link OperandCode} configurations:
+ * * 16 bit floating point:
+ * * * {@link ANEURALNETWORKS_TENSOR_FLOAT16} for input, filter, output, and bias.
+ *
+ * * 32 bit floating point:
+ * * * {@link ANEURALNETWORKS_TENSOR_FLOAT32} for input, filter, output, and bias.
+ *
+ * * Quantized:
+ * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * * Quantized with symmetric per channel quantization for the filter:
+ * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} for input, and output.
+ * * * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} for filter.
+ * * * {@link ANEURALNETWORKS_TENSOR_INT32} for bias (scale set to 0.0,
+ * * * each value scaling is separate and equal to input.scale * filter.scales[channel]).
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both explicit padding and implicit padding are supported.
+ *
+ * Inputs (explicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input. Since API level 29, zero batches is supported
+ * for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
+ * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i] (a sketch of the matching
+ * per-channel parameters follows this operation's entry).
+ * * 3: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the left, in the ‘width’ dimension.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the right, in the ‘width’ dimension.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the top, in the ‘height’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the padding on
+ * the bottom, in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 8: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 9: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
+ * * 10: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Inputs (implicit padding):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
+ * specifying the input. Since API level 29, zero batches is supported
+ * for this tensor.
+ * * 1: A 4-D tensor, of shape
+ * [depth_out, filter_height, filter_width, depth_in], specifying the
+ * filter. For tensor of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
+ * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
+ * {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the
+ * same type. For input tensor of type
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}, the bias should be
+ * of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale. For filter tensor of
+ * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
+ * must be of {@link ANEURALNETWORKS_TENSOR_INT32}, with zeroPoint of
+ * 0 and bias_scale of 0. The actual scale of each value 'i' is equal
+ * to bias_scale[i] = input_scale * filter_scale[i].
+ * * 3: An {@link ANEURALNETWORKS_TENSOR_INT32} tensor, specifying the output
+ * tensor shape.
+ * * 4: An {@link ANEURALNETWORKS_INT32} scalar, specifying the implicit
+ * padding scheme, has to be one of the
+ * {@link PaddingCode} values.
+ * * 5: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘width’ dimension.
+ * * 6: An {@link ANEURALNETWORKS_INT32} scalar, specifying the stride when
+ * walking through input in the ‘height’ dimension.
+ * * 7: An {@link ANEURALNETWORKS_INT32} scalar, and has to be one of the
+ * {@link FuseCode} values. Specifies the activation to
+ * invoke on the result.
+ * * 8: An {@link ANEURALNETWORKS_BOOL} scalar, set to true to specify
+ * NCHW data layout for input0 and output0. Set to false for NHWC.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, out_height, out_width, depth_out].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_TRANSPOSE_CONV_2D = 91,
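
The per-channel bias-scale rule quoted in the inputs above pairs with the {@link ANeuralNetworksSymmPerChannelQuantParams} struct and the ANeuralNetworksModel_setOperandSymmPerChannelQuantParams call declared later in this header. A hedged sketch, assuming the filter is quantized along channel dimension 0 as required here; the helper name is illustrative.

    #include <android/NeuralNetworks.h>

    /* Attaches symmetric per-channel scales to the filter operand of a
     * (transpose) convolution. filter_scales holds one entry per output channel
     * (channelDim 0). Illustrative only; error handling omitted. */
    static int set_filter_channel_quant(ANeuralNetworksModel* model,
                                        int32_t filter_operand_index,
                                        const float* filter_scales,
                                        uint32_t depth_out) {
        ANeuralNetworksSymmPerChannelQuantParams params = {
            .channelDim = 0,          /* channel dimension of the filter */
            .scaleCount = depth_out,  /* one scale per output channel    */
            .scales = filter_scales,
        };
        return ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
            model, filter_operand_index, &params);
    }

The matching {@link ANEURALNETWORKS_TENSOR_INT32} bias operand then uses per-value scales of input_scale * filter_scales[i], with its own scale field set to 0, as described in the inputs above.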
+
+ /**
+ * A recurrent neural network specified by an LSTM cell.
+ *
+ * Performs (fully) dynamic unrolling of input.
+ *
+ * This Op unrolls the input along the time dimension, and implements the
+ * following operation for each element in the sequence
+ * s = 1...sequence_length:
+ * outputs[s] = projection(state = activation(LSTMOp(inputs[s])))
+ *
+ * Where LSTMOp is the LSTM op as in {@link ANEURALNETWORKS_LSTM},
+ * the "projection" is an optional projection layer from state and output
+ * and the ‘activation’ is the function passed as the
+ * ‘fused_activation_function’ argument (if not ‘NONE’).
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * Supported tensor rank: 3, either time-major or batch-major.
+ *
+ * All input and output tensors must be of the same type.
+ *
+ * Inputs:
+ * * 0: The input (\f$x_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, input_size]
+ * If batch-major: [batch_size, max_time, input_size]
+ * where ‘max_time’ is the number of timesteps (sequence length),
+ * ‘batch_size’ corresponds to the batching dimension, and
+ * ‘input_size’ is the size of the input.
+ * * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, input_size], where ‘num_units’
+ * corresponds to the number of cell units.
+ * * 2: The input-to-forget weights (\f$W_{xf}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 3: The input-to-cell weights (\f$W_{xc}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 4: The input-to-output weights (\f$W_{xo}\f$).
+ * A 2-D tensor of shape [num_units, input_size].
+ * * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
+ * A 2-D tensor of shape [num_units, output_size], where ‘output_size’
+ * corresponds to either the number of cell units (i.e., ‘num_units’),
+ * or the second dimension of the ‘projection_weights’, if defined.
+ * * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
+ * A 2-D tensor of shape [num_units, output_size].
+ * * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 12:The input gate bias (\f$b_i\f$). Optional.
+ * A 1-D tensor of shape [num_units].
+ * * 13:The forget gate bias (\f$b_f\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 14:The cell bias (\f$b_c\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 15:The output gate bias (\f$b_o\f$).
+ * A 1-D tensor of shape [num_units].
+ * * 16:The projection weights (\f$W_{proj}\f$). Optional.
+ * A 2-D tensor of shape [output_size, num_units].
+ * * 17:The projection bias (\f$b_{proj}\f$). Optional.
+ * A 1-D tensor of shape [output_size].
+ * * 18:The output state (in) (\f$h_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, output_size].
+ * * 19:The cell state (in) (\f$C_{t-1}\f$).
+ * A 2-D tensor of shape [batch_size, num_units].
+ * * 20:The activation function (\f$g\f$).
+ * A value indicating the activation function:
+ *
+ *
+ * * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
+ * that values are bound within [-cell_clip, cell_clip]. If set to 0.0
+ * then clipping is disabled.
+ * * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
+ * projection layer, such that values are bound within
+ * [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+ * * 23:Time-major if true, batch-major if false.
+ * * 24:The input layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at input gate.
+ * * 25:The forget layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at forget gate.
+ * * 26:The cell layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at cell gate.
+ * * 27:The output layer normalization weights. Optional.
+ * A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
+ * to activation at output gate.
+ *
+ * Outputs:
+ * * 0: The output (\f$o_t\f$).
+ * A 3-D tensor of shape:
+ * If time-major: [max_time, batch_size, output_size]
+ * If batch-major: [batch_size, max_time, output_size]
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
+
+ /**
+ * A recurrent neural network layer that applies a basic RNN cell to a
+ * sequence of inputs.
+ *
+ * This layer unrolls the input along the sequence dimension, and implements
+ * the following operation
+ * for each element in the sequence s = 1...sequence_length:
+ * outputs[s] = state = activation(inputs[s] * input_weights’ + state *
+ * recurrent_weights’ + bias)
+ *
+ * Where:
+ * * ‘input_weights’ is a weight matrix that multiplies the inputs;
+ * * ‘recurrent_weights’ is a weight matrix that multiplies the current
+ * ‘state’ which itself is the output from the previous time step
+ * computation;
+ * * ‘bias’ is a bias vector (added to each output vector in the batch);
+ * * ‘activation’ is the function passed as the ‘fused_activation_function’
+ * argument (if not ‘NONE’). A minimal per-step sketch in C follows this
+ * entry.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ *
+ * The input tensors must all be the same type.
+ *
+ * Inputs:
+ * * 0: input.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to 1, then the input has a shape [maxTime, batchSize,
+ * inputSize], otherwise the input has a shape [batchSize, maxTime,
+ * inputSize].
+ * * 1: weights.
+ * A 2-D tensor of shape [numUnits, inputSize].
+ * * 2: recurrent_weights.
+ * A 2-D tensor of shape [numUnits, numUnits].
+ * * 3: bias.
+ * A 1-D tensor of shape [numUnits].
+ * * 4: hidden state
+ * A 2-D tensor of shape [batchSize, numUnits]. Specifies a hidden
+ * state input for the first time step of the computation.
+ * * 5: fusedActivationFunction.
+ * A {@link FuseCode} value indicating the activation function. If
+ * ‘NONE’ is specified then it results in a linear activation.
+ * * 6: timeMajor
+ * An {@link ANEURALNETWORKS_INT32} scalar specifying the shape format
+ * of input and output tensors. Must be set to either 0 or 1.
+ * Outputs:
+ * * 0: output.
+ * A 3-D tensor. The shape is defined by the input 6 (timeMajor). If
+ * it is set to 1, then the output has a shape [maxTime, batchSize,
+ * numUnits], otherwise the output has a shape [batchSize, maxTime,
+ * numUnits].
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN = 93,
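
The recurrence above is just a pair of matrix-vector products per time step. Below is a minimal float sketch of one step for a single batch element, with the activation hard-coded to tanh for illustration (in the real operation it is chosen by the fusedActivationFunction input); the function name and scratch-buffer convention are assumptions.

    #include <math.h>
    #include <stdint.h>

    /* One basic-RNN step for a single batch element:
     *   state = activation(input * weights' + state * recurrent_weights' + bias).
     * The scratch buffer keeps the previous state intact while the new one is
     * being computed. Illustrative only; not the NNAPI kernel. */
    static void rnn_step(const float* input,      /* [inputSize]           */
                         const float* weights,    /* [numUnits, inputSize] */
                         const float* recurrent,  /* [numUnits, numUnits]  */
                         const float* bias,       /* [numUnits]            */
                         float* state,            /* [numUnits], in/out    */
                         float* scratch,          /* [numUnits]            */
                         int32_t inputSize, int32_t numUnits) {
        for (int32_t u = 0; u < numUnits; ++u) {
            float acc = bias[u];
            for (int32_t i = 0; i < inputSize; ++i)
                acc += input[i] * weights[u * inputSize + i];
            for (int32_t j = 0; j < numUnits; ++j)
                acc += state[j] * recurrent[u * numUnits + j];
            scratch[u] = tanhf(acc);
        }
        for (int32_t u = 0; u < numUnits; ++u) state[u] = scratch[u];
    }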
+
+ /**
+ * Resizes images to given size using the nearest neighbor interpolation.
+ *
+ * Resized images will be distorted if their output aspect ratio is not the
+ * same as input aspect ratio. The corner pixels of output may not be the
+ * same as corner pixels of input.
+ *
+ * Supported tensor {@link OperandCode}:
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT16}
+ * * {@link ANEURALNETWORKS_TENSOR_FLOAT32}
+ * * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
+ *
+ * Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
+ * With the default data layout NHWC, the data is stored in the order of:
+ * [batch, height, width, channels]. Alternatively, the data layout could
+ * be NCHW, the data storage order of: [batch, channels, height, width].
+ *
+ * Both resizing by shape and resizing by scale are supported.
+ *
+ * Inputs (resizing by shape):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * width of the output tensor.
+ * * 2: An {@link ANEURALNETWORKS_INT32} scalar, specifying the output
+ * height of the output tensor.
+ * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Inputs (resizing by scale):
+ * * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
+ * the input. Zero batches is supported for this tensor.
+ * * 1: A scalar, specifying width_scale, the scaling factor of the width
+ * dimension from the input tensor to the output tensor. The output
+ * width is calculated as new_width = floor(width * width_scale).
+ * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
+ * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
+ * {@link ANEURALNETWORKS_FLOAT32} otherwise.
+ * * 2: A scalar, specifying height_scale, the scaling factor of the height
+ * dimension from the input tensor to the output tensor. The output
+ * height is calculated as new_height = floor(height * height_scale).
+ * The scalar must be of {@link ANEURALNETWORKS_FLOAT16} if input0 is
+ * of {@link ANEURALNETWORKS_TENSOR_FLOAT16} and of
+ * {@link ANEURALNETWORKS_FLOAT32} otherwise.
+ * * 3: An {@link ANEURALNETWORKS_BOOL} scalar, default to false.
+ * Set to true to specify NCHW data layout for input0 and output0.
+ *
+ * Outputs:
+ * * 0: The output 4-D tensor, of shape
+ * [batches, new_height, new_width, depth].
+ * For a {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
+ *
+ * Available since API level 29.
+ */
+ ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR = 94,
+} OperationCode;
+
+/**
+ * Fused activation function types.
+ *
+ *
+ * Available since API level 27.
+ */
+typedef enum {
+ /** NO fused activation function. */
+ ANEURALNETWORKS_FUSED_NONE = 0,
+ /** Fused ReLU activation function. */
+ ANEURALNETWORKS_FUSED_RELU = 1,
+ /** Fused ReLU1 activation function. */
+ ANEURALNETWORKS_FUSED_RELU1 = 2,
+ /** Fused ReLU6 activation function. */
+ ANEURALNETWORKS_FUSED_RELU6 = 3,
+} FuseCode;
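
As a hedged illustration only: applied to an operation's output, these codes correspond to the usual NNAPI clamp ranges (RELU clamps below at 0, RELU1 to [-1, 1], RELU6 to [0, 6]). Those ranges are stated in the per-operation documentation rather than in this enum, so treat the sketch as an assumption.

    /* Applies a FuseCode activation to a single float value. Illustrative only. */
    static float apply_fused_activation(int32_t fuse_code, float v) {
        switch (fuse_code) {
            case ANEURALNETWORKS_FUSED_RELU:
                return v < 0.f ? 0.f : v;
            case ANEURALNETWORKS_FUSED_RELU1:
                return v < -1.f ? -1.f : (v > 1.f ? 1.f : v);
            case ANEURALNETWORKS_FUSED_RELU6:
                return v < 0.f ? 0.f : (v > 6.f ? 6.f : v);
            case ANEURALNETWORKS_FUSED_NONE:
            default:
                return v;
        }
    }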
+
+/**
+ * Implicit padding algorithms.
+ *
+ *
+ * Available since API level 27.
+ */
+typedef enum {
+ /**
+ * SAME padding.
+ * Padding on both ends is the "same":
+ * padding_to_beginning = total_padding / 2
+ * padding_to_end = (total_padding + 1)/2.
+ * i.e., for an even amount of total padding, the padding on both ends is
+ * exactly the same; for an odd amount, the padding at the end is bigger
+ * than the padding at the beginning by 1 (see the sketch after this enum).
+ *
+ * total_padding is a function of input, stride, dilation and filter size.
+ * It could be computed as follows:
+ * out_size = (input + stride - 1) / stride
+ * effective_filter_size = (filter_size - 1) * dilation + 1
+ * needed_input = (out_size - 1) * stride + effective_filter_size
+ * total_padding = max(0, needed_input - input_size)
+ * The computation is the same for the horizontal and vertical directions.
+ */
+ ANEURALNETWORKS_PADDING_SAME = 1,
+
+ /**
+ * VALID padding.
+ * No padding. When the input size is not evenly divisible by
+ * the filter size, the input at the end that could not fill
+ * the whole filter tile will simply be ignored.
+ */
+ ANEURALNETWORKS_PADDING_VALID = 2,
+} PaddingCode;
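
The SAME-padding formulas above translate directly into code. Here is a minimal sketch of the computation for one spatial dimension; the helper name and signature are illustrative, not an NNAPI entry point.

    #include <stdint.h>

    /* Computes SAME padding for one spatial dimension, following the formulas
     * in the ANEURALNETWORKS_PADDING_SAME documentation above. */
    static void same_padding(int32_t input_size, int32_t stride, int32_t dilation,
                             int32_t filter_size, int32_t* pad_begin,
                             int32_t* pad_end) {
        const int32_t out_size = (input_size + stride - 1) / stride;
        const int32_t effective_filter_size = (filter_size - 1) * dilation + 1;
        const int32_t needed_input =
            (out_size - 1) * stride + effective_filter_size;
        const int32_t total_padding =
            needed_input > input_size ? needed_input - input_size : 0;
        *pad_begin = total_padding / 2;      /* padding_to_beginning */
        *pad_end = (total_padding + 1) / 2;  /* padding_to_end, larger by one
                                              * when total_padding is odd */
    }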
+
/**
* Execution preferences.
+ *
+ * Available since API level 27.
*/
typedef enum {
/**
@@ -1775,27 +4920,106 @@ typedef enum {
} PreferenceCode;
/**
+ * Device types.
+ *
+ * The type of NNAPI device.
+ */
+typedef enum {
+ /** The device type cannot be provided. */
+ ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
+ /** The device does not fall into any category below. */
+ ANEURALNETWORKS_DEVICE_OTHER = 1,
+ /** The device runs NNAPI models on single or multi-core CPU. */
+ ANEURALNETWORKS_DEVICE_CPU = 2,
+ /** The device can run NNAPI models and also accelerate graphics APIs such
+ * as OpenGL ES and Vulkan. */
+ ANEURALNETWORKS_DEVICE_GPU = 3,
+ /** Dedicated accelerator for Machine Learning workloads. */
+ ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
+} DeviceTypeCode;
+
+/**
* Result codes.
+ *
+ *
- * It is also the application's responsibility to ensure that there are no other
- * uses of the model after calling {@link ANeuralNetworksModel_free}.
- * This includes any compilation or execution object created using the model.
+ * It is also the application's responsibility to ensure that there are no
+ * other uses of the model after calling {@link ANeuralNetworksModel_free}.
+ * This includes any compilation, execution object or burst object created using
+ * the model.
+ *
+ * Available since API level 27.
  */
 typedef struct ANeuralNetworksModel ANeuralNetworksModel;
@@ -1854,12 +5101,16 @@ typedef struct ANeuralNetworksModel ANeuralNetworksModel;
  *
  * To use:
  * It is also the application's responsibility to ensure that there are no other
  * uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
- * This includes any execution object created using the compilation.
+ * This includes any execution object or burst object created using the compilation.
+ *
+ * Available since API level 27.
  */
 typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
@@ -1893,9 +5146,13 @@ typedef struct ANeuralNetworksCompilation ANeuralNetworksCompilation;
- * An execution cannot be modified once {@link ANeuralNetworksExecution_startCompute}
- * has been called on it.
+ * An execution cannot be modified once
+ * {@link ANeuralNetworksExecution_burstCompute},
+ * {@link ANeuralNetworksExecution_compute} or
+ * {@link ANeuralNetworksExecution_startCompute} has been called on it.
  *
  * An execution can be applied to a model with
- * {@link ANeuralNetworksExecution_startCompute} only once. Create new executions
- * to do new evaluations of the model.
+ * {@link ANeuralNetworksExecution_burstCompute},
+ * {@link ANeuralNetworksExecution_compute} or
+ * {@link ANeuralNetworksExecution_startCompute} only once. Create new
+ * executions to do new evaluations of the model.
  *
  * It is the application's responsibility to make sure that only one thread
  * modifies an execution at a given time. It is however safe for more than one
  * thread to use {@link ANeuralNetworksEvent_wait} at the same time.
  *
+ * It is also the application's responsibility to ensure that the execution
+ * either has never been scheduled or has completed (i.e., that
+ * {@link ANeuralNetworksExecution_burstCompute},
+ * {@link ANeuralNetworksExecution_compute}, or
+ * {@link ANeuralNetworksEvent_wait} has returned) before calling
+ * {@link ANeuralNetworksExecution_free}.
+ *
  * It is also the application's responsibility to ensure that there are no other
  * uses of the execution after calling {@link ANeuralNetworksExecution_free}.
+ *
+ * Multiple executions can be scheduled and evaluated concurrently, either by
+ * means of {@link ANeuralNetworksExecution_compute} or
+ * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) in
+ * different threads, or by means of
+ * {@link ANeuralNetworksExecution_startCompute} (which is asynchronous).
+ * (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
+ * different burst objects.) The runtime makes no guarantee on the ordering of
+ * completion of executions. If it's important to the application, the
+ * application should enforce the ordering by ensuring that one execution
+ * completes before the next is scheduled (for example, by scheduling all
+ * executions synchronously within a single thread, or by scheduling all
+ * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
+ * calls to {@link ANeuralNetworksExecution_startCompute}).
+ * + * Available since API level 27. */ typedef struct ANeuralNetworksExecution ANeuralNetworksExecution; +#if __ANDROID_API__ >= __ANDROID_API_Q__ +/** + * Parameters for ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL operand. + */ +typedef struct ANeuralNetworksSymmPerChannelQuantParams { + /* The index of the channel dimension. */ + uint32_t channelDim; + /** The size of the scale array. Should be equal to dimension[channelDim] of the Operand. */ + uint32_t scaleCount; + /** The array of scaling values for each channel. Each value must be greater than zero. */ + const float* scales; +} ANeuralNetworksSymmPerChannelQuantParams; + +/** + * ANeuralNetworksBurst is an opaque type that can be used to reduce the latency + * of a rapid sequence of executions. It will likely cause overhead if only used + * for a single execution. + * + * ANeuralNetworksBurst serves as a context object for any number of inferences + * using {@link ANeuralNetworksExecution} objects. An ANeuralNetworksBurst + * object and the {@link ANeuralNetworksExecution} objects used with it must all + * have been created from the same {@link ANeuralNetworksCompilation} object. + * + * This object is also used as a hint to drivers, providing insight to the + * lifetime of a rapid sequence of executions. For example, a driver may choose + * to increase the clock frequency of its accelerator for the lifetime of a + * burst object. + * + *To use:
Schedules synchronous evaluation of the execution. Returns once the + * execution has completed and the outputs are ready to be consumed. + *
+ * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * See {@link ANeuralNetworksExecution_startCompute} for asynchronous execution. + * Synchronous execution incurs lower overhead than asynchronous execution. + * + * Available since API level 29. + * + * @param execution The execution to be scheduled and executed. + * + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot + * be properly mapped. + */ +int ANeuralNetworksExecution_compute(ANeuralNetworksExecution* execution) __INTRODUCED_IN(29); + +/** + * Get the dimensional information of the specified output operand of the model of the + * {@link ANeuralNetworksExecution}. + * + * On asynchronous execution initiated by {@link ANeuralNetworksExecution_startCompute}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function to recuperate + * the resources used by the execution. + * + * @param execution The execution to be queried. + * @param index The index of the output argument we are querying. It is + * an index into the lists passed to + * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param rank The rank of the output operand. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE + * if the target output is provided an insufficient buffer at execution time, + * ANEURALNETWORKS_BAD_DATA if the index is invalid. + * + * Available since API level 29. + */ +int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution* execution, + int32_t index, uint32_t* rank) + __INTRODUCED_IN(29); + +/** + * Get the dimensional information of the specified output operand of the model of the + * {@link ANeuralNetworksExecution}. The target output operand cannot be a scalar. + * + * On asynchronous execution initiated by {@link ANeuralNetworksExecution_startCompute}, + * {@link ANeuralNetworksEvent_wait} must be called prior to this function to recuperate + * the resources used by the execution. + * + * @param execution The execution to be queried. + * @param index The index of the output argument we are querying. It is an index into the lists + * passed to {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not + * the index associated with {@link ANeuralNetworksModel_addOperand}. + * @param dimensions The dimension array to be filled. The size of the array must be exactly as + * large as the rank of the output operand to be queried in the model. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE + * if the target output is provided an insufficient buffer at execution time, + * ANEURALNETWORKS_BAD_DATA if the index is invalid or if the target is a scalar. + * + * Available since API level 29. + */ +int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution* execution, + int32_t index, uint32_t* dimensions) + __INTRODUCED_IN(29); + +/** + * Create a {@link ANeuralNetworksBurst} to apply the given compilation. + * This only creates the burst object. Computation is only performed once + * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid + * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}. + * + *The provided compilation must outlive the burst object.
+ * + * Available since API level 29. + * + * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. + * @param burst The newly created object or NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA + * if the compilation is invalid. + */ +int ANeuralNetworksBurst_create(ANeuralNetworksCompilation* compilation, + ANeuralNetworksBurst** burst) __INTRODUCED_IN(29); + +/** + * Destroys the burst object. + * + * Available since API level 29. + * + * @param burst The burst object to be destroyed. Passing NULL is acceptable and + * results in no operation. + */ +void ANeuralNetworksBurst_free(ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); + +/** + * Schedule synchronous evaluation of the execution on a burst object. + * + *Schedules synchronous evaluation of the execution. Returns once the + * execution has completed and the outputs are ready to be consumed.
+ * + *There must be at most one {@link ANeuralNetworksExecution} processing at + * any given time for any given burst object. Any + * {@link ANeuralNetworksExecution} launched before the previous has finished + * will result in ANEURALNETWORKS_BAD_STATE.
+ * + * Available since API level 29. + * + * @param burst The burst object to execute on. + * @param execution The execution to be scheduled and executed. The execution + * must be created from the same {@link + * ANeuralNetworksCompilation} as the burst object. + * + * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + */ +int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution* execution, + ANeuralNetworksBurst* burst) __INTRODUCED_IN(29); + +/** + * Creates a shared memory object from an AHardwareBuffer handle. + * + * If the shared memory is backed by an AHardwareBuffer of AHARDWAREBUFFER_FORMAT_BLOB + * format, it can be used the same way as shared memory created from a file handle. See + * {@link ANeuralNetworksMemory} for a description on how to use this shared memory. + * + * If the shared memory is backed by an AHardwareBuffer of a format other than + * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and outputs. + * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or + * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory, both + * offset and length must be set to zero and the entire memory region will be + * associated with the specified input or output operand. There is no guarantee + * that an arbitrary AHardwareBuffer_Format and AHardwareBuffer_UsageFlags combination + * can be used by arbitrary devices. The execution will fail if selected set of devices + * cannot consume the buffer. + * + * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared memory + * backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is + * disallowed. + * + * Available since API level 29. + * + * @param ahwb The AHardwareBuffer handle. + * @param memory The memory object to be created. + * Set to NULL if unsuccessful. + * + * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. + * + * @see AHardwareBuffer + */ +int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer* ahwb, + ANeuralNetworksMemory** memory) + __INTRODUCED_IN(29); + +/** + + * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be + * measured. Evaluation of the execution must not have been scheduled. + * + * By default, duration is not measured. + * + * The {@link ANeuralNetworksExecution} must have been created with + * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1. + * + * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 29. + * + * @param execution The execution to be modified. + * @param measure 'true' if duration is to be measured, 'false' if not. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution* execution, bool measure) + __INTRODUCED_IN(29); + +/** + * Different duration measurements. + * + * Durations are measured in nanoseconds. + * + * Available since API level 29. + */ +typedef enum { + // Execution time on hardware (not driver, which runs on host processor). + ANEURALNETWORKS_DURATION_ON_HARDWARE = 0, + // Execution time in driver (including time on hardware). Excludes overhead + // such as that of the runtime itself and the IPC needed for the runtime to + // communicate with the driver. + ANEURALNETWORKS_DURATION_IN_DRIVER = 1, +} DurationCode; + +/** + * Get the time spent in the specified {@link ANeuralNetworksExecution}, in nanoseconds. 
+ * The execution must have completed. + * + * Available since API level 29. + * + * @param execution The execution to be queried. + * @param durationCode The measurement to be queried, specified by {@link DurationCode}. + * @param duration The returned duration. If no measurement was requested by + * {@link ANeuralNetworksExecution_setMeasureTiming}, or for some other + * reason the duration is not available, UINT64_MAX will be returned. + * A particular device need not support any given measurement. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution* execution, + int32_t durationCode, uint64_t* duration) + __INTRODUCED_IN(29); + +#endif // __ANDROID_API__ >= __ANDROID_API_Q__ + +#if __ANDROID_API__ >= 27 /** * Creates a shared memory object from a file descriptor. @@ -1992,6 +5764,8 @@ typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; * See {@link ANeuralNetworksMemory} for a description on how to use * this shared memory. * + * Available since API level 27. + * * @param size The requested size in bytes. * Must not be larger than the file size. * @param prot The desired memory protection for the mapping. @@ -2008,7 +5782,7 @@ typedef struct ANeuralNetworksEvent ANeuralNetworksEvent; * @return ANEURALNETWORKS_NO_ERROR if the request completed normally. */ int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, - ANeuralNetworksMemory** memory); + ANeuralNetworksMemory** memory) __INTRODUCED_IN(27); /** * Delete a memory object. @@ -2017,14 +5791,18 @@ int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t * This will free the underlying actual memory if no other code has open * handles to this memory. * - * @param memory The memory object to be freed. + * Available since API level 27. + * + * @param memory The memory object to be freed. Passing NULL is acceptable and + * results in no operation. */ -void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory); +void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27); /** * Create an empty {@link ANeuralNetworksModel}. * *This only creates the object. Computation is performed once + * {@link ANeuralNetworksExecution_compute} or * {@link ANeuralNetworksExecution_startCompute} is invoked. * * The model should be constructed with calls to @@ -2037,12 +5815,14 @@ void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory); *
{@link ANeuralNetworksModel_free} should be called once the model * is no longer needed.
* + * Available since API level 27. + * * @param model The {@link ANeuralNetworksModel} to be created. * Set to NULL if unsuccessful. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ -int ANeuralNetworksModel_create(ANeuralNetworksModel** model); +int ANeuralNetworksModel_create(ANeuralNetworksModel** model) __INTRODUCED_IN(27); /** * Destroy a model. @@ -2052,27 +5832,32 @@ int ANeuralNetworksModel_create(ANeuralNetworksModel** model); * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * + * Available since API level 27. + * * @param model The model to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksModel_free(ANeuralNetworksModel* model); +void ANeuralNetworksModel_free(ANeuralNetworksModel* model) __INTRODUCED_IN(27); /** * Indicate that we have finished modifying a model. Required before - * calling {@link ANeuralNetworksCompilation_create}. + * calling {@link ANeuralNetworksCompilation_create} and + * {@link ANeuralNetworksCompilation_createForDevices}. * - * An application is responsible to make sure that no other thread uses - * the model at the same time. + * An application must ensure that no other thread uses the model at the same + * time. * * This function must only be called once for a given model. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * + * Available since API level 27. + * * @param model The model to be finished. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ -int ANeuralNetworksModel_finish(ANeuralNetworksModel* model); +int ANeuralNetworksModel_finish(ANeuralNetworksModel* model) __INTRODUCED_IN(27); /** * Add an operand to a model. @@ -2114,6 +5899,8 @@ int ANeuralNetworksModel_finish(ANeuralNetworksModel* model); * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * + * Available since API level 27. + * * @param model The model to be modified. * @param type The {@link ANeuralNetworksOperandType} that describes the shape * of the operand. Neither the {@link ANeuralNetworksOperandType} @@ -2123,7 +5910,7 @@ int ANeuralNetworksModel_finish(ANeuralNetworksModel* model); * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, - const ANeuralNetworksOperandType* type); + const ANeuralNetworksOperandType* type) __INTRODUCED_IN(27); /** * Sets an operand to a constant value. @@ -2132,11 +5919,13 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES} * are immediately copied into the model. * - * For values of length greater than {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, - * a pointer to the buffer is stored within the model. The application is responsible - * for not changing the content of this region until all executions using this model - * have completed. As the data may be copied during processing, modifying the data - * after this call yields undefined results. + * For values of length greater than + * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to + * the buffer is stored within the model. The application must not change the + * content of this region until all executions using this model have + * completed. As the data may be copied during processing, modifying the data + * after this call yields undefined results. The provided buffer must outlive + * this model. 
* * For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory} * is likely to be more efficient. @@ -2149,6 +5938,8 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * + * Available since API level 27. + * * @param model The model to be modified. * @param index The index of the model operand we're setting. * @param buffer A pointer to the data to use. @@ -2157,24 +5948,60 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index, - const void* buffer, size_t length); + const void* buffer, size_t length) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= __ANDROID_API_Q__ + +/** + * Sets an operand's per channel quantization parameters. + * + * Sets parameters required by a tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}. + * This function must be called for every tensor of type + * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before + * calling {@link ANeuralNetworksModel_finish}. + * + * Available since API level 29. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param channelQuant The per channel quantization parameters for the operand. + * No memory in this struct needs to outlive the call to + * this function. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams( + ANeuralNetworksModel* model, int32_t index, + const ANeuralNetworksSymmPerChannelQuantParams* channelQuant) __INTRODUCED_IN(29); + +#endif // __ANDROID_API__ >= __ANDROID_API_Q__ /** * Sets an operand to a value stored in a memory object. * * The content of the memory is not copied. A reference to that memory is stored - * inside the model. The application is responsible for not changing the content - * of the memory region until all executions using this model have completed. - * As the data may be copied during processing, modifying the data after this call - * yields undefined results. + * inside the model. The application must not change the content of the memory + * region until all executions using this model have completed. As the data may + * be copied during processing, modifying the data after this call yields + * undefined results. + * + *The provided memory must outlive this model.
* * To indicate that an optional operand should be considered missing, * use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer. * + * Is disallowed to set an operand value with shared memory backed by an AHardwareBuffer + * of a format other than AHARDWAREBUFFER_FORMAT_BLOB. + * * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * See {@link ANeuralNetworksMemory_createFromAHardwarBuffer} for information on + * AHardwareBuffer usage. + * + * Available since API level 27. * * @param model The model to be modified. * @param index The index of the model operand we're setting. @@ -2188,7 +6015,8 @@ int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t in */ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksMemory* memory, - size_t offset, size_t length); + size_t offset, size_t length) + __INTRODUCED_IN(27); /** * Add an operation to a model. @@ -2208,12 +6036,14 @@ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * + * Available since API level 27. + * * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs); + const uint32_t* outputs) __INTRODUCED_IN(27); /** * Specifies which operands will be the model's inputs and @@ -2236,10 +6066,14 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, * * See {@link ANeuralNetworksModel} for information on multithreaded usage. * + * Available since API level 27. + * */ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, - const uint32_t* outputs); + const uint32_t* outputs) __INTRODUCED_IN(27); + +#if __ANDROID_API__ >= 28 /** * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be @@ -2259,9 +6093,14 @@ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, u * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been * called will return an error. * + * Available since API level 28. + * * See {@link ANeuralNetworksModel} for information on multithreaded usage. */ -int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow); +int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* model, bool allow) + __INTRODUCED_IN(28); + +#endif // __ANDROID_API__ >= 28 /** * Create a {@link ANeuralNetworksCompilation} to compile the given model. @@ -2282,6 +6121,8 @@ int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * + * Available since API level 27. + * * @param model The {@link ANeuralNetworksModel} to be compiled. * @param compilation The newly created object or NULL if unsuccessful. * @@ -2289,20 +6130,22 @@ int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel* * if the model is invalid. 
*/ int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model, - ANeuralNetworksCompilation** compilation); + ANeuralNetworksCompilation** compilation) __INTRODUCED_IN(27); /** * Destroy a compilation. * * The compilation need not have been finished by a call to - * {@link ANeuralNetworksModel_finish}. + * {@link ANeuralNetworksCompilation_finish}. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * + * Available since API level 27. + * * @param compilation The compilation to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation); +void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); /** * Sets the execution preference. @@ -2311,6 +6154,8 @@ void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation); * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * + * Available since API level 27. + * * @param compilation The compilation to be modified. * @param preference Either {@link PREFER_LOW_POWER}, * {@link PREFER_SINGLE_FAST_ANSWER}, or @@ -2319,34 +6164,39 @@ void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation* compilation); * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation* compilation, - int32_t preference); + int32_t preference) __INTRODUCED_IN(27); /** * Indicate that we have finished modifying a compilation. Required before * calling {@link ANeuralNetworksExecution_create}. * - * An application is responsible to make sure that no other thread uses - * the compilation at the same time. + * An application must ensure that no other thread uses the compilation at the + * same time. * * This function must only be called once for a given compilation. * * See {@link ANeuralNetworksCompilation} for information on multithreaded usage. * + * Available since API level 27. + * * @param compilation The compilation to be finished. * * @return ANEURALNETWORKS_NO_ERROR if successful. */ -int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation); +int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation) __INTRODUCED_IN(27); /** * Create a {@link ANeuralNetworksExecution} to apply the given compilation. * This only creates the object. Computation is only performed once + * {@link ANeuralNetworksExecution_compute} or * {@link ANeuralNetworksExecution_startCompute} is invoked. * *The provided compilation must outlive the execution.
* * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * + * Available since API level 27. + * * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated. * @param execution The newly created object or NULL if unsuccessful. * @@ -2354,28 +6204,37 @@ int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation); * if the compilation is invalid. */ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, - ANeuralNetworksExecution** execution); + ANeuralNetworksExecution** execution) __INTRODUCED_IN(27); /** * Destroy an execution. * - *If called on an execution for which - * {@link ANeuralNetworksExecution_startCompute} has been called, the - * function will return immediately but will mark the execution to be deleted - * once the computation completes. The related {@link ANeuralNetworksEvent} - * will be signaled and the {@link ANeuralNetworksEvent_wait} will return - * ANEURALNETWORKS_ERROR_DELETED. + *
The execution need not have been scheduled by a call to + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, or + * {@link ANeuralNetworksExecution_startCompute}; but if it has been scheduled, + * then the application must not call {@link ANeuralNetworksExecution_free} + * until the execution has completed (i.e., + * {@link ANeuralNetworksExecution_burstCompute}, + * {@link ANeuralNetworksExecution_compute}, or + * {@link ANeuralNetworksEvent_wait} has returned). * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * + * Available since API level 27. + * * @param execution The execution to be destroyed. Passing NULL is acceptable and * results in no operation. */ -void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution); +void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution) __INTRODUCED_IN(27); /** * Associate a user buffer with an input of the model of the - * {@link ANeuralNetworksExecution}. + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the buffer until the execution has + * completed. Evaluation of the execution will not change the content of the + * buffer. * *
The provided buffer must outlive the execution.
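As a sketch of the buffer-based input path (input index 0 and the helper name prepareExecution are hypothetical; passing NULL for the operand type keeps the type declared when the model was built):

#include <android/NeuralNetworks.h>
#include <stddef.h>

/* Create an execution and bind a caller-owned input buffer to input 0.
 * The buffer must stay alive and unmodified until the execution completes. */
static int prepareExecution(ANeuralNetworksCompilation* compilation,
                            const float* inputData, size_t inputBytes,
                            ANeuralNetworksExecution** outExecution) {
    ANeuralNetworksExecution* execution = NULL;
    int status = ANeuralNetworksExecution_create(compilation, &execution);
    if (status != ANEURALNETWORKS_NO_ERROR) return status;

    /* NULL type: reuse the operand type given at model build time. */
    status = ANeuralNetworksExecution_setInput(execution, /*index=*/0, /*type=*/NULL,
                                               inputData, inputBytes);
    if (status != ANEURALNETWORKS_NO_ERROR) {
        ANeuralNetworksExecution_free(execution);
        execution = NULL;
    }
    *outExecution = execution;
    return status;
}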
* @@ -2384,6 +6243,8 @@ void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution); * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * + * Available since API level 27. + * * @param execution The execution to be modified. * @param index The index of the input argument we are setting. It is * an index into the lists passed to @@ -2408,19 +6269,27 @@ void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution); */ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, - size_t length); + size_t length) __INTRODUCED_IN(27); /** - * Associate part of a memory object with an input of the model of the - * {@link ANeuralNetworksExecution}. + * Associate a region of a memory object with an input of the model of the + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the region until the execution has + * completed. Evaluation of the execution will not change the content of the + * region. * *The provided memory must outlive the execution.
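A sketch of the memory-object variant, assuming the input tensor already lives in shared memory referenced by a file descriptor; the index, sizes, and helper name bindInputFromFd are hypothetical, and ANeuralNetworksMemory_createFromFd is the API level 27 constructor for such memory objects:

#include <android/NeuralNetworks.h>
#include <stddef.h>
#include <sys/mman.h>

/* Bind input 0 of an execution to a region of a shared-memory object.
 * 'fd' is assumed to refer to readable shared memory that already holds the
 * input tensor at byte offset 'offset'; 'length' is the tensor size in bytes. */
static int bindInputFromFd(ANeuralNetworksExecution* execution, int fd,
                           size_t memorySize, size_t offset, size_t length,
                           ANeuralNetworksMemory** outMemory) {
    ANeuralNetworksMemory* memory = NULL;
    int status = ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ, fd,
                                                    /*offset=*/0, &memory);
    if (status != ANEURALNETWORKS_NO_ERROR) return status;

    /* NULL type: keep the operand type declared in the model. The memory object
     * must outlive the execution, so it is handed back to the caller. */
    status = ANeuralNetworksExecution_setInputFromMemory(execution, /*index=*/0,
                                                         /*type=*/NULL, memory,
                                                         offset, length);
    if (status != ANEURALNETWORKS_NO_ERROR) {
        ANeuralNetworksMemory_free(memory);
        memory = NULL;
    }
    *outMemory = memory;
    return status;
}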
 * * If the input is optional, you can indicate that it is omitted by - * using {@link ANeuralNetworks_setInput} instead, passing nullptr for buffer - * and 0 for length. + * using {@link ANeuralNetworksExecution_setInput} instead, passing nullptr for + * buffer and 0 for length. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on + * AHardwareBuffer usage. + * + * Available since API level 27. * * @param execution The execution to be modified. * @param index The index of the input argument we are setting. It is @@ -2447,11 +6316,14 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32 int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, - size_t length); + size_t length) __INTRODUCED_IN(27); /** * Associate a user buffer with an output of the model of the - * {@link ANeuralNetworksExecution}. + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the buffer until the execution has + * completed. * * If the output is optional, you can indicate that it is omitted by * passing nullptr for buffer and 0 for length. @@ -2460,6 +6332,8 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecut * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * + * Available since API level 27. + * * @param execution The execution to be modified. * @param index The index of the output argument we are setting. It is * an index into the lists passed to @@ -2475,6 +6349,12 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecut * passed. Neither the {@link ANeuralNetworksOperandType} * nor the dimensions it points to need to outlive the call * to {@link ANeuralNetworksExecution_setOutput}. + * Since API level 29, the output operand can have unspecified + * dimensions or rank to be deduced dynamically during the execution. + * However, the user must provide a large enough buffer. The user + * can retrieve the output dimensional information after the execution + * by {@link ANeuralNetworksExecution_getOutputOperandRank} and + * {@link ANeuralNetworksExecution_getOutputOperandDimensions}. * @param buffer The buffer where the data is to be written. * @param length The length in bytes of the buffer. * @@ -2483,19 +6363,26 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecut */ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, void* buffer, - size_t length); + size_t length) __INTRODUCED_IN(27); /** - * Associate part of a memory object with an output of the model of the - * {@link ANeuralNetworksExecution}. + * Associate a region of a memory object with an output of the model of the + * {@link ANeuralNetworksExecution}. Evaluation of the execution must not have + * been scheduled. Once evaluation of the execution has been scheduled, the + * application must not change the content of the region until the execution has + * completed. * * If the output is optional, you can indicate that it is omitted by - * using {@link ANeuralNetworks_setOutput} instead, passing nullptr for buffer - * and 0 for length. 
+ * using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for + * buffer and 0 for length. * *The provided memory must outlive the execution.
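A sketch tying an output buffer to the API level 29 shape queries mentioned above (output index 0, the fixed-size dims array, and the helper name runAndQueryOutputShape are hypothetical; ANeuralNetworksExecution_compute is the synchronous counterpart to startCompute):

#include <android/NeuralNetworks.h>
#include <stddef.h>
#include <stdint.h>

/* Bind an output buffer, run synchronously, then query the output shape that
 * was deduced during execution (API level 29 and later). */
static int runAndQueryOutputShape(ANeuralNetworksExecution* execution,
                                  float* outBuffer, size_t outBytes,
                                  uint32_t dims[4], uint32_t* rank) {
    int status = ANeuralNetworksExecution_setOutput(execution, /*index=*/0,
                                                    /*type=*/NULL, outBuffer, outBytes);
    if (status != ANEURALNETWORKS_NO_ERROR) return status;

    status = ANeuralNetworksExecution_compute(execution);  /* synchronous evaluation */
    if (status != ANEURALNETWORKS_NO_ERROR) return status;

    status = ANeuralNetworksExecution_getOutputOperandRank(execution, 0, rank);
    if (status != ANEURALNETWORKS_NO_ERROR) return status;
    if (*rank > 4) return ANEURALNETWORKS_BAD_DATA;  /* dims[] only holds 4 entries here */
    return ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, dims);
}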
 * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * See {@link ANeuralNetworksMemory_createFromAHardwareBuffer} for information on + * AHardwareBuffer usage. + * + * Available since API level 27. * * @param execution The execution to be modified. * @param index The index of the output argument we are setting. It is @@ -2511,6 +6398,12 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int3 * passed. Neither the {@link ANeuralNetworksOperandType} * nor the dimensions it points to need to outlive the call * to {@link ANeuralNetworksExecution_setOutputFromMemory}. + * Since API level 29, the output operand can have unspecified + * dimensions or rank to be deduced dynamically during the execution. + * However, the user must provide a large enough memory. The user + * can retrieve the output dimensional information after the execution + * by {@link ANeuralNetworksExecution_getOutputOperandRank} and + * {@link ANeuralNetworksExecution_getOutputOperandDimensions}. * @param memory The memory where the data is to be stored. * @param offset This specifies the location of the data within the memory. * The offset is in bytes from the start of memory. @@ -2522,27 +6415,27 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int3 int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const ANeuralNetworksMemory* memory, size_t offset, - size_t length); + size_t length) __INTRODUCED_IN(27); /** - * Schedule evaluation of the execution. + * Schedule asynchronous evaluation of the execution. * - *Schedules evaluation of the execution. Once the model has been - * applied and the outputs are ready to be consumed, the returned event will be - * signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that event. + *
Schedules asynchronous evaluation of the execution. Once the model has + * been applied and the outputs are ready to be consumed, the returned event + * will be signaled. Use {@link ANeuralNetworksEvent_wait} to wait for that + * event. *
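A minimal sketch of this asynchronous path, assuming inputs and outputs have already been bound (the helper name computeAsync is hypothetical):

#include <android/NeuralNetworks.h>

/* Schedule the execution, then block on the returned event.
 * ANeuralNetworksEvent_wait() must be called to reclaim the resources used by
 * the execution, and the execution must not be freed before it has returned. */
static int computeAsync(ANeuralNetworksExecution* execution) {
    ANeuralNetworksEvent* event = NULL;
    int status = ANeuralNetworksExecution_startCompute(execution, &event);
    if (status != ANEURALNETWORKS_NO_ERROR) return status;

    /* ANEURALNETWORKS_NO_ERROR on normal completion; ANEURALNETWORKS_UNMAPPABLE
     * if an input or output memory cannot be mapped. */
    status = ANeuralNetworksEvent_wait(event);
    ANeuralNetworksEvent_free(event);
    return status;
}

For a single evaluation the synchronous ANeuralNetworksExecution_compute path incurs lower overhead, as noted below; startCompute is mainly useful when the caller wants to overlap other work with the evaluation.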
* - * Multiple executions can be scheduled and evaluated concurrently. The - * runtime makes no guarantee on the ordering of completion of - * executions. If it's important to the application, the application - * should enforce the ordering by using - * {@link ANeuralNetworksEvent_wait}. - * * ANeuralNetworksEvent_wait must be called to recuperate the resources used * by the execution. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * + * See {@link ANeuralNetworksExecution_compute} for synchronous execution. + * Synchronous execution incurs lower overhead than asynchronous execution. + * + * Available since API level 27. + * * @param execution The execution to be scheduled and executed. * @param event The event that will be signaled on completion. event is set to * NULL if there's an error. @@ -2550,7 +6443,7 @@ int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution* execu * @return ANEURALNETWORKS_NO_ERROR if successful. */ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, - ANeuralNetworksEvent** event); + ANeuralNetworksEvent** event) __INTRODUCED_IN(27); /** * Waits until the execution completes. @@ -2560,19 +6453,30 @@ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. * + * Available since API level 27. + * * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally. + * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory cannot + * be properly mapped. */ -int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event); +int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); /** * Destroys the event. * * See {@link ANeuralNetworksExecution} for information on multithreaded usage. + * + * Available since API level 27. + * + * @param event The event object to be destroyed. Passing NULL is acceptable and + * results in no operation. */ -void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event); +void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27); + +#endif // __ANDROID_API__ >= 27 __END_DECLS -#endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H /** @} */ diff --git a/runtimes/include/NeuralNetworksExtensions.h b/runtimes/include/NeuralNetworksExtensions.h new file mode 100644 index 0000000..429a1dc --- /dev/null +++ b/runtimes/include/NeuralNetworksExtensions.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2019 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H +#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H + +#include "NeuralNetworks.h" + +/****************************************************************** + * + * IMPORTANT NOTICE: + * + * This file is not intended for use by general developers -- only + * by OEM applications. 
+ * + * Extensions source AND binary code relies on the definitions + * here to be FROZEN ON ALL UPCOMING PLATFORM RELEASES. + * + * - DO NOT MODIFY ENUMS (EXCEPT IF YOU ADD NEW 32-BIT VALUES) + * - DO NOT MODIFY CONSTANTS OR FUNCTIONAL MACROS + * - DO NOT CHANGE THE SIGNATURE OF FUNCTIONS IN ANY WAY + * - DO NOT CHANGE THE LAYOUT OR SIZE OF STRUCTURES + */ + +__BEGIN_DECLS + +#if __ANDROID_API__ >= __ANDROID_API_Q__ + +/** + * Queries whether an extension is supported by the driver implementation of the specified device. + * + * @param device The representation of the specified device. + * @param extension The extension name. + * @param isExtensionSupported The boolean value indicating whether the extension is supported. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + * + * Available since API level 29. + */ +int ANeuralNetworksDevice_getExtensionSupport(const ANeuralNetworksDevice* device, + const char* extensionName, bool* isExtensionSupported) + __INTRODUCED_IN(29); + +/** + * Creates an operand type from an extension name and an extension operand code. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 29. + * + * @param model The model to contain the operand. + * @param extensionName The extension name. + * @param operandCodeWithinExtension The extension operand code. + * @param type The operand type. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_getExtensionOperandType(ANeuralNetworksModel* model, + const char* extensionName, + uint16_t operandCodeWithinExtension, int32_t* type) + __INTRODUCED_IN(29); + +/** + * Creates an operation type from an extension name and an extension operation code. + * + * See {@link ANeuralNetworksModel} for information on multithreaded usage. + * + * Available since API level 29. + * + * @param model The model to contain the operation. + * @param extensionName The extension name. + * @param operationCodeWithinExtension The extension operation code. + * @param type The operation type. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_getExtensionOperationType(ANeuralNetworksModel* model, + const char* extensionName, + uint16_t operationCodeWithinExtension, + ANeuralNetworksOperationType* type) + __INTRODUCED_IN(29); + +/** + * Sets extension operand parameters. + * + * Available since API level 29. + * + * @param model The model to be modified. + * @param index The index of the model operand we're setting. + * @param data A pointer to the extension operand data. + * The data does not have to outlive the call to this function. + * @param length The size in bytes of the data value. + * + * @return ANEURALNETWORKS_NO_ERROR if successful. + */ +int ANeuralNetworksModel_setOperandExtensionData(ANeuralNetworksModel* model, int32_t index, + const void* data, size_t length) + __INTRODUCED_IN(29); + +#endif // __ANDROID_API__ >= __ANDROID_API_Q__ + +__END_DECLS + +#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_EXTENSIONS_H -- 2.7.4
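To illustrate how the extension entry points above might fit together, a sketch that checks for a vendor extension on a device and resolves one of its operand types; the extension name, operand code, and helper name are placeholders, not values defined by any real extension:

#include "NeuralNetworksExtensions.h"
#include <stdbool.h>
#include <stdint.h>

/* Query whether a (hypothetical) extension is supported by 'device' and, if it
 * is, resolve one of its operand types for use when adding operands to 'model'. */
static int resolveExtensionOperandType(const ANeuralNetworksDevice* device,
                                       ANeuralNetworksModel* model, int32_t* outType) {
    const char* kExtensionName = "com.example.my_extension";  /* placeholder name */
    const uint16_t kMyOperandCode = 0;                        /* placeholder code */

    bool supported = false;
    int status = ANeuralNetworksDevice_getExtensionSupport(device, kExtensionName,
                                                           &supported);
    if (status != ANEURALNETWORKS_NO_ERROR) return status;
    if (!supported) return ANEURALNETWORKS_BAD_STATE;  /* caller falls back */

    return ANeuralNetworksModel_getExtensionOperandType(model, kExtensionName,
                                                        kMyOperandCode, outType);
}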