* @file NeuralNetworks.h
*/
-#ifndef ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
-#define ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
+#ifndef ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H
+#define ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H
/******************************************************************
*
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
* {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
+ * dimension (extraParams.channelQuant.channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
* {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
* {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
+ * dimension (extraParams.channelQuant.channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
* {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter. For tensor of type
* {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 3.
+ * dimension (extraParams.channelQuant.channelDim) must be set to 3.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
* {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias must be of the same
* cells between each filter element on height dimension. If this input is set,
* input 9 (dilation factor for width) must be specified as well.
* Available since API level 29.
+
*
* Outputs:
* * 0: The output 4-D tensor, of shape
// Operations below are available since API level 28.
+ // TODO: make the description easier to understand.
/**
* BatchToSpace for N-dimensional tensors.
*
*/
ANEURALNETWORKS_PAD = 32,
+ // TODO: make the description easier to understand.
/**
* SpaceToBatch for N-Dimensional tensors.
*
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
* {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
+ * dimension (extraParams.channelQuant.channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
* {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
* {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (ANeuralNetworksSymmPerChannelQuantParams::channelDim) must be set to 0.
+ * dimension (extraParams.channelQuant.channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link ANEURALNETWORKS_TENSOR_FLOAT32} or
* {@link ANEURALNETWORKS_TENSOR_FLOAT16}, the bias should be of the
* the same; for odd number of padding, padding to the ending is bigger
* than the padding to the beginning by 1.
*
- * total_padding is a function of input, stride, dilation and filter size.
+ * total_padding is a function of input, stride and filter size.
* It could be computed as follows:
- * out_size = (input + stride - 1) / stride
- * effective_filter_size = (filter_size - 1) * dilation + 1
- * needed_input = (out_size - 1) * stride + effective_filter_size
+ * out_size = (input + stride - 1) / stride
+ * needed_input = (out_size - 1) * stride + filter_size
* total_padding = max(0, needed_input - input_size)
* The computation is the same for the horizontal and vertical directions.
*/
* of the element type byte size, e.g., a tensor with
* {@link ANEURALNETWORKS_TENSOR_FLOAT32} type must be aligned on 4-byte boundary.
*
- * It is the application's responsibility to ensure that there are no uses of
- * the memory after calling {@link ANeuralNetworksMemory_free}. This includes
- * any model which references this memory because of a call to
- * {@link ANeuralNetworksModel_setOperandValueFromMemory}, any compilation
- * created using such a model, any execution object or burst object created
- * using such a compilation, or any execution which references this memory
- * because of a call to {@link ANeuralNetworksExecution_setInputFromMemory} or
- * {@link ANeuralNetworksExecution_setOutputFromMemory}.
- *
* Available since API level 27.
*/
typedef struct ANeuralNetworksMemory ANeuralNetworksMemory;
* modifies a model at a given time. It is however safe for more than one
* thread to use the model once {@link ANeuralNetworksModel_finish} has returned.</p>
*
- * <p>It is also the application's responsibility to ensure that there are no
- * other uses of the model after calling {@link ANeuralNetworksModel_free}.
- * This includes any compilation, execution object or burst object created using
- * the model.</p>
+ * <p>It is also the application's responsibility to ensure that there are no other
+ * uses of the model after calling {@link ANeuralNetworksModel_free}.
+ * This includes any compilation or execution object created using the model.</p>
*
* Available since API level 27.
*/
*
* <p>It is also the application's responsibility to ensure that there are no other
* uses of the compilation after calling {@link ANeuralNetworksCompilation_free}.
- * This includes any execution object or burst object created using the compilation.</p>
+ * This includes any execution object created using the compilation.</p>
*
* Available since API level 27.
*/
* ({@link ANeuralNetworksModel_setOperandValueFromMemory}).</p>
*
* <p>An execution cannot be modified once
- * {@link ANeuralNetworksExecution_burstCompute},
* {@link ANeuralNetworksExecution_compute} or
* {@link ANeuralNetworksExecution_startCompute} has been called on it.</p>
*
* <p>An execution can be applied to a model with
- * {@link ANeuralNetworksExecution_burstCompute},
* {@link ANeuralNetworksExecution_compute} or
* {@link ANeuralNetworksExecution_startCompute} only once. Create new
* executions to do new evaluations of the model.</p>
* modifies an execution at a given time. It is however safe for more than one
* thread to use {@link ANeuralNetworksEvent_wait} at the same time.</p>
*
- * <p>It is also the application's responsibility to ensure that the execution
- * either has never been scheduled or has completed (i.e., that
- * {@link ANeuralNetworksExecution_burstCompute},
- * {@link ANeuralNetworksExecution_compute}, or
- * {@link ANeuralNetworksEvent_wait} has returned) before calling
- * {@link ANeuralNetworksExecution_free}.</p>.
- *
* <p>It is also the application's responsibility to ensure that there are no other
* uses of the execution after calling {@link ANeuralNetworksExecution_free}.</p>
*
* <p>Multiple executions can be scheduled and evaluated concurrently, either by
- * means of {@link ANeuralNetworksExecution_compute} or
- * {@link ANeuralNetworksExecution_burstCompute} (which are synchronous) in
- * different threads, or by means of
- * {@link ANeuralNetworksExecution_startCompute} (which is asynchronous).
- * (Concurrent uses of {@link ANeuralNetworksExecution_burstCompute} must be on
- * different burst objects.) The runtime makes no guarantee on the ordering of
- * completion of executions. If it's important to the application, the
- * application should enforce the ordering by ensuring that one execution
- * completes before the next is scheduled (for example, by scheduling all
- * executions synchronously within a single thread, or by scheduling all
- * executions asynchronously and using {@link ANeuralNetworksEvent_wait} between
- * calls to {@link ANeuralNetworksExecution_startCompute}).</p>
+ * means of {@link ANeuralNetworksExecution_compute} (which is synchronous) in
+ * different threads or by means of
+ * {@link ANeuralNetworksExecution_startCompute} (which is asynchronous). The
+ * runtime makes no guarantee on the ordering of completion of executions. If
+ * it's important to the application, the application should enforce the
+ * ordering by ensuring that one execution completes before the next is
+ * scheduled (for example, by scheduling all executions synchronously within a
+ * single thread, or by scheduling all executions asynchronously and using
+ * {@link ANeuralNetworksEvent_wait} between calls to
+ * {@link ANeuralNetworksExecution_startCompute}).</p>
*
* Available since API level 27.
*/
* data. It is recommended to use the code cache directory provided
* by the Android runtime. If not using the code cache directory, the
* user should choose a directory local to the application, and is
- * responsible for managing the cache entries.
 * responsible for managing the cache entries.
* @param token The token provided by the user to specify a model must be of length
* ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should ensure that
* the token is unique to a model within the application. The NNAPI
* backed by an AHardwareBuffer of a format other than AHARDWAREBUFFER_FORMAT_BLOB is
* disallowed.
*
+ * TODO(miaowang): add documentation about intended usage with introspection API.
+ *
* Available since API level 29.
*
* @param ahwb The AHardwareBuffer handle.
*
* Available since API level 27.
*
- * @param memory The memory object to be freed. Passing NULL is acceptable and
- * results in no operation.
+ * @param memory The memory object to be freed.
*/
void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory) __INTRODUCED_IN(27);
* calling {@link ANeuralNetworksCompilation_create} and
* {@link ANeuralNetworksCompilation_createForDevices}.
*
- * An application must ensure that no other thread uses the model at the same
- * time.
+ * An application is responsible for making sure that no other thread uses
+ * the model at the same time.
*
* This function must only be called once for a given model.
*
* {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}
* are immediately copied into the model.
*
- * For values of length greater than
- * {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES}, a pointer to
- * the buffer is stored within the model. The application must not change the
- * content of this region until all executions using this model have
- * completed. As the data may be copied during processing, modifying the data
- * after this call yields undefined results. The provided buffer must outlive
- * this model.
+ * For values of length greater than {@link ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES},
+ * a pointer to the buffer is stored within the model. The application is responsible
+ * for not changing the content of this region until all executions using this model
+ * have completed. As the data may be copied during processing, modifying the data
+ * after this call yields undefined results.
*
* For large tensors, using {@link ANeuralNetworksModel_setOperandValueFromMemory}
* is likely to be more efficient.
* Sets an operand to a value stored in a memory object.
*
* The content of the memory is not copied. A reference to that memory is stored
- * inside the model. The application must not change the content of the memory
- * region until all executions using this model have completed. As the data may
- * be copied during processing, modifying the data after this call yields
- * undefined results.
- *
- * <p>The provided memory must outlive this model.</p>
+ * inside the model. The application is responsible for not changing the content
+ * of the memory region until all executions using this model have completed.
+ * As the data may be copied during processing, modifying the data after this call
+ * yields undefined results.
*
* To indicate that an optional operand should be considered missing,
* use {@link ANeuralNetworksModel_setOperandValue} instead, passing nullptr for buffer.
* Destroy a compilation.
*
* The compilation need not have been finished by a call to
- * {@link ANeuralNetworksCompilation_finish}.
+ * {@link ANeuralNetworksCompilation_finish}.
*
* See {@link ANeuralNetworksCompilation} for information on multithreaded usage.
*
* Indicate that we have finished modifying a compilation. Required before
* calling {@link ANeuralNetworksExecution_create}.
*
- * An application must ensure that no other thread uses the compilation at the
- * same time.
+ * An application is responsible for making sure that no other thread uses
+ * the compilation at the same time.
*
* This function must only be called once for a given compilation.
*
/**
* Destroy an execution.
*
- * <p>The execution need not have been scheduled by a call to
- * {@link ANeuralNetworksExecution_burstCompute},
- * {@link ANeuralNetworksExecution_compute}, or
- * {@link ANeuralNetworksExecution_startCompute}; but if it has been scheduled,
- * then the application must not call {@link ANeuralNetworksExecution_free}
- * until the execution has completed (i.e.,
- * {@link ANeuralNetworksExecution_burstCompute},
- * {@link ANeuralNetworksExecution_compute}, or
- * {@link ANeuralNetworksEvent_wait} has returned).
+ * <p>If called on an execution for which
+ * {@link ANeuralNetworksExecution_startCompute} has been called, the
+ * function will return immediately but will mark the execution to be deleted
+ * once the computation completes. The related {@link ANeuralNetworksEvent}
+ * will be signaled and {@link ANeuralNetworksEvent_wait} will return
+ * ANEURALNETWORKS_ERROR_DELETED.
*
* See {@link ANeuralNetworksExecution} for information on multithreaded usage.
*
/**
* Associate a user buffer with an input of the model of the
* {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
- * been scheduled. Once evaluation of the execution has been scheduled, the
- * application must not change the content of the buffer until the execution has
- * completed. Evaluation of the execution will not change the content of the
- * buffer.
+ * been scheduled.
*
* <p>The provided buffer must outlive the execution.</p>
*
size_t length) __INTRODUCED_IN(27);
/**
- * Associate a region of a memory object with an input of the model of the
+ * Associate part of a memory object with an input of the model of the
* {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
- * been scheduled. Once evaluation of the execution has been scheduled, the
- * application must not change the content of the region until the execution has
- * completed. Evaluation of the execution will not change the content of the
- * region.
+ * been scheduled.
*
* <p>The provided memory must outlive the execution.</p>
*
/**
* Associate a user buffer with an output of the model of the
* {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
- * been scheduled. Once evaluation of the execution has been scheduled, the
- * application must not change the content of the buffer until the execution has
- * completed.
+ * been scheduled.
*
* If the output is optional, you can indicate that it is omitted by
* passing nullptr for buffer and 0 for length.
size_t length) __INTRODUCED_IN(27);
/**
- * Associate a region of a memory object with an output of the model of the
+ * Associate part of a memory object with an output of the model of the
* {@link ANeuralNetworksExecution}. Evaluation of the execution must not have
- * been scheduled. Once evaluation of the execution has been scheduled, the
- * application must not change the content of the region until the execution has
- * completed.
+ * been scheduled.
*
* If the output is optional, you can indicate that it is omitted by
* using {@link ANeuralNetworksExecution_setOutput} instead, passing nullptr for
* See {@link ANeuralNetworksExecution} for information on multithreaded usage.
*
* Available since API level 27.
- *
- * @param event The event object to be destroyed. Passing NULL is acceptable and
- * results in no operation.
*/
void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event) __INTRODUCED_IN(27);
__END_DECLS
-#endif // ANDROID_FRAMEWORKS_ML_NN_RUNTIME_NEURAL_NETWORKS_H
+#endif // ANDROID_ML_NN_RUNTIME_NEURAL_NETWORKS_H
/** @} */