/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// NOTE This header is derived from part of the following file
// https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/lite/nnapi/NeuralNetworksShim.h

#ifndef __NEURAL_NETWORKS_SHIM_H__
#define __NEURAL_NETWORKS_SHIM_H__

#include "NeuralNetworksTypes.h"
#include "NeuralNetworksLoadHelpers.h"

// This interface is now deprecated. You should use instead
// nnapi_implementation.

// TODO(b/123017568): Update all current usages of this file.

// NN api types based on NNAPI header file
// https://developer.android.com/ndk/reference/group/neural-networks
36 * Creates a shared memory object from a file descriptor.
38 * The shared memory is backed by a file descriptor via mmap.
39 * See {@link ANeuralNetworksMemory} for a description on how to use
42 * @param size The requested size in bytes.
43 * Must not be larger than the file size.
44 * @param prot The desired memory protection for the mapping.
45 * It is either PROT_NONE or the bitwise OR of one or
46 * more of the following flags: PROT_READ, PROT_WRITE.
47 * @param fd The requested file descriptor.
48 * The file descriptor has to be mmap-able. The file
49 * descriptor will be duplicated.
50 * @param offset The offset to the beginning of the file of the area to map.
51 * The offset has to be aligned to a page size.
52 * @param memory The memory object to be created.
53 * Set to NULL if unsuccessful.
55 * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
57 inline int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
58 ANeuralNetworksMemory **memory)
60 LOAD_FUNCTION(ANeuralNetworksMemory_createFromFd);
61 EXECUTE_FUNCTION_RETURN(size, protect, fd, offset, memory);
65 * Delete a memory object.
67 * Destroys the object used by the run time to keep track of the memory.
68 * This will free the underlying actual memory if no other code has open
69 * handles to this memory.
71 * @param memory The memory object to be freed.
73 inline void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory)
75 LOAD_FUNCTION(ANeuralNetworksMemory_free);
76 EXECUTE_FUNCTION(memory);
80 * Create an empty {@link ANeuralNetworksModel}.
82 * <p>This only creates the object. Computation is performed once
83 * {@link ANeuralNetworksExecution_startCompute} is invoked.
85 * The model should be constructed with calls to
86 * {@link ANeuralNetworksModel_addOperation} and
87 * {@link ANeuralNetworksModel_addOperand}
89 * <p>{@link ANeuralNetworksModel_finish} should be called once the model
90 * has been fully constructed.</p>
92 * <p>{@link ANeuralNetworksModel_free} should be called once the model
93 * is no longer needed.</p>
95 * @param model The {@link ANeuralNetworksModel} to be created.
96 * Set to NULL if unsuccessful.
98 * @return ANEURALNETWORKS_NO_ERROR if successful.
100 inline int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
102 LOAD_FUNCTION(ANeuralNetworksModel_create);
103 EXECUTE_FUNCTION_RETURN(model);
109 * The model need not have been finished by a call to
110 * {@link ANeuralNetworksModel_finish}.
112 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
114 * @param model The model to be destroyed. Passing NULL is acceptable and
115 * results in no operation.
117 inline void ANeuralNetworksModel_free(ANeuralNetworksModel *model)
119 LOAD_FUNCTION(ANeuralNetworksModel_free);
120 EXECUTE_FUNCTION(model);
124 * Indicate that we have finished modifying a model. Required before
125 * calling {@link ANeuralNetworksCompilation_compile}.
127 * An application is responsible to make sure that no other thread uses
128 * the model at the same time.
130 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
132 * @param model The model to be finished.
134 * @return ANEURALNETWORKS_NO_ERROR if successful.
136 inline int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
138 LOAD_FUNCTION(ANeuralNetworksModel_finish);
139 EXECUTE_FUNCTION_RETURN(model);
143 * Add an operand to a model.
145 * The order in which the operands are added is important. The first one added
146 * to a model will have the index value 0, the second 1, etc. These indexes are
147 * used as operand identifiers in {@link ANeuralNetworksModel_addOperation},
148 * {@link ANeuralNetworksExecution_setInput},
149 * {@link ANeuralNetworksExecution_setInputFromMemory},
150 * {@link ANeuralNetworksExecution_setOutput},
151 * {@link ANeuralNetworksExecution_setOutputFromMemory} and
152 * {@link ANeuralNetworksExecution_setOperandValue}.
154 * To build a model that can accommodate inputs of various sizes, as you may
155 * want to do for a CNN, set the size of the dimensions that will vary at run
156 * time to 0. If you do so, provide the full dimensions when calling
157 * {@link ANeuralNetworksExecution_setInput} or {@link
158 * ANeuralNetworksExecution_setInputFromMemory}.
160 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
161 * been called will return an error.
163 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
165 * @param model The model to be modified.
166 * @param type The {@link ANeuralNetworksOperandType} that describes the shape
169 * @return ANEURALNETWORKS_NO_ERROR if successful.
171 inline int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
172 const ANeuralNetworksOperandType *type)
174 LOAD_FUNCTION(ANeuralNetworksModel_addOperand);
175 EXECUTE_FUNCTION_RETURN(model, type);
179 * Sets an operand to a constant value.
181 * For scalar values, the content of buffer is copied into the model.
183 * For tensor values, a pointer to the buffer is stored within the model.
184 * The application is responsible for not changing the content of this region
185 * until all executions using this model have completed. As the data may
186 * be copied during processing, modifying the data after this call yields
189 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
190 * been called will return an error.
192 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
194 * @param model The model to be modified.
195 * @param index The index of the model operand we're setting.
196 * @param buffer A pointer to the data to use.
197 * @param length The size in bytes of the data value.
199 * @return ANEURALNETWORKS_NO_ERROR if successful.
201 inline int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
202 const void *buffer, size_t length)
204 LOAD_FUNCTION(ANeuralNetworksModel_setOperandValue);
205 EXECUTE_FUNCTION_RETURN(model, index, buffer, length);
209 * Sets an operand's per channel quantization parameters.
211 * Sets parameters required by a tensor of type
212 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL}.
213 * This function must be called for every tensor of type
214 * {@link ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL} before
215 * calling {@link ANeuralNetworksModel_finish}.
217 * Available since API level 29.
219 * @param model The model to be modified.
220 * @param index The index of the model operand we're setting.
221 * @param channelQuant The per channel quantization parameters for the operand.
222 * No memory in this struct needs to outlive the call to
225 * @return ANEURALNETWORKS_NO_ERROR if successful.
227 inline int ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
228 ANeuralNetworksModel *model, int32_t index,
229 const ANeuralNetworksSymmPerChannelQuantParams *channelQuant)
231 LOAD_FUNCTION(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
232 EXECUTE_FUNCTION_RETURN(model, index, channelQuant);
236 * Sets an operand to a value stored in a memory object.
238 * The content of the memory is not copied. A reference to that memory is stored
239 * inside the model. The application is responsible for not changing the content
240 * of the memory region until all executions using this model have completed.
241 * As the data may be copied during processing, modifying the data after this
242 * call yields undefined results.
244 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
245 * been called will return an error.
247 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
249 * @param model The model to be modified.
250 * @param index The index of the model operand we're setting.
251 * @param buffer A pointer to the data to use.
252 * @param memory The memory containing the data.
253 * @param offset This specifies the location of the data within the memory.
254 * The offset is in bytes from the start of memory.
255 * @param length The size in bytes of the data value.
257 * @return ANEURALNETWORKS_NO_ERROR if successful.
259 inline int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
261 const ANeuralNetworksMemory *memory,
262 size_t offset, size_t length)
264 LOAD_FUNCTION(ANeuralNetworksModel_setOperandValueFromMemory);
265 EXECUTE_FUNCTION_RETURN(model, index, memory, offset, length);
269 * Add an operation to a model.
271 * @param model The model to be modified.
272 * @param type The type of the operation.
273 * @param inputCount The number of entries in the inputs array.
274 * @param inputs An array of indexes identifying each operand.
275 * @param outputCount The number of entries in the outputs array.
276 * @param outputs An array of indexes identifying each operand.
278 * The operands specified by inputs and outputs must have been
279 * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
281 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
282 * been called will return an error.
284 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
286 * @return ANEURALNETWORKS_NO_ERROR if successful.
288 inline int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
289 ANeuralNetworksOperationType type, uint32_t inputCount,
290 const uint32_t *inputs, uint32_t outputCount,
291 const uint32_t *outputs)
293 LOAD_FUNCTION(ANeuralNetworksModel_addOperation);
294 EXECUTE_FUNCTION_RETURN(model, type, inputCount, inputs, outputCount, outputs);
298 * Specifies which operands will be the model's inputs and outputs.
300 * An operand cannot be used for both input and output. Doing so will
303 * @param model The model to be modified.
304 * @param inputCount The number of entries in the inputs array.
305 * @param inputs An array of indexes identifying the input operands.
306 * @param outputCount The number of entries in the outputs array.
307 * @param outputs An array of indexes identifying the output operands.
309 * The operands specified by inputs and outputs must have been
310 * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
312 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
313 * been called will return an error.
315 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
318 inline int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model,
320 const uint32_t *inputs,
321 uint32_t outputCount,
322 const uint32_t *outputs)
324 LOAD_FUNCTION(ANeuralNetworksModel_identifyInputsAndOutputs);
325 EXECUTE_FUNCTION_RETURN(model, inputCount, inputs, outputCount, outputs);
329 * Specifies whether {@link ANEURALNETWORKS_TENSOR_FLOAT32} is allowed to be
330 * calculated with range and/or precision as low as that of the IEEE 754 16-bit
331 * floating-point format. By default, {@link ANEURALNETWORKS_TENSOR_FLOAT32}
332 * must be calculated using at least the range and precision of the IEEE 754
333 * 32-bit floating-point format.
335 * @param model The model to be modified.
336 * @param allow 'true' indicates {@link ANEURALNETWORKS_TENSOR_FLOAT32} may be
337 * calculated with range and/or precision as low as that of the
338 * IEEE 754 16-bit floating point format. 'false' indicates
339 * {@link ANEURALNETWORKS_TENSOR_FLOAT32} must be calculated using
340 * at least the range and precision of the IEEE 754 32-bit floating
343 * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
344 * been called will return an error.
346 * Available since API level 28.
348 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
350 inline int ANeuralNetworksModel_relaxComputationFloat32toFloat16(ANeuralNetworksModel *model,
353 LOAD_FUNCTION(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
354 EXECUTE_FUNCTION_RETURN(model, allow);
358 * Create a {@link ANeuralNetworksCompilation} to compile the given model.
359 * This only creates the object. Compilation is only performed once
360 * {@link ANeuralNetworksCompilation_start} is invoked.
362 * <p>The provided model must outlive the compilation.</p>
364 * The model must already have been finished by a call to
365 * {@link ANeuralNetworksModel_finish}.
367 * See {@link ANeuralNetworksCompilation} for information on multithreaded
370 * @param model The {@link ANeuralNetworksModel} to be compiled.
371 * @param compilation The newly created object or NULL if unsuccessful.
373 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
374 * if the model is invalid.
376 inline int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
377 ANeuralNetworksCompilation **compilation)
379 LOAD_FUNCTION(ANeuralNetworksCompilation_create);
380 EXECUTE_FUNCTION_RETURN(model, compilation);
384 * Destroy a compilation.
386 * <p>If called on a compilation for which
387 * {@link ANeuralNetworksCompilation_start} has been called, the
388 * function will return immediately but will mark the compilation to be deleted
389 * once the compilation completes. The {@link ANeuralNetworksCompilation_wait}
390 * will return ERROR_DELETED.
392 * See {@link ANeuralNetworksCompilation} for information on multithreaded
395 * @param compilation The compilation to be destroyed. Passing NULL is
396 * acceptable and results in no operation.
398 inline void ANeuralNetworksCompilation_free(ANeuralNetworksCompilation *compilation)
400 LOAD_FUNCTION(ANeuralNetworksCompilation_free);
401 EXECUTE_FUNCTION(compilation);
405 * Sets the execution preference.
407 * <p>Provides guidance to the runtime when trade-offs are possible.</p>
409 * See {@link ANeuralNetworksCompilation} for information on multithreaded
412 * @param compilation The compilation to be modified.
413 * @param preference Either {@link PREFER_LOW_POWER},
414 * {@link PREFER_SINGLE_FAST_ANSWER}, or
415 * {@link PREFER_SUSTAINED_SPEED}.
417 * @return ANEURALNETWORKS_NO_ERROR if successful.
419 inline int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
422 LOAD_FUNCTION(ANeuralNetworksCompilation_setPreference);
423 EXECUTE_FUNCTION_RETURN(compilation, preference);
427 * Waits until the compilation completes.
429 * More than one thread can wait on a compilation. When the compilation
430 * completes, all threads will be released.
432 * See {@link ANeuralNetworksCompilation} for information on multithreaded
435 * @return ANEURALNETWORKS_NO_ERROR if the compilation completed normally.
437 inline int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
439 LOAD_FUNCTION(ANeuralNetworksCompilation_finish);
440 EXECUTE_FUNCTION_RETURN(compilation);
443 * Create a {@link ANeuralNetworksExecution} to apply the given compilation.
444 * This only creates the object. Computation is only performed once
445 * {@link ANeuralNetworksExecution_startCompute} is invoked.
447 * <p>The provided compilation must outlive the execution.</p>
449 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
451 * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
452 * @param execution The newly created object or NULL if unsuccessful.
454 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
455 * if the compilation is invalid.
457 inline int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
458 ANeuralNetworksExecution **execution)
460 LOAD_FUNCTION(ANeuralNetworksExecution_create);
461 EXECUTE_FUNCTION_RETURN(compilation, execution);
465 * Destroy an execution.
467 * <p>If called on an execution for which
468 * {@link ANeuralNetworksExecution_startCompute} has been called, the
469 * function will return immediately but will mark the execution to be deleted
470 * once the computation completes. The {link ANeuralNetworksExecution_wait}
471 * will return ANEURALNETWORKS_ERROR_DELETED.
473 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
475 * @param execution The execution to be destroyed. Passing NULL is acceptable
476 * and results in no operation.
478 inline void ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution)
480 LOAD_FUNCTION(ANeuralNetworksExecution_free);
481 EXECUTE_FUNCTION(execution);
485 * Associate a user buffer with an input of the model of the
486 * {@link ANeuralNetworksExecution}.
488 * <p>The provided buffer must outlive the execution.</p>
490 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
492 * @param execution The execution to be modified.
493 * @param index The index of the input argument we are setting. It is
494 * an index into the lists passed to
495 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
496 * the index associated with {@link
497 * ANeuralNetworksModel_addOperand}.
498 * @param type The type of the operand. This should be used to specify the
499 * dimensions that were set to 0 when the operand was added to the
500 * model. All other properties of the type must be the same as
501 * specified in the model. If the type is the same as specified
502 * when the model was built, NULL can be passed.
503 * @param buffer The buffer containing the data.
504 * @param length The length in bytes of the buffer.
506 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
507 * the name is not recognized or the buffer is too small for the input.
509 inline int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
510 const ANeuralNetworksOperandType *type,
511 const void *buffer, size_t length)
513 LOAD_FUNCTION(ANeuralNetworksExecution_setInput);
514 EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
518 * Associate part of a memory object with an input of the model of the
519 * {@link ANeuralNetworksExecution}.
521 * <p>The provided memory must outlive the execution.</p>
523 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
525 * @param execution The execution to be modified.
526 * @param index The index of the input argument we are setting. It is
527 * an index into the lists passed to
528 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
529 * the index associated with {@link
530 * ANeuralNetworksModel_addOperand}.
531 * @param type The type of the operand. This can be used to specify the
532 * dimensions that were set to 0 when the operand was added to the
533 * model. All other values must be the same as specified in the
534 * model. If the type is the same as specified when the model
535 * was built, NULL can be passed.
536 * @param memory The memory containing the data.
537 * @param offset This specifies the location of the data within the memory.
538 * The offset is in bytes from the start of memory.
539 * @param length The size in bytes of the data value.
541 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
542 * the name is not recognized or the buffer is too small for the input.
544 inline int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execution,
546 const ANeuralNetworksOperandType *type,
547 const ANeuralNetworksMemory *memory,
548 size_t offset, size_t length)
550 LOAD_FUNCTION(ANeuralNetworksExecution_setInputFromMemory);
551 EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
555 * Associate a user buffer with an output of the model of the
556 * {@link ANeuralNetworksExecution}.
558 * <p>The provided buffer must outlive the execution.</p>
560 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
562 * @param execution The execution to be modified.
563 * @param index The index of the output argument we are setting. It is
564 * an index into the lists passed to
565 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
566 * the index associated with {@link
567 * ANeuralNetworksModel_addOperand}.
568 * @param type The type of the operand. This can be used to specify the
569 * dimensions that were set to 0 when the operand was added to the
570 * model. All other values must be the same as specified in the
571 * model. If the type is the same as specified when the model
572 * was built, NULL can be passed.
573 * @param buffer The buffer where the data is to be written.
574 * @param length The length in bytes of the buffer.
576 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
577 * the name is not recognized or the buffer is too small for the output.
579 inline int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
580 const ANeuralNetworksOperandType *type, void *buffer,
583 LOAD_FUNCTION(ANeuralNetworksExecution_setOutput);
584 EXECUTE_FUNCTION_RETURN(execution, index, type, buffer, length);
588 * Associate part of a memory object with an output of the model of the
589 * {@link ANeuralNetworksExecution}.
591 * <p>The provided memory must outlive the execution.</p>
593 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
595 * @param execution The execution to be modified.
596 * @param index The index of the output argument we are setting. It is
597 * an index into the lists passed to
598 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
599 * the index associated with {@link
600 * ANeuralNetworksModel_addOperand}.
601 * @param type The type of the operand. This can be used to specify the
602 * dimensions that were set to 0 when the operand was added to the
603 * model. All other values must be the same as specified in the
604 * model. If the type is the same as specified when the model
605 * was built, NULL can be passed.
606 * @param memory The memory where the data is to be stored.
607 * @param offset This specifies the location of the data within the memory.
608 * The offset is in bytes from the start of memory.
609 * @param length The length in bytes of the data value.
611 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA if
612 * the name is not recognized or the buffer is too small for the output.
614 inline int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execution,
616 const ANeuralNetworksOperandType *type,
617 const ANeuralNetworksMemory *memory,
618 size_t offset, size_t length)
620 LOAD_FUNCTION(ANeuralNetworksExecution_setOutputFromMemory);
621 EXECUTE_FUNCTION_RETURN(execution, index, type, memory, offset, length);
625 * Schedule evaluation of the execution.
627 * <p>Schedules evaluation of the execution. Once the model has been
628 * applied and the outputs are ready to be consumed, the execution will be
629 * signaled. Use {@link ANeuralNetworksExecution_wait} to wait for that signal.
632 * Multiple executions can be scheduled and evaluated concurrently, and
633 * compilations can be performed concurrently with executions. The runtime makes
634 * no guarantee on the ordering of the completion of compilations and
635 * executions. If it's important to the application, the application should
636 * enforce the ordering by using {@link ANeuralNetworksCompilation_wait} and
637 * {@link ANeuralNetworksExecution_wait}.
639 * ANeuralNetworksExecution_wait must be called to recuperate the resources used
642 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
644 * @param execution The execution to be scheduled and executed.
646 * @return ANEURALNETWORKS_NO_ERROR if successful.
648 inline int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
649 ANeuralNetworksEvent **event)
651 LOAD_FUNCTION(ANeuralNetworksExecution_startCompute);
652 EXECUTE_FUNCTION_RETURN(execution, event);
656 * Waits until the execution completes.
658 * More than one thread can wait on an event. When the execution completes,
659 * all threads will be released.
661 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
663 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
665 inline int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
667 LOAD_FUNCTION(ANeuralNetworksEvent_wait);
668 EXECUTE_FUNCTION_RETURN(event);
672 * Destroys the event.
674 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
676 inline void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event)
678 LOAD_FUNCTION(ANeuralNetworksEvent_free);
679 EXECUTE_FUNCTION(event);
/**
 * Get the number of available devices.
 *
 * @param numDevices Used to return the number of devices.
 *
 * @return ANEURALNETWORKS_NO_ERROR if successful.
 *
 * Available since API level 29.
 */
inline int ANeuralNetworks_getDeviceCount(uint32_t *numDevices)
{
  LOAD_FUNCTION(ANeuralNetworks_getDeviceCount);
  EXECUTE_FUNCTION_RETURN(numDevices);
}
698 * Get the representation of the specified device.
700 * @param devIndex The index of the specified device. Must be less than the
701 * number of available devices.
702 * @param device The representation of the specified device.
703 * The same representation will always be returned for the
706 * @return ANEURALNETWORKS_NO_ERROR if successful.
708 * Available since API level 29.
711 inline int ANeuralNetworks_getDevice(uint32_t devIndex, ANeuralNetworksDevice **device)
713 LOAD_FUNCTION(ANeuralNetworks_getDevice);
714 EXECUTE_FUNCTION_RETURN(devIndex, device);
718 * Get the name of the specified device.
720 * @param device The representation of the specified device.
721 * @param name The returned name of the specified device. The name will be in
722 * UTF-8 and will be null-terminated. It will be recognizable as a
723 * known device name rather than a cryptic string. For devices
724 * with API level 29 and above, the format of the name is
725 * {VENDOR}-{DEVICE}, e.g. “google-ipu”. For devices with feature
726 * level 28 or lower, the name will always be “unknown-device”.
727 * The name will remain valid for the duration of the application.
729 * @return ANEURALNETWORKS_NO_ERROR if successful.
731 * Available since API level 29.
733 inline int ANeuralNetworksDevice_getName(const ANeuralNetworksDevice *device, const char **name)
735 LOAD_FUNCTION(ANeuralNetworksDevice_getName);
736 EXECUTE_FUNCTION_RETURN(device, name);
740 * Get the version of the driver implementation of the specified device.
742 * It’s the responsibility of the driver implementor to insure that this version
743 * string uniquely distinguishes this implementation from all previous
746 * This version string must not be confused with the feature level which is
747 * solely defined by {@link ANeuralNetworksDevice_getFeatureLevel}. There is no
748 * implicit ordering of the versions. For example, it is not possible to filter
749 * all drivers older than a certain version.
751 * Application developers may use this version string to avoid or prefer
752 * specific driver implementations. For example, an application may want to do
754 * - A specific version of the driver does not provide the required
755 * performance, perhaps because of a performance regression.
756 * - A specific version of the driver has a bug or returns results that
757 * don’t match the minimum precision requirement for the application.
759 * @param device The representation of the specified device.
760 * @param version The returned version string of the driver for the specified
761 * device. The string will be in UTF-8 and will be
762 * null-terminated. For devices with feature level 28 or lower,
763 * "UNKNOWN" will be returned. The version string will remain
764 * valid for the duration of the application.
766 * @return ANEURALNETWORKS_NO_ERROR if successful.
768 * Available since API level 29.
770 inline int ANeuralNetworksDevice_getVersion(const ANeuralNetworksDevice *device,
771 const char **version)
773 LOAD_FUNCTION(ANeuralNetworksDevice_getVersion);
774 EXECUTE_FUNCTION_RETURN(device, version);
778 * Get the supported NNAPI version of the specified device.
780 * Each device has a supported feature level, which is the most advanced feature
781 * this driver implements. For example, if the driver implements the features
782 * introduced in Android P, but does not implement the features introduced after
783 * Android P, the value would be 28. Developers could decide whether or not the
784 * specified device should be used for a Model that has certain feature
787 * @param device The representation of the specified device.
788 * @param featureLevel The API level of the most advanced feature this driver
791 * @return ANEURALNETWORKS_NO_ERROR if successful.
793 * Available since API level 29.
795 inline int ANeuralNetworksDevice_getFeatureLevel(const ANeuralNetworksDevice *device,
796 int64_t *featureLevel)
798 LOAD_FUNCTION(ANeuralNetworksDevice_getFeatureLevel);
799 EXECUTE_FUNCTION_RETURN(device, featureLevel);
803 * Get the supported operations for a specified set of devices. If multiple
804 * devices are selected, the supported operation list is a union of supported
805 * operations of all selected devices.
807 * @param model The model to be queried.
808 * @param devices The set of devices. Must not contain duplicates.
809 * @param numDevices The number of devices in the set.
810 * @param supportedOps The boolean array to be filled. True means supported. The
811 * size of the boolean array must be at least as large as
812 * the number of operations in the model. The order of
813 * elements in the supportedOps array matches the order in
814 * which the corresponding operations were added to the
817 * @return ANEURALNETWORKS_NO_ERROR if successful.
819 * Available since API level 29.
822 ANeuralNetworksModel_getSupportedOperationsForDevices(const ANeuralNetworksModel *model,
823 const ANeuralNetworksDevice *const *devices,
824 uint32_t numDevices, bool *supportedOps)
826 LOAD_FUNCTION(ANeuralNetworksModel_getSupportedOperationsForDevices);
827 EXECUTE_FUNCTION_RETURN(model, devices, numDevices, supportedOps);
831 * Create a {@link ANeuralNetworksCompilation} to compile the given model for a
832 * specified set of devices. If more than one device is specified, the
833 * compilation will distribute the workload automatically across the devices.
834 * The model must be fully supported by the specified set of devices. This means
835 * that ANeuralNetworksModel_getSupportedOperationsForDevices() must have
836 * returned true for every operation for that model/devices pair.
838 * @param model The {@link ANeuralNetworksModel} to be compiled.
839 * @param devices The set of devices. Must not contain duplicates.
840 * @param numDevices The number of devices in the set.
841 * @param compilation The newly created object or NULL if unsuccessful.
843 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
844 * if the model is invalid.
846 * Available since API level 29.
848 inline int ANeuralNetworksCompilation_createForDevices(ANeuralNetworksModel *model,
849 const ANeuralNetworksDevice *const *devices,
851 ANeuralNetworksCompilation **compilation)
853 LOAD_FUNCTION(ANeuralNetworksCompilation_createForDevices);
854 EXECUTE_FUNCTION_RETURN(model, devices, numDevices, compilation);
858 * Sets the compilation caching signature and the cache directory.
860 * Provides optional caching information to the runtime for faster repeated
863 * See {@link ANeuralNetworksCompilation} for information on multithreaded
866 * @param compilation The compilation to be modified.
867 * @param cacheDir The cache directory to store and retrieve caching data. It is
868 * recommended to use the code_cache provided by the Android
869 * runtime. If not using the code_cache, the user should choose
870 * a directory local to the application, and is responsible to
871 * manage and clean the cache entries.
872 * @param token The token provided by the user to specify a model, must be of
873 * length ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN. The user should
874 * ensure that the token is unique to a model within the
875 * application. The NNAPI runtime will not detected token
876 * collisions. If there is a collision, the compilation outcome may
877 * be incorrect without notifying with error.
879 * @return ANEURALNETWORKS_NO_ERROR if successful.
881 * Available since API level 29.
883 inline int ANeuralNetworksCompilation_setCaching(ANeuralNetworksCompilation *compilation,
884 const char *cacheDir, const uint8_t *token)
886 LOAD_FUNCTION(ANeuralNetworksCompilation_setCaching);
887 EXECUTE_FUNCTION_RETURN(compilation, cacheDir, token);
891 * Schedule synchronous evaluation of the execution.
893 * <p>Schedules synchronous evaluation of the execution. Returns once the
894 * execution has completed and the outputs are ready to be consumed.
897 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
899 * See {@link ANeuralNetworksExecution_startCompute} for asynchronous execution.
900 * Synchronous execution incurs lower overhead than asynchronous execution.
902 * Available since API level 29.
904 * @param execution The execution to be scheduled and executed.
906 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
907 * ANEURALNETWORKS_UNMAPPABLE if the execution input or output memory
908 * cannot be properly mapped.
910 inline int ANeuralNetworksExecution_compute(ANeuralNetworksExecution *execution)
912 LOAD_FUNCTION(ANeuralNetworksExecution_compute);
913 EXECUTE_FUNCTION_RETURN(execution);
917 * Get the dimensional information of the specified output operand of the model
919 * {@link ANeuralNetworksExecution}.
921 * On asynchronous execution initiated by {@link
922 * ANeuralNetworksExecution_startCompute},
923 * {@link ANeuralNetworksEvent_wait} must be called prior to this function to
924 * recuperate the resources used by the execution.
926 * @param execution The execution to be queried.
927 * @param index The index of the output argument we are querying. It is
928 * an index into the lists passed to
929 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
930 * the index associated with {@link
931 * ANeuralNetworksModel_addOperand}.
932 * @param rank The rank of the output operand.
934 * @return ANEURALNETWORKS_NO_ERROR if successful,
935 * ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE if the target output is provided an
936 * insufficient buffer at execution time, ANEURALNETWORKS_BAD_DATA if the index
939 * Available since API level 29.
941 inline int ANeuralNetworksExecution_getOutputOperandRank(ANeuralNetworksExecution *execution,
942 int32_t index, uint32_t *rank)
944 LOAD_FUNCTION(ANeuralNetworksExecution_getOutputOperandRank);
945 EXECUTE_FUNCTION_RETURN(execution, index, rank);
949 * Get the dimensional information of the specified output operand of the model
951 * {@link ANeuralNetworksExecution}. The target output operand cannot be a
954 * On asynchronous execution initiated by
955 * {@link ANeuralNetworksExecution_startCompute},
956 * {@link ANeuralNetworksEvent_wait} must be called prior to this function to
957 * recuperate the resources used by the execution.
959 * @param execution The execution to be queried.
960 * @param index The index of the output argument we are querying. It is an index
961 * into the lists passed to
962 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
963 * the index associated with
964 * {@link ANeuralNetworksModel_addOperand}.
965 * @param dimensions The dimension array to be filled. The size of the array
966 * must be exactly as large as the rank of the output operand
967 * to be queried in the model.
969 * @return ANEURALNETWORKS_NO_ERROR if successful,
970 * ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE if the target output is provided an
971 * insufficient buffer at execution time, ANEURALNETWORKS_BAD_DATA if the index
972 * is invalid or if the target is a scalar.
974 * Available since API level 29.
976 inline int ANeuralNetworksExecution_getOutputOperandDimensions(ANeuralNetworksExecution *execution,
977 int32_t index, uint32_t *dimensions)
979 LOAD_FUNCTION(ANeuralNetworksExecution_getOutputOperandDimensions);
980 EXECUTE_FUNCTION_RETURN(execution, index, dimensions);
984 * Create a {@link ANeuralNetworksBurst} to apply the given compilation.
985 * This only creates the burst object. Computation is only performed once
986 * {@link ANeuralNetworksExecution_burstCompute} is invoked with a valid
987 * {@link ANeuralNetworksExecution} and {@link ANeuralNetworksBurst}.
989 * <p>The provided compilation must outlive the burst object.</p>
991 * Available since API level 29.
993 * @param compilation The {@link ANeuralNetworksCompilation} to be evaluated.
994 * @param burst The newly created object or NULL if unsuccessful.
996 * @return ANEURALNETWORKS_NO_ERROR if successful, ANEURALNETWORKS_BAD_DATA
997 * if the compilation is invalid.
999 inline int ANeuralNetworksBurst_create(ANeuralNetworksCompilation *compilation,
1000 ANeuralNetworksBurst **burst)
1002 LOAD_FUNCTION(ANeuralNetworksBurst_create);
1003 EXECUTE_FUNCTION_RETURN(compilation, burst);
1007 * Destroys the burst object.
1009 * Available since API level 29.
1011 * @param burst The burst object to be destroyed. Passing NULL is acceptable and
1012 * results in no operation.
1014 inline void ANeuralNetworksBurst_free(ANeuralNetworksBurst *burst)
1016 LOAD_FUNCTION(ANeuralNetworksBurst_free);
1017 EXECUTE_FUNCTION(burst);
1021 * Schedule synchronous evaluation of the execution on a burst object.
1023 * <p>Schedules synchronous evaluation of the execution. Returns once the
1024 * execution has completed and the outputs are ready to be consumed.</p>
1026 * <p>There must be at most one {@link ANeuralNetworksExecution} processing at
1027 * any given time for any given burst object. Any
1028 * {@link ANeuralNetworksExecution} launched before the previous has finished
1029 * will result in ANEURALNETWORKS_BAD_STATE.</p>
1031 * Available since API level 29.
1033 * @param burst The burst object to execute on.
1034 * @param execution The execution to be scheduled and executed. The execution
1035 * must be created from the same {@link
1036 * ANeuralNetworksCompilation} as the burst object.
1038 * @return ANEURALNETWORKS_NO_ERROR if the execution completed normally.
1040 inline int ANeuralNetworksExecution_burstCompute(ANeuralNetworksExecution *execution,
1041 ANeuralNetworksBurst *burst)
1043 LOAD_FUNCTION(ANeuralNetworksExecution_burstCompute);
1044 EXECUTE_FUNCTION_RETURN(execution, burst);
1048 * Creates a shared memory object from an AHardwareBuffer handle.
1050 * If the shared memory is backed by an AHardwareBuffer of
1051 * AHARDWAREBUFFER_FORMAT_BLOB format, it can be used the same way as shared
1052 * memory created from a file handle. See
1053 * {@link ANeuralNetworksMemory} for a description on how to use this shared
1056 * If the shared memory is backed by an AHardwareBuffer of a format other than
1057 * AHARDWAREBUFFER_FORMAT_BLOB, it can only be used for Model inputs and
1058 * outputs. When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
1059 * {@link ANeuralNetworksExecution_setOutputFromMemory} with the shared memory,
1060 * both offset and length must be set to zero and the entire memory region will
1061 * be associated with the specified input or output operand. There is no
1062 * guarantee that an arbitrary AHardwareBuffer_Format and
1063 * AHardwareBuffer_UsageFlags combination can be used by arbitrary devices. The
1064 * execution will fail if selected set of devices cannot consume the buffer.
1066 * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with shared
1067 * memory backed by an AHardwareBuffer of a format other than
1068 * AHARDWAREBUFFER_FORMAT_BLOB is disallowed.
1070 * TODO(miaowang): add documentation about intended usage with introspection
1073 * Available since API level 29.
1075 * @param ahwb The AHardwareBuffer handle.
1076 * @param memory The memory object to be created.
1077 * Set to NULL if unsuccessful.
1079 * @return ANEURALNETWORKS_NO_ERROR if the request completed normally.
1081 * @see AHardwareBuffer
1083 inline int ANeuralNetworksMemory_createFromAHardwareBuffer(const AHardwareBuffer *ahwb,
1084 ANeuralNetworksMemory **memory)
1086 LOAD_FUNCTION(ANeuralNetworksMemory_createFromAHardwareBuffer);
1087 EXECUTE_FUNCTION_RETURN(ahwb, memory);
1091 * Specifies whether duration of the {@link ANeuralNetworksExecution} is to be
1092 * measured. By default, duration is not measured.
1094 * The {@link ANeuralNetworksExecution} must have been created with
1095 * {@link ANeuralNetworksCompilation_createForDevices} with numDevices = 1.
1097 * See {@link ANeuralNetworksExecution} for information on multithreaded usage.
1099 * Available since API level 29.
1101 * @param execution The execution to be modified.
1102 * @param measure 'true' if duration is to be measured, 'false' if not.
1104 * @return ANEURALNETWORKS_NO_ERROR if successful.
1106 inline int ANeuralNetworksExecution_setMeasureTiming(ANeuralNetworksExecution *execution,
1109 LOAD_FUNCTION(ANeuralNetworksExecution_setMeasureTiming);
1110 EXECUTE_FUNCTION_RETURN(execution, measure);
1114 * Get the time spent in the specified {@link ANeuralNetworksExecution}, in
1115 * nanoseconds. The execution must have completed.
1117 * @param execution The execution to be queried.
1118 * @param durationCode The measurement to be queried, specified by {@link
1120 * @param duration The returned duration. If no measurement was requested by
1121 * {@link ANeuralNetworksExecution_setMeasureTiming}, or for
1122 * some other reason the duration is not available, UINT64_MAX will be returned.
1123 * A particular device need not support any given measurement.
1125 * @return ANEURALNETWORKS_NO_ERROR if successful.
1127 inline int ANeuralNetworksExecution_getDuration(const ANeuralNetworksExecution *execution,
1128 int32_t durationCode, uint64_t *duration)
1130 LOAD_FUNCTION(ANeuralNetworksExecution_getDuration);
1131 EXECUTE_FUNCTION_RETURN(execution, durationCode, duration);
1135 * Queries whether an extension is supported by the driver implementation of
1136 * the specified device.
1138 * @param device The representation of the specified device.
1139 * @param extension The extension name.
1140 * @param isExtensionSupported The boolean value indicating whether the
1141 * extension is supported.
1143 * @return ANEURALNETWORKS_NO_ERROR if successful.
1145 * Available since API level 29.
1147 inline int ANeuralNetworksDevice_getExtensionSupport(const ANeuralNetworksDevice *device,
1148 const char *extensionName,
1149 bool *isExtensionSupported)
1151 LOAD_FUNCTION(ANeuralNetworksDevice_getExtensionSupport);
1152 EXECUTE_FUNCTION_RETURN(device, extensionName, isExtensionSupported);
1156 * Creates an operand type from an extension name and an extension operand code.
1158 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1160 * Available since API level 29.
1162 * @param model The model to contain the operand.
1163 * @param extensionName The extension name.
1164 * @param operandCodeWithinExtension The extension operand code.
1165 * @param type The operand type.
1167 * @return ANEURALNETWORKS_NO_ERROR if successful.
1169 inline int ANeuralNetworksModel_getExtensionOperandType(ANeuralNetworksModel *model,
1170 const char *extensionName,
1171 uint16_t operandCodeWithinExtension,
1174 LOAD_FUNCTION(ANeuralNetworksModel_getExtensionOperandType);
1175 EXECUTE_FUNCTION_RETURN(model, extensionName, operandCodeWithinExtension, type);
1179 * Creates an operation type from an extension name and an extension operation
1182 * See {@link ANeuralNetworksModel} for information on multithreaded usage.
1184 * Available since API level 29.
1186 * @param model The model to contain the operation.
1187 * @param extensionName The extension name.
1188 * @param operationCodeWithinExtension The extension operation code.
1189 * @param type The operation type.
1191 * @return ANEURALNETWORKS_NO_ERROR if successful.
1193 inline int ANeuralNetworksModel_getExtensionOperationType(ANeuralNetworksModel *model,
1194 const char *extensionName,
1195 uint16_t operationCodeWithinExtension,
1196 ANeuralNetworksOperationType *type)
1198 LOAD_FUNCTION(ANeuralNetworksModel_getExtensionOperationType);
1199 EXECUTE_FUNCTION_RETURN(model, extensionName, operationCodeWithinExtension, type);
1203 * Sets extension operand parameters.
1205 * Available since API level 29.
1207 * @param model The model to be modified.
1208 * @param index The index of the model operand we're setting.
1209 * @param data A pointer to the extension operand data.
1210 * The data does not have to outlive the call to this function.
1211 * @param length The size in bytes of the data value.
1213 * @return ANEURALNETWORKS_NO_ERROR if successful.
1215 inline int ANeuralNetworksModel_setOperandExtensionData(ANeuralNetworksModel *model, int32_t index,
1216 const void *data, size_t length)
1218 LOAD_FUNCTION(ANeuralNetworksModel_setOperandExtensionData);
1219 EXECUTE_FUNCTION_RETURN(model, index, data, length);
1223 * Create a {@link ANeuralNetworksMemoryDesc} with no properties.
1225 * This only creates the memory descriptor. Its properties should be set with
1227 * {@link ANeuralNetworksMemoryDesc_addInputRole},
1228 * {@link ANeuralNetworksMemoryDesc_addOutputRole}, and
1229 * {@link ANeuralNetworksMemoryDesc_setDimensions}.
1231 * {@link ANeuralNetworksMemoryDesc_finish} must be called once all properties
1234 * {@link ANeuralNetworksMemoryDesc_free} must be called once the memory
1235 * descriptor is no longer needed.
1237 * Available since API level 30.
1239 * @param desc The {@link ANeuralNetworksMemoryDesc} to be created.
1240 * Set to NULL if unsuccessful.
1242 * @return ANEURALNETWORKS_NO_ERROR if successful.
1244 inline int ANeuralNetworksMemoryDesc_create(ANeuralNetworksMemoryDesc **desc)
1246 LOAD_FUNCTION(ANeuralNetworksMemoryDesc_create);
1247 EXECUTE_FUNCTION_RETURN(desc);
1251 * Destroy a memory descriptor.
1253 * The memory descriptor need not have been finished by a call to
1254 * {@link ANeuralNetworksMemoryDesc_finish}.
1256 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
1258 * Available since API level 30.
1260 * @param desc The memory descriptor to be destroyed. Passing NULL is acceptable
1261 * and results in no operation.
1263 inline void ANeuralNetworksMemoryDesc_free(ANeuralNetworksMemoryDesc *desc)
1265 LOAD_FUNCTION(ANeuralNetworksMemoryDesc_free);
1266 EXECUTE_FUNCTION(desc);
1270 * Specify that a memory object will be playing the role of an output to an
1271 * execution created from a particular compilation.
1273 * The compilation and the output index fully specify an output operand. This
1274 * function may be invoked multiple times on the same memory descriptor with
1275 * different output operands, and the same output operand may be specified on
1276 * multiple memory descriptors. However, specifying the same output operand on
1277 * the same memory descriptor object more than once will return an error.
1279 * The dimensions of the corresponding model operands of all the roles specified
1281 * {@link ANeuralNetworksMemoryDesc_addInputRole} and
1282 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each
1283 * other. Two dimensions are incompatible if both ranks are fully specified but
1284 * have different values, or if there is at least one axis that is fully
1285 * specified in both but has different values.
1287 * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
1288 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on the memory
1289 * descriptor before invoking {@link ANeuralNetworksMemoryDesc_finish}.
1291 * Attempting to modify a memory descriptor once
1292 * {@link ANeuralNetworksMemoryDesc_finish} has been called will return an
1295 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
1297 * Available since API level 30.
1299 * @param desc The memory descriptor to be modified.
1300 * @param compilation The compilation object. It must already have been finished
1301 * by calling {@link ANeuralNetworksCompilation_finish}, and must outlive the
1302 * memory descriptor.
1303 * @param index The index of the output argument we are referencing from the
1304 * compilation. It is an index into the outputs list passed to
1305 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1306 * the index associated with {@link
1307 * ANeuralNetworksModel_addOperand}.
1308 * @param frequency A floating-point value within the range (0.0, 1.0].
1309 * Describes how likely the memory is to be used in the specified role. This is
1310 * provided as a hint to optimize the case when multiple roles
1311 * prefer different memory locations or data layouts.
1313 * @return ANEURALNETWORKS_NO_ERROR if successful.
1315 inline int ANeuralNetworksMemoryDesc_addOutputRole(ANeuralNetworksMemoryDesc *desc,
1316 const ANeuralNetworksCompilation *compilation,
1317 int32_t index, float frequency)
1319 LOAD_FUNCTION(ANeuralNetworksMemoryDesc_addOutputRole);
1320 EXECUTE_FUNCTION_RETURN(desc, compilation, index, frequency);
1324 * Specify that a memory object will be playing the role of an input to an
1325 * execution created from a particular compilation.
1327 * The compilation and the input index fully specify an input operand. This
1328 * function may be invoked multiple times on the same memory descriptor with
1329 * different input operands, and the same input operand may be specified on
1330 * multiple memory descriptors. However, specifying the same input operand on
1331 * the same memory descriptor more than once will return an error.
1333 * The dimensions of the corresponding model operands of all the roles specified
1335 * {@link ANeuralNetworksMemoryDesc_addInputRole} and
1336 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be compatible with each
1337 * other. Two dimensions are incompatible if both ranks are fully specified but
1338 * have different values, or if there is at least one axis that is fully
1339 * specified in both but has different values.
1341 * At least one of {@link ANeuralNetworksMemoryDesc_addInputRole} and
1342 * {@link ANeuralNetworksMemoryDesc_addOutputRole} must be called on a memory
1343 * descriptor before invoking {@link ANeuralNetworksMemoryDesc_finish}.
1345 * Attempting to modify a memory descriptor once
1346 * {@link ANeuralNetworksMemoryDesc_finish} has been called will return an
1349 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
1351 * Available since API level 30.
1353 * @param desc The memory descriptor to be modified.
1354 * @param compilation The compilation object. It must already have been finished
1355 * by calling {@link ANeuralNetworksCompilation_finish}, and must outlive the
1356 * memory descriptor.
1357 * @param index The index of the input argument we are referencing from the
1358 * compilation. It is an index into the inputs list passed to
1359 * {@link ANeuralNetworksModel_identifyInputsAndOutputs}. It is not
1360 * the index associated with {@link
1361 * ANeuralNetworksModel_addOperand}.
1362 * @param frequency A floating-point value within the range (0.0, 1.0].
1363 * Describes how likely the memory is to be used in the specified role. This is
1364 * provided as a hint to optimize the case when different roles
1365 * prefer different memory locations or data layouts.
1367 * @return ANEURALNETWORKS_NO_ERROR if successful.
1369 inline int ANeuralNetworksMemoryDesc_addInputRole(ANeuralNetworksMemoryDesc *desc,
1370 const ANeuralNetworksCompilation *compilation,
1371 uint32_t index, float frequency)
1373 LOAD_FUNCTION(ANeuralNetworksMemoryDesc_addInputRole);
1374 EXECUTE_FUNCTION_RETURN(desc, compilation, index, frequency);
1378 * Set the dimensional information of the memory descriptor.
1380 * The specified dimensions must be compatible with the dimensions of the
1381 * corresponding model operands of all the roles specified by
1382 * {@link ANeuralNetworksMemoryDesc_addInputRole} and
1383 * {@link ANeuralNetworksMemoryDesc_addOutputRole}. Two dimensions are
1384 * incompatible if both ranks are fully specified but have different values, or
1385 * if there is at least one axis that is fully specified in both but has
1388 * Attempting to modify a memory descriptor once
1389 * {@link ANeuralNetworksMemoryDesc_finish} has been called will return an
1392 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
1394 * Available since API level 30.
1396 * @param desc The memory descriptor to be modified.
1397 * @param rank The number of dimensions. Must be 0 for scalars.
1398 * @param dimensions An array of dimensions. An entry with the value 0 indicates
1399 * that the corresponding axis has an unknown size.
1401 * @return ANEURALNETWORKS_NO_ERROR if successful.
1403 inline int ANeuralNetworksMemoryDesc_setDimensions(ANeuralNetworksMemoryDesc *desc, uint32_t rank,
1404 const uint32_t *dimensions)
1406 LOAD_FUNCTION(ANeuralNetworksMemoryDesc_setDimensions);
1407 EXECUTE_FUNCTION_RETURN(desc, rank, dimensions);
1411 * Indicate that we have finished modifying a memory descriptor. Required before
1413 * {@link ANeuralNetworksMemory_createFromDesc}.
1415 * This function must only be called once for a given memory descriptor.
1417 * See {@link ANeuralNetworksMemoryDesc} for information on multithreaded usage.
1419 * Available since API level 30.
1421 * @param desc The memory descriptor to be finished.
1423 * @return ANEURALNETWORKS_NO_ERROR if successful.
1425 inline int ANeuralNetworksMemoryDesc_finish(ANeuralNetworksMemoryDesc *desc)
1427 LOAD_FUNCTION(ANeuralNetworksMemoryDesc_finish);
1428 EXECUTE_FUNCTION_RETURN(desc);
1432 * Creates a memory object from a memory descriptor.
1434 * The memory object is created with an uninitialized buffer. A memory object
1435 * with an uninitialized buffer may only be used according to the roles
1437 * {@link ANeuralNetworksMemoryDesc_addOutputRole}, or as the destination memory
1439 * {@link ANeuralNetworksMemory_copy}. The buffer of a memory object is
1440 * initialized after the memory object is used as an output in a successful
1441 * execution, or used as the destination memory in a successful {@link
1442 * ANeuralNetworksMemory_copy}. A memory object with an initialized buffer may
1443 * be used according to all roles specified in
1444 * {@link ANeuralNetworksMemoryDesc}, or as the source or destination memory in
1445 * {@link ANeuralNetworksMemory_copy}. The buffer of a memory object will return
1446 * to the uninitialized state if the memory object is used as an output in a
1447 * failed execution, or used as the destination memory in a failed {@link
1448 * ANeuralNetworksMemory_copy}.
1450 * The dimensions of the memory descriptor are deduced from the dimensions of
1451 * the corresponding model operands of all the roles specified by
1452 * {@link ANeuralNetworksMemoryDesc_addInputRole} and
1453 * {@link ANeuralNetworksMemoryDesc_addOutputRole}, as well as the dimensions
1454 * set by the call to {@link ANeuralNetworksMemoryDesc_setDimensions}, if any.
1455 * The memory descriptor may have unspecified dimensions or rank. In such a
1456 * case, the same memory object may be used with different shapes of outputs in
1457 * different executions. When the memory is used as an input, the input shape
1458 * must be the same as the output shape from the last execution using this
1459 * memory object as an output, or the last
1460 * {@link ANeuralNetworkMemory_copy} using this memory object as the destination
1461 * memory. Creating a memory object with unspecified dimensions or rank may fail
1462 * for certain sets of roles.
1464 * Using the memory in roles or shapes that are not compatible with the rules
1465 * specified above will return an error.
1467 * When calling {@link ANeuralNetworksExecution_setInputFromMemory} or
1468 * {@link ANeuralNetworksExecution_setOutputFromMemory} with the memory object,
1469 * both offset and length must be set to zero and the entire memory region will
1470 * be associated with the specified input or output operand.
1472 * Calling {@link ANeuralNetworksModel_setOperandValueFromMemory} with the
1473 * memory created from this function will return an error.
1475 * {@link ANeuralNetworksMemory_free} must be called once the memory is no
1478 * Attempting to create memory from an unfinished memory descriptor will return
1481 * The provided {@link ANeuralNetworksMemoryDesc} need not outlive the
1482 * {@link ANeuralNetworksMemory} object.
1484 * Available since API level 30.
1486 * @param desc The memory descriptor.
1487 * @param memory The memory object to be created.
1488 * Set to NULL if unsuccessful.
1490 * @return ANEURALNETWORKS_NO_ERROR if successful; ANEURALNETWORKS_OP_FAILED if
1491 * the memory is created with unspecified dimensions or rank and it is not
1492 * supported for this set of roles.
1494 inline int ANeuralNetworksMemory_createFromDesc(const ANeuralNetworksMemoryDesc *desc,
1495 ANeuralNetworksMemory **memory)
1497 LOAD_FUNCTION(ANeuralNetworksMemory_createFromDesc);
1498 EXECUTE_FUNCTION_RETURN(desc, memory);
1502 * Copies data from one memory object to another.
1504 * If at most one of the src and dst is created from
1505 * {@link ANeuralNetworksMemory_createFromDesc}, the src and dst must have the
1506 * same logical size:
1507 * - If the memory is created from {@link ANeuralNetworksMemory_createFromFd},
1508 * or if it is created from {@link
1509 * ANeuralNetworksMemory_createFromAHardwareBuffer} with format of
1510 * AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size of the memory.
1511 * - If the memory is created from
1512 * {@link ANeuralNetworksMemory_createFromAHardwareBuffer} with a format other
1513 * than AHARDWAREBUFFER_FORMAT_BLOB, the logical size equals the size when there
1514 * is no padding and the data is tightly packed. This function may fail if the
1515 * AHardwareBuffer cannot be accessed.
1516 * - If the memory is created from {@link ANeuralNetworksMemory_createFromDesc},
1517 * the logical size equals the size indicated by the {@link OperandCode}
1518 * multiplied by the number of elements. This function will fail if the number
1519 * of elements is unknown.
1521 * If both src and dst are created from {@link
1522 * ANeuralNetworksMemory_createFromDesc}, they must have compatible dimensions.
1523 * Two dimensions are incompatible if both ranks are fully specified but have
1524 * different values, or if there is at least one axis that is fully specified in
1525 * both but has different values. The dst may have unspecified dimensions or
1526 * rank. In such a case, the dimensions of dst will get updated according to the
1527 * dimensions of the src.
1529 * In both cases, if the src is created from
1530 * {@link ANeuralNetworksMemory_createFromDesc}, it must have been used as an
1531 * output in a successful execution, or used as the destination memory in a
1533 * {@link ANeuralNetworksMemory_copy}.
1535 * The src and dst may have different data layout, in which case the data
1536 * copying is performed logically with data layout transformation.
1538 * Available since API level 30.
1540 * @param src The source memory object.
1541 * @param dst The destination memory object.
1543 * @return ANEURALNETWORKS_NO_ERROR if successful.
1545 inline int ANeuralNetworksMemory_copy(const ANeuralNetworksMemory *src,
1546 const ANeuralNetworksMemory *dst)
1548 LOAD_FUNCTION(ANeuralNetworksMemory_copy);
1549 EXECUTE_FUNCTION_RETURN(src, dst);
1554 #endif // __NEURAL_NETWORKS_SHIM_H__