int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
                                      ANeuralNetworksCompilation **compilation)
{
  // NNAPI contract: both the model and the output pointer must be non-null.
  if ((model == nullptr) || (compilation == nullptr))
  {
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  std::shared_ptr<const internal::tflite::Model> internal;

  // Take ownership of the (finished) internal model out of the model object.
  model->release(internal);

  // Use the non-throwing form of new: a plain new never returns nullptr (it
  // throws std::bad_alloc on failure), so the OUT_OF_MEMORY check below would
  // otherwise be dead code.
  ANeuralNetworksCompilation *compilation_ptr =
      new (std::nothrow) ANeuralNetworksCompilation(internal);
  if (compilation_ptr == nullptr)
  {
    return ANEURALNETWORKS_OUT_OF_MEMORY;
  }
  *compilation = compilation_ptr;

  return ANEURALNETWORKS_NO_ERROR;
}
int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
int32_t preference)
{
+ if (compilation == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ // NOTE Pure CL runtime currently ignores this API call
// TODO Use preference
return ANEURALNETWORKS_NO_ERROR;
int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
{
+ if (compilation == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
arm_compute::CLScheduler::get().default_init();
const auto &operands = compilation->plan().model().operands();
#include "event.h"
-int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event) { return ANEURALNETWORKS_NO_ERROR; }
+int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
+{
+ if (event == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event)
{
  // Release the event. delete on a null pointer is a no-op, so no explicit
  // null check is required here.
  delete event;
}
#include "compilation.h"
#include "execution.h"
#include "profiling.h"
#include "event.h"

#include "internal/VectorSource.h"
#include "internal/MatrixSource.h"

#include <new>
int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
ANeuralNetworksExecution **execution)
{
+ if ((compilation == nullptr) || (execution == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
std::shared_ptr<const ::internal::arm_compute::Plan> plan;
compilation->publish(plan);
- *execution = new ANeuralNetworksExecution{plan};
+ ANeuralNetworksExecution *execution_ptr = new ANeuralNetworksExecution{plan};
+ if (execution_ptr == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+ *execution = execution_ptr;
std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>>
broadcasting_tensor_shape;
const ANeuralNetworksOperandType *type, const void *buffer,
size_t length)
{
+ // Don't check type
+ // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
+ // If the input or output is optional and omitted then it need not have a fully specified tensor
+ // operand type
+ if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
const auto &operands = execution->plan().model().operands();
// TODO Check type conflicts
const ANeuralNetworksOperandType *type, void *buffer,
size_t length)
{
+ // Don't check type
+ // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
+ // If the input or output is optional and omitted then it need not have a fully specified tensor
+ // operand type
+ if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
const auto &operands = execution->plan().model().operands();
// TODO Check type conflicts
int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
ANeuralNetworksEvent **event)
{
- const bool sync = profiling::Context::get().sync().enabled();
-
- assert(execution != nullptr);
+ if ((execution == nullptr) || (event == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+ const bool sync = profiling::Context::get().sync().enabled();
const auto &plan = execution->plan();
const auto &model = plan.model();
+ // TODO: Handle event
+ ANeuralNetworksEvent *event_ptr = new ANeuralNetworksEvent{};
+ if (event_ptr == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+ *event = event_ptr;
+
// Set input(s)
for (uint32_t n = 0; n < model.inputs.size(); ++n)
{
const ANeuralNetworksMemory *memory, size_t offset,
size_t length)
{
+ if ((execution == nullptr) || (memory == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
assert(false);
return -1;
}
const ANeuralNetworksMemory *memory, size_t offset,
size_t length)
{
+ if ((execution == nullptr) || (memory == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
assert(false);
return -1;
}
int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
ANeuralNetworksMemory **memory)
{
- *memory = new ANeuralNetworksMemory{size, protect, fd, offset};
+ if (memory == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ ANeuralNetworksMemory *memory_ptr = new ANeuralNetworksMemory{size, protect, fd, offset};
+ if (memory_ptr == nullptr)
+ {
+ return ANEURALNETWORKS_OUT_OF_MEMORY;
+ }
+ *memory = memory_ptr;
return ANEURALNETWORKS_NO_ERROR;
}
int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
{
  // NNAPI contract: the output pointer must be non-null.
  if (model == nullptr)
  {
    return ANEURALNETWORKS_UNEXPECTED_NULL;
  }

  // Use the non-throwing form of new: a plain new never returns nullptr (it
  // throws std::bad_alloc on failure), so the OUT_OF_MEMORY check below would
  // otherwise be dead code.
  ANeuralNetworksModel *model_ptr = new (std::nothrow) ANeuralNetworksModel{};
  if (model_ptr == nullptr)
  {
    return ANEURALNETWORKS_OUT_OF_MEMORY;
  }
  *model = model_ptr;

  return ANEURALNETWORKS_NO_ERROR;
}
int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
const ANeuralNetworksOperandType *type)
{
+ if ((model == nullptr) || (type == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
// ASSUME A tensor operand should consists of fp32 or int32 values.
// NOTE We do not care about scala operands.
assert((type->dimensionCount == 0) || (type->type == ANEURALNETWORKS_TENSOR_FLOAT32 ||
int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
const void *buffer, size_t length)
{
+ if ((model == nullptr) || ((buffer == nullptr) && (length != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
const internal::tflite::operand::Index ind{index};
auto &obj = model->deref().operands().at(ind);
const ANeuralNetworksMemory *memory,
size_t offset, size_t length)
{
+ if ((model == nullptr) || (memory == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
const internal::tflite::operand::Index ind{index};
auto &obj = model->deref().operands().at(ind);
const uint32_t *inputs, uint32_t outputCount,
const uint32_t *outputs)
{
+ if ((model == nullptr) || ((inputs == nullptr) && (inputCount != 0)) ||
+ ((outputs == nullptr) && (outputCount != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
switch (type)
{
case ANEURALNETWORKS_ADD:
const uint32_t *inputs, uint32_t outputCount,
const uint32_t *outputs)
{
+ if ((model == nullptr) || ((inputs == nullptr) && (inputCount != 0)) ||
+ ((outputs == nullptr) && (outputCount != 0)))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
switch (type)
{
case ANEURALNETWORKS_CAST_EX:
const uint32_t *inputs, uint32_t outputCount,
const uint32_t *outputs)
{
+ if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
// NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI
// functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index
//
return ANEURALNETWORKS_NO_ERROR;
}
-int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) { return ANEURALNETWORKS_NO_ERROR; }
+int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
+{
+ if (model == nullptr)
+ {
+ return ANEURALNETWORKS_UNEXPECTED_NULL;
+ }
+
+ return ANEURALNETWORKS_NO_ERROR;
+}
//
// ANeuralNetworksModel