[pacl] NNAPI frontend exception handling by nullptr (#2164)
author오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Mon, 6 Aug 2018 05:45:12 +0000 (14:45 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Mon, 6 Aug 2018 05:45:12 +0000 (14:45 +0900)
Handle exceptional cases in the NNAPI frontend implementation by returning an error when a nullptr parameter is passed.
Same approach as the neurun implementation.

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/pure_arm_compute/src/compilation.cc
runtimes/pure_arm_compute/src/event.cc
runtimes/pure_arm_compute/src/execution.cc
runtimes/pure_arm_compute/src/memory.cc
runtimes/pure_arm_compute/src/model.cc

index ae54487..72ec76c 100644 (file)
@@ -3009,11 +3009,21 @@ void PlanBuilder::finalize(void) const
 int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
                                       ANeuralNetworksCompilation **compilation)
 {
+  if ((model == nullptr) || (compilation == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   std::shared_ptr<const internal::tflite::Model> internal;
 
   model->release(internal);
 
-  *compilation = new ANeuralNetworksCompilation(internal);
+  ANeuralNetworksCompilation *compilation_ptr = new ANeuralNetworksCompilation(internal);
+  if (compilation_ptr == nullptr)
+  {
+    return ANEURALNETWORKS_OUT_OF_MEMORY;
+  }
+  *compilation = compilation_ptr;
 
   return ANEURALNETWORKS_NO_ERROR;
 }
@@ -3021,6 +3031,11 @@ int ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
 int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compilation,
                                              int32_t preference)
 {
+  if (compilation == nullptr)
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   // NOTE Pure CL runimte currently ignores this API call
   // TODO Use preference
   return ANEURALNETWORKS_NO_ERROR;
@@ -3028,6 +3043,11 @@ int ANeuralNetworksCompilation_setPreference(ANeuralNetworksCompilation *compila
 
 int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
 {
+  if (compilation == nullptr)
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   arm_compute::CLScheduler::get().default_init();
 
   const auto &operands = compilation->plan().model().operands();
index 9a406e1..47d77ca 100644 (file)
@@ -2,6 +2,14 @@
 
 #include "event.h"
 
-int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event) { return ANEURALNETWORKS_NO_ERROR; }
+int ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
+{
+  if (event == nullptr)
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
 
 void ANeuralNetworksEvent_free(ANeuralNetworksEvent *event) { delete event; }
index fabf700..90e5a2a 100644 (file)
@@ -3,6 +3,7 @@
 #include "compilation.h"
 #include "execution.h"
 #include "profiling.h"
+#include "event.h"
 
 #include "internal/VectorSource.h"
 #include "internal/MatrixSource.h"
@@ -286,9 +287,19 @@ static void asTensorSink(ANeuralNetworksExecution *execution, int32_t type, int3
 int ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
                                     ANeuralNetworksExecution **execution)
 {
+  if ((compilation == nullptr) || (execution == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   std::shared_ptr<const ::internal::arm_compute::Plan> plan;
   compilation->publish(plan);
-  *execution = new ANeuralNetworksExecution{plan};
+  ANeuralNetworksExecution *execution_ptr = new ANeuralNetworksExecution{plan};
+  if (execution_ptr == nullptr)
+  {
+    return ANEURALNETWORKS_OUT_OF_MEMORY;
+  }
+  *execution = execution_ptr;
 
   std::shared_ptr<const std::map<int, ::internal::tflite::operand::Shape>>
       broadcasting_tensor_shape;
@@ -302,6 +313,15 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32
                                       const ANeuralNetworksOperandType *type, const void *buffer,
                                       size_t length)
 {
+  // Don't check type
+  // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
+  //  If the input or output is optional and omitted then it need not have a fully specified tensor
+  //  operand type
+  if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   const auto &operands = execution->plan().model().operands();
 
   // TODO Check type conflicts
@@ -383,6 +403,15 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int3
                                        const ANeuralNetworksOperandType *type, void *buffer,
                                        size_t length)
 {
+  // Don't check type
+  // Comment about ANeuralNetworksOperandType in NeuralNetworks.h:
+  //  If the input or output is optional and omitted then it need not have a fully specified tensor
+  //  operand type
+  if ((execution == nullptr) || ((buffer == nullptr) && (length != 0)))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   const auto &operands = execution->plan().model().operands();
 
   // TODO Check type conflicts
@@ -427,13 +456,23 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int3
 int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
                                           ANeuralNetworksEvent **event)
 {
-  const bool sync = profiling::Context::get().sync().enabled();
-
-  assert(execution != nullptr);
+  if ((execution == nullptr) || (event == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
 
+  const bool sync = profiling::Context::get().sync().enabled();
   const auto &plan = execution->plan();
   const auto &model = plan.model();
 
+  // TODO: Handle event
+  ANeuralNetworksEvent *event_ptr = new ANeuralNetworksEvent{};
+  if (event_ptr == nullptr)
+  {
+    return ANEURALNETWORKS_OUT_OF_MEMORY;
+  }
+  *event = event_ptr;
+
   // Set input(s)
   for (uint32_t n = 0; n < model.inputs.size(); ++n)
   {
@@ -473,6 +512,11 @@ int ANeuralNetworksExecution_setInputFromMemory(ANeuralNetworksExecution *execut
                                                 const ANeuralNetworksMemory *memory, size_t offset,
                                                 size_t length)
 {
+  if ((execution == nullptr) || (memory == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   assert(false);
   return -1;
 }
@@ -483,6 +527,11 @@ int ANeuralNetworksExecution_setOutputFromMemory(ANeuralNetworksExecution *execu
                                                  const ANeuralNetworksMemory *memory, size_t offset,
                                                  size_t length)
 {
+  if ((execution == nullptr) || (memory == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   assert(false);
   return -1;
 }
index 9fca307..ab55f0e 100644 (file)
@@ -6,7 +6,17 @@
 int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
                                        ANeuralNetworksMemory **memory)
 {
-  *memory = new ANeuralNetworksMemory{size, protect, fd, offset};
+  if (memory == nullptr)
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
+  ANeuralNetworksMemory *memory_ptr = new ANeuralNetworksMemory{size, protect, fd, offset};
+  if (memory_ptr == nullptr)
+  {
+    return ANEURALNETWORKS_OUT_OF_MEMORY;
+  }
+  *memory = memory_ptr;
 
   return ANEURALNETWORKS_NO_ERROR;
 }
index 2400754..0ad9eec 100644 (file)
@@ -9,7 +9,19 @@
 
 int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
 {
-  *model = new ANeuralNetworksModel{};
+  if (model == nullptr)
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
+  ANeuralNetworksModel *model_ptr = new ANeuralNetworksModel{};
+
+  if (model_ptr == nullptr)
+  {
+    return ANEURALNETWORKS_OUT_OF_MEMORY;
+  }
+
+  *model = model_ptr;
 
   return ANEURALNETWORKS_NO_ERROR;
 }
@@ -19,6 +31,11 @@ void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
 int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
                                     const ANeuralNetworksOperandType *type)
 {
+  if ((model == nullptr) || (type == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   // ASSUME A tensor operand should consists of fp32 or int32 values.
   // NOTE We do not care about scala operands.
   assert((type->dimensionCount == 0) || (type->type == ANEURALNETWORKS_TENSOR_FLOAT32 ||
@@ -44,6 +61,11 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
 int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
                                          const void *buffer, size_t length)
 {
+  if ((model == nullptr) || ((buffer == nullptr) && (length != 0)))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   const internal::tflite::operand::Index ind{index};
   auto &obj = model->deref().operands().at(ind);
 
@@ -58,6 +80,11 @@ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
                                                    const ANeuralNetworksMemory *memory,
                                                    size_t offset, size_t length)
 {
+  if ((model == nullptr) || (memory == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   const internal::tflite::operand::Index ind{index};
   auto &obj = model->deref().operands().at(ind);
 
@@ -73,6 +100,12 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
                                       const uint32_t *inputs, uint32_t outputCount,
                                       const uint32_t *outputs)
 {
+  if ((model == nullptr) || ((inputs == nullptr) && (inputCount != 0)) ||
+      ((outputs == nullptr) && (outputCount != 0)))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   switch (type)
   {
     case ANEURALNETWORKS_ADD:
@@ -416,6 +449,12 @@ int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
                                         const uint32_t *inputs, uint32_t outputCount,
                                         const uint32_t *outputs)
 {
+  if ((model == nullptr) || ((inputs == nullptr) && (inputCount != 0)) ||
+      ((outputs == nullptr) && (outputCount != 0)))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   switch (type)
   {
     case ANEURALNETWORKS_CAST_EX:
@@ -477,6 +516,11 @@ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, u
                                                   const uint32_t *inputs, uint32_t outputCount,
                                                   const uint32_t *outputs)
 {
+  if ((model == nullptr) || (inputs == nullptr) || (outputs == nullptr))
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
   // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI
   //      functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index
   //
@@ -499,7 +543,15 @@ int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, u
   return ANEURALNETWORKS_NO_ERROR;
 }
 
-int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) { return ANEURALNETWORKS_NO_ERROR; }
+int ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
+{
+  if (model == nullptr)
+  {
+    return ANEURALNETWORKS_UNEXPECTED_NULL;
+  }
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
 
 //
 // ANeuralNetworksModel