Move memory and model NNAPI implementation to frontend (#2133)
author오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Wed, 1 Aug 2018 07:41:25 +0000 (16:41 +0900)
committer박세희/동작제어Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Wed, 1 Aug 2018 07:41:25 +0000 (16:41 +0900)
- Move NNAPI implementation in model.cc into frontend
- Move NNAPI implementation in memory.cc into frontend

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/src/frontend/memory.cc [new file with mode: 0644]
runtimes/neurun/src/frontend/model.cc [new file with mode: 0644]
runtimes/neurun/src/memory.cc
runtimes/neurun/src/model.cc

diff --git a/runtimes/neurun/src/frontend/memory.cc b/runtimes/neurun/src/frontend/memory.cc
new file mode 100644 (file)
index 0000000..9e3dea9
--- /dev/null
@@ -0,0 +1,14 @@
+#include <NeuralNetworks.h>
+#include <sys/mman.h>
+
+#include "memory.h"
+
+int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
+                                       ANeuralNetworksMemory **memory)
+{
+  *memory = new ANeuralNetworksMemory{size, protect, fd, offset};
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; }
diff --git a/runtimes/neurun/src/frontend/model.cc b/runtimes/neurun/src/frontend/model.cc
new file mode 100644 (file)
index 0000000..8135e06
--- /dev/null
@@ -0,0 +1,246 @@
+#include <NeuralNetworks.h>
+#include <NeuralNetworksEx.h>
+
+#include <cassert>
+#include <stdexcept>
+
+#include "model.h"
+#include "memory.h"
+
+int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
+{
+  *model = new ANeuralNetworksModel{};
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
+
+int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
+                                    const ANeuralNetworksOperandType *type)
+{
+  // ASSUME A tensor operand always consists of fp32 values
+  // NOTE We do not care about scalar operands.
+  assert(!(type->dimensionCount > 1) || (type->type == 3 /* ANEURALNETWORKS_TENSOR_FLOAT32 */));
+
+  internal::tflite::operand::Shape shape(type->dimensionCount);
+
+  for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
+  {
+    shape.dim(axis) = type->dimensions[axis];
+  }
+
+  shape.set(type->type, type->scale, type->zeroPoint);
+
+  model->deref().operands().append(shape);
+
+  // NOTE We do NOT allocate CLTensor here as we do not know how to interpret this one.
+  //      TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
+  //      a convolution kernel.
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
+                                         const void *buffer, size_t length)
+{
+  const internal::tflite::operand::Index ind{index};
+  auto &obj = model->deref().operands().at(ind);
+
+  using internal::tflite::operand::CachedData;
+
+  obj.data<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length);
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
+                                                   const ANeuralNetworksMemory *memory,
+                                                   size_t offset, size_t length)
+{
+  const internal::tflite::operand::Index ind{index};
+  auto &obj = model->deref().operands().at(ind);
+
+  using internal::tflite::operand::ExternalData;
+
+  obj.data<ExternalData>(reinterpret_cast<const uint8_t *>(memory->base() + offset), length);
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
+                                      ANeuralNetworksOperationType type, uint32_t inputCount,
+                                      const uint32_t *inputs, uint32_t outputCount,
+                                      const uint32_t *outputs)
+{
+  switch (type)
+  {
+    case ANEURALNETWORKS_CONV_2D:
+    {
+      // inputCount is either 7 or 10 according to NN API specification.
+      //  - Padding is implicit when inputCount is 7
+      //  - Padding is explicit when inputCount is 10
+      assert(inputCount == 7 || inputCount == 10);
+      assert(outputCount == 1);
+
+      if (inputCount == 7)
+      {
+        using internal::tflite::op::Conv2D::implicit::Param;
+        using internal::tflite::op::Conv2D::implicit::Node;
+
+        // Add 'operations'
+        auto &operations = model->deref().operations();
+
+        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+      }
+      else
+      {
+        throw std::runtime_error{"Explicit padding in Conv2D is not supported, yet"};
+      }
+
+      break;
+    }
+    case ANEURALNETWORKS_MAX_POOL_2D:
+    {
+      // inputCount is either 7 or 10 according to NN API specification.
+      //  - Padding is implicit when inputCount is 7
+      //  - Padding is explicit when inputCount is 10
+      assert(inputCount == 7 || inputCount == 10);
+      assert(outputCount == 1);
+
+      if (inputCount == 7)
+      {
+        using internal::tflite::op::MaxPool2D::implicit::Param;
+        using internal::tflite::op::MaxPool2D::implicit::Node;
+
+        // Add 'operations'
+        auto &operations = model->deref().operations();
+
+        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+      }
+      else
+      {
+        throw std::runtime_error{"Explicit padding in MaxPool2D is not supported, yet"};
+      }
+
+      break;
+    }
+    case ANEURALNETWORKS_AVERAGE_POOL_2D:
+    {
+      // inputCount is either 7 or 10 according to NN API specification.
+      //  - Padding is implicit when inputCount is 7
+      //  - Padding is explicit when inputCount is 10
+      assert(inputCount == 7 || inputCount == 10);
+      assert(outputCount == 1);
+
+      if (inputCount == 7)
+      {
+        using internal::tflite::op::AvgPool2D::implicit::Param;
+        using internal::tflite::op::AvgPool2D::implicit::Node;
+
+        // Add 'operations'
+        auto &operations = model->deref().operations();
+
+        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+      }
+      else
+      {
+        throw std::runtime_error{"Explicit padding in AvgPool2D is not supported, yet"};
+      }
+
+      break;
+    }
+    case ANEURALNETWORKS_CONCATENATION:
+    {
+      using internal::tflite::op::Concat::Param;
+      using internal::tflite::op::Concat::Node;
+
+      // Add 'operations'
+      auto &operations = model->deref().operations();
+
+      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+      break;
+    }
+    case ANEURALNETWORKS_RESHAPE:
+    {
+      using internal::tflite::op::Reshape::Param;
+      using internal::tflite::op::Reshape::Node;
+
+      // Add 'operations'
+      auto &operations = model->deref().operations();
+
+      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+      break;
+    }
+    case ANEURALNETWORKS_FULLY_CONNECTED:
+    {
+      using internal::tflite::op::FullyConnected::Param;
+      using internal::tflite::op::FullyConnected::Node;
+
+      // Add 'operations'
+      auto &operations = model->deref().operations();
+
+      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+      break;
+    }
+    case ANEURALNETWORKS_SOFTMAX:
+    {
+      using internal::tflite::op::Softmax::Param;
+      using internal::tflite::op::Softmax::Node;
+
+      // Add 'operations'
+      auto &operations = model->deref().operations();
+
+      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+      break;
+    }
+    default:
+      throw std::runtime_error{"Not supported operation"};
+  };
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
+                                        ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
+                                        const uint32_t *inputs, uint32_t outputCount,
+                                        const uint32_t *outputs)
+{
+  switch (type)
+  {
+    default:
+      throw std::runtime_error{"Not supported operation"};
+  }
+}
+
+int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
+                                                  const uint32_t *inputs, uint32_t outputCount,
+                                                  const uint32_t *outputs)
+{
+  // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI
+  //      functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index
+  //
+  //      ANeuralNetworksModel_identifyInputsAndOutputs, however, uses uint32_t to represent operand
+  //      index.
+  //
+  //      Below, static_cast<int>(...) is introduced to eliminate compiler warning.
+  for (uint32_t n = 0; n < inputCount; ++n)
+  {
+    const ::internal::tflite::operand::Index ind{static_cast<int>(inputs[n])};
+    model->deref().inputs.emplace_back(ind);
+  }
+
+  for (uint32_t n = 0; n < outputCount; ++n)
+  {
+    const ::internal::tflite::operand::Index ind{static_cast<int>(outputs[n])};
+    model->deref().outputs.emplace_back(ind);
+  }
+
+  return ANEURALNETWORKS_NO_ERROR;
+}
+
+int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) { return ANEURALNETWORKS_NO_ERROR; }
index 9fca307..3f02d58 100644 (file)
@@ -3,16 +3,6 @@
 
 #include "memory.h"
 
-int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
-                                       ANeuralNetworksMemory **memory)
-{
-  *memory = new ANeuralNetworksMemory{size, protect, fd, offset};
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory) { delete memory; }
-
 //
 // ANeuralNetworksMemory
 //
index e6509d1..088803b 100644 (file)
@@ -1,249 +1,7 @@
 #include <NeuralNetworks.h>
 #include <NeuralNetworksEx.h>
 
-#include <cassert>
-#include <stdexcept>
-
 #include "model.h"
-#include "memory.h"
-
-int ANeuralNetworksModel_create(ANeuralNetworksModel **model)
-{
-  *model = new ANeuralNetworksModel{};
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksModel_free(ANeuralNetworksModel *model) { delete model; }
-
-int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
-                                    const ANeuralNetworksOperandType *type)
-{
-  // ASSUME A tensor operand always consists of fp32 values
-  // NOTE We do not care about scala operands.
-  assert(!(type->dimensionCount > 1) || (type->type == 3 /* ANEURALNETWORKS_TENSOR_FLOAT32 */));
-
-  internal::tflite::operand::Shape shape(type->dimensionCount);
-
-  for (uint32_t axis = 0; axis < type->dimensionCount; ++axis)
-  {
-    shape.dim(axis) = type->dimensions[axis];
-  }
-
-  shape.set(type->type, type->scale, type->zeroPoint);
-
-  model->deref().operands().append(shape);
-
-  // NOTE We do NOT allocate CLTensor here as we do not how to interpret this one.
-  //      TensorFlow Lite may interpret a rank-4 tensor either as a feature map (with batch) or
-  //      a convolution kernel.
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
-                                         const void *buffer, size_t length)
-{
-  const internal::tflite::operand::Index ind{index};
-  auto &obj = model->deref().operands().at(ind);
-
-  using internal::tflite::operand::CachedData;
-
-  obj.data<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length);
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model, int32_t index,
-                                                   const ANeuralNetworksMemory *memory,
-                                                   size_t offset, size_t length)
-{
-  const internal::tflite::operand::Index ind{index};
-  auto &obj = model->deref().operands().at(ind);
-
-  using internal::tflite::operand::ExternalData;
-
-  obj.data<ExternalData>(reinterpret_cast<const uint8_t *>(memory->base() + offset), length);
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
-                                      ANeuralNetworksOperationType type, uint32_t inputCount,
-                                      const uint32_t *inputs, uint32_t outputCount,
-                                      const uint32_t *outputs)
-{
-  switch (type)
-  {
-    case ANEURALNETWORKS_CONV_2D:
-    {
-      // inputCount is either 7 or 10 acccording to NN API specification.
-      //  - Padding is implicit when inputCount is 7
-      //  - Padding is explicit when inputCount is 10
-      assert(inputCount == 7 || inputCount == 10);
-      assert(outputCount == 1);
-
-      if (inputCount == 7)
-      {
-        using internal::tflite::op::Conv2D::implicit::Param;
-        using internal::tflite::op::Conv2D::implicit::Node;
-
-        // Add 'operations'
-        auto &operations = model->deref().operations();
-
-        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-      }
-      else
-      {
-        throw std::runtime_error{"Explicit padding in Conv2D is not supported, yet"};
-      }
-
-      break;
-    }
-    case ANEURALNETWORKS_MAX_POOL_2D:
-    {
-      // inputCount is either 7 or 10 acccording to NN API specification.
-      //  - Padding is implicit when inputCount is 7
-      //  - Padding is explicit when inputCount is 10
-      assert(inputCount == 7 || inputCount == 10);
-      assert(outputCount == 1);
-
-      if (inputCount == 7)
-      {
-        using internal::tflite::op::MaxPool2D::implicit::Param;
-        using internal::tflite::op::MaxPool2D::implicit::Node;
-
-        // Add 'operations'
-        auto &operations = model->deref().operations();
-
-        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-      }
-      else
-      {
-        throw std::runtime_error{"Explicit padding in MaxPool2D is not supported, yet"};
-      }
-
-      break;
-    }
-    case ANEURALNETWORKS_AVERAGE_POOL_2D:
-    {
-      // inputCount is either 7 or 10 acccording to NN API specification.
-      //  - Padding is implicit when inputCount is 7
-      //  - Padding is explicit when inputCount is 10
-      assert(inputCount == 7 || inputCount == 10);
-      assert(outputCount == 1);
-
-      if (inputCount == 7)
-      {
-        using internal::tflite::op::AvgPool2D::implicit::Param;
-        using internal::tflite::op::AvgPool2D::implicit::Node;
-
-        // Add 'operations'
-        auto &operations = model->deref().operations();
-
-        operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-      }
-      else
-      {
-        throw std::runtime_error{"Explicit padding in AvgPool2D is not supported, yet"};
-      }
-
-      break;
-    }
-    case ANEURALNETWORKS_CONCATENATION:
-    {
-      using internal::tflite::op::Concat::Param;
-      using internal::tflite::op::Concat::Node;
-
-      // Add 'operations'
-      auto &operations = model->deref().operations();
-
-      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
-      break;
-    }
-    case ANEURALNETWORKS_RESHAPE:
-    {
-      using internal::tflite::op::Reshape::Param;
-      using internal::tflite::op::Reshape::Node;
-
-      // Add 'operations'
-      auto &operations = model->deref().operations();
-
-      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
-      break;
-    }
-    case ANEURALNETWORKS_FULLY_CONNECTED:
-    {
-      using internal::tflite::op::FullyConnected::Param;
-      using internal::tflite::op::FullyConnected::Node;
-
-      // Add 'operations'
-      auto &operations = model->deref().operations();
-
-      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
-      break;
-    }
-    case ANEURALNETWORKS_SOFTMAX:
-    {
-      using internal::tflite::op::Softmax::Param;
-      using internal::tflite::op::Softmax::Node;
-
-      // Add 'operations'
-      auto &operations = model->deref().operations();
-
-      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
-
-      break;
-    }
-    default:
-      throw std::runtime_error{"Not supported operation"};
-  };
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperationEx(ANeuralNetworksModel *model,
-                                        ANeuralNetworksOperationTypeEx type, uint32_t inputCount,
-                                        const uint32_t *inputs, uint32_t outputCount,
-                                        const uint32_t *outputs)
-{
-  switch (type)
-  {
-    default:
-      throw std::runtime_error{"Not supported operation"};
-  }
-}
-
-int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model, uint32_t inputCount,
-                                                  const uint32_t *inputs, uint32_t outputCount,
-                                                  const uint32_t *outputs)
-{
-  // NOTE ::internal::tflite::operand::Index uses int as its underlying type as various NNAPI
-  //      functions such as ANeuralNetworksModel_setOperandValue use int to represent operand index
-  //
-  //      ANeuralNetworksModel_identifyInputsAndOutputs, however, uses uint32_t to represent operand
-  //      index.
-  //
-  //      Below, static_cast<int>(...) is introduced to eliminate compiler warning.
-  for (uint32_t n = 0; n < inputCount; ++n)
-  {
-    const ::internal::tflite::operand::Index ind{static_cast<int>(inputs[n])};
-    model->deref().inputs.emplace_back(ind);
-  }
-
-  for (uint32_t n = 0; n < outputCount; ++n)
-  {
-    const ::internal::tflite::operand::Index ind{static_cast<int>(outputs[n])};
-    model->deref().outputs.emplace_back(ind);
-  }
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_finish(ANeuralNetworksModel *model) { return ANEURALNETWORKS_NO_ERROR; }
 
 //
 // ANeuralNetworksModel