[Tizen5.0-M1] Remove runtimes/logging (#1021)
author: 서상민/동작제어Lab(SR)/Senior Engineer/삼성전자 <sangmin7.seo@samsung.com>
Thu, 3 May 2018 01:22:25 +0000 (10:22 +0900)
committer: GitHub Enterprise <noreply-CODE@samsung.com>
Thu, 3 May 2018 01:22:25 +0000 (10:22 +0900)
As decided in #982, `runtimes/logging` is not in the scope of the Tizen 5.0
M1 release.  This patch removes it.

Signed-off-by: Sangmin Seo <sangmin7.seo@samsung.com>
runtimes/CMakeLists.txt
runtimes/logging/CMakeLists.txt [deleted file]
runtimes/logging/include/operand.def [deleted file]
runtimes/logging/include/operation.def [deleted file]
runtimes/logging/src/nnapi_logging.cc [deleted file]

index 36315d8..fd3e51a 100644 (file)
@@ -6,5 +6,4 @@ endif(BUILD_NN_RUNTIME)
 
 set(NNAPI_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/include)
 add_subdirectory(template)
-add_subdirectory(logging)
 add_subdirectory(pure_arm_compute)
diff --git a/runtimes/logging/CMakeLists.txt b/runtimes/logging/CMakeLists.txt
deleted file mode 100644 (file)
index 2df3e90..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-file(GLOB_RECURSE NNAPI_LOGGING_SRCS "src/*.cc")
-
-add_library(neuralnetworks SHARED ${NNAPI_LOGGING_SRCS})
-target_include_directories(neuralnetworks PUBLIC ${NNAPI_INCLUDE_DIR})
-target_include_directories(neuralnetworks PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
diff --git a/runtimes/logging/include/operand.def b/runtimes/logging/include/operand.def
deleted file mode 100644 (file)
index c570cf0..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-// Extracted from tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
-//
-// NNAPI_OPERAND(NAME, CODE)
-#ifndef NNAPI_OPERAND
-#error NNAPI_OPERAND should be defined
-#endif
-NNAPI_OPERAND(ANEURALNETWORKS_FLOAT32, 0)
-NNAPI_OPERAND(ANEURALNETWORKS_INT32, 1)
-NNAPI_OPERAND(ANEURALNETWORKS_UINT32, 2)
-NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_FLOAT32, 3)
-NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_INT32, 4)
-NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 5)
diff --git a/runtimes/logging/include/operation.def b/runtimes/logging/include/operation.def
deleted file mode 100644 (file)
index 32e684d..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-// Extracted from tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
-//
-// NNAPI_OPERATION(NAME, CODE)
-#ifndef NNAPI_OPERATION
-#error NNAPI_OPERATION should be defined
-#endif
-NNAPI_OPERATION(ANEURALNETWORKS_AVERAGE_POOL_2D, 1)
-NNAPI_OPERATION(ANEURALNETWORKS_CONCATENATION, 2)
-NNAPI_OPERATION(ANEURALNETWORKS_CONV_2D, 3)
-NNAPI_OPERATION(ANEURALNETWORKS_DEPTHWISE_CONV_2D, 4)
-NNAPI_OPERATION(ANEURALNETWORKS_FULLY_CONNECTED, 9)
-NNAPI_OPERATION(ANEURALNETWORKS_MAX_POOL_2D, 17)
-NNAPI_OPERATION(ANEURALNETWORKS_RESHAPE, 22)
-NNAPI_OPERATION(ANEURALNETWORKS_SOFTMAX, 25)
diff --git a/runtimes/logging/src/nnapi_logging.cc b/runtimes/logging/src/nnapi_logging.cc
deleted file mode 100644 (file)
index 9784125..0000000
+++ /dev/null
@@ -1,381 +0,0 @@
-#include <nnapi.h>
-
-#include <stdexcept>
-#include <iostream>
-
-#include <string>
-#include <map>
-
-#include <cassert>
-
-#include <boost/format.hpp>
-
-namespace
-{
-
-class OperationCodeResolver
-{
-public:
-  OperationCodeResolver();
-
-public:
-  std::string resolve(int code) const;
-
-private:
-  void setName(int code, const std::string &name);
-
-private:
-  std::map<int, std::string> _table;
-
-public:
-  static const OperationCodeResolver &access()
-  {
-    static const OperationCodeResolver resolver;
-
-    return resolver;
-  }
-};
-
-OperationCodeResolver::OperationCodeResolver()
-{
-#define NNAPI_OPERATION(NAME, CODE) setName(CODE, #NAME);
-#include "operation.def"
-#undef NNAPI_OPERATION
-}
-
-void OperationCodeResolver::setName(int code, const std::string &name)
-{
-  assert(_table.find(code) == _table.end());
-  _table[code] = name;
-}
-
-std::string OperationCodeResolver::resolve(int code) const
-{
-  auto it = _table.find(code);
-
-  if (it == _table.end())
-  {
-    return boost::str(boost::format("unknown(%d)") % code);
-  }
-
-  return it->second;
-}
-
-class OperandCodeResolver
-{
-public:
-  OperandCodeResolver();
-
-public:
-  std::string resolve(int code) const;
-
-private:
-  void setName(int code, const std::string &name);
-
-private:
-  std::map<int, std::string> _table;
-
-public:
-  static const OperandCodeResolver &access()
-  {
-    static const OperandCodeResolver resolver;
-
-    return resolver;
-  }
-};
-
-OperandCodeResolver::OperandCodeResolver()
-{
-#define NNAPI_OPERAND(NAME, CODE) setName(CODE, #NAME);
-#include "operand.def"
-#undef NNAPI_OPERAND
-}
-
-void OperandCodeResolver::setName(int code, const std::string &name)
-{
-  assert(_table.find(code) == _table.end());
-  _table[code] = name;
-}
-
-std::string OperandCodeResolver::resolve(int code) const
-{
-  auto it = _table.find(code);
-
-  if (it == _table.end())
-  {
-    return boost::str(boost::format("unknown(%d)") % code);
-  }
-
-  return it->second;
-}
-}
-
-//
-// Asynchronous Event
-//
-struct ANeuralNetworksEvent
-{
-};
-
-ResultCode ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
-{
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksEvent_free(ANeuralNetworksEvent *event)
-{
-  delete event;
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Memory
-//
-struct ANeuralNetworksMemory
-{
-  // 1st approach - Store all the data inside ANeuralNetworksMemory object
-  // 2nd approach - Store metadata only, and defer data loading as much as possible
-};
-
-ResultCode ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
-                                              ANeuralNetworksMemory **memory)
-{
-  std::cout << __FUNCTION__ << "()" << std::endl;
-  *memory = new ANeuralNetworksMemory;
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory)
-{
-  delete memory;
-  std::cout << __FUNCTION__ << "(" << memory << ")" << std::endl;
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Model
-//
-struct ANeuralNetworksModel
-{
-  // ANeuralNetworksModel should be a factory for Graph IR (a.k.a ISA Frontend)
-  // TODO Record # of operands
-  uint32_t numOperands;
-
-  ANeuralNetworksModel() : numOperands(0)
-  {
-    // DO NOTHING
-  }
-};
-
-ResultCode ANeuralNetworksModel_create(ANeuralNetworksModel **model)
-{
-  std::cout << __FUNCTION__ << "(" << model << ")" << std::endl;
-
-  *model = new ANeuralNetworksModel;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_free(ANeuralNetworksModel *model)
-{
-  std::cout << __FUNCTION__ << "(" << model << ")" << std::endl;
-
-  delete model;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
-                                           const ANeuralNetworksOperandType *type)
-{
-  std::cout << __FUNCTION__ << "(model: " << model
-            << ", type: " << ::OperandCodeResolver::access().resolve(type->type) << ")"
-            << std::endl;
-
-  auto id = model->numOperands;
-
-  std::cout << "  id: " << id << std::endl;
-  std::cout << "  rank: " << type->dimensionCount << std::endl;
-  for (uint32_t dim = 0; dim < type->dimensionCount; ++dim)
-  {
-    std::cout << "    dim(" << dim << "): " << type->dimensions[dim] << std::endl;
-  }
-
-  model->numOperands += 1;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
-                                                const void *buffer, size_t length)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ", index: " << index << ")" << std::endl;
-
-  // TODO Implement this!
-  // NOTE buffer becomes invalid after ANeuralNetworksModel_setOperandValue returns
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
-                                                          int32_t index,
-                                                          const ANeuralNetworksMemory *memory,
-                                                          size_t offset, size_t length)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ", index: " << index << ")" << std::endl;
-
-  // TODO Implement this!
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
-                                             ANeuralNetworksOperationType type, uint32_t inputCount,
-                                             const uint32_t *inputs, uint32_t outputCount,
-                                             const uint32_t *outputs)
-{
-  std::cout << __FUNCTION__ << "(model: " << model
-            << ", type: " << ::OperationCodeResolver::access().resolve(type)
-            << ", inputCount: " << inputCount << ", outputCount: " << outputCount << ")"
-            << std::endl;
-
-  for (uint32_t input = 0; input < inputCount; ++input)
-  {
-    std::cout << "  input(" << input << "): " << inputs[input] << std::endl;
-  }
-  for (uint32_t output = 0; output < outputCount; ++output)
-  {
-    std::cout << "  output(" << output << "): " << outputs[output] << std::endl;
-  }
-
-  // TODO Implement this!
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model,
-                                                         uint32_t inputCount,
-                                                         const uint32_t *inputs,
-                                                         uint32_t outputCount,
-                                                         const uint32_t *outputs)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
-
-  for (uint32_t input = 0; input < inputCount; ++input)
-  {
-    std::cout << "  input(" << input << "): " << inputs[input] << std::endl;
-  }
-  for (uint32_t output = 0; output < outputCount; ++output)
-  {
-    std::cout << "  output(" << output << "): " << outputs[output] << std::endl;
-  }
-
-  // TODO Implement this!
-  // NOTE It seems that this function identifies the input and output of the whole model
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
-
-  // TODO Implement this!
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Compilation
-//
-struct ANeuralNetworksCompilation
-{
-  // ANeuralNetworksCompilation should hold a compiled IR
-};
-
-ResultCode ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
-                                             ANeuralNetworksCompilation **compilation)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
-
-  *compilation = new ANeuralNetworksCompilation;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
-{
-  std::cout << __FUNCTION__ << "(compilation: " << compilation << ")" << std::endl;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Execution
-//
-struct ANeuralNetworksExecution
-{
-  // ANeuralNetworksExecution corresponds to NPU::Interp::Session
-};
-
-ResultCode ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
-                                           ANeuralNetworksExecution **execution)
-{
-  std::cout << __FUNCTION__ << "(compilation: " << compilation << ")" << std::endl;
-
-  *execution = new ANeuralNetworksExecution;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-// ANeuralNetworksExecution_setInput and ANeuralNetworksExecution_setOutput specify HOST buffer for
-// input/output
-ResultCode ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
-                                             const ANeuralNetworksOperandType *type,
-                                             const void *buffer, size_t length)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
-
-  if (type == nullptr)
-    std::cout << "nullptr)" << std::endl;
-  else
-    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
-
-  // Q: Should we transfer input from HOST to DEVICE here, or in
-  // ANeuralNetworksExecution_startCompute?
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
-                                              const ANeuralNetworksOperandType *type, void *buffer,
-                                              size_t length)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
-
-  if (type == nullptr)
-    std::cout << "nullptr)" << std::endl;
-  else
-    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
-                                                 ANeuralNetworksEvent **event)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
-
-  *event = new ANeuralNetworksEvent;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
-
-  delete execution;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}