From 211b15b5534b6a04a9daaa8a6ac4274bf69ee595 Mon Sep 17 00:00:00 2001
From: Sangmin Seo/동작제어Lab(SR)/Senior Engineer/Samsung Electronics
Date: Thu, 3 May 2018 10:22:25 +0900
Subject: [PATCH] [Tizen5.0-M1] Remove runtimes/logging (#1021)

As decided in #982, `runtimes/logging` is not in the scope of the Tizen 5.0 M1
release. This patch removes it.

Signed-off-by: Sangmin Seo
---
 runtimes/CMakeLists.txt                |   1 -
 runtimes/logging/CMakeLists.txt        |   5 -
 runtimes/logging/include/operand.def   |  12 --
 runtimes/logging/include/operation.def |  14 --
 runtimes/logging/src/nnapi_logging.cc  | 381 ---------------------------------
 5 files changed, 413 deletions(-)
 delete mode 100644 runtimes/logging/CMakeLists.txt
 delete mode 100644 runtimes/logging/include/operand.def
 delete mode 100644 runtimes/logging/include/operation.def
 delete mode 100644 runtimes/logging/src/nnapi_logging.cc

diff --git a/runtimes/CMakeLists.txt b/runtimes/CMakeLists.txt
index 36315d8..fd3e51a 100644
--- a/runtimes/CMakeLists.txt
+++ b/runtimes/CMakeLists.txt
@@ -6,5 +6,4 @@ endif(BUILD_NN_RUNTIME)
 set(NNAPI_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/include)
 
 add_subdirectory(template)
-add_subdirectory(logging)
 add_subdirectory(pure_arm_compute)
diff --git a/runtimes/logging/CMakeLists.txt b/runtimes/logging/CMakeLists.txt
deleted file mode 100644
index 2df3e90..0000000
--- a/runtimes/logging/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-file(GLOB_RECURSE NNAPI_LOGGING_SRCS "src/*.cc")
-
-add_library(neuralnetworks SHARED ${NNAPI_LOGGING_SRCS})
-target_include_directories(neuralnetworks PUBLIC ${NNAPI_INCLUDE_DIR})
-target_include_directories(neuralnetworks PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include)
diff --git a/runtimes/logging/include/operand.def b/runtimes/logging/include/operand.def
deleted file mode 100644
index c570cf0..0000000
--- a/runtimes/logging/include/operand.def
+++ /dev/null
@@ -1,12 +0,0 @@
-// Extracted from tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
-//
-// NNAPI_OPERAND(NAME, CODE)
-#ifndef NNAPI_OPERAND
-#error NNAPI_OPERAND should be defined
-#endif
-NNAPI_OPERAND(ANEURALNETWORKS_FLOAT32, 0)
-NNAPI_OPERAND(ANEURALNETWORKS_INT32, 1)
-NNAPI_OPERAND(ANEURALNETWORKS_UINT32, 2)
-NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_FLOAT32, 3)
-NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_INT32, 4)
-NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 5)
diff --git a/runtimes/logging/include/operation.def b/runtimes/logging/include/operation.def
deleted file mode 100644
index 32e684d..0000000
--- a/runtimes/logging/include/operation.def
+++ /dev/null
@@ -1,14 +0,0 @@
-// Extracted from tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
-//
-// NNAPI_OPERATION(NAME, CODE)
-#ifndef NNAPI_OPERATION
-#error NNAPI_OPERATION should be defined
-#endif
-NNAPI_OPERATION(ANEURALNETWORKS_AVERAGE_POOL_2D, 1)
-NNAPI_OPERATION(ANEURALNETWORKS_CONCATENATION, 2)
-NNAPI_OPERATION(ANEURALNETWORKS_CONV_2D, 3)
-NNAPI_OPERATION(ANEURALNETWORKS_DEPTHWISE_CONV_2D, 4)
-NNAPI_OPERATION(ANEURALNETWORKS_FULLY_CONNECTED, 9)
-NNAPI_OPERATION(ANEURALNETWORKS_MAX_POOL_2D, 17)
-NNAPI_OPERATION(ANEURALNETWORKS_RESHAPE, 22)
-NNAPI_OPERATION(ANEURALNETWORKS_SOFTMAX, 25)
diff --git a/runtimes/logging/src/nnapi_logging.cc b/runtimes/logging/src/nnapi_logging.cc
deleted file mode 100644
index 9784125..0000000
--- a/runtimes/logging/src/nnapi_logging.cc
+++ /dev/null
@@ -1,381 +0,0 @@
-#include <NeuralNetworks.h>
-
-#include <stdexcept>
-#include <iostream>
-
-#include <string>
-#include <map>
-
-#include <cassert>
-
-#include <boost/format.hpp>
-
-namespace
-{
-
-class OperationCodeResolver
-{
-public:
-  OperationCodeResolver();
-
-public:
-  std::string resolve(int code) const;
-
-private:
-  void setName(int code, const std::string &name);
-
-private:
-  std::map<int, std::string> _table;
-
-public:
-  static const OperationCodeResolver &access()
-  {
-    static const OperationCodeResolver resolver;
-
-    return resolver;
-  }
-};
-
-OperationCodeResolver::OperationCodeResolver()
-{
-#define NNAPI_OPERATION(NAME, CODE) setName(CODE, #NAME);
-#include "operation.def"
-#undef NNAPI_OPERATION
-}
-
-void OperationCodeResolver::setName(int code, const std::string &name)
-{
-  assert(_table.find(code) == _table.end());
-  _table[code] = name;
-}
-
-std::string OperationCodeResolver::resolve(int code) const
-{
-  auto it = _table.find(code);
-
-  if (it == _table.end())
-  {
-    return boost::str(boost::format("unknown(%d)") % code);
-  }
-
-  return it->second;
-}
-
-class OperandCodeResolver
-{
-public:
-  OperandCodeResolver();
-
-public:
-  std::string resolve(int code) const;
-
-private:
-  void setName(int code, const std::string &name);
-
-private:
-  std::map<int, std::string> _table;
-
-public:
-  static const OperandCodeResolver &access()
-  {
-    static const OperandCodeResolver resolver;
-
-    return resolver;
-  }
-};
-
-OperandCodeResolver::OperandCodeResolver()
-{
-#define NNAPI_OPERAND(NAME, CODE) setName(CODE, #NAME);
-#include "operand.def"
-#undef NNAPI_OPERAND
-}
-
-void OperandCodeResolver::setName(int code, const std::string &name)
-{
-  assert(_table.find(code) == _table.end());
-  _table[code] = name;
-}
-
-std::string OperandCodeResolver::resolve(int code) const
-{
-  auto it = _table.find(code);
-
-  if (it == _table.end())
-  {
-    return boost::str(boost::format("unknown(%d)") % code);
-  }
-
-  return it->second;
-}
-}
-
-//
-// Asynchronous Event
-//
-struct ANeuralNetworksEvent
-{
-};
-
-ResultCode ANeuralNetworksEvent_wait(ANeuralNetworksEvent *event)
-{
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksEvent_free(ANeuralNetworksEvent *event)
-{
-  delete event;
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Memory
-//
-struct ANeuralNetworksMemory
-{
-  // 1st approach - Store all the data inside ANeuralNetworksMemory object
-  // 2nd approach - Store metadata only, and defer data loading as much as possible
-};
-
-ResultCode ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset,
-                                              ANeuralNetworksMemory **memory)
-{
-  std::cout << __FUNCTION__ << "()" << std::endl;
-  *memory = new ANeuralNetworksMemory;
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksMemory_free(ANeuralNetworksMemory *memory)
-{
-  delete memory;
-  std::cout << __FUNCTION__ << "(" << memory << ")" << std::endl;
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Model
-//
-struct ANeuralNetworksModel
-{
-  // ANeuralNetworksModel should be a factory for Graph IR (a.k.a ISA Frontend)
-  // TODO Record # of operands
-  uint32_t numOperands;
-
-  ANeuralNetworksModel() : numOperands(0)
-  {
-    // DO NOTHING
-  }
-};
-
-ResultCode ANeuralNetworksModel_create(ANeuralNetworksModel **model)
-{
-  std::cout << __FUNCTION__ << "(" << model << ")" << std::endl;
-
-  *model = new ANeuralNetworksModel;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_free(ANeuralNetworksModel *model)
-{
-  std::cout << __FUNCTION__ << "(" << model << ")" << std::endl;
-
-  delete model;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
-                                           const ANeuralNetworksOperandType *type)
-{
-  std::cout << __FUNCTION__ << "(model: " << model
-            << ", type: " << ::OperandCodeResolver::access().resolve(type->type) << ")"
-            << std::endl;
-
-  auto id = model->numOperands;
-
-  std::cout << " id: " << id << std::endl;
-  std::cout << " rank: " << type->dimensionCount << std::endl;
-  for (uint32_t dim = 0; dim < type->dimensionCount; ++dim)
-  {
-    std::cout << " dim(" << dim << "): " << type->dimensions[dim] << std::endl;
-  }
-
-  model->numOperands += 1;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t index,
-                                                const void *buffer, size_t length)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ", index: " << index << ")" << std::endl;
-
-  // TODO Implement this!
-  // NOTE buffer becomes invalid after ANeuralNetworksModel_setOperandValue returns
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
-                                                          int32_t index,
-                                                          const ANeuralNetworksMemory *memory,
-                                                          size_t offset, size_t length)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ", index: " << index << ")" << std::endl;
-
-  // TODO Implement this!
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
-                                             ANeuralNetworksOperationType type, uint32_t inputCount,
-                                             const uint32_t *inputs, uint32_t outputCount,
-                                             const uint32_t *outputs)
-{
-  std::cout << __FUNCTION__ << "(model: " << model
-            << ", type: " << ::OperationCodeResolver::access().resolve(type)
-            << ", inputCount: " << inputCount << ", outputCount: " << outputCount << ")"
-            << std::endl;
-
-  for (uint32_t input = 0; input < inputCount; ++input)
-  {
-    std::cout << " input(" << input << "): " << inputs[input] << std::endl;
-  }
-  for (uint32_t output = 0; output < outputCount; ++output)
-  {
-    std::cout << " output(" << output << "): " << outputs[output] << std::endl;
-  }
-
-  // TODO Implement this!
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel *model,
-                                                         uint32_t inputCount,
-                                                         const uint32_t *inputs,
-                                                         uint32_t outputCount,
-                                                         const uint32_t *outputs)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
-
-  for (uint32_t input = 0; input < inputCount; ++input)
-  {
-    std::cout << " input(" << input << "): " << inputs[input] << std::endl;
-  }
-  for (uint32_t output = 0; output < outputCount; ++output)
-  {
-    std::cout << " output(" << output << "): " << outputs[output] << std::endl;
-  }
-
-  // TODO Implement this!
-  // NOTE It seems that this function identifies the input and output of the whole model
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksModel_finish(ANeuralNetworksModel *model)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
-
-  // TODO Implement this!
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Compilation
-//
-struct ANeuralNetworksCompilation
-{
-  // ANeuralNetworksCompilation should hold a compiled IR
-};
-
-ResultCode ANeuralNetworksCompilation_create(ANeuralNetworksModel *model,
-                                             ANeuralNetworksCompilation **compilation)
-{
-  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
-
-  *compilation = new ANeuralNetworksCompilation;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation *compilation)
-{
-  std::cout << __FUNCTION__ << "(compilation: " << compilation << ")" << std::endl;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Execution
-//
-struct ANeuralNetworksExecution
-{
-  // ANeuralNetworksExecution corresponds to NPU::Interp::Session
-};
-
-ResultCode ANeuralNetworksExecution_create(ANeuralNetworksCompilation *compilation,
-                                           ANeuralNetworksExecution **execution)
-{
-  std::cout << __FUNCTION__ << "(compilation: " << compilation << ")" << std::endl;
-
-  *execution = new ANeuralNetworksExecution;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-// ANeuralNetworksExecution_setInput and ANeuralNetworksExecution_setOutput specify HOST buffer for
-// input/output
-ResultCode ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
-                                             const ANeuralNetworksOperandType *type,
-                                             const void *buffer, size_t length)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
-
-  if (type == nullptr)
-    std::cout << "nullptr)" << std::endl;
-  else
-    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
-
-  // Q: Should we transfer input from HOST to DEVICE here, or in
-  // ANeuralNetworksExecution_startCompute?
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int32_t index,
-                                              const ANeuralNetworksOperandType *type, void *buffer,
-                                              size_t length)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
-
-  if (type == nullptr)
-    std::cout << "nullptr)" << std::endl;
-  else
-    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution *execution,
-                                                 ANeuralNetworksEvent **event)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
-
-  *event = new ANeuralNetworksEvent;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-
-ResultCode ANeuralNetworksExecution_free(ANeuralNetworksExecution *execution)
-{
-  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
-
-  delete execution;
-
-  return ANEURALNETWORKS_NO_ERROR;
-}
-- 
2.7.4