From 3555dea2a02daa530b26f45f62fc751ef62e41e3 Mon Sep 17 00:00:00 2001
From: 최형규/동작제어Lab(SR)/Senior Engineer/삼성전자
Date: Tue, 27 Mar 2018 15:07:06 +0900
Subject: [PATCH] Update logging to print more information (#198)

- Print type of ANeuralNetworksOperandType
- Print model for ANeuralNetworksModel_addOperation

Signed-off-by: Hyung-Kyu Choi
---
 .../bindings/logging/include/operand.def  | 12 ++++
 .../bindings/logging/src/nnapi_logging.cc | 67 ++++++++++++++++++++--
 2 files changed, 74 insertions(+), 5 deletions(-)
 create mode 100644 tools/nnapi_bindings/bindings/logging/include/operand.def

diff --git a/tools/nnapi_bindings/bindings/logging/include/operand.def b/tools/nnapi_bindings/bindings/logging/include/operand.def
new file mode 100644
index 0000000..c570cf0
--- /dev/null
+++ b/tools/nnapi_bindings/bindings/logging/include/operand.def
@@ -0,0 +1,12 @@
+// Extracted from tensorflow/contrib/lite/nnapi/NeuralNetworksShim.h
+//
+// NNAPI_OPERAND(NAME, CODE)
+#ifndef NNAPI_OPERAND
+#error NNAPI_OPERAND should be defined
+#endif
+NNAPI_OPERAND(ANEURALNETWORKS_FLOAT32, 0)
+NNAPI_OPERAND(ANEURALNETWORKS_INT32, 1)
+NNAPI_OPERAND(ANEURALNETWORKS_UINT32, 2)
+NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_FLOAT32, 3)
+NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_INT32, 4)
+NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 5)
diff --git a/tools/nnapi_bindings/bindings/logging/src/nnapi_logging.cc b/tools/nnapi_bindings/bindings/logging/src/nnapi_logging.cc
index f13a4c9..0ce4285 100644
--- a/tools/nnapi_bindings/bindings/logging/src/nnapi_logging.cc
+++ b/tools/nnapi_bindings/bindings/logging/src/nnapi_logging.cc
@@ -60,6 +60,54 @@ std::string OperationCodeResolver::resolve(int code) const
 
   return it->second;
 }
+
+class OperandCodeResolver
+{
+public:
+  OperandCodeResolver();
+
+public:
+  std::string resolve(int code) const;
+
+private:
+  void setName(int code, const std::string &name);
+
+private:
+  std::map<int, std::string> _table;
+
+public:
+  static const OperandCodeResolver &access()
+  {
+    static const OperandCodeResolver resolver;
+
+    return resolver;
+  }
+};
+
+OperandCodeResolver::OperandCodeResolver()
+{
+#define NNAPI_OPERAND(NAME, CODE) setName(CODE, #NAME);
+#include "operand.def"
+#undef NNAPI_OPERAND
+}
+
+void OperandCodeResolver::setName(int code, const std::string &name)
+{
+  assert(_table.find(code) == _table.end());
+  _table[code] = name;
+}
+
+std::string OperandCodeResolver::resolve(int code) const
+{
+  auto it = _table.find(code);
+
+  if (it == _table.end())
+  {
+    return boost::str(boost::format("unknown(%d)") % code);
+  }
+
+  return it->second;
+}
 }
 
 //
@@ -138,12 +186,11 @@ ResultCode ANeuralNetworksModel_free(ANeuralNetworksModel* model)
 ResultCode ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, const ANeuralNetworksOperandType *type)
 {
-  std::cout << __FUNCTION__ << "(model: " << model << ")" << std::endl;
+  std::cout << __FUNCTION__ << "(model: " << model << ", type: " << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
 
   auto id = model->numOperands;
 
   std::cout << "  id: " << id << std::endl;
-  std::cout << "  type: " << type->type << std::endl;
   std::cout << "  rank: " << type->dimensionCount << std::endl;
 
   for (uint32_t dim = 0; dim < type->dimensionCount; ++dim)
   {
@@ -176,7 +223,7 @@ ResultCode ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel*
 ResultCode ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model,
                                              ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs)
 {
-  std::cout << __FUNCTION__ << "(type: " << ::OperationCodeResolver::access().resolve(type) << ", inputCount: " << inputCount << ", outputCount: " << outputCount << ")" << std::endl;
+  std::cout << __FUNCTION__ << "(model: " << model << ", type: " << ::OperationCodeResolver::access().resolve(type) << ", inputCount: " << inputCount << ", outputCount: " << outputCount << ")" << std::endl;
 
   for (uint32_t input = 0; input < inputCount; ++input)
   {
@@ -264,7 +311,12 @@ ResultCode ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilati
 // ANeuralNetworksExecution_setInput and ANeuralNetworksExecution_setOutput specify HOST buffer for input/output
 ResultCode ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length)
 {
-  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
+  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
+
+  if (type == nullptr)
+    std::cout << "nullptr)" << std::endl;
+  else
+    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
 
   // Q: Should we transfer input from HOST to DEVICE here, or in ANeuralNetworksExecution_startCompute?
 
@@ -273,7 +325,12 @@ ResultCode ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution
 
 ResultCode ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length)
 {
-  std::cout << __FUNCTION__ << "(execution: " << execution << ")" << std::endl;
+  std::cout << __FUNCTION__ << "(execution: " << execution << ", type: ";
+
+  if (type == nullptr)
+    std::cout << "nullptr)" << std::endl;
+  else
+    std::cout << ::OperandCodeResolver::access().resolve(type->type) << ")" << std::endl;
 
   return ANEURALNETWORKS_NO_ERROR;
 }
-- 
2.7.4
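
Below is a minimal, self-contained sketch (not part of the patch) of the X-macro pattern the patch uses to turn an ANeuralNetworksOperandType::type code into a readable name. The six entries are copied from operand.def above; the OPERAND_DEF helper macro stands in for `#include "operand.def"`, the unknown-code fallback uses std::to_string instead of the patch's boost::format so the sketch builds without Boost, and main() is only a hypothetical driver.

#include <cassert>
#include <iostream>
#include <map>
#include <string>

// Entries copied from the operand.def file added by the patch.
#define OPERAND_DEF                                  \
  NNAPI_OPERAND(ANEURALNETWORKS_FLOAT32, 0)          \
  NNAPI_OPERAND(ANEURALNETWORKS_INT32, 1)            \
  NNAPI_OPERAND(ANEURALNETWORKS_UINT32, 2)           \
  NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_FLOAT32, 3)   \
  NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_INT32, 4)     \
  NNAPI_OPERAND(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 5)

class OperandCodeResolver
{
public:
  OperandCodeResolver()
  {
    // Expand each entry into a setName(CODE, "NAME") call, mirroring
    // `#define NNAPI_OPERAND(NAME, CODE) setName(CODE, #NAME);` in the patch.
#define NNAPI_OPERAND(NAME, CODE) setName(CODE, #NAME);
    OPERAND_DEF
#undef NNAPI_OPERAND
  }

  // Map an operand type code to its enum name.
  std::string resolve(int code) const
  {
    auto it = _table.find(code);

    if (it == _table.end())
    {
      // The patch formats this fallback with boost::format("unknown(%d)").
      return "unknown(" + std::to_string(code) + ")";
    }

    return it->second;
  }

  static const OperandCodeResolver &access()
  {
    static const OperandCodeResolver resolver;

    return resolver;
  }

private:
  void setName(int code, const std::string &name)
  {
    assert(_table.find(code) == _table.end()); // each code is registered once
    _table[code] = name;
  }

  std::map<int, std::string> _table;
};

int main()
{
  // resolve() yields the string the updated logging prints instead of a raw integer.
  std::cout << OperandCodeResolver::access().resolve(3) << std::endl;  // ANEURALNETWORKS_TENSOR_FLOAT32
  std::cout << OperandCodeResolver::access().resolve(42) << std::endl; // unknown(42)

  return 0;
}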