From: 오형석/On-Device Lab(SR)/Staff Engineer/삼성전자
Date: Tue, 26 Feb 2019 07:49:41 +0000 (+0900)
Subject: Frontend to support unspecified model input and output (#4495)
X-Git-Tag: submit/tizen/20190325.013700~204
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7099c7dc09aa4e788e57bf51fc364fa878d78def;p=platform%2Fcore%2Fml%2Fnnfw.git

Frontend to support unspecified model input and output (#4495)

- Some models don't have a specified shape for input and output
- In that case, pass the input and output shapes to the executor when preparing inference

Signed-off-by: Hyeongseok Oh
---

diff --git a/runtimes/neurun/src/frontend/execution.cc b/runtimes/neurun/src/frontend/execution.cc
index c9c0058..e9ee6fe 100644
--- a/runtimes/neurun/src/frontend/execution.cc
+++ b/runtimes/neurun/src/frontend/execution.cc
@@ -104,14 +104,6 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32
       VERBOSE(NNAPI::Execution) << "setInput: Shape mismatch" << std::endl;
       return ANEURALNETWORKS_BAD_DATA;
     }
-
-    // TODO Handle specifed dimension on execution
-    if (execution->haveUnspecifiedDims(operand_index))
-    {
-      VERBOSE(NNAPI::Execution) << "setInput: Cannot handle specified dimension on execution yet"
-                                << std::endl;
-      return ANEURALNETWORKS_BAD_STATE;
-    }
   }
   else
   {
@@ -178,14 +170,6 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int3
       VERBOSE(NNAPI::Execution) << "setOutput: Shape mismatch" << std::endl;
       return ANEURALNETWORKS_BAD_DATA;
     }
-
-    // TODO Handle specifed dimension on execution
-    if (execution->haveUnspecifiedDims(operand_index))
-    {
-      VERBOSE(NNAPI::Execution) << "setOutput: Cannot handle specified dimension on execution yet"
-                                << std::endl;
-      return ANEURALNETWORKS_BAD_STATE;
-    }
   }
   else
   {
diff --git a/runtimes/neurun/src/frontend/wrapper/execution.cc b/runtimes/neurun/src/frontend/wrapper/execution.cc
index 836e03d..5ad9343 100644
--- a/runtimes/neurun/src/frontend/wrapper/execution.cc
+++ b/runtimes/neurun/src/frontend/wrapper/execution.cc
@@ -77,15 +77,18 @@ bool ANeuralNetworksExecution::haveUnspecifiedDims(
   return ((operand_shape.element_nums() == 0) ? true : false);
 }
 
-bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOperandType * /*type*/,
+bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOperandType *type,
                                         const void *buffer, size_t length) noexcept
 {
   try
   {
     neurun::model::operand::IO::Index input_index{index};
     const auto operand_index = getInputOperandIndex(index);
+    bool unspecified = haveUnspecifiedDims(operand_index);
+
     const auto type_info = _executor->model().operands.at(operand_index).typeInfo();
-    const auto shape = _executor->model().operands.at(operand_index).shape();
+    const auto shape = (unspecified ? neurun::util::getShape(type)
+                                    : _executor->model().operands.at(operand_index).shape());
 
     _executor->setInput(input_index, type_info, shape, buffer, length);
   }
@@ -99,16 +102,18 @@ bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOpe
   return true;
 }
 
-bool ANeuralNetworksExecution::setOutput(uint32_t index,
-                                         const ANeuralNetworksOperandType * /*type*/, void *buffer,
-                                         size_t length) noexcept
+bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOperandType *type,
+                                         void *buffer, size_t length) noexcept
 {
   try
   {
     neurun::model::operand::IO::Index output_index{index};
     const auto operand_index = getOutputOperandIndex(index);
+    bool unspecified = haveUnspecifiedDims(operand_index);
+
     const auto type_info = _executor->model().operands.at(operand_index).typeInfo();
-    const auto shape = _executor->model().operands.at(operand_index).shape();
+    const auto shape = (unspecified ? neurun::util::getShape(type)
+                                    : _executor->model().operands.at(operand_index).shape());
 
     _executor->setOutput(output_index, type_info, shape, buffer, length);
   }
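
For context, a minimal caller-side sketch of what this change enables is below. It is not part of the commit: the helper name run_with_concrete_shape, the 1x224x224x3 shape, and the buffers are illustrative, and it assumes an ANeuralNetworksExecution already created from a compiled model whose input 0 and output 0 were declared with unspecified dimensions. The point it shows is that a non-null ANeuralNetworksOperandType can now be passed to setInput/setOutput, and its dimensions are forwarded to the executor instead of the (empty) model shape.

#include <NeuralNetworks.h> // or <android/NeuralNetworks.h>, depending on the platform
#include <vector>

// Illustrative helper, not part of the patch.
bool run_with_concrete_shape(ANeuralNetworksExecution *execution)
{
  // Concrete dimensions supplied at execution time (values are examples only).
  const uint32_t dims[4] = {1, 224, 224, 3};
  ANeuralNetworksOperandType concrete_type;
  concrete_type.type = ANEURALNETWORKS_TENSOR_FLOAT32;
  concrete_type.dimensionCount = 4;
  concrete_type.dimensions = dims;
  concrete_type.scale = 0.0f;
  concrete_type.zeroPoint = 0;

  std::vector<float> input(1 * 224 * 224 * 3);
  std::vector<float> output(1 * 224 * 224 * 3); // assumed output size, for illustration

  // Passing a non-null type is what makes the frontend take the
  // neurun::util::getShape(type) branch added by this commit.
  if (ANeuralNetworksExecution_setInput(execution, 0, &concrete_type, input.data(),
                                        input.size() * sizeof(float)) != ANEURALNETWORKS_NO_ERROR)
    return false;
  if (ANeuralNetworksExecution_setOutput(execution, 0, &concrete_type, output.data(),
                                         output.size() * sizeof(float)) != ANEURALNETWORKS_NO_ERROR)
    return false;
  return true;
}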