From 43ae918a784ea4ca1f5938c8c56c5e6ea717f6be Mon Sep 17 00:00:00 2001
From: Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Wed, 29 May 2019 10:22:14 +0900
Subject: [PATCH] [Interpreter] Use cker for interpreter add operation (#5276)

* Use cker for interpreter add operation

- Use cker for interpreter add operation
- Introduce util header for type conversion
- Support more activations: RELU, RELU1, RELU6
- Support more types: float32

Signed-off-by: Hyeongseok Oh

* Rename functions
---
 runtimes/neurun/core/CMakeLists.txt                 |  2 +-
 .../neurun/core/src/exec/interp/Interpreter.cc      |  2 +-
 .../neurun/core/src/exec/interp/Registration.h      |  2 +-
 .../neurun/core/src/exec/interp/operations/Add.cc   | 74 ++++++++++++++--------
 .../src/exec/interp/operations/OperationUtil.h      | 71 +++++++++++++++++++++
 5 files changed, 123 insertions(+), 28 deletions(-)
 create mode 100644 runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h

diff --git a/runtimes/neurun/core/CMakeLists.txt b/runtimes/neurun/core/CMakeLists.txt
index 10dbb63..d5c389a 100644
--- a/runtimes/neurun/core/CMakeLists.txt
+++ b/runtimes/neurun/core/CMakeLists.txt
@@ -4,5 +4,5 @@ add_library(neurun-core STATIC ${SOURCES})
 set_target_properties(neurun-core PROPERTIES POSITION_INDEPENDENT_CODE ON)
 target_include_directories(neurun-core PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include)
 target_include_directories(neurun-core PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src)
-target_link_libraries(neurun-core nnfw_lib_misc nnfw_lib_cpp14 dl)
+target_link_libraries(neurun-core nnfw_lib_misc nnfw_lib_cpp14 nnfw_lib_cker dl)
 target_compile_options(neurun-core PRIVATE -Wall -Wextra -Werror)
diff --git a/runtimes/neurun/core/src/exec/interp/Interpreter.cc b/runtimes/neurun/core/src/exec/interp/Interpreter.cc
index c6e1326..4ffb1f4 100644
--- a/runtimes/neurun/core/src/exec/interp/Interpreter.cc
+++ b/runtimes/neurun/core/src/exec/interp/Interpreter.cc
@@ -47,7 +47,7 @@ public:
 #undef OP
 
 public:
-  OperationExecutor(ExecEnv *env) : _env{env} { _kernels[NodeName::AddNode] = Get_AddNode(); }
+  OperationExecutor(ExecEnv *env) : _env{env} { _kernels[NodeName::AddNode] = getAddNode(); }
 
   void execute(const model::OperationIndex &idx)
   {
diff --git a/runtimes/neurun/core/src/exec/interp/Registration.h b/runtimes/neurun/core/src/exec/interp/Registration.h
index 7fc40ed..47cf700 100644
--- a/runtimes/neurun/core/src/exec/interp/Registration.h
+++ b/runtimes/neurun/core/src/exec/interp/Registration.h
@@ -35,7 +35,7 @@ struct OpKernel
 };
 
 // Defined in operations/ directory
-OpKernel *Get_AddNode();
+OpKernel *getAddNode();
 
 } // namespace interp
 } // namespace exec
diff --git a/runtimes/neurun/core/src/exec/interp/operations/Add.cc b/runtimes/neurun/core/src/exec/interp/operations/Add.cc
index 8e7590a..791b3c0 100644
--- a/runtimes/neurun/core/src/exec/interp/operations/Add.cc
+++ b/runtimes/neurun/core/src/exec/interp/operations/Add.cc
@@ -14,6 +14,10 @@
  * limitations under the License.
 */
 
+#include <cker/operation/Add.h>
+
+#include "OperationUtil.h"
+
 #include "exec/interp/Registration.h"
 #include "model/operation/AddNode.h"
 #include "util/Utils.h"
@@ -50,14 +54,6 @@ void prepareAdd(ExecEnv *env, const model::Operation &node)
     assert(lhs_tensor->dimension(i) == rhs_tensor->dimension(i));
   }
 
-  // Check activation data type
-  const auto activation = add_node.param().activation;
-  if (activation != model::Activation::NONE)
-  {
-    // activation value zero: none
-    throw std::runtime_error("NYI");
-  }
-
   // Output's shape and type should be same with input (don't consider broadcast)
   auto output_info = lhs_tensor->tensorInfo();
   // We can handle already allocated (ex. model output)
@@ -77,6 +73,41 @@
   }
 }
 
+inline void setActivationParams(float min, float max, nnfw::cker::AddParam *params)
+{
+  params->float_activation_min = min;
+  params->float_activation_max = max;
+}
+
+inline void setActivationParams(int32_t min, int32_t max, nnfw::cker::AddParam *params)
+{
+  params->quantized_activation_min = min;
+  params->quantized_activation_max = max;
+}
+
+template <typename raw_type>
+void invoke(const ITensor *lhs_tensor, const ITensor *rhs_tensor, const ITensor *out_tensor,
+            const model::operation::AddNode::Param &param)
+{
+  const auto lhs_buffer = lhs_tensor->bufferRO();
+  const auto rhs_buffer = rhs_tensor->bufferRO();
+  auto out_buffer = out_tensor->buffer();
+
+  nnfw::cker::AddParam cker_param;
+  raw_type activation_min, activation_max;
+  calculateActivationRange(param.activation, &activation_min, &activation_max);
+  setActivationParams(activation_min, activation_max, &cker_param);
+  const auto lhs_shape = convertShape(lhs_tensor->tensorInfo().shape());
+  const auto rhs_shape = convertShape(rhs_tensor->tensorInfo().shape());
+  const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+  const raw_type *lhs_ptr = reinterpret_cast<const raw_type *>(lhs_buffer);
+  const raw_type *rhs_ptr = reinterpret_cast<const raw_type *>(rhs_buffer);
+  raw_type *out_ptr = reinterpret_cast<raw_type *>(out_buffer);
+
+  // Calculate
+  nnfw::cker::Add(cker_param, lhs_shape, lhs_ptr, rhs_shape, rhs_ptr, out_shape, out_ptr);
+}
+
 void invokeAdd(const ExecEnv *env, const model::Operation &node)
 {
   auto add_node = reinterpret_cast<const model::operation::AddNode &>(node);
@@ -84,34 +115,27 @@ void invokeAdd(const ExecEnv *env, const model::Operation &node)
   const auto lhs_index = node.getInputs().at(add_node.LHS);
   const auto rhs_index = node.getInputs().at(add_node.RHS);
   const auto out_index = node.getOutputs().at(0);
-
-  // Check lhs shape is same with rhs (with broadcast)
   const auto lhs_tensor = env->tensorAt(lhs_index);
   const auto rhs_tensor = env->tensorAt(rhs_index);
   const auto out_tensor = env->tensorAt(out_index);
-  const auto lhs_buffer = lhs_tensor->bufferRO();
-  const auto rhs_buffer = rhs_tensor->bufferRO();
-  auto out_buffer = out_tensor->buffer();
-
   const auto data_type = lhs_tensor->data_type();
-  if (data_type != model::DataType::INT32)
+
+  if (data_type == model::DataType::INT32)
   {
-    throw std::runtime_error{"NYI: Support INT32 only"};
+    invoke<int32_t>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
   }
-
-  // Calculate
-  uint64_t size = lhs_tensor->element_nums();
-  const int32_t *lhs_ptr = reinterpret_cast<const int32_t *>(lhs_buffer);
-  const int32_t *rhs_ptr = reinterpret_cast<const int32_t *>(rhs_buffer);
-  int32_t *out_ptr = reinterpret_cast<int32_t *>(out_buffer);
-  for (uint64_t index = 0; index < size; index++)
+  else if (data_type == model::DataType::FLOAT32)
+  {
+    invoke<float>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
+  }
+  else
   {
-    *(out_ptr++) = *(lhs_ptr++) + *(rhs_ptr++);
+    throw std::runtime_error{"NYI: Unsupported data type"};
   }
 }
 } // namespace add
 
-OpKernel *Get_AddNode()
+OpKernel *getAddNode()
 {
   static OpKernel kernel = {add::prepareAdd, add::invokeAdd};
   return &kernel;
diff --git a/runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h b/runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h
new file mode 100644
index 0000000..57c8716
--- /dev/null
+++ b/runtimes/neurun/core/src/exec/interp/operations/OperationUtil.h
@@ -0,0 +1,71 @@
+#ifndef __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
+#define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
+
+#include "model/Shape.h"
+#include "model/InternalType.h"
+
+#include <cker/Shape.h>
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+inline nnfw::cker::Shape convertShape(const model::Shape &shape)
+{
+  auto dimensions = std::vector<uint32_t>(shape.dims().begin(), shape.dims().end());
+
+  std::vector<int32_t> raw_shape;
+  raw_shape.resize(4);
+
+  for (uint32_t i = 0; i < 4; ++i)
+  {
+    if (i >= dimensions.size())
+    {
+      raw_shape[i] = 1;
+    }
+    else
+    {
+      raw_shape[i] = dimensions[i];
+    }
+  }
+
+  return nnfw::cker::GetShape(raw_shape);
+}
+
+template <typename T>
+void calculateActivationRange(model::Activation activation, T *activation_min, T *activation_max)
+{
+  if (activation == model::Activation::RELU)
+  {
+    *activation_min = 0;
+    *activation_max = std::numeric_limits<T>::max();
+  }
+  else if (activation == model::Activation::RELU6)
+  {
+    *activation_min = 0;
+    *activation_max = 6;
+  }
+  else if (activation == model::Activation::RELU1)
+  {
+    *activation_min = -1;
+    *activation_max = 1;
+  }
+  else if (activation == model::Activation::NONE)
+  {
+    *activation_min = std::numeric_limits<T>::lowest();
+    *activation_max = std::numeric_limits<T>::max();
+  }
+  else
+  {
+    throw std::runtime_error{"Unsupported activation type"};
+  }
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
-- 
2.7.4
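
The standalone sketch below is not part of the patch; it only illustrates the behaviour the new code path implements. calculateActivationRange() turns the fused activation (NONE/RELU/RELU1/RELU6) into a [min, max] clamp range, which invoke<raw_type>() hands to nnfw::cker::Add via setActivationParams(). Here the cker kernel is replaced by a hypothetical simpleAdd() loop so the clamping is visible end to end; the enum, simpleAdd(), and main() are invented for illustration and are not the neurun or cker APIs.

// Sketch only: mirrors the patch's activation-range logic with a plain
// element-wise add (no broadcasting, matching the interpreter's current limits).
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

enum class Activation { NONE, RELU, RELU1, RELU6 };

template <typename T>
void calculateActivationRange(Activation activation, T *activation_min, T *activation_max)
{
  // Same branch structure as OperationUtil.h in the patch.
  switch (activation)
  {
    case Activation::RELU:
      *activation_min = 0;
      *activation_max = std::numeric_limits<T>::max();
      break;
    case Activation::RELU6:
      *activation_min = 0;
      *activation_max = 6;
      break;
    case Activation::RELU1:
      *activation_min = -1;
      *activation_max = 1;
      break;
    default: // NONE (the patch throws for unknown activation values)
      *activation_min = std::numeric_limits<T>::lowest();
      *activation_max = std::numeric_limits<T>::max();
      break;
  }
}

// Hypothetical stand-in for nnfw::cker::Add: element-wise sum clamped to the range.
template <typename T>
void simpleAdd(const std::vector<T> &lhs, const std::vector<T> &rhs, std::vector<T> &out,
               Activation act)
{
  T act_min, act_max;
  calculateActivationRange(act, &act_min, &act_max);
  for (size_t i = 0; i < lhs.size(); ++i)
    out[i] = std::min(std::max(lhs[i] + rhs[i], act_min), act_max);
}

int main()
{
  std::vector<float> lhs{-2.f, 1.f, 5.f}, rhs{-1.f, 2.f, 4.f}, out(3);
  simpleAdd(lhs, rhs, out, Activation::RELU6);
  for (float v : out)
    std::cout << v << ' '; // prints: 0 3 6
  std::cout << '\n';
  return 0;
}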