* limitations under the License.
*/
+#include <cker/operation/Add.h>
+
+#include "OperationUtil.h"
+
#include "exec/interp/Registration.h"
#include "model/operation/AddNode.h"
#include "util/Utils.h"
assert(lhs_tensor->dimension(i) == rhs_tensor->dimension(i));
}
- // Check activation data type
- const auto activation = add_node.param().activation;
- if (activation != model::Activation::NONE)
- {
- // activation value zero: none
- throw std::runtime_error("NYI");
- }
-
// Output's shape and type should be same with input (don't consider broadcast)
auto output_info = lhs_tensor->tensorInfo();
// We can handle already allocated (ex. model output)
}
}
+inline void setActivationParams(float min, float max, nnfw::cker::AddParam *params)
+{
+ params->float_activation_min = min;
+ params->float_activation_max = max;
+}
+
+inline void setActivationParams(int32_t min, int32_t max, nnfw::cker::AddParam *params)
+{
+ params->quantized_activation_min = min;
+ params->quantized_activation_max = max;
+}
+
+template <typename raw_type>
+void invoke(const ITensor *lhs_tensor, const ITensor *rhs_tensor, const ITensor *out_tensor,
+ const model::operation::AddNode::Param ¶m)
+{
+ const auto lhs_buffer = lhs_tensor->bufferRO();
+ const auto rhs_buffer = rhs_tensor->bufferRO();
+ auto out_buffer = out_tensor->buffer();
+
+ nnfw::cker::AddParam cker_param;
+ raw_type activation_min, activation_max;
+ calculateActivationRange(param.activation, &activation_min, &activation_max);
+ setActivationParams(activation_min, activation_max, &cker_param);
+ const auto lhs_shape = convertShape(lhs_tensor->tensorInfo().shape());
+ const auto rhs_shape = convertShape(rhs_tensor->tensorInfo().shape());
+ const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
+ const raw_type *lhs_ptr = reinterpret_cast<const raw_type *>(lhs_buffer);
+ const raw_type *rhs_ptr = reinterpret_cast<const raw_type *>(rhs_buffer);
+ raw_type *out_ptr = reinterpret_cast<raw_type *>(out_buffer);
+
+ // Calculate
+ nnfw::cker::Add(cker_param, lhs_shape, lhs_ptr, rhs_shape, rhs_ptr, out_shape, out_ptr);
+}
+
// Interpreter entry point for the Add operation: looks up the operand
// tensors in the execution environment and dispatches to the typed kernel.
// Throws std::runtime_error for element types not yet supported.
void invokeAdd(const ExecEnv *env, const model::Operation &node)
{
  // Bind by reference: `auto` alone would deduce a value and copy the node
  // object on every invocation.
  const auto &add_node = reinterpret_cast<const model::operation::AddNode &>(node);

  const auto lhs_index = node.getInputs().at(add_node.LHS);
  const auto rhs_index = node.getInputs().at(add_node.RHS);
  const auto out_index = node.getOutputs().at(0);
  const auto lhs_tensor = env->tensorAt(lhs_index);
  const auto rhs_tensor = env->tensorAt(rhs_index);
  const auto out_tensor = env->tensorAt(out_index);

  // Dispatch on the lhs element type; prepare already checked lhs/rhs agree.
  const auto data_type = lhs_tensor->data_type();

  if (data_type == model::DataType::INT32)
  {
    invoke<int32_t>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
  }
  else if (data_type == model::DataType::FLOAT32)
  {
    invoke<float>(lhs_tensor, rhs_tensor, out_tensor, add_node.param());
  }
  else
  {
    throw std::runtime_error{"NYI: Unsupported data type"};
  }
}
} // namespace add
-OpKernel *Get_AddNode()
+OpKernel *getAddNode()
{
static OpKernel kernel = {add::prepareAdd, add::invokeAdd};
return &kernel;
--- /dev/null
+#ifndef __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
+#define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_
+
#include "model/Shape.h"
#include "model/InternalType.h"

#include <cker/Shape.h>

#include <cstdint>
#include <limits>
#include <stdexcept>
#include <vector>
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+inline nnfw::cker::Shape convertShape(const model::Shape &shape)
+{
+ auto dimensions = std::vector<uint32_t>(shape.dims().begin(), shape.dims().end());
+
+ std::vector<int32_t> raw_shape;
+ raw_shape.resize(4);
+
+ for (uint32_t i = 0; i < 4; ++i)
+ {
+ if (i >= dimensions.size())
+ {
+ raw_shape[i] = 1;
+ }
+ else
+ {
+ raw_shape[i] = dimensions[i];
+ }
+ }
+
+ return nnfw::cker::GetShape(raw_shape);
+}
+
+template <typename T>
+void calculateActivationRange(model::Activation activation, T *activation_min, T *activation_max)
+{
+ if (activation == model::Activation::RELU)
+ {
+ *activation_min = 0;
+ *activation_max = std::numeric_limits<T>::max();
+ }
+ else if (activation == model::Activation::RELU6)
+ {
+ *activation_min = 0;
+ *activation_max = 6;
+ }
+ else if (activation == model::Activation::RELU1)
+ {
+ *activation_min = -1;
+ *activation_max = 1;
+ }
+ else if (activation == model::Activation::NONE)
+ {
+ *activation_min = std::numeric_limits<T>::lowest();
+ *activation_max = std::numeric_limits<T>::max();
+ }
+ else
+ {
+ throw std::runtime_error{"Unsupported activation type"};
+ }
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_