From: 오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 Date: Thu, 23 Aug 2018 04:56:25 +0000 (+0900) Subject: [neurun] Use internal data type in cpu InitializerGenerator (#2428) X-Git-Tag: 0.2~195 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f2ea45ca967d6a4b8ba12a06716fa3fe761b7673;p=platform%2Fcore%2Fml%2Fnnfw.git [neurun] Use internal data type in cpu InitializerGenerator (#2428) Define getter for internal data type Use internal data type in cpu InitializerGenerator instead of shape Signed-off-by: Hyeongseok Oh --- diff --git a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc index de56d85..eea0352 100644 --- a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc +++ b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc @@ -48,7 +48,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte const auto num_output = _ctx.at(weight_index).shape().dim(0); auto weight_base = _ctx.at(weight_index).data().base(); auto weight_size = _ctx.at(weight_index).data().size(); - auto weight_type = _ctx.at(weight_index).shape().type(); + auto weight_type = _ctx.at(weight_index).typeInfo().type(); // NOTE We assume that input is a feature map // TODO Remove this restriction! 
@@ -56,7 +56,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte switch (weight_type) { - case ANEURALNETWORKS_TENSOR_FLOAT32: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32: { return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) { const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H, @@ -83,7 +83,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte }; }; } - case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM: { return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) { const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H, @@ -146,12 +146,12 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected: const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; auto bias_base = _ctx.at(bias_index).data().base(); - auto bias_type = _ctx.at(bias_index).shape().type(); + auto bias_type = _ctx.at(bias_index).typeInfo().type(); const auto bias_size = _ctx.at(bias_index).shape().asVector(); switch (bias_type) { - case ANEURALNETWORKS_TENSOR_FLOAT32: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32: { return [bias_base, bias_size](::arm_compute::ITensor &tensor) { for (uint32_t n = 0; n < bias_size; ++n) @@ -167,7 +167,7 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected: } }; } - case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM: { return [bias_base, bias_size](::arm_compute::ITensor &tensor) { for (uint32_t n = 0; n < bias_size; ++n) diff --git a/runtimes/neurun/src/internal/operand/Object.h b/runtimes/neurun/src/internal/operand/Object.h index 4e22665..eaaa987 100644 --- a/runtimes/neurun/src/internal/operand/Object.h +++ 
b/runtimes/neurun/src/internal/operand/Object.h @@ -37,6 +37,7 @@ public: public: const Shape &shape(void) const { return _shape; } + const TypeInfo &typeInfo(void) const { return _type; } size_t operandSize(void) const; bool setAsConstant() { return setUsage(OperandUsage::CONSTANT); } bool setAsModelInput() { return setUsage(OperandUsage::MODEL_INPUT); }