From f2ea45ca967d6a4b8ba12a06716fa3fe761b7673 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/=EB=8F=99=EC=9E=91=EC=A0=9C?= =?utf8?q?=EC=96=B4Lab=28SR=29/Staff=20Engineer/=EC=82=BC=EC=84=B1?= =?utf8?q?=EC=A0=84=EC=9E=90?= Date: Thu, 23 Aug 2018 13:56:25 +0900 Subject: [PATCH] [neurun] Use internal data type in cpu InitializerGenerator (#2428) Define getter for internal data type Use internal data type in cpu InitializerGenerator instead of shape Signed-off-by: Hyeongseok Oh --- runtimes/neurun/src/backend/cpu/InitializerGenerator.cc | 12 ++++++------ runtimes/neurun/src/internal/operand/Object.h | 1 + 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc index de56d85..eea0352 100644 --- a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc +++ b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc @@ -48,7 +48,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte const auto num_output = _ctx.at(weight_index).shape().dim(0); auto weight_base = _ctx.at(weight_index).data().base(); auto weight_size = _ctx.at(weight_index).data().size(); - auto weight_type = _ctx.at(weight_index).shape().type(); + auto weight_type = _ctx.at(weight_index).typeInfo().type(); // NOTE We assume that input is a feature map // TODO Remove this restriction! 
@@ -56,7 +56,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte switch (weight_type) { - case ANEURALNETWORKS_TENSOR_FLOAT32: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32: { return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) { const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H, @@ -83,7 +83,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte }; }; } - case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM: { return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) { const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H, @@ -146,12 +146,12 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected: const ::neurun::graph::operand::Index bias_index{node.param().bias_index}; auto bias_base = _ctx.at(bias_index).data().base(); - auto bias_type = _ctx.at(bias_index).shape().type(); + auto bias_type = _ctx.at(bias_index).typeInfo().type(); const auto bias_size = _ctx.at(bias_index).shape().asVector(); switch (bias_type) { - case ANEURALNETWORKS_TENSOR_FLOAT32: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32: { return [bias_base, bias_size](::arm_compute::ITensor &tensor) { for (uint32_t n = 0; n < bias_size; ++n) @@ -167,7 +167,7 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected: } }; } - case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM: + case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM: { return [bias_base, bias_size](::arm_compute::ITensor &tensor) { for (uint32_t n = 0; n < bias_size; ++n) diff --git a/runtimes/neurun/src/internal/operand/Object.h b/runtimes/neurun/src/internal/operand/Object.h index 4e22665..eaaa987 100644 --- a/runtimes/neurun/src/internal/operand/Object.h +++ 
b/runtimes/neurun/src/internal/operand/Object.h @@ -37,6 +37,7 @@ public: public: const Shape &shape(void) const { return _shape; } + const TypeInfo &typeInfo(void) const { return _type; } size_t operandSize(void) const; bool setAsConstant() { return setUsage(OperandUsage::CONSTANT); } bool setAsModelInput() { return setUsage(OperandUsage::MODEL_INPUT); } -- 2.7.4