[neurun] Use internal data type in cpu InitializerGenerator (#2428)
author Hyeongseok Oh/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Thu, 23 Aug 2018 04:56:25 +0000 (13:56 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Thu, 23 Aug 2018 04:56:25 +0000 (13:56 +0900)
Define a getter for the internal data type
Use the internal data type in the cpu InitializerGenerator instead of reading it from the shape

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
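
For context, a minimal sketch of the internal type the new getter exposes. The exact definition is not part of this diff and is assumed here; only the enumerator names are taken from the hunks below, where they replace the NNAPI ANEURALNETWORKS_TENSOR_* codes.

// Assumed sketch (not from this commit): the internal operand DataType and
// the TypeInfo wrapper that the new typeInfo() getter returns.
namespace neurun
{
namespace internal
{
namespace operand
{

enum class DataType
{
  NEURUN_TENSOR_FLOAT32,
  NEURUN_TENSOR_QUANT8_ASYMM,
  // ... remaining scalar/tensor types elided
};

class TypeInfo
{
public:
  explicit TypeInfo(DataType type) : _type{type} {}
  DataType type(void) const { return _type; }

private:
  DataType _type;
};

} // namespace operand
} // namespace internal
} // namespace neurun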
runtimes/neurun/src/backend/cpu/InitializerGenerator.cc
runtimes/neurun/src/internal/operand/Object.h

index de56d85..eea0352 100644
--- a/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc
+++ b/runtimes/neurun/src/backend/cpu/InitializerGenerator.cc
@@ -48,7 +48,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte
   const auto num_output = _ctx.at(weight_index).shape().dim(0);
   auto weight_base = _ctx.at(weight_index).data().base();
   auto weight_size = _ctx.at(weight_index).data().size();
-  auto weight_type = _ctx.at(weight_index).shape().type();
+  auto weight_type = _ctx.at(weight_index).typeInfo().type();
 
   // NOTE We assume that input is a feature map
   // TODO Remove this restriction!
@@ -56,7 +56,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte
 
   switch (weight_type)
   {
-    case ANEURALNETWORKS_TENSOR_FLOAT32:
+    case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32:
     {
       return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
         const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
@@ -83,7 +83,7 @@ InitializerGenerator::generateWeight(const ::internal::tflite::op::FullyConnecte
                };
       };
     }
-    case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
+    case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM:
     {
       return [num_output, ifm_shape, weight_base, weight_size](::arm_compute::ITensor &tensor) {
         const ::nnfw::util::kernel::Shape ker_shape{num_output, ifm_shape.C, ifm_shape.H,
@@ -146,12 +146,12 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected:
   const ::neurun::graph::operand::Index bias_index{node.param().bias_index};
 
   auto bias_base = _ctx.at(bias_index).data().base();
-  auto bias_type = _ctx.at(bias_index).shape().type();
+  auto bias_type = _ctx.at(bias_index).typeInfo().type();
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
   switch (bias_type)
   {
-    case ANEURALNETWORKS_TENSOR_FLOAT32:
+    case ::neurun::internal::operand::DataType::NEURUN_TENSOR_FLOAT32:
     {
       return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
         for (uint32_t n = 0; n < bias_size; ++n)
@@ -167,7 +167,7 @@ InitializerGenerator::generateBias(const ::internal::tflite::op::FullyConnected:
         }
       };
     }
-    case ANEURALNETWORKS_TENSOR_QUANT8_ASYMM:
+    case ::neurun::internal::operand::DataType::NEURUN_TENSOR_QUANT8_ASYMM:
     {
       return [bias_base, bias_size](::arm_compute::ITensor &tensor) {
         for (uint32_t n = 0; n < bias_size; ++n)
index 4e22665..eaaa987 100644
--- a/runtimes/neurun/src/internal/operand/Object.h
+++ b/runtimes/neurun/src/internal/operand/Object.h
@@ -37,6 +37,7 @@ public:
 
 public:
   const Shape &shape(void) const { return _shape; }
+  const TypeInfo &typeInfo(void) const { return _type; }
   size_t operandSize(void) const;
   bool setAsConstant() { return setUsage(OperandUsage::CONSTANT); }
   bool setAsModelInput() { return setUsage(OperandUsage::MODEL_INPUT); }
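
With the getter in place, a caller reads the type directly from the operand instead of going through its shape. A hedged usage sketch follows; the Object class name and the helper are assumptions for illustration, with the enumerators taken from the hunks above.

// Hypothetical usage sketch (names assumed, not from this commit): dispatch
// on the internal data type via the new typeInfo() getter instead of
// shape().type().
#include <stdexcept>

using ::neurun::internal::operand::DataType;
using ::neurun::internal::operand::Object;

bool isQuantized(const Object &operand)
{
  switch (operand.typeInfo().type())
  {
    case DataType::NEURUN_TENSOR_FLOAT32:
      return false;
    case DataType::NEURUN_TENSOR_QUANT8_ASYMM:
      return true;
    default:
      throw std::runtime_error{"Unsupported operand type"};
  }
}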