Support scalar type for operand rank larger than 0 (#3281)
author오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Mon, 22 Oct 2018 09:16:19 +0000 (18:16 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Mon, 22 Oct 2018 09:16:19 +0000 (18:16 +0900)
Support scalar type for operand rank larger than 0
Operand rank can range from 1 to 4 for operands of broadcast operations

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/pure_arm_compute/src/compilation.cc

index bde0ac2..e79b581 100644 (file)
@@ -4999,6 +4999,8 @@ void PlanBuilder::finalize(void) const
       auto type = operands.at(operand_idx).type();
       auto shape = operands.at(operand_idx).shape();
 
+      // Need to support scalar types (ANEURALNETWORKS_FLOAT32 and ANEURALNETWORKS_INT32)
+      // for tensors with rank > 0, because they can be operands of broadcast operations
       switch (rank)
       {
         case 0: // scalar
@@ -5042,12 +5044,14 @@ void PlanBuilder::finalize(void) const
           auto size = shape.asVector();
           switch (type)
           {
+            case ANEURALNETWORKS_FLOAT32:
             case ANEURALNETWORKS_TENSOR_FLOAT32:
             {
               auto initializer = std::bind(initVectorTensor<float>, _1, base, size);
               _plan.operands().at(operand_idx).access(initializer);
               break;
             }
+            case ANEURALNETWORKS_INT32:
             case ANEURALNETWORKS_TENSOR_INT32:
             {
               auto initializer = std::bind(initVectorTensor<int32_t>, _1, base, size);
@@ -5072,12 +5076,14 @@ void PlanBuilder::finalize(void) const
           auto size = operands.at(operand_idx).data().size();
           switch (type)
           {
+            case ANEURALNETWORKS_FLOAT32:
             case ANEURALNETWORKS_TENSOR_FLOAT32:
             {
               auto initializer = std::bind(initMatrixTensor<float>, _1, matrix_shape, base, size);
               _plan.operands().at(operand_idx).access(initializer);
               break;
             }
+            case ANEURALNETWORKS_INT32:
             case ANEURALNETWORKS_TENSOR_INT32:
             {
               auto initializer = std::bind(initMatrixTensor<int32_t>, _1, matrix_shape, base, size);
@@ -5102,12 +5108,14 @@ void PlanBuilder::finalize(void) const
           auto size = operands.at(operand_idx).data().size();
           switch (type)
           {
+            case ANEURALNETWORKS_FLOAT32:
             case ANEURALNETWORKS_TENSOR_FLOAT32:
             {
               auto initializer = std::bind(initTensor3D<float>, _1, tensor_shape, base, size);
               _plan.operands().at(operand_idx).access(initializer);
               break;
             }
+            case ANEURALNETWORKS_INT32:
             case ANEURALNETWORKS_TENSOR_INT32:
             {
               auto initializer = std::bind(initTensor3D<int32_t>, _1, tensor_shape, base, size);
@@ -5132,12 +5140,14 @@ void PlanBuilder::finalize(void) const
           auto size = operands.at(operand_idx).data().size();
           switch (type)
           {
+            case ANEURALNETWORKS_FLOAT32:
             case ANEURALNETWORKS_TENSOR_FLOAT32:
             {
               auto initializer = std::bind(initFeatureTensor<float>, _1, feature_shape, base, size);
               _plan.operands().at(operand_idx).access(initializer);
               break;
             }
+            case ANEURALNETWORKS_INT32:
             case ANEURALNETWORKS_TENSOR_INT32:
             {
               auto initializer =