[neurun] Support 'quant8' for some cpu kernels (#4402)
author Sujin Kim/On-Device Lab(SR)/Engineer/Samsung Electronics <sjsujin.kim@samsung.com>
Mon, 11 Feb 2019 10:53:09 +0000 (19:53 +0900)
committer Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Mon, 11 Feb 2019 10:53:09 +0000 (19:53 +0900)
This commit adds support for the `quant8` data type to some cpu kernels, except for the following:

- `ConvolutionLayer` : results do not match the expected output
- `FullyConnectedLayer` : needs to be revised to use the `tflite` implementation

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
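
For reference, `TENSOR_QUANT8_ASYMM` stores each element as a `uint8` together with a per-tensor `scale` and zero-point `offset`; the real value is recovered as `real = scale * (quantized - offset)`. A minimal sketch of that mapping (function name illustrative, not from this codebase):

    #include <cstdint>

    // Asymmetric quant8: real_value = scale * (quantized - offset).
    inline float dequantize(uint8_t q, float scale, int32_t offset)
    {
      return scale * static_cast<float>(static_cast<int32_t>(q) - offset);
    }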
runtimes/neurun/src/backend/cpu/kernel/AvgPoolLayer.cc
runtimes/neurun/src/backend/cpu/kernel/ConcatLayer.cc
runtimes/neurun/src/backend/cpu/kernel/MaxPoolLayer.cc
runtimes/neurun/src/backend/cpu/kernel/OperationUtils.cc
runtimes/neurun/src/backend/cpu/kernel/SoftMaxLayer.cc
tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu

runtimes/neurun/src/backend/cpu/kernel/AvgPoolLayer.cc
index 2ffdca1..43c3521 100644
@@ -108,8 +108,7 @@ void AvgPoolLayer::run()
   }
   else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
   {
-    throw std::runtime_error{"AvgPoolLayer : Not tested for TENSOR_QUANT8_ASYMM"};
-    // averagePoolQuant8();
+    averagePoolQuant8();
   }
 }
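
The averagePoolQuant8 path can operate directly on the quantized values because NNAPI requires a pooling output to have the same scale and zeroPoint as its input. A naive sketch of what one pooling window computes, not the tflite-optimized kernel the backend actually calls (window flattening and names assumed):

    #include <algorithm>
    #include <cstdint>

    // Accumulate the window in int32, divide with round-to-nearest,
    // then clamp to the fused-activation range [act_min, act_max].
    inline uint8_t averagePoolWindow(const uint8_t *window, int count,
                                     int32_t act_min, int32_t act_max)
    {
      int32_t sum = 0;
      for (int i = 0; i < count; ++i)
        sum += window[i];
      const int32_t avg = (sum + count / 2) / count;
      return static_cast<uint8_t>(std::min(std::max(avg, act_min), act_max));
    }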
 
runtimes/neurun/src/backend/cpu/kernel/ConcatLayer.cc
index 6250dc2..f87d7f3 100644
@@ -129,8 +129,7 @@ void ConcatLayer::run()
   }
   else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
   {
-    throw std::runtime_error{"ConcatLayer : Not tested for TENSOR_QUANT8_ASYMM"};
-    // concatenationQuant8();
+    concatenationQuant8();
   }
 }
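
concatenationQuant8 needs no arithmetic at the NNAPI level this runtime targets: before NNAPI 1.2, every quant8 input to CONCATENATION must share the output's scale and zeroPoint, so the kernel reduces to copying raw bytes along the concatenation axis. A sketch for the outermost axis (names assumed):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Copy each input's bytes into the output back to back; no
    // requantization is needed since all tensors share scale/offset.
    void concatOuterAxis(const std::vector<const uint8_t *> &inputs,
                         const std::vector<size_t> &byteSizes, uint8_t *output)
    {
      size_t offset = 0;
      for (size_t i = 0; i < inputs.size(); ++i)
      {
        std::memcpy(output + offset, inputs[i], byteSizes[i]);
        offset += byteSizes[i];
      }
    }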
 
runtimes/neurun/src/backend/cpu/kernel/MaxPoolLayer.cc
index 1da1ad5..d2a0cdc 100644
@@ -107,8 +107,7 @@ void MaxPoolLayer::run()
   }
   else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
   {
-    throw std::runtime_error{"MaxPoolLayer : Not tested for TENSOR_QUANT8_ASYMM"};
-    // maxPoolQuant8();
+    maxPoolQuant8();
   }
 }
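
maxPoolQuant8 is the simplest case: dequantization `q -> scale * (q - offset)` is monotonically increasing, so the maximum over raw uint8 values equals the maximum over the real values. A one-line sketch per window (name assumed):

    #include <algorithm>
    #include <cstdint>

    // Max over raw quantized values == max over dequantized values,
    // because dequantization is a monotonically increasing map.
    inline uint8_t maxPoolWindow(const uint8_t *window, int count)
    {
      return *std::max_element(window, window + count);
    }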
 
runtimes/neurun/src/backend/cpu/kernel/OperationUtils.cc
index df6e092..0c696a2 100644
@@ -193,7 +193,7 @@ Shape getShape(const ::neurun::model::operand::Object &o)
   shape.type = static_cast<OperandType>(static_cast<int32_t>(o.typeInfo().type()));
   shape.dimensions = std::vector<uint32_t>(o.shape().dims().begin(), o.shape().dims().end());
   shape.scale = o.typeInfo().scale();
-  // shape.offset = _offset;
+  shape.offset = o.typeInfo().offset();
 
   return shape;
 }
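
With both scale and offset now populated by getShape(), a quant8 kernel can map real-valued bounds (for example a fused ReLU6's [0, 6]) into the quantized domain. A hedged sketch of that mapping, helper name hypothetical:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Quantize a real bound using the tensor's scale/offset and clamp
    // to the representable uint8 range [0, 255].
    inline int32_t quantizeBound(float real, float scale, int32_t offset)
    {
      const int32_t q = offset + static_cast<int32_t>(std::round(real / scale));
      return std::min(std::max(q, 0), 255);
    }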
runtimes/neurun/src/backend/cpu/kernel/SoftMaxLayer.cc
index 59e6b4c..6bf1d4d 100644
@@ -166,8 +166,7 @@ void SoftMaxLayer::run()
   }
   else if (_inputType == OperandType::TENSOR_QUANT8_ASYMM)
   {
-    throw std::runtime_error{"SoftMaxLayer : Not tested for TENSOR_QUANT8_ASYMM"};
-    // softmaxQuant8();
+    softmaxQuant8();
   }
 }
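
softmaxQuant8 is the least obvious kernel of this set: the production path runs in fixed point, but its output can be checked against a naive dequantize-compute-requantize reference. A sketch of such a reference, not the kernel enabled here (NNAPI fixes the quant8 softmax output to scale 1/256 and zeroPoint 0; assumes a non-empty row and beta = 1):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Naive reference over one row: float softmax with the usual max
    // subtraction for stability, then requantize to scale 1/256.
    // The input zero point cancels under the max subtraction, so only
    // the input scale is needed.
    std::vector<uint8_t> softmaxQuant8Ref(const std::vector<uint8_t> &in,
                                          float inScale)
    {
      const int32_t maxQ = *std::max_element(in.begin(), in.end());
      std::vector<float> e(in.size());
      float sum = 0.f;
      for (size_t i = 0; i < in.size(); ++i)
      {
        e[i] = std::exp(inScale * (static_cast<int32_t>(in[i]) - maxQ));
        sum += e[i];
      }
      std::vector<uint8_t> out(in.size());
      for (size_t i = 0; i < in.size(); ++i)
      {
        const int32_t q = static_cast<int32_t>(std::round(e[i] / sum * 256.f));
        out[i] = static_cast<uint8_t>(std::min(q, 255)); // output offset is 0
      }
      return out;
    }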
 
tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu
index 352e7da..eed2c98 100644
@@ -22,13 +22,6 @@ ValidationTestExecution.StartCompute
 ValidationTestExecution.EventWait
 GeneratedTests.add*
 GeneratedTests.argmax*
-GeneratedTests.avg_pool_quant8_1
-GeneratedTests.avg_pool_quant8_2
-GeneratedTests.avg_pool_quant8_3
-GeneratedTests.avg_pool_quant8_4
-GeneratedTests.concat_quant8_1
-GeneratedTests.concat_quant8_2
-GeneratedTests.concat_quant8_3
 GeneratedTests.conv_quant8_channels
 GeneratedTests.conv_quant8_channels_weights_as_inputs
 GeneratedTests.conv_quant8_large
@@ -56,9 +49,6 @@ GeneratedTests.logical_or_ex*
 GeneratedTests.logistic*
 GeneratedTests.lsh_projection*
 GeneratedTests.lstm*
-GeneratedTests.max_pool_quant8_1
-GeneratedTests.max_pool_quant8_2
-GeneratedTests.max_pool_quant8_3
 GeneratedTests.mobilenet*
 GeneratedTests.mul*
 GeneratedTests.neg*
@@ -94,9 +84,7 @@ GeneratedTests.topk_v2*
 # Unhandled exception
 GeneratedTests.fully_connected*
 # Unexpected result
-GeneratedTests.avg_pool_quant8_5
 GeneratedTests.conv_quant8_2
-GeneratedTests.max_pool_quant8_4
 GeneratedTests.softmax*
 GeneratedTests.split*
 GeneratedTests.transpose_conv*