Disable depthwiseConvQuant8 in NN runtime (#866)
author최형규/동작제어Lab(SR)/Senior Engineer/삼성전자 <hk0110.choi@samsung.com>
Tue, 24 Apr 2018 08:08:02 +0000 (17:08 +0900)
committer오형석/동작제어Lab(SR)/Senior Engineer/삼성전자 <hseok82.oh@samsung.com>
Tue, 24 Apr 2018 08:08:02 +0000 (17:08 +0900)
- Disable depthwiseConvQuant8 in NN runtime
- We disable this because
  - we don't have a test set for this yet
  - and we want to exclude unused code from the code coverage test.

Signed-off-by: Hyung-Kyu Choi <hk0110.choi@samsung.com>
src/runtime/ref/nn/common/CpuExecutor.cpp
src/runtime/ref/nn/common/include/Operations.h
src/runtime/ref/nn/common/operations/DepthwiseConv2D.cpp

index bd83f02..53cd021 100755 (executable)
@@ -493,6 +493,7 @@ int CpuExecutor::executeOperation(const Operation& operation) {
                                                reinterpret_cast<float*>(output.buffer),
                                                outShape);
             } else if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
                 success = depthwiseConvPrepare(input.shape(), filter.shape(), bias.shape(),
                                                padding_left, padding_right,
                                                padding_top, padding_bottom,
@@ -511,6 +512,10 @@ int CpuExecutor::executeOperation(const Operation& operation) {
                                               depth_multiplier, activation,
                                               reinterpret_cast<uint8_t*>(output.buffer),
                                               outShape);
+#else // REF-ANN
+                LOG(ERROR) << getOperationName(operation.type) << " failed.";
+                NYI("We don't support TENSOR_QUANT8_ASYMM yet.");
+#endif // REF-ANN
             }
 
         } break;
index 006772f..047461a 100644 (file)
@@ -70,6 +70,7 @@ bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape,
                           int32_t stride_width, int32_t stride_height,
                           int32_t depth_multiplier, int32_t activation,
                           float* outputData, const Shape& outputShape);
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
 bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
                          const uint8_t* filterData, const Shape& filterShape,
                          const int32_t* biasData, const Shape& biasShape,
@@ -78,6 +79,7 @@ bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
                          int32_t stride_width, int32_t stride_height,
                          int32_t depth_multiplier, int32_t activation,
                          uint8_t* outputData, const Shape& outputShape);
+#endif // REF-ANN
 
 bool convFloat32(const float* inputData, const Shape& inputShape,
                  const float* filterData, const Shape& filterShape,
index 9792048..a48f9d9 100755 (executable)
@@ -18,7 +18,9 @@
 #include "OperationsUtils.h"
 
 #include "internal/optimized/depthwiseconv_float.h"
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
 #include "internal/optimized/depthwiseconv_uint8.h"
+#endif
 
 namespace android {
 namespace nn {
@@ -61,6 +63,7 @@ bool depthwiseConvFloat32(const float* inputData, const Shape& inputShape,
 }
 
 
+#if 0 // REF-ANN We don't support depthwiseConvQuant8 yet
 bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
                          const uint8_t* filterData, const Shape& filterShape,
                          const int32_t* biasData, const Shape& biasShape,
@@ -108,6 +111,7 @@ bool depthwiseConvQuant8(const uint8_t* inputData, const Shape& inputShape,
 
     return true;
 }
+#endif // REF-ANN
 
 #undef ANDROID_NN_DEPTHWISE_CONV_PARAMETERS
 }  // namespace nn