[cker] Depthwise conv quant8 type (#5195)
author오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Fri, 17 May 2019 10:06:29 +0000 (19:06 +0900)
committer박세희/On-Device Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Fri, 17 May 2019 10:06:29 +0000 (19:06 +0900)
Introduce depthwise conv quant8 type kernel in cker
Implement the neurun cpu backend kernel for depthwise conv quant8 and enable the related generated tests

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
libs/cker/include/cker/operation/DepthwiseConv.h
runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc
tests/nnapi/nnapi_gtest.skip.armv7l-linux.cpu

index 42e9c5b..19913b2 100644 (file)
@@ -52,6 +52,88 @@ struct DepthwiseConvParams
 };
 
 inline void DepthwiseConv(const DepthwiseConvParams &params, const Shape &input_shape,
+                          const uint8_t *input_data, const Shape &filter_shape,
+                          const uint8_t *filter_data, const Shape &bias_shape,
+                          const int32_t *bias_data, const Shape &output_shape, uint8_t *output_data) // uint8 overload: reference (non-optimized) kernel for asymmetric-quantized depthwise conv, NHWC layout
+{
+  const int stride_width = params.stride_width;
+  const int stride_height = params.stride_height;
+  const int dilation_width_factor = params.dilation_width_factor;
+  const int dilation_height_factor = params.dilation_height_factor;
+  const int pad_width = params.padding_values.width;
+  const int pad_height = params.padding_values.height;
+  const int depth_multiplier = params.depth_multiplier; // number of output channels produced per input channel
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
+  const int32_t input_offset = params.input_offset; // input/filter offsets are negated zero-points; added back below to recover real values
+  const int32_t filter_offset = params.weights_offset;
+  const int32_t output_offset = params.output_offset; // output zero-point, added after requantization
+  const int32_t output_multiplier = params.output_multiplier; // fixed-point rescale factor (with output_shift) from accumulator scale to output scale
+  const int output_shift = params.output_shift; // shift paired with output_multiplier; sign convention defined by MultiplyByQuantizedMultiplier
+  assert(input_shape.DimensionsCount() == 4);
+  assert(filter_shape.DimensionsCount() == 4);
+  assert(output_shape.DimensionsCount() == 4);
+
+  assert(output_activation_min <= output_activation_max);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); // filter layout is [1, fh, fw, output_depth]
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int input_depth = input_shape.Dims(3);
+  const int filter_height = filter_shape.Dims(1);
+  const int filter_width = filter_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+  assert(output_depth == input_depth * depth_multiplier); // depthwise invariant: each input channel feeds exactly depth_multiplier outputs
+  assert(bias_shape.FlatSize() == output_depth);
+
+  for (int b = 0; b < batches; ++b)
+  {
+    for (int out_y = 0; out_y < output_height; ++out_y)
+    {
+      for (int out_x = 0; out_x < output_width; ++out_x)
+      {
+        for (int ic = 0; ic < input_depth; ++ic)
+        {
+          for (int m = 0; m < depth_multiplier; m++)
+          {
+            const int oc = m + ic * depth_multiplier; // output channel driven by input channel ic, multiplier slot m
+            const int in_x_origin = (out_x * stride_width) - pad_width;
+            const int in_y_origin = (out_y * stride_height) - pad_height;
+            int32_t acc = 0; // 32-bit accumulator; offset-corrected uint8 products cannot overflow it for realistic filter sizes
+            for (int filter_y = 0; filter_y < filter_height; ++filter_y)
+            {
+              for (int filter_x = 0; filter_x < filter_width; ++filter_x)
+              {
+                const int in_x = in_x_origin + dilation_width_factor * filter_x;
+                const int in_y = in_y_origin + dilation_height_factor * filter_y;
+                // If the location is outside the bounds of the input image,
+                // use zero as a default value.
+                if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && (in_y < input_height))
+                {
+                  int32_t input_val = input_data[Offset(input_shape, b, in_y, in_x, ic)];
+                  int32_t filter_val = filter_data[Offset(filter_shape, 0, filter_y, filter_x, oc)];
+                  acc += (filter_val + filter_offset) * (input_val + input_offset); // adding the (negated-zero-point) offsets recovers real-valued products
+                }
+              }
+            }
+            if (bias_data) // bias is optional; when present it is already in accumulator scale
+            {
+              acc += bias_data[oc];
+            }
+            acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); // requantize accumulator to the output scale
+            acc += output_offset;
+            acc = std::max(acc, output_activation_min); // fused activation expressed as a clamp in the quantized domain
+            acc = std::min(acc, output_activation_max);
+            output_data[Offset(output_shape, b, out_y, out_x, oc)] = static_cast<uint8_t>(acc); // safe: clamp above keeps acc inside the uint8 range
+          }
+        }
+      }
+    }
+  }
+}
+
+inline void DepthwiseConv(const DepthwiseConvParams &params, const Shape &input_shape,
                           const float *input_data, const Shape &filter_shape,
                           const float *filter_data, const Shape &bias_shape, const float *bias_data,
                           const Shape &output_shape, float *output_data)
index 7c956da..b8d0873 100644 (file)
@@ -58,7 +58,41 @@ void DepthwiseConvolutionLayer::convFloat32()
                             convertShapeToCkerShape(_outputShape), _outputData.f);
 }
 
-void DepthwiseConvolutionLayer::convQuant8() { throw "NYI"; }
+void DepthwiseConvolutionLayer::convQuant8() // QUANT8_ASYMM path: derive quantization params and dispatch to the cker uint8 kernel
+{
+  int32_t output_activation_min = 0;
+  int32_t output_activation_max = 0;
+  CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min, // clamp bounds for the fused activation, expressed in the output's quantized domain
+                                &output_activation_max);
+
+  float real_multiplier = 0.0;
+  int32_t output_multiplier = 0;
+  int32_t output_shift = 0;
+  GetQuantizedConvolutionMultipler(_inputShape, _kernelShape, _biasShape, _outputShape, // presumably real_multiplier = input_scale * filter_scale / output_scale -- confirm against helper
+                                   &real_multiplier);
+  QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); // encode the float rescale factor as fixed-point multiplier + shift
+
+  nnfw::cker::DepthwiseConvParams op_params;
+  op_params.stride_width = _strideWidth;
+  op_params.stride_height = _strideHeight;
+  op_params.dilation_width_factor = 1; // dilation is not configurable on this layer; fixed at 1
+  op_params.dilation_height_factor = 1;
+  op_params.padding_values.width = _paddingLeft;
+  op_params.padding_values.height = _paddingTop;
+  op_params.depth_multiplier = _multiplier;
+  op_params.input_offset = -_inputShape.offset; // kernel expects negated zero-points for input and weights
+  op_params.weights_offset = -_kernelShape.offset;
+  op_params.output_offset = _outputShape.offset; // output zero-point is passed unnegated (added after requantization)
+  op_params.output_multiplier = output_multiplier;
+  op_params.output_shift = output_shift;
+  op_params.quantized_activation_min = output_activation_min;
+  op_params.quantized_activation_max = output_activation_max;
+
+  nnfw::cker::DepthwiseConv(op_params, convertShapeToCkerShape(_inputShape), _inputData.u8, // u8/i32 union members select the uint8 kernel overload
+                            convertShapeToCkerShape(_kernelShape), _kernelData.u8,
+                            convertShapeToCkerShape(_biasShape), _biasData.i32,
+                            convertShapeToCkerShape(_outputShape), _outputData.u8);
+}
 
 void DepthwiseConvolutionLayer::configure(
     uint8_t *inputData, const Shape inputShape, uint8_t *kernelData, const Shape kernelShape,
@@ -94,7 +128,7 @@ void DepthwiseConvolutionLayer::run()
   }
   else if (_inputType == OperandType::QUANT8_ASYMM)
   {
-    throw std::runtime_error{"DepthwiseConvolutionLayer Quant8: Not supported yet"};
+    convQuant8();
   }
 }
 
index 608539a..39b7271 100644 (file)
@@ -19,8 +19,6 @@ GeneratedTests.add_broadcast*
 GeneratedTests.add_quant*
 GeneratedTests.argmax*
 GeneratedTests.depth_to_space*
-GeneratedTests.depthwise_conv2d_quant*
-GeneratedTests.depthwise_conv
 GeneratedTests.dequantize
 GeneratedTests.embedding_lookup
 GeneratedTests.embedding_lookup_2d_nnfw