Fix typo in depthwise convolution node (#5216)
author오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Mon, 20 May 2019 07:23:43 +0000 (16:23 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Mon, 20 May 2019 07:23:43 +0000 (16:23 +0900)
Fix typo: multipler -> multiplier
This typo comes from tensorflow lite

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc
runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc
runtimes/neurun/backend/cpu/kernel/FullyConnectedLayer.cc
runtimes/neurun/backend/cpu/kernel/OperationUtils.cc
runtimes/neurun/backend/cpu/kernel/OperationUtils.h
runtimes/pure_arm_compute/src/compilation.cc
runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.cc
runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.h

index 9cfbe92..efeabbb 100644 (file)
@@ -68,8 +68,8 @@ void ConvolutionLayer::convQuant8()
   float real_multiplier = 0.0;
   int32_t output_multiplier = 0;
   int32_t output_shift = 0;
-  GetQuantizedConvolutionMultipler(_inputShape, _kernelShape, _biasShape, _outputShape,
-                                   &real_multiplier);
+  GetQuantizedConvolutionMultiplier(_inputShape, _kernelShape, _biasShape, _outputShape,
+                                    &real_multiplier);
   QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
 
   nnfw::cker::ConvParams op_params;
index b8d0873..1c750e0 100644 (file)
@@ -68,8 +68,8 @@ void DepthwiseConvolutionLayer::convQuant8()
   float real_multiplier = 0.0;
   int32_t output_multiplier = 0;
   int32_t output_shift = 0;
-  GetQuantizedConvolutionMultipler(_inputShape, _kernelShape, _biasShape, _outputShape,
-                                   &real_multiplier);
+  GetQuantizedConvolutionMultiplier(_inputShape, _kernelShape, _biasShape, _outputShape,
+                                    &real_multiplier);
   QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
 
   nnfw::cker::DepthwiseConvParams op_params;
index 2cb7d92..cbd3692 100644 (file)
@@ -62,8 +62,8 @@ void FullyConnectedLayer::fullyConnectedQuant8()
   int32_t output_shift = 0;
   int32_t output_activation_min = 0;
   int32_t output_activation_max = 0;
-  GetQuantizedConvolutionMultipler(_inputShape, _weightsShape, _biasShape, _outputShape,
-                                   &real_multiplier);
+  GetQuantizedConvolutionMultiplier(_inputShape, _weightsShape, _biasShape, _outputShape,
+                                    &real_multiplier);
   QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift);
   CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min,
                                 &output_activation_max);
index a976d65..7de047e 100644 (file)
@@ -72,9 +72,9 @@ void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier,
   *quantized_multiplier = static_cast<int32_t>(q_fixed);
 }
 
-void GetQuantizedConvolutionMultipler(const Shape &inputShape, const Shape &filterShape,
-                                      const Shape &biasShape, const Shape &outputShape,
-                                      float *multiplier)
+void GetQuantizedConvolutionMultiplier(const Shape &inputShape, const Shape &filterShape,
+                                       const Shape &biasShape, const Shape &outputShape,
+                                       float *multiplier)
 {
   const float input_product_scale = inputShape.scale * filterShape.scale;
   const float bias_scale = biasShape.scale;
index 9e0abe9..bab0c5b 100644 (file)
@@ -103,9 +103,9 @@ inline nnfw::cker::Shape convertShapeToCkerShape(const Shape &shape)
 
 void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift);
 
-void GetQuantizedConvolutionMultipler(const Shape &inputShape, const Shape &filterShape,
-                                      const Shape &biasShape, const Shape &outputShape,
-                                      float *multiplier);
+void GetQuantizedConvolutionMultiplier(const Shape &inputShape, const Shape &filterShape,
+                                       const Shape &biasShape, const Shape &outputShape,
+                                       float *multiplier);
 
 void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier,
                                       int *left_shift);
index 20b47cb..5607d2e 100644 (file)
@@ -1218,7 +1218,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
   const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index};
 
   const ::internal::tflite::operand::Index padding_index{node.param().padding_index};
-  const ::internal::tflite::operand::Index multipler_index{node.param().multipler_index};
+  const ::internal::tflite::operand::Index multiplier_index{node.param().multiplier_index};
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
@@ -1227,7 +1227,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
   const auto ker_shape = _ctx.at(ker_index).shape().asFeature();
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
-  auto multiplier = _ctx.at(multipler_index).asScalar<int>();
+  auto multiplier = _ctx.at(multiplier_index).asScalar<int>();
 
   assert(ker_shape.C == bias_size);
   assert(ker_shape.C == ifm_shape.C * multiplier);
@@ -1360,7 +1360,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod
   const ::internal::tflite::operand::Index padding_top_index{node.param().padding_top_index};
   const ::internal::tflite::operand::Index padding_bottom_index{node.param().padding_bottom_index};
 
-  const ::internal::tflite::operand::Index multipler_index{node.param().multipler_index};
+  const ::internal::tflite::operand::Index multiplier_index{node.param().multiplier_index};
   const ::internal::tflite::operand::Index activation_index{node.param().activation_index};
 
   const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
@@ -1369,7 +1369,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod
   const auto ker_shape = _ctx.at(ker_index).shape().asFeature();
   const auto bias_size = _ctx.at(bias_index).shape().asVector();
 
-  auto multiplier = _ctx.at(multipler_index).asScalar<int>();
+  auto multiplier = _ctx.at(multiplier_index).asScalar<int>();
 
   assert(ker_shape.C == bias_size);
   assert(ker_shape.C == ifm_shape.C * multiplier);
index f91f834..f4d1ca3 100644 (file)
@@ -74,7 +74,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
   //  6 -> Padding_bottom index
   //  7 -> Stride (width) Index
   //  8 -> Stride (height) INdex
-  //  9 -> Depthwise Multipler
+  //  9 -> Depthwise Multiplier
   //  10 -> Activation Index
   ifm_index = inputs[0];
   ker_index = inputs[1];
@@ -85,7 +85,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
   padding_bottom_index = inputs[6];
   hstride_index = inputs[7];
   vstride_index = inputs[8];
-  multipler_index = inputs[9];
+  multiplier_index = inputs[9];
   activation_index = inputs[10];
 }
 
@@ -109,7 +109,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
   //  3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
   //  4 -> Stride (width) Index
   //  5 -> Stride (height) INdex
-  //  6 -> Depthwise Multipler
+  //  6 -> Depthwise Multiplier
   //  7 -> Activation Index
   ifm_index = inputs[0];
   ker_index = inputs[1];
@@ -117,7 +117,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount,
   padding_index = inputs[3];
   hstride_index = inputs[4];
   vstride_index = inputs[5];
-  multipler_index = inputs[6];
+  multiplier_index = inputs[6];
   activation_index = inputs[7];
 }
 
index c63e30a..01a9e48 100644 (file)
@@ -57,7 +57,7 @@ struct Param
   int32_t padding_top_index;    /**< Index of padding top */
   int32_t padding_bottom_index; /**< Index of padding bottom */
 
-  int32_t multipler_index;  /**< Index of multipler */
+  int32_t multiplier_index; /**< Index of multiplier */
   int32_t activation_index; /**< Index of activation */
   /**
    * @brief Construct as default
@@ -133,7 +133,7 @@ struct Param
   int32_t vstride_index; /**< Index of vertical stride */
 
   int32_t padding_index;    /**< Index of padding */
-  int32_t multipler_index;  /**< Index of multipler */
+  int32_t multiplier_index; /**< Index of multiplier */
   int32_t activation_index; /**< Index of activation */
   /**
    * @brief Construct as default