From a4928c2387ac702b6b39069e01451ecff8fd3237 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?= =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Mon, 20 May 2019 16:23:43 +0900 Subject: [PATCH] Fix typo in depthwise convolution node (#5216) Fix typo: multipler -> multiplier This typo comes from tensorflow lite Signed-off-by: Hyeongseok Oh --- runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc | 4 ++-- runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc | 4 ++-- runtimes/neurun/backend/cpu/kernel/FullyConnectedLayer.cc | 4 ++-- runtimes/neurun/backend/cpu/kernel/OperationUtils.cc | 6 +++--- runtimes/neurun/backend/cpu/kernel/OperationUtils.h | 6 +++--- runtimes/pure_arm_compute/src/compilation.cc | 8 ++++---- runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.cc | 8 ++++---- runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.h | 4 ++-- 8 files changed, 22 insertions(+), 22 deletions(-) diff --git a/runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc b/runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc index 9cfbe92..efeabbb 100644 --- a/runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc +++ b/runtimes/neurun/backend/cpu/kernel/ConvolutionLayer.cc @@ -68,8 +68,8 @@ void ConvolutionLayer::convQuant8() float real_multiplier = 0.0; int32_t output_multiplier = 0; int32_t output_shift = 0; - GetQuantizedConvolutionMultipler(_inputShape, _kernelShape, _biasShape, _outputShape, - &real_multiplier); + GetQuantizedConvolutionMultiplier(_inputShape, _kernelShape, _biasShape, _outputShape, + &real_multiplier); QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); nnfw::cker::ConvParams op_params; diff --git a/runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc b/runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc index b8d0873..1c750e0 100644 --- a/runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc +++ 
b/runtimes/neurun/backend/cpu/kernel/DepthwiseConvolutionLayer.cc @@ -68,8 +68,8 @@ void DepthwiseConvolutionLayer::convQuant8() float real_multiplier = 0.0; int32_t output_multiplier = 0; int32_t output_shift = 0; - GetQuantizedConvolutionMultipler(_inputShape, _kernelShape, _biasShape, _outputShape, - &real_multiplier); + GetQuantizedConvolutionMultiplier(_inputShape, _kernelShape, _biasShape, _outputShape, + &real_multiplier); QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); nnfw::cker::DepthwiseConvParams op_params; diff --git a/runtimes/neurun/backend/cpu/kernel/FullyConnectedLayer.cc b/runtimes/neurun/backend/cpu/kernel/FullyConnectedLayer.cc index 2cb7d92..cbd3692 100644 --- a/runtimes/neurun/backend/cpu/kernel/FullyConnectedLayer.cc +++ b/runtimes/neurun/backend/cpu/kernel/FullyConnectedLayer.cc @@ -62,8 +62,8 @@ void FullyConnectedLayer::fullyConnectedQuant8() int32_t output_shift = 0; int32_t output_activation_min = 0; int32_t output_activation_max = 0; - GetQuantizedConvolutionMultipler(_inputShape, _weightsShape, _biasShape, _outputShape, - &real_multiplier); + GetQuantizedConvolutionMultiplier(_inputShape, _weightsShape, _biasShape, _outputShape, + &real_multiplier); QuantizeMultiplier(real_multiplier, &output_multiplier, &output_shift); CalculateActivationRangeUint8(_activation, _outputShape, &output_activation_min, &output_activation_max); diff --git a/runtimes/neurun/backend/cpu/kernel/OperationUtils.cc b/runtimes/neurun/backend/cpu/kernel/OperationUtils.cc index a976d65..7de047e 100644 --- a/runtimes/neurun/backend/cpu/kernel/OperationUtils.cc +++ b/runtimes/neurun/backend/cpu/kernel/OperationUtils.cc @@ -72,9 +72,9 @@ void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, *quantized_multiplier = static_cast(q_fixed); } -void GetQuantizedConvolutionMultipler(const Shape &inputShape, const Shape &filterShape, - const Shape &biasShape, const Shape &outputShape, - float *multiplier) +void 
GetQuantizedConvolutionMultiplier(const Shape &inputShape, const Shape &filterShape, + const Shape &biasShape, const Shape &outputShape, + float *multiplier) { const float input_product_scale = inputShape.scale * filterShape.scale; const float bias_scale = biasShape.scale; diff --git a/runtimes/neurun/backend/cpu/kernel/OperationUtils.h b/runtimes/neurun/backend/cpu/kernel/OperationUtils.h index 9e0abe9..bab0c5b 100644 --- a/runtimes/neurun/backend/cpu/kernel/OperationUtils.h +++ b/runtimes/neurun/backend/cpu/kernel/OperationUtils.h @@ -103,9 +103,9 @@ inline nnfw::cker::Shape convertShapeToCkerShape(const Shape &shape) void QuantizeMultiplier(double double_multiplier, int32_t *quantized_multiplier, int *shift); -void GetQuantizedConvolutionMultipler(const Shape &inputShape, const Shape &filterShape, - const Shape &biasShape, const Shape &outputShape, - float *multiplier); +void GetQuantizedConvolutionMultiplier(const Shape &inputShape, const Shape &filterShape, + const Shape &biasShape, const Shape &outputShape, + float *multiplier); void QuantizeMultiplierGreaterThanOne(double double_multiplier, int32_t *quantized_multiplier, int *left_shift); diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc index 20b47cb..5607d2e 100644 --- a/runtimes/pure_arm_compute/src/compilation.cc +++ b/runtimes/pure_arm_compute/src/compilation.cc @@ -1218,7 +1218,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod const ::internal::tflite::operand::Index hstride_index{node.param().hstride_index}; const ::internal::tflite::operand::Index padding_index{node.param().padding_index}; - const ::internal::tflite::operand::Index multipler_index{node.param().multipler_index}; + const ::internal::tflite::operand::Index multiplier_index{node.param().multiplier_index}; const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; const auto ofm_shape = 
_ctx.at(ofm_index).shape().asFeature(); @@ -1227,7 +1227,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod const auto ker_shape = _ctx.at(ker_index).shape().asFeature(); const auto bias_size = _ctx.at(bias_index).shape().asVector(); - auto multiplier = _ctx.at(multipler_index).asScalar(); + auto multiplier = _ctx.at(multiplier_index).asScalar(); assert(ker_shape.C == bias_size); assert(ker_shape.C == ifm_shape.C * multiplier); @@ -1360,7 +1360,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod const ::internal::tflite::operand::Index padding_top_index{node.param().padding_top_index}; const ::internal::tflite::operand::Index padding_bottom_index{node.param().padding_bottom_index}; - const ::internal::tflite::operand::Index multipler_index{node.param().multipler_index}; + const ::internal::tflite::operand::Index multiplier_index{node.param().multiplier_index}; const ::internal::tflite::operand::Index activation_index{node.param().activation_index}; const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(); @@ -1369,7 +1369,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod const auto ker_shape = _ctx.at(ker_index).shape().asFeature(); const auto bias_size = _ctx.at(bias_index).shape().asVector(); - auto multiplier = _ctx.at(multipler_index).asScalar(); + auto multiplier = _ctx.at(multiplier_index).asScalar(); assert(ker_shape.C == bias_size); assert(ker_shape.C == ifm_shape.C * multiplier); diff --git a/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.cc b/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.cc index f91f834..f4d1ca3 100644 --- a/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.cc +++ b/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.cc @@ -74,7 +74,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, // 6 -> Padding_bottom index // 7 -> Stride (width) Index // 8 -> Stride 
(height) INdex - // 9 -> Depthwise Multipler + // 9 -> Depthwise Multiplier // 10 -> Activation Index ifm_index = inputs[0]; ker_index = inputs[1]; @@ -85,7 +85,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, padding_bottom_index = inputs[6]; hstride_index = inputs[7]; vstride_index = inputs[8]; - multipler_index = inputs[9]; + multiplier_index = inputs[9]; activation_index = inputs[10]; } @@ -109,7 +109,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index // 4 -> Stride (width) Index // 5 -> Stride (height) INdex - // 6 -> Depthwise Multipler + // 6 -> Depthwise Multiplier // 7 -> Activation Index ifm_index = inputs[0]; ker_index = inputs[1]; @@ -117,7 +117,7 @@ Param::Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, padding_index = inputs[3]; hstride_index = inputs[4]; vstride_index = inputs[5]; - multipler_index = inputs[6]; + multiplier_index = inputs[6]; activation_index = inputs[7]; } diff --git a/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.h b/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.h index c63e30a..01a9e48 100644 --- a/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.h +++ b/runtimes/pure_arm_compute/src/internal/op/DepthwiseConv2D.h @@ -57,7 +57,7 @@ struct Param int32_t padding_top_index; /**< Index of padding top */ int32_t padding_bottom_index; /**< Index of padding bottom */ - int32_t multipler_index; /**< Index of multipler */ + int32_t multiplier_index; /**< Index of multiplier */ int32_t activation_index; /**< Index of activation */ /** * @brief Construct as default @@ -133,7 +133,7 @@ struct Param int32_t vstride_index; /**< Index of vertical stride */ int32_t padding_index; /**< Index of padding */ - int32_t multipler_index; /**< Index of multipler */ + int32_t multiplier_index; /**< Index of multiplier */ int32_t
activation_index; /**< Index of activation */ /** * @brief Construct as default -- 2.7.4