COMPMID-3851: Fix regression on NEDepthwiseConvolutionLayerNativeKernel
authorSang-Hoon Park <sang-hoon.park@arm.com>
Thu, 12 Nov 2020 17:41:32 +0000 (17:41 +0000)
committerSang-Hoon Park <sang-hoon.park@arm.com>
Fri, 13 Nov 2020 11:02:13 +0000 (11:02 +0000)
The exit condition of some for loops in the quantized
version of the kernel with depth_multiplier=1 is now
decided at compile time to fix the performance regression.

Change-Id: I849b3d63b2a2cf5eb374ae681898ae1c296fb4fe
Signed-off-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4392
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>

src/core/NEON/kernels/NEDepthwiseConvolutionLayerNativeKernel.cpp

index 90a81b30c942f0fa1c924b513f1881d90dc4eabd..87315909d87b7469c71458db3aabff47e97d1f5f 100644 (file)
@@ -372,7 +372,7 @@ void depthwise_loop_multiplier1_quantized(const ITensor *input, const ITensor *w
                                                  out_of_bound_vector;
                     const auto weights_vals = wrapper::vload(reinterpret_cast<TW *>(weights_ptr + w * run_info.weights_stride_y) + x);
 
-                    for(size_t i = 0; i < run_info.x_step; ++i)
+                    for(size_t i = 0; i < element_per_vector; ++i)
                     {
                         acc.at(i) += input_vals[i] * weights_vals[i];
                         in_sum.at(i) += input_vals[i];
@@ -387,7 +387,7 @@ void depthwise_loop_multiplier1_quantized(const ITensor *input, const ITensor *w
             }
 
             VectorType out_vals = wrapper::vdup_n(static_cast<T>(0), TagType{});
-            for(size_t i = 0; i < run_info.x_step; ++i)
+            for(size_t i = 0; i < element_per_vector; ++i)
             {
                 acc.at(i) -= in_sum.at(i) * weights_qoffset;
                 acc.at(i) -= we_sum.at(i) * input_qoffset;
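
For context, a minimal sketch of why the change helps, not the library's actual
code: when the trip count is a compile-time constant derived from the vector
width (element_per_vector), the compiler can fully unroll the accumulation
loop, whereas a bound read at runtime from run_info.x_step prevents that. The
helper name accumulate, the template parameter VectorBytes, and the plain
std::array types below are hypothetical simplifications standing in for the
NEON wrapper types used by the real kernel; acc, in_sum, we_sum, input_vals
and weights_vals mirror the names in the hunks above.

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in for the accumulation step in
    // depthwise_loop_multiplier1_quantized.
    template <typename T, size_t VectorBytes = 16>
    void accumulate(const T *input_vals, const T *weights_vals,
                    std::array<int32_t, VectorBytes / sizeof(T)> &acc,
                    std::array<int32_t, VectorBytes / sizeof(T)> &in_sum,
                    std::array<int32_t, VectorBytes / sizeof(T)> &we_sum)
    {
        // Trip count known at compile time: the compiler can fully unroll
        // this loop instead of testing a runtime bound on every iteration.
        constexpr size_t element_per_vector = VectorBytes / sizeof(T);
        for(size_t i = 0; i < element_per_vector; ++i)
        {
            acc[i]    += static_cast<int32_t>(input_vals[i]) * weights_vals[i];
            in_sum[i] += input_vals[i];
            we_sum[i] += weights_vals[i];
        }
    }

    int main()
    {
        // Hypothetical usage with 8-bit quantized data (16 lanes per
        // 128-bit vector).
        std::array<uint8_t, 16> in{}, w{};
        std::array<int32_t, 16> acc{}, in_sum{}, we_sum{};
        accumulate<uint8_t>(in.data(), w.data(), acc, in_sum, we_sum);
        return 0;
    }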