From: Jiyoung Yun/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Fri, 29 Mar 2019 04:21:56 +0000 (+0900)
Subject: Revert "[PACL] Apply DepthwiseConvolutionLayer3x3 to DepthwiseConv operation (#4354...
X-Git-Tag: accepted/tizen/unified/20190430.113441~85
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=042510f77f5c066aa7ab97b8d93649b50c2c5626;p=platform%2Fcore%2Fml%2Fnnfw.git

Revert "[PACL] Apply DepthwiseConvolutionLayer3x3 to DepthwiseConv operation (#4354)" (#4840)

This reverts commit 34f49de36ae41f10735fd570e006981671e13a68.
The same changes were merged in ACL v19.02.
Related PR: https://review.mlplatform.org/#/c/ml/ComputeLibrary/+/511/

Signed-off-by: Jiyoung Yun
---

diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc
index dcf7a30..d028e01 100644
--- a/runtimes/pure_arm_compute/src/compilation.cc
+++ b/runtimes/pure_arm_compute/src/compilation.cc
@@ -1324,35 +1324,20 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
 
   if (::internal::arm_compute::isGpuMode())
   {
-    if (getHeight(ker_alloc) == 3 && getWidth(ker_alloc) == 3)
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer3x3>();
-      fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc),
-                    CAST_CL(ofm_alloc), conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
-    else
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
-      fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc),
-                    CAST_CL(ofm_alloc), conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+
+    fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
+                  conv_info, param.multipler);
+
+    builder.append("DepthwiseConv2D", std::move(fn));
   }
   else
   {
-    if (getHeight(ker_alloc) == 3 && getWidth(ker_alloc) == 3)
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer3x3>();
-      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
-    else
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
-      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+
+    fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
+
+    builder.append("DepthwiseConv2D", std::move(fn));
   }
 
   ActivationBuilder{builder}.append(param.activation, ofm_alloc);
@@ -1482,35 +1467,20 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod
 
   if (::internal::arm_compute::isGpuMode())
   {
-    if (getHeight(ker_alloc) == 3 && getWidth(ker_alloc) == 3)
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer3x3>();
-      fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc),
-                    CAST_CL(ofm_alloc), conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
-    else
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
-      fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc),
-                    CAST_CL(ofm_alloc), conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+
+    fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
+                  conv_info, param.multipler);
+
+    builder.append("DepthwiseConv2D", std::move(fn));
   }
   else
   {
-    if (getHeight(ker_alloc) == 3 && getWidth(ker_alloc) == 3)
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer3x3>();
-      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
-    else
-    {
-      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
-      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
-      builder.append("DepthwiseConv2D", std::move(fn));
-    }
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+
+    fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
+
+    builder.append("DepthwiseConv2D", std::move(fn));
   }
 
   ActivationBuilder{builder}.append(param.activation, ofm_alloc);
diff --git a/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.cc b/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.cc
index eabae09..1a5c735 100644
--- a/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.cc
+++ b/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.cc
@@ -150,17 +150,3 @@
 {
   return ::arm_compute::TensorInfo(shape, 1, type, asQuantizationInfo(scale, zeroPoint));
 }
-
-size_t getHeight(const ::arm_compute::ITensor *tensor)
-{
-  assert(tensor != nullptr);
-  return tensor->info()->dimension(::arm_compute::get_data_layout_dimension_index(
-      tensor->info()->data_layout(), ::arm_compute::DataLayoutDimension::HEIGHT));
-}
-
-size_t getWidth(const ::arm_compute::ITensor *tensor)
-{
-  assert(tensor != nullptr);
-  return tensor->info()->dimension(::arm_compute::get_data_layout_dimension_index(
-      tensor->info()->data_layout(), ::arm_compute::DataLayoutDimension::WIDTH));
-}
diff --git a/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h b/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h
index c8d94e8..211a6ac 100644
--- a/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h
+++ b/runtimes/pure_arm_compute/src/internal/arm_compute/Cast.h
@@ -153,22 +153,4 @@ void copyCast(const FromT value, ::arm_compute::ITensor *to, const ::arm_compute
   }
 }
 
-/**
- * @brief Get the value of the height dimension for a given tensor
- * @param[in] tensor Target tensor of arm compute
- * @return The height value
- */
-// TODO It seems that this helper function is not for casting, but we couldn't find proper source
-// location.
-size_t getHeight(const ::arm_compute::ITensor *tensor);
-
-/**
- * @brief Get the value of the width dimension for a given tensor
- * @param[in] tensor Target tensor of arm compute
- * @return The width value
- */
-// TODO It seems that this helper function is not for casting, but we couldn't find proper source
-// location.
-size_t getWidth(const ::arm_compute::ITensor *tensor);
-
 #endif // __ARM_COMPUTE_CAST_H__
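
After this revert, both visit() overloads configure the generic CLDepthwiseConvolutionLayer / NEDepthwiseConvolutionLayer unconditionally; per the commit message, selecting an optimized 3x3 implementation is expected to happen inside ACL v19.02 itself rather than in pure_arm_compute, so the getHeight()/getWidth() kernel-size check is no longer needed in the runtime. The standalone C++ sketch below illustrates the resulting call pattern on the NEON path. It is not part of the patch: the tensor shapes, padding, stride, and depth multiplier are illustrative assumptions; only the six-argument configure() call mirrors the code above.

// Minimal sketch of the post-revert call pattern, assuming ACL v19.02 headers.
// Shapes, strides, and padding below are made up for illustration only.
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h"

int main()
{
  using namespace arm_compute;

  Tensor ifm, ker, bias, ofm;

  // NCHW-style shapes (W, H, C): a 3x3 depthwise kernel with depth multiplier 1.
  ifm.allocator()->init(TensorInfo(TensorShape(8U, 8U, 4U), 1, DataType::F32));
  ker.allocator()->init(TensorInfo(TensorShape(3U, 3U, 4U), 1, DataType::F32));
  bias.allocator()->init(TensorInfo(TensorShape(4U), 1, DataType::F32));
  ofm.allocator()->init(TensorInfo(TensorShape(8U, 8U, 4U), 1, DataType::F32));

  // stride 1x1, pad 1x1 ("same" output size for a 3x3 kernel)
  const PadStrideInfo conv_info(1, 1, 1, 1);

  // No kernel-size check here: the generic layer is configured directly,
  // matching the call in the reverted compilation.cc above.
  NEDepthwiseConvolutionLayer fn;
  fn.configure(&ifm, &ker, &bias, &ofm, conv_info, 1 /* depth_multiplier */);

  ifm.allocator()->allocate();
  ker.allocator()->allocate();
  bias.allocator()->allocate();
  ofm.allocator()->allocate();

  fn.run(); // runs on the CPU/NEON backend; tensor contents are left uninitialized here
  return 0;
}

The CL path in the patch follows the same shape, with CLTensor operands wrapped by CAST_CL and CLDepthwiseConvolutionLayer in place of the NEON function.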