COMPMID-3069: Align graph convolution implementation for CL and NEON.
author     Georgios Pinitas <georgios.pinitas@arm.com>
           Thu, 14 May 2020 09:03:56 +0000 (10:03 +0100)
committer  Georgios Pinitas <georgios.pinitas@arm.com>
           Thu, 14 May 2020 15:22:54 +0000 (15:22 +0000)
Enables fast-math on the NEON backend for convolution.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: Ia072f0fd2db1f0814562049b290cffc91cbbd9a8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3201
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>

src/graph/backends/NEON/NEFunctionFactory.cpp
src/runtime/NEON/functions/NEConvolutionLayer.cpp
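
For context, here is a minimal frontend sketch (assumed usage, not part of this patch) of how a graph user requests fast math; with this change the FastMathHint set on the stream is honoured by the NEON convolution path as well, not only by CL. Shapes and accessors below are placeholders.

#include "arm_compute/graph.h"

using namespace arm_compute;
using namespace arm_compute::graph;
using namespace arm_compute::graph::frontend;

void build_fast_math_graph()
{
    Stream graph(0, "neon_fast_math_conv");

    graph << Target::NEON
          << FastMathHint::Enabled // hint forwarded to each ConvolutionLayerNode
          << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U), DataType::F32),
                        nullptr /* input accessor omitted in this sketch */)
          << ConvolutionLayer(3U, 3U, 64U,
                              nullptr /* weights accessor */, nullptr /* biases accessor */,
                              PadStrideInfo(1, 1, 1, 1))
          << OutputLayer(nullptr /* output accessor omitted */);

    GraphConfig config;
    graph.finalize(Target::NEON, config);
}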

src/graph/backends/NEON/NEFunctionFactory.cpp
index 0aea15d941bb096a30bf16a393c1fc69e28a1920..454215e7eced257b2c69460d394e704d18c2328f 100644 (file)
@@ -80,78 +80,6 @@ struct NEFusedLayerTypes
 
 namespace detail
 {
-// Specialized functions
-template <>
-std::unique_ptr<IFunction> create_convolution_layer<NEConvolutionLayerFunctions, NETargetInfo>(ConvolutionLayerNode &node,
-                                                                                               GraphContext &ctx)
-{
-    validate_node<NETargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
-
-    // Extract IO and info
-    NETargetInfo::TensorType *input   = get_backing_tensor<NETargetInfo>(node.input(0));
-    NETargetInfo::TensorType *weights = get_backing_tensor<NETargetInfo>(node.input(1));
-    NETargetInfo::TensorType *biases  = get_backing_tensor<NETargetInfo>(node.input(2));
-    NETargetInfo::TensorType *output  = get_backing_tensor<NETargetInfo>(node.output(0));
-
-    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
-
-    if(is_quantized)
-    {
-        biases->info()->set_data_type(DataType::S32);
-    }
-
-    const PadStrideInfo       conv_info      = node.convolution_info();
-    const ConvolutionMethod   conv_algorithm = node.convolution_method();
-    const ActivationLayerInfo fused_act      = node.fused_activation();
-
-    // Create and configure function (we assume that functions have been validated before creation)
-    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::NEON);
-    std::unique_ptr<IFunction>      func;
-    std::string                     func_name;
-
-    if(conv_algorithm == ConvolutionMethod::Direct)
-    {
-        std::tie(func, func_name) = create_named_memory_managed_function<NEDirectConvolutionLayer>(
-                                        std::string("DirectConvolutionLayer"), mm, input, weights, biases, output, conv_info, fused_act);
-    }
-    else if(conv_algorithm == ConvolutionMethod::GEMM)
-    {
-        std::tie(func, func_name) = create_named_memory_managed_function<NEGEMMConvolutionLayer>(
-                                        std::string("GEMMConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(), Size2D(1, 1), fused_act);
-    }
-    else if(conv_algorithm == ConvolutionMethod::Winograd)
-    {
-        std::tie(func, func_name) = create_named_memory_managed_function<NEWinogradConvolutionLayer>(
-                                        std::string("WinogradConvolutionLayer"), mm, input, weights, biases, output, conv_info, fused_act);
-    }
-    else
-    {
-        std::tie(func, func_name) = create_named_memory_managed_function<NEConvolutionLayer>(
-                                        std::string("ConvolutionLayer"), mm, input, weights, biases, output, conv_info, WeightsInfo(), Size2D(1, 1), fused_act);
-    }
-
-    // Log info
-    std::ostringstream qss;
-    if(is_quantized)
-    {
-        qss << " Input QuantInfo: " << input->info()->quantization_info()
-            << " Weights QuantInfo: " << weights->info()->quantization_info()
-            << " Output QuantInfo: " << output->info()->quantization_info();
-    }
-    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
-                               << node.name()
-                               << " Type: " << func_name
-                               << " Target: " << NETargetInfo::TargetType
-                               << " Data Type: " << input->info()->data_type()
-                               << qss.str()
-                               << " Input shape: " << input->info()->tensor_shape()
-                               << " Weights shape: " << weights->info()->tensor_shape()
-                               << " Output shape: " << output->info()->tensor_shape()
-                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
-                               << std::endl);
-    return func;
-}
-
 template <>
 std::unique_ptr<IFunction> create_normalization_layer<NENormalizationLayer, NETargetInfo>(NormalizationLayerNode &node, GraphContext &ctx)
 {
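
With the NEON-specific overload removed above, convolution nodes now go through the generic detail::create_convolution_layer<ConvolutionLayerFunctions, TargetInfo> template shared with the CL backend. A paraphrased sketch, not the verbatim implementation, of the part relevant to this patch: the node's fast-math hint becomes the enable_fast_math flag passed to the backend functions.

    // Paraphrased sketch of the shared template (FunctionHelpers.h), not verbatim:
    const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;

    // ... method-specific branches elided; the generic fallback forwards the flag:
    std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                    std::string("GenericConvolutionLayer"), mm,
                                    input, weights, biases, output, conv_info,
                                    WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
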
src/runtime/NEON/functions/NEConvolutionLayer.cpp
index dcd26fc1cd065f9119feeded5479f52f9bd58650..4a779917a753293855b1ed14ebf06be197717099 100644 (file)
@@ -50,7 +50,7 @@ void NEConvolutionLayer::configure(ITensor *input, const ITensor *weights, const
     ARM_COMPUTE_ERROR_THROW_ON(NEConvolutionLayer::validate(input->info(), weights->info(), ((biases != nullptr) ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info,
                                                             enable_fast_math));
 
-    switch(NEConvolutionLayer::get_convolution_method(input->info(), weights->info(), output->info(), conv_info, weights_info, dilation, act_info))
+    switch(NEConvolutionLayer::get_convolution_method(input->info(), weights->info(), output->info(), conv_info, weights_info, dilation, act_info, enable_fast_math))
     {
         case ConvolutionMethod::WINOGRAD:
         {
@@ -91,7 +91,7 @@ Status NEConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo
 {
     ARM_COMPUTE_RETURN_ERROR_ON_MSG((num_groups != 1), "Grouping (num_groups != 1) is not supported on NEON");
 
-    switch(NEConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info))
+    switch(NEConvolutionLayer::get_convolution_method(input, weights, output, conv_info, weights_info, dilation, act_info, enable_fast_math))
     {
         case ConvolutionMethod::WINOGRAD:
             //Validate Winograd
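
A minimal runtime-level sketch (illustrative shapes, assumed to compile against this revision) of passing enable_fast_math directly to NEConvolutionLayer, which after this change also feeds the method selection shown above:

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/NEON/functions/NEConvolutionLayer.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

void run_fast_math_convolution()
{
    Tensor src, weights, biases, dst;
    src.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 64U), 1, DataType::F32));
    biases.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(224U, 224U, 64U), 1, DataType::F32));

    NEConvolutionLayer conv;
    // enable_fast_math = true lets get_convolution_method() consider faster,
    // potentially less accurate implementations (e.g. Winograd variants).
    conv.configure(&src, &weights, &biases, &dst,
                   PadStrideInfo(1, 1, 1, 1),
                   WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(),
                   true /* enable_fast_math */);

    src.allocator()->allocate();
    weights.allocator()->allocate();
    biases.allocator()->allocate();
    dst.allocator()->allocate();

    conv.run();
}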