arm_compute v17.10
src/graph/nodes/PoolingLayer.cpp
/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/nodes/PoolingLayer.h"

#include "arm_compute/core/Logger.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
#include "arm_compute/runtime/Tensor.h"
#include "support/ToolchainSupport.h"
#include "utils/TypePrinter.h"

using namespace arm_compute::graph;

namespace
{
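// Creates and configures a backend-specific pooling function.
//
// The graph front-end deals in arm_compute::ITensor pointers, so the input and
// output are downcast to the concrete backend tensor type (CLTensor or Tensor)
// before being passed to the function's configure() method.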
template <typename PoolingType, typename TensorType, TargetHint target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate_function(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
    auto pool = arm_compute::support::cpp14::make_unique<PoolingType>();
    pool->configure(
        dynamic_cast<TensorType *>(input),
        dynamic_cast<TensorType *>(output),
        pool_info);

    return std::move(pool);
}

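// Backend dispatch helper: the primary template is declared but not defined, so
// only the OpenCL and NEON specializations below are usable.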
template <TargetHint                    target_hint>
std::unique_ptr<arm_compute::IFunction> instantiate(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);

template <>
std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::OPENCL>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
    return instantiate_function<arm_compute::CLPoolingLayer, arm_compute::CLTensor, TargetHint::OPENCL>(input, output, pool_info);
}

template <>
std::unique_ptr<arm_compute::IFunction> instantiate<TargetHint::NEON>(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info)
{
    return instantiate_function<arm_compute::NEPoolingLayer, arm_compute::Tensor, TargetHint::NEON>(input, output, pool_info);
}
} // namespace

PoolingLayer::PoolingLayer(const PoolingLayerInfo pool_info)
    : _pool_info(pool_info)
{
}

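// Selects the backend according to the graph context's target hint (OpenCL when
// requested, NEON otherwise), builds the corresponding pooling function and logs
// the node's configuration.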
std::unique_ptr<arm_compute::IFunction> PoolingLayer::instantiate_node(GraphContext &ctx, ITensor *input, ITensor *output)
{
    std::unique_ptr<arm_compute::IFunction> func;
    _target_hint = ctx.hints().target_hint();

    if(_target_hint == TargetHint::OPENCL)
    {
        func = instantiate<TargetHint::OPENCL>(input, output, _pool_info);
        ARM_COMPUTE_LOG("Instantiating CLPoolingLayer");
    }
    else
    {
        func = instantiate<TargetHint::NEON>(input, output, _pool_info);
        ARM_COMPUTE_LOG("Instantiating NEPoolingLayer");
    }

    ARM_COMPUTE_LOG(" Data Type: " << input->info()->data_type()
                    << " Input shape: " << input->info()->tensor_shape()
                    << " Output shape: " << output->info()->tensor_shape()
                    << " Pooling info: " << _pool_info << std::endl);

    return func;
}
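
// Usage sketch (illustrative, not part of this translation unit): in the v17.10 graph
// API a PoolingLayer node is appended to a Graph with the streaming operator, and the
// graph later calls instantiate_node() with the resolved input/output tensors.
// DummyAccessor comes from the example utilities (utils/GraphUtils.h); the tensor shape
// and pooling parameters below are made up for illustration.
//
//   Graph graph;
//   graph << TargetHint::OPENCL
//         << Tensor(TensorInfo(TensorShape(28U, 28U, 16U), 1, DataType::F32), DummyAccessor())
//         << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)))
//         << Tensor(DummyAccessor());
//   graph.run();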