/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLReduceOperation.h"

#include "arm_compute/core/CL/kernels/CLReduceOperationKernel.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/runtime/CL/CLScheduler.h"

using namespace arm_compute;
CLReduceOperation::CLReduceOperation(std::shared_ptr<IMemoryManager> memory_manager)
    : _memory_group(std::move(memory_manager)), _input(nullptr), _output(nullptr), _axis(),
      _keep_dims(false), _interm_tensors(), _reduce_kernels(), _reshape()
{
}
Status CLReduceOperation::validate(const ITensorInfo *input, const ITensorInfo *output,
                                   const std::set<uint32_t> &axis, bool keep_dims,
                                   const ReduceOperation &op)
{
  const size_t num_of_kernels = axis.size();
  const size_t num_of_interm_tensors = num_of_kernels - (keep_dims ? 1 : 0);
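
  // NOTE One kernel runs per reduction axis; when keep_dims is true the last kernel
  // writes directly to the output, so one fewer intermediate tensor is needed.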

  // Create temporary tensor infos
  auto interm_tensors = support::cpp14::make_unique<TensorInfo[]>(num_of_interm_tensors);

  // Create intermediate tensor info
  TensorShape shape{input->tensor_shape()};

  auto it = axis.begin();
  for (size_t i = 0; i < num_of_interm_tensors; ++i, ++it)
  {
    shape.set(*it, 1, false);
    interm_tensors[i].set_data_type(input->data_type());
    interm_tensors[i].set_tensor_shape(shape);
    interm_tensors[i].set_num_channels(input->num_channels());
    interm_tensors[i].set_data_layout(input->data_layout());
    interm_tensors[i].set_quantization_info(input->quantization_info());
  }

  // Set a vector of tensor infos ordered sequentially: input, intermediates, output.
  std::vector<const ITensorInfo *> tensors;
  tensors.emplace_back(input);
  for (size_t i = 0; i < num_of_interm_tensors; ++i)
  {
    tensors.emplace_back(interm_tensors.get() + i);
  }
  tensors.emplace_back(output);
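  // tensors[i] and tensors[i + 1] are the source and destination of the i-th kernel.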

  // Validate ReduceOperation on all kernels
  it = axis.begin();
  for (size_t i = 0; i < num_of_kernels; ++i, ++it)
  {
    ARM_COMPUTE_RETURN_ON_ERROR(
        CLReduceOperationKernel::validate(tensors[i], tensors[i + 1], *it, op));
  }

  if (!keep_dims)
  {
    ARM_COMPUTE_RETURN_ON_ERROR(
        CLReshapeLayer::validate(&interm_tensors[num_of_interm_tensors - 1], output));
  }

  return Status{};
}

void CLReduceOperation::configure(ICLTensor *input, ICLTensor *output,
                                  const std::set<uint32_t> &axis, bool keep_dims,
                                  ReduceOperation op)
{
  ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), axis, keep_dims, op));

  _axis = axis;
  _input = input;
  _output = output;
  _keep_dims = keep_dims;

  // NOTE The axes must not be duplicated; std::set guarantees uniqueness.
  const size_t num_of_kernels = axis.size();
  const size_t num_of_interm_tensors = num_of_kernels - (keep_dims ? 1 : 0);

  _interm_tensors = support::cpp14::make_unique<CLTensor[]>(num_of_interm_tensors);
  _reduce_kernels = support::cpp14::make_unique<CLReduceOperationKernel[]>(num_of_kernels);

  // Set a vector of tensors ordered sequentially: input, intermediates, output.
  std::vector<ICLTensor *> tensors;
  tensors.emplace_back(input);
  for (size_t i = 0; i < num_of_interm_tensors; ++i)
  {
    tensors.emplace_back(_interm_tensors.get() + i);
  }
  tensors.emplace_back(output);
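
  // Each intermediate tensor is registered with the memory group before its producer
  // kernel is configured, and allocated only after its consumer kernel is configured,
  // letting the group reuse buffers across the chain.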

  // Apply ReduceOperation on all kernels
  TensorShape shape{input->info()->tensor_shape()};
  auto it = axis.begin();
  for (size_t i = 0; i < num_of_kernels; ++i, ++it)
  {
    shape.set(*it, 1, false);
    if (!keep_dims || i != (num_of_kernels - 1))
    {
      _interm_tensors[i].allocator()->init(input->info()->clone()->set_tensor_shape(shape));
      _memory_group.manage(&_interm_tensors[i]);
    }

    _reduce_kernels[i].configure(tensors[i], tensors[i + 1], *it, op);

    if (i != 0)
    {
      _interm_tensors[i - 1].allocator()->allocate();
    }
  }

  // Configure reshape layer if we want to drop the dimensions
  if (!keep_dims)
  {
    _reshape.configure(&_interm_tensors[num_of_interm_tensors - 1], output);
    _interm_tensors[num_of_interm_tensors - 1].allocator()->allocate();
  }
}

void CLReduceOperation::run()
{
  MemoryGroupResourceScope scope_mg(_memory_group);

  const size_t num_of_kernels = _axis.size();
  for (size_t i = 0; i < num_of_kernels; ++i)
  {
    CLScheduler::get().enqueue(_reduce_kernels[i]);
  }

  if (!_keep_dims)
  {
    _reshape.run();
  }
}
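
// A minimal usage sketch (illustrative only): reduce axis 2 of a 4x8x16 F32 tensor
// with keep_dims = false. The shapes, the ReduceOperation value, and the scheduler
// setup below are assumptions for the example, not requirements of this file.
//
//   CLScheduler::get().default_init();
//
//   CLTensor input, output;
//   input.allocator()->init(TensorInfo(TensorShape(4U, 8U, 16U), 1, DataType::F32));
//   output.allocator()->init(TensorInfo(TensorShape(4U, 8U), 1, DataType::F32));
//
//   CLReduceOperation reduce(nullptr); // no shared memory manager
//   reduce.configure(&input, &output, std::set<uint32_t>{2U}, false, ReduceOperation::MAX);
//
//   input.allocator()->allocate();
//   output.allocator()->allocate();
//   // ... map input, fill values, unmap ...
//   reduce.run();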