Imported Upstream version 1.9.0
[platform/core/ml/nnfw.git] / runtime / onert / backend / cpu / ops / ReduceLayer.cc
1 /*
2  * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include "ReduceLayer.h"
18
19 #include "OperationUtils.h"
20
21 #include <cker/operation/Reduce.h>
22
23 namespace onert
24 {
25 namespace backend
26 {
27 namespace cpu
28 {
29 namespace ops
30 {
31
32 namespace
33 {
34
35 template <typename T>
36 void evalLogic(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes,
37                bool keep_dims, T init_value, nnfw::cker::Reduce &reduce_kernel,
38                T reducer(const T current, const T in))
39 {
40   reduce_kernel.prepare(input->num_dimensions(), axes.size());
41   bool result = reduce_kernel.ReduceGeneric<T>(
42       getTensorShape(input), reinterpret_cast<const T *>(input->buffer()), getTensorShape(output),
43       reinterpret_cast<T *>(output->buffer()), axes, keep_dims, init_value, reducer);
44
45   if (!result)
46   {
47     throw std::runtime_error{"Reduce: Fail to run"};
48   }
49 }
50
51 template <typename T>
52 std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
53 evalType(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
54 {
55   switch (reduce_type)
56   {
57     case ReduceType::kSum:
58       return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2,
59                        std::placeholders::_3, keep_dims, static_cast<T>(0), reduce_kernel,
60                        [](const T current, const T in) -> T { return in + current; });
61       break;
62     case ReduceType::kProd:
63       return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2,
64                        std::placeholders::_3, keep_dims, static_cast<T>(1), reduce_kernel,
65                        [](const T current, const T in) -> T { return in * current; });
66       break;
67     case ReduceType::kMax:
68       return std::bind(
69           &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
70           keep_dims, std::numeric_limits<T>::lowest(), reduce_kernel,
71           [](const T current, const T in) -> T { return (in > current) ? in : current; });
72       break;
73     case ReduceType::kMin:
74       return std::bind(
75           &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
76           keep_dims, std::numeric_limits<T>::max(), reduce_kernel,
77           [](const T current, const T in) -> T { return (in < current) ? in : current; });
78       break;
79     default:
80       throw std::runtime_error{"Reduce: Unsupported reduce type"};
81   }
82 }
83
84 // Template specialization for bool type
85 template <>
86 std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
87 evalType<bool>(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
88 {
89   switch (reduce_type)
90   {
91     case ReduceType::kAny:
92       return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2,
93                        std::placeholders::_3, keep_dims, false, reduce_kernel,
94                        [](const bool current, const bool in) -> bool { return in || current; });
95       break;
96     case ReduceType::kAll:
97       return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2,
98                        std::placeholders::_3, keep_dims, true, reduce_kernel,
99                        [](const bool current, const bool in) -> bool { return in && current; });
100       break;
101     default:
102       throw std::runtime_error{"Reduce: Unsupported reduce type"};
103   }
104 }
105
106 std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
107 generateKernelGeneric(const IPortableTensor *input, bool keep_dims,
108                       nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
109 {
110   switch (input->data_type())
111   {
112     case OperandType::FLOAT32:
113       return evalType<float>(keep_dims, reduce_kernel, reduce_type);
114     case OperandType::INT32:
115       return evalType<int32_t>(keep_dims, reduce_kernel, reduce_type);
116     case OperandType::BOOL8:
117       return evalType<bool>(keep_dims, reduce_kernel, reduce_type);
118     default:
119       throw std::runtime_error{"Reduce(generic): unsupported data type"};
120   }
121 }
122
123 // TODO Refine this function
124 void evalSumQuantized(const IPortableTensor *input, IPortableTensor *output,
125                       const std::vector<int> &axes, bool keep_dims,
126                       nnfw::cker::Reduce &reduce_kernel)
127 {
128   const bool same_scale = (input->data_scale() == output->data_scale() &&
129                            input->data_offset() == output->data_offset());
130
131   reduce_kernel.prepare(input->num_dimensions(), axes.size());
132
133   if (!same_scale)
134   {
135     std::vector<int32_t> temp_sum(output->getShape().num_elements());
136     bool result = reduce_kernel.QuantizedMeanOrSum<uint8_t, int32_t>(
137         reinterpret_cast<const uint8_t *>(input->buffer()), input->data_offset(),
138         input->data_scale(), getTensorShape(input), reinterpret_cast<uint8_t *>(output->buffer()),
139         output->data_offset(), output->data_scale(), getTensorShape(output), axes, keep_dims,
140         temp_sum.data(), true, [](const int32_t current, const uint8_t in) -> int32_t {
141           const int32_t actual_in = static_cast<int32_t>(in);
142           return current + actual_in;
143         });
144
145     if (!result)
146     {
147       throw std::runtime_error{"Reduce: Fail to run"};
148     }
149
150     return;
151   }
152
153   const auto kernel = generateKernelGeneric(input, keep_dims, reduce_kernel, ReduceType::kSum);
154   kernel(input, output, axes);
155 }
156
157 } // namespace
158
// Default-constructs the layer with no tensors bound; configure() must be
// called before run(). The cker Reduce kernel is heap-allocated here —
// presumably held by a smart-pointer member so the header can forward-declare
// nnfw::cker::Reduce (NOTE(review): confirm against ReduceLayer.h, which is
// not visible in this file).
ReduceLayer::ReduceLayer()
    : _input(nullptr), _axes(nullptr), _output(nullptr), _reduce_kernel(new nnfw::cker::Reduce()),
      _kernel()
{
  // DO NOTHING
}
165
// Defaulted out-of-line: emits the destructor in this translation unit, where
// nnfw::cker::Reduce is a complete type (needed if _reduce_kernel is a
// unique_ptr — NOTE(review): the member's declaration is in the header, not
// visible here).
ReduceLayer::~ReduceLayer() = default;
167
168 void ReduceLayer::configure(const IPortableTensor *input, const IPortableTensor *axes,
169                             IPortableTensor *output, ReduceType reduceType, bool keep_dims)
170 {
171   _input = input;
172   _axes = axes;
173   _output = output;
174
175   switch (reduceType)
176   {
177     case ReduceType::kSum:
178       if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
179       {
180         _kernel = std::bind(&evalSumQuantized, std::placeholders::_1, std::placeholders::_2,
181                             std::placeholders::_3, keep_dims, *_reduce_kernel);
182         return;
183       }
184       _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kSum);
185       break;
186     case ReduceType::kProd:
187       _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kProd);
188       break;
189     case ReduceType::kMax:
190       _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMax);
191       break;
192     case ReduceType::kMin:
193       _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMin);
194       break;
195     case ReduceType::kAny:
196       _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAny);
197       break;
198     case ReduceType::kAll:
199       _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAll);
200       break;
201     default:
202       throw std::runtime_error{"ReduceSum: Unsupported reduce type"};
203   }
204 }
205
206 void ReduceLayer::run()
207 {
208   const auto axes = getReducerAxes(_axes);
209   _kernel(_input, _output, axes);
210 }
211
212 } // namespace ops
213 } // namespace cpu
214 } // namespace backend
215 } // namespace onert