Imported Upstream version 1.8.0
[platform/core/ml/nnfw.git] / runtime / onert / backend / cpu / ops / ReduceLayer.cc
1 /*
2  * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include "ReduceLayer.h"
18
19 #include "OperationUtils.h"
20
21 #include <cker/operation/Reduce.h>
22
23 namespace onert
24 {
25 namespace backend
26 {
27 namespace cpu
28 {
29 namespace ops
30 {
31
32 namespace
33 {
34
35 template <typename T>
36 void evalLogic(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes,
37                bool keep_dims, T init_value, nnfw::cker::Reduce &reduce_kernel,
38                T reducer(const T current, const T in))
39 {
40   reduce_kernel.prepare(input->num_dimensions(), axes.size());
41   bool result = reduce_kernel.ReduceGeneric<T>(
42       getTensorShape(input), reinterpret_cast<const T *>(input->buffer()), getTensorShape(output),
43       reinterpret_cast<T *>(output->buffer()), axes, keep_dims, init_value, reducer);
44
45   if (!result)
46   {
47     throw std::runtime_error{"Reduce: Fail to run"};
48   }
49 }
50
51 template <typename T>
52 void evalType(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes,
53               bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
54 {
55   switch (reduce_type)
56   {
57     case ReduceType::kSum:
58       return evalLogic<T>(input, output, axes, keep_dims, static_cast<T>(0), reduce_kernel,
59                           [](const T current, const T in) -> T { return in + current; });
60       break;
61     case ReduceType::kProd:
62       return evalLogic<T>(input, output, axes, keep_dims, static_cast<T>(1), reduce_kernel,
63                           [](const T current, const T in) -> T { return in * current; });
64       break;
65     case ReduceType::kMax:
66       return evalLogic<T>(
67           input, output, axes, keep_dims, std::numeric_limits<T>::lowest(), reduce_kernel,
68           [](const T current, const T in) -> T { return (in > current) ? in : current; });
69       break;
70     case ReduceType::kMin:
71       return evalLogic<T>(
72           input, output, axes, keep_dims, std::numeric_limits<T>::max(), reduce_kernel,
73           [](const T current, const T in) -> T { return (in < current) ? in : current; });
74       break;
75     default:
76       throw std::runtime_error{"Reduce: Unsupported reduce type"};
77   }
78 }
79
80 // Template specialization for bool type
81 template <>
82 void evalType<bool>(const IPortableTensor *input, IPortableTensor *output,
83                     const std::vector<int> &axes, bool keep_dims, nnfw::cker::Reduce &reduce_kernel,
84                     ReduceType reduce_type)
85 {
86   switch (reduce_type)
87   {
88     case ReduceType::kAny:
89       return evalLogic<bool>(
90           input, output, axes, keep_dims, false, reduce_kernel,
91           [](const bool current, const bool in) -> bool { return in || current; });
92       break;
93     case ReduceType::kAll:
94       return evalLogic<bool>(
95           input, output, axes, keep_dims, true, reduce_kernel,
96           [](const bool current, const bool in) -> bool { return in && current; });
97       break;
98     default:
99       throw std::runtime_error{"Reduce: Unsupported reduce type"};
100   }
101 }
102
103 template <ReduceType reduce_type>
104 void evalGeneric(const IPortableTensor *input, IPortableTensor *output,
105                  const std::vector<int> &axes, bool keep_dims, nnfw::cker::Reduce &reduce_kernel)
106 {
107   switch (input->data_type())
108   {
109     case OperandType::FLOAT32:
110       return evalType<float>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
111     case OperandType::INT32:
112       return evalType<int32_t>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
113     case OperandType::BOOL8:
114       return evalType<bool>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
115     default:
116       throw std::runtime_error{"Reduce(generic): unsupported data type"};
117   }
118 }
119
120 void evalSumQuantized(const IPortableTensor *input, IPortableTensor *output,
121                       const std::vector<int> &axes, bool keep_dims,
122                       nnfw::cker::Reduce &reduce_kernel)
123 {
124   const bool same_scale = (input->data_scale() == output->data_scale() &&
125                            input->data_offset() == output->data_offset());
126
127   reduce_kernel.prepare(input->num_dimensions(), axes.size());
128
129   if (!same_scale)
130   {
131     std::vector<int32_t> temp_sum(output->getShape().num_elements());
132     bool result = reduce_kernel.QuantizedMeanOrSum<uint8_t, int32_t>(
133         reinterpret_cast<const uint8_t *>(input->buffer()), input->data_offset(),
134         input->data_scale(), getTensorShape(input), reinterpret_cast<uint8_t *>(output->buffer()),
135         output->data_offset(), output->data_scale(), getTensorShape(output), axes, keep_dims,
136         temp_sum.data(), true, [](const int32_t current, const uint8_t in) -> int32_t {
137           const int32_t actual_in = static_cast<int32_t>(in);
138           return current + actual_in;
139         });
140
141     if (!result)
142     {
143       throw std::runtime_error{"Reduce: Fail to run"};
144     }
145
146     return;
147   }
148
149   evalGeneric<ReduceType::kSum>(input, output, axes, keep_dims, reduce_kernel);
150 }
151
152 } // namespace
153
// Constructs the layer with no tensors bound yet; configure() must supply
// them before run(). The cker Reduce kernel is heap-allocated once here and
// reused across run() calls.
ReduceLayer::ReduceLayer()
    : _input(nullptr), _axes(nullptr), _output(nullptr), _reduceType(ReduceType::kAny),
      _keep_dims(false), _reduce_kernel(new nnfw::cker::Reduce())
{
  // DO NOTHING
}
160
// Defaulted out-of-line — presumably so _reduce_kernel's owning pointer can
// destroy nnfw::cker::Reduce where the type is complete (TODO confirm against
// the header's member declaration).
ReduceLayer::~ReduceLayer() = default;
162
163 void ReduceLayer::configure(const IPortableTensor *input, const IPortableTensor *axes,
164                             IPortableTensor *output, ReduceType reduceType, bool keep_dims)
165 {
166   _input = input;
167   _axes = axes;
168   _output = output;
169   _reduceType = reduceType;
170   _keep_dims = keep_dims;
171 }
172
173 void ReduceLayer::run()
174 {
175   const auto axes = getReducerAxes(_axes);
176   switch (_reduceType)
177   {
178     case ReduceType::kSum:
179       if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
180       {
181         evalSumQuantized(_input, _output, axes, _keep_dims, *_reduce_kernel);
182         return;
183       }
184       evalGeneric<ReduceType::kSum>(_input, _output, axes, _keep_dims, *_reduce_kernel);
185       break;
186     case ReduceType::kProd:
187       evalGeneric<ReduceType::kProd>(_input, _output, axes, _keep_dims, *_reduce_kernel);
188       break;
189     case ReduceType::kMax:
190       evalGeneric<ReduceType::kMax>(_input, _output, axes, _keep_dims, *_reduce_kernel);
191       break;
192     case ReduceType::kMin:
193       evalGeneric<ReduceType::kMin>(_input, _output, axes, _keep_dims, *_reduce_kernel);
194       break;
195     case ReduceType::kAny:
196       evalGeneric<ReduceType::kAny>(_input, _output, axes, _keep_dims, *_reduce_kernel);
197       break;
198     case ReduceType::kAll:
199       evalGeneric<ReduceType::kAll>(_input, _output, axes, _keep_dims, *_reduce_kernel);
200       break;
201     default:
202       throw std::runtime_error{"ReduceSum: Unsupported reduce type"};
203   }
204 }
205
206 } // namespace ops
207 } // namespace cpu
208 } // namespace backend
209 } // namespace onert