/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "ReduceLayer.h"
19 #include "OperationUtils.h"
21 #include <cker/operation/Reduce.h>
36 void evalLogic(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes,
37 bool keep_dims, T init_value, nnfw::cker::Reduce &reduce_kernel,
38 T reducer(const T current, const T in))
40 reduce_kernel.prepare(input->num_dimensions(), axes.size());
41 bool result = reduce_kernel.ReduceGeneric<T>(
42 getTensorShape(input), reinterpret_cast<const T *>(input->buffer()), getTensorShape(output),
43 reinterpret_cast<T *>(output->buffer()), axes, keep_dims, init_value, reducer);
47 throw std::runtime_error{"Reduce: Fail to run"};
52 void evalType(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes,
53 bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
57 case ReduceType::kSum:
58 return evalLogic<T>(input, output, axes, keep_dims, static_cast<T>(0), reduce_kernel,
59 [](const T current, const T in) -> T { return in + current; });
61 case ReduceType::kProd:
62 return evalLogic<T>(input, output, axes, keep_dims, static_cast<T>(1), reduce_kernel,
63 [](const T current, const T in) -> T { return in * current; });
65 case ReduceType::kMax:
67 input, output, axes, keep_dims, std::numeric_limits<T>::lowest(), reduce_kernel,
68 [](const T current, const T in) -> T { return (in > current) ? in : current; });
70 case ReduceType::kMin:
72 input, output, axes, keep_dims, std::numeric_limits<T>::max(), reduce_kernel,
73 [](const T current, const T in) -> T { return (in < current) ? in : current; });
76 throw std::runtime_error{"Reduce: Unsupported reduce type"};
80 // Template specialization for bool type
82 void evalType<bool>(const IPortableTensor *input, IPortableTensor *output,
83 const std::vector<int> &axes, bool keep_dims, nnfw::cker::Reduce &reduce_kernel,
84 ReduceType reduce_type)
88 case ReduceType::kAny:
89 return evalLogic<bool>(
90 input, output, axes, keep_dims, false, reduce_kernel,
91 [](const bool current, const bool in) -> bool { return in || current; });
93 case ReduceType::kAll:
94 return evalLogic<bool>(
95 input, output, axes, keep_dims, true, reduce_kernel,
96 [](const bool current, const bool in) -> bool { return in && current; });
99 throw std::runtime_error{"Reduce: Unsupported reduce type"};
103 template <ReduceType reduce_type>
104 void evalGeneric(const IPortableTensor *input, IPortableTensor *output,
105 const std::vector<int> &axes, bool keep_dims, nnfw::cker::Reduce &reduce_kernel)
107 switch (input->data_type())
109 case OperandType::FLOAT32:
110 return evalType<float>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
111 case OperandType::INT32:
112 return evalType<int32_t>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
113 case OperandType::BOOL8:
114 return evalType<bool>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
116 throw std::runtime_error{"Reduce(generic): unsupported data type"};
120 void evalSumQuantized(const IPortableTensor *input, IPortableTensor *output,
121 const std::vector<int> &axes, bool keep_dims,
122 nnfw::cker::Reduce &reduce_kernel)
124 const bool same_scale = (input->data_scale() == output->data_scale() &&
125 input->data_offset() == output->data_offset());
127 reduce_kernel.prepare(input->num_dimensions(), axes.size());
131 std::vector<int32_t> temp_sum(output->getShape().num_elements());
132 bool result = reduce_kernel.QuantizedMeanOrSum<uint8_t, int32_t>(
133 reinterpret_cast<const uint8_t *>(input->buffer()), input->data_offset(),
134 input->data_scale(), getTensorShape(input), reinterpret_cast<uint8_t *>(output->buffer()),
135 output->data_offset(), output->data_scale(), getTensorShape(output), axes, keep_dims,
136 temp_sum.data(), true, [](const int32_t current, const uint8_t in) -> int32_t {
137 const int32_t actual_in = static_cast<int32_t>(in);
138 return current + actual_in;
143 throw std::runtime_error{"Reduce: Fail to run"};
149 evalGeneric<ReduceType::kSum>(input, output, axes, keep_dims, reduce_kernel);
154 ReduceLayer::ReduceLayer()
155 : _input(nullptr), _axes(nullptr), _output(nullptr), _reduceType(ReduceType::kAny),
156 _keep_dims(false), _reduce_kernel(new nnfw::cker::Reduce())
161 ReduceLayer::~ReduceLayer() = default;
163 void ReduceLayer::configure(const IPortableTensor *input, const IPortableTensor *axes,
164 IPortableTensor *output, ReduceType reduceType, bool keep_dims)
169 _reduceType = reduceType;
170 _keep_dims = keep_dims;
173 void ReduceLayer::run()
175 const auto axes = getReducerAxes(_axes);
178 case ReduceType::kSum:
179 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
181 evalSumQuantized(_input, _output, axes, _keep_dims, *_reduce_kernel);
184 evalGeneric<ReduceType::kSum>(_input, _output, axes, _keep_dims, *_reduce_kernel);
186 case ReduceType::kProd:
187 evalGeneric<ReduceType::kProd>(_input, _output, axes, _keep_dims, *_reduce_kernel);
189 case ReduceType::kMax:
190 evalGeneric<ReduceType::kMax>(_input, _output, axes, _keep_dims, *_reduce_kernel);
192 case ReduceType::kMin:
193 evalGeneric<ReduceType::kMin>(_input, _output, axes, _keep_dims, *_reduce_kernel);
195 case ReduceType::kAny:
196 evalGeneric<ReduceType::kAny>(_input, _output, axes, _keep_dims, *_reduce_kernel);
198 case ReduceType::kAll:
199 evalGeneric<ReduceType::kAll>(_input, _output, axes, _keep_dims, *_reduce_kernel);
202 throw std::runtime_error{"ReduceSum: Unsupported reduce type"};
208 } // namespace backend