/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ReduceLayer.h"

#include "OperationUtils.h"

#include <cker/operation/Reduce.h>

#include <functional>
#include <limits>
#include <stdexcept>
#include <vector>
36 void evalLogic(const IPortableTensor *input, IPortableTensor *output, const std::vector<int> &axes,
37 bool keep_dims, T init_value, nnfw::cker::Reduce &reduce_kernel,
38 T reducer(const T current, const T in))
40 reduce_kernel.prepare(input->num_dimensions(), axes.size());
41 bool result = reduce_kernel.ReduceGeneric<T>(
42 getTensorShape(input), reinterpret_cast<const T *>(input->buffer()), getTensorShape(output),
43 reinterpret_cast<T *>(output->buffer()), axes, keep_dims, init_value, reducer);
47 throw std::runtime_error{"Reduce: Fail to run"};
52 std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
53 evalType(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
57 case ReduceType::kSum:
58 return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2,
59 std::placeholders::_3, keep_dims, static_cast<T>(0), reduce_kernel,
60 [](const T current, const T in) -> T { return in + current; });
62 case ReduceType::kProd:
63 return std::bind(&evalLogic<T>, std::placeholders::_1, std::placeholders::_2,
64 std::placeholders::_3, keep_dims, static_cast<T>(1), reduce_kernel,
65 [](const T current, const T in) -> T { return in * current; });
67 case ReduceType::kMax:
69 &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
70 keep_dims, std::numeric_limits<T>::lowest(), reduce_kernel,
71 [](const T current, const T in) -> T { return (in > current) ? in : current; });
73 case ReduceType::kMin:
75 &evalLogic<T>, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3,
76 keep_dims, std::numeric_limits<T>::max(), reduce_kernel,
77 [](const T current, const T in) -> T { return (in < current) ? in : current; });
80 throw std::runtime_error{"Reduce: Unsupported reduce type"};
84 // Template specialization for bool type
86 std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
87 evalType<bool>(bool keep_dims, nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
91 case ReduceType::kAny:
92 return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2,
93 std::placeholders::_3, keep_dims, false, reduce_kernel,
94 [](const bool current, const bool in) -> bool { return in || current; });
96 case ReduceType::kAll:
97 return std::bind(&evalLogic<bool>, std::placeholders::_1, std::placeholders::_2,
98 std::placeholders::_3, keep_dims, true, reduce_kernel,
99 [](const bool current, const bool in) -> bool { return in && current; });
102 throw std::runtime_error{"Reduce: Unsupported reduce type"};
106 std::function<void(const IPortableTensor *, IPortableTensor *, const std::vector<int> &)>
107 generateKernelGeneric(const IPortableTensor *input, bool keep_dims,
108 nnfw::cker::Reduce &reduce_kernel, ReduceType reduce_type)
110 switch (input->data_type())
112 case OperandType::FLOAT32:
113 return evalType<float>(keep_dims, reduce_kernel, reduce_type);
114 case OperandType::INT32:
115 return evalType<int32_t>(keep_dims, reduce_kernel, reduce_type);
116 case OperandType::BOOL8:
117 return evalType<bool>(keep_dims, reduce_kernel, reduce_type);
119 throw std::runtime_error{"Reduce(generic): unsupported data type"};
123 // TODO Refine this function
124 void evalSumQuantized(const IPortableTensor *input, IPortableTensor *output,
125 const std::vector<int> &axes, bool keep_dims,
126 nnfw::cker::Reduce &reduce_kernel)
128 const bool same_scale = (input->data_scale() == output->data_scale() &&
129 input->data_offset() == output->data_offset());
131 reduce_kernel.prepare(input->num_dimensions(), axes.size());
135 std::vector<int32_t> temp_sum(output->getShape().num_elements());
136 bool result = reduce_kernel.QuantizedMeanOrSum<uint8_t, int32_t>(
137 reinterpret_cast<const uint8_t *>(input->buffer()), input->data_offset(),
138 input->data_scale(), getTensorShape(input), reinterpret_cast<uint8_t *>(output->buffer()),
139 output->data_offset(), output->data_scale(), getTensorShape(output), axes, keep_dims,
140 temp_sum.data(), true, [](const int32_t current, const uint8_t in) -> int32_t {
141 const int32_t actual_in = static_cast<int32_t>(in);
142 return current + actual_in;
147 throw std::runtime_error{"Reduce: Fail to run"};
153 const auto kernel = generateKernelGeneric(input, keep_dims, reduce_kernel, ReduceType::kSum);
154 kernel(input, output, axes);
159 ReduceLayer::ReduceLayer()
160 : _input(nullptr), _axes(nullptr), _output(nullptr), _reduce_kernel(new nnfw::cker::Reduce()),
166 ReduceLayer::~ReduceLayer() = default;
168 void ReduceLayer::configure(const IPortableTensor *input, const IPortableTensor *axes,
169 IPortableTensor *output, ReduceType reduceType, bool keep_dims)
177 case ReduceType::kSum:
178 if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
180 _kernel = std::bind(&evalSumQuantized, std::placeholders::_1, std::placeholders::_2,
181 std::placeholders::_3, keep_dims, *_reduce_kernel);
184 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kSum);
186 case ReduceType::kProd:
187 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kProd);
189 case ReduceType::kMax:
190 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMax);
192 case ReduceType::kMin:
193 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kMin);
195 case ReduceType::kAny:
196 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAny);
198 case ReduceType::kAll:
199 _kernel = generateKernelGeneric(_input, keep_dims, *_reduce_kernel, ReduceType::kAll);
202 throw std::runtime_error{"ReduceSum: Unsupported reduce type"};
206 void ReduceLayer::run()
208 const auto axes = getReducerAxes(_axes);
209 _kernel(_input, _output, axes);
214 } // namespace backend