/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "kernels/Utils.h"
19 #include "MISOKernel.h"
23 namespace luci_interpreter
28 const int max_dim = 5;
39 inline void slice(const luci_interpreter::SliceParams &op_params,
40 const luci_interpreter::RuntimeShape &input_shape, const T *input_data,
41 const luci_interpreter::RuntimeShape &output_shape, T *output_data)
43 const luci_interpreter::RuntimeShape ext_shape =
44 luci_interpreter::RuntimeShape::extendedShape(5, input_shape);
45 const int begin_count = op_params.begin_count;
46 const int size_count = op_params.size_count;
47 // We front-pad the begin and size vectors.
50 for (int i = 0; i < 5; ++i)
53 start[i] = begin_count < padded_i ? 0 : op_params.begin[begin_count - padded_i];
54 stop[i] = (size_count < padded_i || op_params.size[size_count - padded_i] == -1)
56 : start[i] + op_params.size[size_count - padded_i];
59 for (int i0 = start[0]; i0 < stop[0]; ++i0)
61 for (int i1 = start[1]; i1 < stop[1]; ++i1)
63 for (int i2 = start[2]; i2 < stop[2]; ++i2)
65 for (int i3 = start[3]; i3 < stop[3]; ++i3)
67 for (int i4 = start[4]; i4 < stop[4]; ++i4)
70 (((i0 * ext_shape.dims(1) + i1) * ext_shape.dims(2) + i2) * ext_shape.dims(3) + i3) *
73 *output_data++ = input_data[position];
82 void getBeginAndSizeVectors(int dimensions, const uint8_t *begin_data, const uint8_t *size_data,
83 int32_t *begins, int32_t *sizes)
85 int offset = max_dim - dimensions;
86 for (int idx = 0; idx < dimensions; ++idx)
88 begins[offset + idx] = kernels::getTensorData<T>(begin_data)[idx];
89 sizes[offset + idx] = kernels::getTensorData<T>(size_data)[idx];
94 void configure_kernel_CircleSlice(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph)
96 kernels::MISOKernel kernel(cur_op, runtime_graph);
98 LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input1()) ==
99 Tensor::element_type(kernel.output()));
100 LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input2()) == DataType::S32 ||
101 Tensor::element_type(kernel.input2()) == DataType::S64);
102 LUCI_INTERPRETER_CHECK(Tensor::element_type(kernel.input3()) == DataType::S32 ||
103 Tensor::element_type(kernel.input3()) == DataType::S64);
104 LUCI_INTERPRETER_CHECK(Tensor::num_dims(kernel.input2()) == 1);
105 LUCI_INTERPRETER_CHECK(Tensor::num_dims(kernel.input3()) == 1);
106 LUCI_INTERPRETER_CHECK(Tensor::num_dims(kernel.input1()) <= max_dim);
109 void execute_kernel_CircleSlice(const circle::Operator *cur_op, BaseRuntimeGraph *runtime_graph)
111 kernels::MISOKernel kernel(cur_op, runtime_graph);
113 bool is_dynamic_shapes = false;
115 const circle::Tensor *input = kernel.input1();
116 const circle::Tensor *begin = kernel.input2();
117 const circle::Tensor *size_tensor = kernel.input3();
118 const circle::Tensor *output = kernel.output();
120 const auto *input_data = runtime_graph->getDataByTensor(input);
121 if (input_data == nullptr)
122 input_data = runtime_graph->getConstDataByTensor(input);
125 const auto *begin_data = runtime_graph->getDataByTensor(begin);
126 if (begin_data == nullptr)
128 begin_data = runtime_graph->getConstDataByTensor(begin);
129 is_dynamic_shapes = true;
133 const auto *size_data = runtime_graph->getDataByTensor(size_tensor);
134 if (size_data == nullptr)
136 size_data = runtime_graph->getConstDataByTensor(size_tensor);
137 is_dynamic_shapes = true;
141 auto *output_data = runtime_graph->getDataByTensor(output);
144 SliceParams op_params{};
145 op_params.begin_count = max_dim;
146 op_params.size_count = max_dim;
147 for (int i = 0; i < max_dim; i++)
149 op_params.begin[i] = 0;
150 op_params.size[i] = 1;
152 auto num_dim = Tensor::num_dims(input);
154 if (Tensor::element_type(begin) == DataType::S32)
156 getBeginAndSizeVectors<int32_t>(num_dim, begin_data, size_data, op_params.begin,
159 else if (Tensor::element_type(begin) == DataType::S64)
161 getBeginAndSizeVectors<int64_t>(num_dim, begin_data, size_data, op_params.begin,
166 assert(false && "Unsupported type");
169 #ifndef DIS_DYN_SHAPES
170 if (is_dynamic_shapes)
172 int32_t data_size = 1;
173 luci_interpreter::RuntimeShape dynamic_shapes(max_dim - num_dim + 1);
174 int offset = max_dim - Tensor::num_dims(input);
175 for (int i = 0; i <= max_dim - num_dim; ++i)
179 auto cur_size = op_params.size[i + offset] != -1
180 ? op_params.size[i + offset]
181 : Tensor::dim(input, i) - op_params.begin[i + offset];
182 data_size *= cur_size;
184 dynamic_shapes.setDim(i, cur_size);
186 data_size *= size(Tensor::element_type(output));
188 runtime_graph->addDynamicShapeTensor(output, std::move(dynamic_shapes));
192 runtime_graph->resetTensorData(nullptr, output);
196 auto new_output_data = new uint8_t[data_size];
197 output_data = new_output_data;
198 runtime_graph->resetTensorData(new_output_data, output);
201 assert(is_dynamic_shapes == false);
202 #endif // DIS_DYN_SHAPES
204 switch (Tensor::element_type(input))
207 case DataType::FLOAT32:
208 slice<float>(op_params, kernels::getTensorShape(input),
209 kernels::getTensorData<float>(input_data), kernels::getTensorShape(output),
210 kernels::getTensorData<float>(output_data));
215 slice<uint8_t>(op_params, kernels::getTensorShape(input),
216 kernels::getTensorData<uint8_t>(input_data), kernels::getTensorShape(output),
217 kernels::getTensorData<uint8_t>(output_data));
220 slice<int8_t>(op_params, kernels::getTensorShape(input),
221 kernels::getTensorData<int8_t>(input_data), kernels::getTensorShape(output),
222 kernels::getTensorData<int8_t>(output_data));
225 slice<int16_t>(op_params, kernels::getTensorShape(input),
226 kernels::getTensorData<int16_t>(input_data), kernels::getTensorShape(output),
227 kernels::getTensorData<int16_t>(output_data));
231 assert(false && "Unsupported input type.");
235 } // namespace luci_interpreter