/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Convert.h"

#include <stdexcept>

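// The helpers below map tflchef recipe enum values onto the corresponding
// TFLite flatbuffer schema enums; an unexpected value raises std::runtime_error.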
tflite::Padding as_tflite_padding(const tflchef::Padding &value)
{
  switch (value)
  {
    case tflchef::SAME:
      return tflite::Padding_SAME;
    case tflchef::VALID:
      return tflite::Padding_VALID;
    default:
      break;
  }

  throw std::runtime_error{"Unknown padding value"};
}

tflite::ActivationFunctionType as_tflite_activation(const tflchef::Activation &value)
{
  switch (value)
  {
    case tflchef::NONE:
      return tflite::ActivationFunctionType_NONE;
    case tflchef::RELU:
      return tflite::ActivationFunctionType_RELU;
    case tflchef::RELU_N1_TO_1:
      return tflite::ActivationFunctionType_RELU_N1_TO_1;
    case tflchef::RELU6:
      return tflite::ActivationFunctionType_RELU6;
    case tflchef::TANH:
      return tflite::ActivationFunctionType_TANH;
    case tflchef::SIGN_BIT:
      return tflite::ActivationFunctionType_SIGN_BIT;
    default:
      break;
  }

  throw std::runtime_error{"Unknown activation"};
}

tflite::TensorType as_tflite_tensortype(const tflchef::TensorType &value)
{
  switch (value)
  {
    case tflchef::FLOAT32:
      return tflite::TensorType_FLOAT32;
    case tflchef::FLOAT16:
      return tflite::TensorType_FLOAT16;
    case tflchef::INT32:
      return tflite::TensorType_INT32;
    case tflchef::UINT8:
      return tflite::TensorType_UINT8;
    case tflchef::INT64:
      return tflite::TensorType_INT64;
    case tflchef::STRING:
      return tflite::TensorType_STRING;
    case tflchef::BOOL:
      return tflite::TensorType_BOOL;
    case tflchef::INT16:
      return tflite::TensorType_INT16;
    case tflchef::INT8:
      return tflite::TensorType_INT8;
    default:
      break;
  }

  throw std::runtime_error{"Unknown tensor type"};
}

tflite::MirrorPadMode as_tflite_mirrorpadmode(const tflchef::MirrorPadMode &value)
{
  switch (value)
  {
    case tflchef::REFLECT:
      return tflite::MirrorPadMode_REFLECT;
    case tflchef::SYMMETRIC:
      return tflite::MirrorPadMode_SYMMETRIC;
    default:
      break;
  }

  throw std::runtime_error{"Unknown mirrorpad mode"};
}

tflite::DimensionType as_tflite_dimensiontype(const tflchef::DimensionType &value)
{
  switch (value)
  {
    case tflchef::DimensionType::DENSE:
      return tflite::DimensionType_DENSE;
    case tflchef::DimensionType::SPARSE_CSR:
      return tflite::DimensionType_SPARSE_CSR;
    default:
      break;
  }

  throw std::runtime_error("Unknown dimension type");
}

tflite::SparseIndexVector as_tflite_sparse_idx_vec_type(const tflchef::SparseIndexVecType &value)
{
  switch (value)
  {
    case tflchef::SparseIndexVecType::SparseIdxVecType_NONE:
      return tflite::SparseIndexVector_NONE;
    case tflchef::SparseIndexVecType::INT32VEC:
      return tflite::SparseIndexVector_Int32Vector;
    case tflchef::SparseIndexVecType::UINT16VEC:
      return tflite::SparseIndexVector_Uint16Vector;
    case tflchef::SparseIndexVecType::UINT8VEC:
      return tflite::SparseIndexVector_Uint8Vector;
    default:
      break;
  }

  throw std::runtime_error("Unknown SparseIndexVector type");
}

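// Builds the flatbuffer payload for a sparsity index vector: the dim values
// from the recipe are copied into an Int32Vector/Uint16Vector/Uint8Vector
// table and returned as a type-erased union offset (empty for NONE).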
flatbuffers::Offset<void>
as_tflite_sparse_index_vec(flatbuffers::FlatBufferBuilder &fb,
                           const ::tflchef::TensorSparsity_IndexVec &value)
{
  auto sparse_idx_type = value.type();

  switch (sparse_idx_type)
  {
    case tflchef::SparseIndexVecType::SparseIdxVecType_NONE:
      return flatbuffers::Offset<void>();
    case tflchef::SparseIndexVecType::INT32VEC:
    {
      auto values_vec_int32 = std::vector<int32_t>{value.dim().begin(), value.dim().end()};
      auto values_int32 = fb.CreateVector(values_vec_int32);
      return tflite::CreateInt32Vector(fb, values_int32).Union();
    }
    case tflchef::SparseIndexVecType::UINT16VEC:
    {
      auto values_vec_uint16 = std::vector<uint16_t>{value.dim().begin(), value.dim().end()};
      auto values_uint16 = fb.CreateVector(values_vec_uint16);
      return tflite::CreateUint16Vector(fb, values_uint16).Union();
    }
    case tflchef::SparseIndexVecType::UINT8VEC:
    {
      auto values_vec_uint8 = std::vector<uint8_t>{value.dim().begin(), value.dim().end()};
      auto values_uint8 = fb.CreateVector(values_vec_uint8);
      return tflite::CreateUint8Vector(fb, values_uint8).Union();
    }
    default:
      break;
  }

  throw std::runtime_error("Unknown SparseIndexVector type");
}

// namespace sparsity code referenced from
// https://github.com/tensorflow/tensorflow/blob/3f878cff5b698b82eea85db2b60d65a2e320850e/
//       tensorflow/lite/kernels/internal/utils/sparsity_format_converter.cc

namespace sparsity
{

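// FormatConverter turns a dense tensor buffer into the TFLite sparse
// representation: given the dense shape, a traversal order, a per-dimension
// DENSE/SPARSE_CSR format, and optional block sizes/maps, it produces the
// per-dimension metadata (dim_metadata_) and the compacted value array (data_).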
template <typename T>
FormatConverter<T>::FormatConverter(const std::vector<int> &shape,
                                    const std::vector<int> &traversal_order,
                                    const std::vector<TfLiteDimensionType> &format,
                                    const std::vector<int> &block_size,
                                    const std::vector<int> &block_map)
  : dense_shape_(shape), traversal_order_(traversal_order), block_size_(block_size),
    block_map_(block_map)
{
  dense_size_ = 1;
  int block_dim = 0;
  blocked_shape_.resize(shape.size());
  format_.resize(shape.size() + block_map.size());
  for (int i = 0; i < shape.size(); i++)
  {
    format_[i] = format[traversal_order[i]];
    dense_size_ *= shape[i];
    if (block_dim < block_map.size() && block_map[block_dim] == i)
    {
      blocked_shape_[i] = shape[i] / block_size[block_dim];
      block_dim++;
    }
    else
    {
      blocked_shape_[i] = shape[i];
    }
  }

  // Only dense blocks are supported.
  for (int i = 0; i < block_map.size(); i++)
  {
    format_[i + shape.size()] = kTfLiteDimDense;
  }
}

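// DenseToSparse walks the dense buffer in the configured traversal order,
// appending values to data_ (all values when the innermost dimension is dense,
// only nonzeroes otherwise) and filling the segment/index arrays of every
// SPARSE_CSR dimension along the way.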
template <typename T> bool FormatConverter<T>::DenseToSparse(const T *src_data)
{
  int num_original_dims = dense_shape_.size();
  int num_block_dims = block_map_.size();
  int num_expanded_dims = num_original_dims + num_block_dims;
  // The expanded shape is the blocked outer shape followed by the block sizes.
  std::vector<int> expanded_shape(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; i++)
  {
    if (i < num_original_dims)
    {
      expanded_shape[i] = blocked_shape_[i];
    }
    else
    {
      expanded_shape[i] = block_size_[i - num_original_dims];
    }
  }

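  // shape_offset holds the row-major strides of the original dense shape.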
  std::vector<int> shape_offset(num_original_dims);
  shape_offset[shape_offset.size() - 1] = 1;
  for (int i = num_original_dims - 1; i > 0; --i)
  {
    shape_offset[i - 1] = shape_offset[i] * dense_shape_[i];
  }

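  // Extend the strides to the expanded (blocked) dimensions: each block
  // dimension inherits the stride of the dimension it was split from, and the
  // outer dimension's stride grows by the block size.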
  std::vector<int> expanded_shape_offset(num_expanded_dims);
  for (int i = 0; i < num_original_dims; ++i)
  {
    expanded_shape_offset[i] = shape_offset[i];
  }
  for (int i = 0; i < num_block_dims; ++i)
  {
    int mapped_dim = block_map_[i];
    expanded_shape_offset[num_original_dims + i] = shape_offset[mapped_dim];
    expanded_shape_offset[mapped_dim] *= block_size_[i];
  }

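  // dst_ordered_offset permutes the expanded strides into traversal order.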
  std::vector<int> dst_ordered_offset(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; ++i)
  {
    dst_ordered_offset[i] = expanded_shape_offset[traversal_order_[i]];
  }

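  // For each dimension, remember the next inner SPARSE_CSR dimension (-1 if
  // none); at compressed dimensions also record how many segments of that
  // inner dimension a single index spans (the product of the dense dimension
  // sizes in between).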
  std::vector<bool> dst_dim_has_nonzeroes(num_expanded_dims);
  std::fill(dst_dim_has_nonzeroes.begin(), dst_dim_has_nonzeroes.end(), false);
  std::vector<int> inner_compressed_dim(num_expanded_dims);
  int most_recent_compressed_dim = -1;
  std::vector<int> num_segments_of_next_compressed_dim(num_expanded_dims);
  int segment_count = 1;
  for (int i = num_expanded_dims - 1; i >= 0; --i)
  {
    inner_compressed_dim[i] = most_recent_compressed_dim;
    if (format_[i] == kTfLiteDimSparseCSR)
    {
      most_recent_compressed_dim = i;
      num_segments_of_next_compressed_dim[i] = segment_count;
      segment_count = 1;
    }
    else
    {
      num_segments_of_next_compressed_dim[i] = -1;
      segment_count *= expanded_shape[traversal_order_[i]];
    }
  }

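  // dim_metadata_ stores two arrays per dimension: slot 2*i holds the dense
  // size or the CSR segment array, slot 2*i+1 holds the CSR index array.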
  dim_metadata_.resize(num_expanded_dims * 2);
  std::vector<int> dst_sparse_dims;
  dst_sparse_dims.reserve(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; ++i)
  {
    dim_metadata_[i * 2].clear();
    dim_metadata_[i * 2 + 1].clear();
    if (format_[i] == kTfLiteDimDense)
    {
      // If dimension is dense, just store the shape.
      dim_metadata_[i * 2].push_back(expanded_shape[traversal_order_[i]]);
    }
    else
    {
      dim_metadata_[i * 2].push_back(0); // Segment array always begins with 0.
      dst_sparse_dims.push_back(i);      // Add dimension to the sparse list.
    }
  }

  // This algorithm assumes that the block size is small enough for all the
  // elements to fit in cache, so the strided accesses from different traversal
  // order and the write-first-erase-later strategy shouldn't be too slow.
  int dst_dim_idx = num_expanded_dims;
  std::vector<int> coordinate(num_expanded_dims, 0);
  int dense_tensor_idx = 0;
  while (dst_dim_idx >= 0)
  {
    if (dst_dim_idx == num_expanded_dims)
    {
      // We have a complete coordinate. Add the element to the value array if it
      // is not zero, or if the last dimension is dense.
      if (!IsZero(src_data[dense_tensor_idx]))
      {
        data_.push_back(src_data[dense_tensor_idx]);
        // Mark all sparse dimensions as having a nonzero at their current index.
        for (auto dst_dim : dst_sparse_dims)
        {
          if (!dst_dim_has_nonzeroes[dst_dim])
          {
            // Only add the index to the indices array if the current nonzero
            // is the first nonzero of the block.
            dim_metadata_[2 * dst_dim + 1].push_back(coordinate[dst_dim]);
            dst_dim_has_nonzeroes[dst_dim] = true;
          }
        }
      }
      else if (format_[num_expanded_dims - 1] == kTfLiteDimDense)
      {
        data_.push_back(src_data[dense_tensor_idx]);
      }
      --dst_dim_idx;
    }
    else
    {
      int original_dim_idx = traversal_order_[dst_dim_idx];
      int dim_size = expanded_shape[original_dim_idx];
      if (dst_dim_has_nonzeroes[dst_dim_idx])
      {
        // If the previous block has nonzeroes, reset the flag to false since
        // we have just moved to a new block.
        dst_dim_has_nonzeroes[dst_dim_idx] = false;
      }
      else if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
      {
        // This block is empty. Delete unnecessary values if compressed.
        int next_compressed_dim = inner_compressed_dim[dst_dim_idx];
        int erase_offset = dim_metadata_[2 * dst_dim_idx + 1].size() *
                           num_segments_of_next_compressed_dim[dst_dim_idx];
        if (next_compressed_dim >= 0)
        {
          auto &segments = dim_metadata_[2 * inner_compressed_dim[dst_dim_idx]];
          segments.erase(segments.begin() + 1 + erase_offset, segments.end());
        }
        else
        {
          data_.erase(data_.begin() + erase_offset, data_.end());
        }
      }
      if (++coordinate[dst_dim_idx] < dim_size)
      {
        // The current dst_dim_idx is valid (not out of bound).
        dense_tensor_idx += dst_ordered_offset[dst_dim_idx];
        ++dst_dim_idx;
      }
      else
      {
        // dst_dim_idx has reached its dim size. Update segment array and go
        // back to incrementing the previous dimension (dst_dim_idx - 1).
        if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
        {
          dim_metadata_[2 * dst_dim_idx].push_back(dim_metadata_[2 * dst_dim_idx + 1].size());
        }
        coordinate[dst_dim_idx] = -1;
        dense_tensor_idx -= dst_ordered_offset[dst_dim_idx] * dim_size;
        --dst_dim_idx;
      }
    }
  }

  return true;
}

template <typename T> bool FormatConverter<T>::IsZero(const T val)
{
  return (val == static_cast<T>(0));
}

template class FormatConverter<float>;
template class FormatConverter<uint16_t>; // float16

} // namespace sparsity