// compiler/tflchef/core/src/Convert.cpp
/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Convert.h"

#include <algorithm> // std::fill
#include <stdexcept>
#include <vector>

tflite::Padding as_tflite_padding(const tflchef::Padding &value)
{
  switch (value)
  {
    case tflchef::SAME:
      return tflite::Padding_SAME;
    case tflchef::VALID:
      return tflite::Padding_VALID;
    default:
      break;
  }

  throw std::runtime_error{"Unknown padding value"};
}

tflite::ActivationFunctionType as_tflite_activation(const tflchef::Activation &value)
{
  switch (value)
  {
    case tflchef::NONE:
      return tflite::ActivationFunctionType_NONE;
    case tflchef::RELU:
      return tflite::ActivationFunctionType_RELU;
    case tflchef::RELU_N1_TO_1:
      return tflite::ActivationFunctionType_RELU_N1_TO_1;
    case tflchef::RELU6:
      return tflite::ActivationFunctionType_RELU6;
    case tflchef::TANH:
      return tflite::ActivationFunctionType_TANH;
    case tflchef::SIGN_BIT:
      return tflite::ActivationFunctionType_SIGN_BIT;
    default:
      break;
  }

  throw std::runtime_error{"Unknown activation"};
}
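
// Illustrative usage (hypothetical caller, not part of this file): op chefs
// translate recipe enums into flatbuffer enums while building operator options,
// roughly like:
//
//   tflite::Conv2DOptionsBuilder options_builder{fbb};
//   options_builder.add_padding(as_tflite_padding(operation.conv2d_options().padding()));
//   options_builder.add_fused_activation_function(
//     as_tflite_activation(operation.conv2d_options().activation()));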

tflite::TensorType as_tflite_tensortype(const tflchef::TensorType &value)
{
  switch (value)
  {
    case tflchef::FLOAT32:
      return tflite::TensorType_FLOAT32;
    case tflchef::FLOAT16:
      return tflite::TensorType_FLOAT16;
    case tflchef::INT32:
      return tflite::TensorType_INT32;
    case tflchef::UINT8:
      return tflite::TensorType_UINT8;
    case tflchef::INT64:
      return tflite::TensorType_INT64;
    case tflchef::STRING:
      return tflite::TensorType_STRING;
    case tflchef::BOOL:
      return tflite::TensorType_BOOL;
    case tflchef::INT16:
      return tflite::TensorType_INT16;
    default:
      break;
  }

  throw std::runtime_error{"Unknown tensor type"};
}

tflite::MirrorPadMode as_tflite_mirrorpadmode(const tflchef::MirrorPadMode &value)
{
  switch (value)
  {
    case tflchef::REFLECT:
      return tflite::MirrorPadMode_REFLECT;
    case tflchef::SYMMETRIC:
      return tflite::MirrorPadMode_SYMMETRIC;
    default:
      break;
  }

  throw std::runtime_error{"Unknown mirrorpad mode"};
}

tflite::DimensionType as_tflite_dimensiontype(const tflchef::DimensionType &value)
{
  switch (value)
  {
    case tflchef::DimensionType::DENSE:
      return tflite::DimensionType_DENSE;
    case tflchef::DimensionType::SPARSE_CSR:
      return tflite::DimensionType_SPARSE_CSR;
    default:
      break;
  }

  throw std::runtime_error("Unknown dimension type");
}

tflite::SparseIndexVector as_tflite_sparse_idx_vec_type(const tflchef::SparseIndexVecType &value)
{
  switch (value)
  {
    case tflchef::SparseIndexVecType::SparseIdxVecType_NONE:
      return tflite::SparseIndexVector_NONE;
    case tflchef::SparseIndexVecType::INT32VEC:
      return tflite::SparseIndexVector_Int32Vector;
    case tflchef::SparseIndexVecType::UINT16VEC:
      return tflite::SparseIndexVector_Uint16Vector;
    case tflchef::SparseIndexVecType::UINT8VEC:
      return tflite::SparseIndexVector_Uint8Vector;
    default:
      break;
  }

  throw std::runtime_error("Unknown SparseIndexVector type");
}

flatbuffers::Offset<void>
as_tflite_sparse_index_vec(flatbuffers::FlatBufferBuilder &fb,
                           const ::tflchef::TensorSparsity_IndexVec &value)
{
  auto sparse_idx_type = value.type();

  switch (sparse_idx_type)
  {
    case tflchef::SparseIndexVecType::SparseIdxVecType_NONE:
      return flatbuffers::Offset<void>();
    case tflchef::SparseIndexVecType::INT32VEC:
    {
      auto values_vec_int32 = std::vector<int32_t>{value.dim().begin(), value.dim().end()};
      auto values_int32 = fb.CreateVector(values_vec_int32);
      return tflite::CreateInt32Vector(fb, values_int32).Union();
    }
    case tflchef::SparseIndexVecType::UINT16VEC:
    {
      auto values_vec_uint16 = std::vector<uint16_t>{value.dim().begin(), value.dim().end()};
      auto values_uint16 = fb.CreateVector(values_vec_uint16);
      return tflite::CreateUint16Vector(fb, values_uint16).Union();
    }
    case tflchef::SparseIndexVecType::UINT8VEC:
    {
      auto values_vec_uint8 = std::vector<uint8_t>{value.dim().begin(), value.dim().end()};
      auto values_uint8 = fb.CreateVector(values_vec_uint8);
      return tflite::CreateUint8Vector(fb, values_uint8).Union();
    }
    default:
      break;
  }

  throw std::runtime_error("Unknown SparseIndexVector type");
}
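
// A minimal sketch of how the sparse-index helpers fit together (hypothetical
// caller; idx_type/idx_data would be built the same way from the indices
// vector): the union offset returned above must be paired with the matching
// type tag when filling a tflite::DimensionMetadata entry, e.g.
//
//   auto seg_type = as_tflite_sparse_idx_vec_type(index_vec.type());
//   auto seg_data = as_tflite_sparse_index_vec(fb, index_vec);
//   auto dim_metadata = tflite::CreateDimensionMetadata(fb, dim_type, dense_size,
//                                                       seg_type, seg_data, idx_type, idx_data);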

// namespace sparsity code referenced from
// https://github.com/tensorflow/tensorflow/blob/3f878cff5b698b82eea85db2b60d65a2e320850e/
//       tensorflow/lite/kernels/internal/utils/sparsity_format_converter.cc

namespace sparsity
{

template <typename T>
FormatConverter<T>::FormatConverter(const std::vector<int> &shape,
                                    const std::vector<int> &traversal_order,
                                    const std::vector<TfLiteDimensionType> &format,
                                    const std::vector<int> &block_size,
                                    const std::vector<int> &block_map)
  : dense_shape_(shape), traversal_order_(traversal_order), block_size_(block_size),
    block_map_(block_map)
{
  dense_size_ = 1;
  int block_dim = 0;
  blocked_shape_.resize(shape.size());
  format_.resize(shape.size() + block_map.size());
  for (int i = 0; i < shape.size(); i++)
  {
    format_[i] = format[traversal_order[i]];
    dense_size_ *= shape[i];
    if (block_dim < block_map.size() && block_map[block_dim] == i)
    {
      blocked_shape_[i] = shape[i] / block_size[block_dim];
      block_dim++;
    }
    else
    {
      blocked_shape_[i] = shape[i];
    }
  }

  // Only dense blocks are supported.
  for (int i = 0; i < block_map.size(); i++)
  {
    format_[i + shape.size()] = kTfLiteDimDense;
  }
}
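
// Worked example (illustrative): for shape {4, 8}, traversal_order {0, 1, 2, 3},
// format {DENSE, SPARSE_CSR}, block_size {2, 2} and block_map {0, 1}, the
// constructor above produces blocked_shape_ = {2, 4} and forces the two trailing
// block dimensions to be dense, i.e. format_ = {DENSE, SPARSE_CSR, DENSE, DENSE}.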

template <typename T> bool FormatConverter<T>::DenseToSparse(const T *src_data)
{
  int num_original_dims = dense_shape_.size();
  int num_block_dims = block_map_.size();
  int num_expanded_dims = num_original_dims + num_block_dims;
  std::vector<int> expanded_shape(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; i++)
  {
    if (i < num_original_dims)
    {
      expanded_shape[i] = blocked_shape_[i];
    }
    else
    {
      expanded_shape[i] = block_size_[i - num_original_dims];
    }
  }

  std::vector<int> shape_offset(num_original_dims);
  shape_offset[shape_offset.size() - 1] = 1;
  for (int i = num_original_dims - 1; i > 0; --i)
  {
    shape_offset[i - 1] = shape_offset[i] * dense_shape_[i];
  }

  std::vector<int> expanded_shape_offset(num_expanded_dims);
  for (int i = 0; i < num_original_dims; ++i)
  {
    expanded_shape_offset[i] = shape_offset[i];
  }
  for (int i = 0; i < num_block_dims; ++i)
  {
    int mapped_dim = block_map_[i];
    expanded_shape_offset[num_original_dims + i] = shape_offset[mapped_dim];
    expanded_shape_offset[mapped_dim] *= block_size_[i];
  }

  std::vector<int> dst_ordered_offset(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; ++i)
  {
    dst_ordered_offset[i] = expanded_shape_offset[traversal_order_[i]];
  }
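
  // Stride example (illustrative, continuing the {4, 8} case above): shape_offset
  // holds the row-major strides {8, 1} of the dense tensor; after remapping the
  // two block dimensions, expanded_shape_offset becomes {16, 2, 8, 1}, and with
  // traversal_order {0, 1, 2, 3} dst_ordered_offset is the same {16, 2, 8, 1}.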

  std::vector<bool> dst_dim_has_nonzeroes(num_expanded_dims);
  std::fill(dst_dim_has_nonzeroes.begin(), dst_dim_has_nonzeroes.end(), false);

  // For every expanded dimension, record the nearest compressed (CSR) dimension
  // nested inside it and, for compressed dimensions, how many inner segments or
  // values a single index spans; this bookkeeping is used below to erase entries
  // written for blocks that turn out to be empty.
  std::vector<int> inner_compressed_dim(num_expanded_dims);
  int most_recent_compressed_dim = -1;
  std::vector<int> num_segments_of_next_compressed_dim(num_expanded_dims);
  int segment_count = 1;
  for (int i = num_expanded_dims - 1; i >= 0; --i)
  {
    inner_compressed_dim[i] = most_recent_compressed_dim;
    if (format_[i] == kTfLiteDimSparseCSR)
    {
      most_recent_compressed_dim = i;
      num_segments_of_next_compressed_dim[i] = segment_count;
      segment_count = 1;
    }
    else
    {
      num_segments_of_next_compressed_dim[i] = -1;
      segment_count *= expanded_shape[traversal_order_[i]];
    }
  }

  dim_metadata_.resize(num_expanded_dims * 2);
  std::vector<int> dst_sparse_dims;
  dst_sparse_dims.reserve(num_expanded_dims);
  for (int i = 0; i < num_expanded_dims; ++i)
  {
    dim_metadata_[i * 2].clear();
    dim_metadata_[i * 2 + 1].clear();
    if (format_[i] == kTfLiteDimDense)
    {
      // If dimension is dense, just store the shape.
      dim_metadata_[i * 2].push_back(expanded_shape[traversal_order_[i]]);
    }
    else
    {
      dim_metadata_[i * 2].push_back(0); // Segment array always begins with 0.
      dst_sparse_dims.push_back(i);      // Add dimension to the sparse list.
    }
  }

  // This algorithm assumes that the block size is small enough for all the
  // elements to fit in cache, so the strided accesses from different traversal
  // order and the write-first-erase-later strategy shouldn't be too slow.
  int dst_dim_idx = num_expanded_dims;
  std::vector<int> coordinate(num_expanded_dims, 0);
  int dense_tensor_idx = 0;
  while (dst_dim_idx >= 0)
  {
    if (dst_dim_idx == num_expanded_dims)
    {
      // We have a complete coordinate. Add the element to the value array if it
      // is not zero, or if the last dimension is dense.
      if (!IsZero(src_data[dense_tensor_idx]))
      {
        data_.push_back(src_data[dense_tensor_idx]);
        // Mark all sparse dimensions whose current indices contain nonzeroes.
        for (auto dst_dim : dst_sparse_dims)
        {
          if (!dst_dim_has_nonzeroes[dst_dim])
          {
            // Only add the index to the indices array if the current nonzero
            // is the first nonzero of the block.
            dim_metadata_[2 * dst_dim + 1].push_back(coordinate[dst_dim]);
            dst_dim_has_nonzeroes[dst_dim] = true;
          }
        }
      }
      else if (format_[num_expanded_dims - 1] == kTfLiteDimDense)
      {
        data_.push_back(src_data[dense_tensor_idx]);
      }
      --dst_dim_idx;
    }
    else
    {
      int original_dim_idx = traversal_order_[dst_dim_idx];
      int dim_size = expanded_shape[original_dim_idx];
      if (dst_dim_has_nonzeroes[dst_dim_idx])
      {
        // If the previous block has nonzeroes, reset the flag to false since
        // we have just moved to a new block.
        dst_dim_has_nonzeroes[dst_dim_idx] = false;
      }
      else if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
      {
        // This block is empty. Delete unnecessary values if compressed.
        int next_compressed_dim = inner_compressed_dim[dst_dim_idx];
        int erase_offset = dim_metadata_[2 * dst_dim_idx + 1].size() *
                           num_segments_of_next_compressed_dim[dst_dim_idx];
        if (next_compressed_dim >= 0)
        {
          auto &segments = dim_metadata_[2 * inner_compressed_dim[dst_dim_idx]];
          segments.erase(segments.begin() + 1 + erase_offset, segments.end());
        }
        else
        {
          data_.erase(data_.begin() + erase_offset, data_.end());
        }
      }
      if (++coordinate[dst_dim_idx] < dim_size)
      {
        // The current dst_dim_idx is valid (not out of bound).
        dense_tensor_idx += dst_ordered_offset[dst_dim_idx];
        ++dst_dim_idx;
      }
      else
      {
        // dst_dim_idx has reached its dim size. Update segment array and go
        // back to incrementing the previous dimension (dst_dim_idx - 1).
        if (format_[dst_dim_idx] == kTfLiteDimSparseCSR)
        {
          dim_metadata_[2 * dst_dim_idx].push_back(dim_metadata_[2 * dst_dim_idx + 1].size());
        }
        coordinate[dst_dim_idx] = -1;
        dense_tensor_idx -= dst_ordered_offset[dst_dim_idx] * dim_size;
        --dst_dim_idx;
      }
    }
  }

  return true;
}
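
// Usage sketch (illustrative; assumes the GetData()/GetDimMetadata() accessors
// from the referenced TensorFlow FormatConverter are available): build the
// converter from a recipe's sparsity parameters, convert the dense buffer, then
// copy the results into the flatbuffer sparsity tables.
//
//   sparsity::FormatConverter<float> converter(shape, traversal_order, format,
//                                              block_size, block_map);
//   converter.DenseToSparse(reinterpret_cast<const float *>(dense_buffer.data()));
//   const auto &values = converter.GetData();              // sparse value array
//   const auto &dim_metadata = converter.GetDimMetadata(); // per-dimension segments/indices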

template <typename T> bool FormatConverter<T>::IsZero(const T val)
{
  return (val == static_cast<T>(0));
}

template class FormatConverter<float>;
template class FormatConverter<uint16_t>; // float16

} // namespace sparsity