/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <mio_circle/Reader.h>

#include <algorithm>
#include <cassert>
#include <memory>
#include <ostream>
#include <stdexcept>
#include <string>
#include <vector>
32 void print_comma_sepearted(std::ostream &os, const flatbuffers::Vector<T> *vec)
36 for (auto iter = vec->begin(); iter != vec->end(); iter++)
38 if (iter != vec->begin())
44 void print_buffer(std::ostream &os, uint32_t buff_idx, const flatbuffers::Vector<uint8_t> *data_ptr,
45 const circle::TensorType &type)
47 if (data_ptr == nullptr)
50 os << " └── buffer" << std::endl;
51 os << " ├── index : " << buff_idx << std::endl;
52 size_t buff_size = data_ptr->size();
53 os << " ├── size : " << buff_size << std::endl;
57 case circle::TensorType_UINT8:
59 const uint8_t *buff_data_ui8 = reinterpret_cast<const uint8_t *>(data_ptr->data());
60 for (uint32_t idx = 0; idx < buff_size / sizeof(uint8_t); idx++)
62 os << static_cast<const uint32_t>(buff_data_ui8[idx]) << ", ";
66 case circle::TensorType_INT32:
68 const int32_t *buff_data_i32 = reinterpret_cast<const int32_t *>(data_ptr->data());
69 for (uint32_t idx = 0; idx < buff_size / sizeof(int32_t); idx++)
71 os << buff_data_i32[idx] << ", ";
75 case circle::TensorType_INT64:
77 const int64_t *buff_data_i64 = reinterpret_cast<const int64_t *>(data_ptr->data());
78 for (uint32_t idx = 0; idx < buff_size / sizeof(int64_t); idx++)
80 os << buff_data_i64[idx] << ", ";
84 case circle::TensorType_FLOAT32:
86 const float *buff_data_f32 = reinterpret_cast<const float *>(data_ptr->data());
87 for (uint32_t idx = 0; idx < buff_size / sizeof(float); idx++)
89 os << buff_data_f32[idx] << ", ";
94 throw std::runtime_error("NYI tensor type : " + std::to_string(type));
101 namespace circletensordump
104 void DumpTensors::run(std::ostream &os, const circle::Model *model, const std::string &)
106 mio::circle::Reader reader(model);
107 uint32_t num_subgraph = reader.num_subgraph();
108 auto buffers = reader.buffers();
110 for (uint32_t subgraph_idx = 0; subgraph_idx < num_subgraph; subgraph_idx++)
112 reader.select_subgraph(subgraph_idx);
114 auto tensors = reader.tensors();
115 for (const auto &tensor : *tensors)
117 const auto tensor_name = tensor->name();
118 std::string tensor_name_str = tensor_name ? tensor_name->str() : "no_name";
119 os << std::string(70, '-') << std::endl;
120 os << "[" << tensor_name_str << "]" << std::endl;
121 auto buff_idx = tensor->buffer();
122 auto buff_data_ptr = reader.buffers()->Get(buff_idx)->data();
123 auto quant_param = tensor->quantization();
124 std::string print_format = (!buff_data_ptr && !quant_param) ? "└──" : "├──";
127 auto shape = tensor->shape();
128 os << " " + print_format + " shape : (";
129 ::print_comma_sepearted(os, shape);
130 os << ")" << std::endl;
132 // quantization paramters
135 std::string print_format1 = buff_data_ptr ? "├──" : "└──";
136 std::string print_format2 = buff_data_ptr ? "│" : " ";
137 os << " " + print_format1 + " quantization" << std::endl;
138 auto min = quant_param->min();
139 auto max = quant_param->max();
140 auto scale = quant_param->scale();
141 auto zero_point = quant_param->zero_point();
142 auto quantized_dimension = quant_param->quantized_dimension();
144 os << " " + print_format2 + " ├── min : ";
145 ::print_comma_sepearted(os, min);
147 os << " " + print_format2 + " ├── max : ";
148 ::print_comma_sepearted(os, max);
150 os << " " + print_format2 + " ├── scale : ";
151 ::print_comma_sepearted(os, scale);
153 os << " " + print_format2 + " ├── zero_point : ";
154 ::print_comma_sepearted(os, zero_point);
156 os << " " + print_format2 + " └── quantized_dimension : " << quantized_dimension;
161 print_buffer(os, buff_idx, buff_data_ptr, tensor->type());
167 } // namespace circletensordump
// HDF5 forbids the inclusion of '/' in the name.
/// @brief Return a copy of @p name with every '/' replaced by '_' so it
///        can be used as an HDF5 object name ('/' is the group separator).
std::string mangle(const std::string &name)
{
  std::string ret{name};
  std::replace(ret.begin(), ret.end(), '/', '_');
  return ret;
}
180 H5::PredType hdf5_dtype_cast(const circle::TensorType &circle_type)
184 case circle::TensorType_UINT8:
186 return H5::PredType::NATIVE_UINT8;
188 case circle::TensorType_INT16:
190 return H5::PredType::NATIVE_INT16;
192 case circle::TensorType_INT32:
194 return H5::PredType::NATIVE_INT32;
196 case circle::TensorType_INT64:
198 return H5::PredType::NATIVE_INT64;
200 case circle::TensorType_FLOAT32:
202 return H5::PredType::NATIVE_FLOAT;
205 throw std::runtime_error("NYI tensor type : " + std::to_string(circle_type));
210 * In order to create a dataspace, its rank and dimensions are required as hsize_t type.
211 * This function converts flatbuffers::Vector<T> to std::vector<hsize_t>.
213 * If "dims" parameter is passed, the parameter will be converted. However, if
214 * not passed(nullptr), data is considered as a rank 1 vector.
216 template <typename T>
217 std::vector<hsize_t> hdf5_dims_cast(const flatbuffers::Vector<T> *data,
218 const flatbuffers::Vector<int32_t> *dims = nullptr)
220 std::vector<hsize_t> ret;
226 ret.at(0) = data->size();
230 const uint32_t rank = dims->size();
232 for (uint32_t d = 0; d < rank; d++)
234 if (dims->Get(d) < 0)
235 throw std::runtime_error("Dimensions shouldn't be negative");
236 ret.at(d) = static_cast<hsize_t>(dims->Get(d));
244 * This function writes vector data to given hdf5 file like below.
248 * ㄴDATASET "dataset_name"
252 template <typename T>
253 void write_vector_data_to_hdf5(H5::H5File &file, std::string &group_name, std::string dataset_name,
254 const H5::PredType &type, const flatbuffers::Vector<T> *data,
255 std::vector<hsize_t> dims)
259 auto dataspace = std::make_unique<H5::DataSpace>(dims.size(), dims.data());
260 auto dataset = std::make_unique<H5::DataSet>(
261 file.createDataSet(group_name + "/" + dataset_name, type, *dataspace));
262 dataset->write(data->data(), type);
265 /// @brief This function writes scalar data to given hdf5 file
266 template <typename T>
267 void write_scalar_data_to_hdf5(H5::H5File &file, std::string &group_name, std::string dataset_name,
268 const H5::PredType &type, T data)
270 auto dataspace = std::make_unique<H5::DataSpace>(H5S_SCALAR);
271 auto dataset = std::make_unique<H5::DataSet>(
272 file.createDataSet(group_name + "/" + dataset_name, type, *dataspace));
273 dataset->write(&data, type);
278 namespace circletensordump
282 * HDF5 layout is like below
285 * ㄴGROUP "tensor name"
286 * ㄴDATASET "weights" : Shape (x, y, ...), type(uint8, int16)
287 * ㄴDATASET "min" : Shape (n)
288 * ㄴDATASET "max" : Shape (n)
289 * ㄴDATASET "scale" : Shape (m)
290 * ㄴDATASET "zero_point" : Shape (m)
292 * NOTE All Dataset is optional. It means that if tensor doesn't have the data, it won't be created
296 void DumpTensorsToHdf5::run(std::ostream &os, const circle::Model *model,
297 const std::string &output_path)
299 // loads a circle model
300 mio::circle::Reader reader(model);
301 uint32_t num_subgraph = reader.num_subgraph();
303 // create a hdf5 file
304 H5::H5File file{output_path, H5F_ACC_TRUNC};
306 for (uint32_t subgraph_idx = 0; subgraph_idx < num_subgraph; subgraph_idx++)
308 reader.select_subgraph(subgraph_idx);
310 auto tensors = reader.tensors();
311 for (const auto &tensor : *tensors)
313 // If tensor does not have name, do nothing.
314 const auto tensor_name = tensor->name();
315 if (tensor_name == nullptr)
317 assert(false && "There is no tensor name");
321 // create a group for each tensor whose name is its tensor name
322 std::string group_name = ::mangle(tensor_name->c_str());
323 std::unique_ptr<H5::Group> tensor_group =
324 std::make_unique<H5::Group>(file.createGroup(group_name));
326 // write a buffer data
327 uint32_t buff_idx = tensor->buffer();
328 auto buff_data_ptr = reader.buffers()->Get(buff_idx)->data();
331 ::write_vector_data_to_hdf5(file, group_name, "weights", ::hdf5_dtype_cast(tensor->type()),
333 ::hdf5_dims_cast(buff_data_ptr, tensor->shape()));
336 // write quantization parameters
337 auto quant_param = tensor->quantization();
340 auto min = quant_param->min();
341 ::write_vector_data_to_hdf5(file, group_name, "min", H5::PredType::NATIVE_FLOAT, min,
342 ::hdf5_dims_cast(min));
343 auto max = quant_param->max();
344 ::write_vector_data_to_hdf5(file, group_name, "max", H5::PredType::NATIVE_FLOAT, max,
345 ::hdf5_dims_cast(max));
346 auto scale = quant_param->scale();
347 ::write_vector_data_to_hdf5(file, group_name, "scale", H5::PredType::NATIVE_FLOAT, scale,
348 ::hdf5_dims_cast(scale));
349 auto zero_point = quant_param->zero_point();
350 ::write_vector_data_to_hdf5(file, group_name, "zero_point", H5::PredType::NATIVE_INT64,
351 zero_point, ::hdf5_dims_cast(zero_point));
352 auto quantized_dimension = quant_param->quantized_dimension();
353 ::write_scalar_data_to_hdf5(file, group_name, "quantized_dimension",
354 H5::PredType::NATIVE_INT32, quantized_dimension);
360 } // namespace circletensordump