/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "h5formatter.h"

#include "nnfw_util.h"

#include <cstdlib>
#include <iostream>
#include <stdexcept>
#include <H5Cpp.h>
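
// Helper that converts an HDF5 dataset's dataspace into a TensorShape.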
namespace
{
onert_train::TensorShape getShape(H5::DataSet &data_set)
{
  std::vector<hsize_t> h5_shape; // hsize_t is unsigned long long
  H5::DataSpace data_space = data_set.getSpace();
  int rank = data_space.getSimpleExtentNdims();
  h5_shape.resize(rank);

  // read shape info from H5 file
  data_space.getSimpleExtentDims(h5_shape.data(), NULL);

  onert_train::TensorShape shape;
  for (auto dim : h5_shape)
    shape.emplace_back(static_cast<int>(dim));

  return shape;
}
} // namespace

namespace onert_train
{
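// All tensors live in this HDF5 group, one dataset per tensor, named "0", "1", ...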
static const char *h5_value_grpname = "value";

std::vector<TensorShape> H5Formatter::readTensorShapes(const std::string &filename)
{
  uint32_t num_inputs;
  NNPR_ENSURE_STATUS(nnfw_input_size(session_, &num_inputs));
  std::vector<TensorShape> tensor_shapes;

  try
  {
    H5::Exception::dontPrint();

    H5::H5File file(filename, H5F_ACC_RDONLY);
    H5::Group value_group = file.openGroup(h5_value_grpname);

    // Constraints: if there are n data set names, they should be unique and
    //              one of [ "0", "1", .. , "n-1" ]
    for (uint32_t i = 0; i < num_inputs; ++i)
    {
      H5::DataSet data_set = value_group.openDataSet(std::to_string(i));
      H5::DataType type = data_set.getDataType();
      auto shape = getShape(data_set);

      tensor_shapes.emplace_back(shape);
    }

    return tensor_shapes;
  }
  catch (const H5::Exception &e)
  {
    H5::Exception::printErrorStack();
    std::exit(-1);
  }
  catch (const std::exception &e)
  {
    std::cerr << e.what() << std::endl;
    std::exit(-1);
  }
}
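
// Reads one dataset per model input from the "value" group, verifies that the
// stored element type matches the model's input dtype, then binds each filled
// buffer to the session via nnfw_set_input().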
void H5Formatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
{
  uint32_t num_inputs;
  NNPR_ENSURE_STATUS(nnfw_input_size(session_, &num_inputs));
  try
  {
    // Turn off the automatic error printing.
    H5::Exception::dontPrint();

    H5::H5File file(filename, H5F_ACC_RDONLY);
    H5::Group value_group = file.openGroup(h5_value_grpname);
    for (uint32_t i = 0; i < num_inputs; ++i)
    {
      nnfw_tensorinfo ti;
      NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session_, i, &ti));

      // TODO Add Assert(nnfw shape, h5 file shape size)

      // allocate memory for data
      auto bufsz = bufsize_for(&ti);
      inputs[i].alloc(bufsz);

      H5::DataSet data_set = value_group.openDataSet(std::to_string(i));
      H5::DataType type = data_set.getDataType();

      // Read with the native in-memory type matching the model dtype;
      // either endianness is accepted on disk.
      switch (ti.dtype)
      {
        case NNFW_TYPE_TENSOR_FLOAT32:
          if (type == H5::PredType::IEEE_F32BE || type == H5::PredType::IEEE_F32LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_FLOAT);
          else
            throw std::runtime_error("model input type is f32. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_INT32:
          if (type == H5::PredType::STD_I32BE || type == H5::PredType::STD_I32LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT32);
          else
            throw std::runtime_error("model input type is i32. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_INT64:
          if (type == H5::PredType::STD_I64BE || type == H5::PredType::STD_I64LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT64);
          else
            throw std::runtime_error("model input type is i64. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
        case NNFW_TYPE_TENSOR_BOOL:
        case NNFW_TYPE_TENSOR_UINT8:
          if (type == H5::PredType::STD_U8BE || type == H5::PredType::STD_U8LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_UINT8);
          else
            throw std::runtime_error(
              "model input type is qasymm8, bool or uint8. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
          if (type == H5::PredType::STD_I8BE || type == H5::PredType::STD_I8LE)
            data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT8);
          else
            throw std::runtime_error("model input type is int8. But h5 data type is different.");
          break;
        case NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
          throw std::runtime_error("NYI for NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED type");
        default:
          throw std::runtime_error("onert_run can load f32, i32, qasymm8, bool and uint8.");
      }
      NNPR_ENSURE_STATUS(nnfw_set_input(session_, i, ti.dtype, inputs[i].data(), bufsz));
      NNPR_ENSURE_STATUS(nnfw_set_input_layout(session_, i, NNFW_LAYOUT_CHANNELS_LAST));
    }
  }
  catch (const H5::Exception &e)
  {
    H5::Exception::printErrorStack();
    std::exit(-1);
  }
  catch (const std::exception &e)
  {
    std::cerr << e.what() << std::endl;
    std::exit(-1);
  }
}
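
// Creates (truncates) the given H5 file and writes each model output as a
// dataset named by its index under the "value" group.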
void H5Formatter::dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs)
{
  uint32_t num_outputs;
  NNPR_ENSURE_STATUS(nnfw_output_size(session_, &num_outputs));
  try
  {
    // Turn off the automatic error printing.
    H5::Exception::dontPrint();

    H5::H5File file(filename, H5F_ACC_TRUNC);
    H5::Group value_group = file.createGroup(h5_value_grpname);
    for (uint32_t i = 0; i < num_outputs; i++)
    {
      nnfw_tensorinfo ti;
      NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session_, i, &ti));
      std::vector<hsize_t> dims(ti.rank);
      for (uint32_t j = 0; j < ti.rank; ++j)
      {
        if (ti.dims[j] >= 0)
          dims[j] = static_cast<hsize_t>(ti.dims[j]);
        else
        {
          std::cerr << "Negative dimension in output tensor" << std::endl;
          std::exit(-1);
        }
      }

      H5::DataSpace data_space(ti.rank, dims.data());
      switch (ti.dtype)
      {
        case NNFW_TYPE_TENSOR_FLOAT32:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::IEEE_F32BE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_FLOAT);
          break;
        }
        case NNFW_TYPE_TENSOR_INT32:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_I32LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT32);
          break;
        }
        case NNFW_TYPE_TENSOR_INT64:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_I64LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT64);
          break;
        }
        case NNFW_TYPE_TENSOR_UINT8:
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_U8BE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_UINT8);
          break;
        }
        case NNFW_TYPE_TENSOR_BOOL:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_U8LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT8);
          break;
        }
        case NNFW_TYPE_TENSOR_QUANT8_ASYMM_SIGNED:
        {
          H5::DataSet data_set =
            value_group.createDataSet(std::to_string(i), H5::PredType::STD_I8LE, data_space);
          data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT8);
          break;
        }
        case NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED:
          throw std::runtime_error("NYI for NNFW_TYPE_TENSOR_QUANT16_SYMM_SIGNED type");
        default:
          throw std::runtime_error("onert_run can dump f32, i32, qasymm8, bool and uint8.");
      }
    }
  }
  catch (const H5::Exception &e)
  {
    H5::Exception::printErrorStack();
    std::exit(-1);
  }
  catch (const std::runtime_error &e)
  {
    std::cerr << "Error during dumpOutputs on onert_run : " << e.what() << std::endl;
    std::exit(-1);
  }
}

} // end of namespace onert_train