/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "h5formatter.h"
19 #include "nnfw_util.h"
// Name of the HDF5 group that holds one dataset per tensor index
// ("/value/0", "/value/1", ...). Shared by loadInputs and dumpOutputs.
static const char *h5_value_grpname = "value";
29 void H5Formatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
32 NNPR_ENSURE_STATUS(nnfw_input_size(session_, &num_inputs));
35 // Turn off the automatic error printing.
36 H5::Exception::dontPrint();
38 H5::H5File file(filename, H5F_ACC_RDONLY);
39 H5::Group value_group = file.openGroup(h5_value_grpname);
40 for (uint32_t i = 0; i < num_inputs; ++i)
43 NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session_, i, &ti));
44 // allocate memory for data
45 auto bufsz = bufsize_for(&ti);
46 inputs[i].alloc(bufsz);
48 H5::DataSet data_set = value_group.openDataSet(std::to_string(i));
49 H5::DataType type = data_set.getDataType();
52 case NNFW_TYPE_TENSOR_FLOAT32:
53 if (type == H5::PredType::IEEE_F32BE || type == H5::PredType::IEEE_F32LE)
54 data_set.read(inputs[i].data(), H5::PredType::NATIVE_FLOAT);
56 throw std::runtime_error("model input type is f32. But h5 data type is different.");
58 case NNFW_TYPE_TENSOR_INT32:
59 if (type == H5::PredType::STD_I32BE || type == H5::PredType::STD_I32LE)
60 data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT32);
62 throw std::runtime_error("model input type is i32. But h5 data type is different.");
64 case NNFW_TYPE_TENSOR_INT64:
65 if (type == H5::PredType::STD_I64BE || type == H5::PredType::STD_I64LE)
66 data_set.read(inputs[i].data(), H5::PredType::NATIVE_INT64);
68 throw std::runtime_error("model input type is i64. But h5 data type is different.");
70 case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
71 case NNFW_TYPE_TENSOR_BOOL:
72 case NNFW_TYPE_TENSOR_UINT8:
73 if (type == H5::PredType::STD_U8BE || type == H5::PredType::STD_U8LE)
74 data_set.read(inputs[i].data(), H5::PredType::NATIVE_UINT8);
76 throw std::runtime_error(
77 "model input type is qasymm8, bool or uint8. But h5 data type is different.");
80 throw std::runtime_error("nnpkg_run can load f32, i32, qasymm8, bool and uint8.");
82 NNPR_ENSURE_STATUS(nnfw_set_input(session_, i, ti.dtype, inputs[i].data(), bufsz));
83 NNPR_ENSURE_STATUS(nnfw_set_input_layout(session_, i, NNFW_LAYOUT_CHANNELS_LAST));
86 catch (const H5::Exception &e)
88 H5::Exception::printErrorStack();
91 catch (const std::exception &e)
93 std::cerr << e.what() << std::endl;
98 void H5Formatter::dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs)
100 uint32_t num_outputs;
101 NNPR_ENSURE_STATUS(nnfw_output_size(session_, &num_outputs));
104 // Turn off the automatic error printing.
105 H5::Exception::dontPrint();
107 H5::H5File file(filename, H5F_ACC_TRUNC);
108 H5::Group value_group = file.createGroup(h5_value_grpname);
109 for (uint32_t i = 0; i < num_outputs; i++)
112 NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session_, i, &ti));
113 std::vector<hsize_t> dims(ti.rank);
114 for (uint32_t j = 0; j < ti.rank; ++j)
117 dims[j] = static_cast<hsize_t>(ti.dims[j]);
120 std::cerr << "Negative dimension in output tensor" << std::endl;
124 H5::DataSpace data_space(ti.rank, dims.data());
127 case NNFW_TYPE_TENSOR_FLOAT32:
129 H5::DataSet data_set =
130 value_group.createDataSet(std::to_string(i), H5::PredType::IEEE_F32BE, data_space);
131 data_set.write(outputs[i].data(), H5::PredType::NATIVE_FLOAT);
134 case NNFW_TYPE_TENSOR_INT32:
136 H5::DataSet data_set =
137 value_group.createDataSet(std::to_string(i), H5::PredType::STD_I32LE, data_space);
138 data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT32);
141 case NNFW_TYPE_TENSOR_INT64:
143 H5::DataSet data_set =
144 value_group.createDataSet(std::to_string(i), H5::PredType::STD_I64LE, data_space);
145 data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT64);
148 case NNFW_TYPE_TENSOR_QUANT8_ASYMM:
150 H5::DataSet data_set =
151 value_group.createDataSet(std::to_string(i), H5::PredType::STD_U8BE, data_space);
152 data_set.write(outputs[i].data(), H5::PredType::NATIVE_UINT8);
155 case NNFW_TYPE_TENSOR_BOOL:
157 H5::DataSet data_set =
158 value_group.createDataSet(std::to_string(i), H5::PredType::STD_I8LE, data_space);
159 data_set.write(outputs[i].data(), H5::PredType::NATIVE_INT8);
162 case NNFW_TYPE_TENSOR_UINT8:
164 H5::DataSet data_set =
165 value_group.createDataSet(std::to_string(i), H5::PredType::STD_U8BE, data_space);
166 data_set.write(outputs[i].data(), H5::PredType::NATIVE_UINT8);
170 throw std::runtime_error("nnpkg_run can dump f32, i32, qasymm8, bool and uint8.");
174 catch (const H5::Exception &e)
176 H5::Exception::printErrorStack();
179 catch (const std::runtime_error &e)
181 std::cerr << "Error during dumpOutputs on nnpackage_run : " << e.what() << std::endl;
186 } // end of namespace nnpkg_run