#include "core/modelIR/Tensor.h"
-namespace nnc
-{
+namespace nnc {
using namespace mir;
#ifdef NNC_HDF5_SUPPORTED
+
/**
* @brief save tensor in file in '.hdf5' format
* @param tensor - tensor to save
- * @param tensorName - name, by wich tensor will be saved
- * @param fileName - path to file, in which tensor will be saved
+ * @param tensor_name - name, by which tensor will be saved
+ * @param destination - path to file, in which tensor will be saved
*/
-static void writeTensorToHDF5File(TensorVariant *tensor, std::string tensorName, std::string destPath)
-{
+static void writeTensorToHDF5File(TensorVariant* tensor,
+ std::string tensor_name,
+ const std::string& destination) {
+
// Prepare shape, rank, dims, numElems
- Shape shape = tensor->getShape();
+ auto& shape = tensor->getShape();
const int32_t rank = shape.rank();
hsize_t dims[rank];
- size_t numElems = 1;
for (int32_t axis = 0; axis < rank; ++axis) {
dims[axis] = shape.dim(axis);
- numElems *= shape.dim(axis);
}
// Create float array from tensor
- float *tensorValues = new float[numElems];
- ShapeRange outRange(shape);
- Tensor<float> tensorAccessor(*tensor);
- int i = 0;
- for (auto outIdx : outRange)
- tensorValues[i++] = tensorAccessor.at(outIdx);
+ std::vector<float> values;
+ values.reserve(shape.numElements());
+ ShapeRange out_range(shape);
+ Tensor<float> tensor_accessor(*tensor);
+
+ for (auto& out_idx : out_range)
+ values.push_back(tensor_accessor.at(out_idx));
// Backslashes are not allowed in tensor names
- std::replace(tensorName.begin(), tensorName.end(), '/', '_');
- std::string fileName = destPath + "/" + tensorName + ".hdf5";
+ std::replace(tensor_name.begin(), tensor_name.end(), '/', '_');
+ std::string filename = destination + "/" + tensor_name + ".hdf5";
// Write to .hdf5 file
- H5::H5File h5File(fileName, H5F_ACC_TRUNC);
+ H5::H5File h5File(filename, H5F_ACC_TRUNC);
H5::DataSpace dataspace(rank, dims);
- auto dataset = h5File.createDataSet(tensorName, H5::PredType::IEEE_F32BE, dataspace);
- dataset.write(tensorValues, H5::PredType::NATIVE_FLOAT);
-
- delete[] tensorValues;
+ auto dataset = h5File.createDataSet(tensor_name, H5::PredType::IEEE_F32BE, dataspace);
+ dataset.write(values.data(), H5::PredType::NATIVE_FLOAT);
}
+
#endif // NNC_HDF5_SUPPORTED
-PassData InterpreterPass::run(PassData data)
-{
- auto g = static_cast<Graph *>(data);
+PassData InterpreterPass::run(PassData data) {
+ auto g = static_cast<Graph*>(data);
assert(g);
- ShapeInference shapeInference;
+ ShapeInference shape_inference;
NNInterpreter interpreter;
- g->accept(&shapeInference);
+ g->accept(&shape_inference);
// Check ops
- auto inputOp = g->getInput(cli::interInNode);
- if (inputOp == nullptr) {
- throw PassException("input node <" + cli::interInNode +"> not found" );
- }
+ const auto& inputs = g->collectInputs();
+ assert(inputs.size() == 1 && "Interpreter doesn't support networks with multiple input nodes");
-
- auto input = loadInput(inputOp->getOutputShape(0));
- interpreter.setInput(cli::interInNode, input);
+ auto input_node = inputs[0];
+ auto input_data = loadInput(input_node->getOutputShape(0));
+ interpreter.setInput(input_node->getName(), input_data);
g->accept(&interpreter);
// Check nodes
- for (auto &tensorName : cli::interOutNode) {
- auto outputNode = interpreter.getOperationResult(tensorName);
+ const auto& outputs = g->collectOutputs();
+
+ for (auto& out : outputs) {
+ auto outputNode = interpreter.getResult(out);
if (outputNode.empty()) {
- throw PassException("output node <" + tensorName + "> not found");
+ throw PassException("No value for output node <" + out->getName() + ">");
} else {
- std::cout << "OutNode <" + tensorName + "> found" << std::endl;
+ std::cout << "Output node <" + out->getName() + "> found" << std::endl;
}
}
-
- bool is_several_outs = (cli::interOutNode.size() > 1);
- nnc::mir::TensorVariant *out = nullptr;
- for (auto &tensorName : cli::interOutNode) {
- out = new TensorVariant(interpreter.getOperationResult(tensorName)[0]);
+ bool is_several_outs = (outputs.size() > 1);
+
+ nnc::mir::TensorVariant* out_data = nullptr;
+ for (auto& out_node : outputs) {
+ out_data = new TensorVariant(interpreter.getResult(out_node)[0]);
#ifdef NNC_HDF5_SUPPORTED
- writeTensorToHDF5File(out, tensorName, cli::artifactDir);
+ writeTensorToHDF5File(out_data, out_node->getName(), cli::artifactDir);
#else
- std::cout << "Result <" << tensorName << "> wasn't saved, due to lack of HDF5" << std::endl;
+ std::cout << "Result <" << out_node->getName()
+ << "> wasn't saved, due to lack of HDF5" << std::endl;
+
#endif // NNC_HDF5_SUPPORTED
- if ( is_several_outs )
- delete out;
+ if (is_several_outs)
+ delete out_data;
}
- _out = is_several_outs ? nullptr : out;
+ _out = is_several_outs ? nullptr : out_data;
return _out;
}
-TensorVariant InterpreterPass::loadInput(const Shape &shape)
-{
+TensorVariant InterpreterPass::loadInput(const Shape& shape) {
auto f = fopen(cli::interInputData.c_str(), "rb");
assert(f && "Cannot open file");
int is_error = fseek(f, 0L, SEEK_END);
assert(!is_error);
- auto len = static_cast<int64_t>(ftell(f));
+ auto len = static_cast<size_t>(ftell(f));
assert(len != -1);
- auto tensorSize = static_cast<int64_t>(shape.numElements() * sizeof(float));
+ auto data_size = static_cast<size_t>(shape.numElements() * sizeof(float));
// Check size
- if (len != tensorSize) {
+ if (len != data_size) {
std::stringstream info;
- info << "Wrong input file size <" << cli::interInputData << "> = " << len << ". Should be :" << tensorSize;
+    info << "Wrong input file size <" << cli::interInputData << "> = "
+         << len << ". Should be: " << data_size;
+
throw PassException(info.str());
}
rewind(f);
- auto data = new char[len];
- auto rlen = fread(data, len, 1, f);
+ auto data = new float[shape.numElements()];
+ auto rlen = fread(data, data_size, 1, f);
assert(rlen == 1);
- (void)rlen;
+ (void) rlen;
+
is_error = fclose(f);
- assert(is_error != EOF && "Can not close file!");
- (void)is_error;
+ assert(is_error != EOF && "Can not close file!");
+ (void) is_error;
- return TensorVariant(shape, std::shared_ptr<char>(data, std::default_delete<char[]>()),
- DTYPE::FLOAT32, sizeof(float));
+ std::shared_ptr<float> buffer_sp(data, std::default_delete<float[]>());
+ return TensorVariant(shape, buffer_sp, DTYPE::FLOAT32);
}
-InterpreterPass::~InterpreterPass()
-{
+InterpreterPass::~InterpreterPass() {
delete _out;
}