From: Vladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자
Date: Wed, 28 Nov 2018 14:14:24 +0000 (+0300)
Subject: [nnc] Remove redundant interpreter options (#2425)
X-Git-Tag: nncc_backup~1243
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=709431125b47ef9fa826b9a99609f86581ad0cfd;p=platform%2Fcore%2Fml%2Fnnfw.git

[nnc] Remove redundant interpreter options (#2425)

- Remove cli options for input/output node, which are used only in interpreter
- fix code style in interpreter_pass.cpp

Signed-off-by: Vladimir Plazun
---

diff --git a/contrib/nnc/driver/Options.cpp b/contrib/nnc/driver/Options.cpp
index 9712223..ba6ea9e 100644
--- a/contrib/nnc/driver/Options.cpp
+++ b/contrib/nnc/driver/Options.cpp
@@ -162,14 +162,6 @@ Option<std::string> interInputData(optname("--input-model-data"),
                                    optional(true),
                                    optvalues(""),
                                    checkInFile);
-Option<std::string> interInNode(optname("--input-node"),
-                                overview("interpreter option: set input node in Computational Graph"),
-                                std::string(),
-                                optional(true));
-Option<std::vector<std::string>> interOutNode(optname("--output-node"),
-                                              overview("interpreter option: set output node in Computational Graph"),
-                                              std::vector<std::string>{},
-                                              optional(true));
 
 } // namespace cli
 } // namespace nnc
diff --git a/contrib/nnc/include/option/Options.h b/contrib/nnc/include/option/Options.h
index 79ab690..9a605ff 100644
--- a/contrib/nnc/include/option/Options.h
+++ b/contrib/nnc/include/option/Options.h
@@ -57,9 +57,6 @@ extern Option<std::string> artifactName; // name of artifact
 /**
  * Options for interpreter
  */
 extern Option<std::string> interInputData; // input data for model
-extern Option<std::string> interInNode; // name of input node in computational graph
-extern Option<std::vector<std::string>> interOutNode; // name of output nodes in computational graph
-
 } // namespace cli
 } // namespace nnc
diff --git a/contrib/nnc/passes/interpreter/interpreter_pass.cpp b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
index 43a2aa8..374e261 100644
--- a/contrib/nnc/passes/interpreter/interpreter_pass.cpp
+++ b/contrib/nnc/passes/interpreter/interpreter_pass.cpp
@@ -43,139 +43,141 @@
 #include "core/modelIR/Tensor.h"
 
-namespace nnc
-{
+namespace nnc {
 
 using namespace mir;
 
 #ifdef NNC_HDF5_SUPPORTED
+
 /**
  * @brief save tensor in file in '.hdf5' format
  * @param tensor - tensor to save
- * @param tensorName - name, by wich tensor will be saved
- * @param fileName - path to file, in which tensor will be saved
+ * @param tensor_name - name, by wich tensor will be saved
+ * @param destination - path to file, in which tensor will be saved
 */
-static void writeTensorToHDF5File(TensorVariant *tensor, std::string tensorName, std::string destPath)
-{
+static void writeTensorToHDF5File(TensorVariant* tensor,
+                                  std::string tensor_name,
+                                  const std::string& destination) {
+
   // Prepare shape, rank, dims, numElems
-  Shape shape = tensor->getShape();
+  auto& shape = tensor->getShape();
   const int32_t rank = shape.rank();
   hsize_t dims[rank];
-  size_t numElems = 1;
   for (int32_t axis = 0; axis < rank; ++axis) {
     dims[axis] = shape.dim(axis);
-    numElems *= shape.dim(axis);
   }
 
   // Create float array from tensor
-  float *tensorValues = new float[numElems];
-  ShapeRange outRange(shape);
-  Tensor<float> tensorAccessor(*tensor);
-  int i = 0;
-  for (auto outIdx : outRange)
-    tensorValues[i++] = tensorAccessor.at(outIdx);
+  std::vector<float> values;
+  values.reserve(shape.numElements());
+  ShapeRange out_range(shape);
+  Tensor<float> tensor_accessor(*tensor);
+
+  for (auto& out_idx : out_range)
+    values.push_back(tensor_accessor.at(out_idx));
 
   // Backslashes are not allowed in tensor names
-  std::replace(tensorName.begin(), tensorName.end(), '/', '_');
-  std::string fileName = destPath + "/" + tensorName + ".hdf5";
+  std::replace(tensor_name.begin(), tensor_name.end(), '/', '_');
+  std::string filename = destination + "/" + tensor_name + ".hdf5";
 
   // Write to .hdf5 file
-  H5::H5File h5File(fileName, H5F_ACC_TRUNC);
+  H5::H5File h5File(filename, H5F_ACC_TRUNC);
   H5::DataSpace dataspace(rank, dims);
-  auto dataset = h5File.createDataSet(tensorName, H5::PredType::IEEE_F32BE, dataspace);
-  dataset.write(tensorValues, H5::PredType::NATIVE_FLOAT);
-
-  delete[] tensorValues;
+  auto dataset = h5File.createDataSet(tensor_name, H5::PredType::IEEE_F32BE, dataspace);
+  dataset.write(values.data(), H5::PredType::NATIVE_FLOAT);
 }
+
 #endif // NNC_HDF5_SUPPORTED
 
-PassData InterpreterPass::run(PassData data)
-{
-  auto g = static_cast<Graph *>(data);
+PassData InterpreterPass::run(PassData data) {
+  auto g = static_cast<Graph*>(data);
   assert(g);
 
-  ShapeInference shapeInference;
+  ShapeInference shape_inference;
   NNInterpreter interpreter;
 
-  g->accept(&shapeInference);
+  g->accept(&shape_inference);
 
   // Check ops
-  auto inputOp = g->getInput(cli::interInNode);
-  if (inputOp == nullptr) {
-    throw PassException("input node <" + cli::interInNode +"> not found" );
-  }
+  const auto& inputs = g->collectInputs();
+  assert(inputs.size() == 1 && "Interpreter doesn't support networks with multiple input nodes");
 
-
-  auto input = loadInput(inputOp->getOutputShape(0));
-  interpreter.setInput(cli::interInNode, input);
+  auto input_node = inputs[0];
+  auto input_data = loadInput(input_node->getOutputShape(0));
+  interpreter.setInput(input_node->getName(), input_data);
   g->accept(&interpreter);
 
   // Check nodes
-  for (auto &tensorName : cli::interOutNode) {
-    auto outputNode = interpreter.getOperationResult(tensorName);
+  const auto& outputs = g->collectOutputs();
+
+  for (auto& out : outputs) {
+    auto outputNode = interpreter.getResult(out);
     if (outputNode.empty()) {
-      throw PassException("output node <" + tensorName + "> not found");
+      throw PassException("No value for output node <" + out->getName() + ">");
     } else {
-      std::cout << "OutNode <" + tensorName + "> found" << std::endl;
+      std::cout << "Output node <" + out->getName() + "> found" << std::endl;
    }
  }
-
-  bool is_several_outs = (cli::interOutNode.size() > 1);
 
-  nnc::mir::TensorVariant *out = nullptr;
-  for (auto &tensorName : cli::interOutNode) {
-    out = new TensorVariant(interpreter.getOperationResult(tensorName)[0]);
+  bool is_several_outs = (outputs.size() > 1);
+
+  nnc::mir::TensorVariant* out_data = nullptr;
+  for (auto& out_node : outputs) {
+    out_data = new TensorVariant(interpreter.getResult(out_node)[0]);
 #ifdef NNC_HDF5_SUPPORTED
-    writeTensorToHDF5File(out, tensorName, cli::artifactDir);
+    writeTensorToHDF5File(out_data, out_node->getName(), cli::artifactDir);
 #else
-    std::cout << "Result <" << tensorName << "> wasn't saved, due to lack of HDF5" << std::endl;
+    std::cout << "Result <" << out_node->getName()
+              << "> wasn't saved, due to lack of HDF5" << std::endl;
+
 #endif // NNC_HDF5_SUPPORTED
-    if ( is_several_outs )
-      delete out;
+    if (is_several_outs)
+      delete out_data;
   }
-  _out = is_several_outs ? nullptr : out;
+  _out = is_several_outs ? nullptr : out_data;
 
   return _out;
 }
 
-TensorVariant InterpreterPass::loadInput(const Shape &shape)
-{
+TensorVariant InterpreterPass::loadInput(const Shape& shape) {
   auto f = fopen(cli::interInputData.c_str(), "rb");
   assert(f && "Cannot open file");
 
   int is_error = fseek(f, 0L, SEEK_END);
   assert(!is_error);
 
-  auto len = static_cast(ftell(f));
+  auto len = static_cast(ftell(f));
   assert(len != -1);
 
-  auto tensorSize = static_cast(shape.numElements() * sizeof(float));
+  auto data_size = static_cast(shape.numElements() * sizeof(float));
 
   // Check size
-  if (len != tensorSize) {
+  if (len != data_size) {
     std::stringstream info;
-    info << "Wrong input file size <" << cli::interInputData << "> = " << len << ". Should be :" << tensorSize;
+    info << "Wrong input file size <" << cli::interInputData << "> = "
+         << len << ". Should be :" << data_size;
+
     throw PassException(info.str());
   }
 
   rewind(f);
 
-  auto data = new char[len];
-  auto rlen = fread(data, len, 1, f);
+  auto data = new float[shape.numElements()];
+  auto rlen = fread(data, data_size, 1, f);
   assert(rlen == 1);
-  (void)rlen;
+  (void) rlen;
+
   is_error = fclose(f);
-  assert(is_error != EOF && "Can not close file!");
-  (void)is_error;
+  assert(is_error != EOF && "Can not close file!");
+  (void) is_error;
 
-  return TensorVariant(shape, std::shared_ptr<char>(data, std::default_delete<char[]>()),
-                       DTYPE::FLOAT32, sizeof(float));
+  std::shared_ptr<float> buffer_sp(data, std::default_delete<float[]>());
+  return TensorVariant(shape, buffer_sp, DTYPE::FLOAT32);
 }
 
-InterpreterPass::~InterpreterPass()
-{
+InterpreterPass::~InterpreterPass() {
   delete _out;
 }
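
The tensors dumped by this pass can be checked with any HDF5 reader. Below is a minimal sketch (not part of the patch) of reading such a dump back with the HDF5 C++ API; the file name "out.hdf5" and dataset name "out" are hypothetical placeholders standing in for the <artifact-dir>/<output-name>.hdf5 file that writeTensorToHDF5File() produces.

// Sketch: read back a tensor written by writeTensorToHDF5File().
// "out.hdf5" and "out" are placeholder names, not produced by this commit.
#include <H5Cpp.h>

#include <iostream>
#include <string>
#include <vector>

int main() {
  const std::string filename = "out.hdf5";  // <artifact-dir>/<tensor_name>.hdf5
  const std::string dataset_name = "out";   // tensor name, '/' replaced by '_'

  H5::H5File file(filename, H5F_ACC_RDONLY);
  H5::DataSet dataset = file.openDataSet(dataset_name);

  // Recover rank and dimensions from the dataspace.
  H5::DataSpace space = dataset.getSpace();
  const int rank = space.getSimpleExtentNdims();
  std::vector<hsize_t> dims(rank);
  space.getSimpleExtentDims(dims.data());

  hsize_t num_elems = 1;
  for (int i = 0; i < rank; ++i)
    num_elems *= dims[i];

  // Values were stored as IEEE_F32BE; HDF5 converts to native float on read.
  std::vector<float> values(num_elems);
  dataset.read(values.data(), H5::PredType::NATIVE_FLOAT);

  std::cout << "rank = " << rank << ", elements = " << num_elems << std::endl;
  return 0;
}

Build against the HDF5 C++ bindings (e.g. -lhdf5_cpp -lhdf5), mirroring the NNC_HDF5_SUPPORTED dependency used by the pass itself.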