From: Jihoon Lee
Date: Fri, 12 Mar 2021 12:53:40 +0000 (+0900)
Subject: [Filter] Refactor filter to be more robust
X-Git-Tag: accepted/tizen/unified/20210318.063524^0
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=83220053eb997d181ed0756c1c687b0c75604248;p=platform%2Fcore%2Fml%2Fnntrainer.git

[Filter] Refactor filter to be more robust

**Changes Proposed in this PR**
1. The nntrainer filter no longer requires dimensions to be specified.
2. The nntrainer filter now adapts to the incoming batch size (this required exposing neuralnet::setBatchSize).
3. The nntrainer filter no longer copies the incoming input on the filter side during inference.
4. The nntrainer filter now handles multiple inputs and multiple outputs.

**Major Changes**

`getInputTensorDim` and `getOutputTensorDim` are replaced by `setInputDim`.
`nntrainer->run` now recognizes more than one input and more than one output.

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee
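To illustrate the third point, here is a minimal sketch (not part of this change) of the no-copy input path that the refactored run() below relies on. Tensor::Map and MAKE_SHARED_TENSOR are taken exactly as they appear in this diff; the wrap_input helper itself is hypothetical, and the nntrainer/nnstreamer headers are assumed to be included:

    // Sketch only: wrap a caller-owned float buffer as an nntrainer tensor
    // without copying, mirroring what run() does for each input below.
    // `wrap_input` is a hypothetical helper, not part of the filter.
    nntrainer::sharedConstTensor wrap_input(float *buf, size_t bytes,
                                            const nntrainer::TensorDim &dim) {
      // The mapped tensor is only a view over `buf`; the caller must keep
      // the buffer alive until inference has finished.
      return MAKE_SHARED_TENSOR(
        nntrainer::Tensor::Map(buf, bytes, dim, /*offset=*/0));
    }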
---

diff --git a/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc b/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc
index b640992..fa34cd0 100644
--- a/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc
+++ b/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc
@@ -22,6 +22,7 @@
 #include
 #include
+#include

 #include "tensor_filter_nntrainer.hh"

@@ -45,77 +46,41 @@ static const gchar *nntrainer_accl_support[] = {NULL};
 void init_filter_nntrainer(void) __attribute__((constructor));
 void fini_filter_nntrainer(void) __attribute__((destructor));

-NNTrainerInference::NNTrainerInference(const char *model_config_,
-                                       const GstTensorFilterProperties *prop) {
-  gst_tensors_info_init(&inputTensorMeta);
-  gst_tensors_info_init(&outputTensorMeta);
+static std::unique_ptr<GstTensorInfo>
+to_nnst_tensor_dim(const nntrainer::TensorDim &dim) {
+  auto info = std::unique_ptr<GstTensorInfo>(g_new(GstTensorInfo, 1));
+  gst_tensor_info_init(info.get());

-  model_config = g_strdup(model_config_);
-  loadModel();
-  model->compile();
-  model->initialize();
-
-  validateTensor(&prop->input_meta, true);
-  validateTensor(&prop->output_meta, false);
-
-  model->readModel();
-
-  gst_tensors_info_copy(&inputTensorMeta, &prop->input_meta);
-  gst_tensors_info_copy(&outputTensorMeta, &prop->output_meta);
-}
-
-NNTrainerInference::~NNTrainerInference() {
-  if (model != nullptr) {
-    delete model;
+  info->type = _NNS_FLOAT32;
+  for (unsigned int i = 0; i < NUM_DIM; ++i) {
+    info->dimension[i] = dim.getTensorDim(NUM_DIM - i - 1);
   }
-  gst_tensors_info_free(&inputTensorMeta);
-  gst_tensors_info_free(&outputTensorMeta);
-  g_free(model_config);
+  return info;
 }

-const char *NNTrainerInference::getModelConfig() { return model_config; }
-
-void NNTrainerInference::validateTensor(const GstTensorsInfo *tensorInfo,
-                                        bool is_input) {
-
-  nntrainer::TensorDim dim;
-  nntrainer_tensor_info_s info_s;
-
-  if (is_input)
-    dim = model->getInputDimension()[0];
-  else
-    dim = model->getOutputDimension()[0];
-
-  if (tensorInfo->info[0].type != _NNS_FLOAT32)
-    throw std::invalid_argument(
-      "only float32 is supported for input and output");
-
-  info_s.rank = NUM_DIM;
-
-  info_s.dims.push_back(tensorInfo->info[0].dimension[NUM_DIM - 1]);
-
-  // Here, we only compare channel, height, width.
-  // Just use batch from info variable, cause it will be updated if it differs.
-  for (unsigned int i = 1; i < NUM_DIM; ++i) {
-    if (tensorInfo->info[0].dimension[i - 1] != dim.getDim()[NUM_DIM - i])
-      throw std::invalid_argument("Tensor dimension doesn't match");
+static nntrainer::TensorDim to_nntr_tensor_dim(const GstTensorInfo *info) {
+  const tensor_dim &d = info->dimension;
+  return {d[3], d[2], d[1], d[0]};
+}

-    info_s.dims.push_back(dim.getDim()[i]);
-  }
+NNTrainerInference::NNTrainerInference(const std::string &model_config_) :
+  model_config(model_config_) {
+  loadModel();
+  model->compile();
+  model->initialize();
+  model->readModel();
+}

-  if (is_input)
-    input_tensor_info.push_back(info_s);
+const char *NNTrainerInference::getModelConfig() {
+  return model_config.c_str();
 }

 void NNTrainerInference::loadModel() {
 #if (DBG)
   gint64 start_time = g_get_real_time();
 #endif
-  if (model_config == nullptr)
-    throw std::invalid_argument("model config is null!");
-
-  model = new nntrainer::NeuralNetwork();
+  model = std::make_unique<nntrainer::NeuralNetwork>();
   model->loadFromConfig(model_config);
 #if (DBG)
   gint64 stop_time = g_get_real_time();
@@ -123,16 +88,6 @@ void NNTrainerInference::loadModel() {
 #endif
 }

-int NNTrainerInference::getInputTensorDim(GstTensorsInfo *info) {
-  gst_tensors_info_copy(info, &inputTensorMeta);
-  return 0;
-}
-
-int NNTrainerInference::getOutputTensorDim(GstTensorsInfo *info) {
-  gst_tensors_info_copy(info, &outputTensorMeta);
-  return 0;
-}
-
 int NNTrainerInference::run(const GstTensorMemory *input,
                             GstTensorMemory *output) {
 #if (DBG)
@@ -140,15 +95,24 @@ int NNTrainerInference::run(const GstTensorMemory *input,
 #endif
   std::shared_ptr<nntrainer::Tensor> out;

-  std::vector<unsigned int> d = input_tensor_info[0].dims;
-  nntrainer::Tensor X =
-    nntrainer::Tensor(nntrainer::TensorDim(d[0], d[1], d[2], d[3]),
-                      static_cast<float *>(input[0].data));
+  auto input_dims = getInputDimension();
+  nntrainer::sharedConstTensors inputs;
+  inputs.reserve(input_dims.size());
+
+  const GstTensorMemory *input_mem = input;
+  unsigned int offset = 0;
+  for (auto &id : input_dims) {
+    // do not allocate a new buffer; map the incoming memory with Tensor::Map
+    inputs.emplace_back(MAKE_SHARED_TENSOR(nntrainer::Tensor::Map(
+      static_cast<float *>(input_mem->data), input_mem->size, id, offset)));
+    offset += input_mem->size;
+    input_mem++;
+  }

-  std::shared_ptr<const nntrainer::Tensor> o;
+  nntrainer::sharedConstTensors outputs;
   try {
-    o = model->inference({MAKE_SHARED_TENSOR(X)}, false)[0];
+    outputs = model->inference(inputs, false);
   } catch (std::exception &e) {
     ml_loge("%s %s", typeid(e).name(), e.what());
     return -2;
@@ -157,14 +121,18 @@ int NNTrainerInference::run(const GstTensorMemory *input,
     return -3;
   }

-  if (o == nullptr) {
-    return -1;
-  }
+  GstTensorMemory *output_mem = output;
+  for (auto &o : outputs) {
+    if (o == nullptr) {
+      return -1;
+    }

-  out = std::const_pointer_cast<nntrainer::Tensor>(o);
-  output[0].data = out->getData();
+    out = std::const_pointer_cast<nntrainer::Tensor>(o);
+    output_mem->data = out->getData();

-  outputTensorMap.insert(std::make_pair(output[0].data, out));
+    outputTensorMap.insert(std::make_pair(output_mem->data, out));
+    output_mem++;
+  }

 #if (DBG)
   gint64 stop_time = g_get_real_time();
@@ -210,7 +178,7 @@ static int nntrainer_loadModelFile(const GstTensorFilterProperties *prop,
   }

   try {
-    nntrainer = new NNTrainerInference(model_file, prop);
+    nntrainer = new NNTrainerInference(model_file);
   } catch (std::exception &e) {
     ml_loge("%s %s", typeid(e).name(), e.what());
     return -1;
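The output side is symmetric: run() above hands GStreamer a raw pointer into each output tensor and pins the owning shared_ptr in outputTensorMap until the downstream buffer is released, which is what a destroy-notify hook is for. A sketch of that ownership pattern, with hypothetical publish/release helpers and the member type taken from the header further below:

    #include <map>
    #include <memory>

    // Keyed by the raw pointer handed to GStreamer; holding the shared_ptr
    // keeps the tensor's backing memory alive with no copy made.
    std::map<void *, std::shared_ptr<nntrainer::Tensor>> outputTensorMap;

    void publish(GstTensorMemory *mem, std::shared_ptr<nntrainer::Tensor> t) {
      mem->data = t->getData();              // expose the data, do not copy it
      outputTensorMap.emplace(mem->data, t); // pin the tensor
    }

    void release(void *data) {
      outputTensorMap.erase(data);           // unpin once downstream is done
    }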
@@ -239,20 +207,45 @@ static int nntrainer_run(const GstTensorFilterProperties *prop,
   return nntrainer->run(input, output);
 }

-static int nntrainer_getInputDim(const GstTensorFilterProperties *prop,
-                                 void **private_data, GstTensorsInfo *info) {
+static int nntrainer_setInputDim(const GstTensorFilterProperties *prop,
+                                 void **private_data,
+                                 const GstTensorsInfo *in_info,
+                                 GstTensorsInfo *out_info) {
   NNTrainerInference *nntrainer =
     static_cast<NNTrainerInference *>(*private_data);
-  g_return_val_if_fail(nntrainer && info, -EINVAL);
-  return nntrainer->getInputTensorDim(info);
-}
+  g_return_val_if_fail(prop && nntrainer && in_info && out_info, -EINVAL);
+
+  auto num_input = in_info->num_tensors;
+  g_return_val_if_fail(num_input != 0, -EINVAL);
+
+  /// this does not allocate the memory for the inference, so setting the
+  /// batch here does not have a large effect on the first inference call as
+  /// of now. we could make a call to nntrainer->allocate(),
+  /// which would wrap around NeuralNetwork::allocate(false);
+  /// however, that might not be a good choice in terms of migrating to the api.
+  nntrainer->setBatchSize(in_info->info[0].dimension[3]);
+
+  auto model_inputs = nntrainer->getInputDimension();
+  /// check the number of inputs
+  g_return_val_if_fail(num_input == model_inputs.size(), -EINVAL);
+
+  /// check that each input dimension matches
+  for (unsigned int i = 0; i < num_input; ++i) {
+    g_return_val_if_fail(in_info->info[i].type == _NNS_FLOAT32, -EINVAL);
+    g_return_val_if_fail(
+      model_inputs[i] == to_nntr_tensor_dim(in_info->info + i), -EINVAL);
+  }

-static int nntrainer_getOutputDim(const GstTensorFilterProperties *prop,
-                                  void **private_data, GstTensorsInfo *info) {
-  NNTrainerInference *nntrainer =
-    static_cast<NNTrainerInference *>(*private_data);
-  g_return_val_if_fail(nntrainer && info, -EINVAL);
-  return nntrainer->getOutputTensorDim(info);
+  auto model_outputs = nntrainer->getOutputDimension();
+  g_return_val_if_fail(!model_outputs.empty(), -EINVAL);
+
+  /// set the GstTensorsInfo for the outputs
+  out_info->num_tensors = model_outputs.size();
+  for (unsigned int i = 0; i < out_info->num_tensors; ++i) {
+    gst_tensor_info_copy(out_info->info + i,
+                         to_nnst_tensor_dim(model_outputs[i]).get());
+  }
+
+  return 0;
 }

 static void nntrainer_destroyNotify(void **private_data, void *data) {
@@ -285,10 +278,11 @@ void init_filter_nntrainer(void) {
   NNS_support_nntrainer.run_without_model = FALSE;
   NNS_support_nntrainer.verify_model_path = FALSE;
   NNS_support_nntrainer.invoke_NN = nntrainer_run;
-  NNS_support_nntrainer.getInputDimension = nntrainer_getInputDim;
-  NNS_support_nntrainer.getOutputDimension = nntrainer_getOutputDim;
+  NNS_support_nntrainer.setInputDimension = nntrainer_setInputDim;
   NNS_support_nntrainer.destroyNotify = nntrainer_destroyNotify;
   NNS_support_nntrainer.checkAvailability = nntrainer_checkAvailability;
+  NNS_support_nntrainer.getInputDimension = NULL;
+  NNS_support_nntrainer.getOutputDimension = NULL;

   nnstreamer_filter_probe(&NNS_support_nntrainer);
 }
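Note the axis-order flip in the two conversion helpers above: nnstreamer's tensor_dim stores the innermost axis at index 0, while nntrainer's TensorDim is batch-first, so the helpers simply reverse the four entries. A worked example as a hypothetical snippet, assuming one single-channel 28x28 input with batch 1:

    #include <cstdint>

    // Hypothetical snippet: convert a 28x28 single-channel input, batch 1.
    nntrainer::TensorDim example_dim() {
      // nnstreamer dimension arrays are innermost-first: d[0] is the
      // fastest-varying axis and d[3] the outermost (batch) axis, which is
      // why nntrainer_setInputDim above reads the batch from dimension[3].
      const uint32_t d[4] = {28, 28, 1, 1};
      // Reversing the entries yields nntrainer's batch-first TensorDim:
      return {d[3], d[2], d[1], d[0]}; // i.e. batch 1, channel 1, 28x28
    }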
diff --git a/nnstreamer/tensor_filter/tensor_filter_nntrainer.hh b/nnstreamer/tensor_filter/tensor_filter_nntrainer.hh
index 8e9ed18..3948a2d 100644
--- a/nnstreamer/tensor_filter/tensor_filter_nntrainer.hh
+++ b/nnstreamer/tensor_filter/tensor_filter_nntrainer.hh
@@ -41,16 +41,14 @@ public:
    * @brief Construct a new NNTrainerInference object
    *
    * @param model_config_ config address
-   * @param prop tensor filter property
    */
-  NNTrainerInference(const char *model_config_,
-                     const GstTensorFilterProperties *prop);
+  NNTrainerInference(const std::string &model_config);

   /**
    * @brief Destroy the NNTrainerInference object
    *
    */
-  ~NNTrainerInference();
+  ~NNTrainerInference() = default;

   /**
    * @brief Get the Model Config object
@@ -60,23 +58,36 @@ public:
   const char *getModelConfig();

   /**
-   * @brief Get the Input Tensor Dim object
+   * @brief Set the batch size
    *
-   * @param[out] info copied tensor info, free after use
-   * @return int 0 if success
+   * @param batch batch size
    */
-  int getInputTensorDim(GstTensorsInfo *info);
+  void setBatchSize(unsigned int batch) {
+    std::stringstream ss;
+    ss << "batch_size=" << batch;
+    model->setProperty({ss.str()});
+  }

   /**
-   * @brief Get the Output Tensor Dim object
+   * @brief Get the input dimensions
    *
-   * @param info copied tensor info, free after use
-   * @return int 0 if success
+   * @return const std::vector<nntrainer::TensorDim> input dimensions
+   */
+  const std::vector<nntrainer::TensorDim> getInputDimension() {
+    return model->getInputDimension();
+  }
+
+  /**
+   * @brief Get the output dimensions
+   *
+   * @return const std::vector<nntrainer::TensorDim> output dimensions
    */
-  int getOutputTensorDim(GstTensorsInfo *info);
+  const std::vector<nntrainer::TensorDim> getOutputDimension() {
+    return model->getOutputDimension();
+  }

   /**
-   * @brief run inference
+   * @brief run inference and fill the outputs
    *
    * @param input input tensor memory
    * @param output output tensor memory
@@ -93,14 +104,21 @@ public:

 private:
   void loadModel();
-  void validateTensor(const GstTensorsInfo *tensorInfo, bool is_input);
-
-  char *model_config;
-  nntrainer::NeuralNetwork *model;
-
-  GstTensorsInfo inputTensorMeta;
-  GstTensorsInfo outputTensorMeta;
-  std::vector<nntrainer_tensor_info_s> input_tensor_info;
+  std::string model_config;
+  /// @todo change this to ccapi
+  /// required methods:
+  ///   model->loadFromConfig (available)
+  ///   model->setProperty (available)
+  ///   model->compile (available)
+  ///   model->initialize (available)
+  ///   model->readModel (available)
+  ///   model->inference (n/a)
+  ///   model->getInputDimension (n/a)
+  ///   model->getOutputDimension (n/a)
+  /// possibly required for optimization:
+  ///   model->forwarding (n/a)
+  ///   model->allocate (n/a)
+  std::unique_ptr<nntrainer::NeuralNetwork> model;

   std::map<void *, std::shared_ptr<nntrainer::Tensor>> outputTensorMap;
 };
diff --git a/nntrainer/graph/network_graph.cpp b/nntrainer/graph/network_graph.cpp
index 7d06893..400a73d 100644
--- a/nntrainer/graph/network_graph.cpp
+++ b/nntrainer/graph/network_graph.cpp
@@ -539,10 +539,16 @@ sharedConstTensors NetworkGraph::forwarding(bool training) {
 }

 std::vector<TensorDim> NetworkGraph::getInputDimension() {
+  /// @todo add check if the graph is compiled
+  NNTR_THROW_IF(num_node == 0, std::invalid_argument)
+    << "[NetworkGraph] the graph has no node!";
   return Sorted[0].layer->getInputDimension();
 }

 std::vector<TensorDim> NetworkGraph::getOutputDimension() {
+  /// @todo add check if the graph is compiled
+  NNTR_THROW_IF(num_node == 0, std::invalid_argument)
+    << "[NetworkGraph] the graph has no node!";
   return Sorted.back().layer->getOutputDimension();
 }
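The batch size reaches the model through nntrainer's generic key=value property interface rather than a dedicated setter, as setBatchSize in the header above shows. A minimal sketch of that mechanism; apply_batch is a hypothetical helper, and the batch() accessor on TensorDim is an assumption here:

    #include <sstream>

    // Hypothetical helper: push the negotiated batch size into the model via
    // the key=value property interface that setBatchSize above uses.
    void apply_batch(nntrainer::NeuralNetwork &model, unsigned int batch) {
      std::stringstream ss;
      ss << "batch_size=" << batch;
      model.setProperty({ss.str()});
      // Dimensions queried afterwards should reflect the new batch size,
      // i.e. model.getInputDimension()[0].batch() == batch (assumed API).
    }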
2 "Golden test comparison" 0 1 + PATH_TO_CONFIG="../test_models/models/mnist.ini" PATH_TO_DATA="../test_models/data/0.png" -gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} filesrc location=${PATH_TO_DATA} ! pngdec ! tensor_converter ! tensor_transform mode=transpose option=1:2:0:3 ! tensor_transform mode=typecast option=float32 ! tensor_filter framework=nntrainer model=${PATH_TO_CONFIG} input=28:28:1:1 inputtype=float32 output=10:1:1:1 outputtype=float32 ! filesink location=nntrainer.out.2.log" 1 0 0 $PERFORMANCE -python checkLabel.py nntrainer.out.2.log 0 -testResult $? 2 "Golden test comparison" 0 1 +gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} filesrc location=${PATH_TO_DATA} ! pngdec ! tensor_converter ! tensor_transform mode=transpose option=1:2:0:3 ! tensor_transform mode=typecast option=float32 ! tensor_filter framework=nntrainer model=${PATH_TO_CONFIG} input=28:28:1:1 inputtype=float32 output=10:1:1:1 outputtype=float32 ! filesink location=nntrainer.out.3.log" 3 0 0 $PERFORMANCE +python checkLabel.py nntrainer.out.3.log 0 +testResult $? 3 "Golden test comparison" 0 1 report