From: jijoong.moon Date: Fri, 20 Nov 2020 00:58:43 +0000 (+0900) Subject: [ NNSTREAMER FILTER ] Fix nnstreamer filter to support graph X-Git-Tag: submit/tizen/20201127.015104~4 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c3bc3f4ef242a4df00e2551e78f314f61d8c346c;p=platform%2Fcore%2Fml%2Fnntrainer.git [ NNSTREAMER FILTER ] Fix nnstreamer filter to support graph Update the nnstreamer tensor filter to follow the revised model lifecycle: call model->compile() after loadFromConfig(), and use model->initialize() in place of the former model->init(). Also applies clang-format fixes to layer_internal.h. **Changes proposed in this PR:** - Call model->compile() after loadFromConfig() in the nnstreamer tensor filter - Replace model->init() with model->initialize() - Apply clang-format fixes to nntrainer/layers/layer_internal.h Resolves: **Self evaluation:** 1. Build test: [X]Passed [ ]Failed [ ]Skipped 2. Run test: [X]Passed [ ]Failed [ ]Skipped Signed-off-by: jijoong.moon --- diff --git a/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc b/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc index 9eaf94ff..8db29e7f 100644 --- a/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc +++ b/nnstreamer/tensor_filter/tensor_filter_nntrainer.cc @@ -91,7 +91,7 @@ NNTrainer::NNTrainer(const char *model_config_, validateTensor(&prop->input_meta, true); validateTensor(&prop->output_meta, false); - model->init(); + model->initialize(); model->readModel(); gst_tensors_info_copy(&inputTensorMeta, &prop->input_meta); @@ -147,6 +147,7 @@ void NNTrainer::loadModel() { model = new nntrainer::NeuralNetwork(); model->loadFromConfig(model_config); + model->compile(); #if (DBG) gint64 stop_time = g_get_real_time(); g_message("Model is loaded: %" G_GINT64_FORMAT, (stop_time - start_time)); diff --git a/nntrainer/layers/layer_internal.h b/nntrainer/layers/layer_internal.h index 9579e028..47517f82 100644 --- a/nntrainer/layers/layer_internal.h +++ b/nntrainer/layers/layer_internal.h @@ -114,7 +114,7 @@ public: virtual void forwarding(sharedConstTensors in = {}) = 0; virtual sharedConstTensors forwarding_with_val(sharedConstTensors input, - sharedConstTensors in = {}); + sharedConstTensors in = {}); /** * @brief Back Propagation of a layer @@ -124,9 +124,9 @@ public: */ virtual void backwarding(int 
iteration, sharedConstTensors in = {}) = 0; - virtual sharedConstTensors backwarding_with_val(int iteration, - sharedConstTensors deriv, - sharedConstTensors in = {}); + virtual sharedConstTensors backwarding_with_val(int iteration, + sharedConstTensors deriv, + sharedConstTensors in = {}); /** * @brief read layer Weight & Bias data from file @@ -315,12 +315,12 @@ public: std::vector getGradient(); - void resizeNetInput(unsigned int size){net_input.resize(size);} - - void resizeNetOutput(unsigned int size){net_hidden.resize(size);} + void resizeNetInput(unsigned int size) { net_input.resize(size); } - unsigned int getNumInputs(){return num_inputs;} - unsigned int getNumOutputs(){return num_outputs;} + void resizeNetOutput(unsigned int size) { net_hidden.resize(size); } + + unsigned int getNumInputs() { return num_inputs; } + unsigned int getNumOutputs() { return num_outputs; } void setInputBuffer(unsigned int i, std::shared_ptr n_buffer) { if (i >= net_input.size())