// NOTE(review): this span carried unresolved diff markers ('-'/'+').
// Resolved here to the post-patch state: init() -> initialize(), and the
// added model->compile() call after loadFromConfig().
validateTensor(&prop->input_meta, true);
validateTensor(&prop->output_meta, false);
// Post-patch API name: initialize() replaces the old init().
model->initialize();
model->readModel();
gst_tensors_info_copy(&inputTensorMeta, &prop->input_meta);
// NOTE(review): 'model' is assigned here AFTER being dereferenced above —
// these lines presumably come from two separate diff hunks of the same
// function; confirm statement order against the full source before applying.
model = new nntrainer::NeuralNetwork();
model->loadFromConfig(model_config);
// Added by the patch: compile the network graph once the config is loaded.
model->compile();
#if (DBG)
gint64 stop_time = g_get_real_time();
g_message("Model is loaded: %" G_GINT64_FORMAT, (stop_time - start_time));
// NOTE(review): unresolved re-indentation diff markers resolved to the '+'
// (parameter-aligned) form; declarations are otherwise unchanged.
virtual void forwarding(sharedConstTensors in = {}) = 0;
virtual sharedConstTensors forwarding_with_val(sharedConstTensors input,
                                               sharedConstTensors in = {});
/**
 * @brief Back Propagation of a layer
 */
virtual void backwarding(int iteration, sharedConstTensors in = {}) = 0;
virtual sharedConstTensors backwarding_with_val(int iteration,
                                                sharedConstTensors deriv,
                                                sharedConstTensors in = {});
/**
* @brief read layer Weight & Bias data from file
std::vector<Tensor> getGradient();
- void resizeNetInput(unsigned int size){net_input.resize(size);}
-
- void resizeNetOutput(unsigned int size){net_hidden.resize(size);}
+ void resizeNetInput(unsigned int size) { net_input.resize(size); }
- unsigned int getNumInputs(){return num_inputs;}
- unsigned int getNumOutputs(){return num_outputs;}
+ void resizeNetOutput(unsigned int size) { net_hidden.resize(size); }
+
+ unsigned int getNumInputs() { return num_inputs; }
+ unsigned int getNumOutputs() { return num_outputs; }
void setInputBuffer(unsigned int i, std::shared_ptr<NetBuffers> n_buffer) {
if (i >= net_input.size())