throw std::invalid_argument("Cannot find Layer");
}
+LayerNode &NetworkGraph::getSortedLayerNode(unsigned int ith) {
+
+ for (unsigned int i = 0; i < Sorted.size(); ++i) {
+ if (Sorted[i].index == ith) {
+ return Sorted[i];
+ }
+ }
+
+ throw std::invalid_argument("Cannot find Layer");
+}
+
void NetworkGraph::topologicalSort() {
std::stack<LayerNode> Stack;
std::shared_ptr<Layer> layer = nntrainer::createLayer("concat");
ensureName(layer, current.getName());
layer->num_inputs = current.num_inputs;
+ layer->input_dim.resize(layer->num_inputs);
layer->input_layers.clear();
for (unsigned int i = 0; i < current.input_layers.size(); ++i)
layer->input_layers.push_back(current.input_layers[i]);
layer->input_layers.push_back(current.getName());
layer->num_outputs = current.num_outputs;
+ layer->output_dim.resize(layer->num_outputs);
layer->output_layers.clear();
for (unsigned int i = 0; i < current.num_outputs; ++i)
layer->output_layers.push_back(current.output_layers[i]);
layer->input_layers.clear();
layer->input_layers.push_back(input_str);
+ std::shared_ptr<LossLayer> temp = std::dynamic_pointer_cast<LossLayer>(layer);
+ if (temp == nullptr)
+ return ML_ERROR_INVALID_PARAMETER;
+ temp->setLoss(updated_loss_type);
+
addLayerNode(layer);
return ML_ERROR_NONE;
}
}
}
- layers[idx]->num_outputs = count;
+ if (layers[idx]->num_outputs != count) {
+ layers[idx]->num_outputs = count;
+ layers[idx]->output_dim.resize(count);
+ }
std::cout << std::endl;
}
+
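+ // ensure the last layer has at least one output so the graph ends at "exit"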
+ if (layers.back()->num_outputs == 0) {
+ layers.back()->num_outputs = 1;
+ layers.back()->output_dim.resize(1);
+ layers.back()->output_layers.push_back("exit");
+ }
}
int NetworkGraph::realizeMultiOutputType(
return status;
}
+void NetworkGraph::setNumNetBufferSize() {
+ for (unsigned int i = 0; i < Sorted.size(); ++i) {
+ Sorted[i].input.resize(Sorted[i].layer->input_layers.size());
+ Sorted[i].hidden.resize(Sorted[i].layer->output_layers.size());
+ }
+}
+
LayerNode &NetworkGraph::getLayerNode(const std::string &layer_name) {
std::list<LayerNode>::iterator iter;
throw std::invalid_argument("Cannot find Layer");
}
+LayerNode &NetworkGraph::getSortedLayerNode(const std::string &layer_name) {
+
+ for (unsigned int i = 0; i < Sorted.size(); ++i) {
+ if (Sorted[i].layer->getName() == layer_name)
+ return Sorted[i];
+ }
+
+ throw std::invalid_argument("Cannot find Layer");
+}
+
int NetworkGraph::setEdge() {
int status = ML_ERROR_NONE;
namespace nntrainer {
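+
+/**
+ * @brief Variable and gradient tensors shared by two connected layers
+ */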
+struct NetBuffers {
+ Tensor var;
+ Tensor grad;
+};
+
struct LayerNode {
std::shared_ptr<Layer> layer;
unsigned int index;
+ /* TODO : These NetBuffers should be inside the layers */
+ std::vector<std::shared_ptr<NetBuffers>> input;
+ std::vector<std::shared_ptr<NetBuffers>> hidden;
};
class NetworkGraph {
+
+ friend class NeuralNetwork;
+
public:
NetworkGraph() : num_node(0), def_name_count(0){};
unsigned int getNumNode() { return num_node; }
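+
+ /**
+ * @brief Resize the input/hidden buffer lists of every sorted LayerNode
+ */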
+ void setNumNetBufferSize();
+
LayerNode &getLayerNode(unsigned int ith);
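+
+ /**
+ * @brief Get LayerNode in the topologically sorted list by original index
+ */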
+ LayerNode &getSortedLayerNode(unsigned int ith);
+
LayerNode &getLayerNode(const std::string &layer_name);
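+
+ /**
+ * @brief Get LayerNode in the topologically sorted list by layer name
+ */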
+ LayerNode &getSortedLayerNode(const std::string &layer_name);
+
void ensureName(std::shared_ptr<Layer> layer, const std::string &prefix,
bool force_rename);
std::vector<std::list<LayerNode>> adj;
std::vector<LayerNode> Sorted;
std::set<std::string> layer_names;
+ std::vector<std::shared_ptr<NetBuffers>> netBuffers;
int def_name_count;
};
std::vector<std::string> concat_layers = split(value, reg);
num_inputs = concat_layers.size();
+ input_dim.resize(num_inputs);
for (unsigned int i = 0; i < num_inputs; ++i)
input_layers.push_back(concat_layers[i]);
std::vector<std::string> concat_layers = split(value, reg);
num_outputs = concat_layers.size();
+ output_dim.resize(num_outputs);
for (unsigned int i = 0; i < num_outputs; ++i)
output_layers.push_back(concat_layers[i]);
* @param[in] d dimension to be set
*/
void setInputDimension(std::vector<TensorDim> d) { input_dim = d; }
+
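+ /**
+ * @brief Set the dimension of the i-th input
+ * @param[in] d dimension to be set
+ * @param[in] i index of the input to set
+ */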
+ void setInputDimension(TensorDim d, unsigned int i) { input_dim[i] = d; }
};
/**
return status;
}
+int NeuralNetwork::initialize() {
+ int status = ML_ERROR_NONE;
+
+ unsigned int n_layers = (unsigned int)model_graph.Sorted.size();
+
+ ml_logd("initializing neural network, layer size: %d", n_layers);
+
+ model_graph.setNumNetBufferSize();
+
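+ // initialize each layer in topological order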
+ for (unsigned int idx = 0; idx < n_layers; ++idx) {
+ bool first = idx == 0;
+ Layer &l = *model_graph.getSortedLayerNode(idx).layer;
+ ml_logd("layer name : %s", l.getName().c_str());
+ const std::string &cur_type = l.getType();
+
+ if (!first) {
+ if (istrequal(model_graph.getSortedLayerNode(idx - 1).layer->getType(),
+ ActivationLayer::type) &&
+ istrequal(cur_type, ActivationLayer::type)) {
+ ml_loge("double activation is not allowed");
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
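+ // wire each input of this layer to the matching output slot of its
+ // producer through a shared NetBuffers instance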
+ for (unsigned int i = 0; i < l.input_layers.size(); ++i) {
+ ml_logd("  input layer : %s", l.input_layers[i].c_str());
+ std::shared_ptr<NetBuffers> n_buffer = std::make_shared<NetBuffers>();
+ // TODO : NetBuffer of layers are managed by graph
+ // model_graph.netBuffers.push_back(n_buffer);
+
+ Layer &in_layer =
+ *model_graph.getSortedLayerNode(l.input_layers[i]).layer;
+
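+ // locate which output slot of in_layer feeds this layer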
+ unsigned int location = 0;
+ for (unsigned int j = 0; j < in_layer.output_layers.size(); ++j) {
+ if (in_layer.output_layers[j] == l.getName()) {
+ location = j;
+ break;
+ }
+ }
+
+ l.setInputDimension(in_layer.getOutputDimension()[location], i);
+
+ n_buffer->var = Tensor(l.getInputDimension()[i]);
+ n_buffer->grad = Tensor(l.getInputDimension()[i]);
+
+ model_graph.getSortedLayerNode(idx).input[i] = n_buffer;
+
+ model_graph.getSortedLayerNode(l.input_layers[i]).hidden[location] =
+ n_buffer;
+ }
+ }
+
+ status = l.initialize();
+ NN_RETURN_STATUS();
+
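+ // only layer types holding trainable weights need the optimizer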
+ if (istrequal(cur_type, BatchNormalizationLayer::type) ||
+ istrequal(cur_type, Conv2DLayer::type) ||
+ istrequal(cur_type, FullyConnectedLayer::type)) {
+ status = l.setOptimizer(opt);
+ NN_RETURN_STATUS();
+ }
+ }
+
+ return status;
+}
+
int NeuralNetwork::init() {
int status = ML_ERROR_NONE;
std::vector<TensorDim> previous_dim;
*/
int init();
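+
+ /**
+ * @brief Initialize Network using the topologically sorted graph
+ * @retval #ML_ERROR_NONE Successful. Negative value on error.
+ */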
+ int initialize();
+
/**
* @brief Forward Propagation of the neural network
* @param[in] input List of Input Tensors taken by the neural network
} else {
EXPECT_EQ(status, ML_ERROR_NONE);
}
+
+ status = NN.initialize();
+
+ if (failAtLoad()) {
+ EXPECT_NE(status, ML_ERROR_NONE);
+ } else {
+ EXPECT_EQ(status, ML_ERROR_NONE);
+ }
}
static IniSection nw_base("model", "Type = NeuralNetwork | "