virtual int reinitialize() {
int status = layer.initialize();
EXPECT_EQ(status, ML_ERROR_NONE);
+
in = nntrainer::Tensor(layer.getInputDimension()[0]);
out = nntrainer::Tensor(layer.getOutputDimension()[0]);
+
+ layer.resizeNetInput(layer.getNumInputs());
+ layer.resizeNetOutput(layer.getNumOutputs());
+
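+ // wire var/grad NetBuffers to every input and output of the layer
+ // so forwarding_with_val/backwarding_with_val have concrete buffers to use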
+ for (unsigned int i = 0; i < layer.getNumInputs(); ++i) {
+ std::shared_ptr<nntrainer::NetBuffers> n_buffer =
+ std::make_shared<nntrainer::NetBuffers>();
+ n_buffer->var = nntrainer::Tensor(layer.getInputDimension()[i]);
+ n_buffer->grad = nntrainer::Tensor(layer.getInputDimension()[i]);
+ layer.setInputBuffer(i, n_buffer);
+ }
+
+ for (unsigned int i = 0; i < layer.getNumOutputs(); ++i) {
+ std::shared_ptr<nntrainer::NetBuffers> n_buffer =
+ std::make_shared<nntrainer::NetBuffers>();
+ n_buffer->var = nntrainer::Tensor(layer.getOutputDimension()[i]);
+ n_buffer->grad = nntrainer::Tensor(layer.getOutputDimension()[i]);
+ layer.setOutputBuffer(i, n_buffer);
+ }
+
return status;
}
}
// set the optimizer with its properties separated by "|"
- int setOptimizer(const std::string &type, const std::string &str = "") {
+ int setOptimizer(nntrainer::OptType type, const std::string &str = "") {
std::vector<std::string> input_str;
std::regex words_regex("[^|]+");
auto words_begin =
* @brief Input Layer
*/
TEST_F(nntrainer_InputLayer, setOptimizer_01_p) {
- status = setOptimizer("adam", "learning_rate=0.001 |"
- "beta1=0.9 |"
- "beta2=0.9999 |"
- "epsilon=1e-7");
+ status = setOptimizer(nntrainer::OptType::ADAM, "learning_rate=0.001 |"
+ "beta1=0.9 |"
+ "beta2=0.9999 |"
+ "epsilon=1e-7");
EXPECT_EQ(status, ML_ERROR_NONE);
}
* @brief Fully Connected Layer
*/
TEST_F(nntrainer_FullyConnectedLayer, setOptimizer_01_p) {
- status = setOptimizer("adam", "learning_rate=0.001 |"
- "beta1=0.9 |"
- "beta2=0.9999 |"
- "epsilon=1e-7");
+ status = setOptimizer(nntrainer::OptType::ADAM, "learning_rate=0.001 |"
+ "beta1=0.9 |"
+ "beta2=0.9999 |"
+ "epsilon=1e-7");
EXPECT_EQ(status, ML_ERROR_NONE);
}
* @brief FullyConnected Layer
*/
TEST_F(nntrainer_FullyConnectedLayer, setOptimizer_02_p) {
- status = setOptimizer("sgd", "learning_rate=0.1");
+ status = setOptimizer(nntrainer::OptType::SGD, "learning_rate=0.1");
EXPECT_EQ(status, ML_ERROR_NONE);
}
-
/**
* @brief Fully Connected Layer
*/
{"input_shape=" + getDimensionString(layer.getOutputDimension()[0])});
EXPECT_EQ(status, ML_ERROR_NONE);
+ act_layer->setBatch(layer.getOutputDimension()[0].batch());
+
status = act_layer->initialize();
EXPECT_EQ(status, ML_ERROR_NONE);
+
+ act_layer->resizeNetInput(act_layer->getNumInputs());
+ act_layer->resizeNetOutput(act_layer->getNumOutputs());
+
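+ // same buffer wiring as reinitialize(): give the activation layer a var/grad pair per input and output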
+ for (unsigned int i = 0; i < act_layer->getNumInputs(); ++i) {
+ std::shared_ptr<nntrainer::NetBuffers> n_buffer =
+ std::make_shared<nntrainer::NetBuffers>();
+ n_buffer->var = nntrainer::Tensor(act_layer->getInputDimension()[i]);
+ n_buffer->grad = nntrainer::Tensor(act_layer->getInputDimension()[i]);
+ act_layer->setInputBuffer(i, n_buffer);
+ }
+
+ for (unsigned int i = 0; i < act_layer->getNumOutputs(); ++i) {
+ std::shared_ptr<nntrainer::NetBuffers> n_buffer =
+ std::make_shared<nntrainer::NetBuffers>();
+ n_buffer->var = nntrainer::Tensor(act_layer->getOutputDimension()[i]);
+ n_buffer->grad = nntrainer::Tensor(act_layer->getOutputDimension()[i]);
+ act_layer->setOutputBuffer(i, n_buffer);
+ }
+
layers.push_back(act_layer);
}
{"input_shape=" + getDimensionString(layer.getOutputDimension()[0])});
EXPECT_EQ(status, ML_ERROR_NONE);
+ loss_layer->setBatch(layer.getOutputDimension()[0].batch());
+
status = loss_layer->initialize();
EXPECT_EQ(status, ML_ERROR_NONE);
status = loss_layer->setLoss(type);
EXPECT_EQ(status, ML_ERROR_NONE);
+
+ loss_layer->resizeNetInput(loss_layer->getNumInputs());
+ loss_layer->resizeNetOutput(loss_layer->getNumOutputs());
+
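+ // wire var/grad buffers for the loss layer's inputs and outputs as well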
+ for (unsigned int i = 0; i < loss_layer->getNumInputs(); ++i) {
+ std::shared_ptr<nntrainer::NetBuffers> n_buffer =
+ std::make_shared<nntrainer::NetBuffers>();
+ n_buffer->var = nntrainer::Tensor(loss_layer->getInputDimension()[i]);
+ n_buffer->grad = nntrainer::Tensor(loss_layer->getInputDimension()[i]);
+ loss_layer->setInputBuffer(i, n_buffer);
+ }
+
+ for (unsigned int i = 0; i < loss_layer->getNumOutputs(); ++i) {
+ std::shared_ptr<nntrainer::NetBuffers> n_buffer =
+ std::make_shared<nntrainer::NetBuffers>();
+ n_buffer->var = nntrainer::Tensor(loss_layer->getOutputDimension()[i]);
+ n_buffer->grad = nntrainer::Tensor(loss_layer->getOutputDimension()[i]);
+ loss_layer->setOutputBuffer(i, n_buffer);
+ }
+
layers.push_back(loss_layer);
if (type == nntrainer::LossType::LOSS_ENTROPY_SOFTMAX) {
void matchForwarding(const char *file) {
sharedConstTensor out;
- EXPECT_NO_THROW(out = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
if (layers.size() > 0) {
for (unsigned int idx = 0; idx < layers.size() - 1; idx++) {
- EXPECT_NO_THROW(out = layers[idx]->forwarding({out})[0]);
+ EXPECT_NO_THROW(out = layers[idx]->forwarding_with_val({out})[0]);
}
- if (nntrainer::istrequal(layers.back()->getType(),
- nntrainer::LossLayer::type)) {
+ if (layers.back()->getType() == nntrainer::LayerType::LAYER_LOSS) {
std::shared_ptr<nntrainer::LossLayer> loss_layer =
std::static_pointer_cast<nntrainer::LossLayer>(layers.back());
EXPECT_NO_THROW(out = loss_layer->forwarding({out}, {label})[0]);
} else {
- EXPECT_NO_THROW(out = layers.back()->forwarding({out})[0]);
+ EXPECT_NO_THROW(out = layers.back()->forwarding_with_val({out})[0]);
}
EXPECT_EQ(status, ML_ERROR_NONE);
}
MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 15));
sharedConstTensor back_out;
- if (layers.size() && nntrainer::istrequal(layers.back()->getType(),
- nntrainer::LossLayer::type)) {
+ if (layers.size() &&
+ layers.back()->getType() == nntrainer::LayerType::LAYER_LOSS) {
if (with_loss) {
- EXPECT_NO_THROW(back_out = layers.back()->backwarding({label}, 1)[0]);
+ EXPECT_NO_THROW(layers.back()->backwarding(1, {label}));
+ back_out = MAKE_SHARED_TENSOR(layers.back()->getGradient()[0]);
} else {
back_out = def_derivative;
}
}
for (; idx >= 0; --idx)
- EXPECT_NO_THROW(back_out = layers[idx]->backwarding({back_out}, 1)[0]);
+ EXPECT_NO_THROW(back_out = layers[idx]->backwarding_with_val(1, {back_out})[0]);
- EXPECT_NO_THROW(back_out = layer.backwarding({back_out}, 1)[0]);
+ EXPECT_NO_THROW(back_out = layer.backwarding_with_val(1, {back_out})[0]);
matchOutput(*back_out.get(), file_dx);
loadUpdatedWeightsGradients(file_uw, file_g);
std::vector<float> weight_data;
std::vector<float> bias_data;
- setOptimizer("adam", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::ADAM, "learning_rate=1.0");
sharedConstTensor out;
- EXPECT_NO_THROW(out = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor derivatives(3, 1, 1, 15);
nntrainer::Tensor result;
EXPECT_NO_THROW(
- result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
+ result = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(derivatives)})[0]);
matchOutput(result, "tc_fc_1_goldenFCGradientAdam.out");
*/
TEST_F(nntrainer_FullyConnectedLayer_TFmatch,
forwarding_backwarding_loss_00_p) {
- setOptimizer("adam", "learning_rate=0.0001");
+ setOptimizer(nntrainer::OptType::ADAM, "learning_rate=0.0001");
addLoss(nntrainer::LossType::LOSS_ENTROPY_SOFTMAX);
matchForwarding("tc_fc_1_goldenFCResultSoftmaxCrossAdam.out");
*/
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_01_p) {
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding and backwarding without loss */
matchForwarding("tc_fc_1_goldenFCResultActNone.out");
addActivation(nntrainer::ActivationType::ACT_SIGMOID);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSigmoidMse.out");
addActivation(nntrainer::ActivationType::ACT_SOFTMAX);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSoftmaxMse.out");
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_04_p) {
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultActNone.out");
addActivation(nntrainer::ActivationType::ACT_SIGMOID);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSigmoidMse.out");
addActivation(nntrainer::ActivationType::ACT_SOFTMAX);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSoftmaxMse.out");
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_07_p) {
addLoss(nntrainer::LossType::LOSS_ENTROPY_SIGMOID);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSigmoidCross.out");
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_08_p) {
addLoss(nntrainer::LossType::LOSS_ENTROPY_SOFTMAX);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSoftmaxCross.out");
virtual void prepareLayer() {
setProperty("input_shape=1:1:12 | epsilon=0.001 | momentum=0.90");
setBatch(3);
- setOptimizer("sgd", "learning_rate=1");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1");
}
};
*/
TEST_F(nntrainer_BatchNormalizationLayer, setOptimizer_01_p) {
status = setOptimizer(
- "adam", "learning_rate=0.001 | beta1=0.9 | beta2=0.9999 | epsilon=1e-7");
+ nntrainer::OptType::ADAM, "learning_rate=0.001 | beta1=0.9 | beta2=0.9999 | epsilon=1e-7");
EXPECT_EQ(status, ML_ERROR_NONE);
}
sharedConstTensor forward_result;
EXPECT_NO_THROW(forward_result =
- layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(*forward_result, "tc_bn_fc_1_goldenBNResultForward.out");
nntrainer::Tensor backward_in(layer.getOutputDimension()[0]);
loadFile("tc_bn_fc_1_goldenBNLayerBackwardDxIn.out", backward_in);
nntrainer::Tensor backward_result =
- *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
+ *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(backward_in)})[0];
matchOutput(backward_result, "tc_bn_fc_1_goldenBNLayerBackwardDx.out");
}
virtual void prepareLayer() {
setProperty("input_shape=2:4:5 | epsilon=0.001 | momentum=0.90");
setBatch(3);
- setOptimizer("sgd", "learning_rate=1");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1");
}
};
layer.setTrainable(true);
sharedConstTensor forward_result;
- forward_result = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0];
+ forward_result = layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0];
matchOutput(*forward_result, "tc_bn_conv_1_goldenBNResultForward.out");
nntrainer::Tensor backward_in(layer.getOutputDimension()[0]);
loadFile("tc_bn_conv_1_goldenBNLayerBackwardDxIn.out", backward_in);
nntrainer::Tensor backward_result =
- *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
+ *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(backward_in)})[0];
matchOutput(backward_result, "tc_bn_conv_1_goldenBNLayerBackwardDx.out");
}
virtual void prepareLayer() {
setProperty("input_shape=2:4:5 | epsilon=0.001 | momentum=0.90");
setBatch(1);
- setOptimizer("sgd", "learning_rate=1");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1");
}
};
layer.setTrainable(true);
sharedConstTensor forward_result;
- forward_result = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0];
+ forward_result = layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0];
matchOutput(*forward_result, "tc_bn_conv_2_goldenBNResultForward.out");
nntrainer::Tensor backward_in(layer.getOutputDimension()[0]);
loadFile("tc_bn_conv_2_goldenBNLayerBackwardDxIn.out", backward_in);
nntrainer::Tensor backward_result =
- *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
+ *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(backward_in)})[0];
matchOutput(backward_result, "tc_bn_conv_2_goldenBNLayerBackwardDx.out");
}
loadFile("tc_conv2d_1_conv2DLayer.in", in);
loadFile("tc_conv2d_1_conv2DKernel.in", layer);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_conv2d_1_goldenConv2DResult.out");
}
loadFile("tc_conv2d_2_conv2DLayer.in", in);
loadFile("tc_conv2d_2_conv2DKernel.in", layer);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_conv2d_2_goldenConv2DResult.out");
}
loadFile("tc_conv2d_1_conv2DLayer.in", in);
loadFile("tc_conv2d_1_conv2DKernel.in", layer);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
+
EXPECT_NO_THROW(
- result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
+ result = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(derivatives)})[0]);
nntrainer::Weight *param_data = layer.getWeights().get();
loadFile("tc_conv2d_2_conv2DLayer.in", in);
loadFile("tc_conv2d_2_conv2DKernel.in", layer);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
EXPECT_NO_THROW(
- result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
+ result = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(derivatives)})[0]);
param_data = layer.getWeights().get();
for (unsigned int i = 0; i < filter_size * 2; ++i) {
matchOutput(bias_grad, "tc_conv2d_2_goldenBiasGrad.out");
for (int i = 0; i < 4; i++) {
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
EXPECT_NO_THROW(
- result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
+ result = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(derivatives)})[0]);
}
param_data = layer.getWeights().get();
loadFile("tc_conv2d_int_conv2DKernel.in", layer1);
std::shared_ptr<nntrainer::Optimizer> op;
- EXPECT_NO_THROW(op = nntrainer::createOptimizer("sgd"));
+ EXPECT_NO_THROW(op = nntrainer::createOptimizer(nntrainer::OptType::SGD));
status = op->setProperty({"learning_rate=1.0"});
EXPECT_EQ(status, ML_ERROR_NONE);
status = layer1.setOptimizer(op);
loadFile("tc_conv2d_int_conv2DKernel2.in", layer2);
std::shared_ptr<nntrainer::Optimizer> op2;
- EXPECT_NO_THROW(op2 = nntrainer::createOptimizer("sgd"));
+ EXPECT_NO_THROW(op2 = nntrainer::createOptimizer(nntrainer::OptType::SGD));
status = op2->setProperty({"learning_rate=1.0"});
EXPECT_EQ(status, ML_ERROR_NONE);
status = layer2.setOptimizer(op2);
EXPECT_EQ(status, ML_ERROR_NONE);
- setOptimizer("sgd", "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
unsigned int filter_size;
std::vector<float> grad_data;
nntrainer::Tensor derivatives(1, 12, 24, 24);
nntrainer::Tensor out1;
- EXPECT_NO_THROW(out1 = *layer1.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out1 = *layer1.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor out2;
- EXPECT_NO_THROW(out2 = *layer2.forwarding({MAKE_SHARED_TENSOR(out1)})[0]);
+ EXPECT_NO_THROW(out2 = *layer2.forwarding_with_val({MAKE_SHARED_TENSOR(out1)})[0]);
matchOutput(out1, "tc_conv2d_int_goldenConv2DResult.out");
matchOutput(out2, "tc_conv2d_int_goldenConv2DResult2.out");
nntrainer::Tensor result2;
EXPECT_NO_THROW(
- result2 = *layer2.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
+ result2 = *layer2.backwarding_with_val(1, {MAKE_SHARED_TENSOR(derivatives)})[0]);
EXPECT_NO_THROW(result =
- *layer1.backwarding({MAKE_SHARED_TENSOR(result2)}, 1)[0]);
+ *layer1.backwarding_with_val(1, {MAKE_SHARED_TENSOR(result2)})[0]);
/** Compare second conv */
param_data = layer2.getWeights().get();
loadFile("tc_conv2d_3_conv2DLayer.in", in);
loadFile("tc_conv2d_3_conv2DKernel.in", layer);
- setOptimizer("sgd", "learning_rate=1.0");
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
EXPECT_NO_THROW(
- result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
+ result = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(derivatives)})[0]);
nntrainer::Weight *param_data = layer.getWeights().get();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Daverage.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_max.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_average.out");
}
reinitialize();
loadFile("tc_pooling2d_2.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_max.out");
}
loadFile("tc_pooling2d_2.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_average.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(grad)})[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2DmaxGrad.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
sharedTensor grad = MAKE_SHARED_TENSOR(out.getDim());
grad->getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = *layer.backwarding({grad}, 0)[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding_with_val(1, {grad})[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2DaverageGrad.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(grad)})[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_maxGrad.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(grad)})[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_averageGrad.out");
}
loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", in);
- EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", out);
- EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(out)}, 0)[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(out)})[0]);
EXPECT_EQ(in.getDim(), nntrainer::TensorDim(1, 2, 4, 4));
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dmax.out");
loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", out);
- EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(out)}, 0)[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding_with_val(1, {MAKE_SHARED_TENSOR(out)})[0]);
EXPECT_EQ(in.getDim(), nntrainer::TensorDim(2, 2, 4, 4));
matchOutput(in, "tc_pooling2d_2_goldenPooling2Dmax.out");
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
EXPECT_THROW(
- layer.forwarding({MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
+ layer.forwarding( {MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
std::runtime_error);
}
TEST(nntrainer_LossLayer, backward_loss_unknown_n) {
nntrainer::LossLayer layer;
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
- EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}, 1),
+ EXPECT_THROW(layer.backwarding(1, {MAKE_SHARED_TENSOR(a)}),
std::runtime_error);
}
nntrainer::LossLayer layer;
layer.setLoss(nntrainer::LossType::LOSS_ENTROPY);
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
- EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}, 1),
+ EXPECT_THROW(layer.backwarding(1, {MAKE_SHARED_TENSOR(a)}),
std::runtime_error);
}
GEN_TEST_INPUT(expected,
nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1)));
nntrainer::Tensor result;
- EXPECT_NO_THROW(result = *layer.forwarding({MAKE_SHARED_TENSOR(input)})[0]);
+ EXPECT_NO_THROW(result = *layer.forwarding_with_val({MAKE_SHARED_TENSOR(input)})[0]);
EXPECT_TRUE(result == expected);
expected.copy(input);
- EXPECT_NO_THROW(result = *layer.backwarding(
- {MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 10))}, 1)[0]);
+ EXPECT_NO_THROW(result = *layer.backwarding_with_val(1,
+ {MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 10))})[0]);
GEN_TEST_INPUT(expected,
nntrainer::ActivationLayer::reluPrime(
nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1))));
in = nntrainer::Tensor();
- EXPECT_THROW(layer.forwarding({input}), std::runtime_error);
+ EXPECT_THROW(layer.forwarding_with_val({input}), std::runtime_error);
}
/*
in = nntrainer::Tensor(layer.getInputDimension()[0]);
- EXPECT_THROW(layer.forwarding({input}), std::runtime_error);
+ EXPECT_THROW(layer.forwarding_with_val({input}), std::runtime_error);
}
TEST_F(nntrainer_AdditionLayer, DISABLED_forwarding_03_p) {
input.get()[1] = *input;
- EXPECT_NO_THROW(layer.forwarding({input}));
+ EXPECT_NO_THROW(layer.forwarding_with_val({input}));
}
/**
* Watcher Classes *
********************************************************/
-using NodeType = nntrainer::NeuralNetwork::NodeType;
+using NodeType = nntrainer::LayerNode;
using FlatGraphType = nntrainer::NeuralNetwork::FlatGraphType;
+using NetworkGraphType = nntrainer::NetworkGraph;
/**
* @brief verify tensor to the reference and throw if not match to stop
* @param node node to watch.
*/
NodeWatcher(const NodeType &node) : node(node) {
- unsigned int num_weights = node->getNumWeights();
- node->setTrainable(true);
+ unsigned int num_weights = node.layer->getNumWeights();
+ node.layer->setTrainable(true);
for (unsigned int i = 0; i < num_weights; ++i) {
- const nntrainer::Weight &w = node->weightAt(i);
+ const nntrainer::Weight &w = node.layer->weightAt(i);
expected_weights.push_back(w);
}
- expected_output = nntrainer::Tensor(node->getOutputDimension()[0]);
- expected_dx = nntrainer::Tensor(node->getInputDimension()[0]);
+ expected_output = nntrainer::Tensor(node.layer->getOutputDimension()[0]);
+ expected_dx = nntrainer::Tensor(node.layer->getInputDimension()[0]);
}
/**
*
*/
void readLayerWeight(std::ifstream &f) {
- for (unsigned int i = 0; i < node->getNumWeights(); ++i) {
+ for (unsigned int i = 0; i < node.layer->getNumWeights(); ++i) {
/// @note below relies on the fact that the tensor shares the same base memory
- node->weightAt(i).getVariable().read(f);
+ node.layer->weightAt(i).getVariable().read(f);
}
}
* @param iteration iteration
* @return nntrainer::sharedConstTensor
*/
- nntrainer::sharedConstTensors forward(nntrainer::sharedConstTensors in,
- int iteration);
+ void forward(int iteration);
/**
* @brief forward loss node with verifying inputs/weights/outputs
* @param should_verify should verify the inputs/gradients/outputs
* @return nntrainer::sharedConstTensor
*/
- nntrainer::sharedConstTensors backward(nntrainer::sharedConstTensors deriv,
- int iteration,
- bool should_verify = true);
+ void backward(int iteration, bool should_verify = true);
/**
* @brief verify weights of the current node
*
* @return float loss
*/
- float getLoss() { return node->getLoss(); }
+ float getLoss() { return node.layer->getLoss(); }
/**
* @brief read Node
void NodeWatcher::verifyWeight(const std::string &error_msg) {
for (unsigned int i = 0; i < expected_weights.size(); ++i) {
- verify(node->weightAt(i).getVariable(), expected_weights[i].getVariable(),
- error_msg + " " + node->weightAt(i).getName() + " weight");
+ verify(node.layer->weightAt(i).getVariable(),
+ expected_weights[i].getVariable(),
+ error_msg + " " + node.layer->weightAt(i).getName() + " weight");
}
}
void NodeWatcher::verifyGrad(const std::string &error_msg) {
for (unsigned int i = 0; i < expected_weights.size(); ++i) {
- auto weight = node->weightAt(i);
+ auto weight = node.layer->weightAt(i);
if (weight.getTrainable()) {
verify(weight.getGradient(), expected_weights[i].getGradient(),
error_msg + " " + weight.getName() + " grad");
}
}
-nntrainer::sharedConstTensors
-NodeWatcher::forward(nntrainer::sharedConstTensors in, int iteration) {
+void NodeWatcher::forward(int iteration) {
std::stringstream ss;
- ss << "forward failed at " << node->getName() << " at iteration "
+ ss << "forward failed at " << node.layer->getName() << " at iteration "
<< iteration;
std::string err_msg = ss.str();
- nntrainer::sharedConstTensors out = node->forwarding(in);
- verify(*out[0], expected_output, err_msg + " at output");
- return out;
+ std::vector<nntrainer::Tensor> out = node.layer->getHidden();
+
+ verify(out[0], expected_output, err_msg + " at output");
}
nntrainer::sharedConstTensors
NodeWatcher::lossForward(nntrainer::sharedConstTensors pred,
nntrainer::sharedConstTensors answer, int iteration) {
std::stringstream ss;
- ss << "loss failed at " << node->getName() << " at iteration " << iteration;
+ ss << "loss failed at " << node.layer->getName() << " at iteration "
+ << iteration;
std::string err_msg = ss.str();
nntrainer::sharedConstTensors out =
- std::static_pointer_cast<nntrainer::LossLayer>(node)->forwarding(pred,
- answer);
+ std::static_pointer_cast<nntrainer::LossLayer>(node.layer)
+ ->forwarding(pred, answer);
return out;
}
-nntrainer::sharedConstTensors
-NodeWatcher::backward(nntrainer::sharedConstTensors deriv, int iteration,
- bool should_verify) {
+void NodeWatcher::backward(int iteration, bool should_verify) {
std::stringstream ss;
- ss << "backward failed at " << node->getName() << " at iteration "
+ ss << "backward failed at " << node.layer->getName() << " at iteration "
<< iteration;
std::string err_msg = ss.str();
- nntrainer::sharedConstTensors out = node->backwarding(deriv, iteration);
+ std::vector<nntrainer::Tensor> out = node.layer->getGradient();
+
if (should_verify) {
+ verify(out[0], expected_dx, err_msg);
verifyGrad(err_msg);
- verify(*out[0], expected_dx, err_msg);
verifyWeight(err_msg);
}
-
- return out;
}
GraphWatcher::GraphWatcher(const std::string &config) {
throw std::invalid_argument("load from config failed!");
};
- if (nn.init()) {
+ if (nn.compile()) {
+ throw std::invalid_argument("compilation failed");
+ };
+
+ if (nn.initialize()) {
throw std::invalid_argument("initiation failed");
};
- FlatGraphType graph = nn.getFlatGraph();
+ NetworkGraphType model_graph = nn.getNetworkGraph();
+
+ std::vector<NodeType> graph = model_graph.getSorted();
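+
+ // build a watcher for every sorted node except the last (the loss layer is tracked via loss_node)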
for (auto it = graph.begin(); it != graph.end() - 1; ++it) {
nodes.push_back(NodeWatcher(*it));
readIteration(ref);
- /// forward pass
- for (auto &i : nodes)
- input = i.forward(input, iteration);
-
- loss_node.lossForward(input, label, iteration);
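+ // forwarding now runs the whole network in one call; each NodeWatcher only verifies its stored output below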
+ nn.forwarding(input, label);
EXPECT_NEAR(expected_loss, loss_node.getLoss(), nntrainer::Tensor::epsilon);
- /// backward pass and update weights
- nntrainer::sharedConstTensors output =
- loss_node.backward(label, iteration, false);
- for (auto it = nodes.rbegin(); it != nodes.rend(); it++)
- output = it->backward(output, iteration);
+ for (auto it = nodes.begin(); it != nodes.end() - 1; ++it) {
+ it->forward(iteration);
+ }
+
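+ // graph-level backwarding updates all nodes; gradients and weights are then verified per node in reverse order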
+ nn.getNetworkGraph().backwarding(label, iteration);
+
+ for (auto it = nodes.rbegin(); it != nodes.rend() - 1; it++)
+ it->backward(iteration);
}
}
// clang-format off
INI fc_sigmoid_mse(
"fc_sigmoid_mse",
- {
- nn_base + "learning_rate=1 | optimizer=sgd | loss=mse | batch_size = 3",
- I("input") + input_base + "input_shape = 1:1:3",
- I("dense") + fc_base + "unit = 5",
- I("act") + sigmoid_base,
- I("dense_1") + fc_base + "unit = 10",
- I("act_1") + softmax_base
- }
-);
+ {nn_base + "learning_rate=1 | optimizer=sgd | loss=mse | batch_size = 3",
+ I("input") + input_base + "input_shape = 1:1:3",
+ I("dense") + fc_base + "unit = 5" + "input_layers=input",
+ I("act") + sigmoid + "input_layers=dense",
+ I("dense_1") + fc_base + "unit = 10" + "input_layers=act",
+ I("act_1") + softmax + "input_layers=dense_1"});
INI fc_sigmoid_cross =
INI("fc_sigmoid_cross") + fc_sigmoid_mse + "model/loss=cross";
INI fc_relu_mse(
"fc_relu_mse",
- {
- nn_base + "Learning_rate=0.1 | Optimizer=sgd | Loss=mse | batch_size = 3",
- I("input") + input_base + "input_shape = 1:1:3",
- I("dense") + fc_base + "unit = 10",
- I("act") + relu_base,
- I("dense_1") + fc_base + "unit = 2",
- I("act_1") + sigmoid_base
- }
-);
+ {nn_base + "Learning_rate=0.1 | Optimizer=sgd | Loss=mse | batch_size = 3",
+ I("input") + input_base + "input_shape = 1:1:3",
+ I("dense") + fc_base + "unit = 10" + "input_layers=input",
+ I("act") + relu + "input_layers=dense",
+ I("dense_1") + fc_base + "unit = 2" + "input_layers=act",
+ I("act_1") + sigmoid + "input_layers=dense" + "input_layers=dense_1"});
INI fc_bn_sigmoid_cross(
"fc_bn_sigmoid_cross",
- {
- nn_base + "learning_rate=1 | optimizer=sgd | loss=cross | batch_size = 3",
- I("input") + input_base + "input_shape = 1:1:3",
- I("dense") + fc_base + "unit = 10",
- I("bn") + bn_base,
- I("act") + sigmoid_base,
- I("dense_2") + fc_base + "unit = 10",
- I("act_3") + softmax_base
- }
-);
+ {nn_base + "learning_rate=1 | optimizer=sgd | loss=cross | batch_size = 3",
+ I("input") + input_base + "input_shape = 1:1:3",
+ I("dense") + fc_base + "unit = 10" + "input_layers=input",
+ I("bn") + bn_base + "input_layers=dense",
+ I("act") + sigmoid + "input_layers=bn",
+ I("dense_2") + fc_base + "unit = 10" + "input_layers=act",
+ I("act_3") + softmax + "input_layers=dense_2"});
INI fc_bn_sigmoid_mse =
INI("fc_bn_sigmoid_mse") + fc_bn_sigmoid_cross + "model/loss=mse";
{
nn_base + "learning_rate=0.1 | optimizer=sgd | loss=cross | batch_size=3",
I("input") + input_base + "input_shape=2:4:5",
- I("conv2d_c1_layer") + conv_base + "kernel_size=3,4 | filters=2",
- I("act_1") + sigmoid_base,
- I("pool_1") + mnist_pooling,
- I("flatten", "type=flatten"),
- I("outputlayer") + fc_base + "unit = 10",
- I("act_3") + softmax_base
+ I("conv2d_c1_layer") + conv_base + "kernel_size=3,4 | filters=2" +"input_layers=input",
+ I("act_1") + sigmoid_base +"input_layers=conv2d_c1_layer",
+ I("pool_1") + mnist_pooling+"input_layers=act_1",
+ I("flatten", "type=flatten")+"input_layers=pool_1" ,
+ I("outputlayer") + fc_base + "unit = 10" +"input_layers=flatten",
+ I("act_3") + softmax_base +"input_layers=outputlayer"
}
);