getData(dataFile, o, l, j);
try {
- float answer = NN.forwarding(MAKE_SHARED_TENSOR(nntrainer::Tensor({o})))
- ->apply(stepFunction)
- .getValue(0, 0, 0, 0);
+ float answer =
+ NN.forwarding({MAKE_SHARED_TENSOR(nntrainer::Tensor({o}))})[0]
+ ->apply(stepFunction)
+ .getValue(0, 0, 0, 0);
std::cout << answer << " : " << l[0] << std::endl;
cn += answer == l[0];
} catch (...) {
return 0;
}
try {
- test = mainNet.forwarding(MAKE_SHARED_TENSOR(in_tensor));
+ test = mainNet.forwarding({MAKE_SHARED_TENSOR(in_tensor)})[0];
} catch (...) {
std::cerr << "Error while forwarding the network" << std::endl;
return 0;
*/
nntrainer::sharedConstTensor Q;
try {
- Q = mainNet.forwarding(MAKE_SHARED_TENSOR(q_in));
+ Q = mainNet.forwarding({MAKE_SHARED_TENSOR(q_in)})[0];
} catch (...) {
std::cerr << "Error during forwarding main network" << std::endl;
return -1;
*/
nntrainer::sharedConstTensor NQ;
try {
- NQ = targetNet.forwarding(MAKE_SHARED_TENSOR(nq_in));
+ NQ = targetNet.forwarding({MAKE_SHARED_TENSOR(nq_in)})[0];
} catch (...) {
std::cerr << "Error during forwarding target network" << std::endl;
return -1;
nntrainer::Tensor in_tensor;
try {
in_tensor = nntrainer::Tensor(inbatch);
- mainNet.backwarding(MAKE_SHARED_TENSOR(in_tensor), Q, iter);
+ mainNet.backwarding({MAKE_SHARED_TENSOR(in_tensor)}, {Q}, iter);
} catch (...) {
std::cerr << "Error during backwarding the network" << std::endl;
return -1;
nntrainer::Tensor X;
try {
X = nntrainer::Tensor({featureVector});
- NN.forwarding(MAKE_SHARED_TENSOR(X))->apply(stepFunction);
+ NN.forwarding({MAKE_SHARED_TENSOR(X)})[0]->apply(stepFunction);
} catch (...) {
std::cerr << "Error while forwarding the model" << std::endl;
return 0;
std::shared_ptr<const nntrainer::Tensor> o;
try {
- o = model->inference(X);
+ o = model->inference({MAKE_SHARED_TENSOR(X)})[0];
} catch (std::exception &e) {
ml_loge("%s %s", typeid(e).name(), e.what());
return -2;
void save(std::ofstream &file){/* noop */};
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief setActivation by preset ActivationType
void save(std::ofstream &file){};
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief get the base name for the layer
BatchNormalizationLayer &operator=(BatchNormalizationLayer &&rhs) = default;
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief copy layer
void save(std::ofstream &file);
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief copy layer
void save(std::ofstream &file);
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief copy layer
void save(std::ofstream &file){};
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief get the base name for the layer
void save(std::ofstream &file){};
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief Initializer of Input Layer
* @param[in] in List of Input Tensors taken by this layer
* @retval List of Output Tensors
*/
- virtual sharedConstTensor forwarding(sharedConstTensor in) = 0;
+ virtual sharedConstTensors forwarding(sharedConstTensors in) = 0;
/**
* @brief Back Propagation of a layer
* @param[in] iteration Iteration value for the Optimizer
* @retval Derivative List of Tensor for the previous layer
*/
- virtual sharedConstTensor backwarding(sharedConstTensor in,
- int iteration) = 0;
+ virtual sharedConstTensors backwarding(sharedConstTensors in,
+ int iteration) = 0;
/**
* @brief read layer Weight & Bias data from file
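A short call-side sketch (illustration only, not part of the patch) of the updated pure-virtual pair, assuming an already-initialized std::shared_ptr<nntrainer::Layer> `node`, an input tensor `x`, and an upstream derivative tensor `d`:

// Illustration; node, x and d are assumed to exist.
nntrainer::sharedConstTensors outs = node->forwarding({MAKE_SHARED_TENSOR(x)});
nntrainer::sharedConstTensors prev =
  node->backwarding({MAKE_SHARED_TENSOR(d)}, /*iteration=*/0);
// prev now holds the derivative list handed on to the previous layer.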
~LossLayer(){};
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
* @brief Forward Propagation of a layer
* @param[in] label List of Label Tensors for the model
* @retval List of Input Tensors as it is.
*/
- sharedConstTensor forwarding(sharedConstTensor in, sharedConstTensor label);
+ sharedConstTensors forwarding(sharedConstTensors in,
+ sharedConstTensors label);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @brief read layer Weight & Bias data from file
* @param[in] input List of Input Tensors taken by the neural network
* @retval List of Output Tensors
*/
- sharedConstTensor forwarding(sharedConstTensor input);
+ sharedConstTensors forwarding(sharedConstTensors input);
/**
* @brief Forward Propagation of the neural network
* @param[in] label List of Label Tensors for the model
* @retval List of Output Tensors
*/
- sharedConstTensor forwarding(sharedConstTensor input,
- sharedConstTensor label);
+ sharedConstTensors forwarding(sharedConstTensors input,
+ sharedConstTensors label);
/**
* @brief Backward Propagation of the neural network
* @param[in] label List of Label Tensors for the model
* @param[in] iteration Iteration Number for the optimizer
*/
- void backwarding(sharedConstTensor input, sharedConstTensor label,
+ void backwarding(sharedConstTensors input, sharedConstTensors label,
int iteration);
/**
- * @param[in] X input tensor
- * @retval shared_ptr<const Tensor>
+ * @param[in] X List of Input Tensors for inference
+ * @retval List of Output Tensors
*/
- sharedConstTensor inference(const Tensor X);
+ sharedConstTensors inference(sharedConstTensors X);
/**
* @brief Run NeuralNetwork train with callback function by user
void save(std::ofstream &file){};
/**
- * @copydoc Layer::forwarding(sharedConstTensor in)
+ * @copydoc Layer::forwarding(sharedConstTensors in)
*/
- sharedConstTensor forwarding(sharedConstTensor in);
+ sharedConstTensors forwarding(sharedConstTensors in);
/**
- * @copydoc Layer::backwarding(sharedConstTensor in, int iteration)
+ * @copydoc Layer::backwarding(sharedConstTensors in, int iteration)
*/
- sharedConstTensor backwarding(sharedConstTensor in, int iteration);
+ sharedConstTensors backwarding(sharedConstTensors in, int iteration);
/**
* @copydoc Layer::setBatch(unsigned int batch)
typedef std::shared_ptr<const Tensor> sharedConstTensor;
+typedef std::vector<sharedConstTensor> sharedConstTensors;
+
+typedef std::vector<sharedTensor> sharedTensors;
+
} /* namespace nntrainer */
#endif /* __cplusplus */
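For orientation, a minimal caller-side sketch (illustration only, not part of the patch) of how the new vector aliases change the single-tensor idiom; it assumes the existing MAKE_SHARED_TENSOR macro and an already-initialized layer object `fc` with the updated signatures:

// Illustration; `fc` is an assumed, already-initialized layer.
nntrainer::Tensor x(nntrainer::TensorDim(1, 1, 1, 4));

// Old API: one shared tensor in, one shared tensor out.
// nntrainer::sharedConstTensor out = fc.forwarding(MAKE_SHARED_TENSOR(x));

// New API: inputs and outputs travel as vectors; single-output layers use index 0.
nntrainer::sharedConstTensors outs = fc.forwarding({MAKE_SHARED_TENSOR(x)});
const nntrainer::Tensor &y = *outs[0];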
return ML_ERROR_NONE;
}
-sharedConstTensor ActivationLayer::forwarding(sharedConstTensor in) {
- input = *in;
+sharedConstTensors ActivationLayer::forwarding(sharedConstTensors in) {
+ input = *in[0];
/// @note @a _act_fn is expected to work out of place and not modify @a input
hidden = _act_fn(input);
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
}
-sharedConstTensor ActivationLayer::backwarding(sharedConstTensor derivative,
- int iteration) {
- Tensor deriv = *derivative;
+sharedConstTensors ActivationLayer::backwarding(sharedConstTensors derivative,
+ int iteration) {
+ Tensor deriv = *derivative[0];
Tensor ret;
if (activation_type == ActivationType::ACT_SOFTMAX)
ret = _act_prime_fn(hidden, deriv);
else
ret = _act_prime_fn(input, deriv);
- return MAKE_SHARED_TENSOR(std::move(ret));
+ return {MAKE_SHARED_TENSOR(std::move(ret))};
}
int ActivationLayer::setActivation(
return status;
}
-sharedConstTensor AdditionLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors AdditionLayer::forwarding(sharedConstTensors in) {
hidden = Tensor(input_dim);
hidden.setZero();
for (unsigned int idx = 0; idx < num_inputs; ++idx) {
- if (input_dim != in.get()[idx].getDim())
+ if (input_dim != in[0].get()[idx].getDim())
throw std::runtime_error("Error: addition layer requires same "
"shape from all input layers");
- hidden.add_i(in.get()[idx]);
+ hidden.add_i(in[0].get()[idx]);
}
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
}
-sharedConstTensor AdditionLayer::backwarding(sharedConstTensor derivative,
- int iteration) {
+sharedConstTensors AdditionLayer::backwarding(sharedConstTensors derivative,
+ int iteration) {
sharedTensor ret = std::shared_ptr<Tensor>(new Tensor[num_inputs],
std::default_delete<Tensor[]>());
for (unsigned int idx = 0; idx < num_inputs; ++idx) {
Tensor &t = ret.get()[idx];
- t = *derivative;
+ t = *derivative[0];
}
- return ret;
+ return {ret};
}
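Worth noting (illustration only, not part of the patch): even with the vector signature, AdditionLayer still packs its several inputs into one shared array carried as element 0, which is what the `in[0].get()[idx]` indexing above reflects. A caller with two source tensors, here assumed as `t0` and `t1`, would assemble the input like this:

// Illustration; t0, t1 and add_layer are assumed to exist and be initialized.
nntrainer::sharedTensor packed = std::shared_ptr<nntrainer::Tensor>(
  new nntrainer::Tensor[2], std::default_delete<nntrainer::Tensor[]>());
packed.get()[0] = t0;
packed.get()[1] = t1;
nntrainer::sharedConstTensors sum = add_layer.forwarding({packed});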
void AdditionLayer::setProperty(const PropertyType type,
}
}
-sharedConstTensor BatchNormalizationLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors BatchNormalizationLayer::forwarding(sharedConstTensors in) {
Tensor &mu = weightAt(static_cast<int>(BNParams::mu)).getVariableRef();
Tensor &var = weightAt(static_cast<int>(BNParams::var)).getVariableRef();
Tensor &gamma = weightAt(static_cast<int>(BNParams::gamma)).getVariableRef();
Tensor &beta = weightAt(static_cast<int>(BNParams::beta)).getVariableRef();
- input = *in;
+ input = *in[0];
/// @todo change trainable #524
if (trainable) {
Tensor cmu = input.average(axes_to_reduce);
this->hidden.add(beta);
}
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
}
-sharedConstTensor
-BatchNormalizationLayer::backwarding(sharedConstTensor derivative,
+sharedConstTensors
+BatchNormalizationLayer::backwarding(sharedConstTensors derivative,
int iteration) {
Tensor &gamma = weightAt(static_cast<int>(BNParams::gamma)).getVariableRef();
Tensor &dgamma = weightAt(static_cast<int>(BNParams::gamma)).getGradientRef();
Tensor &dbeta = weightAt(static_cast<int>(BNParams::beta)).getGradientRef();
Tensor dx_normalized;
- Tensor deriv = *derivative;
+ Tensor deriv = *derivative[0];
int N = 1;
opt->apply_gradients(weight_list, num_weights, iteration);
- return MAKE_SHARED_TENSOR(std::move(dx));
+ return {MAKE_SHARED_TENSOR(std::move(dx))};
}
void BatchNormalizationLayer::copy(std::shared_ptr<Layer> l) {
void Conv2DLayer::save(std::ofstream &file) { Layer::save(file); }
-sharedConstTensor Conv2DLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors Conv2DLayer::forwarding(sharedConstTensors in) {
int status = ML_ERROR_NONE;
- input = *in;
+ input = *in[0];
if (normalization) {
input = input.normalization();
loss /= filter_size;
}
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
};
-sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
- int iteration) {
+sharedConstTensors Conv2DLayer::backwarding(sharedConstTensors derivatives,
+ int iteration) {
std::array<unsigned int, CONV2D_DIM> same_pad;
+ sharedConstTensor derivative = derivatives[0];
same_pad[0] = kernel_size[0] - 1;
same_pad[1] = kernel_size[1] - 1;
opt->apply_gradients(weight_list, num_weights, iteration);
}
- return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding.data())));
+ return {MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding.data())))};
}
void Conv2DLayer::copy(std::shared_ptr<Layer> l) {
}
}
-sharedConstTensor FullyConnectedLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors FullyConnectedLayer::forwarding(sharedConstTensors in) {
Tensor &weight =
weightAt(static_cast<int>(FCParams::weight)).getVariableRef();
Tensor &bias = weightAt(static_cast<int>(FCParams::bias)).getVariableRef();
- input = *in;
+ input = *in[0];
hidden = input.dot(weight);
hidden.add_i(bias);
loss = weight_regularizer_constant * 0.5f * (weight.l2norm());
}
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
}
void FullyConnectedLayer::read(std::ifstream &file) {
this->unit = from->unit;
}
-sharedConstTensor FullyConnectedLayer::backwarding(sharedConstTensor derivative,
- int iteration) {
+sharedConstTensors
+FullyConnectedLayer::backwarding(sharedConstTensors derivative, int iteration) {
unsigned int weight_idx = static_cast<int>(FCParams::weight);
unsigned int bias_idx = static_cast<int>(FCParams::bias);
Tensor &weight = weightAt(weight_idx).getVariableRef();
Tensor &djdw = weightAt(weight_idx).getGradientRef();
Tensor &djdb = weightAt(bias_idx).getGradientRef();
- Tensor ret = derivative->dot(weight, false, true);
- djdb = derivative->sum(0);
+ Tensor ret = derivative[0]->dot(weight, false, true);
+ djdb = derivative[0]->sum(0);
- djdw = input.dot(*derivative, true, false);
+ djdw = input.dot(*derivative[0], true, false);
if (isWeightRegularizerL2Norm())
djdw.add_i(weight, weight_regularizer_constant);
djdw = djdw.sum(0);
opt->apply_gradients(weight_list, num_weights, iteration);
}
- return MAKE_SHARED_TENSOR(std::move(ret));
+ return {MAKE_SHARED_TENSOR(std::move(ret))};
}
} /* namespace nntrainer */
return status;
}
-sharedConstTensor FlattenLayer::forwarding(sharedConstTensor in) {
- input = *in;
+sharedConstTensors FlattenLayer::forwarding(sharedConstTensors in) {
+ input = *in[0];
hidden = input;
hidden.reshape(output_dim);
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
}
-sharedConstTensor FlattenLayer::backwarding(sharedConstTensor in,
- int iteration) {
- Tensor temp = *in;
+sharedConstTensors FlattenLayer::backwarding(sharedConstTensors in,
+ int iteration) {
+ Tensor temp = *in[0];
temp.reshape(input_dim);
- return MAKE_SHARED_TENSOR(std::move(temp));
+ return {MAKE_SHARED_TENSOR(std::move(temp))};
}
} /* namespace nntrainer */
}
}
-sharedConstTensor InputLayer::forwarding(sharedConstTensor in) {
- input = *in;
+sharedConstTensors InputLayer::forwarding(sharedConstTensors in) {
+ input = *in[0];
hidden = input;
if (normalization)
if (standardization)
hidden = hidden.standardization();
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
}
-sharedConstTensor InputLayer::backwarding(sharedConstTensor in, int iteration) {
+sharedConstTensors InputLayer::backwarding(sharedConstTensors in,
+ int iteration) {
return in;
}
return status;
}
-sharedConstTensor LossLayer::forwarding(sharedConstTensor in,
- sharedConstTensor label) {
- input = *in;
- Tensor y2 = *label;
+sharedConstTensors LossLayer::forwarding(sharedConstTensors in,
+ sharedConstTensors label) {
+ input = *in[0];
+ Tensor y2 = *label[0];
Tensor y = input;
Tensor l;
}
updateLoss(l);
- return MAKE_SHARED_TENSOR(std::move(y));
+ return {MAKE_SHARED_TENSOR(std::move(y))};
}
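A small usage sketch (illustration only, not part of the patch), assuming prediction and label tensors `y_hat` and `y` plus an initialized `loss_layer`: the two-argument overload consumes labels and updates the stored loss, while the single-argument overload below only applies the activation implied by the loss type.

// Illustration; y_hat, y and loss_layer are assumed.
nntrainer::sharedConstTensors pred = {MAKE_SHARED_TENSOR(y_hat)};
nntrainer::sharedConstTensors label = {MAKE_SHARED_TENSOR(y)};
auto train_out = loss_layer.forwarding(pred, label); // computes and stores loss
auto infer_out = loss_layer.forwarding(pred);        // no label: activation only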
-sharedConstTensor LossLayer::forwarding(sharedConstTensor in) {
+sharedConstTensors LossLayer::forwarding(sharedConstTensors in) {
Tensor ret;
switch (loss_type) {
case LossType::LOSS_MSE:
return in;
case LossType::LOSS_ENTROPY_SIGMOID:
- ret = in->apply(ActivationLayer::sigmoid);
- return MAKE_SHARED_TENSOR(std::move(ret));
+ ret = in[0]->apply(ActivationLayer::sigmoid);
+ return {MAKE_SHARED_TENSOR(std::move(ret))};
case LossType::LOSS_ENTROPY_SOFTMAX:
- ret = in->apply(ActivationLayer::softmax);
- return MAKE_SHARED_TENSOR(std::move(ret));
+ ret = in[0]->apply(ActivationLayer::softmax);
+ return {MAKE_SHARED_TENSOR(std::move(ret))};
case LossType::LOSS_ENTROPY:
throw std::runtime_error(
"Error: Cross Entropy not supported without softmax or sigmoid.");
this->loss_type = from->loss_type;
}
-sharedConstTensor LossLayer::backwarding(sharedConstTensor derivative,
- int iteration) {
+sharedConstTensors LossLayer::backwarding(sharedConstTensors derivative,
+ int iteration) {
Tensor ret_derivative;
- Tensor y2 = *derivative;
+ Tensor y2 = *derivative[0];
Tensor y = input;
switch (loss_type) {
throw std::runtime_error("Unknown loss_type.");
}
- return MAKE_SHARED_TENSOR(std::move(ret_derivative));
+ return {MAKE_SHARED_TENSOR(std::move(ret_derivative))};
}
int LossLayer::setLoss(LossType l) {
/**
* @brief forward propagation using layers object which has layer
*/
-sharedConstTensor NeuralNetwork::forwarding(sharedConstTensor input) {
- sharedConstTensor X = input;
+sharedConstTensors NeuralNetwork::forwarding(sharedConstTensors input) {
+ sharedConstTensors X = input;
/** Do not forward the loss layer, as label is not available */
for (unsigned int i = 0; i < layers.size() - 1; i++) {
X = layers[i]->forwarding(X);
/**
* @brief forward propagation using layers object which has layer
*/
-sharedConstTensor NeuralNetwork::forwarding(sharedConstTensor input,
- sharedConstTensor label) {
- sharedConstTensor X;
+sharedConstTensors NeuralNetwork::forwarding(sharedConstTensors input,
+ sharedConstTensors label) {
+ sharedConstTensors X;
- if (input->getDim().batch() > batch_size)
+ if (input[0]->getDim().batch() > batch_size)
throw std::logic_error("Error: mismatch in batchsize for data and model.");
X = forwarding(input);
* Call backwarding function of layer in reverse order
* No need to call at first Input Layer (No data to be updated)
*/
-void NeuralNetwork::backwarding(sharedConstTensor input,
- sharedConstTensor label, int iteration) {
+void NeuralNetwork::backwarding(sharedConstTensors input,
+ sharedConstTensors label, int iteration) {
if (layers.empty() || layers.back()->getType() != LayerType::LAYER_LOSS) {
throw std::invalid_argument("last layer is not loss layer");
forwarding(input, label);
- sharedConstTensor output = label;
+ sharedConstTensors output = label;
for (unsigned int i = layers.size() - 1; i > 0; i--)
output = layers[i]->backwarding(output, iteration);
}
throw std::invalid_argument("Error setting batchsize for the dataset");
}
-sharedConstTensor NeuralNetwork::inference(const Tensor X) {
- if (batch_size != X.batch()) {
+sharedConstTensors NeuralNetwork::inference(sharedConstTensors X) {
+ if (batch_size != X[0]->batch()) {
/**
* Note that inference resets batch_size of the previous train configuration
* Next train must set its batch_size if inference is run with this model.
*/
- setBatchSize(X.batch());
+ setBatchSize(X[0]->batch());
}
- sharedConstTensor out;
+ sharedConstTensors out;
try {
- out = forwarding(MAKE_SHARED_TENSOR(X));
+ out = forwarding(X);
/** Forward loss layer without label as well */
out = std::static_pointer_cast<LossLayer>(layers[layers.size() - 1])
->forwarding(out);
} catch (...) {
ml_loge("Failed to inference Model");
- return nullptr;
+ return out;
}
return out;
}
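Since inference now returns a vector rather than a single shared pointer, the old nullptr check no longer applies; a sketch (illustration only, not part of the patch) of how a caller might guard the result, assuming an input tensor `X` and a constructed `net`:

// Illustration; X and net are assumed. On failure the result may come back
// empty, so check before indexing instead of comparing against nullptr.
nntrainer::sharedConstTensors res = net.inference({MAKE_SHARED_TENSOR(X)});
if (res.empty()) {
  std::cerr << "inference failed" << std::endl;
} else {
  float first = res[0]->getValue(0, 0, 0, 0);
}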
if (data_buffer->getDataFromBuffer(nntrainer::BufferType::BUF_TRAIN,
in->getData(), label->getData())) {
try {
- backwarding(in, label, iter++);
+ backwarding({in}, {label}, iter++);
} catch (...) {
data_buffer->clear(nntrainer::BufferType::BUF_TRAIN);
ml_loge("Error: training error in #%d/%d.", epoch_idx, epochs);
while (true) {
if (data_buffer->getDataFromBuffer(nntrainer::BufferType::BUF_VAL,
in->getData(), label->getData())) {
- sharedConstTensor Y = forwarding(in, label);
- auto model_out = Y->argmax();
+ sharedConstTensors Y = forwarding({in}, {label});
+ auto model_out = Y[0]->argmax();
auto label_out = label->argmax();
for (unsigned int b = 0; b < batch_size; b++) {
if (model_out[b] == label_out[b])
return status;
}
-sharedConstTensor Pooling2DLayer::forwarding(sharedConstTensor in) {
- input = *in;
+sharedConstTensors Pooling2DLayer::forwarding(sharedConstTensors in) {
+ input = *in[0];
TensorDim hidden_dim = output_dim;
hidden = Tensor(hidden_dim);
result.getData(), result.getDim().getDataLen() * sizeof(float));
}
- return MAKE_SHARED_TENSOR(hidden);
+ return {MAKE_SHARED_TENSOR(hidden)};
}
-sharedConstTensor Pooling2DLayer::backwarding(sharedConstTensor derivative,
- int iteration) {
+sharedConstTensors Pooling2DLayer::backwarding(sharedConstTensors derivative,
+ int iteration) {
unsigned int batch = input_dim.batch();
unsigned int channel = input_dim.channel();
unsigned int height = input_dim.height();
float *out = result.getData();
switch (pooling_type) {
case PoolingType::max: {
- for (unsigned int i = 0; i < derivative->getDim().getDataLen(); ++i) {
- out[max_idx[i]] += derivative->getData()[i];
+ for (unsigned int i = 0; i < derivative[0]->getDim().getDataLen(); ++i) {
+ out[max_idx[i]] += derivative[0]->getData()[i];
}
} break;
case PoolingType::average: {
K = 0;
for (unsigned int k = 0; k <= width - p_width; k += stride[1]) {
float del =
- derivative->getValue(b, i, J, K) / static_cast<float>(p_size);
+ derivative[0]->getValue(b, i, J, K) / static_cast<float>(p_size);
for (unsigned int pi = 0; pi < p_height; ++pi) {
for (unsigned int pj = 0; pj < p_width; ++pj) {
result.setValue(b, i, j + pi, k + pj,
}
} break;
case PoolingType::global_max: {
- for (unsigned int i = 0; i < derivative->getDim().getDataLen(); ++i) {
- float der = derivative->getData()[i] / max_idx_global[i].size();
+ for (unsigned int i = 0; i < derivative[0]->getDim().getDataLen(); ++i) {
+ float der = derivative[0]->getData()[i] / max_idx_global[i].size();
for (unsigned int m = 0; m < max_idx_global[i].size(); m++) {
out[max_idx_global[i][m]] += der;
}
unsigned int p_size = width * height;
for (unsigned int b = 0; b < batch; ++b) {
for (unsigned int i = 0; i < channel; ++i) {
- float del = derivative->getValue(b, i, 0, 0) / (p_size);
+ float del = derivative[0]->getValue(b, i, 0, 0) / (p_size);
for (unsigned int j = 0; j < height; ++j) {
for (unsigned int k = 0; k < width; ++k) {
result.setValue(b, i, j, k, del);
default:
throw std::runtime_error("Error: Unknown Pooling Type");
}
- return MAKE_SHARED_TENSOR(std::move(result));
+ return {MAKE_SHARED_TENSOR(std::move(result))};
}
int Pooling2DLayer::setSize(int *size, PropertyType type) {
loadFile("tc_fc_1_FCLayer.in", in);
loadFile("tc_fc_1_FCKernel.in", layer);
- loadFile("tc_fc_1_FCLabel.in", label.get()[0]);
+ loadFile("tc_fc_1_FCLabel.in", *label);
layers.clear();
return status;
if (type == nntrainer::LossType::LOSS_ENTROPY_SOFTMAX) {
loadFile("tc_fc_1_FCLayer_sensible.in", in);
loadFile("tc_fc_1_FCKernel_sensible.in", layer);
- loadFile("tc_fc_1_FCLabel_sensible.in", label.get()[0]);
+ loadFile("tc_fc_1_FCLabel_sensible.in", *label);
}
}
void matchForwarding(const char *file) {
sharedConstTensor out;
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)));
+ EXPECT_NO_THROW(out = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
if (layers.size() > 0) {
for (unsigned int idx = 0; idx < layers.size() - 1; idx++) {
- EXPECT_NO_THROW(out = layers[idx]->forwarding(out));
+ EXPECT_NO_THROW(out = layers[idx]->forwarding({out})[0]);
}
if (layers.back()->getType() == nntrainer::LayerType::LAYER_LOSS) {
std::shared_ptr<nntrainer::LossLayer> loss_layer =
std::static_pointer_cast<nntrainer::LossLayer>(layers.back());
- EXPECT_NO_THROW(out = loss_layer->forwarding(out, label));
+ EXPECT_NO_THROW(out = loss_layer->forwarding({out}, {label})[0]);
} else {
- EXPECT_NO_THROW(out = layers.back()->forwarding(out));
+ EXPECT_NO_THROW(out = layers.back()->forwarding({out})[0]);
}
EXPECT_EQ(status, ML_ERROR_NONE);
}
- matchOutput(out.get()[0], file);
+ matchOutput(*out, file);
}
void matchLoss(const char *file) {
if (layers.size() &&
layers.back()->getType() == nntrainer::LayerType::LAYER_LOSS) {
if (with_loss) {
- EXPECT_NO_THROW(back_out = layers.back()->backwarding(label, 1));
+ EXPECT_NO_THROW(back_out = layers.back()->backwarding({label}, 1)[0]);
} else {
back_out = def_derivative;
}
}
for (; idx >= 0; --idx)
- EXPECT_NO_THROW(back_out = layers[idx]->backwarding(back_out, 1));
+ EXPECT_NO_THROW(back_out = layers[idx]->backwarding({back_out}, 1)[0]);
- EXPECT_NO_THROW(back_out = layer.backwarding(back_out, 1));
+ EXPECT_NO_THROW(back_out = layer.backwarding({back_out}, 1)[0]);
matchOutput(*back_out.get(), file_dx);
loadUpdatedWeightsGradients(file_uw, file_g);
sharedConstTensor out;
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)));
+ EXPECT_NO_THROW(out = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor derivatives(3, 1, 1, 15);
nntrainer::Tensor result;
EXPECT_NO_THROW(
- result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+ result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
matchOutput(result, "tc_fc_1_goldenFCGradientAdam.out");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultActNone.out");
- matchOutput(label.get()[0], "tc_fc_1_FCLabel.in");
+ matchOutput(*label, "tc_fc_1_FCLabel.in");
/** Verify loss value */
matchLoss("tc_fc_1_goldenFCLossActNoneMse.out");
layer.setTrainable(true);
sharedConstTensor forward_result;
- EXPECT_NO_THROW(forward_result = layer.forwarding(MAKE_SHARED_TENSOR(in)));
+ EXPECT_NO_THROW(forward_result =
+ layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(*forward_result, "tc_bn_fc_1_goldenBNResultForward.out");
nntrainer::Tensor backward_in(layer.getOutputDimension());
loadFile("tc_bn_fc_1_goldenBNLayerBackwardDxIn.out", backward_in);
nntrainer::Tensor backward_result =
- *layer.backwarding(MAKE_SHARED_TENSOR(backward_in), 1);
+ *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
matchOutput(backward_result, "tc_bn_fc_1_goldenBNLayerBackwardDx.out");
}
layer.setTrainable(true);
sharedConstTensor forward_result;
- forward_result = layer.forwarding(MAKE_SHARED_TENSOR(in));
+ forward_result = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0];
matchOutput(*forward_result, "tc_bn_conv_1_goldenBNResultForward.out");
nntrainer::Tensor backward_in(layer.getOutputDimension());
loadFile("tc_bn_conv_1_goldenBNLayerBackwardDxIn.out", backward_in);
nntrainer::Tensor backward_result =
- *layer.backwarding(MAKE_SHARED_TENSOR(backward_in), 1);
+ *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
matchOutput(backward_result, "tc_bn_conv_1_goldenBNLayerBackwardDx.out");
}
layer.setTrainable(true);
sharedConstTensor forward_result;
- forward_result = layer.forwarding(MAKE_SHARED_TENSOR(in));
+ forward_result = layer.forwarding({MAKE_SHARED_TENSOR(in)})[0];
matchOutput(*forward_result, "tc_bn_conv_2_goldenBNResultForward.out");
nntrainer::Tensor backward_in(layer.getOutputDimension());
loadFile("tc_bn_conv_2_goldenBNLayerBackwardDxIn.out", backward_in);
nntrainer::Tensor backward_result =
- *layer.backwarding(MAKE_SHARED_TENSOR(backward_in), 1);
+ *layer.backwarding({MAKE_SHARED_TENSOR(backward_in)}, 1)[0];
matchOutput(backward_result, "tc_bn_conv_2_goldenBNLayerBackwardDx.out");
}
loadFile("tc_conv2d_1_conv2DLayer.in", in);
loadFile("tc_conv2d_1_conv2DKernel.in", layer);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_conv2d_1_goldenConv2DResult.out");
}
loadFile("tc_conv2d_2_conv2DLayer.in", in);
loadFile("tc_conv2d_2_conv2DKernel.in", layer);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_conv2d_2_goldenConv2DResult.out");
}
loadFile("tc_conv2d_1_conv2DLayer.in", in);
loadFile("tc_conv2d_1_conv2DKernel.in", layer);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
EXPECT_NO_THROW(
- result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+ result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
nntrainer::Weight *param_data = layer.getWeights().get();
loadFile("tc_conv2d_3_conv2DLayer.in", in);
loadFile("tc_conv2d_3_conv2DKernel.in", layer);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
EXPECT_NO_THROW(
- result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+ result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
nntrainer::Weight *param_data = layer.getWeights().get();
loadFile("tc_conv2d_2_conv2DLayer.in", in);
loadFile("tc_conv2d_2_conv2DKernel.in", layer);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
EXPECT_NO_THROW(
- result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+ result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
param_data = layer.getWeights().get();
for (unsigned int i = 0; i < filter_size * 2; ++i) {
matchOutput(bias_grad, "tc_conv2d_2_goldenBiasGrad.out");
for (int i = 0; i < 4; i++) {
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
EXPECT_NO_THROW(
- result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+ result = *layer.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
}
param_data = layer.getWeights().get();
loadFile("tc_conv2d_int_conv2DKernel2.in", layer2);
nntrainer::Tensor out1;
- EXPECT_NO_THROW(out1 = layer1.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out1 = *layer1.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor out2;
- EXPECT_NO_THROW(out2 = layer2.forwarding(MAKE_SHARED_TENSOR(out1)).get()[0]);
+ EXPECT_NO_THROW(out2 = *layer2.forwarding({MAKE_SHARED_TENSOR(out1)})[0]);
matchOutput(out1, "tc_conv2d_int_goldenConv2DResult.out");
matchOutput(out2, "tc_conv2d_int_goldenConv2DResult2.out");
nntrainer::Tensor result2;
EXPECT_NO_THROW(
- result2 = layer2.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
+ result2 = *layer2.backwarding({MAKE_SHARED_TENSOR(derivatives)}, 1)[0]);
- EXPECT_NO_THROW(
- result = layer1.backwarding(MAKE_SHARED_TENSOR(result2), 1).get()[0]);
+ EXPECT_NO_THROW(result =
+ *layer1.backwarding({MAKE_SHARED_TENSOR(result2)}, 1)[0]);
/** Compare second conv */
param_data = layer2.getWeights().get();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Daverage.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_max.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_average.out");
}
reinitialize();
loadFile("tc_pooling2d_2.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_max.out");
}
loadFile("tc_pooling2d_2.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_average.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2DmaxGrad.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
sharedTensor grad = MAKE_SHARED_TENSOR(out.getDim());
grad->getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = layer.backwarding(grad, 0).get()[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding({grad}, 0)[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2DaverageGrad.out");
}
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_maxGrad.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(grad)}, 0)[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_averageGrad.out");
}
loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", in);
- EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
+ EXPECT_NO_THROW(out = *layer.forwarding({MAKE_SHARED_TENSOR(in)})[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", out);
- EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(out), 0).get()[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(out)}, 0)[0]);
EXPECT_EQ(in.getDim(), nntrainer::TensorDim(1, 2, 4, 4));
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dmax.out");
loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", out);
- EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(out), 0).get()[0]);
+ EXPECT_NO_THROW(in = *layer.backwarding({MAKE_SHARED_TENSOR(out)}, 0)[0]);
EXPECT_EQ(in.getDim(), nntrainer::TensorDim(2, 2, 4, 4));
matchOutput(in, "tc_pooling2d_2_goldenPooling2Dmax.out");
TEST(nntrainer_LossLayer, forward_nolabel_n) {
nntrainer::LossLayer layer;
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
- EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a)), std::runtime_error);
+ EXPECT_THROW(layer.forwarding({MAKE_SHARED_TENSOR(a)}), std::runtime_error);
}
TEST(nntrainer_LossLayer, forward_loss_unknown_n) {
nntrainer::LossLayer layer;
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
- EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
- std::runtime_error);
+ EXPECT_THROW(
+ layer.forwarding({MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
+ std::runtime_error);
}
TEST(nntrainer_LossLayer, backward_loss_unknown_n) {
nntrainer::LossLayer layer;
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
- EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+ EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}, 1),
+ std::runtime_error);
}
TEST(nntrainer_LossLayer, forward_loss_forward_entropy_n) {
layer.setLoss(nntrainer::LossType::LOSS_ENTROPY);
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
- EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
- std::runtime_error);
+ EXPECT_THROW(
+ layer.forwarding({MAKE_SHARED_TENSOR(a)}, {MAKE_SHARED_TENSOR(b)}),
+ std::runtime_error);
}
TEST(nntrainer_LossLayer, backward_loss_backward_entropy_n) {
nntrainer::LossLayer layer;
layer.setLoss(nntrainer::LossType::LOSS_ENTROPY);
nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
- EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+ EXPECT_THROW(layer.backwarding({MAKE_SHARED_TENSOR(a)}, 1),
+ std::runtime_error);
}
/**
GEN_TEST_INPUT(expected,
nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1)));
nntrainer::Tensor result;
- EXPECT_NO_THROW(result =
- layer.forwarding(MAKE_SHARED_TENSOR(input)).get()[0]);
+ EXPECT_NO_THROW(result = *layer.forwarding({MAKE_SHARED_TENSOR(input)})[0]);
EXPECT_TRUE(result == expected);
expected.copy(input);
- EXPECT_NO_THROW(
- result =
- layer.backwarding(MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 10)), 1)
- .get()[0]);
+ EXPECT_NO_THROW(result = *layer.backwarding(
+ {MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 10))}, 1)[0]);
GEN_TEST_INPUT(expected,
nntrainer::ActivationLayer::reluPrime(
nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1))));
sharedTensor input = std::shared_ptr<nntrainer::Tensor>(
new nntrainer::Tensor[1], std::default_delete<nntrainer::Tensor[]>());
- nntrainer::Tensor &in = input.get()[0];
+ nntrainer::Tensor &in = *input;
in = nntrainer::Tensor();
- EXPECT_THROW(layer.forwarding(input), std::runtime_error);
+ EXPECT_THROW(layer.forwarding({input}), std::runtime_error);
}
TEST_F(nntrainer_AdditionLayer, forwarding_02_n) {
sharedTensor input = std::shared_ptr<nntrainer::Tensor>(
new nntrainer::Tensor[1], std::default_delete<nntrainer::Tensor[]>());
- nntrainer::Tensor &in = input.get()[0];
+ nntrainer::Tensor &in = *input;
in = nntrainer::Tensor(layer.getInputDimension());
- EXPECT_THROW(layer.forwarding(input), std::runtime_error);
+ EXPECT_THROW(layer.forwarding({input}), std::runtime_error);
}
TEST_F(nntrainer_AdditionLayer, forwarding_03_p) {
sharedTensor input = std::shared_ptr<nntrainer::Tensor>(
new nntrainer::Tensor[2], std::default_delete<nntrainer::Tensor[]>());
- nntrainer::Tensor &in = input.get()[0];
+ nntrainer::Tensor &in = *input;
in = nntrainer::Tensor(layer.getInputDimension());
- input.get()[1] = input.get()[0];
+ input.get()[1] = *input;
- EXPECT_NO_THROW(layer.forwarding(input));
+ EXPECT_NO_THROW(layer.forwarding({input}));
}
/**
std::string err_msg = ss.str();
verify(*in, expected_input, err_msg + " at input ");
- nntrainer::sharedConstTensor out = node->forwarding(in);
+ nntrainer::sharedConstTensor out = node->forwarding({in})[0];
verify(*out, expected_output, err_msg + " at output ");
return out;
}
std::string err_msg = ss.str();
nntrainer::sharedConstTensor out =
- std::static_pointer_cast<nntrainer::LossLayer>(node)->forwarding(pred,
- answer);
+ std::static_pointer_cast<nntrainer::LossLayer>(node)->forwarding(
+ {pred}, {answer})[0];
return out;
}
<< iteration;
std::string err_msg = ss.str();
- nntrainer::sharedConstTensor out = node->backwarding(deriv, iteration);
+ nntrainer::sharedConstTensor out = node->backwarding({deriv}, iteration)[0];
if (should_verify) {
verify(*out, expected_dx, err_msg);