}
if (!TRAINING) {
- int status;
std::string img = data_path;
std::vector<float> featureVector, resultVector;
featureVector.resize(feature_size);
nntrainer::Tensor X;
try {
X = nntrainer::Tensor({featureVector});
+ cout << NN.forwarding(MAKE_SHARED_TENSOR(X))->apply(stepFunction) << endl;
} catch (...) {
- std::cerr << "Error while construct tensor" << std::endl;
+ std::cerr << "Error while forwarding the model" << std::endl;
NN.finalize();
return 0;
}
- cout << NN.forwarding(X, status).apply(stepFunction) << endl;
}
/**
* @brief Finalize NN
} catch (...) {
std::cerr << "Error during tensor construct" << std::endl;
NN.finalize();
- return 0;
+ return -1;
}
try {
y = nntrainer::Tensor(label);
} catch (...) {
std::cerr << "Error during tensor construct" << std::endl;
NN.finalize();
- return 0;
+ return -1;
}
try {
- NN.backwarding(d, y, i);
+ NN.backwarding(MAKE_SHARED_TENSOR(d), MAKE_SHARED_TENSOR(y), i);
} catch (...) {
- std::cerr << "Error during backwarding" << std::endl;
+ std::cerr << "Error during backwarding the model" << std::endl;
NN.finalize();
- return 0;
+ return -1;
}
}
std::cout << "#" << i + 1 << "/" << NN.getEpoch()
* @brief forward propagation
*/
int cn = 0;
- int status = 0;
for (unsigned int j = 0; j < inputVector.size(); ++j) {
std::vector<std::vector<float>> in, label;
in.push_back(inputVector[j]);
label.push_back(outputVector[j]);
- if (NN.forwarding(nntrainer::Tensor(in), status)
- .apply(stepFunction)
- .getValue(0, 0, 0, 0) == label[0][0])
- cn++;
+ try {
+ cn += NN.forwarding(MAKE_SHARED_TENSOR(in))
+ ->apply(stepFunction)
+ .getValue(0, 0, 0, 0) == label[0][0];
+ } catch (...) {
+ std::cerr << "Error during forwarding the model" << std::endl;
+ NN.finalize();
+ return -1;
+ }
}
std::cout << "[ Accuracy ] : " << ((float)(cn) / inputVector.size()) * 100.0
<< "%" << std::endl;
std::deque<Experience> expQ;
PTR env;
- int status = 0;
/**
* @brief Initialize Environment
* @brief get action with input State with mainNet
*/
nntrainer::Tensor in_tensor;
+ nntrainer::sharedTensor test;
try {
in_tensor = nntrainer::Tensor({input});
} catch (...) {
targetNet.finalize();
return 0;
}
- nntrainer::Tensor test = mainNet.forwarding(in_tensor, status);
- float *data = test.getData();
- unsigned int len = test.getDim().getDataLen();
+ try {
+ test = mainNet.forwarding(MAKE_SHARED_TENSOR(in_tensor));
+ } catch (...) {
+ std::cerr << "Error while forwarding the network" << std::endl;
+ mainNet.finalize();
+ targetNet.finalize();
+ return 0;
+ }
+ float *data = test->getData();
+ unsigned int len = test->getDim().getDataLen();
std::vector<float> temp(data, data + len);
action.push_back(argmax(temp));
/**
* @brief run forward propagation with mainNet
*/
- nntrainer::Tensor Q = mainNet.forwarding(q_in, status);
+ nntrainer::sharedTensor Q;
+ try {
+ Q = mainNet.forwarding(MAKE_SHARED_TENSOR(q_in));
+ } catch (...) {
+ std::cerr << "Error during forwarding main network" << std::endl;
+ mainNet.finalize();
+ targetNet.finalize();
+ return -1;
+ }
/**
* @brief run forward propagation with targetNet
*/
- nntrainer::Tensor NQ = targetNet.forwarding(nq_in, status);
- float *nqa = NQ.getData();
+ nntrainer::sharedTensor NQ;
+ try {
+ NQ = targetNet.forwarding(MAKE_SHARED_TENSOR(nq_in));
+ } catch (...) {
+ std::cerr << "Error during forwarding target network" << std::endl;
+ mainNet.finalize();
+ targetNet.finalize();
+ return -1;
+ }
+ float *nqa = NQ->getData();
/**
* @brief Update Q values & update mainNetwork
*/
for (unsigned int i = 0; i < in_Exp.size(); i++) {
if (in_Exp[i].done) {
- Q.setValue(i, 0, 0, (int)in_Exp[i].action[0],
- (float)in_Exp[i].reward);
+ Q->setValue(i, 0, 0, (int)in_Exp[i].action[0],
+ (float)in_Exp[i].reward);
} else {
- float next = (nqa[i * NQ.getWidth()] > nqa[i * NQ.getWidth() + 1])
- ? nqa[i * NQ.getWidth()]
- : nqa[i * NQ.getWidth() + 1];
+ float next = (nqa[i * NQ->getWidth()] > nqa[i * NQ->getWidth() + 1])
+ ? nqa[i * NQ->getWidth()]
+ : nqa[i * NQ->getWidth() + 1];
try {
- Q.setValue(i, 0, 0, (int)in_Exp[i].action[0],
- (float)in_Exp[i].reward + DISCOUNT * next);
+ Q->setValue(i, 0, 0, (int)in_Exp[i].action[0],
+ (float)in_Exp[i].reward + DISCOUNT * next);
} catch (...) {
- std::cerr << "Error durint set value" << std::endl;
+ std::cerr << "Error during set value" << std::endl;
mainNet.finalize();
targetNet.finalize();
return 0;
nntrainer::Tensor in_tensor;
try {
in_tensor = nntrainer::Tensor(inbatch);
+ mainNet.backwarding(MAKE_SHARED_TENSOR(in_tensor), Q, iter);
} catch (...) {
- std::cerr << "Error during tensor initialization" << std::endl;
+ std::cerr << "Error during backwarding the network" << std::endl;
mainNet.finalize();
targetNet.finalize();
return 0;
}
-
- mainNet.backwarding(in_tensor, Q, iter);
}
writeFile << "mainNet Loss : " << mainNet.getLoss()
srand(time(NULL));
std::string ini_file = data_path + "ini.bin";
std::vector<std::vector<float>> inputVector, outputVector;
- int status = 0;
/**
* @brief Extract Feature
*/
} catch (...) {
std::cerr << "Error during tensor initialization" << std::endl;
NN.finalize();
- return 0;
+ return -1;
}
try {
out = nntrainer::Tensor({outputVector[j]});
} catch (...) {
std::cerr << "Error during tensor initialization" << std::endl;
NN.finalize();
- return 0;
+ return -1;
}
- NN.backwarding(in, out, i);
+ try {
+ NN.backwarding(MAKE_SHARED_TENSOR(in), MAKE_SHARED_TENSOR(out), i);
+ } catch (...) {
+ std::cerr << "Error during backwarding the model" << std::endl;
+ NN.finalize();
+ return -1;
+ }
}
cout << "#" << i + 1 << "/" << ITERATION << " - Loss : " << NN.getLoss()
<< endl;
nntrainer::Tensor X;
try {
X = nntrainer::Tensor({featureVector});
+ cout << NN.forwarding(MAKE_SHARED_TENSOR(X))->apply(stepFunction) << endl;
} catch (...) {
- std::cerr << "Error during tensor initialization" << std::endl;
+ std::cerr << "Error during forwaring the model" << std::endl;
NN.finalize();
- return 0;
+ return -1;
}
- cout << NN.forwarding(X, status).apply(stepFunction) << endl;
}
/**
void save(std::ofstream &file){/* noop */};
/**
- * @brief forward propagation with input
- * @param[in] in Input Tensor from upper layer
- * @param[out] status Error Status of this function
- * @retval Activation(f(x))
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief back propagation calculate activation prime.
- * @param[in] input Input Tensor from lower layer
- * @param[in] iteration Numberof Epoch for ADAM
- * @retval Tensor
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor backwarding(Tensor in, int iteration);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief copy layer
BatchNormalizationLayer &operator=(BatchNormalizationLayer &&rhs) = default;
/**
- * @brief forward propagation with input
- * @param[in] in Input Tensor from upper layer
- * @retval normalized input tensor using scaling factor
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief back propagation
- * Calculate dJdB & dJdW & Update W & B
- * @param[in] in Input Tensor from lower layer
- * @param[in] iteration Number of Epoch for ADAM
- * @retval dJdB x W Tensor
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor backwarding(Tensor in, int iteration);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief copy layer
void save(std::ofstream &file);
/**
- * @brief forward propagation with input
- * @param[in] in Input Tensor from upper layer
- * @param[out] status Error Status of this function
- * @retval Activation(W x input + B)
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief back propagation
- * Calculate dJdB & dJdW & Update W & B
- * @param[in] input Input Tensor from lower layer
- * @param[in] iteration Number of Epoch for ADAM
- * @retval dJdB x W Tensor
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor backwarding(Tensor in, int iteration);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief copy layer
void save(std::ofstream &file);
/**
- * @brief forward propagation with input
- * @param[in] in Input Tensor from upper layer
- * @retval Activation(W x input + B)
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief back propagation
- * Calculate dJdB & dJdW & Update W & B
- * @param[in] input Input Tensor from lower layer
- * @param[in] iteration Number of Epoch for ADAM
- * @retval dJdB x W Tensor
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor backwarding(Tensor in, int iteration);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief copy layer
void save(std::ofstream &file){};
/**
- * @brief forward propagation with input
- * @param[in] in Input Tensor from upper layer
- * @param[out] status Error Status of this function
- * @retval return Flatten Result
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief back propagation
- * Calculate Derivatives
- * @param[in] input Input Tensor from lower layer
- * @param[in] iteration Number of Epoch
- * @retval Splited derivatives
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor backwarding(Tensor in, int iteration);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief copy layer
void save(std::ofstream &file){};
/**
- * @brief It is back propagation of input layer.
- * It return Input as it is.
- * @param[in] input input Tensor from lower layer.
- * @param[in] iteration Epoch Number for ADAM
- * @retval
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor backwarding(Tensor in, int iteration) { return input; };
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief foward propagation : return Input Tensor
- * It return Input as it is.
- * @param[in] in input Tensor from lower layer.
- * @retval return Input Tensor
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief Initializer of Input Layer
weight_ini_type(WEIGHT_XAVIER_UNIFORM),
flatten(false),
trainable(true),
- param_size(0) {}
+ param_size(0),
+ num_inputs(1),
+ num_outputs(1) {}
/**
* @brief Destructor of Layer Class
virtual Layer &operator=(Layer &&rhs) = default;
/**
- * @brief Forward Propation of neural Network
- * @param[in] in Input Tensor taken by upper layer
- * @retval Output Tensor
+ * @brief Forward Propagation of a layer
+ * @param[in] in List of Input Tensors taken by this layer
+ * @retval List of Output Tensors
*/
- virtual Tensor forwarding(Tensor in, int &status) = 0;
+ virtual sharedTensor forwarding(sharedTensor in) = 0;
/**
- * @brief Back Propation of neural Network
- * @param[in] in Input Tensor taken by lower layer
- * @param[in] iteration Epoch value for the ADAM Optimizer
- * @retval Output Tensor
+ * @brief Back Propagation of a layer
+ * @param[in] in List of Derivative Tensors from the next layer
+ * @param[in] iteration Iteration value for the Optimizer
+ * @retval List of Derivative Tensors for the previous layer
*/
- virtual Tensor backwarding(Tensor in, int iteration) = 0;
+ virtual sharedTensor backwarding(sharedTensor in, int iteration) = 0;
/**
* @brief Initialize the layer
* 15. pooling : max, average, global_max, global_average
* 16. flatten : bool
* 17. name : string (type)
+ * 18. num_inputs : unsigned int (minimum 1)
+ * 19. num_outputs : unsigned int (minimum 1)
*/
enum class PropertyType {
input_shape = 0,
pooling = 15,
flatten = 16,
name = 17,
- unknown = 18
+ num_inputs = 18,
+ num_outputs = 19,
+ unknown = 20
};
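For orientation, a minimal sketch of how the two new properties could be configured at runtime; it assumes the existing string-based Layer::setProperty(std::vector<std::string>) "key=value" parser is (or will be) taught to accept the new keys, and the FullyConnectedLayer target and values are purely illustrative:

#include <string>
#include <vector>

#include <fc_layer.h>

// Hedged sketch, not part of the patch: set the new per-layer input/output
// counts through the generic property interface (minimum value for both is 1).
int set_io_counts(nntrainer::FullyConnectedLayer &fc) {
  return fc.setProperty({"num_inputs=1", "num_outputs=1"});
}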
/**
use setParamSize() to avoid
setting parameters twice */
+ /**
+ * @brief Number of inputs this layer requires/will operate on
+ */
+ unsigned int num_inputs;
+
+ /**
+ * @brief Number of outputs this layer will produce
+ */
+ unsigned int num_outputs;
+
private:
/**
* @brief Set containing all the names of layers
~LossLayer(){};
/**
- * @brief Forward Propation of neural Network
- * @param[in] in Input Tensor taken by upper layer
- * @retval Output Tensor
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief foward propagation : return Input Tensor
- * It return Input as it is.
- * @param[in] output input Tensor from lower layer.
- * @param[in] label label Tensor.
- * @retval loss (cost)
+ * @brief Forward Propagation of a layer
+ * @param[in] in List of Input Tensors taken by this layer
+ * @param[in] label List of Label Tensors for the model
+ * @retval List of Input Tensors as it is.
*/
- Tensor forwarding(Tensor output, Tensor label, int &status);
+ sharedTensor forwarding(sharedTensor in, sharedTensor label);
/**
- * @brief back propagation
- * @param[in] input Input Tensor from lower layer
- * @param[in] iteration Number of Epoch for ADAM
- * @retval loss diff Tensor
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor backwarding(Tensor in, int iteration);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief read layer Weight & Bias data from file
int init();
/**
- * @brief forward propagation
- * @param[in] input Input Tensor X
- * @retval Output Tensor Y
+ * @brief Forward Propagation of the neural network
+ * @param[in] input List of Input Tensors taken by the neural network
+ * @retval List of Output Tensors
*/
- Tensor forwarding(Tensor input, int &status);
+ sharedTensor forwarding(sharedTensor input);
/**
- * @brief forward propagation
- * @param[in] input Input Tensor X
- * @param[in] label Input Tensor Y2
- * @retval Output Tensor Y
+ * @brief Forward Propagation of the neural network
+ * @param[in] input List of Input Tensors taken by the neural network
+ * @param[in] label List of Label Tensors for the model
+ * @retval List of Output Tensors
*/
- Tensor forwarding(Tensor input, Tensor output, int &status);
+ sharedTensor forwarding(sharedTensor input, sharedTensor label);
/**
- * @brief back propagation to update W & B
- * @param[in] input Input Tensor X
- * @param[in] expectedOutput Lable Tensor Y
- * @param[in] iteration Epoch Number for ADAM
+ * @brief Backward Propagation of the neural network
+ * @param[in] input List of Input Tensors taken by the neural network
+ * @param[in] label List of Label Tensors for the model
+ * @param[in] iteration Iteration Number for the optimizer
*/
- int backwarding(Tensor input, Tensor expected_output, int iteration);
+ void backwarding(sharedTensor input, sharedTensor label, int iteration);
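Since backwarding() now reports failures by throwing instead of returning a status code, callers are expected to follow the try/catch pattern used in the example applications above. A minimal sketch of one training step under that assumption; the helper name train_one_step and the vector-based batches are illustrative only:

#include <iostream>
#include <vector>

#include <neuralnet.h>
#include <tensor.h>

// Hedged sketch, not part of the patch: `net` is assumed to be an already
// initialized NeuralNetwork; `in` and `label` are illustrative host-side batches.
static int train_one_step(nntrainer::NeuralNetwork &net,
                          const std::vector<std::vector<float>> &in,
                          const std::vector<std::vector<float>> &label,
                          int iteration) {
  try {
    // forwarding is run internally by backwarding; failures surface as exceptions
    net.backwarding(MAKE_SHARED_TENSOR(nntrainer::Tensor(in)),
                    MAKE_SHARED_TENSOR(nntrainer::Tensor(label)), iteration);
  } catch (...) {
    std::cerr << "Error during backwarding the model" << std::endl;
    return -1;
  }
  return 0;
}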
/**
* @brief save model and training parameters into file
void save(std::ofstream &file){};
/**
- * @brief forward propagation with input
- * @param[in] in Input Tensor from upper layer
- * @param[out] status Error Status of this function
- * @retval return Pooling Result
+ * @copydoc Layer::forwarding(sharedTensor in)
*/
- Tensor forwarding(Tensor in, int &status);
+ sharedTensor forwarding(sharedTensor in);
/**
- * @brief back propagation
- * Calculate Delivatives
- * @param[in] input Input Tensor from lower layer
- * @param[in] iteration Number of Epoch
- * @retval dJdB x W Tensor
+ * @copydoc Layer::backwarding(sharedTensor in, int iteration)
*/
- Tensor backwarding(Tensor in, int iteration);
+ sharedTensor backwarding(sharedTensor in, int iteration);
/**
* @brief copy layer
* @brief calculate 2D pooling
* @param[in] batch batch index
* @param[in] in input tensor
- * @param[out] status output of status
* @retval Tensor output tensor
*/
- Tensor pooling2d(unsigned int batch, Tensor in, int &status);
+ Tensor pooling2d(unsigned int batch, Tensor &in);
};
} // namespace nntrainer
#include <regex>
#include <tensor_dim.h>
+#define MAKE_SHARED_TENSOR(x) std::make_shared<nntrainer::Tensor>(x)
+
namespace nntrainer {
class LazyTensor;
*/
std::ostream &operator<<(std::ostream &out, Tensor const &m);
+typedef std::shared_ptr<Tensor> sharedTensor;
+
} /* namespace nntrainer */
#endif /* __cplusplus */
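A quick sketch of the new alias and helper in isolation; the 1x1x1x4 shape and the standalone main() are illustrative only:

#include <iostream>

#include <tensor.h>

// Hedged sketch, not part of the patch: demonstrates the sharedTensor alias and
// the MAKE_SHARED_TENSOR helper introduced above.
int main() {
  nntrainer::Tensor t(1, 1, 1, 4); // batch, channel, height, width
  t.setZero();

  // MAKE_SHARED_TENSOR(t) expands to std::make_shared<nntrainer::Tensor>(t),
  // i.e. the tensor is copied into a shared_ptr-managed instance.
  nntrainer::sharedTensor st = MAKE_SHARED_TENSOR(t);
  std::cout << st->getDim().getDataLen() << std::endl; // prints 4
  return 0;
}

Because MAKE_SHARED_TENSOR copies its argument into a freshly allocated Tensor, call sites that want to avoid the copy can pass an rvalue, as several hunks below do with MAKE_SHARED_TENSOR(std::move(ret)).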
return ML_ERROR_NONE;
}
-Tensor ActivationLayer::forwarding(Tensor in, int &status) {
- status = ML_ERROR_NONE;
+sharedTensor ActivationLayer::forwarding(sharedTensor in) {
+ input = *in;
+ hidden = _act_fn(input);
- input = in;
- hidden = _act_fn(in);
-
- return hidden;
+ return MAKE_SHARED_TENSOR(hidden);
}
-Tensor ActivationLayer::backwarding(Tensor derivative, int iteration) {
+sharedTensor ActivationLayer::backwarding(sharedTensor derivative,
+ int iteration) {
+ Tensor deriv = *derivative;
+ Tensor ret;
if (activation_type == ActiType::ACT_SOFTMAX)
- return _act_prime_fn(hidden, derivative);
+ ret = _act_prime_fn(hidden, deriv);
else
- return _act_prime_fn(input, derivative);
+ ret = _act_prime_fn(input, deriv);
+
+ return MAKE_SHARED_TENSOR(std::move(ret));
}
/**
}
}
-Tensor BatchNormalizationLayer::forwarding(Tensor in, int &status) {
+sharedTensor BatchNormalizationLayer::forwarding(sharedTensor in) {
Tensor &mu = paramsAt(static_cast<int>(BNParams::mu)).weight;
Tensor &var = paramsAt(static_cast<int>(BNParams::var)).weight;
Tensor &gamma = paramsAt(static_cast<int>(BNParams::gamma)).weight;
if (trainable) {
Tensor deviation;
- this->input = in;
+ input = *in;
///< current mu */
Tensor cmu;
- cmu = in.average(0);
+ cmu = input.average(0);
- deviation = in.subtract(cmu);
+ deviation = input.subtract(cmu);
this->cvar = deviation.chain()
.multiply_i(deviation)
this->x_normalized = deviation.divide(cvar.apply(sqrtFloat));
this->hidden = x_normalized.chain().multiply_i(gamma).add_i(beta).run();
-
- status = ML_ERROR_NONE;
} else {
/// NYI
- status = ML_ERROR_NOT_SUPPORTED;
throw std::runtime_error("not_yet_implemented");
}
- return hidden;
+
+ return MAKE_SHARED_TENSOR(hidden);
}
-Tensor BatchNormalizationLayer::backwarding(Tensor dy, int iteration) {
+sharedTensor BatchNormalizationLayer::backwarding(sharedTensor derivative,
+ int iteration) {
Tensor &gamma = paramsAt(static_cast<int>(BNParams::gamma)).weight;
Tensor &dbeta = paramsAt(static_cast<int>(BNParams::beta)).grad;
Tensor &dgamma = paramsAt(static_cast<int>(BNParams::gamma)).grad;
Tensor dx_normalized;
Tensor dx;
+ Tensor deriv = *derivative;
- int batch = dy.batch();
+ int batch = deriv.batch();
- dgamma = x_normalized.multiply(dy).sum(0);
- dbeta = dy.sum(0);
+ dgamma = x_normalized.multiply(deriv).sum(0);
+ dbeta = deriv.sum(0);
- dx_normalized = dy.multiply(gamma);
+ dx_normalized = deriv.multiply(gamma);
dx = dx_normalized.chain()
.multiply_i(batch)
opt.apply_gradients(grad_params, param_size - 2, iteration);
- return dx;
+ return MAKE_SHARED_TENSOR(std::move(dx));
}
void BatchNormalizationLayer::copy(std::shared_ptr<Layer> l) {
void Conv2DLayer::save(std::ofstream &file) { Layer::save(file); }
-Tensor Conv2DLayer::forwarding(Tensor in, int &status) {
+sharedTensor Conv2DLayer::forwarding(sharedTensor in) {
+ int status = ML_ERROR_NONE;
+ input = *in;
+
if (normalization) {
- input = in.normalization();
- } else {
- input = in;
+ input = input.normalization();
}
if (standardization) {
input = input.standardization();
}
- hidden = Tensor(in.batch(), output_dim.channel(), output_dim.height(),
+ hidden = Tensor(input.batch(), output_dim.channel(), output_dim.height(),
output_dim.width());
hidden.setZero();
std::vector<float> output(output_dim.width() * output_dim.height());
- for (unsigned int b = 0; b < in.batch(); ++b) {
+ for (unsigned int b = 0; b < input.batch(); ++b) {
Tensor in_padded = zero_pad(b, input, padding);
for (unsigned int i = 0; i < filter_size; ++i) {
status = conv2d(in_padded.getData(), in_padded.getDim(), filter.getData(),
filter.getDim(), output.data(), stride,
bias.getValue(0, 0, 0, 0));
+ if (status != ML_ERROR_NONE)
+ throw std::runtime_error("Forwarding Convolution failed.");
memcpy(hidden.getAddress(b * hidden.getDim().getFeatureLen() +
i * hidden.height() * hidden.width()),
}
}
- status = ML_ERROR_NONE;
- return hidden;
+ return MAKE_SHARED_TENSOR(hidden);
};
-Tensor Conv2DLayer::backwarding(Tensor derivative, int iteration) {
+sharedTensor Conv2DLayer::backwarding(sharedTensor derivative, int iteration) {
// Calculate delK : [batch, channel, height, width ] * filter_size
unsigned int same_pad[CONV2D_DIM];
delBias.setZero();
}
- TensorDim in_dim(1, 1, derivative.height(), derivative.width());
+ TensorDim in_dim(1, 1, derivative->height(), derivative->width());
for (unsigned int b = 0; b < input_dim.batch(); ++b) {
Tensor in_padded = zero_pad(b, input, padding);
Tensor &delK = paramsAt(i).grad;
Tensor &delBias = paramsAt(i + filter_size).grad;
for (unsigned int j = 0; j < in_padded.channel(); ++j) {
- conv2d(
- in_padded.getAddress(j * in_padded.height() * in_padded.width()),
- p_dim,
- derivative.getAddress(b * derivative.getDim().getFeatureLen() +
- i * derivative.height() * derivative.width()),
- in_dim, output.data(), stride, 0.0f);
+ conv2d(in_padded.getAddress(j * in_padded.height() * in_padded.width()),
+ p_dim,
+ derivative->getAddress(b * derivative->getDim().getFeatureLen() +
+ i * derivative->height() *
+ derivative->width()),
+ in_dim, output.data(), stride, 0.0f);
float *del = delK.getAddress(j * o_size);
for (unsigned k = 0; k < o_size; ++k) {
del[k] += output[k];
}
// Calculate delBias [ 1, 1, 1, filter_size]
- for (unsigned int j = 0; j < derivative.height(); ++j) {
- for (unsigned int k = 0; k < derivative.width(); ++k) {
- sum += derivative.getValue(b, i, j, k);
+ for (unsigned int j = 0; j < derivative->height(); ++j) {
+ for (unsigned int k = 0; k < derivative->width(); ++k) {
+ sum += derivative->getValue(b, i, j, k);
}
}
delBias.setValue(0, 0, 0, 0, sum + delBias.getValue(0, 0, 0, 0));
output.clear();
output.resize(ret.height() * ret.width());
- for (unsigned int b = 0; b < derivative.batch(); ++b) {
- Tensor in_padded = zero_pad(b, derivative, same_pad);
+ for (unsigned int b = 0; b < derivative->batch(); ++b) {
+ Tensor in_padded = zero_pad(b, *derivative, same_pad);
TensorDim p_dim(1, 1, in_padded.height(), in_padded.width());
for (unsigned int in_c = 0; in_c < input_dim.channel(); ++in_c) {
- for (unsigned int i = 0; i < derivative.channel(); ++i) {
+ for (unsigned int i = 0; i < derivative->channel(); ++i) {
Tensor &filter = paramsAt(i).weight;
conv2d(in_padded.getAddress(i * in_padded.height() * in_padded.width()),
opt.apply_gradients(params, param_size, iteration);
}
- return rotate_180(strip_pad(ret, padding));
+ ret = rotate_180(strip_pad(ret, padding));
+ return MAKE_SHARED_TENSOR(std::move(ret));
}
void Conv2DLayer::copy(std::shared_ptr<Layer> l) {
}
}
-Tensor FullyConnectedLayer::forwarding(Tensor in, int &status) {
+sharedTensor FullyConnectedLayer::forwarding(sharedTensor in) {
Tensor &weight = paramsAt(static_cast<int>(FCParams::weight)).weight;
Tensor &bias = paramsAt(static_cast<int>(FCParams::bias)).weight;
- input = in;
+ input = *in;
hidden = input.chain().dot(weight).add_i(bias).run();
- status = ML_ERROR_NONE;
if (weight_decay.type == WeightDecayType::l2norm) {
loss = weight_decay.lambda * 0.5f * (weight.l2norm());
}
- return hidden;
+ return MAKE_SHARED_TENSOR(hidden);
}
void FullyConnectedLayer::read(std::ifstream &file) {
this->cost = from->cost;
}
-Tensor FullyConnectedLayer::backwarding(Tensor derivative, int iteration) {
+sharedTensor FullyConnectedLayer::backwarding(sharedTensor derivative,
+ int iteration) {
unsigned int weight_idx = static_cast<int>(FCParams::weight);
unsigned int bias_idx = static_cast<int>(FCParams::bias);
Tensor &weight = paramsAt(weight_idx).weight;
Tensor &djdw = paramsAt(weight_idx).grad;
Tensor &djdb = paramsAt(bias_idx).grad;
- Tensor ret = derivative.dot(weight.transpose("0:2:1"));
- djdb = derivative.sum(0);
+ Tensor ret = derivative->dot(weight.transpose("0:2:1"));
+ djdb = derivative->sum(0);
djdw = input.chain()
.transpose("0:2:1")
- .dot(derivative)
+ .dot(*derivative)
.applyIf(this->isWeightDecayL2Norm(), _LIFT(add_i), weight,
weight_decay.lambda)
.run()
opt.apply_gradients(params, param_size, iteration);
}
- return ret;
+ return MAKE_SHARED_TENSOR(std::move(ret));
}
} /* namespace nntrainer */
return status;
}
-Tensor FlattenLayer::forwarding(Tensor in, int &status) {
- hidden = Tensor(in.batch(), output_dim.channel(), output_dim.height(),
+sharedTensor FlattenLayer::forwarding(sharedTensor in) {
+ input = *in;
+
+ hidden = Tensor(input.batch(), output_dim.channel(), output_dim.height(),
output_dim.width());
hidden.setZero();
- memcpy(hidden.getData(), in.getData(),
- in.getDim().getDataLen() * sizeof(float));
- return hidden;
+ memcpy(hidden.getData(), input.getData(),
+ input.getDim().getDataLen() * sizeof(float));
+
+ return MAKE_SHARED_TENSOR(hidden);
}
-Tensor FlattenLayer::backwarding(Tensor in, int iteration) {
- Tensor ret = in;
- ret.setDim(input_dim);
- return ret;
+sharedTensor FlattenLayer::backwarding(sharedTensor in, int iteration) {
+ in->setDim(input_dim);
+ return in;
}
void FlattenLayer::setProperty(const PropertyType type,
this->hidden.copy(from->hidden);
}
-Tensor InputLayer::forwarding(Tensor in, int &status) {
- input = in;
+sharedTensor InputLayer::forwarding(sharedTensor in) {
+ input = *in;
if (normalization)
input = input.normalization();
+ if (standardization)
+ input = input.standardization();
- status = ML_ERROR_NONE;
- return input;
+ return MAKE_SHARED_TENSOR(input);
+}
+
+sharedTensor InputLayer::backwarding(sharedTensor in, int iteration) {
+ return in;
}
int InputLayer::initialize(bool last) {
return status;
}
-Tensor LossLayer::forwarding(Tensor output, Tensor label, int &status) {
- input = output;
- Tensor y2 = label;
- Tensor y = output;
+sharedTensor LossLayer::forwarding(sharedTensor in, sharedTensor label) {
+ input = *in;
+ Tensor y2 = *label;
+ Tensor y = input;
Tensor l;
switch (cost) {
} break;
case COST_ENTROPY: {
- status = ML_ERROR_NOT_SUPPORTED;
- ml_loge("Error: Cross Entropy not supported without softmax or sigmoid.");
- return y;
+ throw std::runtime_error(
+ "Error: Cross Entropy not supported without softmax or sigmoid.");
}
case COST_UNKNOWN:
/** intended */
- default: {
- status = ML_ERROR_NOT_SUPPORTED;
- ml_loge("Error: Unknown cost.");
- return y;
- }
+ default: { throw std::runtime_error("Error: Unknown cost."); }
}
updateLoss(l);
- status = ML_ERROR_NONE;
- return y;
+ return MAKE_SHARED_TENSOR(std::move(y));
}
void LossLayer::updateLoss(const Tensor &l) {
this->loss = from->loss;
}
-Tensor LossLayer::backwarding(Tensor derivative, int iteration) {
+sharedTensor LossLayer::backwarding(sharedTensor derivative, int iteration) {
Tensor ret_derivative;
- Tensor y2 = derivative;
+ Tensor y2 = *derivative;
Tensor y = input;
switch (cost) {
throw std::runtime_error("Unknown cost.");
}
- return ret_derivative;
+ return MAKE_SHARED_TENSOR(std::move(ret_derivative));
}
-Tensor LossLayer::forwarding(Tensor in, int &status) {
- status = ML_ERROR_NOT_SUPPORTED;
- return in;
+sharedTensor LossLayer::forwarding(sharedTensor in) {
+ throw std::runtime_error("Not supported.");
}
void LossLayer::setProperty(const PropertyType type, const std::string &value) {
/**
* @brief forward propagation using layers object which has layer
*/
-Tensor NeuralNetwork::forwarding(Tensor input, int &status) {
- Tensor X = input;
+sharedTensor NeuralNetwork::forwarding(sharedTensor input) {
+ sharedTensor X = input;
/** Do not forward the loss layer, as label is not available */
- for (unsigned int i = 0; i < layers.size() - 1; i++) {
- X = layers[i]->forwarding(X, status);
- if (status != ML_ERROR_NONE)
- break;
- }
+ for (unsigned int i = 0; i < layers.size() - 1; i++)
+ X = layers[i]->forwarding(X);
+
return X;
}
/**
* @brief forward propagation using layers object which has layer
*/
-Tensor NeuralNetwork::forwarding(Tensor input, Tensor output, int &status) {
- Tensor X = input;
- Tensor Y2 = output;
-
- X = forwarding(input, status);
- if (status != ML_ERROR_NONE)
- return X;
+sharedTensor NeuralNetwork::forwarding(sharedTensor input, sharedTensor label) {
+ sharedTensor X;
+ X = forwarding(input);
X = std::static_pointer_cast<LossLayer>(layers[layers.size() - 1])
- ->forwarding(X, Y2, status);
+ ->forwarding(X, label);
+
return X;
}
* Call backwarding function of layer in reverse order
* No need to call at first Input Layer (No data to be updated)
*/
-int NeuralNetwork::backwarding(Tensor input, Tensor expected_output,
- int iteration) {
- int status = ML_ERROR_NONE;
- Tensor Y2 = expected_output;
- Tensor X = input;
- Tensor Y = forwarding(X, Y2, status);
- if (status != ML_ERROR_NONE)
- return status;
-
- for (unsigned int i = layers.size() - 1; i > 0; i--) {
- Y2 = layers[i]->backwarding(Y2, iteration);
- }
- return status;
+void NeuralNetwork::backwarding(sharedTensor input, sharedTensor label,
+ int iteration) {
+
+ forwarding(input, label);
+
+ sharedTensor output = label;
+ for (unsigned int i = layers.size() - 1; i > 0; i--)
+ output = layers[i]->backwarding(output, iteration);
}
float NeuralNetwork::getLoss() {
while (true) {
vec_4d in, label;
if (data_buffer->getDataFromBuffer(nntrainer::BUF_TRAIN, in, label)) {
- status =
- backwarding(nntrainer::Tensor(in), nntrainer::Tensor(label), iter++);
- if (status != ML_ERROR_NONE) {
+ try {
+ backwarding(MAKE_SHARED_TENSOR(in), MAKE_SHARED_TENSOR(label),
+ iter++);
+ } catch (...) {
data_buffer->clear(nntrainer::BUF_TRAIN);
ml_loge("Error: training error in #%d/%d.", i + 1, epoch);
- return status;
+ std::rethrow_exception(std::current_exception());
}
std::cout << "#" << i + 1 << "/" << epoch;
data_buffer->displayProgress(count++, nntrainer::BUF_TRAIN, getLoss());
vec_4d in, label;
if (data_buffer->getDataFromBuffer(nntrainer::BUF_VAL, in, label)) {
for (int i = 0; i < batch_size; ++i) {
- nntrainer::Tensor X = nntrainer::Tensor({in[i]});
- nntrainer::Tensor Y2 = nntrainer::Tensor({label[i]});
- nntrainer::Tensor Y = forwarding(X, Y2, status);
+ sharedTensor X = MAKE_SHARED_TENSOR(Tensor({in[i]}));
+ sharedTensor Y2 = MAKE_SHARED_TENSOR(Tensor({label[i]}));
+ sharedTensor Y;
+ try {
+ Y = forwarding(X, Y2);
+ } catch (...) {
- if (status != ML_ERROR_NONE) {
ml_loge("Error: forwarding the network resulted in error.");
- return status;
+ std::rethrow_exception(std::current_exception());
}
- if (Y.argmax() == Y2.argmax())
+ if (Y->argmax() == Y2->argmax())
right++;
valloss += getLoss();
tcases++;
return status;
}
-Tensor Pooling2DLayer::forwarding(Tensor in, int &status) {
- hidden = Tensor(in.batch(), output_dim.channel(), output_dim.height(),
+sharedTensor Pooling2DLayer::forwarding(sharedTensor in) {
+ input = *in;
+
+ hidden = Tensor(input.batch(), output_dim.channel(), output_dim.height(),
output_dim.width());
hidden.setZero();
- for (unsigned int b = 0; b < in.batch(); ++b) {
- Tensor in_padded = zero_pad(b, in, padding);
- Tensor result = pooling2d(b, in_padded, status);
+
+ for (unsigned int b = 0; b < input.batch(); ++b) {
+ Tensor in_padded = zero_pad(b, input, padding);
+ Tensor result = pooling2d(b, in_padded);
memcpy(hidden.getAddress(b * hidden.getDim().getFeatureLen()),
result.getData(), result.getDim().getDataLen() * sizeof(float));
}
- return hidden;
+
+ return MAKE_SHARED_TENSOR(hidden);
}
-Tensor Pooling2DLayer::backwarding(Tensor derivative, int iteration) {
+sharedTensor Pooling2DLayer::backwarding(sharedTensor derivative,
+ int iteration) {
unsigned int batch = input_dim.batch();
unsigned int channel = input_dim.channel();
unsigned int height = input_dim.height();
float *out = result.getData();
switch (pooling_type) {
case PoolingType::max: {
- for (unsigned int i = 0; i < derivative.getDim().getDataLen(); ++i) {
- out[max_idx[i]] += derivative.getData()[i];
+ for (unsigned int i = 0; i < derivative->getDim().getDataLen(); ++i) {
+ out[max_idx[i]] += derivative->getData()[i];
}
} break;
case PoolingType::average: {
for (unsigned int j = 0; j <= height - p_height; j += stride[0]) {
K = 0;
for (unsigned int k = 0; k <= width - p_width; k += stride[1]) {
- float del = derivative.getValue(b, i, J, K) / (p_size);
+ float del = derivative->getValue(b, i, J, K) / (p_size);
for (unsigned int pi = 0; pi < p_height; ++pi) {
for (unsigned int pj = 0; pj < p_width; ++pj) {
result.setValue(b, i, j + pi, k + pj,
}
} break;
case PoolingType::global_max: {
- for (unsigned int i = 0; i < derivative.getDim().getDataLen(); ++i) {
- out[max_idx[i]] += derivative.getData()[i];
+ for (unsigned int i = 0; i < derivative->getDim().getDataLen(); ++i) {
+ out[max_idx[i]] += derivative->getData()[i];
}
} break;
case PoolingType::global_average: {
unsigned int p_size = width * height;
for (unsigned int b = 0; b < batch; ++b) {
for (unsigned int i = 0; i < channel; ++i) {
- float del = derivative.getValue(b, i, 0, 0) / (p_size);
+ float del = derivative->getValue(b, i, 0, 0) / (p_size);
for (unsigned int j = 0; j < height; ++j) {
for (unsigned int k = 0; k < width; ++k) {
result.setValue(b, i, j, k, del);
} break;
default:
- ml_loge("Error: Unknown Pooling Type");
- break;
+ throw std::runtime_error("Error: Unknown Pooling Type");
}
- return result;
+ return MAKE_SHARED_TENSOR(std::move(result));
}
int Pooling2DLayer::setSize(int *size, PropertyType type) {
}
}
-Tensor Pooling2DLayer::pooling2d(unsigned int batch, Tensor in, int &status) {
+Tensor Pooling2DLayer::pooling2d(unsigned int batch, Tensor &in) {
unsigned int channel = in.channel();
unsigned int height = in.height();
unsigned int width = in.width();
} break;
default:
ml_loge("Error: Unknown Pooling Type");
- status = ML_ERROR_INVALID_PARAMETER;
+ throw std::runtime_error("Error: Unknown Pooling Type");
break;
}
#include <pooling2d_layer.h>
#include <util_func.h>
+using nntrainer::sharedTensor;
+
template <typename LayerType>
class nntrainer_abstractLayer : public ::testing::Test {
protected:
virtual int reinitialize(bool _last_layer = false) {
int status = super::reinitialize(_last_layer);
- label = nntrainer::Tensor(layer.getOutputDimension());
+ label = MAKE_SHARED_TENSOR(nntrainer::Tensor(layer.getOutputDimension()));
loadFile("tc_fc_1_FCLayer.in", in);
loadFile("tc_fc_1_FCKernel.in", layer);
- loadFile("tc_fc_1_FCLabel.in", label);
+ loadFile("tc_fc_1_FCLabel.in", label.get()[0]);
layers.clear();
return status;
}
void matchForwarding(const char *file) {
- nntrainer::Tensor out;
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ sharedTensor out;
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)));
if (layers.size() > 0) {
for (unsigned int idx = 0; idx < layers.size() - 1; idx++) {
- out = layers[idx]->forwarding(out, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layers[idx]->forwarding(out));
}
if (layers.back()->getType() == nntrainer::LAYER_LOSS) {
std::shared_ptr<nntrainer::LossLayer> loss_layer =
std::static_pointer_cast<nntrainer::LossLayer>(layers.back());
- out = loss_layer->forwarding(out, label, status);
+ EXPECT_NO_THROW(out = loss_layer->forwarding(out, label));
} else {
- out = layers.back()->forwarding(out, status);
+ EXPECT_NO_THROW(out = layers.back()->forwarding(out));
}
EXPECT_EQ(status, ML_ERROR_NONE);
}
- matchOutput(out, file);
+ matchOutput(out.get()[0], file);
}
void matchLoss(const char *file) {
const char *file_g, const bool with_loss = false) {
int idx = layers.size() - 1;
- nntrainer::Tensor def_derivative = constant(1.0, 3, 1, 1, 15);
- nntrainer::Tensor back_out;
+ sharedTensor def_derivative =
+ MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 15));
+ sharedTensor back_out;
if (layers.size() && layers.back()->getType() == nntrainer::LAYER_LOSS) {
if (with_loss) {
- back_out = layers.back()->backwarding(label, 1);
+ EXPECT_NO_THROW(back_out = layers.back()->backwarding(label, 1));
} else {
back_out = def_derivative;
}
}
for (; idx >= 0; --idx)
- back_out = layers[idx]->backwarding(back_out, 1);
+ EXPECT_NO_THROW(back_out = layers[idx]->backwarding(back_out, 1));
- back_out = layer.backwarding(back_out, 1);
- matchOutput(back_out, file_dx);
+ EXPECT_NO_THROW(back_out = layer.backwarding(back_out, 1));
+ matchOutput(*back_out.get(), file_dx);
loadUpdatedWeightsGradients(file_uw, file_g);
matchUpdatedWeightsGradients();
}
}
- nntrainer::Tensor label;
+ sharedTensor label;
std::vector<nntrainer::Tensor> new_w;
std::vector<nntrainer::Tensor> grad;
std::vector<std::shared_ptr<nntrainer::Layer>> layers;
setOptimizer(nntrainer::OptType::adam, "learning_rate=1.0");
- nntrainer::Tensor out = layer.forwarding(in, status);
+ sharedTensor out;
+
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)));
nntrainer::Tensor derivatives(3, 1, 1, 15);
derivatives.getData()[i] = 1.0;
}
- nntrainer::Tensor result = layer.backwarding(derivatives, 1);
+ nntrainer::Tensor result;
+ EXPECT_NO_THROW(
+ result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
matchOutput(result, "tc_fc_1_goldenFCGradientAdam.out");
TEST_F(nntrainer_BatchNormalizationLayer,
DISABLED_forward_backward_training_01_p) {
- int status = ML_ERROR_NONE;
layer.setTrainable(true);
- nntrainer::Tensor forward_result = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ sharedTensor forward_result;
+
+ EXPECT_NO_THROW(forward_result = layer.forwarding(MAKE_SHARED_TENSOR(in)));
- matchOutput(forward_result, "tc_bn_1_goldenBNResultForward.out");
+ matchOutput(forward_result.get()[0], "tc_bn_1_goldenBNResultForward.out");
- nntrainer::Tensor backward_result =
- layer.backwarding(constant(1.0, 3, 1, 4, 5), 1);
+ nntrainer::Tensor backward_result;
+ EXPECT_NO_THROW(
+ backward_result =
+ layer.backwarding(MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 4, 5)), 1)
+ .get()[0]);
matchOutput(backward_result, "tc_bn_1_goldenBNLayerBackwardDx.out");
}
EXPECT_EQ(status, ML_ERROR_NONE);
}
+
+ nntrainer::Tensor result;
};
TEST_F(nntrainer_Conv2DLayer, print_01_p) {
loadFile("tc_conv2d_1_conv2DLayer.in", in);
loadFile("tc_conv2d_1_conv2DKernel.in", layer);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_conv2d_1_goldenConv2DResult.out");
}
loadFile("tc_conv2d_2_conv2DLayer.in", in);
loadFile("tc_conv2d_2_conv2DKernel.in", layer);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_conv2d_2_goldenConv2DResult.out");
}
loadFile("tc_conv2d_1_conv2DLayer.in", in);
loadFile("tc_conv2d_1_conv2DKernel.in", layer);
- out = layer.forwarding(in, status);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
- nntrainer::Tensor result = layer.backwarding(derivatives, 1);
+ EXPECT_NO_THROW(
+ result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
nntrainer::UpdatableParam *param_data = layer.getParams().get();
loadFile("tc_conv2d_2_conv2DLayer.in", in);
loadFile("tc_conv2d_2_conv2DKernel.in", layer);
- out = layer.forwarding(in, status);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
for (unsigned int i = 0; i < derivatives.getDim().getDataLen(); ++i) {
derivatives.getData()[i] = 1.0;
}
- nntrainer::Tensor result = layer.backwarding(derivatives, 1);
+ EXPECT_NO_THROW(
+ result = layer.backwarding(MAKE_SHARED_TENSOR(derivatives), 1).get()[0]);
nntrainer::UpdatableParam *param_data = layer.getParams().get();
for (unsigned int i = 0; i < filter_size * 2; ++i) {
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
-
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
-
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Daverage.out");
}
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_max.out");
}
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dglobal_average.out");
}
reinitialize();
loadFile("tc_pooling2d_2.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_max.out");
}
loadFile("tc_pooling2d_2.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dglobal_average.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- in = layer.backwarding(grad, 0);
+ EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2DmaxGrad.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
- nntrainer::Tensor grad(out.getDim());
+ sharedTensor grad = MAKE_SHARED_TENSOR(out.getDim());
- for (unsigned int i = 0; i < grad.length(); ++i) {
- grad.getData()[i] = 1.0;
+ for (unsigned int i = 0; i < grad->length(); ++i) {
+ grad->getData()[i] = 1.0;
}
- in = layer.backwarding(grad, 0);
+ EXPECT_NO_THROW(in = layer.backwarding(grad, 0).get()[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2DaverageGrad.out");
}
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- in = layer.backwarding(grad, 0);
+ EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_maxGrad.out");
}
reinitialize();
loadFile("tc_pooling2d_1.in", in);
- out = layer.forwarding(in, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
nntrainer::Tensor grad(out.getDim());
grad.getData()[i] = 1.0;
}
- in = layer.backwarding(grad, 0);
+ EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(grad), 0).get()[0]);
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dglobal_averageGrad.out");
}
loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", in);
- out = layer.forwarding(in, status);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_1_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", in);
- out = layer.forwarding(in, status);
+ EXPECT_NO_THROW(out = layer.forwarding(MAKE_SHARED_TENSOR(in)).get()[0]);
matchOutput(out, "tc_pooling2d_2_goldenPooling2Dmax.out");
}
loadFile("tc_pooling2d_1_goldenPooling2Dmax.out", out);
- in = layer.backwarding(out, 0);
+ EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(out), 0).get()[0]);
EXPECT_EQ(in.getDim(), nntrainer::TensorDim(1, 2, 4, 4));
matchOutput(in, "tc_pooling2d_1_goldenPooling2Dmax.out");
loadFile("tc_pooling2d_2_goldenPooling2Dmax.out", out);
- in = layer.backwarding(out, 0);
+ EXPECT_NO_THROW(in = layer.backwarding(MAKE_SHARED_TENSOR(out), 0).get()[0]);
EXPECT_EQ(in.getDim(), nntrainer::TensorDim(2, 2, 4, 4));
matchOutput(in, "tc_pooling2d_2_goldenPooling2Dmax.out");
}
TEST(nntrainer_ActivationLayer, forward_backward_01_p) {
- int status = ML_ERROR_NONE;
int batch = 3;
int channel = 1;
int height = 1;
nntrainer::Tensor expected(batch, channel, height, width);
GEN_TEST_INPUT(expected,
nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1)));
- nntrainer::Tensor result = layer.forwarding(input, status);
- EXPECT_EQ(status, ML_ERROR_NONE);
+ nntrainer::Tensor result;
+ EXPECT_NO_THROW(result =
+ layer.forwarding(MAKE_SHARED_TENSOR(input)).get()[0]);
EXPECT_TRUE(result == expected);
expected.copy(input);
- result = layer.backwarding(constant(1.0, 3, 1, 1, 10), 1);
+ EXPECT_NO_THROW(
+ result =
+ layer.backwarding(MAKE_SHARED_TENSOR(constant(1.0, 3, 1, 1, 10)), 1)
+ .get()[0]);
GEN_TEST_INPUT(expected,
nntrainer::ActivationLayer::reluPrime(
nntrainer::ActivationLayer::relu((l - 4) * 0.1 * (i + 1))));