- Check stop_cb before running forwarding/calcGradient/calcDerivative for
  every layer so that training stops more quickly when a stop is requested.

Signed-off-by: hyeonseok lee <hs89.lee@samsung.com>
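
A minimal usage sketch (not part of this patch): the graph always invokes
stop_cb with a nullptr userdata, so callers carry their state in the
callback's captures or in file-scope state rather than through the pointer.
The atomic flag and the control thread that would set it are hypothetical,
for illustration only; `model` and `iteration` stand in for an existing
NeuralNetwork instance and the current iteration count.

    #include <atomic>
    #include <functional>

    static std::atomic<bool> stop_requested{false}; // set by a control thread

    std::function<bool(void *)> stop_cb = [](void *user_data) {
      (void)user_data; // unused: the graph always passes nullptr
      return stop_requested.load();
    };

    // Both passes return early once stop_requested becomes true.
    model.forwarding(true, stop_cb);
    model.backwarding(iteration, stop_cb);
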
}
}
-sharedConstTensors NetworkGraph::forwarding(bool training) {
- for (auto iter = cbegin(); iter != cend(); iter++) {
+sharedConstTensors
+NetworkGraph::forwarding(bool training,
+ std::function<bool(void *userdata)> stop_cb) {
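+  /* Poll stop_cb before every layer so a stop request ends the forward
+   * pass without running the remaining layers. */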
+ for (auto iter = cbegin(); iter != cend() && !stop_cb(nullptr); iter++) {
auto const &ln = *iter;
PROFILE_TIME_START(profile_keys.at(ln->getType()));
void NetworkGraph::backwarding(
int iteration,
std::function<void(std::shared_ptr<LayerNode>, int)> &backwarding_op,
- std::function<void(Weight &, int)> &apply_grad_clip_op) const {
+ std::function<void(Weight &, int)> &apply_grad_clip_op,
+ std::function<bool(void *userdata)> stop_cb) const {
/**
* last layer backwarding is run out of this loop
*/
throw std::runtime_error(
"Error: last layer does not accept label, we can't train");
- for (auto iter = iter_begin; iter != iter_end; iter++) {
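+  /* Likewise, poll stop_cb before each layer's backwarding_op so a stop
+   * request ends the backward pass early. */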
+ for (auto iter = iter_begin; iter != iter_end && !stop_cb(nullptr); iter++) {
auto &ln = *iter;
PROFILE_TIME_START(profile_keys.at(ln->getType()));
backwarding_op(ln, iteration);
void NetworkGraph::flushCache() { tensor_manager->flushCache(); }
-void NetworkGraph::flushCacheExcept(unsigned int order) { tensor_manager->flushCacheExcept(order); }
+void NetworkGraph::flushCacheExcept(unsigned int order) {
+ tensor_manager->flushCacheExcept(order);
+}
void NetworkGraph::requestOptimizerVariable(
std::function<std::vector<TensorDim>(const TensorDim &)> cb,
* @param[in] training true if forwarding is on training
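+ * @param[in] stop_cb callback polled before every layer; return true to stop forwarding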
* @retval output tensors
*/
- sharedConstTensors forwarding(bool training = false);
+ sharedConstTensors forwarding(bool training = false,
+ std::function<bool(void *userdata)> stop_cb =
+ [](void *user_data) { return false; });
/**
* @brief backwarding the network graph
void backwarding(
int iteration,
std::function<void(std::shared_ptr<LayerNode>, int)> &backwarding_op,
- std::function<void(Weight &, int)> &apply_grad_clip_op) const;
+ std::function<void(Weight &, int)> &apply_grad_clip_op,
+ std::function<bool(void *userdata)> stop_cb = [](void *user_data) {
+ return false;
+ }) const;
/**
* @brief get begin iterator for the graph
/**
* @brief forward propagation using layers object which has layer
*/
-sharedConstTensors NeuralNetwork::forwarding(bool training) {
- return model_graph.forwarding(training);
+sharedConstTensors
+NeuralNetwork::forwarding(bool training,
+ std::function<bool(void *userdata)> stop_cb) {
+ return model_graph.forwarding(training, stop_cb);
}
/**
* Call backwarding function of layer in reverse order
* No need to call at first Input Layer (No data to be updated)
*/
-void NeuralNetwork::backwarding(int iteration) {
+void NeuralNetwork::backwarding(int iteration,
+ std::function<bool(void *userdata)> stop_cb) {
#ifdef DEBUG
NNTR_THROW_IF(!opt, std::invalid_argument) << "optimizer is null!";
#endif
std::function<void(std::shared_ptr<LayerNode>, int)> backwarding_op =
- [this](std::shared_ptr<LayerNode> node, int iteration) -> void {
+ [this, stop_cb](std::shared_ptr<LayerNode> node, int iteration) -> void {
/**
* Do not change this order:
* 1. calcGradient
model_graph.flushCacheExcept(std::get<2>(node->getExecutionOrder()));
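+  /* Return early on a stop request: this node's calcDerivative and the
+   * optimizer's gradient application are skipped. */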
+ if (stop_cb(nullptr)) {
+ return;
+ }
+
if (node->needsCalcDerivative())
node->calcDerivative();
opt_->applyGradient(opt_context);
};
- model_graph.backwarding(iteration, backwarding_op, apply_grad_clip_op);
+ model_graph.backwarding(iteration, backwarding_op, apply_grad_clip_op,
+ stop_cb);
}
void NeuralNetwork::save(const std::string &file_path,
DataBuffer &buffer) {
model_graph.flushCache();
- forwarding(true);
- backwarding(iter++);
+ forwarding(true, stop_cb);
+ backwarding(iter++, stop_cb);
if (!stop_cb(nullptr)) {
std::cout << "#" << epoch_idx << "/" << getEpochs();
/**
* @brief Forward Propagation of the neural network
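+ * @param[in] stop_cb callback polled during forwarding; return true to stop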
*/
- sharedConstTensors forwarding(bool training = true);
+ sharedConstTensors forwarding(bool training = true,
+ std::function<bool(void *userdata)> stop_cb =
+ [](void *user_data) { return false; });
/**
* @brief Forward Propagation of the neural network
* @brief Backward Propagation of the neural network
* @param[in] iteration Iteration Number for the optimizer
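+ * @param[in] stop_cb callback polled during backwarding; return true to stop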
*/
- void backwarding(int iteration);
+ void backwarding(int iteration, std::function<bool(void *userdata)> stop_cb =
+ [](void *user_data) { return false; });
/**
* @copydoc Model::save(const std::string &file_path, ml::train::ModelFormat