graph.topologicalSort();
setExecutionOrder();
+ forward_iter_end = (*(cend() - 1)).get();
inPlaceOptimize();
}
/** mark all the required nodes support backwarding */
- for (auto const &node_name : must_support_backwarding)
- LNODE(graph.getNode(node_name))->needsCalcDerivative(true);
+ for (auto const &node_name : must_support_backwarding) {
+ auto ln = LNODE(graph.getNode(node_name)).get();
+ ln->needsCalcDerivative(true);
+ }
}
void NetworkGraph::setBatchSize(unsigned int batch_size) {
TensorSpecV2::RequestType::PLACEHOLDER;
}
- /// @todo switch to check if model inputs instead and add a new tensor life
- /// span to represent from here to ~ max_fwd_exec_order, also this is only
- /// needed for the inference mode
+ /// @note The below needs to be enabled only for inference mode, but a
+ /// decision is needed on whether to separate inference initialization from
+ /// train initialization. This might not be worth optimizing because, in
+ /// general, the output of a neural net is very small.
if (lnode->getOutputConnections().size() == 0u) {
- std::for_each(out_specs.begin(), out_specs.end(), [](VarGradSpecV2 &spec) {
- spec.variable_spec.ls = TensorLifespan::MAX_LIFESPAN;
- });
+ std::for_each(out_specs.begin(), out_specs.end(),
+ [this](VarGradSpecV2 &spec) {
+ spec.variable_spec.additional_exec_order.push_back(
+ std::get<0>(forward_iter_end->getExecutionOrder()));
+ });
}
+
const std::vector<Var_Grad *> &outputs = tensor_manager->requestTensors(
out_specs, Manager::TensorGroupType::OUTPUT, lnode->getExecutionOrder(),
lnode->getName());
try {
markNodesForBackwarding();
backward_iter_end = computeBackwardEnd();
- forward_iter_end = (*(cend() - 1)).get();
} catch (std::exception &e) {
ml_loge(
"Backwarding required from layer which doesn't support backwarding: %s",
auto [forward, calc_grad, calc_deriv] = exec_order;
- std::vector<unsigned> order;
+ std::vector<unsigned> order = spec.additional_exec_order;
const auto name = scope + ":" + spec.name;
Var_Grad *Manager::requestTensor(const VarGradSpecV2 &spec,
TensorGroupType identify_as,
const GraphNode::ExecutionOrder &exec_order,
- const std::string &scope) {
+ const std::string &scope, bool expose_var,
+ bool expose_grad) {
NNTR_THROW_IF(identify_as == TensorGroupType::WEIGHT, std::invalid_argument)
<< "requestTensor with var grad spec cannot be identified as weights, use "
"requestTensor with weight spec instead";
std::vector<Var_Grad *> Manager::requestTensors(
const std::vector<VarGradSpecV2> &specs, TensorGroupType identify_as,
- const GraphNode::ExecutionOrder &exec_order, const std::string &scope) {
+ const GraphNode::ExecutionOrder &exec_order, const std::string &scope,
+ bool expose_var, bool expose_grad) {
std::vector<Var_Grad *> ret;
ret.reserve(specs.size());
for (auto &spec : specs) {
- ret.push_back(requestTensor(spec, identify_as, exec_order, scope));
+ ret.push_back(requestTensor(spec, identify_as, exec_order, scope,
+ expose_var, expose_grad));
}
return ret;
* @param identify_as identify as tensor as a group
* @param exec_order execution order to refer to
* @param scope common scope to attach in front of current specification name
+ * @param expose_var expose the variable tensor outside of the graph; when
+ * allocated, this tensor remains valid until max_exec_order
+ * @param expose_grad expose the gradient tensor outside of the graph; when
+ * allocated, this tensor remains valid until max_exec_order
* @return Tensor* tensor
*/
Var_Grad *requestTensor(const VarGradSpecV2 &spec,
TensorGroupType identify_as,
const GraphNode::ExecutionOrder &exec_order,
- const std::string &scope = "");
+ const std::string &scope = "",
+ bool expose_var = false, bool expose_grad = false);
/**
* @brief request vector of tensors with variable + gradient specification
* @param identify_as identify as tensor as a group
* @param exec_order execution order to refer to
* @param scope common scope to attach in front of current specification name
+ * @param expose_var expose the variable tensor outside of the graph; when
+ * allocated, this tensor remains valid until max_exec_order
+ * @param expose_grad expose the gradient tensor outside of the graph; when
+ * allocated, this tensor remains valid until max_exec_order
* @return Tensor* tensor
*/
std::vector<Var_Grad *> requestTensors(
const std::vector<VarGradSpecV2> &specs, TensorGroupType identify_as,
- const GraphNode::ExecutionOrder &exec_order, const std::string &scope = "");
+ const GraphNode::ExecutionOrder &exec_order, const std::string &scope = "",
+ bool expose_var = false, bool expose_grad = false);
private:
/** @todo: merge this list to one */
/** ONLY USED FOR READ_ONLY_VIEW, MAYBE_MODIFYING_VIEW */
unsigned int offset = 0u; /**< tensor offset */
std::string reference_name; /**< reference name */
+
+ /** ONLY FOR THE GRANULAR CONTROL OF LIFE OUTSIDE OF LAYER NODE */
+ /// @todo make this as an opaque information with PIMPL
+ std::vector<unsigned> additional_exec_order = {};
};
/**