This patch removes the activation layer's input from the backwarding exec order, as the input of the activation layer is not used in backwarding.
This leads to changes in the unittests, as we cannot check all the
outputs, especially those close to the end of the model. So, with
optimization enabled, only the output layer's forwarding is checked.
Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
std::vector<unsigned int> var_exec_order(
{forwarding_order, calcGradient_order});
+ if (node.getType() == ActivationLayer::type)
+ var_exec_order = {forwarding_order};
+
if (node.getType() == MultiOutLayer::type)
var_exec_order = {forwarding_order};
auto it = nodes.begin();
for (; it != nodes.end() - 1; ++it) {
- it->forward(iteration, !(it + 1)->supportInPlace());
+ it->forward(iteration, it->isOutputNode() | !optimize);
}
it->forward(iteration, true);
*/
bool needsCalcDerivative() { return node->needsCalcDerivative(); }
+ /**
+ * @brief check if the node is an output node
+ *
+ * @return true if output node else false
+ */
+ bool isOutputNode() { return node->getNumOutputConnections() == 0; }
+
private:
NodeType node;
std::vector<nntrainer::Tensor> expected_output;