From: Parichay Kapoor
Date: Wed, 16 Jun 2021 03:03:56 +0000 (+0900)
Subject: [addition] Bug fix addition layer calcDerivative
X-Git-Tag: submit/tizen/20210827.122527~267
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1aebf41b1d90e43f2c9c48807f26fc440b2984f0;p=platform%2Fcore%2Fml%2Fnntrainer.git

[addition] Bug fix addition layer calcDerivative

Bug fix for the addition layer's calcDerivative(). The addition layer
assigned the same tensor derivative memory back to the layers connected
to it. If those connected layers operate in-place, this can lead to
wrong results.

Signed-off-by: Parichay Kapoor
---

diff --git a/nntrainer/layers/addition_layer.cpp b/nntrainer/layers/addition_layer.cpp
index cd8c5901..953d6e84 100644
--- a/nntrainer/layers/addition_layer.cpp
+++ b/nntrainer/layers/addition_layer.cpp
@@ -59,7 +59,12 @@ void AdditionLayer::forwarding(bool training) {
 
 void AdditionLayer::calcDerivative() {
   for (unsigned int i = 0; i < getNumInputs(); ++i) {
-    net_input[i]->getGradientRef() = net_hidden[0]->getGradientRef();
+    /**
+     * TODO: replace this with tensor assignment during optimization.
+     * Tensor assignment needs to make sure that the previous connected
+     * layers are not in-place.
+     */
+    net_input[i]->getGradientRef().copy(net_hidden[0]->getGradientRef());
   }
 }
 
diff --git a/nntrainer/layers/concat_layer.cpp b/nntrainer/layers/concat_layer.cpp
index 0ab0bf16..1c0f2bdd 100644
--- a/nntrainer/layers/concat_layer.cpp
+++ b/nntrainer/layers/concat_layer.cpp
@@ -99,6 +99,7 @@ void ConcatLayer::calcDerivative() {
     TensorDim in_dim = input_dim[idx];
 
     for (unsigned int b = 0; b < in_dim.batch(); ++b) {
+      // TODO: replace with tensor::copy/fill
      memcpy(
        net_input[idx]->getGradient().getAddress(b * in_dim.getFeatureLen()),
        net_hidden[0]->getGradient().getAddress(b * d.getFeatureLen() +
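
For illustration, a minimal self-contained sketch of the failure mode the
commit message describes. The Tensor struct below is a hypothetical
stand-in (a shared buffer plus a deep copy()), not nntrainer's actual
tensor class; it only shows why handing every input the same derivative
memory breaks when a connected layer works in-place, and why copying
fixes it.

    #include <iostream>
    #include <memory>
    #include <vector>

    struct Tensor {
      std::shared_ptr<std::vector<float>> buf; // shared storage

      // Deep copy into this tensor's own memory; stands in for the
      // Tensor::copy() used in the fix.
      void copy(const Tensor &src) { *buf = *src.buf; }
    };

    int main() {
      // Before the fix: plain assignment shares memory, so every input
      // of the addition layer aliases the single incoming derivative.
      Tensor incoming{std::make_shared<std::vector<float>>(1, 1.0f)};
      Tensor in0 = incoming;              // like getGradientRef() = ...
      Tensor in1 = incoming;
      (*in0.buf)[0] *= 0.5f;              // an in-place layer scales its derivative
      std::cout << (*in1.buf)[0] << '\n'; // 0.5 -- the other branch is corrupted

      // After the fix: each input owns its derivative, so in-place edits
      // on one branch cannot leak into another.
      Tensor fresh{std::make_shared<std::vector<float>>(1, 1.0f)};
      Tensor in2{std::make_shared<std::vector<float>>(1)};
      in2.copy(fresh);                    // like getGradientRef().copy(...)
      (*in2.buf)[0] *= 0.5f;
      std::cout << (*fresh.buf)[0] << '\n'; // 1 -- source stays intact
      return 0;
    }

The same reasoning explains the TODO in the diff: a plain tensor
assignment would be cheaper than a copy, but it is only safe once it can
be verified that none of the previously connected layers is in-place.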