From: dkurt
Date: Tue, 27 Jun 2017 11:52:46 +0000 (+0300)
Subject: Fixed some bugs from Halide tests
X-Git-Tag: accepted/tizen/6.0/unified/20201030.111113~897^2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=121789f78e6918ddcc048d09e319c1d11e2db16e;p=platform%2Fupstream%2Fopencv.git

Fixed some bugs from Halide tests
---

diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index 97f7584..7245f33 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -192,7 +192,7 @@ struct ReLUFunctor
         Halide::Var x("x"), y("y"), c("c"), n("n");
         if (slope)
         {
-            top(x, y, c, n) = select(input >= 0.0f, input, slope);
+            top(x, y, c, n) = select(input >= 0.0f, input, slope * input);
         }
         else
         {
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index 9f790da..5bf1b60 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -77,7 +77,6 @@ public:
             wpadding.setTo(Scalar::all(0.));
             weightsMat = weightsBuf.colRange(0, vecsize);
             blobs[0].copyTo(weightsMat);
-            blobs[0] = weightsMat;
         }
 
         if (bias)
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index c572a84..3bcb0f8 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -430,7 +430,7 @@ TEST_P(ReLU, Accuracy)
 }
 
 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, ReLU, Values(
-/*negative slope*/ 2.0f, 0.3f, -0.1f
+/*negative slope*/ 2.0f, 0.3f, -0.1f, 0.0f
 ));
 
 typedef TestWithParam > NoParamActivation;
@@ -515,12 +515,7 @@ TEST_P(Concat, Accuracy)
 
     Net net;
 
-    LayerParams concatParam;
-    concatParam.type = "Concat";
-    concatParam.name = "testLayer";
-    int concatId = net.addLayer(concatParam.name, concatParam.type, concatParam);
-    net.connect(0, 0, concatId, 0);
-
+    std::vector<int> convLayerIds(numChannels.channels);
     for (int i = 0, n = numChannels.channels; i < n; ++i)
     {
         if (!numChannels[i])
@@ -540,9 +535,18 @@ TEST_P(Concat, Accuracy)
         convParam.name = ss.str();
         convParam.blobs.push_back(weights);
 
-        int convId = net.addLayer(convParam.name, convParam.type, convParam);
-        net.connect(0, 0, convId, 0);
-        net.connect(convId, 0, concatId, i + 1);
+        convLayerIds[i] = net.addLayer(convParam.name, convParam.type, convParam);
+        net.connect(0, 0, convLayerIds[i], 0);
+    }
+
+    LayerParams concatParam;
+    concatParam.type = "Concat";
+    concatParam.name = "testLayer";
+    int concatId = net.addLayer(concatParam.name, concatParam.type, concatParam);
+    net.connect(0, 0, concatId, 0);
+    for (int i = 0; i < convLayerIds.size(); ++i)
+    {
+        net.connect(convLayerIds[i], 0, concatId, i + 1);
     }
 
     Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
@@ -578,12 +582,7 @@ TEST_P(Eltwise, Accuracy)
 
     Net net;
 
-    LayerParams eltwiseParam;
-    eltwiseParam.type = "Eltwise";
-    eltwiseParam.name = "testLayer";
-    int eltwiseId = net.addLayer(eltwiseParam.name, eltwiseParam.type, eltwiseParam);
-    net.connect(0, 0, eltwiseId, 0);
-
+    std::vector<int> convLayerIds(numConv);
     for (int i = 0; i < numConv; ++i)
     {
         Mat weights({inSize[0], inSize[0], 1, 1}, CV_32F);
@@ -600,9 +599,18 @@ TEST_P(Eltwise, Accuracy)
         convParam.name = ss.str();
         convParam.blobs.push_back(weights);
 
-        int convId = net.addLayer(convParam.name, convParam.type, convParam);
-        net.connect(0, 0, convId, 0);
-        net.connect(convId, 0, eltwiseId, i + 1);
+        convLayerIds[i] = net.addLayer(convParam.name, convParam.type, convParam);
+        net.connect(0, 0, convLayerIds[i], 0);
+    }
+
+    LayerParams eltwiseParam;
+    eltwiseParam.type = "Eltwise";
+    eltwiseParam.name = "testLayer";
+    int eltwiseId = net.addLayer(eltwiseParam.name, eltwiseParam.type, eltwiseParam);
+    net.connect(0, 0, eltwiseId, 0);
+    for (int i = 0; i < numConv; ++i)
+    {
+        net.connect(convLayerIds[i], 0, eltwiseId, i + 1);
     }
 
     Mat input({1, inSize[0], inSize[1], inSize[2]}, CV_32F);
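
The first hunk restores the intended leaky-ReLU semantics: for negative inputs
the old Halide expression returned the constant slope itself instead of
scaling the input by it. A minimal plain-C++ sketch of what the patched
select() expression computes (the leakyReLU helper below is illustrative
only, not part of the patch):

    #include <cstdio>

    // Mirrors the patched Halide expression
    // select(input >= 0.0f, input, slope * input):
    // non-negative values pass through unchanged, negative values are
    // scaled by the slope. The pre-patch code returned `slope` itself.
    static float leakyReLU(float x, float slope)
    {
        return x >= 0.0f ? x : slope * x;
    }

    int main()
    {
        const float slope = 0.3f;  // one of the slopes the test sweeps
        const float xs[] = { -2.0f, -0.5f, 0.0f, 1.5f };
        for (float x : xs)
            std::printf("leakyReLU(% .1f) = % .2f\n", x, leakyReLU(x, slope));
        return 0;  // prints -0.60, -0.15, 0.00, 1.50
    }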