{
for (int i = 0; i < numInps; i++)
{
- String inpfile = _tf(basename + ".input_" + (i + '0') + ".npy");
+ String inpfile = _tf(basename + cv::format(".input_%d.npy", i));
inps.push_back(blobFromNPY(inpfile));
}
}
{
for (int i = 0; i < numOuts; i++)
{
- String outfile = _tf(basename + "_" + (i + '0') + ".npy");
+ String outfile = _tf(basename + cv::format("_%d.npy", i));
refs.push_back(blobFromNPY(outfile));
}
}
{
for (int i = 0; i < numInps; i++)
{
- net.setInput(inps[i], inp_name + "_" + (i + '0'));
+ net.setInput(inps[i], inp_name + cv::format("_%d", i));
}
}
else
TEST_P(Test_Caffe_layers, DeConvolution)
{
+ if(target == DNN_TARGET_CUDA_FP16)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA_FP16);
testLayerUsingCaffeModels("layer_deconvolution", true, false);
}
TEST_P(Test_Caffe_layers, MVN)
{
+ if(backend == DNN_BACKEND_CUDA)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* MVN is unsupported */
+
testLayerUsingCaffeModels("layer_mvn");
}
net.setPreferableTarget(target);
Mat out = net.forward();
- normAssert(ref, out, "", default_l1, default_lInf);
+ double l1 = default_l1, lInf = default_lInf;
+ if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.0002;
+ lInf = 0.0005;
+ }
+ normAssert(ref, out, "", l1, lInf);
}
class Layer_LSTM_Test : public ::testing::Test
double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-5;
double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-4;
+ if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 2e-4;
+ lInf = 9e-4;
+ }
normAssert(out, ref, "", l1, lInf);
}
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+ if(backend == DNN_BACKEND_CUDA)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* Proposal layer is unsupported */
Net net = readNetFromCaffe(_tf("net_faster_rcnn_proposal.prototxt"));
double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-5;
double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-4;
+ if (target == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 7e-5;
+ lInf = 0.0005;
+ }
normAssert(out, ref, "", l1, lInf);
}
0.25, 0.0, 1.0, 1.0,
0.1f, 0.1f, 0.2f, 0.2f,
0.1f, 0.1f, 0.2f, 0.2f);
- double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2e-5 : 1e-5;
+ double l1 = 1e-5;
+ if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD || target == DNN_TARGET_CUDA_FP16)
+ l1 = 2e-5;
normAssert(out.reshape(1, 4), ref, "", l1);
}
// Output values are in range [0, 637.5].
double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.06 : 1e-6;
double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.3 : 1e-5;
+ if (targetId == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.06;
+ lInf = 0.3;
+ }
normAssert(out, ref, "", l1, lInf);
}
net.setPreferableTarget(targetId);
Mat out = net.forward();
- double l1 = (targetId == DNN_TARGET_OPENCL_FP16) ? 5e-2 : 1e-5;
- double lInf = (targetId == DNN_TARGET_OPENCL_FP16) ? 7e-2 : 1e-4;
+ double l1 = 1e-5, lInf = 1e-4;
+ if (targetId == DNN_TARGET_OPENCL_FP16)
+ {
+ l1 = 5e-2;
+ lInf = 7e-2;
+ }
+ else if (targetId == DNN_TARGET_CUDA_FP16)
+ {
+ l1 = 0.06;
+ lInf = 0.07;
+ }
for (int n = 0; n < inpShapeVec[0]; ++n)
{
for (int c = 0; c < inpShapeVec[1]; ++c)
int backendId = get<0>(get<1>(GetParam()));
int targetId = get<1>(get<1>(GetParam()));
+ if (backendId == DNN_BACKEND_CUDA && weighted)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
+
Net net;
LayerParams lp;
lp.type = "Eltwise";
lp.name = "testLayer";
lp.set<std::string>("output_channels_mode", "input_0");
+ if (backendId == DNN_BACKEND_CUDA && weighted)
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA);
+
const int inpShapes[][4] = {{1, 4, 2, 2}, {1, 2, 2, 2}, {1, 3, 2, 2}};
const int out_channels = inpShapes[0][1];
std::vector<String> inpNames(3);
static testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsForFusionTests()
{
- return dnnBackendsAndTargets(false, false, true, false); // OCV OpenCL + OCV CPU
+ return dnnBackendsAndTargets(false, false, true, false, false, false); // OCV OpenCL + OCV CPU
}
};