* Added support for the ONNX "ReduceMean" layer (imported as global average pooling, since reducing the mean over all spatial axes is equivalent to GlobalAveragePool)
* Add ReduceMean test
* Fix ONNX importer
* Fix ReduceMean
* Add assert
* Split test
* Fix split test
if (isAsync)
CV_Error(Error::StsNotImplemented, "Default implementation fallbacks in asynchronous mode");
- CV_Assert(layer->supportBackend(DNN_BACKEND_OPENCV));
+ if (!layer->supportBackend(DNN_BACKEND_OPENCV))
+ CV_Error(Error::StsNotImplemented, format("Layer \"%s\" of type \"%s\" unsupported on OpenCV backend",
+ ld.name.c_str(), ld.type.c_str()));
+
if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
{
std::vector<UMat> umat_inputBlobs = OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers);
std::vector<size_t>& pads_begin, std::vector<size_t>& pads_end,
std::vector<size_t>& strides, cv::String &padMode)
{
- util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode);
-
globalPooling = params.has("global_pooling") &&
params.get<bool>("global_pooling");
if (globalPooling)
{
+ util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode);
if(params.has("kernel_h") || params.has("kernel_w") || params.has("kernel_size"))
{
CV_Error(cv::Error::StsBadArg, "In global_pooling mode, kernel_size (or kernel_h and kernel_w) cannot be specified");
else
{
util::getKernelSize(params, kernel);
+ util::getStrideAndPadding(params, pads_begin, pads_end, strides, padMode, kernel.size());
}
}
layerParams.set("ceil_mode", layerParams.has("pad_mode"));
layerParams.set("ave_pool_padded_area", framework_name == "pytorch");
}
- else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool")
+ else if (layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool" || layer_type == "ReduceMean")
{
+ CV_Assert(node_proto.input_size() == 1);
layerParams.type = "Pooling";
- layerParams.set("pool", layer_type == "GlobalAveragePool" ? "AVE" : "MAX");
- layerParams.set("global_pooling", true);
+ layerParams.set("pool", layer_type == "GlobalMaxPool"? "MAX" : "AVE");
+ layerParams.set("global_pooling", layer_type == "GlobalAveragePool" || layer_type == "GlobalMaxPool");
+
+ if (layer_type == "ReduceMean")
+ {
+ if (layerParams.get<int>("keepdims") == 0 || !layerParams.has("axes"))
+ CV_Error(Error::StsNotImplemented, "Unsupported mode of ReduceMean operation.");
+
+ MatShape inpShape = outShapes[node_proto.input(0)];
+ if (inpShape.size() != 4 && inpShape.size() != 5)
+ CV_Error(Error::StsNotImplemented, "Unsupported input shape of reduce_mean operation.");
+
+ DictValue axes = layerParams.get("axes");
+ CV_Assert(axes.size() <= inpShape.size() - 2);
+ std::vector<int> kernel_size(inpShape.size() - 2, 1);
+ for (int i = 0; i < axes.size(); i++) {
+ int axis = axes.get<int>(i);
+ CV_Assert_N(axis >= 2 + i, axis < inpShape.size());
+ kernel_size[axis - 2] = inpShape[axis];
+ }
+
+ layerParams.set("kernel_size", DictValue::arrayInt(&kernel_size[0], kernel_size.size()));
+ }
}
else if (layer_type == "Slice")
{
if (axes.size() != 1)
CV_Error(Error::StsNotImplemented, "Multidimensional unsqueeze");
- int dims[] = {1, -1};
+ MatShape inpShape = outShapes[node_proto.input(0)];
+ int axis = axes.getIntValue(0);
+ CV_Assert(0 <= axis && axis <= inpShape.size());
+ std::vector<int> outShape = inpShape;
+ outShape.insert(outShape.begin() + axis, 1);
layerParams.type = "Reshape";
- layerParams.set("axis", axes.getIntValue(0));
- layerParams.set("num_axes", 1);
- layerParams.set("dim", DictValue::arrayInt(&dims[0], 2));
+ layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size()));
}
else if (layer_type == "Reshape")
{
testONNXModels("clip", npy);
}
+// ReduceMean is imported as a Pooling layer with pool="AVE" (see the importer
+// hunk above), so this exercises that ONNX -> Pooling mapping end-to-end.
+TEST_P(Test_ONNX_layers, ReduceMean)
+{
+ testONNXModels("reduce_mean");
+}
+
+// ReduceMean over a 5-D input, which the importer turns into 3D average
+// pooling (the importer accepts 4-D and 5-D input shapes only).
+// NOTE(review): restricted to CPU — presumably the other targets lack 3D
+// pooling support for this case; confirm before widening the target set.
+TEST_P(Test_ONNX_layers, ReduceMean3D)
+{
+ if (target != DNN_TARGET_CPU)
+ throw SkipTestException("Only CPU is supported");
+ testONNXModels("reduce_mean3d");
+}
+
TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
{
testONNXModels("maxpooling_sigmoid");
runTensorFlowNet("l2_normalize_3d");
}
-TEST_P(Test_TensorFlow_layers, Split)
-{
- runTensorFlowNet("split");
-}
-
class Test_TensorFlow_nets : public DNNTestLayer {};
TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
+// Absorbs the former standalone "Split" test (removed above): "split" now runs
+// here first, followed by "split_equals", each with its own IE skip tag.
TEST_P(Test_TensorFlow_layers, split)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+ // NOTE(review): tag applied for any IE target — presumably the MYRIAD_2 tag
+ // only triggers a skip on Myriad-2 hardware; confirm against the tag list.
+ applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
+ runTensorFlowNet("split");
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
runTensorFlowNet("split_equals");
}