CV_SSE2=1 \
CV__DEBUG_NS_BEGIN= \
CV__DEBUG_NS_END= \
+ CV_DEPRECATED_EXTERNAL= \
CV_DEPRECATED=
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
# endif
#endif
+#ifndef CV_DEPRECATED_EXTERNAL
+# if defined(__OPENCV_BUILD)
+# define CV_DEPRECATED_EXTERNAL /* nothing */
+# else
+# define CV_DEPRECATED_EXTERNAL CV_DEPRECATED
+# endif
+#endif
+
+
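For illustration, a minimal sketch (not part of the patch; the struct name is hypothetical) of what the new macro buys: inside the library build, where __OPENCV_BUILD is defined, it expands to nothing, so internal code can keep touching a deprecated member without triggering warnings, while external users of the installed headers still get the deprecation attribute through CV_DEPRECATED.

struct LegacyParams                          // hypothetical example type, for illustration only
{
    CV_DEPRECATED_EXTERNAL int oldField;     // warns in user code, silent when building OpenCV itself
    int newField;                            // preferred replacement
};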
#ifndef CV_EXTERN_C
# ifdef __cplusplus
# define CV_EXTERN_C extern "C"
}
}
-#if CV_SIMD128
+#if CV_SIMD128 && !defined(__aarch64__)
static inline void
load3x3Matrix(const float* m, v_float32x4& m0, v_float32x4& m1, v_float32x4& m2, v_float32x4& m3)
{
m2 = v_float32x4(m[2], m[6], m[10], 0);
m3 = v_float32x4(m[3], m[7], m[11], 0);
}
+#endif
+#if CV_SIMD128
static inline v_int16x8
v_matmulvec(const v_int16x8 &v0, const v_int16x8 &m0, const v_int16x8 &m1, const v_int16x8 &m2, const v_int32x4 &m3, const int BITS)
{
ocv_cmake_hook_append(INIT_MODULE_SOURCES_opencv_dnn "${CMAKE_CURRENT_LIST_DIR}/cmake/hooks/INIT_MODULE_SOURCES_opencv_dnn.cmake")
endif()
-ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-shadow -Wno-parentheses -Wmaybe-uninitialized -Wsign-promo
- -Wmissing-declarations -Wmissing-prototypes
-)
-ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4701 /wd4100)
-
if(MSVC)
add_definitions( -D_CRT_SECURE_NO_WARNINGS=1 )
ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4244 /wd4267 /wd4018 /wd4355 /wd4800 /wd4251 /wd4996 /wd4146
)
else()
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-deprecated -Wmissing-prototypes -Wmissing-declarations -Wshadow
- -Wunused-parameter -Wunused-local-typedefs -Wsign-compare -Wsign-promo
- -Wundef -Wtautological-undefined-compare -Wignored-qualifiers -Wextra
- -Wunused-function -Wunused-const-variable -Wdeprecated-declarations
+ -Wunused-parameter -Wsign-compare
)
endif()
+if(NOT HAVE_CXX11)
+ ocv_warnings_disable(CMAKE_CXX_FLAGS -Wno-undef) # LANG_CXX11 from protobuf files
+endif()
+
if(APPLE_FRAMEWORK)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wshorten-64-to-32)
endif()
#suppress warnings in autogenerated caffe.pb.* files
ocv_warnings_disable(CMAKE_CXX_FLAGS
- -Wunused-parameter -Wundef -Wignored-qualifiers -Wno-enum-compare
- -Wdeprecated-declarations
/wd4125 /wd4267 /wd4127 /wd4244 /wd4512 /wd4702
/wd4456 /wd4510 /wd4610 /wd4800
/wd4701 /wd4703 # potentially uninitialized local/pointer variable 'value' used
int type;
Size kernel, stride;
int pad_l, pad_t, pad_r, pad_b;
- CV_DEPRECATED Size pad;
+ CV_DEPRECATED_EXTERNAL Size pad;
bool globalPooling;
bool computeMaxIdx;
String padMode;
{
public:
float pnorm, epsilon;
- CV_DEPRECATED bool acrossSpatial;
+ CV_DEPRECATED_EXTERNAL bool acrossSpatial;
static Ptr<NormalizeBBoxLayer> create(const LayerParams& params);
};
struct CV_EXPORTS_W DictValue
{
DictValue(const DictValue &r);
+ DictValue(bool i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i ? 1 : 0; } //!< Constructs integer scalar
DictValue(int64 i = 0) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
- CV_WRAP DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
+ CV_WRAP DictValue(int i) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = i; } //!< Constructs integer scalar
DictValue(unsigned p) : type(Param::INT), pi(new AutoBuffer<int64,1>) { (*pi)[0] = p; } //!< Constructs integer scalar
CV_WRAP DictValue(double p) : type(Param::REAL), pd(new AutoBuffer<double,1>) { (*pd)[0] = p; } //!< Constructs floating point scalar
CV_WRAP DictValue(const String &s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< Constructs string scalar
- DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< @overload
+ DictValue(const char *s) : type(Param::STRING), ps(new AutoBuffer<String,1>) { (*ps)[0] = s; } //!< @overload
template<typename TypeIter>
static DictValue arrayInt(TypeIter begin, int size); //!< Constructs integer array
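As an assumed usage sketch (the values are illustrative, not taken from the patch), the constructor set above lets scalars of different types be wrapped uniformly; the added bool overload stores the flag as an integer 0/1 instead of relying on implicit promotion:

cv::dnn::DictValue flag(true);       // new bool overload: stored as int64 value 1
cv::dnn::DictValue count(42);        // integer scalar
cv::dnn::DictValue scale(0.5);       // floating point scalar
cv::dnn::DictValue name("pool1");    // string scalar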
* This method is called after network has allocated all memory for input and output blobs
* and before inferencing.
*/
- CV_DEPRECATED virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);
+ CV_DEPRECATED_EXTERNAL
+ virtual void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output);
/** @brief Computes and sets internal parameters according to inputs, outputs and blobs.
* @param[in] inputs vector of already allocated input blobs
* @param[out] output allocated output blobs, which will store results of the computation.
* @param[out] internals allocated internal blobs
*/
- CV_DEPRECATED virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals);
+ CV_DEPRECATED_EXTERNAL
+ virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals);
/** @brief Given the @p input blobs, computes the output @p blobs.
* @param[in] inputs the input blobs.
* @overload
* @deprecated Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
*/
- CV_DEPRECATED void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
+ CV_DEPRECATED_EXTERNAL
+ void finalize(const std::vector<Mat> &inputs, CV_OUT std::vector<Mat> &outputs);
/** @brief
* @overload
PERF_TEST_P_(DNNTestNetwork, DenseNet_121)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 ||
- target == DNN_TARGET_MYRIAD))
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)))
throw SkipTestException("");
processNet("dnn/DenseNet_121.caffemodel", "dnn/DenseNet_121.prototxt", "",
Mat(cv::Size(224, 224), CV_32FC3));
PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_coco)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt", "",
Mat(cv::Size(368, 368), CV_32FC3));
PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt", "",
Mat(cv::Size(368, 368), CV_32FC3));
PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
// The same .caffemodel but modified .prototxt
// See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
PERF_TEST_P_(DNNTestNetwork, YOLOv3)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
Mat sample = imread(findDataFile("dnn/dog416.png", false));
Mat inp;
PERF_TEST_P_(DNNTestNetwork, EAST_text_detection)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
processNet("dnn/frozen_east_text_detection.pb", "", "", Mat(cv::Size(320, 320), CV_32FC3));
}
PoolingParameter_PoolMethod_STOCHASTIC);
break;
default:
- LOG(ERROR) << "Unknown pool method " << pool;
+ LOG(ERROR) << "Unknown pool method " << (int)pool;
is_fully_compatible = false;
}
} else {
while (layer_param->param_size() <= i) { layer_param->add_param(); }
layer_param->mutable_param(i)->set_name(v1_layer_param.param(i));
}
- ParamSpec_DimCheckMode mode;
+ ParamSpec_DimCheckMode mode = ParamSpec_DimCheckMode_STRICT;
for (int i = 0; i < v1_layer_param.blob_share_mode_size(); ++i) {
while (layer_param->param_size() <= i) { layer_param->add_param(); }
switch (v1_layer_param.blob_share_mode(i)) {
break;
default:
LOG(FATAL) << "Unknown blob_share_mode: "
- << v1_layer_param.blob_share_mode(i);
- break;
+ << (int)v1_layer_param.blob_share_mode(i);
+ CV_Error_(Error::StsError, ("Unknown blob_share_mode: %d", (int)v1_layer_param.blob_share_mode(i)));
}
layer_param->mutable_param(i)->set_share_mode(mode);
}
case V1LayerParameter_LayerType_THRESHOLD:
return "Threshold";
default:
- LOG(FATAL) << "Unknown V1LayerParameter layer type: " << type;
+ LOG(FATAL) << "Unknown V1LayerParameter layer type: " << (int)type;
return "";
}
}
-const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte.
+static const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte.
bool ReadProtoFromBinary(ZeroCopyInputStream* input, Message *proto) {
CodedInputStream coded_input(input);
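The rest of this function is truncated here; as a hedged sketch of the usual protobuf pattern (an assumption, not necessarily the exact lines of this function), a byte limit such as kProtoReadBytesLimit is applied to the coded stream before parsing so that models larger than protobuf's default 64 MB cap can be read:

    // Sketch (assumption): raise protobuf's message-size limit before parsing a large model.
    coded_input.SetTotalBytesLimit(kProtoReadBytesLimit, 536870912);  // hard limit, warning threshold
    return proto->ParseFromCodedStream(&coded_input);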
bool operator<(const LayerPin &r) const
{
- return lid < r.lid || lid == r.lid && oid < r.oid;
+ return lid < r.lid || (lid == r.lid && oid < r.oid);
}
bool operator ==(const LayerPin &r) const
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1;
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1);
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
{
- if( !fusion || preferableBackend != DNN_BACKEND_OPENCV &&
- preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
+ if( !fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
+ preferableBackend != DNN_BACKEND_INFERENCE_ENGINE))
return;
CV_TRACE_FUNCTION();
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ (backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}
#ifdef HAVE_OPENCL
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding || // By channels
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding;
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !padding);
}
class ChannelConcatInvoker : public ParallelLoopBody
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && crop_ranges.size() == 4;
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && crop_ranges.size() == 4);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && !_locPredTransposed && _bboxesNormalized && !_clip;
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && !_locPredTransposed && _bboxesNormalized && !_clip);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && (op != SUM || coeffs.empty());
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && (op != SUM || coeffs.empty()));
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1;
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1);
}
virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && (preferableTarget != DNN_TARGET_MYRIAD || type == CHANNEL_NRM);
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && (preferableTarget != DNN_TARGET_MYRIAD || type == CHANNEL_NRM));
}
#ifdef HAVE_OPENCL
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() &&
- !poolPad.width && !poolPad.height;
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && !poolPad.width && !poolPad.height);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4;
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4);
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
}
else
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() &&
- (type == MAX || type == AVE && !pad_t && !pad_l && !pad_b && !pad_r);
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() &&
+ (type == MAX || (type == AVE && !pad_t && !pad_l && !pad_b && !pad_r)));
}
#ifdef HAVE_OPENCL
src.isContinuous(), dst.isContinuous(),
src.type() == CV_32F, src.type() == dst.type(),
src.dims == 4, dst.dims == 4,
- ((poolingType == ROI || poolingType == PSROI) && dst.size[0] ==rois.size[0] || src.size[0] == dst.size[0]),
- poolingType == PSROI || src.size[1] == dst.size[1],
+ (((poolingType == ROI || poolingType == PSROI) && dst.size[0] == rois.size[0]) || src.size[0] == dst.size[0]),
+ poolingType == PSROI || src.size[1] == dst.size[1],
(mask.empty() || (mask.type() == src.type() && mask.size == dst.size)));
PoolingInvoker p;
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && preferableTarget != DNN_TARGET_MYRIAD;
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && preferableTarget != DNN_TARGET_MYRIAD);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE
{
- CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6);
+ CV_Assert((!usePeephole && blobs.size() == 3) || (usePeephole && blobs.size() == 6));
CV_Assert(inputs.size() == 1);
const MatShape& inp0 = inputs[0];
std::vector<Mat> input;
inputs_arr.getMatVector(input);
- CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6);
+ CV_Assert((!usePeephole && blobs.size() == 3) || (usePeephole && blobs.size() == 6));
CV_Assert(input.size() == 1);
const Mat& inp0 = input[0];
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
std::vector<Mat> inputs;
inputs_arr.getMatVector(inputs);
hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
- CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == (int)hasWeights + (int)hasBias);
+ CV_Assert((inputs.size() == 2 && blobs.empty()) || blobs.size() == (int)hasWeights + (int)hasBias);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1;
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1);
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && sliceRanges.size() == 1 && sliceRanges[0].size() == 4;
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && sliceRanges.size() == 1 && sliceRanges[0].size() == 4);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
- backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1 ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax;
+ (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax);
}
#ifdef HAVE_OPENCL
<< "p" << pad_w_ << "x" << pad_h_ << "_"
<< "num" << num_ << "_"
<< "M" << M_ << "_"
- << "activ" << fused_activ_ << "_"
+ << "activ" << (int)fused_activ_ << "_"
<< "eltwise" << fused_eltwise_ << "_"
<< precision;
bool InfEngineBackendLayer::supportBackend(int backendId)
{
return backendId == DNN_BACKEND_DEFAULT ||
- backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+ (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
}
void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
}
}
+#if 0
void printList(const tensorflow::AttrValue::ListValue &val)
{
std::cout << "(";
std::cout << std::endl;
}
}
+#endif
bool hasLayerAttr(const tensorflow::NodeDef &layer, const std::string &name)
{
using namespace ::google::protobuf;
using namespace ::google::protobuf::io;
-const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte.
-
void ReadTFNetParamsFromBinaryFileOrDie(const char* param_file,
tensorflow::GraphDef* param) {
CHECK(ReadProtoFromBinaryFile(param_file, param))
TEST_P(DNNTestNetwork, OpenPose_pose_coco)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
processNet("dnn/openpose_pose_coco.caffemodel", "dnn/openpose_pose_coco.prototxt",
Size(368, 368));
TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
processNet("dnn/openpose_pose_mpi.caffemodel", "dnn/openpose_pose_mpi.prototxt",
Size(368, 368));
TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
{
if (backend == DNN_BACKEND_HALIDE ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
// The same .caffemodel but modified .prototxt
// See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
case DNN_BACKEND_OPENCV: *os << "OCV"; return;
} // don't use "default:" to emit compiler warnings
- *os << "DNN_BACKEND_UNKNOWN(" << v << ")";
+ *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
}
static inline void PrintTo(const cv::dnn::Target& v, std::ostream* os)
case DNN_TARGET_OPENCL_FP16: *os << "OCL_FP16"; return;
case DNN_TARGET_MYRIAD: *os << "MYRIAD"; return;
} // don't use "default:" to emit compiler warnings
- *os << "DNN_TARGET_UNKNOWN(" << v << ")";
+ *os << "DNN_TARGET_UNKNOWN(" << (int)v << ")";
}
using opencv_test::tuple;
using namespace cv::dnn;
-static testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargets(
+static inline
+testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargets(
bool withInferenceEngine = true,
bool withHalide = false,
bool withCpuOCV = true
} // namespace
+
+namespace opencv_test {
+using namespace cv::dnn;
+
+static inline
+testing::internal::ParamGenerator<Target> availableDnnTargets()
+{
+ static std::vector<Target> targets;
+ if (targets.empty())
+ {
+ targets.push_back(DNN_TARGET_CPU);
+#ifdef HAVE_OPENCL
+ if (cv::ocl::useOpenCL())
+ targets.push_back(DNN_TARGET_OPENCL);
+#endif
+ }
+ return testing::ValuesIn(targets);
+}
+
+class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
+{
+public:
+ dnn::Backend backend;
+ dnn::Target target;
+ double default_l1, default_lInf;
+
+ DNNTestLayer()
+ {
+ backend = (dnn::Backend)(int)get<0>(GetParam());
+ target = (dnn::Target)(int)get<1>(GetParam());
+ getDefaultThresholds(backend, target, &default_l1, &default_lInf);
+ }
+
+ static void getDefaultThresholds(int backend, int target, double* l1, double* lInf)
+ {
+ if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+ {
+ *l1 = 4e-3;
+ *lInf = 2e-2;
+ }
+ else
+ {
+ *l1 = 1e-5;
+ *lInf = 1e-4;
+ }
+ }
+
+ static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
+ {
+ if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+ {
+#ifdef HAVE_OPENCL
+ if (!cv::ocl::useOpenCL())
+#endif
+ {
+ throw SkipTestException("OpenCL is not available/disabled in OpenCV");
+ }
+ }
+ if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ {
+ if (!checkMyriadTarget())
+ {
+ throw SkipTestException("Myriad is not available/disabled in OpenCV");
+ }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
+ if (inp && ref && inp->size[0] != 1)
+ {
+ // Myriad plugin supports only batch size 1. Slice a single sample.
+ if (inp->size[0] == ref->size[0])
+ {
+ std::vector<cv::Range> range(inp->dims, Range::all());
+ range[0] = Range(0, 1);
+ *inp = inp->operator()(range);
+
+ range = std::vector<cv::Range>(ref->dims, Range::all());
+ range[0] = Range(0, 1);
+ *ref = ref->operator()(range);
+ }
+ else
+ throw SkipTestException("Myriad plugin supports only batch size 1");
+ }
+#else
+ if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
+ inp->size[0] != 1 && inp->size[0] != ref->size[0])
+ throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
+
+#endif
+ }
+ }
+
+protected:
+ void checkBackend(Mat* inp = 0, Mat* ref = 0)
+ {
+ checkBackend(backend, target, inp, ref);
+ }
+};
+
+} // namespace
+
#endif
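A hypothetical sketch (test name, layer choice, and sizes are illustrative only, not part of the patch, and assume the usual dnn test TU context) of how the relocated DNNTestLayer fixture is typically consumed: checkBackend() skips unavailable backend/target pairs, and the per-target tolerances come from the fixture's constructor.

typedef DNNTestLayer ReLUSmokeTest;          // hypothetical test built on the fixture above

TEST_P(ReLUSmokeTest, Accuracy)
{
    checkBackend();                          // throws SkipTestException for unavailable backend/target pairs

    LayerParams lp;
    lp.type = "ReLU";
    lp.name = "relu1";
    Net net;
    net.addLayerToPrev(lp.name, lp.type, lp); // connects the layer to the network input

    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);

    int sz[] = {1, 3, 5, 5};
    Mat input(4, sz, CV_32F);
    randu(input, -1.0f, 1.0f);
    net.setInput(input);
    Mat out = net.forward();

    Mat ref = input.clone();                 // reference ReLU computed directly
    float* p = ref.ptr<float>();
    for (size_t i = 0; i < ref.total(); ++i)
        p[i] = p[i] > 0.0f ? p[i] : 0.0f;

    normAssert(ref, out, "", default_l1, default_lInf);  // tolerances from the fixture
}

INSTANTIATE_TEST_CASE_P(/**/, ReLUSmokeTest, dnnBackendsAndTargets());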
normAssert(outs[i].rowRange(0, numDets), ref);
if (numDets < outs[i].size[0])
+ {
EXPECT_EQ(countNonZero(outs[i].rowRange(numDets, outs[i].size[0])), 0);
+ }
}
}
net.setPreferableBackend(DNN_BACKEND_OPENCV);
Mat output = net.forward();
- if (i == 0) EXPECT_EQ(output.at<float>(0), 1);
- else if (i == 1) EXPECT_EQ(output.at<float>(0), 2);
- else if (i == 2) EXPECT_EQ(output.at<float>(0), 1);
+ if (i == 0) { EXPECT_EQ(output.at<float>(0), 1); }
+ else if (i == 1) { EXPECT_EQ(output.at<float>(0), 2); }
+ else if (i == 2) { EXPECT_EQ(output.at<float>(0), 1); }
}
LayerFactory::unregisterLayer("CustomType");
}
TEST_P(Test_ONNX_layers, Multiplication)
{
- if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16 ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+ if ((backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ||
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
throw SkipTestException("");
testONNXModels("mul");
}
TEST_P(Test_ONNX_nets, TinyYolov2)
{
if (cvtest::skipUnstableTests ||
- backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) {
+ (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))) {
throw SkipTestException("");
}
// output range: [-11; 8]
#include "opencv2/dnn.hpp"
#include "test_common.hpp"
-namespace opencv_test {
-using namespace cv::dnn;
-
-static testing::internal::ParamGenerator<Target> availableDnnTargets()
-{
- static std::vector<Target> targets;
- if (targets.empty())
- {
- targets.push_back(DNN_TARGET_CPU);
-#ifdef HAVE_OPENCL
- if (cv::ocl::useOpenCL())
- targets.push_back(DNN_TARGET_OPENCL);
-#endif
- }
- return testing::ValuesIn(targets);
-}
-
-class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
-{
-public:
- dnn::Backend backend;
- dnn::Target target;
- double default_l1, default_lInf;
-
- DNNTestLayer()
- {
- backend = (dnn::Backend)(int)get<0>(GetParam());
- target = (dnn::Target)(int)get<1>(GetParam());
- getDefaultThresholds(backend, target, &default_l1, &default_lInf);
- }
-
- static void getDefaultThresholds(int backend, int target, double* l1, double* lInf)
- {
- if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
- {
- *l1 = 4e-3;
- *lInf = 2e-2;
- }
- else
- {
- *l1 = 1e-5;
- *lInf = 1e-4;
- }
- }
-
- static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
- {
- if (backend == DNN_BACKEND_OPENCV && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
- {
-#ifdef HAVE_OPENCL
- if (!cv::ocl::useOpenCL())
-#endif
- {
- throw SkipTestException("OpenCL is not available/disabled in OpenCV");
- }
- }
- if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
- {
- if (!checkMyriadTarget())
- {
- throw SkipTestException("Myriad is not available/disabled in OpenCV");
- }
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
- if (inp && ref && inp->size[0] != 1)
- {
- // Myriad plugin supports only batch size 1. Slice a single sample.
- if (inp->size[0] == ref->size[0])
- {
- std::vector<cv::Range> range(inp->dims, Range::all());
- range[0] = Range(0, 1);
- *inp = inp->operator()(range);
-
- range = std::vector<cv::Range>(ref->dims, Range::all());
- range[0] = Range(0, 1);
- *ref = ref->operator()(range);
- }
- else
- throw SkipTestException("Myriad plugin supports only batch size 1");
- }
-#else
- if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
- inp->size[0] != 1 && inp->size[0] != ref->size[0])
- throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
-
-#endif
- }
- }
-
-protected:
- void checkBackend(Mat* inp = 0, Mat* ref = 0)
- {
- checkBackend(backend, target, inp, ref);
- }
-};
-
-} // namespace
#endif
string dataConfig;
if (hasText)
+ {
ASSERT_TRUE(readFileInMemory(netConfig, dataConfig));
+ }
net = readNetFromTensorflow(dataModel.c_str(), dataModel.size(),
dataConfig.c_str(), dataConfig.size());
# it means class methods, not instance methods
decl_str = self.batch_replace(decl_str, [("static inline", ""), ("inline", ""),\
("CV_EXPORTS_W", ""), ("CV_EXPORTS", ""), ("CV_CDECL", ""), ("CV_WRAP ", " "), ("CV_INLINE", ""),
- ("CV_DEPRECATED", "")]).strip()
+ ("CV_DEPRECATED", ""), ("CV_DEPRECATED_EXTERNAL", "")]).strip()
if decl_str.strip().startswith('virtual'):