* (i.e. @f$ x_{t}^{stream} @f$ is stored inside @p input[0][stream, ...]).
*/
- int inputNameToIndex(String inputName);
- int outputNameToIndex(const String& outputName);
+ int inputNameToIndex(String inputName) CV_OVERRIDE;
+ int outputNameToIndex(const String& outputName) CV_OVERRIDE;
};
/** @brief Classical recurrent layer
#define __OPENCV_DNN_CAFFE_IO_HPP__
#ifdef HAVE_PROTOBUF
+#if defined(__GNUC__) && __GNUC__ >= 5
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
#include "opencv-caffe.pb.h"
+#if defined(__GNUC__) && __GNUC__ >= 5
+#pragma GCC diagnostic pop
+#endif
+
namespace caffe { using namespace opencv_caffe; } // avoid massive renames from caffe proto package
namespace cv {
~OpenCLBackendWrapper() {}
// Copies data from device to a host memory.
- virtual void copyToHost()
+ virtual void copyToHost() CV_OVERRIDE
{
umat.copyTo(*host);
}
- virtual void setHostDirty()
+ virtual void setHostDirty() CV_OVERRIDE
{
hostDirty = true;
};
//fake layer containing network input blobs
struct DataLayer : public Layer
{
- void finalize(const std::vector<Mat*>&, std::vector<Mat>&) {}
- void forward(std::vector<Mat*>&, std::vector<Mat>&, std::vector<Mat> &) {}
- void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) {}
+ void finalize(const std::vector<Mat*>&, std::vector<Mat>&) CV_OVERRIDE {}
+ void forward(std::vector<Mat*>&, std::vector<Mat>&, std::vector<Mat> &) CV_OVERRIDE {}
+ void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs, OutputArrayOfArrays internals) CV_OVERRIDE {}
- int outputNameToIndex(const String& tgtName)
+ int outputNameToIndex(const String& tgtName) CV_OVERRIDE
{
int idx = (int)(std::find(outNames.begin(), outNames.end(), tgtName) - outNames.begin());
return (idx < (int)outNames.size()) ? idx : -1;
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == requiredOutputs);
outputs.assign(inputs.begin(), inputs.end());
namespace dnn
{
-class BatchNormLayerImpl : public BatchNormLayer
+class BatchNormLayerImpl CV_FINAL : public BatchNormLayer
{
public:
Mat weights_, bias_;
}
}
- void getScaleShift(Mat& scale, Mat& shift) const
+ void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{
scale = weights_;
shift = bias_;
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
if (!useGlobalStats && inputs[0][0] != 1)
CV_Error(Error::StsNotImplemented, "Batch normalization in training mode with batch size > 1");
return true;
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node)
+ virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
{
switch (node->backendId)
{
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> input = halideBuffer(inputs[0]);
}
#endif // HAVE_HALIDE
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
{
namespace dnn
{
-class BlankLayerImpl : public BlankLayer
+class BlankLayerImpl CV_FINAL : public BlankLayer
{
public:
BlankLayerImpl(const LayerParams& params)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
return true;
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
namespace dnn
{
-class ConcatLayerImpl : public ConcatLayer
+class ConcatLayerImpl CV_FINAL : public ConcatLayer
{
public:
ConcatLayerImpl(const LayerParams& params)
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
outputs.resize(1, inputs[0]);
return false;
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding || // By channels
ChannelConcatInvoker() : inputs(0), output(0), nstripes(0) {}
- void operator()(const Range& r) const
+ void operator()(const Range& r) const CV_OVERRIDE
{
size_t planeSize = (size_t)output->size[2]*output->size[3];
size_t nch = chptrs.size();
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
std::vector<Halide::Buffer<> > inputBuffers = halideBuffers(input);
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
adjustPad.height < stride.height);
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
virtual void applyHalideScheduler(Ptr<BackendNode>& node,
const std::vector<Mat*> &inputs,
const std::vector<Mat> &outputs,
- int targetId) const
+ int targetId) const CV_OVERRIDE
{
#ifdef HAVE_HALIDE
if (targetId != DNN_TARGET_CPU)
#define IS_POWER_LAYER(layer) \
(!layer.empty() && !layer->type.compare("Power"))
//TODO: simultaneously convolution and bias addition for cache optimization
-class ConvolutionLayerImpl : public BaseConvolutionLayerImpl
+class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
{
public:
enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
#endif
}
- MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const
+ MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
{
Size out(outShape[3], outShape[2]);
int inpGroupCn = blobs[0].size[1];
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(blobs.size() != 0);
CV_Assert(!hasBias() || blobs[1].total() == (size_t)blobs[0].size[0]);
return false;
}
- virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
BaseConvolutionLayerImpl::finalize(inputs, outputs);
#endif
}
- bool setActivation(const Ptr<ActivationLayer>& layer)
+ bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
{
activ = layer;
if (activ.empty())
return !activ.empty();
}
- virtual bool tryFuse(Ptr<Layer>& top)
+ virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
{
Mat w, b;
top->getScaleShift(w, b);
biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
parallel_for_(Range(0, nstripes), p, nstripes);
}
- virtual void operator ()(const Range &r0) const
+ virtual void operator ()(const Range &r0) const CV_OVERRIDE
{
const int valign = ConvolutionLayerImpl::VEC_ALIGN;
int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
CV_Assert(inputs.size() == outputs.size());
}
};
-class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl
+class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
{
public:
Mat weightsMat, biasesMat;
DeConvolutionLayerImpl(const LayerParams& params) : BaseConvolutionLayerImpl(params) {}
- MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const
+ MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
{
int inpCn = inpShape[1];
int inpH = inpShape[2];
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(!hasBias() || blobs[1].total() == (size_t)numOutput);
CV_Assert(inputs.size() != 0);
return false;
}
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
BaseConvolutionLayerImpl::finalize(inputs, outputs);
getConvPoolPaddings(Size(outputs[0].size[3], outputs[0].size[2]),
useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
}
- void operator()(const Range& range_) const
+ void operator()(const Range& range_) const CV_OVERRIDE
{
int stripeSize = (int)alignSize((b_->cols + nstripes_ - 1)/nstripes_, 16);
Range range(range_.start*stripeSize, std::min(range_.end*stripeSize, b_->cols));
parallel_for_(Range(0, nstripes), t, nstripes);
}
- virtual void operator ()(const Range &r) const
+ virtual void operator ()(const Range &r) const CV_OVERRIDE
{
const float* data_col_ = data_col;
float* data_im_ = data_im;
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
CV_Assert(inputs.size() == outputs.size());
namespace dnn
{
-class CropLayerImpl : public CropLayer
+class CropLayerImpl CV_FINAL : public CropLayer
{
public:
CropLayerImpl(const LayerParams& params)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2);
return false;
}
- void finalize(const std::vector<Mat *> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat *> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
CV_Assert(2 == inputs.size());
}
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
} // namespace
-class DetectionOutputLayerImpl : public DetectionOutputLayer
+class DetectionOutputLayerImpl CV_FINAL : public DetectionOutputLayer
{
public:
unsigned _numClasses;
setParamsFrom(params);
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !_locPredTransposed;
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() >= 3);
CV_Assert(inputs[0][0] == inputs[1][0]);
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
nstripes_ = nstripes;
}
- void operator()(const Range &r) const
+ void operator()(const Range &r) const CV_OVERRIDE
{
int nstripes = nstripes_, nsamples = 1, outCn = 1;
size_t planeSize = 1;
ElementWiseLayer(const Func &f=Func()) : run_parallel(false) { func = f; }
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && this->type != "Sigmoid";
}
- virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node)
+ virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
{
switch (node->backendId)
{
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> input = halideBuffer(inputs[0]);
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
return true;
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
}
}
- void forwardSlice(const float* src, float* dst, int len, size_t planeSize, int cn0, int cn1) const
+ void forwardSlice(const float* src, float* dst, int len, size_t planeSize, int cn0, int cn1) const CV_OVERRIDE
{
func.apply(src, dst, len, planeSize, cn0, cn1);
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
long flops = 0;
for (int i = 0; i < outputs.size(); i++)
namespace dnn
{
-class EltwiseLayerImpl : public EltwiseLayer
+class EltwiseLayerImpl CV_FINAL : public EltwiseLayer
{
public:
enum EltwiseOp
}
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() >= 2);
CV_Assert(coeffs.size() == 0 || coeffs.size() == inputs.size());
parallel_for_(Range(0, nstripes), p, nstripes);
}
- void operator()(const Range& r) const
+ void operator()(const Range& r) const CV_OVERRIDE
{
size_t total = dst->size[0]*planeSize;
size_t stripeSize = (total + nstripes - 1)/nstripes;
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
coeffs, op, activ.get(), nstripes);
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
Halide::Var x("x"), y("y"), c("c"), n("n");
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
CV_Assert(inputs.size());
return flops;
}
- bool setActivation(const Ptr<ActivationLayer>& layer)
+ bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
{
activ = layer;
return !activ.empty();
namespace dnn
{
-class FlattenLayerImpl : public FlattenLayer
+class FlattenLayerImpl CV_FINAL : public FlattenLayer
{
public:
FlattenLayerImpl(const LayerParams &params)
setParamsFrom(params);
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
for (size_t i = 1; i < inputs.size(); i++)
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
namespace dnn
{
-class FullyConnectedLayerImpl : public InnerProductLayer
+class FullyConnectedLayerImpl CV_FINAL : public InnerProductLayer
{
public:
enum { VEC_ALIGN = 8 };
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &) const
+ std::vector<MatShape> &) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1);
CV_Assert(1 <= blobs.size() && blobs.size() <= 2);
return false;
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && axis == 1;
}
- virtual bool setActivation(const Ptr<ActivationLayer>& layer)
+ virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
{
activ = layer;
return !activ.empty();
parallel_for_(Range(0, nstripes), p, nstripes);
}
- void operator()(const Range& r) const
+ void operator()(const Range& r) const CV_OVERRIDE
{
int valign = FullyConnectedLayerImpl::VEC_ALIGN;
int nsamples = srcMat->rows;
};
#ifdef HAVE_OPENCL
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
innerProductOp.release();
}
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &)
+ void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
int inW, inH, inC, inN, outC = blobs[0].size[0];
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)inputs; // suppress unused variable warning
long flops = 0;
namespace dnn
{
-class LRNLayerImpl : public LRNLayer
+class LRNLayerImpl CV_FINAL : public LRNLayer
{
public:
LRNLayerImpl(const LayerParams& params)
Ptr<OCL4DNNLRN<float> > lrnOp;
#endif
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
}
#ifdef HAVE_OPENCL
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
lrnOp.release();
}
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
planeSize_ = planeSize; nsamples_ = nsamples; nstripes_ = nstripes;
}
- void operator()(const Range& r) const
+ void operator()(const Range& r) const CV_OVERRIDE
{
int nsamples = nsamples_, nstripes = nstripes_;
size_t planeSize = planeSize_, planeSize_n = planeSize * nsamples;
}
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
float alphaSize = alpha;
virtual void applyHalideScheduler(Ptr<BackendNode>& node,
const std::vector<Mat*> &inputs,
const std::vector<Mat> &outputs,
- int targetId) const
+ int targetId) const CV_OVERRIDE
{
#ifdef HAVE_HALIDE
if (targetId != DNN_TARGET_CPU)
#endif // HAVE_HALIDE
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
CV_Assert(inputs.size() > 0);
namespace dnn
{
-class MaxUnpoolLayerImpl : public MaxUnpoolLayer
+class MaxUnpoolLayerImpl CV_FINAL : public MaxUnpoolLayer
{
public:
MaxUnpoolLayerImpl(const LayerParams& params)
poolStride = Size(params.get<int>("pool_stride_w"), params.get<int>("pool_stride_h"));
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() &&
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2);
CV_Assert(total(inputs[0]) == total(inputs[1]));
return false;
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
// Meaningless operation if false because if kernel > stride
namespace dnn
{
-class MVNLayerImpl : public MVNLayer
+class MVNLayerImpl CV_FINAL : public MVNLayer
{
public:
MVNLayerImpl(const LayerParams& params)
Mat scale, shift;
bool fuse_batch_norm;
- virtual bool tryFuse(Ptr<Layer>& top)
+ virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
{
if (preferableTarget == DNN_TARGET_OPENCL && !fuse_batch_norm)
{
Ptr<ReLULayer> activ_relu;
float relu_slope;
bool fuse_relu;
- bool setActivation(const Ptr<ActivationLayer>& layer)
+ bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
{
if (!layer.empty() && preferableTarget == DNN_TARGET_OPENCL)
{
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat *> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
long flops = 0;
namespace cv { namespace dnn {
-class NormalizeBBoxLayerImpl : public NormalizeBBoxLayer
+class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
{
public:
NormalizeBBoxLayerImpl(const LayerParams& params)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1);
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
namespace dnn
{
-class PaddingLayerImpl : public PaddingLayer
+class PaddingLayerImpl CV_FINAL : public PaddingLayer
{
public:
PaddingLayerImpl(const LayerParams ¶ms)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1);
const MatShape& inpShape = inputs[0];
return false;
}
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
// Compute dstRanges.
const MatSize& inpShape = inputs[0]->size;
dstRanges.push_back(Range::all());
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4;
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
CV_Error(Error::StsNotImplemented, "Unknown padding type: " + paddingType);
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
int inW, inH, inC, inN;
{
namespace dnn
{
-class PermuteLayerImpl : public PermuteLayer
+class PermuteLayerImpl CV_FINAL : public PermuteLayer
{
public:
void checkCurrentOrder(int currentOrder)
checkNeedForPermutation();
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
if(!_needsPermute)
{
_count = _oldStride[0] * shapeBefore[0];
}
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
if(!_needsPermute)
{
PermuteInvoker() : inp(0), out(0), order(0), nstripes(0) {}
- void operator()(const Range& r) const
+ void operator()(const Range& r) const CV_OVERRIDE
{
int n0 = out->size[0], n1 = out->size[1], n2 = out->size[2], n3 = out->size[3];
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
return (int)(v + (v >= 0.f ? 0.5f : -0.5f));
}
-class PoolingLayerImpl : public PoolingLayer
+class PoolingLayerImpl CV_FINAL : public PoolingLayer
{
public:
PoolingLayerImpl(const LayerParams& params)
Ptr<OCL4DNNPool<float> > poolOp;
#endif
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
CV_Assert(!inputs.empty());
#endif
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() &&
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
if (type == MAX)
return initMaxPoolingHalide(inputs);
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
parallel_for_(Range(0, nstripes), p, nstripes);
}
- void operator()(const Range& r) const
+ void operator()(const Range& r) const CV_OVERRIDE
{
int channels = dst->size[1], width = dst->size[3], height = dst->size[2];
int inp_width = src->size[3], inp_height = src->size[2];
virtual void applyHalideScheduler(Ptr<BackendNode>& node,
const std::vector<Mat*> &inputs,
const std::vector<Mat> &outputs,
- int targetId) const
+ int targetId) const CV_OVERRIDE
{
#ifdef HAVE_HALIDE
if (targetId != DNN_TARGET_CPU)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() != 0);
Size in(inputs[0][3], inputs[0][2]), out;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)inputs; // suppress unused variable warning
long flops = 0;
namespace dnn
{
-class PriorBoxLayerImpl : public PriorBoxLayer
+class PriorBoxLayerImpl CV_FINAL : public PriorBoxLayer
{
public:
static bool getParameterDict(const LayerParams ¶ms,
}
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !_explicitSizes;
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(!inputs.empty());
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
long flops = 0;
namespace cv { namespace dnn {
-class ProposalLayerImpl : public ProposalLayer
+class ProposalLayerImpl CV_FINAL : public ProposalLayer
{
public:
ProposalLayerImpl(const LayerParams& params)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
// We need to allocate the following blobs:
// - output priors from PriorBoxLayer
return false;
}
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
std::vector<Mat*> layerInputs;
std::vector<Mat> layerOutputs;
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
cv::pow(1 + dst, -1, dst);
}
-class LSTMLayerImpl : public LSTMLayer
+class LSTMLayerImpl CV_FINAL : public LSTMLayer
{
int numTimeStamps, numSamples;
bool allocated;
outTailShape.clear();
}
- void setUseTimstampsDim(bool use)
+ void setUseTimstampsDim(bool use) CV_OVERRIDE
{
CV_Assert(!allocated);
useTimestampDim = use;
}
- void setProduceCellOutput(bool produce)
+ void setProduceCellOutput(bool produce) CV_OVERRIDE
{
CV_Assert(!allocated);
produceCellOutput = produce;
}
- void setOutShape(const MatShape &outTailShape_)
+ void setOutShape(const MatShape &outTailShape_) CV_OVERRIDE
{
CV_Assert(!allocated || total(outTailShape) == total(outTailShape_));
outTailShape = outTailShape_;
}
- void setWeights(const Mat &Wh, const Mat &Wx, const Mat &bias)
+ void setWeights(const Mat &Wh, const Mat &Wx, const Mat &bias) CV_OVERRIDE
{
CV_Assert(Wh.dims == 2 && Wx.dims == 2);
CV_Assert(Wh.rows == Wx.rows);
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6);
CV_Assert(inputs.size() == 1);
return false;
}
- void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
+ void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output) CV_OVERRIDE
{
CV_Assert(!usePeephole && blobs.size() == 3 || usePeephole && blobs.size() == 6);
CV_Assert(input.size() == 1);
allocated = true;
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
produceH = false;
}
- void setProduceHiddenOutput(bool produce = false)
+ void setProduceHiddenOutput(bool produce = false) CV_OVERRIDE
{
produceH = produce;
}
- void setWeights(const Mat &W_xh, const Mat &b_h, const Mat &W_hh, const Mat &W_ho, const Mat &b_o)
+ void setWeights(const Mat &W_xh, const Mat &b_h, const Mat &W_hh, const Mat &W_ho, const Mat &b_o) CV_OVERRIDE
{
CV_Assert(W_hh.dims == 2 && W_xh.dims == 2);
CV_Assert(W_hh.size[0] == W_xh.size[0] && W_hh.size[0] == W_hh.size[1] && (int)b_h.total() == W_xh.size[0]);
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() >= 1 && inputs.size() <= 2);
return false;
}
- void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output)
+ void finalize(const std::vector<Mat*> &input, std::vector<Mat> &output) CV_OVERRIDE
{
CV_Assert(input.size() >= 1 && input.size() <= 2);
}
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
namespace dnn
{
-class RegionLayerImpl : public RegionLayer
+class RegionLayerImpl CV_FINAL : public RegionLayer
{
public:
int coords, classes, anchors, classfix;
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
CV_Assert(inputs[0][3] == (1 + coords + classes)*anchors);
return false;
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT;
}
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
namespace dnn
{
-class ReorgLayerImpl : public ReorgLayer
+class ReorgLayerImpl CV_FINAL : public ReorgLayer
{
int reorgStride;
public:
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() > 0);
outputs = std::vector<MatShape>(inputs.size(), shape(
return false;
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT;
}
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
}
-class ReshapeLayerImpl : public ReshapeLayer
+class ReshapeLayerImpl CV_FINAL : public ReshapeLayer
{
public:
ReshapeLayerImpl(const LayerParams& params)
}
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
outputs.clear();
return true;
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
namespace cv { namespace dnn {
-class ResizeNearestNeighborLayerImpl : public ResizeNearestNeighborLayer
+class ResizeNearestNeighborLayerImpl CV_FINAL : public ResizeNearestNeighborLayer
{
public:
ResizeNearestNeighborLayerImpl(const LayerParams& params)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1, inputs[0].size() == 4);
outputs.resize(1, inputs[0]);
return (outputs[0][2] == inputs[0][2]) && (outputs[0][3] == inputs[0][3]);
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
namespace dnn
{
-class ScaleLayerImpl : public ScaleLayer
+class ScaleLayerImpl CV_FINAL : public ScaleLayer
{
public:
ScaleLayerImpl(const LayerParams& params)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 2 && blobs.empty() || blobs.size() == 1 + hasBias);
outputs.assign(1, inputs[0]);
return true;
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node)
+ virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
{
switch (node->backendId)
{
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> input = halideBuffer(inputs[0]);
}
#endif // HAVE_HALIDE
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
return Ptr<BackendNode>();
}
- void getScaleShift(Mat& scale, Mat& shift) const
+ void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{
scale = !blobs.empty() ? blobs[0] : Mat();
shift = hasBias ? blobs[1] : Mat();
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
long flops = 0;
namespace dnn
{
-class ShiftLayerImpl : public ShiftLayer
+class ShiftLayerImpl CV_FINAL : public ShiftLayer
{
public:
ShiftLayerImpl(const LayerParams ¶ms)
CV_Assert(blobs.size() == 1);
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
internals.assign(1, shape(1, total(inputs[0], 2)));
return true;
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node)
+ virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
{
switch (node->backendId)
{
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
// Inference Engine has no layer just for biases. Create a linear
return Ptr<BackendNode>();
}
- void getScaleShift(Mat& scale, Mat& shift) const
+ void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{
scale = Mat();
shift = blobs[0];
}
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
long flops = 0;
namespace dnn
{
-class SliceLayerImpl : public SliceLayer
+class SliceLayerImpl CV_FINAL : public SliceLayer
{
public:
SliceLayerImpl(const LayerParams& params)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1);
MatShape inpShape = inputs[0];
return false;
}
- void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
CV_Assert(inputs.size() == 1);
const MatSize& inpShape = inputs[0]->size;
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
namespace dnn
{
-class SoftMaxLayerImpl : public SoftmaxLayer
+class SoftMaxLayerImpl CV_FINAL : public SoftmaxLayer
{
public:
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
bool inplace = Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
MatShape shape = inputs[0];
return inplace;
}
- virtual bool supportBackend(int backendId)
+ virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_DEFAULT ||
backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1 ||
}
#ifdef HAVE_OPENCL
- virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+ virtual void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs) CV_OVERRIDE
{
softmaxOp.release();
}
}
#endif
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
}
}
- virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
+ virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
return Ptr<BackendNode>();
}
- virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&)
+ virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
InferenceEngine::LayerParams lp;
}
int64 getFLOPS(const std::vector<MatShape> &inputs,
- const std::vector<MatShape> &outputs) const
+ const std::vector<MatShape> &outputs) const CV_OVERRIDE
{
(void)outputs; // suppress unused variable warning
int64 flops = 0;
namespace dnn
{
-class SplitLayerImpl : public SplitLayer
+class SplitLayerImpl CV_FINAL : public SplitLayer
{
public:
SplitLayerImpl(const LayerParams ¶ms)
bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const
+ std::vector<MatShape> &internals) const CV_OVERRIDE
{
CV_Assert(inputs.size() == 1);
return false;
}
- void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
+ void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
}
- void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
+ void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
{
CV_TRACE_FUNCTION();
CV_TRACE_ARG_VALUE(name, "name", name.c_str());
#define __OPENCV_DNN_OP_HALIDE_HPP__
#ifdef HAVE_HALIDE
+#if defined(__GNUC__) && __GNUC__ >= 5
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
#include <Halide.h>
+#if defined(__GNUC__) && __GNUC__ >= 5
+#pragma GCC diagnostic pop
+#endif
#endif // HAVE_HALIDE
namespace cv
HalideBackendWrapper(const Ptr<BackendWrapper>& base, const MatShape& shape);
- ~HalideBackendWrapper();
+ ~HalideBackendWrapper() CV_OVERRIDE;
- virtual void copyToHost();
+ virtual void copyToHost() CV_OVERRIDE;
- virtual void setHostDirty();
+ virtual void setHostDirty() CV_OVERRIDE;
Halide::Buffer<float> buffer;
{
outputs_ = outputs;
}
+// Const-qualified overload of getOutputsInfo(): copies the cached
+// 'outputs' map into the caller-provided map, same as the non-const
+// version above. Presumably added to match a const-qualified virtual in
+// newer InferenceEngine::ICNNNetwork headers; its declaration carries
+// /*CV_OVERRIDE*/ (commented out), so whether it actually overrides
+// depends on the installed IE version -- TODO confirm against the SDK.
+void InfEngineBackendNet::getOutputsInfo(InferenceEngine::OutputsDataMap &outputs_) const noexcept
+{
+    outputs_ = outputs;
+}
// Returns input references that aren't connected to internal outputs.
void InfEngineBackendNet::getInputsInfo(InferenceEngine::InputsDataMap &inputs_) noexcept
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__
#ifdef HAVE_INF_ENGINE
+#if defined(__GNUC__) && __GNUC__ >= 5
+//#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
#include <inference_engine.hpp>
+#if defined(__GNUC__) && __GNUC__ >= 5
+//#pragma GCC diagnostic pop
+#endif
#endif // HAVE_INF_ENGINE
namespace cv { namespace dnn {
InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
- virtual void Release() noexcept;
+ virtual void Release() noexcept CV_OVERRIDE;
- virtual InferenceEngine::Precision getPrecision() noexcept;
+ virtual InferenceEngine::Precision getPrecision() noexcept CV_OVERRIDE;
- virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept;
+ virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;
- virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept;
+ virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;
- virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept;
+ virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;
- virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;
+ virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;
- virtual void getName(char *pName, size_t len) noexcept;
+ virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept CV_OVERRIDE;
- virtual size_t layerCount() noexcept;
+ virtual void getName(char *pName, size_t len) noexcept CV_OVERRIDE;
- virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept;
+ virtual size_t layerCount() noexcept CV_OVERRIDE;
- virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept;
+ virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;
+
+ virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
size_t outputIndex = 0,
- InferenceEngine::ResponseDesc *resp = nullptr) noexcept;
+ InferenceEngine::ResponseDesc *resp = nullptr) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
InferenceEngine::CNNLayerPtr &out,
- InferenceEngine::ResponseDesc *resp) noexcept;
+ InferenceEngine::ResponseDesc *resp) noexcept CV_OVERRIDE;
- virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept;
+ virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;
- virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;
+ virtual InferenceEngine::TargetDevice getTargetDevice() noexcept CV_OVERRIDE;
- virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept;
+ virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;
- virtual size_t getBatchSize() const noexcept;
+ virtual size_t getBatchSize() const noexcept CV_OVERRIDE;
void init();
~InfEngineBackendWrapper();
- virtual void copyToHost();
+ virtual void copyToHost() CV_OVERRIDE;
- virtual void setHostDirty();
+ virtual void setHostDirty() CV_OVERRIDE;
InferenceEngine::DataPtr dataPtr;
InferenceEngine::TBlob<float>::Ptr blob;
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
- std::vector<MatShape> &internals) const;
+ std::vector<MatShape> &internals) const CV_OVERRIDE;
virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output,
- std::vector<Mat> &internals);
+ std::vector<Mat> &internals) CV_OVERRIDE;
virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
- OutputArrayOfArrays internals);
+ OutputArrayOfArrays internals) CV_OVERRIDE;
- virtual bool supportBackend(int backendId);
+ virtual bool supportBackend(int backendId) CV_OVERRIDE;
private:
InferenceEngine::DataPtr output;
}
virtual void finalize(tensorflow::GraphDef&, tensorflow::NodeDef* fusedNode,
- std::vector<tensorflow::NodeDef*>& inputNodes)
+ std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
{
Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
}
virtual void finalize(tensorflow::GraphDef& net, tensorflow::NodeDef* fusedNode,
- std::vector<tensorflow::NodeDef*>& inputNodes)
+ std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
{
Mat epsMat = getTensorContent(inputNodes.back()->attr().at("value").tensor());
CV_Assert(epsMat.total() == 1, epsMat.type() == CV_32FC1);
setFusedNode("Relu6", input);
}
- virtual bool match(const tensorflow::GraphDef& net, int nodeId, std::vector<int>& matchedNodesIds)
+ virtual bool match(const tensorflow::GraphDef& net, int nodeId, std::vector<int>& matchedNodesIds) CV_OVERRIDE
{
if (!Subgraph::match(net, nodeId, matchedNodesIds))
return false;
}
virtual void finalize(tensorflow::GraphDef&, tensorflow::NodeDef* fusedNode,
- std::vector<tensorflow::NodeDef*>& inputNodes)
+ std::vector<tensorflow::NodeDef*>& inputNodes) CV_OVERRIDE
{
std::vector<int> shape(numOutDims + 1); // batch size in Keras is implicit.
shape[0] = -1;
#include "../precomp.hpp"
#ifdef HAVE_PROTOBUF
-#include "graph.pb.h"
+#include "tf_io.hpp"
#include <iostream>
#include <fstream>
#include <algorithm>
#include <string>
-#include <google/protobuf/message.h>
-#include <google/protobuf/text_format.h>
-#include <google/protobuf/io/zero_copy_stream_impl.h>
-#include "tf_io.hpp"
#include "tf_graph_simplifier.hpp"
#endif
#include <fstream>
#include <vector>
-#include "graph.pb.h"
#include "tf_io.hpp"
+
#include "../caffe/caffe_io.hpp"
#include "../caffe/glog_emulator.hpp"
#define __OPENCV_DNN_TF_IO_HPP__
#ifdef HAVE_PROTOBUF
+#if defined(__GNUC__) && __GNUC__ >= 5
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wsuggest-override"
+#endif
#include "graph.pb.h"
+#include <google/protobuf/message.h>
+#include <google/protobuf/text_format.h>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+#if defined(__GNUC__) && __GNUC__ >= 5
+#pragma GCC diagnostic pop
+#endif
+
+
namespace cv {
namespace dnn {