// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../op_cuda.hpp"
#include "layers_common.hpp"

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif

#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/const.hpp"
using namespace cv::dnn::cuda4dnn;
#endif

namespace cv { namespace dnn {

24 class ConstLayerImpl CV_FINAL : public ConstLayer
25 {
26 public:
27     ConstLayerImpl(const LayerParams& params)
28     {
29         setParamsFrom(params);
30         CV_Assert(blobs.size() == 1);
31     }
32
33     virtual bool supportBackend(int backendId) CV_OVERRIDE
34     {
35         return backendId == DNN_BACKEND_OPENCV ||
36                backendId == DNN_BACKEND_INFERENCE_ENGINE ||
37                backendId == DNN_BACKEND_CUDA;
38     }
39
40     virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
41                                  const int requiredOutputs,
42                                  std::vector<MatShape> &outputs,
43                                  std::vector<MatShape> &internals) const CV_OVERRIDE
44     {
45         CV_Assert(inputs.empty());
46         outputs.assign(1, shape(blobs[0]));
47         return false;
48     }
49
50 #ifdef HAVE_OPENCL
51     bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
52     {
53         std::vector<UMat> outputs;
54         outs.getUMatVector(outputs);
55         if (outs.depth() == CV_16S)
56             convertFp16(blobs[0], outputs[0]);
57         else
58             blobs[0].copyTo(outputs[0]);
59         return true;
60     }
61 #endif
62
63     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
64     {
65         CV_TRACE_FUNCTION();
66         CV_TRACE_ARG_VALUE(name, "name", name.c_str());
67
68         CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
69                    forward_ocl(inputs_arr, outputs_arr, internals_arr))
70
71         std::vector<Mat> outputs;
72         outputs_arr.getMatVector(outputs);
73         blobs[0].copyTo(outputs[0]);
74     }
75
76 #ifdef HAVE_INF_ENGINE
77     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
78     {
79         InferenceEngine::Builder::ConstLayer ieLayer(name);
80         ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
81         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
82     }
83 #endif  // HAVE_INF_ENGINE
84
85 #ifdef HAVE_CUDA
86     Ptr<BackendNode> initCUDA(
87         void *context_,
88         const std::vector<Ptr<BackendWrapper>>& inputs,
89         const std::vector<Ptr<BackendWrapper>>& outputs
90     ) override
91     {
92         auto context = reinterpret_cast<csl::CSLContext*>(context_);
93
94         CV_Assert(blobs.size() == 1);
95         return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blobs[0]);
96     }
97 #endif
98
99 };
101 Ptr<Layer> ConstLayer::create(const LayerParams& params)
102 {
103     return Ptr<Layer>(new ConstLayerImpl(params));
104 }

}}  // namespace cv::dnn