/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#include "../op_vkcom.hpp"
#include <float.h>
#include <algorithm>

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif

#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/permute.hpp"
using namespace cv::dnn::cuda4dnn;
#endif

namespace cv
{
namespace dnn
{
class PermuteLayerImpl CV_FINAL : public PermuteLayer
{
public:
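    // Scan the requested axis order; a permutation is only needed if some axis
    // actually moves, i.e. _order is not the identity (0, 1, ..., _numAxes-1).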
    void checkNeedForPermutation()
    {
        _needsPermute = false;
        for (size_t i = 0; i < _numAxes; ++i)
        {
            if (_order[i] != i)
            {
                _needsPermute = true;
                break;
            }
        }
    }

    PermuteLayerImpl(const LayerParams &params)
        : _count(0), _needsPermute(false), _numAxes(0)
    {
        if (!params.has("order"))
        {
            return;
        }

        DictValue paramOrder = params.get("order");
        _numAxes = paramOrder.size();

        for (size_t i = 0; i < _numAxes; i++)
        {
            int currentOrder = paramOrder.get<int>(i);
            // A valid order entry is an axis index in [0, _numAxes - 1],
            // and each axis may appear at most once.
            if (currentOrder < 0 || currentOrder >= (int)_numAxes)
            {
                CV_Error(Error::StsBadArg,
                         format("Orders of dimensions in Permute layer parameter "
                                "must be in [0...%zu]", _numAxes - 1));
            }
            if (std::find(_order.begin(), _order.end(), currentOrder) != _order.end())
            {
                CV_Error(Error::StsBadArg,
                         "Permute layer parameter contains duplicated orders.");
            }
            _order.push_back(currentOrder);
        }

        setParamsFrom(params);
        checkNeedForPermutation();
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()) ||
               (backendId == DNN_BACKEND_VKCOM && haveVulkan());
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        if (!_needsPermute)
        {
            Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
            return true;
        }

        CV_Assert(inputs.size() > 0);
        CV_Assert((int)_numAxes == inputs[0].size());

        MatShape shapeBefore = inputs[0], shapeAfter;
        for (size_t i = 0; i < _numAxes; i++)
        {
            shapeAfter.push_back(shapeBefore[_order[i]]);
        }

        outputs.clear();

        for (size_t i = 0; i < inputs.size(); i++)
        {
            CV_Assert(total(inputs[i]) == total(shapeAfter));
            outputs.push_back(shapeAfter);
        }

        return false;
    }

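    // Precompute row-major (C-contiguous) strides for both the input and the
    // permuted output shape: the innermost axis has stride 1, and each outer
    // axis multiplies in the extent of the axis inside it. For shape (2, 3, 4),
    // for example, the strides are (12, 4, 1). _count ends up as the total
    // element count.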
    void computeStrides(const MatShape &shapeBefore, const MatShape &shapeAfter)
    {
        _oldStride.resize(_numAxes);
        _newStride.resize(_numAxes);

        _oldStride[_numAxes - 1] = 1;
        _newStride[_numAxes - 1] = 1;

        for (int i = _numAxes - 2; i >= 0; i--)
        {
            _oldStride[i] = _oldStride[i + 1] * shapeBefore[i + 1];
            _newStride[i] = _newStride[i + 1] * shapeAfter[i + 1];
        }

        _count = _oldStride[0] * shapeBefore[0];
    }

    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
    {
        if (!_needsPermute)
        {
            return;
        }
        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        CV_Assert(inputs.size() > 0);
        const Mat& inp0 = inputs[0];
        CV_Assert((int)_numAxes == inp0.dims);

        computeStrides(shape(inputs[0]), shape(outputs[0]));

#ifdef HAVE_OPENCL
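        // Cache the permutation order and both stride tables in UMats once,
        // so every OpenCL forward pass can read them without re-uploading.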
        if (uorder.empty())
        {
            std::vector<int> orderVec(_order.begin(), _order.end());
            Mat morder(1, (int)orderVec.size(), CV_32SC1, &orderVec[0]);

            std::vector<int> oldStrideVec(_oldStride.begin(), _oldStride.end());
            Mat mold_stride(1, (int)oldStrideVec.size(), CV_32SC1, &oldStrideVec[0]);

            std::vector<int> newStrideVec(_newStride.begin(), _newStride.end());
            Mat mnew_stride(1, (int)newStrideVec.size(), CV_32SC1, &newStrideVec[0]);

            morder.copyTo(uorder);
            mold_stride.copyTo(uold_stride);
            mnew_stride.copyTo(unew_stride);
        }
#endif
    }
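    // Parallel permutation specialized for the common 4D (NCHW-style) case.
    // The three outer output axes are flattened into "rows"; ParallelLoopBody
    // splits those rows into stripes so worker threads copy disjoint slices.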
    class PermuteInvoker : public ParallelLoopBody
    {
    public:
        const Mat* inp;
        Mat* out;
        const std::vector<size_t>* order;
        int nstripes;

        static void run(const Mat& inp, Mat& out, const std::vector<size_t>& order, int nstripes)
        {
            PermuteInvoker p;
            p.inp = &inp;
            p.out = &out;
            p.order = &order;
            p.nstripes = nstripes;

            CV_Assert( out.size[0] == inp.size[order[0]] &&
                       out.size[1] == inp.size[order[1]] &&
                       out.size[2] == inp.size[order[2]] &&
                       out.size[3] == inp.size[order[3]] );

            parallel_for_(Range(0, nstripes), p, nstripes);
        }

        PermuteInvoker() : inp(0), out(0), order(0), nstripes(0) {}

        void operator()(const Range& r) const CV_OVERRIDE
        {
            int n0 = out->size[0], n1 = out->size[1], n2 = out->size[2], n3 = out->size[3];

            size_t orows = (size_t)n0*n1*n2;
            size_t stripeSize = (orows + nstripes - 1)/nstripes;
            size_t stripeStart = r.start*stripeSize;
            size_t stripeEnd = std::min(r.end*stripeSize, orows);
            const size_t esz = sizeof(float);
            size_t ostep0 = out->step[0]/esz, ostep1 = out->step[1]/esz, ostep2 = out->step[2]/esz;
            const size_t* ord = &order->at(0);
            size_t istep0 = inp->step[ord[0]]/esz, istep1 = inp->step[ord[1]]/esz,
                   istep2 = inp->step[ord[2]]/esz, istep3 = inp->step[ord[3]]/esz;
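            // Decode the linear stripe offset into the (i0, i1, i2) output
            // coordinates it corresponds to (a mixed-radix decomposition over
            // the three outer output axes).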
            size_t val = stripeStart;
            int i2 = (int)(val % n2);
            val /= n2;
            int i1 = (int)(val % n1);
            int i0 = (int)(val / n1);

            const float* inptr_orig = inp->ptr<float>();
            float* outptr_orig = out->ptr<float>();

            for( size_t ofs = stripeStart; ofs < stripeEnd; ofs++ )
            {
                const float* inptr = inptr_orig + i0*istep0 + i1*istep1 + i2*istep2;
                float* outptr = outptr_orig + i0*ostep0 + i1*ostep1 + i2*ostep2;

                // Copy one innermost output row; istep3 gathers the strided
                // source elements.
                for( int i3 = 0; i3 < n3; i3++ )
                    outptr[i3] = inptr[i3*istep3];

                // Advance to the next output row, carrying into the outer axes.
                if( ++i2 >= n2 )
                {
                    i2 = 0;
                    if( ++i1 >= n1 )
                    {
                        i1 = 0;
                        if( ++i0 >= n0 )
                            break;
                    }
                }
            }
        }
    };

#ifdef HAVE_OPENCL
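    // OpenCL path: one "permute" kernel launch per input blob, driven by the
    // cached order/stride tables; each work-item maps one flat output index
    // back to its source element.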
    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);

        if (!_needsPermute)
            return false;

        bool use_half = (inps.depth() == CV_16S);
        String opts = format("-DDtype=%s", use_half ? "half" : "float");
        for (size_t i = 0; i < inputs.size(); i++)
        {
            ocl::Kernel kernel("permute", ocl::dnn::permute_oclsrc, opts);

            kernel.set(0, (int)_count);
            kernel.set(1, ocl::KernelArg::PtrReadOnly(inputs[i]));
            kernel.set(2, ocl::KernelArg::PtrReadOnly(uorder));
            kernel.set(3, ocl::KernelArg::PtrReadOnly(uold_stride));
            kernel.set(4, ocl::KernelArg::PtrReadOnly(unew_stride));
            kernel.set(5, (int)_numAxes);
            kernel.set(6, ocl::KernelArg::PtrWriteOnly(outputs[i]));

            if (!kernel.run(1, &_count, NULL, false))
                return false;
        }

        return true;
    }
#endif

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

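        // FP16 blobs (stored as CV_16S) are not handled by the CPU path below;
        // fall back to the generic convert-compute-convert implementation.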
        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        size_t k, ninputs = inputs.size();
        if (!_needsPermute)
        {
            for (k = 0; k < ninputs; k++)
            {
                CV_Assert(outputs[k].total() == inputs[k].total());
                if (outputs[k].data != inputs[k].data)
                    inputs[k].copyTo(outputs[k]);
            }
        }
        else
        {
            size_t i, j, count = _count, numAxes = _numAxes;
            const size_t* newStride = &_newStride[0];
            const size_t* oldStride = &_oldStride[0];
            const size_t* order = &_order[0];

            for (k = 0; k < ninputs; k++)
            {
                const Mat& inp = inputs[k];
                Mat& out = outputs[k];

                CV_Assert(inp.dims == numAxes && inp.size == inputs[0].size);
                CV_Assert(out.dims == numAxes && out.size == outputs[0].size);

                CV_Assert(inp.isContinuous() && out.isContinuous());
                CV_Assert(inp.type() == CV_32F && out.type() == CV_32F);

                if( numAxes == 4 )
                {
                    // Fast parallel path for the common 4D case.
                    int nstripes = getNumThreads();
                    PermuteInvoker::run(inp, out, _order, nstripes);
                }
                else
                {
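                    // General N-D path: for each flat output index, rebuild
                    // the multi-index via the new strides and accumulate the
                    // source offset via the old strides of the permuted axes.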
                    const float *srcData = inp.ptr<float>();
                    float *dstData = out.ptr<float>();

                    for (i = 0; i < count; ++i)
                    {
                        size_t oldPosition = 0;
                        size_t newPosition = i;

                        for (j = 0; j < numAxes; ++j)
                        {
                            oldPosition += (newPosition / newStride[j]) * oldStride[order[j]];
                            newPosition %= newStride[j];
                        }
                        dstData[i] = srcData[oldPosition];
                    }
                }
            }
        }
    }

#ifdef HAVE_CUDA
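    // CUDA path: wrap the layer in a cuda4dnn PermuteOp node carrying the axis
    // order; the actual permutation runs in the cuda4dnn primitive.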
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);
        return make_cuda_node<cuda4dnn::PermuteOp>(preferableTarget, std::move(context->stream), _order);
    }
#endif

    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
    {
#ifdef HAVE_VULKAN
        CV_Assert(!_order.empty());
        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpPermute(_order));
        return Ptr<BackendNode>(new VkComBackendNode(input, op));
#endif // HAVE_VULKAN
        return Ptr<BackendNode>();
    }

#ifdef HAVE_INF_ENGINE
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
        InferenceEngine::Builder::PermuteLayer ieLayer(name);
        ieLayer.setOrder(_order);
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
    }
#endif  // HAVE_INF_ENGINE

    size_t _count;
    std::vector<size_t> _order;

    std::vector<int> _oldDimensionSize;
    std::vector<int> _newDimensionSize;

    std::vector<size_t> _oldStride;
    std::vector<size_t> _newStride;
    bool _needsPermute;

#ifdef HAVE_OPENCL
    UMat uorder, uold_stride, unew_stride;
#endif

    size_t _numAxes;
};

Ptr<PermuteLayer> PermuteLayer::create(const LayerParams &params)
{
    return Ptr<PermuteLayer>(new PermuteLayerImpl(params));
}

}
}