/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#include "../op_vkcom.hpp"

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif

#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/permute.hpp"
using namespace cv::dnn::cuda4dnn;
#endif

namespace cv
{
namespace dnn
{
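
// Permute layer: reorders the axes of the input blob according to the
// "order" parameter, e.g. order = {0, 2, 3, 1} converts an NCHW blob to NHWC.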
class PermuteLayerImpl CV_FINAL : public PermuteLayer
{
public:
    // The permutation is a no-op when order[i] == i for every axis.
    void checkNeedForPermutation()
    {
        _needsPermute = false;
        for (size_t i = 0; i < _numAxes; ++i)
        {
            if (_order[i] != i)
            {
                _needsPermute = true;
                break;
            }
        }
    }

    PermuteLayerImpl(const LayerParams &params)
        : _count(0), _needsPermute(false), _numAxes(0)
    {
        if (!params.has("order"))
        {
            return;
        }

        DictValue paramOrder = params.get("order");
        _numAxes = paramOrder.size();

        // "order" must be a permutation of [0, _numAxes): every axis index
        // in range and none repeated.
        for (size_t i = 0; i < _numAxes; i++)
        {
            int currentOrder = paramOrder.get<int>(i);
            if (currentOrder < 0 || currentOrder >= (int)_numAxes)
            {
                CV_Error(Error::StsBadArg,
                         format("Orders of dimensions in Permute layer parameter "
                                "must be in [0...%zu]", _numAxes - 1));
            }
            if (std::find(_order.begin(), _order.end(), currentOrder) != _order.end())
            {
                CV_Error(Error::StsBadArg,
                         "Permute layer parameter contains duplicated orders.");
            }
            _order.push_back(currentOrder);
        }

        setParamsFrom(params);
        checkNeedForPermutation();
    }
    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()) ||
               (backendId == DNN_BACKEND_VKCOM && haveVulkan());
    }
    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        if (!_needsPermute)
        {
            // Identity permutation: output shapes equal input shapes.
            Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
            return true;
        }

        CV_Assert(inputs.size() > 0);
        CV_Assert((int)_numAxes == inputs[0].size());

        MatShape shapeBefore = inputs[0], shapeAfter;
        for (size_t i = 0; i < _numAxes; i++)
        {
            shapeAfter.push_back(shapeBefore[_order[i]]);
        }

        outputs.clear();
        for (size_t i = 0; i < inputs.size(); i++)
        {
            CV_Assert(total(inputs[i]) == total(shapeAfter));
            outputs.push_back(shapeAfter);
        }

        return false;
    }
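
    // Row-major strides: the last axis has stride 1 and each preceding axis'
    // stride is the product of all later dimensions. E.g. a shape [2, 3, 4, 5]
    // gives strides [60, 20, 5, 1]; _count ends up as the element total (120).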
    void computeStrides(const MatShape &shapeBefore, const MatShape &shapeAfter)
    {
        _oldStride.resize(_numAxes);
        _newStride.resize(_numAxes);

        _oldStride[_numAxes - 1] = 1;
        _newStride[_numAxes - 1] = 1;

        for (int i = (int)_numAxes - 2; i >= 0; i--)
        {
            _oldStride[i] = _oldStride[i + 1] * shapeBefore[i + 1];
            _newStride[i] = _newStride[i + 1] * shapeAfter[i + 1];
        }

        _count = _oldStride[0] * shapeBefore[0];
    }
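
    // Called once the actual shapes are known: recompute the strides and,
    // when built with OpenCL, upload the order/stride tables to the device
    // for use by forward_ocl().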
    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
    {
        if (!_needsPermute)
            return;

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        CV_Assert(inputs.size() > 0);
        const Mat& inp0 = inputs[0];
        CV_Assert((int)_numAxes == inp0.dims);

        computeStrides(shape(inputs[0]), shape(outputs[0]));

#ifdef HAVE_OPENCL
        std::vector<int> orderVec(_order.begin(), _order.end());
        Mat morder(1, (int)orderVec.size(), CV_32SC1, &orderVec[0]);

        std::vector<int> oldStrideVec(_oldStride.begin(), _oldStride.end());
        Mat mold_stride(1, (int)oldStrideVec.size(), CV_32SC1, &oldStrideVec[0]);

        std::vector<int> newStrideVec(_newStride.begin(), _newStride.end());
        Mat mnew_stride(1, (int)newStrideVec.size(), CV_32SC1, &newStrideVec[0]);

        morder.copyTo(uorder);
        mold_stride.copyTo(uold_stride);
        mnew_stride.copyTo(unew_stride);
#endif
    }
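
    // Parallel CPU implementation for the common 4D case: the three outer
    // output axes are flattened into rows and split into stripes across
    // threads; the innermost output axis is copied in a tight loop.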
    class PermuteInvoker : public ParallelLoopBody
    {
    public:
        const Mat* inp;
        Mat* out;
        const std::vector<size_t>* order;
        int nstripes;

        static void run(const Mat& inp, Mat& out, const std::vector<size_t>& order, int nstripes)
        {
            PermuteInvoker p;
            p.inp = &inp; p.out = &out; p.order = &order; p.nstripes = nstripes;

            CV_Assert( out.size[0] == inp.size[order[0]] &&
                       out.size[1] == inp.size[order[1]] &&
                       out.size[2] == inp.size[order[2]] &&
                       out.size[3] == inp.size[order[3]] );

            parallel_for_(Range(0, nstripes), p, nstripes);
        }

        PermuteInvoker() : inp(0), out(0), order(0), nstripes(0) {}

        void operator()(const Range& r) const CV_OVERRIDE
        {
            int n0 = out->size[0], n1 = out->size[1], n2 = out->size[2], n3 = out->size[3];

            size_t orows = (size_t)n0*n1*n2;
            size_t stripeSize = (orows + nstripes - 1)/nstripes;
            size_t stripeStart = r.start*stripeSize;
            size_t stripeEnd = std::min(r.end*stripeSize, orows);

            const size_t esz = sizeof(float);
            size_t ostep0 = out->step[0]/esz, ostep1 = out->step[1]/esz, ostep2 = out->step[2]/esz;
            const size_t* ord = &order->at(0);
            size_t istep0 = inp->step[ord[0]]/esz, istep1 = inp->step[ord[1]]/esz,
                   istep2 = inp->step[ord[2]]/esz, istep3 = inp->step[ord[3]]/esz;

            // Decompose the linear stripe offset into output coordinates (i0, i1, i2).
            size_t val = stripeStart;
            int i2 = (int)(val % n2);
            val /= n2;
            int i1 = (int)(val % n1);
            int i0 = (int)(val / n1);

            const float* inptr_orig = inp->ptr<float>();
            float* outptr_orig = out->ptr<float>();

            for( size_t ofs = stripeStart; ofs < stripeEnd; ofs++ )
            {
                const float* inptr = inptr_orig + i0*istep0 + i1*istep1 + i2*istep2;
                float* outptr = outptr_orig + i0*ostep0 + i1*ostep1 + i2*ostep2;

                for( int i3 = 0; i3 < n3; i3++ )
                    outptr[i3] = inptr[i3*istep3];

                // Advance to the next output row.
                if (++i2 >= n2) {
                    i2 = 0;
                    if (++i1 >= n1) {
                        i1 = 0;
                        if (++i0 >= n0)
                            break;
                    }
                }
            }
        }
    };
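
    // OpenCL path: launches one "permute" kernel per input blob, driven by
    // the order/stride tables uploaded in finalize(); returns false to fall
    // back to the CPU path when no permutation is needed or a launch fails.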
#ifdef HAVE_OPENCL
    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);

        if (!_needsPermute)
            return false;

        bool use_half = (inps.depth() == CV_16S);
        String opts = format("-DDtype=%s", use_half ? "half" : "float");
        for (size_t i = 0; i < inputs.size(); i++)
        {
            ocl::Kernel kernel("permute", ocl::dnn::permute_oclsrc, opts);

            kernel.set(0, (int)_count);
            kernel.set(1, ocl::KernelArg::PtrReadOnly(inputs[i]));
            kernel.set(2, ocl::KernelArg::PtrReadOnly(uorder));
            kernel.set(3, ocl::KernelArg::PtrReadOnly(uold_stride));
            kernel.set(4, ocl::KernelArg::PtrReadOnly(unew_stride));
            kernel.set(5, (int)_numAxes);
            kernel.set(6, ocl::KernelArg::PtrWriteOnly(outputs[i]));

            if (!kernel.run(1, &_count, NULL, false))
                return false;
        }

        return true;
    }
#endif
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        size_t k, ninputs = inputs.size();
        if (!_needsPermute)
        {
            // Identity permutation: copy through unless output shares the buffer.
            for (k = 0; k < ninputs; k++)
            {
                CV_Assert(outputs[k].total() == inputs[k].total());
                if (outputs[k].data != inputs[k].data)
                    inputs[k].copyTo(outputs[k]);
            }
        }
        else
        {
            size_t i, j, count = _count, numAxes = _numAxes;
            const size_t* newStride = &_newStride[0];
            const size_t* oldStride = &_oldStride[0];
            const size_t* order = &_order[0];

            for (k = 0; k < ninputs; k++)
            {
                const Mat& inp = inputs[k];
                Mat& out = outputs[k];

                CV_Assert(inp.dims == numAxes && inp.size == inputs[0].size);
                CV_Assert(out.dims == numAxes && out.size == outputs[0].size);
                CV_Assert(inp.isContinuous() && out.isContinuous());
                CV_Assert(inp.type() == CV_32F && out.type() == CV_32F);

                if (numAxes == 4)
                {
                    // Fast parallel path for the common 4D case.
                    int nstripes = getNumThreads();
                    PermuteInvoker::run(inp, out, _order, nstripes);
                }
                else
                {
                    // Generic N-D path: map each output index back to its
                    // source index via the old/new stride tables.
                    const float *srcData = inp.ptr<float>();
                    float *dstData = out.ptr<float>();

                    for (i = 0; i < count; ++i)
                    {
                        size_t oldPosition = 0;
                        size_t newPosition = i;
                        for (j = 0; j < numAxes; ++j)
                        {
                            oldPosition += (newPosition / newStride[j]) * oldStride[order[j]];
                            newPosition %= newStride[j];
                        }
                        dstData[i] = srcData[oldPosition];
                    }
                }
            }
        }
    }
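
    // Backend-specific node builders: each wraps _order for the respective
    // backend (CUDA via cuda4dnn::PermuteOp, Vulkan via vkcom::OpPermute,
    // Inference Engine via its Builder API).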
#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);
        return make_cuda_node<cuda4dnn::PermuteOp>(preferableTarget, std::move(context->stream), _order);
    }
#endif

    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
    {
#ifdef HAVE_VULKAN
        CV_Assert(!_order.empty());
        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpPermute(_order));
        return Ptr<BackendNode>(new VkComBackendNode(input, op));
#endif // HAVE_VULKAN
        return Ptr<BackendNode>();
    }

#ifdef HAVE_INF_ENGINE
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
        InferenceEngine::Builder::PermuteLayer ieLayer(name);
        ieLayer.setOrder(_order);
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
    }
#endif // HAVE_INF_ENGINE
    size_t _count;
    std::vector<size_t> _order;

    std::vector<int> _oldDimensionSize;
    std::vector<int> _newDimensionSize;

    std::vector<size_t> _oldStride;
    std::vector<size_t> _newStride;
    bool _needsPermute;
    size_t _numAxes;

#ifdef HAVE_OPENCL
    UMat uorder, uold_stride, unew_stride;
#endif
};
Ptr<PermuteLayer> PermuteLayer::create(const LayerParams &params)
{
    return Ptr<PermuteLayer>(new PermuteLayerImpl(params));
}

}  // namespace dnn
}  // namespace cv
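
/* Illustrative usage sketch (not part of this file), assuming the public
   cv::dnn API; the parameter values here are hypothetical:

       int order[] = {0, 2, 3, 1};  // NCHW -> NHWC
       LayerParams lp;
       lp.set("order", DictValue::arrayInt(order, 4));
       Ptr<PermuteLayer> permute = PermuteLayer::create(lp);
*/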