modules/dnn/src/layers/slice_layer.cpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "../precomp.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"

#include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp>

#include <opencv2/core/utils/logger.hpp>

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif

#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/slice.hpp"
using namespace cv::dnn::cuda4dnn;
#endif

namespace cv
{
namespace dnn
{

class SliceLayerImpl : public SliceLayer
{
public:
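    // A Slice layer accepts one of three parameterizations:
    //   * "slice_point" (Caffe style): split points along "axis"; e.g.
    //     slice_point = [2, 5] with axis = 1 on an input of extent 8 along
    //     that axis yields the ranges [0, 2), [2, 5), [5, 8).
    //   * "begin" + "size" (TensorFlow style): a size of -1 means "up to the end".
    //   * "begin" + "end" (ONNX style): negative ends are resolved against
    //     the actual input shape in finalize().
    // Without any of these, the input is split into equal parts along "axis".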
    SliceLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        axis = params.get<int>("axis", 1);
        num_split = params.get<int>("num_split", 0);
        hasDynamicShapes = params.get<bool>("has_dynamic_shapes", false);
        shapesInitialized = !hasDynamicShapes;
        if (params.has("slice_point"))
        {
            CV_Assert(!params.has("begin") && !params.has("size") && !params.has("end"));
            const DictValue &indicesValue = params.get("slice_point");
            sliceRanges.resize(indicesValue.size() + 1,
                               std::vector<Range>(axis + 1, Range::all()));
            int prevSlice = 0;
            for (int i = 0; i < indicesValue.size(); ++i)
            {
                sliceRanges[i][axis].start = prevSlice;
                sliceRanges[i][axis].end = indicesValue.get<int>(i);
                prevSlice = sliceRanges[i][axis].end;
            }
            sliceRanges.back()[axis].start = prevSlice;
        }
        else if (params.has("begin"))
        {
            CV_Assert(params.has("size") ^ params.has("end"));
            const DictValue &begins = params.get("begin");
            const DictValue &sizesOrEnds = params.has("size") ? params.get("size") : params.get("end");
            CV_Assert(begins.size() == sizesOrEnds.size());

            sliceRanges.resize(1);
            sliceRanges[0].resize(begins.size(), Range::all());
            for (int i = 0; i < begins.size(); ++i)
            {
                int start = begins.get<int>(i);
                int sizeOrEnd = sizesOrEnds.get<int>(i);  // May be negative for reverse indexing.
                CV_Assert(start >= 0);

                sliceRanges[0][i].start = start;
                if (params.has("size"))
                {
                    int size = sizeOrEnd;
                    CV_Assert(size == -1 || size > 0);  // A size of -1 means the range [start, axis_size).
                    sliceRanges[0][i].end = size > 0 ? (start + size) : -1;  // A negative value is finalized later.
                }
                else
                {
                    int end = sizeOrEnd;
                    CV_Assert(end < 0 || end > start);  // The end index is exclusive.
                    sliceRanges[0][i].end = end;  // A negative value is finalized later.
                }
            }
        }
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
            return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
                sliceRanges.size() == 1 && sliceRanges[0].size() == 4;
#endif
#ifdef HAVE_DNN_NGRAPH
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return sliceRanges.size() == 1;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA;
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                            const int requiredOutputs,
                            std::vector<MatShape> &outputs,
                            std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        CV_Assert(inputs.size() == 1);
        MatShape inpShape = inputs[0];

        if (!sliceRanges.empty())
        {
            outputs.resize(sliceRanges.size(), inpShape);
            for (int i = 0; i < outputs.size(); ++i)
            {
                CV_Assert(sliceRanges[i].size() <= inpShape.size());
                for (int j = 0; j < sliceRanges[i].size(); ++j)
                {
                    if (shapesInitialized || inpShape[j] > 0)
                        outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size();
                }
            }
        }
        else  // Divide the input blob into equal parts along the axis.
        {
            CV_Assert(0 <= axis && axis < inpShape.size());
            int splits = num_split ? num_split : requiredOutputs;
            CV_Assert(splits > 0 && inpShape[axis] % splits == 0);
            inpShape[axis] /= splits;
            outputs.resize(splits, inpShape);
        }
        return false;
    }

    bool updateMemoryShapes(const std::vector<MatShape> &inputs) CV_OVERRIDE
    {
        shapesInitialized = true;
        return true;
    }

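    // Resolves sliceRanges into finalSliceRanges using the actual input shape:
    // missing trailing axes are padded with Range::all(), open-ended or
    // negative bounds are clamped to the axis extents, and the empty case is
    // expanded into an equal split along "axis".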
    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
    {
#ifdef HAVE_OPENCL
        ocl_exec_cache.clear();
#endif

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        CV_Assert(inputs.size() == 1);
        const MatSize& inpShape = inputs[0].size;

        finalSliceRanges = sliceRanges;
        if (sliceRanges.empty())
        {
            // Divide the input blob into equal parts along the axis.
            int outAxisSize = inpShape[axis] / outputs.size();
            finalSliceRanges.resize(outputs.size(),
                                    std::vector<Range>(axis + 1, Range::all()));
            int prevSlice = 0;
            for (int i = 0; i < outputs.size(); ++i)
            {
                finalSliceRanges[i][axis].start = prevSlice;
                finalSliceRanges[i][axis].end = finalSliceRanges[i][axis].start + outAxisSize;
                prevSlice = finalSliceRanges[i][axis].end;
            }
        }
        else
            CV_Assert(outputs.size() == sliceRanges.size());

        for (int i = 0; i < outputs.size(); ++i)
        {
            CV_Assert(finalSliceRanges[i].size() <= inpShape.dims());
            // Fill the remaining ranges.
            for (int j = finalSliceRanges[i].size(); j < inpShape.dims(); ++j)
            {
                finalSliceRanges[i].push_back(Range::all());
            }
            // Clamp.
            for (int j = 0; j < finalSliceRanges[i].size(); ++j)
            {
                finalSliceRanges[i][j] = clamp(finalSliceRanges[i][j], inpShape[j]);
            }
        }

#if 0
        std::cout << "DEBUG: DNN/Slice: " << outputs.size() << " inpShape=" << inpShape << std::endl;
        for (int i = 0; i < outputs.size(); ++i)
        {
            for (int j = 0; j < finalSliceRanges[i].size(); ++j)
            {
                std::cout << finalSliceRanges[i][j];
            }
            std::cout << std::endl;
        }
#endif
    }

#ifdef HAVE_OPENCL
    struct OpenCLExecInfo
    {
        std::string kernel_name;
        std::string build_opts;
        size_t local_size[2];
        size_t global_size[2];

        OpenCLExecInfo()
        {
            local_size[0] = local_size[1] = 0;
            global_size[0] = global_size[1] = 0;
        }
    };
    std::vector<OpenCLExecInfo> ocl_exec_cache;

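    // Precomputes one specialized kernel configuration per output: shapes,
    // steps and slice offsets are baked into the build options, so every
    // (input, output, range) combination gets its own compiled kernel.
    // The cache is invalidated in finalize().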
    void ocl_prepare(const std::vector<UMat>& inputs, const std::vector<UMat>& outputs)
    {
        CV_TRACE_FUNCTION();

        CV_Assert(outputs.size() == finalSliceRanges.size());
        ocl_exec_cache.resize(outputs.size());

        const UMat& input = inputs[0];
        const int dims = input.dims;

        size_t WSZ = 128;

        const int elemSize = (int)input.elemSize();
        String opts0 = cv::format(
                "-DDIMS=%d -DELEMSIZE=%d",
                dims, elemSize
            );
        for (int d = 0; d < dims; d++)
        {
            opts0 += cv::format(" -DSRC_STEP_%d=%d", d, (int)input.step[dims - 1 - d]);
        }
        for (size_t i = 0; i < outputs.size(); i++)
        {
            OpenCLExecInfo& ocl = ocl_exec_cache[i];

            const UMat& output = outputs[i];
            const std::vector<Range>& range = finalSliceRanges[i];

            String opts = opts0;

            CV_CheckEQ(output.dims, dims, "");
            for (int d = 0; d < dims; d++)
            {
                opts += cv::format(" -DDST_STEP_%d=%d -DDST_SZ_%d=%d -DSRC_START_%d=%d",
                        d, (int)output.step[dims - 1 - d],
                        d, (int)output.size[dims - 1 - d],
                        d, (int)range[dims - 1 - d].start
                    );
                CV_CheckEQ(range[d].size(), (int)output.size[d], "");
            }

            const size_t param_LIMIT_BLOCK_SIZE_PER_WG = WSZ * 64;

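            // Find the largest contiguous block over the innermost dimensions:
            // while the source and destination steps match, whole rows can be
            // copied as a single linear run of block_size bytes.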
            int block_dims = 0;
            size_t block_size = elemSize;
            for (int i = dims - 1; i >= 0; --i)
            {
                if (input.step[i] != output.step[i])
                    break;
                block_size *= output.size[i];
                block_dims++;
                if (block_size >= param_LIMIT_BLOCK_SIZE_PER_WG)
                    break;
            }

            const size_t total = output.total() * elemSize;
            size_t num_blocks = total / block_size;

            if ((num_blocks <= 8 && block_size >= WSZ * 4) || (block_size >= param_LIMIT_BLOCK_SIZE_PER_WG))
            {
                // use 1D copy mode
                opts += cv::format(" -DUSE_COPY_1D=1");

                opts += cv::format(" -DBLOCK_DIMS=%d", block_dims);
                opts += cv::format(" -DBLOCK_DIMS_CONTIGUOUS=%d", block_dims);
                opts += cv::format(" -DBLOCK_SIZE=%d", (int)block_size);

                opts += cv::format(" -DBLOCK_COLS=%d", (int)block_size);
            }
            else
            {
                // use 2D copy mode
                int block_cols = block_size;
                int block_dims_contiguous = block_dims;
                size_t input_base_step = input.step[dims - 1 - block_dims_contiguous];
                size_t output_base_step = output.step[dims - 1 - block_dims_contiguous];

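                // Try to extend the block across outer dimensions: as long as
                // the source and destination strides remain proportional, the
                // copy is expressible as block_rows strided rows of block_cols
                // bytes each.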
                size_t block_rows = 1;
                for (int i = dims - 1 - block_dims_contiguous; i >= 0; --i)
                {
                    if (input.step[i] * output_base_step != output.step[i] * input_base_step)
                        break;
                    block_rows *= output.size[i];
                    block_dims++;
                }

                block_size *= block_rows;

                num_blocks = total / block_size;

                if (block_rows > 1)
                {
                    opts += cv::format(" -DBLOCK_DIMS=%d", block_dims);
                    opts += cv::format(" -DBLOCK_DIMS_CONTIGUOUS=%d", block_dims_contiguous);
                    opts += cv::format(" -DBLOCK_SIZE=%d", (int)block_size);

                    opts += cv::format(" -DBLOCK_COLS=%d", (int)block_cols);

                    opts += cv::format(" -DBLOCK_ROWS=%d", (int)block_rows);
                    opts += cv::format(" -DBLOCK_SRC_STRIDE=%d", (int)input_base_step);
                }
                else
                {
                    // use 1D copy mode
                    opts += cv::format(" -DUSE_COPY_1D=1");

                    opts += cv::format(" -DBLOCK_DIMS=%d", block_dims_contiguous);
                    opts += cv::format(" -DBLOCK_DIMS_CONTIGUOUS=%d", block_dims_contiguous);
                    opts += cv::format(" -DBLOCK_SIZE=%d", (int)block_size);

                    opts += cv::format(" -DBLOCK_COLS=%d", (int)block_size);
                }
            }

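            // Shrink the work-group size for small blocks so that each work
            // item still gets a reasonable amount of data to copy.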
            const size_t MIN_WORK_ITEMS = 16;
            if (block_size <= 4 * MIN_WORK_ITEMS)
                WSZ = 4;
            else if (block_size <= 8 * MIN_WORK_ITEMS)
                WSZ = 8;
            else if (block_size <= 16 * MIN_WORK_ITEMS)
                WSZ = 16;
            else if (block_size <= 32 * MIN_WORK_ITEMS)
                WSZ = 32;
            else if (block_size <= 64 * MIN_WORK_ITEMS)
                WSZ = 64;

            opts += cv::format(" -DWSZ=%d", (int)WSZ);

            std::ostringstream kernel_suffix;
            kernel_suffix << dims << 'x' << elemSize << "_bsz" << block_size;
            kernel_suffix << "__src_";
            for (int d = 0; d < dims; d++)
            {
                kernel_suffix << input.size[dims - 1 - d] << '_';
            }
            kernel_suffix << '_';
            /*for (int d = 0; d < dims; d++)
            {
                kernel_suffix << input.step[dims - 1 - d] << '_';
            }
            kernel_suffix << '_';*/

            kernel_suffix << "dst_";
            for (int d = 0; d < dims; d++)
            {
                kernel_suffix << output.size[dims - 1 - d] << '_';
            }
            /*kernel_suffix << '_';
            for (int d = 0; d < dims; d++)
            {
                kernel_suffix << output.step[dims - 1 - d] << '_';
            }*/
            kernel_suffix << "_slice_";
            for (int d = 0; d < dims; d++)
            {
                kernel_suffix << range[dims - 1 - d].start << '_';
            }
            for (int d = 0; d < dims; d++)
            {
                kernel_suffix << '_' << range[dims - 1 - d].end;
            }

            std::string kernel_suffix_str = kernel_suffix.str();
            opts += cv::format(" -DSLICE_KERNEL_SUFFIX=%s", kernel_suffix_str.c_str());

            ocl.kernel_name = cv::format("slice_%s", kernel_suffix_str.c_str());
            ocl.build_opts = opts;
            ocl.local_size[0] = WSZ;
            ocl.local_size[1] = 1;
            ocl.global_size[0] = WSZ;
            ocl.global_size[1] = num_blocks;
        }  // for outputs.size()
    }  // ocl_prepare

    bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
    {
        CV_TRACE_FUNCTION();

        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inputs_.getUMatVector(inputs);
        outputs_.getUMatVector(outputs);

        CV_Assert(outputs.size() == finalSliceRanges.size());

        const UMat& input = inputs[0];
        const int dims = input.dims;
        if (dims > 5)
        {
            CV_LOG_INFO(NULL, "DNN/OpenCL/Slice: implementation doesn't support dims=" << dims << ". Fallback to CPU");
            return false;
        }

        if (ocl_exec_cache.empty())
        {
            ocl_prepare(inputs, outputs);
        }
        CV_CheckEQ(ocl_exec_cache.size(), outputs.size(), "");

        for (size_t i = 0; i < outputs.size(); i++)
        {
            const OpenCLExecInfo& ocl = ocl_exec_cache[i];

            UMat& output = outputs[i];

            ocl::Kernel kernel(ocl.kernel_name.c_str(), ocl::dnn::slice_oclsrc, ocl.build_opts);
            if (kernel.empty())
                return false;
            bool ret = kernel.args(
                    ocl::KernelArg::PtrReadOnly(input),
                    ocl::KernelArg::PtrWriteOnly(output)
                )
                .run(2, (size_t*)ocl.global_size, (size_t*)ocl.local_size, false);
            if (!ret)
                return false;
        }  // for outputs.size()

        return true;
    }  // forward_ocl
#endif

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        const Mat& inpMat = inputs[0];
        CV_Assert(outputs.size() == finalSliceRanges.size());
        for (size_t i = 0; i < outputs.size(); i++)
        {
            inpMat(finalSliceRanges[i]).copyTo(outputs[i]);
        }
    }


#ifdef HAVE_DNN_IE_NN_BUILDER_2019
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
    {
        CV_Assert_N(finalSliceRanges.size() == 1, inputs.size() <= 2);

        std::vector<size_t> axes, offsets, dims;
        int from, to, step;
        int numDims = finalSliceRanges[0].size();
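        // Axis enumeration order is target-specific: ascending from "axis"
        // for MYRIAD/HDDL, descending towards "axis" for other targets.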
        if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
        {
            from = axis;
            to = numDims;
            step = 1;
        }
        else
        {
            from = numDims - 1;
            to = axis - 1;
            step = -1;
        }
        for (int i = from; i != to; i += step)
        {
            axes.push_back(i);
            offsets.push_back(finalSliceRanges[0][i].start);
            dims.push_back(finalSliceRanges[0][i].size());
        }

        InferenceEngine::Builder::Layer ieLayer(name);
        ieLayer.setName(name);
        ieLayer.setType("Crop");
        ieLayer.getParameters()["axis"] = axes;
        ieLayer.getParameters()["dim"] = dims;
        ieLayer.getParameters()["offset"] = offsets;
        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));

        if (inputs.size() != 2)
        {
            std::vector<size_t> outShape(numDims);
            for (int i = 0; i < numDims; ++i)
                outShape[i] = finalSliceRanges[0][i].size();

            ieLayer.getInputPorts()[1].setParameter("type", "weights");

            auto shapeSource = InferenceEngine::make_shared_blob<float>({
                                   InferenceEngine::Precision::FP32, outShape,
                                   InferenceEngine::Layout::ANY
                               });
            shapeSource->allocate();
            addConstantData("weights", shapeSource, ieLayer);
        }
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
    }
#endif
#endif


#ifdef HAVE_DNN_NGRAPH
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        CV_Assert_N(nodes.size() <= 2);
        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
        CV_Assert(finalSliceRanges[0].size() == ieInpNode->get_shape().size());

        std::vector<int64_t> offsets, dims;
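        // Note: "dims" collects the exclusive range ends, used below as the
        // upper bounds of the StridedSlice op (not the axis sizes).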
        for (int i = 0; i < finalSliceRanges[0].size(); ++i)
        {
            offsets.push_back(finalSliceRanges[0][i].start);
            dims.push_back(finalSliceRanges[0][i].end);
        }

        auto lower_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                             ngraph::Shape{offsets.size()}, offsets.data());
        auto upper_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                             ngraph::Shape{dims.size()}, dims.data());
        auto strides = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
                                        ngraph::Shape{dims.size()}, std::vector<int64_t>((int64_t)dims.size(), 1));

        auto slice = std::make_shared<ngraph::op::v1::StridedSlice>(ieInpNode,
                                      lower_bounds, upper_bounds, strides, std::vector<int64_t>{}, std::vector<int64_t>{});

        return Ptr<BackendNode>(new InfEngineNgraphNode(slice));
    }
#endif  // HAVE_DNN_NGRAPH


#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        std::vector<std::vector<std::size_t>> offsets;
        for (const auto& ranges : finalSliceRanges)
        {
            std::vector<std::size_t> offsets_i;
            for (const auto& range : ranges)
                offsets_i.push_back(range.start);
            offsets.push_back(std::move(offsets_i));
        }

        return make_cuda_node<cuda4dnn::SliceOp>(preferableTarget, std::move(context->stream), std::move(offsets));
    }
#endif


protected:
    // The actual non-negative values determined from @p sliceRanges depend on the input size.
    std::vector<std::vector<Range> > finalSliceRanges;
    bool hasDynamicShapes;
    bool shapesInitialized;
};

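// Caffe-style Crop layer implemented on top of Slice: the output copies the
// shape of the second input from "axis" onward, and "offset" gives the
// per-axis start positions within the first input.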
class CropLayerImpl CV_FINAL : public SliceLayerImpl
{
public:
    CropLayerImpl(const LayerParams& params) : SliceLayerImpl(LayerParams())
    {
        setParamsFrom(params);
        axis = params.get<int>("axis", 2);
        const DictValue *paramOffset = params.ptr("offset");

        if (paramOffset)
        {
            for (int i = 0; i < paramOffset->size(); i++)
                offset.push_back(paramOffset->get<int>(i));
        }
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        CV_Assert(inputs.size() == 2);

        MatShape dstShape = inputs[0];
        int start = clamp(axis, dstShape);
        for (int i = start; i < dstShape.size(); i++)
        {
            dstShape[i] = inputs[1][i];
        }
        outputs.resize(1, dstShape);
        return false;
    }

    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
    {
        std::vector<Mat> inputs;
        inputs_arr.getMatVector(inputs);
        CV_Assert(2 == inputs.size());

        const Mat &inpBlob = inputs[0];
        const Mat &inpSzBlob = inputs[1];

        int dims = inpBlob.dims;
        int start_axis = clamp(axis, dims);

        std::vector<int> offset_final(dims, 0);
        if (offset.size() == 1)
        {
            for (int i = start_axis; i < dims; i++)
                offset_final[i] = offset[0];
        }
        else if (offset.size() > 1)
        {
            if ((int)offset.size() != dims - start_axis)
                CV_Error(Error::StsBadArg, "number of offset values specified must be "
                                           "equal to the number of dimensions following axis.");

            for (int i = start_axis; i < dims; i++)
                offset_final[i] = offset[i - start_axis];
        }

        finalSliceRanges.resize(1);
        finalSliceRanges[0].resize(dims);
        for (int i = 0; i < start_axis; i++)
        {
            finalSliceRanges[0][i] = Range(0, inpBlob.size[i]);
        }
        for (int i = start_axis; i < dims; i++)
        {
            if (offset_final[i] < 0 || offset_final[i] + inpSzBlob.size[i] > inpBlob.size[i])
                CV_Error(Error::StsBadArg, "invalid crop parameters or blob sizes");

            finalSliceRanges[0][i] = Range(offset_final[i], offset_final[i] + inpSzBlob.size[i]);
        }
    }

private:
    std::vector<int> offset;
};

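// A minimal usage sketch (illustrative only): build a standalone slice layer
// that splits a blob into two equal parts along the channel axis.
//
//   LayerParams lp;
//   lp.type = "Slice";
//   lp.set("axis", 1);
//   lp.set("num_split", 2);
//   Ptr<SliceLayer> layer = SliceLayer::create(lp);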
Ptr<SliceLayer> SliceLayer::create(const LayerParams& params)
{
    return Ptr<SliceLayer>(new SliceLayerImpl(params));
}

Ptr<Layer> CropLayer::create(const LayerParams& params)
{
    return Ptr<Layer>(new CropLayerImpl(params));
}

}  // namespace dnn
}  // namespace cv