/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_DNN_DNN_ALL_LAYERS_HPP
#define OPENCV_DNN_DNN_ALL_LAYERS_HPP
#include <opencv2/dnn.hpp>

namespace cv {
namespace dnn {
CV__DNN_INLINE_NS_BEGIN
//! @addtogroup dnn
//! @{
/** @defgroup dnnLayerList Partial List of Implemented Layers
  @{
  This subsection of dnn module contains information about built-in layers and their descriptions.

  Classes listed here, in fact, provide C++ API for creating instances of built-in layers.
  In addition to this way of layer instantiation, there is a more common factory API (see @ref dnnLayerFactory), which allows layers to be created dynamically (by name) and new ones to be registered.
  You can use both APIs, but the factory API is less convenient for native C++ programming and is basically designed for use inside importers (see @ref readNetFromCaffe(), @ref readNetFromTorch(), @ref readNetFromTensorflow()). A short sketch of the factory API follows this list.

  Built-in layers partially reproduce functionality of corresponding Caffe and Torch7 layers.
  In particular, the following layers and the Caffe importer were tested to reproduce <a href="http://caffe.berkeleyvision.org/tutorial/layers.html">Caffe</a> functionality:
  - Convolution
  - Deconvolution
  - Pooling
  - InnerProduct
  - TanH, ReLU, Sigmoid, BNLL, Power, AbsVal
  - Softmax
  - Reshape, Flatten, Slice, Split
  - LRN
  - MVN
  - Dropout (since it does nothing on forward pass -))
*/
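/* A minimal sketch of the factory API mentioned above (illustrative, not part of the
   original header; assumes the built-in "ReLU" type, which OpenCV registers by default):

       cv::dnn::LayerParams lp;
       lp.name = "relu1";
       lp.type = "ReLU";
       cv::Ptr<cv::dnn::Layer> layer = cv::dnn::LayerFactory::createLayerInstance(lp.type, lp);
*/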
    class CV_EXPORTS BlankLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams &params);
    };
    /**
     * Constant layer produces the same data blob at every forward pass.
     */
    class CV_EXPORTS ConstLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams &params);
    };
    //! LSTM recurrent layer
    class CV_EXPORTS LSTMLayer : public Layer
    {
    public:
        /** Creates instance of LSTM layer */
        static Ptr<LSTMLayer> create(const LayerParams& params);
        /** @deprecated Use LayerParams::blobs instead.
        @brief Set trained weights for LSTM layer.

        LSTM behavior on each step is defined by current input, previous output, previous cell state and learned weights.

        Let @f$x_t@f$ be current input, @f$h_t@f$ be current output, @f$c_t@f$ be current state.
        Then the current output and current cell state are computed as follows:
        @f{eqnarray*}{
        h_t &= o_t \odot tanh(c_t),               \\
        c_t &= f_t \odot c_{t-1} + i_t \odot g_t, \\
        @f}
        where @f$\odot@f$ is per-element multiply operation and @f$i_t, f_t, o_t, g_t@f$ are internal gates that are computed using learned weights.

        Gates are computed as follows:
        @f{eqnarray*}{
        i_t &= sigmoid&(W_{xi} x_t + W_{hi} h_{t-1} + b_i), \\
        f_t &= sigmoid&(W_{xf} x_t + W_{hf} h_{t-1} + b_f), \\
        o_t &= sigmoid&(W_{xo} x_t + W_{ho} h_{t-1} + b_o), \\
        g_t &= tanh   &(W_{xg} x_t + W_{hg} h_{t-1} + b_g), \\
        @f}
        where @f$W_{x?}@f$, @f$W_{h?}@f$ and @f$b_{?}@f$ are learned weights represented as matrices:
        @f$W_{x?} \in R^{N_h \times N_x}@f$, @f$W_{h?} \in R^{N_h \times N_h}@f$, @f$b_? \in R^{N_h}@f$.

        For simplicity and performance purposes we use @f$ W_x = [W_{xi}; W_{xf}; W_{xo}; W_{xg}] @f$
        (i.e. @f$W_x@f$ is vertical concatenation of @f$ W_{x?} @f$), @f$ W_x \in R^{4N_h \times N_x} @f$.
        The same for @f$ W_h = [W_{hi}; W_{hf}; W_{ho}; W_{hg}], W_h \in R^{4N_h \times N_h} @f$
        and for @f$ b = [b_i; b_f; b_o; b_g] @f$, @f$b \in R^{4N_h} @f$.

        @param Wh is matrix defining how previous output is transformed to internal gates (i.e. according to above mentioned notation is @f$ W_h @f$)
        @param Wx is matrix defining how current input is transformed to internal gates (i.e. according to above mentioned notation is @f$ W_x @f$)
        @param b  is bias vector (i.e. according to above mentioned notation is @f$ b @f$)
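
        An illustrative shape sketch (hypothetical sizes; the Mats are placeholders
        to be filled with trained values):
        @code
        int Nh = 128, Nx = 64;          // hidden and input sizes
        cv::Mat Wh(4*Nh, Nh, CV_32F);   // [W_hi; W_hf; W_ho; W_hg]
        cv::Mat Wx(4*Nh, Nx, CV_32F);   // [W_xi; W_xf; W_xo; W_xg]
        cv::Mat b (4*Nh, 1,  CV_32F);   // [b_i; b_f; b_o; b_g]
        // lstm->setWeights(Wh, Wx, b); // deprecated; prefer LayerParams::blobs
        @endcode
        */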
        CV_DEPRECATED virtual void setWeights(const Mat &Wh, const Mat &Wx, const Mat &b) = 0;
        /** @brief Specifies shape of output blob which will be [[`T`], `N`] + @p outTailShape.
          * @details If this parameter is empty or unset then @p outTailShape = [`Wh`.size(0)] will be used,
          * where `Wh` is parameter from setWeights().
          */
        virtual void setOutShape(const MatShape &outTailShape = MatShape()) = 0;
        /** @deprecated Use flag `use_timestamp_dim` in LayerParams.
          * @brief Specifies whether the first dimension of input blob is interpreted as timestamp dimension or as sample dimension.

          * If flag is set to true then shape of input blob will be interpreted as [`T`, `N`, `[data dims]`] where `T` specifies number of timestamps, `N` is number of independent streams.
          * In this case each forward() call will iterate through `T` timestamps and update layer's state `T` times.

          * If flag is set to false then shape of input blob will be interpreted as [`N`, `[data dims]`].
          * In this case each forward() call will make one iteration and produce one timestamp with shape [`N`, `[out dims]`].
          */
        CV_DEPRECATED virtual void setUseTimstampsDim(bool use = true) = 0;
        /** @deprecated Use flag `produce_cell_output` in LayerParams.
          * @brief If this flag is set to true then layer will produce @f$ c_t @f$ as second output.
          * @details Shape of the second output is the same as first output.
          */
        CV_DEPRECATED virtual void setProduceCellOutput(bool produce = false) = 0;
        /* In common case it uses single input with @f$x_t@f$ values to compute output(s) @f$h_t@f$ (and @f$c_t@f$).
         * @param input should contain packed values @f$x_t@f$
         * @param output contains computed outputs: @f$h_t@f$ (and @f$c_t@f$ if setProduceCellOutput() flag was set to true).
         *
         * If setUseTimstampsDim() is set to true then @p input[0] should have at least two dimensions with the following shape: [`T`, `N`, `[data dims]`],
         * where `T` specifies number of timestamps, `N` is number of independent streams (i.e. @f$ x_{t_0 + t}^{stream} @f$ is stored inside @p input[0][t, stream, ...]).
         *
         * If setUseTimstampsDim() is set to false then @p input[0] should contain single timestamp, its shape should have form [`N`, `[data dims]`] with at least one dimension
         * (i.e. @f$ x_{t}^{stream} @f$ is stored inside @p input[0][stream, ...]).
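         *
         * A minimal shape sketch (illustrative values only):
         *
         *     int sz[] = {10, 2, 64};   // T=10 timestamps, N=2 streams, 64-dim features
         *     cv::Mat x(3, sz, CV_32F); // packed input[0] when the timestamp dimension is used
         */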
        int inputNameToIndex(String inputName) CV_OVERRIDE;
        int outputNameToIndex(const String& outputName) CV_OVERRIDE;
    };
    /** @brief GRU recurrent one-layer
     *
     * Accepts input sequence and computes the final hidden state for each element in the batch.
     *
     * - input[0] containing the features of the input sequence.
     *   input[0] should have shape [`T`, `N`, `data_dims`] where `T` is sequence length, `N` is batch size, `data_dims` is input size
     * - output would have shape [`T`, `N`, `D` * `hidden_size`] where `D = 2` if layer is bidirectional otherwise `D = 1`
     *
     * Depends on the following attributes:
     * - hidden_size - Number of neurons in the hidden layer
     * - direction - RNN could be bidirectional or forward
     *
     * The final hidden state @f$ h_t @f$ is computed by the following formulas:
     *
     @f{eqnarray*}{
     r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
     z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
     n_t = \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{(t-1)} + b_{hn})) \\
     h_t = (1 - z_t) \odot n_t + z_t \odot h_{(t-1)}
     @f}
     * where @f$x_t@f$ is the current input and @f$h_{(t-1)}@f$ is the previous or initial hidden state.
     *
     * @f$W_{i?}@f$, @f$W_{h?}@f$ and @f$b_{?}@f$ are learned weights represented as matrices:
     * @f$W_{i?} \in R^{N_h \times N_x}@f$, @f$W_{h?} \in R^{N_h \times N_h}@f$, @f$b_? \in R^{N_h}@f$.
     *
     * @f$\odot@f$ is per-element multiply operation.
     */
    class CV_EXPORTS GRULayer : public Layer
    {
    public:
        /** Creates instance of GRU layer */
        static Ptr<GRULayer> create(const LayerParams& params);
    };
    /** @brief Classical recurrent layer

    Accepts two inputs @f$x_t@f$ and @f$h_{t-1}@f$ and computes two outputs @f$o_t@f$ and @f$h_t@f$.

    - input: should contain packed input @f$x_t@f$.
    - output: should contain output @f$o_t@f$ (and @f$h_t@f$ if setProduceHiddenOutput() is set to true).

    input[0] should have shape [`T`, `N`, `data_dims`] where `T` and `N` are the number of timestamps and the number of independent samples of @f$x_t@f$ respectively.

    output[0] will have shape [`T`, `N`, @f$N_o@f$], where @f$N_o@f$ is number of rows in @f$ W_{ho} @f$ matrix.

    If setProduceHiddenOutput() is set to true then @p output[1] will contain a Mat with shape [`T`, `N`, @f$N_h@f$], where @f$N_h@f$ is number of rows in @f$ W_{hh} @f$ matrix.
    */
    class CV_EXPORTS RNNLayer : public Layer
    {
    public:
        /** Creates instance of RNNLayer */
        static Ptr<RNNLayer> create(const LayerParams& params);
        /** Sets up learned weights.

        Recurrent-layer behavior on each step is defined by current input @f$ x_t @f$, previous state @f$ h_{t-1} @f$ and learned weights as follows:
        @f{eqnarray*}{
        h_t &= tanh&(W_{hh} h_{t-1} + W_{xh} x_t + b_h), \\
        o_t &= tanh&(W_{ho} h_t + b_o),
        @f}

        @param Wxh is @f$ W_{xh} @f$ matrix
        @param bh  is @f$ b_{h}  @f$ vector
        @param Whh is @f$ W_{hh} @f$ matrix
        @param Who is @f$ W_{ho} @f$ matrix
        @param bo  is @f$ b_{o}  @f$ vector
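
        An illustrative shape sketch (hypothetical sizes; fill the Mats with trained
        values before use):
        @code
        int Nx = 64, Nh = 128, No = 10;
        cv::Mat Wxh(Nh, Nx, CV_32F), bh(Nh, 1, CV_32F);
        cv::Mat Whh(Nh, Nh, CV_32F), Who(No, Nh, CV_32F), bo(No, 1, CV_32F);
        // rnn->setWeights(Wxh, bh, Whh, Who, bo);
        @endcode
        */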
        virtual void setWeights(const Mat &Wxh, const Mat &bh, const Mat &Whh, const Mat &Who, const Mat &bo) = 0;
        /** @brief If this flag is set to true then layer will produce @f$ h_t @f$ as second output.
         * @details Shape of the second output is the same as first output.
         */
        virtual void setProduceHiddenOutput(bool produce = false) = 0;
    };
    class CV_EXPORTS BaseConvolutionLayer : public Layer
    {
    public:
        CV_DEPRECATED_EXTERNAL Size kernel, stride, pad, dilation, adjustPad;
        std::vector<size_t> adjust_pads;
        std::vector<size_t> kernel_size, strides, dilations;
        std::vector<size_t> pads_begin, pads_end;
    };
    class CV_EXPORTS ConvolutionLayer : public BaseConvolutionLayer
    {
    public:
        static Ptr<BaseConvolutionLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS ConvolutionLayerInt8 : public BaseConvolutionLayer
    {
    public:
        int input_zp, output_zp;
        float input_sc, output_sc;

        // Quantization type flag. per_channel defaults to true, which means the layer
        // contains per-channel quantization parameters; otherwise the parameters
        // are per-tensor.
        bool per_channel;
        static Ptr<BaseConvolutionLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS DeconvolutionLayer : public BaseConvolutionLayer
    {
    public:
        static Ptr<BaseConvolutionLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS LRNLayer : public Layer
    {
    public:
        float alpha, beta, bias;

        static Ptr<LRNLayer> create(const LayerParams& params);
    };
    /** @brief ArgMax/ArgMin layer
     * @note returns indices as floats, which means the supported range is [-2^24; 2^24]
     * (float32 represents consecutive integers exactly only up to 2^24)
     */
    class CV_EXPORTS ArgLayer : public Layer
    {
    public:
        static Ptr<ArgLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS PoolingLayer : public Layer
    {
    public:
        std::vector<size_t> kernel_size, strides;
        std::vector<size_t> pads_begin, pads_end;
        bool globalPooling; //!< Flag is true if at least one of the axes is global pooled.
        std::vector<bool> isGlobalPooling;
        // If true for average pooling with padding, divide every output region
        // by a whole kernel area. Otherwise exclude zero padded values and divide
        // by number of real values.
        bool avePoolPaddedArea;
        // ROIPooling parameters.
        Size pooledSize;
        float spatialScale;
        // PSROIPooling parameters.
        int psRoiOutChannels;

        static Ptr<PoolingLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS PoolingLayerInt8 : public PoolingLayer
    {
    public:
        int input_zp, output_zp;
        float input_sc, output_sc;
        static Ptr<PoolingLayerInt8> create(const LayerParams& params);
    };
    class CV_EXPORTS ReduceLayer : public Layer
    {
    public:
        std::vector<size_t> reduceDims;
        static Ptr<ReduceLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS ReduceLayerInt8 : public ReduceLayer
    {
    public:
        static Ptr<ReduceLayerInt8> create(const LayerParams& params);
    };
    class CV_EXPORTS SoftmaxLayer : public Layer
    {
    public:
        static Ptr<SoftmaxLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS SoftmaxLayerInt8 : public SoftmaxLayer
    {
    public:
        static Ptr<SoftmaxLayerInt8> create(const LayerParams& params);
    };
    class CV_EXPORTS InnerProductLayer : public Layer
    {
    public:
        static Ptr<InnerProductLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS InnerProductLayerInt8 : public InnerProductLayer
    {
    public:
        int input_zp, output_zp;
        float input_sc, output_sc;

        // Quantization type flag. per_channel defaults to true, which means the layer
        // contains per-channel quantization parameters; otherwise the parameters
        // are per-tensor.
        bool per_channel;
        static Ptr<InnerProductLayerInt8> create(const LayerParams& params);
    };
    class CV_EXPORTS MVNLayer : public Layer
    {
    public:
        bool normVariance, acrossChannels;

        static Ptr<MVNLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS ReshapeLayer : public Layer
    {
    public:
        MatShape newShapeDesc;

        static Ptr<ReshapeLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS FlattenLayer : public Layer
    {
    public:
        static Ptr<FlattenLayer> create(const LayerParams &params);
    };
    class CV_EXPORTS QuantizeLayer : public Layer
    {
    public:
        static Ptr<QuantizeLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS DequantizeLayer : public Layer
    {
    public:
        static Ptr<DequantizeLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS RequantizeLayer : public Layer
    {
    public:
        static Ptr<RequantizeLayer> create(const LayerParams &params);
    };
    class CV_EXPORTS ConcatLayer : public Layer
    {
    public:
        /**
         * @brief Add zero padding in case of concatenation of blobs with different
         * spatial sizes.
         *
         * Details: https://github.com/torch/nn/blob/master/doc/containers.md#depthconcat
         */
        bool padding;

        static Ptr<ConcatLayer> create(const LayerParams &params);
    };
    class CV_EXPORTS SplitLayer : public Layer
    {
    public:
        int outputsCount; //!< Number of copies that will be produced (is ignored when negative).

        static Ptr<SplitLayer> create(const LayerParams &params);
    };
    /**
     * Slice layer has several modes:
     * 1. Caffe mode
     * @param[in] axis Axis of split operation
     * @param[in] slice_point Array of split points
     *
     * Number of output blobs equals to number of split points plus one. The
     * first blob is a slice on input from 0 to @p slice_point[0] - 1 by @p axis,
     * the second output blob is a slice of input from @p slice_point[0] to
     * @p slice_point[1] - 1 by @p axis and the last output blob is a slice of
     * input from @p slice_point[-1] up to the end of @p axis size.
     *
     * 2. TensorFlow mode
     * @param begin Vector of start indices
     * @param size Vector of sizes
     *
     * More convenient numpy-like slice. One and only output blob
     * is a slice `input[begin[0]:begin[0]+size[0], begin[1]:begin[1]+size[1], ...]`
     *
     * 3. Torch mode
     * @param axis Axis of split operation
     *
     * Split input blob on the equal parts by @p axis.
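     *
     * A worked example of the TensorFlow mode (illustrative): for a 2x3x4 input,
     * begin = [0, 1, 0] and size = [2, 2, 4] produce a single 2x2x4 output,
     * i.e. `input[0:2, 1:3, 0:4]`.
     */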
    class CV_EXPORTS SliceLayer : public Layer
    {
    public:
        /**
         * @brief Vector of slice ranges.
         *
         * The first dimension equals number of output blobs.
         * Inner vector has slice ranges for the first number of input dimensions.
         */
        std::vector<std::vector<Range> > sliceRanges;
        std::vector<std::vector<int> > sliceSteps;

        static Ptr<SliceLayer> create(const LayerParams &params);
    };
    class CV_EXPORTS PermuteLayer : public Layer
    {
    public:
        static Ptr<PermuteLayer> create(const LayerParams& params);
    };
    /**
     * Permute channels of 4-dimensional input blob.
     * @param group Number of groups to split input channels and pick in turns.
     *
     * \f[ groupSize = \frac{number\ of\ channels}{group} \f]
     * \f[ output(n, c, h, w) = input(n, groupSize \times (c \% group) + \lfloor \frac{c}{group} \rfloor, h, w) \f]
     * Read more at https://arxiv.org/pdf/1707.01083.pdf
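     *
     * A worked example (illustrative): with 4 input channels and group = 2,
     * groupSize = 2 and the output channels are taken from input channels
     * [0, 2, 1, 3] in that order.
     */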
    class CV_EXPORTS ShuffleChannelLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams& params);

        int group;
    };
    /**
     * @brief Adds extra values for specific axes.
     * @param paddings Vector of paddings in format
     *                 @code
     *                 [ pad_before, pad_after,  // [0]th dimension
     *                   pad_before, pad_after,  // [1]st dimension
     *                   ...
     *                   pad_before, pad_after ] // [n]th dimension
     *                 @endcode
     *                 that represents number of padded values at every dimension
     *                 starting from the first one. The rest of dimensions won't
     *                 be padded.
     * @param value Value to be padded. Defaults to zero.
     * @param type Padding type: 'constant', 'reflect'
     * @param input_dims Torch's parameter. If @p input_dims is not equal to the
     *                   actual input dimensionality then the `[0]th` dimension
     *                   is considered as a batch dimension and @p paddings are shifted
     *                   by one dimension. Defaults to `-1`, which means padding
     *                   corresponds to @p paddings.
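     *
     * A usage sketch (illustrative; pads an NCHW blob by 1 row and 2 columns on each side):
     * @code
     * cv::dnn::LayerParams lp;
     * lp.type = "Padding";
     * int paddings[] = {0, 0, 0, 0, 1, 1, 2, 2};  // (before, after) per dimension: N, C, H, W
     * lp.set("paddings", cv::dnn::DictValue::arrayInt(paddings, 8));
     * lp.set("value", 0.0);
     * @endcode
     */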
    class CV_EXPORTS PaddingLayer : public Layer
    {
    public:
        static Ptr<PaddingLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS ActivationLayer : public Layer
    {
    public:
        virtual void forwardSlice(const float* src, float* dst, int len,
                                  size_t outPlaneSize, int cn0, int cn1) const {};
        virtual void forwardSlice(const int* src, const int* lut, int* dst, int len,
                                  size_t outPlaneSize, int cn0, int cn1) const {};
        virtual void forwardSlice(const int8_t* src, const int8_t* lut, int8_t* dst, int len,
                                  size_t outPlaneSize, int cn0, int cn1) const {};
    };
    class CV_EXPORTS ReLULayer : public ActivationLayer
    {
    public:
        static Ptr<ReLULayer> create(const LayerParams &params);
    };
    class CV_EXPORTS ReLU6Layer : public ActivationLayer
    {
    public:
        float minValue, maxValue;

        static Ptr<ReLU6Layer> create(const LayerParams &params);
    };
    class CV_EXPORTS ChannelsPReLULayer : public ActivationLayer
    {
    public:
        static Ptr<Layer> create(const LayerParams& params);
    };
    class CV_EXPORTS ELULayer : public ActivationLayer
    {
    public:
        static Ptr<ELULayer> create(const LayerParams &params);
    };
    class CV_EXPORTS TanHLayer : public ActivationLayer
    {
    public:
        static Ptr<TanHLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SwishLayer : public ActivationLayer
    {
    public:
        static Ptr<SwishLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS MishLayer : public ActivationLayer
    {
    public:
        static Ptr<MishLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SigmoidLayer : public ActivationLayer
    {
    public:
        static Ptr<SigmoidLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS BNLLLayer : public ActivationLayer
    {
    public:
        static Ptr<BNLLLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS AbsLayer : public ActivationLayer
    {
    public:
        static Ptr<AbsLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS PowerLayer : public ActivationLayer
    {
    public:
        float power, scale, shift;

        static Ptr<PowerLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS ExpLayer : public ActivationLayer
    {
    public:
        float base, scale, shift;

        static Ptr<ExpLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS CeilLayer : public ActivationLayer
    {
    public:
        static Ptr<CeilLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS FloorLayer : public ActivationLayer
    {
    public:
        static Ptr<FloorLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS LogLayer : public ActivationLayer
    {
    public:
        static Ptr<LogLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS RoundLayer : public ActivationLayer
    {
    public:
        static Ptr<RoundLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SqrtLayer : public ActivationLayer
    {
    public:
        static Ptr<SqrtLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS NotLayer : public ActivationLayer
    {
    public:
        static Ptr<NotLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS AcosLayer : public ActivationLayer
    {
    public:
        static Ptr<AcosLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS AcoshLayer : public ActivationLayer
    {
    public:
        static Ptr<AcoshLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS AsinLayer : public ActivationLayer
    {
    public:
        static Ptr<AsinLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS AsinhLayer : public ActivationLayer
    {
    public:
        static Ptr<AsinhLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS AtanLayer : public ActivationLayer
    {
    public:
        static Ptr<AtanLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS AtanhLayer : public ActivationLayer
    {
    public:
        static Ptr<AtanhLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS CosLayer : public ActivationLayer
    {
    public:
        static Ptr<CosLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS CoshLayer : public ActivationLayer
    {
    public:
        static Ptr<CoshLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS ErfLayer : public ActivationLayer
    {
    public:
        static Ptr<ErfLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS HardSwishLayer : public ActivationLayer
    {
    public:
        static Ptr<HardSwishLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SinLayer : public ActivationLayer
    {
    public:
        static Ptr<SinLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SinhLayer : public ActivationLayer
    {
    public:
        static Ptr<SinhLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SoftplusLayer : public ActivationLayer
    {
    public:
        static Ptr<SoftplusLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SoftsignLayer : public ActivationLayer
    {
    public:
        static Ptr<SoftsignLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS TanLayer : public ActivationLayer
    {
    public:
        static Ptr<TanLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS CeluLayer : public ActivationLayer
    {
    public:
        static Ptr<CeluLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS HardSigmoidLayer : public ActivationLayer
    {
    public:
        static Ptr<HardSigmoidLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS SeluLayer : public ActivationLayer
    {
    public:
        static Ptr<SeluLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS ThresholdedReluLayer : public ActivationLayer
    {
    public:
        static Ptr<ThresholdedReluLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS ActivationLayerInt8 : public ActivationLayer
    {
    public:
        static Ptr<ActivationLayerInt8> create(const LayerParams &params);
    };

    class CV_EXPORTS SignLayer : public ActivationLayer
    {
    public:
        static Ptr<SignLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS ShrinkLayer : public ActivationLayer
    {
    public:
        static Ptr<ShrinkLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS ReciprocalLayer : public ActivationLayer
    {
    public:
        static Ptr<ReciprocalLayer> create(const LayerParams &params);
    };
    /* Layers used in semantic segmentation */

    class CV_EXPORTS CropLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams &params);
    };
    /** @brief Element wise operation on inputs

    Extra optional parameters:
    - "operation" as string. Values are "sum" (default), "prod", "max", "div", "min"
    - "coeff" as float array. Specify weights of inputs for SUM operation
    - "output_channels_mode" as string. Values are "same" (default, all inputs must have the same layout), "input_0", "input_0_truncate", "max_input_channels"
    class CV_EXPORTS EltwiseLayer : public Layer
    {
    public:
        static Ptr<EltwiseLayer> create(const LayerParams &params);
    };

    class CV_EXPORTS EltwiseLayerInt8 : public Layer
    {
    public:
        static Ptr<EltwiseLayerInt8> create(const LayerParams &params);
    };
    class CV_EXPORTS BatchNormLayer : public ActivationLayer
    {
    public:
        bool hasWeights, hasBias;

        static Ptr<BatchNormLayer> create(const LayerParams &params);
    };
    class CV_EXPORTS BatchNormLayerInt8 : public BatchNormLayer
    {
    public:
        float input_sc, output_sc;
        int input_zp, output_zp;
        static Ptr<BatchNormLayerInt8> create(const LayerParams &params);
    };
    class CV_EXPORTS MaxUnpoolLayer : public Layer
    {
    public:
        static Ptr<MaxUnpoolLayer> create(const LayerParams &params);
    };
    class CV_EXPORTS ScaleLayer : public Layer
    {
    public:
        static Ptr<ScaleLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS ScaleLayerInt8 : public ScaleLayer
    {
    public:
        static Ptr<ScaleLayerInt8> create(const LayerParams &params);
    };
    class CV_EXPORTS ShiftLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams& params);
    };

    class CV_EXPORTS ShiftLayerInt8 : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams& params);
    };

    class CV_EXPORTS CompareLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams& params);
    };
    class CV_EXPORTS DataAugmentationLayer : public Layer
    {
    public:
        static Ptr<DataAugmentationLayer> create(const LayerParams& params);
    };

    class CV_EXPORTS CorrelationLayer : public Layer
    {
    public:
        static Ptr<CorrelationLayer> create(const LayerParams& params);
    };

    class CV_EXPORTS AccumLayer : public Layer
    {
    public:
        static Ptr<AccumLayer> create(const LayerParams& params);
    };

    class CV_EXPORTS FlowWarpLayer : public Layer
    {
    public:
        static Ptr<FlowWarpLayer> create(const LayerParams& params);
    };

    class CV_EXPORTS PriorBoxLayer : public Layer
    {
    public:
        static Ptr<PriorBoxLayer> create(const LayerParams& params);
    };

    class CV_EXPORTS ReorgLayer : public Layer
    {
    public:
        static Ptr<ReorgLayer> create(const LayerParams& params);
    };
    class CV_EXPORTS RegionLayer : public Layer
    {
    public:
        static Ptr<RegionLayer> create(const LayerParams& params);
    };
    /**
     * @brief Detection output layer.
     *
     * The layer size is: @f$ (1 \times 1 \times N \times 7) @f$
     * where N is [keep_top_k] parameter multiplied by batch size. Each row is:
     * [image_id, label, confidence, xmin, ymin, xmax, ymax]
     * where image_id is the index of image input in the batch.
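     *
     * A parsing sketch (illustrative; `det` is the layer's output blob):
     * @code
     * CV_Assert(det.dims == 4 && det.size[3] == 7);
     * cv::Mat rows(det.size[2], 7, CV_32F, det.ptr<float>());
     * for (int i = 0; i < rows.rows; i++)
     * {
     *     if (rows.at<float>(i, 2) > 0.5f)  // confidence threshold
     *     {
     *         // rows.at<float>(i, 3..6) hold xmin, ymin, xmax, ymax
     *     }
     * }
     * @endcode
     */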
    class CV_EXPORTS DetectionOutputLayer : public Layer
    {
    public:
        static Ptr<DetectionOutputLayer> create(const LayerParams& params);
    };
    /**
     * @brief \f$ L_p \f$ - normalization layer.
     * @param p Normalization factor. The most common `p = 1` for \f$ L_1 \f$ -
     *          normalization or `p = 2` for \f$ L_2 \f$ - normalization or a custom one.
     * @param eps Parameter \f$ \epsilon \f$ to prevent a division by zero.
     * @param across_spatial If true, normalize an input across all non-batch dimensions.
     *                       Otherwise normalize every channel separately.
     *
     * Across spatial:
     * @f[
     * norm = \sqrt[p]{\epsilon + \sum_{x, y, c} |src(x, y, c)|^p } \\
     * dst(x, y, c) = \frac{ src(x, y, c) }{norm}
     * @f]
     *
     * Channel wise normalization:
     * @f[
     * norm(c) = \sqrt[p]{\epsilon + \sum_{x, y} |src(x, y, c)|^p } \\
     * dst(x, y, c) = \frac{ src(x, y, c) }{norm(c)}
     * @f]
     *
     * Where `x, y` - spatial coordinates, `c` - channel.
     *
     * Every sample in the batch is normalized separately. Optionally,
     * output is scaled by the trained parameters.
     */
    class CV_EXPORTS NormalizeBBoxLayer : public Layer
    {
    public:
        float pnorm, epsilon;
        CV_DEPRECATED_EXTERNAL bool acrossSpatial;

        static Ptr<NormalizeBBoxLayer> create(const LayerParams& params);
    };
    /**
     * @brief Resize input 4-dimensional blob by nearest neighbor or bilinear strategy.
     *
     * Layer is used to support TensorFlow's resize_nearest_neighbor and resize_bilinear ops.
     */
    class CV_EXPORTS ResizeLayer : public Layer
    {
    public:
        static Ptr<ResizeLayer> create(const LayerParams& params);
    };
    /**
     * @brief Bilinear resize layer from https://github.com/cdmh/deeplab-public-ver2
     *
     * It differs from @ref ResizeLayer in output shape and resize scales computations.
     */
    class CV_EXPORTS InterpLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams& params);
    };
    class CV_EXPORTS ProposalLayer : public Layer
    {
    public:
        static Ptr<ProposalLayer> create(const LayerParams& params);
    };

    class CV_EXPORTS CropAndResizeLayer : public Layer
    {
    public:
        static Ptr<Layer> create(const LayerParams& params);
    };
    class CV_EXPORTS CumSumLayer : public Layer
    {
    public:
        static Ptr<CumSumLayer> create(const LayerParams& params);
    };

//! @}
//! @}
CV__DNN_INLINE_NS_END
}
}
#endif