1 // Copyright (C) 2018-2020 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
6 * @brief A header file for the internal Layers structure that describes layer information
22 #include "ie_common.h"
24 #include "ie_layers_property.hpp"
26 namespace InferenceEngine {
28 * @brief This is an internal common Layer parameter parsing arguments
35 /// @brief Layer precision
40 * @brief This is a base abstraction Layer - all DNN Layers inherit from this class
42 class INFERENCE_ENGINE_API_CLASS(CNNLayer) {
45 * @brief A shared pointer to CNNLayer
47 using Ptr = std::shared_ptr<CNNLayer>;
58 * @brief Layer base operating precision
62 * @brief A vector of pointers to the output data elements of this layer in the di-graph (order matters)
64 std::vector<DataPtr> outData;
66 * @brief A vector of weak pointers to the input data elements of this layer in the di-graph (order matters)
68 std::vector<DataWeakPtr> insData;
70 * @brief If suggested to fuse - a pointer to the layer which needs to be fused with this layer
74 * @brief Convenience user values to store in this object as extra data
78 * @brief Layer affinity set by user.
83 * @brief A constructor. Creates a new CNNLayer instance and initializes layer parameters with the given values.
85 * @param prms Basic common parsing parameters
87 explicit CNNLayer(const LayerParams& prms)
88 : name(prms.name), type(prms.type), precision(prms.precision), userValue({0}) {}
91 * @brief A virtual destructor
96 * @brief Sets a layer to be fused with
98 * @param layer Reference to the layer to be fused with
100 void fuse(Ptr& layer) {
105 * @brief Returns the first element of the input data for this layer
107 * @return A smart pointer to the input data element
109 virtual const DataPtr input() const {
110 if (insData.empty()) {
111 THROW_IE_EXCEPTION << "Internal error: input data is empty";
113 auto lockedFirstInsData = insData[0].lock();
114 if (!lockedFirstInsData) {
115 THROW_IE_EXCEPTION << "Internal error: unable to lock weak_ptr\n";
117 return lockedFirstInsData;
121 * @brief Checks if the input data and layer data are legitimate
123 void validateLayer();
126 * @brief Parse string with float in accordance with IE rules
128 * @param str input string with float value
129 * @return float value if parsing was successful
130 * @throws InferenceEngineException in case of parsing error
132 static float ie_parse_float(const std::string& str) {
134 return -std::numeric_limits<float>::infinity();
135 } else if (str == "inf") {
136 return std::numeric_limits<float>::infinity();
139 std::stringstream val_stream(str);
140 val_stream.imbue(std::locale("C"));
142 if (!val_stream.eof()) THROW_IE_EXCEPTION;
147 * @brief serialize float with c_locale formating
148 * used for default values serializing
150 static std::string ie_serialize_float(float value) {
151 std::stringstream val_stream;
152 val_stream.imbue(std::locale("C"));
154 return val_stream.str();
158 * @brief Gets float value for the given parameter
160 * @param param name of the parameter to find
161 * @param def default value of the parameter if not found
162 * @return float value
164 float GetParamAsFloat(const char* param, float def) const {
165 std::string val = GetParamAsString(param, ie_serialize_float(def).c_str());
167 return ie_parse_float(val);
169 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
170 << val << " cannot be casted to float.";
175 * @brief Returns a float value for the given layer parameter
177 * @param param Name of the layer parameter
178 * @return A float value for the specified parameter
180 float GetParamAsFloat(const char* param) const {
181 std::string val = GetParamAsString(param);
183 return ie_parse_float(val);
185 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
186 << val << " cannot be casted to float.";
191 * @brief Returns a vector of float values for the given parameter or returns the default value
193 * @param param Name of the layer parameter
194 * @param def Default value of the parameter if not found
195 * @return vector of float values
197 std::vector<float> GetParamAsFloats(const char* param, std::vector<float> def) const {
198 std::string vals = GetParamAsString(param, "");
199 std::vector<float> result;
200 std::istringstream stream(vals);
202 if (vals.empty()) return def;
203 while (getline(stream, str, ',')) {
205 float val = ie_parse_float(str);
206 result.push_back(val);
208 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
209 << ". Value " << vals << " cannot be casted to floats.";
216 * @brief Returns a vector of float values for the given parameter
218 * @param param Name of the layer parameter
219 * @return vector of float values
221 std::vector<float> GetParamAsFloats(const char* param) const {
222 std::string vals = GetParamAsString(param);
223 std::vector<float> result;
224 std::istringstream stream(vals);
226 while (getline(stream, str, ',')) {
228 float val = ie_parse_float(str);
229 result.push_back(val);
231 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
232 << ". Value " << vals << " cannot be casted to floats.";
239 * @brief Returns an integer value for the given parameter or returns the default value
241 * @param param Name of the layer parameter
242 * @param def Default value of the parameter if not found
243 * @return An int value for the specified parameter
245 int GetParamAsInt(const char* param, int def) const {
246 std::string val = GetParamAsString(param, std::to_string(def).c_str());
248 return std::stoi(val);
250 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
251 << val << " cannot be casted to int.";
256 * @brief Returns an integer value for the given parameter
258 * @param param Name of the layer parameter
259 * @return An int value for the specified parameter
261 int GetParamAsInt(const char* param) const {
262 std::string val = GetParamAsString(param);
264 return std::stoi(val);
266 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ". Value "
267 << val << " cannot be casted to int.";
272 * @brief Returns a vector of int values for the given parameter or returns the default value
274 * @param param Name of the layer parameter
275 * @param def Default value of the parameter if not found
276 * @return vector of int values
278 std::vector<int> GetParamAsInts(const char* param, std::vector<int> def) const {
279 std::string vals = GetParamAsString(param, "");
280 std::vector<int> result;
281 std::istringstream stream(vals);
283 if (vals.empty()) return def;
284 while (getline(stream, str, ',')) {
286 result.push_back(std::stoi(str));
288 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
289 << ". Value " << vals << " cannot be casted to int.";
296 * @brief Returns a vector of int values for the given parameter
298 * @param param Name of the layer parameter
299 * @return vector of int values
301 std::vector<int> GetParamAsInts(const char* param) const {
302 std::string vals = GetParamAsString(param);
303 std::vector<int> result;
304 std::istringstream stream(vals);
306 while (getline(stream, str, ',')) {
308 result.push_back(std::stoi(str));
310 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " " << str << " from IR for layer " << name
311 << ". Value " << vals << " cannot be casted to int.";
317 * @brief Returns an unsigned integer value for the given parameter or returns the default value
319 * @param param Name of the layer parameter
320 * @param def Default value of the parameter if not found
321 * @return An unsigned integer value for the specified parameter
323 unsigned int GetParamAsUInt(const char* param, unsigned int def) const {
324 std::string val = GetParamAsString(param, std::to_string(def).c_str());
325 std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
326 ". Value " + val + " cannot be casted to int.";
328 int value = std::stoi(val);
330 THROW_IE_EXCEPTION << message;
332 return static_cast<unsigned int>(value);
334 THROW_IE_EXCEPTION << message;
339 * @brief Returns an unsigned integer value for the given parameter
341 * @param param Name of the layer parameter
342 * @return An unsigned integer value for the specified parameter
344 unsigned int GetParamAsUInt(const char* param) const {
345 std::string val = GetParamAsString(param);
346 std::string message = "Cannot parse parameter " + std::string(param) + " from IR for layer " + name +
347 ". Value " + val + " cannot be casted to unsigned int.";
349 int value = std::stoi(val);
351 THROW_IE_EXCEPTION << message;
353 return static_cast<unsigned int>(value);
355 THROW_IE_EXCEPTION << message;
360 * @brief Returns a vector of unsigned int values for the given parameter or returns the default value
362 * @param param Name of the layer parameter
363 * @param def Default value of the parameter if not found
364 * @return vector of unsigned int values
366 std::vector<unsigned int> GetParamAsUInts(const char* param, std::vector<unsigned int> def) const {
367 std::string vals = GetParamAsString(param, "");
368 std::vector<unsigned int> result;
369 std::istringstream stream(vals);
371 std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
372 name + ". Value " + vals + " cannot be casted to unsigned int.";
373 if (vals.empty()) return def;
374 while (getline(stream, str, ',')) {
376 int value = std::stoi(str);
378 THROW_IE_EXCEPTION << message;
380 result.push_back(static_cast<unsigned int>(value));
382 THROW_IE_EXCEPTION << message;
389 * @brief Returns a vector of unsigned int values for the given parameter
391 * @param param Name of the layer parameter
392 * @return vector of unsigned int values
394 std::vector<unsigned int> GetParamAsUInts(const char* param) const {
395 std::string vals = GetParamAsString(param);
396 std::vector<unsigned int> result;
397 std::istringstream stream(vals);
399 std::string message = "Cannot parse parameter " + std::string(param) + " " + str + " from IR for layer " +
400 name + ". Value " + vals + " cannot be casted to int.";
401 while (getline(stream, str, ',')) {
403 int value = std::stoi(str);
405 THROW_IE_EXCEPTION << message;
407 result.push_back(static_cast<unsigned int>(value));
409 THROW_IE_EXCEPTION << message;
415 * @brief Returns a boolean value for the given parameter.
417 * The valid values are (true, false, 1, 0).
418 * @param param Name of the layer parameter
419 * @param def Default value of the parameter if not found
420 * @return A bool value for the specified parameter
422 bool GetParamAsBool(const char* param, bool def) const {
423 std::string val = GetParamAsString(param, std::to_string(def).c_str());
424 std::string loweredCaseValue;
425 std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
426 return std::tolower(value);
431 if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
432 // attempting parse using non alpha bool
433 return (GetParamAsInt(param, def) != 0);
439 * @brief Returns a boolean value for the given parameter
441 * @param param Name of the layer parameter
442 * @return A bool value for the specified parameter
444 bool GetParamAsBool(const char* param) const {
445 std::string val = GetParamAsString(param);
446 std::string loweredCaseValue;
447 std::transform(val.begin(), val.end(), std::back_inserter(loweredCaseValue), [](char value) {
448 return std::tolower(value);
453 if (!(std::istringstream(loweredCaseValue) >> std::boolalpha >> result)) {
454 // attempting parse using non alpha bool
455 return (GetParamAsInt(param) != 0);
462 * @brief Returns a string value for the given parameter or returns the default one
464 * @param param Name of the layer parameter
465 * @param def Default value of the parameter if not found
466 * @return A string value
468 std::string GetParamAsString(const char* param, const char* def) const {
469 auto it = params.find(param);
470 if (it == params.end() || it->second.empty()) {
477 * @brief Checks the param presence in the layer
479 * @param param Name of the layer parameter
480 * @return a bool depending param presence
482 bool CheckParamPresence(const char* param) const {
483 auto it = params.find(param);
484 if (it == params.end()) {
491 * @brief Returns a string value for the given parameter.
493 * Throws exception if parameter was not found.
494 * @param param Name of the layer parameter
495 * @return A string value
497 std::string GetParamAsString(const char* param) const {
498 auto it = params.find(param);
499 if (it == params.end()) {
500 THROW_IE_EXCEPTION << "No such parameter name '" << param << "' for layer " << name;
505 std::vector<std::string> GetParamAsStrings(const char* param, std::vector<std::string> def) const {
506 std::string vals = GetParamAsString(param, "");
507 std::vector<std::string> result;
508 std::istringstream stream(vals);
510 if (vals.empty()) return def;
511 while (getline(stream, str, ',')) {
513 result.push_back(str);
515 THROW_IE_EXCEPTION << "Cannot parse parameter " << param << " from IR for layer " << name << ".";
522 * @brief Map of pairs: (parameter name, parameter value)
524 std::map<std::string, std::string> params;
527 * @brief Map of pairs: (name, weights/biases blob)
529 std::map<std::string, Blob::Ptr> blobs;
533 * @brief Alias for the CNNLayer type
535 using GenericLayer = class CNNLayer;
538 * @brief This class represents a layer with Weights and/or Biases (e.g. Convolution/Fully Connected, etc.)
540 class INFERENCE_ENGINE_API_CLASS(WeightableLayer): public CNNLayer {
543 * @brief A default constructor. Constructs a WeightableLayer instance and initializes layer parameters with the given
546 * @param prms Initial layer parameters
548 explicit WeightableLayer(const LayerParams& prms): CNNLayer(prms) {}
551 * @brief A pointer to a weights blob
555 * @brief A pointer to a biases blob
560 * @brief Constructs a WeightableLayer instance and initializes layer parameters with the given values
562 using CNNLayer::CNNLayer;
564 virtual ~WeightableLayer();
568 * @brief Convenient way to declare a property with backward compatibility to 2D members
* NOTE(review): the _x/_y reference members introduced by this macro make the enclosing
* class non-copy-assignable by default (references cannot be reseated), which presumably
* is why layers using it define operator= by hand — confirm.
570 #define DEFINE_PROP(prop_name) \
571 PropertyVector<unsigned int> prop_name; \
572 unsigned int& prop_name##_x = prop_name.at(X_AXIS); \
573 unsigned int& prop_name##_y = prop_name.at(Y_AXIS);
576 * @brief This class represents a standard 3D Convolution Layer
578 class INFERENCE_ENGINE_API_CLASS(ConvolutionLayer): public WeightableLayer {
581 * @brief A convolution kernel array [X, Y, Z, ...]
583 DEFINE_PROP(_kernel);
585 * @brief A convolution paddings begin array [X, Y, Z, ...]
587 DEFINE_PROP(_padding);
589 * @brief A convolution paddings end array [X, Y, Z, ...]
591 PropertyVector<unsigned int> _pads_end;
593 * @brief A convolution strides array [X, Y, Z, ...]
595 DEFINE_PROP(_stride);
597 * @brief A convolution dilations array [X, Y, Z, ...]
599 DEFINE_PROP(_dilation);
601 * @brief A number of output feature maps (size) generating the 3'rd output dimension
603 unsigned int _out_depth = 0u;
605 * @brief Number of groups
607 unsigned int _group = 1u;
609 * @brief Auto padding type
611 std::string _auto_pad;
614 * @brief Creates a new ConvolutionLayer instance.
616 explicit ConvolutionLayer(const LayerParams& p)
617 : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
619 * @brief assignment operator
621 ConvolutionLayer& operator=(const ConvolutionLayer& that) {
623 WeightableLayer::operator=(that);
624 _kernel = that._kernel;
625 _padding = that._padding;
626 _pads_end = that._pads_end;
627 _stride = that._stride;
628 _dilation = that._dilation;
629 _out_depth = that._out_depth;
630 _group = that._group;
// NOTE(review): _auto_pad is not copied in the assignments shown above — confirm
// whether that omission is intentional.
635 * @brief copy constructor
637 ConvolutionLayer(const ConvolutionLayer& that): WeightableLayer(that) {
641 * @brief move constructor
643 ConvolutionLayer(ConvolutionLayer&&) = default;
645 virtual ~ConvolutionLayer();
649 * @brief This class represents a standard deconvolution (transposed convolution) layer
651 class INFERENCE_ENGINE_API_CLASS(DeconvolutionLayer): public ConvolutionLayer {
653 using ConvolutionLayer::ConvolutionLayer;
654 using ConvolutionLayer::operator=;
656 virtual ~DeconvolutionLayer();
660 * @brief This class represents a standard deformable convolution layer
662 class INFERENCE_ENGINE_API_CLASS(DeformableConvolutionLayer): public ConvolutionLayer {
664 using ConvolutionLayer::ConvolutionLayer;
665 using ConvolutionLayer::operator=;
668 * @brief Number of deformable groups
670 unsigned int _deformable_group = 1u;
672 virtual ~DeformableConvolutionLayer();
676 * @brief This class represents a standard pooling layer
678 class INFERENCE_ENGINE_API_CLASS(PoolingLayer): public CNNLayer {
681 * @brief Pooling kernel array [X, Y, Z, ...]
683 DEFINE_PROP(_kernel);
685 * @brief Pooling paddings begin array [X, Y, Z, ...]
687 DEFINE_PROP(_padding);
689 * @brief Pooling paddings end array [X, Y, Z, ...]
691 PropertyVector<unsigned int> _pads_end;
693 * @brief Pooling strides array [X, Y, Z, ...]
695 DEFINE_PROP(_stride);
699 * @brief Defines available pooling types
701 enum PoolType { MAX = 1, AVG = 2, STOCH = 3, ROI = 4, SPACIAL_PYRAMID = 5 };
704 * @brief A pooling type
706 PoolType _type = MAX;
709 * @brief A flag that indicates if padding is excluded from the averaging area or not
711 bool _exclude_pad = false;
713 * @brief Auto padding type
715 std::string _auto_pad;
718 * @brief Creates a new PoolingLayer instance.
720 explicit PoolingLayer(const LayerParams& p): CNNLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 0u) {}
723 * @brief assignment operator
725 PoolingLayer& operator=(const PoolingLayer& that) {
727 CNNLayer::operator=(that);
728 _kernel = that._kernel;
729 _padding = that._padding;
730 _pads_end = that._pads_end;
731 _stride = that._stride;
733 _exclude_pad = that._exclude_pad;
// NOTE(review): _auto_pad is not copied in the assignments shown above — confirm
// whether that omission is intentional.
738 * @brief copy constructor
740 PoolingLayer(const PoolingLayer& that): CNNLayer(that) {
745 * @brief move constructor
747 PoolingLayer(PoolingLayer&&) = default;
749 virtual ~PoolingLayer();
753 * @brief This class represents a standard binary convolution layer
755 class INFERENCE_ENGINE_API_CLASS(BinaryConvolutionLayer): public WeightableLayer {
758 * @enum eBinaryConvolutionMode
759 * @brief Defines possible modes of binary convolution operation
761 enum eBinaryConvolutionMode { xnor_popcount = 0 };
764 * @brief Mode of binary convolution operation
766 eBinaryConvolutionMode _mode = xnor_popcount;
769 * @brief A number of input feature maps (size) generating the 3'rd input dimension
771 unsigned int _in_depth = 0u;
774 * @brief A pad value which is used to fill pad area
776 float _pad_value = 0.0f;
779 * @brief A convolution kernel array [X, Y, Z, ...]
781 DEFINE_PROP(_kernel);
783 * @brief A convolution paddings begin array [X, Y, Z, ...]
785 DEFINE_PROP(_padding);
787 * @brief A convolution paddings end array [X, Y, Z, ...]
789 PropertyVector<unsigned int> _pads_end;
791 * @brief A convolution strides array [X, Y, Z, ...]
793 DEFINE_PROP(_stride);
795 * @brief A convolution dilations array [X, Y, Z, ...]
797 DEFINE_PROP(_dilation);
799 * @brief A number of output feature maps (size) generating the 3'rd output dimension
801 unsigned int _out_depth = 0u;
803 * @brief Number of groups
805 unsigned int _group = 1u;
807 * @brief Auto padding type
809 std::string _auto_pad;
812 * @brief Creates a new BinaryConvolutionLayer instance.
814 explicit BinaryConvolutionLayer(const LayerParams& p)
815 : WeightableLayer(p), _kernel(2, 0u), _padding(2, 0u), _stride(2, 1u), _dilation(2, 1u) {}
817 * @brief assignment operator
819 BinaryConvolutionLayer& operator=(const BinaryConvolutionLayer& that) {
821 WeightableLayer::operator=(that);
822 _kernel = that._kernel;
823 _padding = that._padding;
824 _pads_end = that._pads_end;
825 _stride = that._stride;
826 _dilation = that._dilation;
827 _out_depth = that._out_depth;
828 _group = that._group;
830 _in_depth = that._in_depth;
831 _pad_value = that._pad_value;
// NOTE(review): _auto_pad is not copied in the assignments shown above — confirm
// whether that omission is intentional.
836 * @brief copy constructor
838 BinaryConvolutionLayer(const BinaryConvolutionLayer& that): WeightableLayer(that) {
842 * @brief move constructor
844 BinaryConvolutionLayer(BinaryConvolutionLayer&&) = default;
846 virtual ~BinaryConvolutionLayer();
852 * @brief This class represents a fully connected layer
854 class INFERENCE_ENGINE_API_CLASS(FullyConnectedLayer): public WeightableLayer {
857 * @brief A size of output (number of output neurons)
859 unsigned int _out_num = 0;
862 * @brief Creates a new FullyConnectedLayer instance and initializes layer parameters with the given values.
864 using WeightableLayer::WeightableLayer;
866 virtual ~FullyConnectedLayer();
870 * @brief This class represents concatenation layer
872 * Takes as input several data elements and merges them to one using the supplied axis
874 class INFERENCE_ENGINE_API_CLASS(ConcatLayer): public CNNLayer {
877 * @brief An axis on which concatenation operation is performed
879 unsigned int _axis = 1;
882 * @brief Creates a new ConcatLayer instance and initializes layer parameters with the given values.
884 * If batch is used, it must also be specified as an input dimension
885 * In current implementation 1 means channels, 0 - batch
887 using CNNLayer::CNNLayer;
889 virtual ~ConcatLayer();
893 * @brief This class represents a layer that evenly splits the input into the supplied outputs
895 class INFERENCE_ENGINE_API_CLASS(SplitLayer): public CNNLayer {
898 * @brief An axis on which split operation is performed
900 unsigned int _axis = 1;
903 * @brief Creates a new SplitLayer instance.
* NOTE(review): the number of splits is presumably defined by the number of outData
* elements inherited from CNNLayer — confirm against the callers.
905 using CNNLayer::CNNLayer;
907 virtual ~SplitLayer();
911 * @brief This class represents a Local Response Normalization (LRN) Layer
913 class INFERENCE_ENGINE_API_CLASS(NormLayer): public CNNLayer {
916 * @brief Response size (the number of channels/elements to sum over)
918 unsigned int _size = 0;
924 * @brief Alpha coefficient
928 * @brief Beta coefficient
932 * @brief Flag to specify normalization across feature maps (true) or across channels
934 bool _isAcrossMaps = false;
937 * @brief Creates a new NormLayer instance.
939 using CNNLayer::CNNLayer;
941 virtual ~NormLayer();
945 * @brief This class represents the standard SoftMax layer
947 class INFERENCE_ENGINE_API_CLASS(SoftMaxLayer): public CNNLayer {
950 * @brief Axis number for a softmax operation
954 * @brief Creates a new SoftMaxLayer instance.
956 using CNNLayer::CNNLayer;
958 virtual ~SoftMaxLayer();
963 * @brief This class represents the standard GRN Layer
965 class INFERENCE_ENGINE_API_CLASS(GRNLayer): public CNNLayer {
968 * @brief A default constructor. Creates a new GRNLayer instance and initializes layer parameters with the given
971 * @param prms Initial layer parameters
973 explicit GRNLayer(const LayerParams& prms): CNNLayer(prms), bias(0.f) {}
976 * @brief Bias for squares sum
985 * @brief This class represents the standard MVN (Mean-Variance Normalization) Layer
987 class INFERENCE_ENGINE_API_CLASS(MVNLayer): public CNNLayer {
990 * @brief A default constructor. Creates a new MVNLayer instance and initializes layer parameters with the given
993 * @param prms Initial layer parameters
995 explicit MVNLayer(const LayerParams& prms): CNNLayer(prms), across_channels(0), normalize(1) {}
998 * @brief Indicates that the mean value is calculated across channels
1000 int across_channels;
1003 * @brief Indicates that the result needs to be normalized
1007 virtual ~MVNLayer();
1011 * @brief This class represents a Rectified Linear activation layer
1013 class INFERENCE_ENGINE_API_CLASS(ReLULayer): public CNNLayer {
1016 * @brief Negative slope is used to handle negative inputs instead of setting them to 0
1018 float negative_slope = 0.0f;
1021 * @brief Creates a new ReLULayer instance.
1023 using CNNLayer::CNNLayer;
1025 virtual ~ReLULayer();
1029 * @brief This class represents a Clamp activation layer
1031 * Clamps all tensor elements into the range [min_value, max_value]
1033 class INFERENCE_ENGINE_API_CLASS(ClampLayer): public CNNLayer {
1036 * @brief A minimum value (lower clamp bound)
1038 float min_value = 0.0f;
1041 * @brief A maximum value (upper clamp bound)
1043 float max_value = 1.0f;
1045 * @brief Creates a new ClampLayer instance.
1047 using CNNLayer::CNNLayer;
1049 virtual ~ClampLayer();
1053 * @brief This class represents a ReLU6 activation layer
1055 * Clamps all tensor elements into the range [0, 6.0]
1057 class INFERENCE_ENGINE_API_CLASS(ReLU6Layer): public ClampLayer {
// Presumably overrides the inherited max_value to 6.0 per the class brief — the body
// of this constructor is not shown here; confirm.
1059 explicit ReLU6Layer(const LayerParams& prms): ClampLayer(prms) {
1063 virtual ~ReLU6Layer();
1067 * @brief This class represents an element wise operation layer
1069 class INFERENCE_ENGINE_API_CLASS(EltwiseLayer): public CNNLayer {
1073 * @brief Defines possible operations that can be used
1099 * @brief A type of the operation to use
1101 eOperation _operation = Sum;
1104 * @brief A vector of coefficients to scale the operands
1106 std::vector<float> coeff;
1109 * @brief Creates a new EltwiseLayer instance.
1111 using CNNLayer::CNNLayer;
1113 virtual ~EltwiseLayer();
1117 * @brief This class represents a standard crop layer
1119 class INFERENCE_ENGINE_API_CLASS(CropLayer): public CNNLayer {
1122 * @brief A vector of dimensions (axis indices) for cropping
1124 std::vector<int> axis;
1126 * @brief A vector of dimensions to be preserved
1128 std::vector<int> dim;
1130 * @brief A vector of offsets for each dimension
1132 std::vector<int> offset;
1135 * @brief Creates a new CropLayer instance.
1137 using CNNLayer::CNNLayer;
1139 virtual ~CropLayer();
1143 * @brief This class represents a standard reshape layer
1145 class INFERENCE_ENGINE_API_CLASS(ReshapeLayer): public CNNLayer {
1148 * @brief A vector of sizes of the shape
1150 std::vector<int> shape;
1152 * @brief The number of axes to be taken for a reshape
1156 * @brief The number of first axes to be taken for a reshape
1161 * @brief Creates a new ReshapeLayer instance.
1163 using CNNLayer::CNNLayer;
1165 virtual ~ReshapeLayer();
1169 * @brief This class represents a standard Tile Layer
1171 class INFERENCE_ENGINE_API_CLASS(TileLayer): public CNNLayer {
1174 * @brief An index of the axis to tile
1178 * @brief The number of copies to be made
1183 * @brief Creates a new TileLayer instance.
1185 using CNNLayer::CNNLayer;
1187 virtual ~TileLayer();
1191 * @brief This class represents a Layer which performs Scale and Shift
1193 class INFERENCE_ENGINE_API_CLASS(ScaleShiftLayer): public WeightableLayer {
1196 * @brief A flag that indicates if the same value is used for all the features. If false, the value is used pixel
1199 unsigned int _broadcast = 0;
1202 * @brief Creates a new ScaleShiftLayer instance.
1204 using WeightableLayer::WeightableLayer;
1206 virtual ~ScaleShiftLayer();
1210 * @brief This class represents TensorIterator layer
1212 class INFERENCE_ENGINE_API_CLASS(TensorIterator): public CNNLayer {
1216 int from; /**< Index of external data from ins/outs fields of CNNLayer */
1217 int to; /**< Index of internal data in iterator body */
1220 int axis; /**< Axis to iterate through */
1221 int stride; /**< Stride to iterate through */
1222 int start; /**< Start index of iteration range */
1223 int end; /**< Last index of iteration range */
1224 int part_size; /**< Part size which will be transferred to body subnetwork */
1228 std::vector<DataPtr> inputs;
1229 std::vector<DataPtr> outputs;
1232 std::vector<PortMap> input_port_map;
1233 std::vector<PortMap> output_port_map;
1234 std::vector<PortMap> back_edges;
1238 using CNNLayer::CNNLayer;
1240 virtual ~TensorIterator();
1244 * @brief Base class for recurrent cell layers
1246 class INFERENCE_ENGINE_API_CLASS(RNNCellBase): public WeightableLayer {
1248 using WeightableLayer::WeightableLayer;
1251 * @brief Direct type of recurrent cell (including subtypes)
1252 * Description of particular cell semantics is in LSTMCell, GRUCell, RNNCell.
1255 LSTM, /**< Original LSTM cell */
1256 GRU, /**< Original GRU cell */
1257 RNN, /**< Original RNN cell */
1258 GRU_LBR, /**< GRU cell modification. "Linear before reset" */
1261 /** @copybrief CellType */
1262 CellType cellType = LSTM;
1265 * @brief Size of hidden state data
1267 * In case of batch output state tensor will have shape [N, hidden_size]
1269 int hidden_size = 0;
1272 * @brief Clip data into range [-clip, clip] on input of activations
1274 * clip==0.0f means no clipping
1278 * @brief Activations used inside recurrent cell
1280 * Valid values: sigmoid, tanh, relu
1282 std::vector<std::string> activations;
1285 * @brief Alpha parameters of activations
1287 * Respective to activation list.
1289 std::vector<float> activation_alpha;
1292 * @brief Beta parameters of activations
1294 * Respective to activation list.
1296 std::vector<float> activation_beta;
1298 virtual ~RNNCellBase();
1302 * @brief LSTM Cell layer
1304 * G - number of gates (=4)
1306 * S - state size (=hidden_size)
1309 * [N,D] Xt - input data
1310 * [N,S] Ht-1 - initial hidden state
1311 * [N,S] Ct-1 - initial cell state
1314 * [N,S] Ht - out hidden state
1315 * [N,S] Ct - out cell state
1318 * - weights [G,S,D+S]
1320 * NB! gates order is FICO {forget, input, candidate, output}
1322 * activations is {_f, _g, _h}
1323 * default: {_f=sigm, _g=tanh, _h=tanh}
1328 * (.) - eltwise mult
1329 * [,] - concatenation
1331 * - ft = _f(Wf*[Ht-1, Xt] + Bf)
1332 * - it = _f(Wi*[Ht-1, Xt] + Bi)
1333 * - ct = _g(Wc*[Ht-1, Xt] + Bc)
1334 * - ot = _f(Wo*[Ht-1, Xt] + Bo)
1335 * - Ct = ft (.) Ct-1 + it (.) ct
1336 * - Ht = ot (.) _h(Ct)
1338 class INFERENCE_ENGINE_API_CLASS(LSTMCell): public RNNCellBase {
1340 using RNNCellBase::RNNCellBase;
1341 using RNNCellBase::operator=;
1343 virtual ~LSTMCell();
1347 * @brief GRU Cell layer
1349 * G - number of gates (=3)
1351 * S - state size (=hidden_size)
1354 * [N,D] Xt - input data
1355 * [N,S] Ht-1 - initial hidden state
1358 * [N,S] Ht - out hidden state
1361 * - weights [G,S,D+S]
1363 * NB! gates order is ZRH {update, reset, output}
1365 * activations is {_f, _g}
1366 * default: {_f=sigm, _g=tanh}
1371 * (.) - eltwise mult
1372 * [,] - concatenation
1374 * - zt = _f(Wz*[Ht-1, Xt] + Bz)
1375 * - rt = _f(Wr*[Ht-1, Xt] + Br)
1376 * - ht = _g(Wh*[rt (.) Ht-1, Xt] + Bh)
1377 * - Ht = (1 - zt) (.) ht + zt (.) Ht-1
1379 class INFERENCE_ENGINE_API_CLASS(GRUCell): public RNNCellBase {
1381 using RNNCellBase::RNNCellBase;
1382 using RNNCellBase::operator=;
1388 * @brief RNN Cell layer
1390 * G - number of gates (=1)
1392 * S - state size (=hidden_size)
1395 * [N,D] Xt - input data
1396 * [N,S] Ht-1 - initial hidden state
1399 * [N,S] Ht - out hidden state
1402 * - weights [G,S,D+S]
1405 * activations is {_f}
1406 * default: {_f=tanh}
1411 * [,] - concatenation
1413 * - Ht = _f(Wi*[Ht-1, Xt] + Bi)
1415 class INFERENCE_ENGINE_API_CLASS(RNNCell): public RNNCellBase {
1417 using RNNCellBase::RNNCellBase;
1418 using RNNCellBase::operator=;
1424 * @brief Sequence of recurrent cells
1428 * S - state size (=hidden_size)
1429 * NS - num of state tensors (LSTM=2, GRU/RNN=1)
1430 * ND - num of direction (BDR=2, FWD/BWD=1)
1433 * [N,T,D] Xt - input data
1434 * [ND,N,S] Ht-1 - initial hidden state
1435 * [ND,N,S] Ct-1 - initial cell state // if NS==2
1436 * [N] SL - sequence length (optional input)
* NOTE(review): the three lines below appear to describe outputs but reuse the input
* labels ("input data", "initial ... state") — confirm against the intended doc.
1439 * [ND,N,T,S] Xt - input data
1440 * [ND,N,S] Ht-1 - initial hidden state
1441 * [ND,N,S] Ct-1 - initial cell state // if NS==2
1443 * NB! if axis==0 batch and sequence dimensions are swapped (N <-> T) for input and output tensors
1446 * - weights [ND,G,S,D+S]
1448 * NB! if ND==2 weights are concatenated cell weights [forward_cell_weights, backward_cell_weights]
1451 class INFERENCE_ENGINE_API_CLASS(RNNSequenceLayer): public RNNCellBase {
1453 using RNNCellBase::RNNCellBase;
1456 * @brief An axis by which iteration is performed
1458 * axis=0 means first input/output data blob dimension is sequence
1459 * axis=1 means first input/output data blob dimension is batch
1461 unsigned int axis = 1;
1464 * @brief Direction of iteration through sequence dimension
1467 FWD, /**< Forward mode. Iterate starts from index 0 with step 1. */
1468 BWD, /**< Backward mode. Iterate starts from last index with step -1. */
1469 BDR /**< Bidirectional mode. First is forward pass, second is backward. */
1472 /** @copybrief Direction */
1473 Direction direction = FWD;
1475 virtual ~RNNSequenceLayer();
1479 * @brief This class represents a layer which performs the PReLU (parametric ReLU) activation
1481 class INFERENCE_ENGINE_API_CLASS(PReLULayer): public WeightableLayer {
1484 * @brief A flag that indicates if the same negative_slope value is used for all the features. If false, the value
1485 * is used pixel wise
1487 bool _channel_shared;
1490 * @brief A default constructor. Creates a new PReLULayer instance and initializes layer parameters with the given
1493 * @param prms Initial layer parameters
// _channel_shared defaults to false, i.e. a per-feature negative slope.
1495 explicit PReLULayer(const LayerParams& prms): WeightableLayer(prms), _channel_shared(false) {}
1497 virtual ~PReLULayer();
1501 * @brief This class represents a standard Power Layer
1503 * Formula is: output = (offset + scale * input) ^ power
1505 class INFERENCE_ENGINE_API_CLASS(PowerLayer): public CNNLayer {
1508 * @brief An exponent value (power in the formula above)
1512 * @brief A scale factor
1516 * @brief An offset value
1521 * @brief Creates a new PowerLayer instance.
1523 using CNNLayer::CNNLayer;
1525 virtual ~PowerLayer();
1529 * @brief This class represents a Batch Normalization Layer
1531 class INFERENCE_ENGINE_API_CLASS(BatchNormalizationLayer): public WeightableLayer {
1534 * @brief A small value to add to the variance estimate to avoid division by zero
1536 float epsilon = 1e-3f;
1539 * @brief Creates a new BatchNormalizationLayer instance.
// Inherits constructors from WeightableLayer.
1541 using WeightableLayer::WeightableLayer;
1543 virtual ~BatchNormalizationLayer();
1547 * @brief This class represents a general matrix multiplication operation layer
1549 * Formula is: dst := alpha*src1*src2 + beta*src3
1551 class INFERENCE_ENGINE_API_CLASS(GemmLayer): public CNNLayer {
1554 * @brief A scale factor applied to the src1*src2 product (alpha in the formula above)
1558 * @brief A scale factor applied to the src3 matrix (beta in the formula above)
1562 * @brief A flag that indicates if the src1 matrix is to be transposed
1564 bool transpose_a = false;
1566 * @brief A flag that indicates if the src2 matrix is to be transposed
1568 bool transpose_b = false;
1570 * @brief Creates a new GemmLayer instance.
1572 using CNNLayer::CNNLayer;
1574 virtual ~GemmLayer();
1578 * @brief This class represents a standard Pad layer
1580 * Adds padding to the input tensor
1582 class INFERENCE_ENGINE_API_CLASS(PadLayer): public CNNLayer {
1586 * @brief Defines possible modes of pad operation
1588 enum ePadMode { Constant = 0, Edge, Reflect, Symmetric };
1591 * @brief Size of padding in the beginning of each axis
1593 PropertyVector<unsigned int> pads_begin;
1595 * @brief Size of padding in the end of each axis
1597 PropertyVector<unsigned int> pads_end;
1599 * @brief Mode of pad operation
1601 ePadMode pad_mode = Constant;
1603 * @brief A pad value which is used for filling in Constant mode
1605 float pad_value = 0.0f;
1607 * @brief Creates a new PadLayer instance.
1609 using CNNLayer::CNNLayer;
1611 virtual ~PadLayer();
1615 * @brief This class represents a standard Gather layer
1617 * Gathers slices from the dictionary input according to the indexes input
1619 class INFERENCE_ENGINE_API_CLASS(GatherLayer): public CNNLayer {
1622 * @brief The axis in Dictionary to gather Indexes from
1626 * @brief Creates a new GatherLayer instance.
1628 using CNNLayer::CNNLayer;
1630 virtual ~GatherLayer();
1634 * @brief This class represents a standard Strided Slice layer
1636 * Strided Slice picks from input tensor according to the given parameters
1638 class INFERENCE_ENGINE_API_CLASS(StridedSliceLayer): public CNNLayer {
1641 * @brief The begin_mask is a bitmask where bit i being 0 means
1642 * to ignore the begin value and instead use the default value
1644 std::string begin_mask;
1646 * @brief Analogous to begin_mask
1648 std::string end_mask;
1650 * @brief The ellipsis_mask is a bitmask where bit i being 1 means
1651 * the i-th position is actually an ellipsis
1653 std::string ellipsis_mask;
1655 * @brief The new_axis_mask is a bitmask where bit i being 1 means
1656 * the i-th position creates a new 1 dimension shape
1658 std::string new_axis_mask;
1660 * @brief The shrink_axis_mask is a bitmask where bit i being 1 means
1661 * the i-th position shrinks the dimensionality
1663 std::string shrink_axis_mask;
1666 * @brief Creates a new StridedSliceLayer instance.
1668 using CNNLayer::CNNLayer;
1670 virtual ~StridedSliceLayer();
1674 * @brief This class represents a standard Shuffle Channels layer
1675 * Shuffle Channels picks from input tensor according to the given parameters
1677 class INFERENCE_ENGINE_API_CLASS(ShuffleChannelsLayer): public CNNLayer {
1680 * @brief The axis in tensor to shuffle channels
1685 * @brief The number of groups the output channels are shuffled in
1687 unsigned int group = 1;
1690 * @brief Creates a new ShuffleChannelsLayer instance.
1692 using CNNLayer::CNNLayer;
1694 virtual ~ShuffleChannelsLayer();
1698 * @brief This class represents a standard Depth To Space layer
1699 * Depth To Space rearranges input tensor data according to the given parameters
1701 class INFERENCE_ENGINE_API_CLASS(DepthToSpaceLayer): public CNNLayer {
1704 * @brief The block size of the Depth To Space transformation
1706 unsigned int block_size = 1;
1709 * @brief Creates a new DepthToSpaceLayer instance.
1711 using CNNLayer::CNNLayer;
1713 virtual ~DepthToSpaceLayer();
1717 * @brief This class represents a standard Space To Depth layer
1718 * Space To Depth rearranges input tensor data according to the given parameters
1720 class INFERENCE_ENGINE_API_CLASS(SpaceToDepthLayer): public CNNLayer {
1723 * @brief The block size of the Space To Depth transformation
1725 unsigned int block_size = 1;
1728 * @brief Creates a new SpaceToDepthLayer instance.
1730 using CNNLayer::CNNLayer;
1732 virtual ~SpaceToDepthLayer();
1736 * @brief This class represents SparseFillEmptyRows layer
1738 * SparseFillEmptyRows fills empty rows in a sparse tensor
1740 class INFERENCE_ENGINE_API_CLASS(SparseFillEmptyRowsLayer): public CNNLayer {
1743 * @brief Creates a new SparseFillEmptyRowsLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1745 using CNNLayer::CNNLayer;
1747 virtual ~SparseFillEmptyRowsLayer();
1751 * @brief This class represents SparseSegmentMean(SqrtN, Sum) layers
1752 * SparseSegmentMean(SqrtN, Sum) layer reduces data along sparse segments of a tensor.
1754 class INFERENCE_ENGINE_API_CLASS(SparseSegmentReduceLayer): public CNNLayer {
1757 * @brief Creates a new SparseSegmentReduceLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1759 using CNNLayer::CNNLayer;
1761 virtual ~SparseSegmentReduceLayer();
1765 * @brief This class represents ExperimentalSparseWeightedReduce layer
1766 * ExperimentalSparseWeightedReduce layer reduces data along sparse segments of a tensor.
1768 class INFERENCE_ENGINE_API_CLASS(ExperimentalSparseWeightedReduceLayer) : public CNNLayer {
1771 * @brief Creates a new ExperimentalSparseWeightedReduceLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1773 using CNNLayer::CNNLayer;
1775 virtual ~ExperimentalSparseWeightedReduceLayer();
1779 * @brief This class represents SparseToDense layer
1780 * SparseToDense layer converts a sparse tensor to a dense tensor.
1782 class INFERENCE_ENGINE_API_CLASS(SparseToDenseLayer) : public CNNLayer {
1785 * @brief Creates a new SparseToDenseLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1787 using CNNLayer::CNNLayer;
1789 virtual ~SparseToDenseLayer();
1793 * @brief This class represents Bucketize layer
1794 * Bucketize layer bucketizes the input based on the boundaries.
1796 class INFERENCE_ENGINE_API_CLASS(BucketizeLayer) : public CNNLayer {
1799 * @brief Indicates whether the intervals include the right or the left bucket edge.
1801 bool with_right_bound = false;
1804 * @brief Creates a new BucketizeLayer instance.
1806 using CNNLayer::CNNLayer;
1808 virtual ~BucketizeLayer();
1812 * @brief This class represents a standard Reverse Sequence layer
1814 * Reverse Sequence modifies the input tensor according to the given parameters
1816 class INFERENCE_ENGINE_API_CLASS(ReverseSequenceLayer): public CNNLayer {
1819 * @brief The seq_axis dimension in tensor which is partially reversed
1824 * @brief The batch_axis dimension in tensor along which reversal is performed
1829 * @brief Creates a new ReverseSequenceLayer instance.
1831 using CNNLayer::CNNLayer;
1833 virtual ~ReverseSequenceLayer();
1837 * @brief This class represents a OneHot layer
1838 * Converts input into OneHot representation.
1840 class INFERENCE_ENGINE_API_CLASS(OneHotLayer): public CNNLayer {
1843 * @brief A depth of representation
1845 unsigned int depth = 0;
1848 * @brief The locations represented by indices in input take value on_value
1850 float on_value = 1.f;
1853 * @brief The locations not represented by indices in input take value off_value
1855 float off_value = 0.f;
1858 * @brief Define the shape of output tensor
1863 * @brief Creates a new OneHotLayer instance
1865 using CNNLayer::CNNLayer;
1867 virtual ~OneHotLayer();
1871 * @brief This class represents a standard RangeLayer layer
1873 * RangeLayer modifies input tensor dimensions according to the given parameters
1875 class INFERENCE_ENGINE_API_CLASS(RangeLayer): public CNNLayer {
1878 * @brief Creates a new RangeLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1880 using CNNLayer::CNNLayer;
1882 virtual ~RangeLayer();
1886 * @brief This class represents a standard Fill layer
1888 * Fill modifies the input tensor according to the given parameters
1890 class INFERENCE_ENGINE_API_CLASS(FillLayer): public CNNLayer {
1893 * @brief Creates a new FillLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1895 using CNNLayer::CNNLayer;
1897 virtual ~FillLayer();
1901 * @brief This class represents a SelectLayer layer
1903 * SelectLayer layer takes elements from the second ("then") or the third ("else") input based on condition mask
1904 * ("cond") provided in the first input. The "cond" tensor is broadcasted to "then" and "else" tensors. The output
1905 * tensor shape is equal to broadcasted shape of "cond", "then" and "else".
1907 class INFERENCE_ENGINE_API_CLASS(SelectLayer): public CNNLayer {
1910 * @brief Creates a new SelectLayer instance.
1912 using CNNLayer::CNNLayer;
1914 virtual ~SelectLayer();
1918 * @brief This class represents a standard Broadcast layer
1920 * Broadcast modifies input tensor dimensions according to the given parameters
1922 class INFERENCE_ENGINE_API_CLASS(BroadcastLayer): public CNNLayer {
1925 * @brief Creates a new BroadcastLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1927 using CNNLayer::CNNLayer;
1929 virtual ~BroadcastLayer();
1933 * @brief This class represents a quantization operation layer
1935 * Element-wise linear quantization of floating point input values into a discrete set of floating point values
1937 class INFERENCE_ENGINE_API_CLASS(QuantizeLayer): public CNNLayer {
1940 * @brief The number of quantization levels
1945 * @brief Creates a new QuantizeLayer instance.
1947 using CNNLayer::CNNLayer;
1949 virtual ~QuantizeLayer();
1953 * @brief This class represents the standard Math layers
1955 * Math modifies the input tensor according to the given parameters
1957 class INFERENCE_ENGINE_API_CLASS(MathLayer): public CNNLayer {
1960 * @brief Creates a new MathLayer instance.
// Inherits constructors from CNNLayer; no attributes are declared in the visible part of this class.
1962 using CNNLayer::CNNLayer;
1964 virtual ~MathLayer();
1968 * @brief This class represents the standard Reduce layers
1970 * Reduce modifies the input tensor according to the given parameters
1972 class INFERENCE_ENGINE_API_CLASS(ReduceLayer): public CNNLayer {
1975 * @brief Whether the reduced dimensions are kept (with size 1) in the output tensor
1977 bool keep_dims = true;
1980 * @brief Creates a new ReduceLayer instance.
1982 using CNNLayer::CNNLayer;
1984 virtual ~ReduceLayer();
1988 * @brief This class represents a standard TopK layer
1990 * TopK picks top K values from input tensor according to the given parameters
1992 class INFERENCE_ENGINE_API_CLASS(TopKLayer): public CNNLayer {
1995 * @brief The mode could be 'max' or 'min'
1999 * @brief top K values sort mode could be 'value' or 'index'
2003 * @brief The axis dimension in tensor along which the top K values are picked
2008 * @brief Creates a new TopKLayer instance.
2010 using CNNLayer::CNNLayer;
2012 virtual ~TopKLayer();
2016 * @brief This class represents Unique layer.
2018 * The Unique operation searches for unique elements in 1-D input
2020 class INFERENCE_ENGINE_API_CLASS(UniqueLayer): public CNNLayer {
2023 * @brief A flag indicating whether to sort unique elements
2027 * @brief A flag indicating whether to return indices of input data elements in the output of uniques
2029 bool return_inverse;
2031 * @brief A flag indicating whether to return a number of occurrences for each unique element
2036 * @brief Creates a new UniqueLayer instance.
2038 using CNNLayer::CNNLayer;
2040 virtual ~UniqueLayer();
2044 * @brief This class represents a standard NonMaxSuppression layer
2046 class INFERENCE_ENGINE_API_CLASS(NonMaxSuppressionLayer): public CNNLayer {
2049 * @brief The 'center_point_box' indicates the format of the box data
2051 bool center_point_box = false;
2053 * @brief The 'sort_result_descending' indicates that result will sort descending by score through all batches and
2056 bool sort_result_descending = true;
2058 * @brief Creates a new NonMaxSuppressionLayer instance.
2060 using CNNLayer::CNNLayer;
2062 virtual ~NonMaxSuppressionLayer();
2066 * @brief This class represents a standard Scatter layer
2068 class INFERENCE_ENGINE_API_CLASS(ScatterLayer): public CNNLayer {
2071 * @brief The axis in Dictionary to scatter Indexes from
2075 * @brief Creates a new ScatterLayer instance.
2077 using CNNLayer::CNNLayer;
2079 virtual ~ScatterLayer();
2082 } // namespace InferenceEngine