inference-engine/thirdparty/clDNN/src/crop.cpp
/*
// Copyright (c) 2016-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include "crop_inst.h"
#include "primitive_type_base.h"
#include "memory_impl.h"
#include "error_handler.h"
#include "json_object.h"
#include <string>

namespace cldnn {
primitive_type_id crop_type_id() {
    static primitive_type_base<crop> instance;
    return &instance;
}
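
// Computes the output layout of the crop primitive. Two variants are handled:
//  - reference-input variant: the (non-negative) reference_input sizes are
//    returned directly as the output sizes;
//  - borders variant (any reference_input dimension is negative): the negated
//    reference_input values act as right/bottom borders and the offsets as
//    left/top borders, so out = in - (|reference_input| + offsets).
//    E.g. for spatial X: in = 10, offset = 2, reference_input = -3
//    gives out = 10 - (3 + 2) = 5.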
layout crop_inst::calc_output_layout(crop_node const& node) {
    assert(static_cast<bool>(node.get_primitive()->output_data_type) == false &&
           "Output data type forcing is not supported for crop_node!");
    const auto& ref_in_sizes = node.get_primitive()->reference_input;
    const auto in_layout = node.input().get_output_layout();
    const auto& in_sizes = in_layout.size;
    const auto& offsets = node.get_primitive()->offsets;

    // Check for borders variant of crop.
    if (ref_in_sizes.batch[0] < 0 || ref_in_sizes.feature[0] < 0 || ref_in_sizes.spatial[0] < 0 ||
        ref_in_sizes.spatial[1] < 0 || ref_in_sizes.spatial[2] < 0) {
        // Ignore unsupported dimensions.
        const auto rb_sizes = ref_in_sizes.negate().sub({0, 0, 0, 0, 0});
        const auto lt_sizes = offsets.sub({0, 0, 0, 0, 0});

        const auto out_sizes = in_sizes - (rb_sizes + lt_sizes);

        return layout({in_layout.data_type, in_layout.format, out_sizes});
    }
    return layout({in_layout.data_type, in_layout.format, ref_in_sizes});
}
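
// Serializes the crop parameters to JSON. For the borders variant the reported
// "reference input size" is the resolved output size (input minus borders),
// mirroring the computation in calc_output_layout above.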
std::string crop_inst::to_string(crop_node const& node) {
    const auto& desc = node.get_primitive();
    auto ref_in_sizes = desc->reference_input;
    const auto& offsets = desc->offsets;
    const auto in_layout = node.input().get_output_layout();
    const auto& in_sizes = in_layout.size;

    auto node_info = node.desc_to_json();

    // Check for borders variant of crop.
    if (ref_in_sizes.batch[0] < 0 || ref_in_sizes.feature[0] < 0 || ref_in_sizes.spatial[0] < 0 ||
        ref_in_sizes.spatial[1] < 0 || ref_in_sizes.spatial[2] < 0) {
        // Ignore unsupported dimensions.
        const auto rb_sizes = ref_in_sizes.negate().sub({0, 0, 0, 0, 0});
        const auto lt_sizes = offsets.sub({0, 0, 0, 0, 0});

        ref_in_sizes = in_sizes - (rb_sizes + lt_sizes);
    }

    std::stringstream primitive_description;

    json_composite crop_info;
    crop_info.add("reference input size", ref_in_sizes.to_string());
    crop_info.add("offset", offsets.to_string());

    node_info->add("crop info", crop_info);
    node_info->dump(primitive_description);

    return primitive_description.str();
}
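
// Validates the crop arguments when the primitive instance is created: the
// input format must be one of the supported ones, borders (in the borders
// variant) must be non-negative and leave at least one element per dimension,
// the reference input must fit inside the input, and the offsets must not push
// the cropped region outside the input. If the node can be optimized, the
// output is set up to reuse the input buffer directly.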
crop_inst::typed_primitive_inst(network_impl& network, crop_node const& node) : parent(network, node) {
    const auto& ref_in_sizes = argument.reference_input;
    const auto in_layout = node.input().get_output_layout();
    const auto& in_sizes = in_layout.size;
    const auto in_format = in_layout.format;
    const auto& offsets = argument.offsets;
    tensor null_tensor {};
    tensor value_tensor { 1, 1, 1, 1, 1 };

    CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(),
                                  "Input format",
                                  in_format.value,
                                  "supported crop input formats",
                                  format::yxfb,
                                  format::bfyx,
                                  format::fyxb,
                                  format::bfzyx);

    // Check for borders variant of crop.
    if (ref_in_sizes.batch[0] < 0 || ref_in_sizes.feature[0] < 0 || ref_in_sizes.spatial[0] < 0 ||
        ref_in_sizes.spatial[1] < 0 || ref_in_sizes.spatial[2] < 0) {
        // Ignore unsupported dimensions.
        const auto rb_sizes = ref_in_sizes.negate().sub({0, 0, 0, 0, 0});
        const auto lt_sizes = offsets.sub({0, 0, 0, 0, 0});

        const auto out_sizes = in_sizes - (rb_sizes + lt_sizes);

        CLDNN_ERROR_TENSOR_SIZES_LESS_THAN(node.id(),
                                           "Left/top/lower borders",
                                           lt_sizes,
                                           "0 value",
                                           null_tensor,
                                           "Invalid border size: negative");
        CLDNN_ERROR_TENSOR_SIZES_LESS_THAN(node.id(),
                                           "Right/bottom/upper borders",
                                           rb_sizes,
                                           "0 value",
                                           null_tensor,
                                           "Invalid border size: negative");

        CLDNN_ERROR_TENSOR_SIZES_LESS_THAN(node.id(),
                                           "Input sizes - border sizes",
                                           out_sizes,
                                           "1 value",
                                           value_tensor,
                                           "Invalid border sizes: greater-equal input sizes");
    }

    // Check that the reference input (i.e. output) sizes do not exceed the input sizes.
    CLDNN_ERROR_TENSOR_SIZES_GREATER_THAN(node.id(),
                                          "Reference input",
                                          ref_in_sizes,
                                          "input sizes",
                                          in_sizes,
                                          "Reference input tensor / input tensor mismatch");

    // Check that the offsets are non-negative and that the offset cropped region still fits within the input.
    CLDNN_ERROR_TENSOR_SIZES_LESS_THAN(node.id(),
                                       "Batch offsets",
                                       offsets,
                                       "0 value",
                                       null_tensor,
                                       "Invalid Batch offset: negative value");
    auto input_size_sub_offsets = in_sizes - offsets;
    CLDNN_ERROR_TENSOR_SIZES_LESS_THAN(node.id(),
                                       "input sizes - offsets",
                                       input_size_sub_offsets,
                                       "reference input sizes",
                                       ref_in_sizes,
                                       "Invalid Batch offset: exceeds data for output!");

    if (node.can_be_optimized()) {
        build_deps();
        reuse_input();
    }
}
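
// Re-checks buffer sharing before execution: if the node is optimized out but
// the output memory does not already alias the input buffer, point the output
// at the input again (see reuse_input below).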
void crop_inst::on_execute() {
    if (!node.can_be_optimized())
        return;

    if (_output && _network.get_engine().is_the_same_buffer(output_memory(), input_memory()))
        return;

    reuse_input();
}
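
// Zero-copy path for optimized crops: the output is the input memory
// reinterpreted with the crop's output layout, so no data is copied.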
void crop_inst::reuse_input() {
    _output = _network.get_engine().reinterpret_buffer(input_memory(), node.get_output_layout());
}
}  // namespace cldnn