# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
17 from __future__ import print_function
20 from argparse import ArgumentParser, SUPPRESS
24 from openvino.inference_engine import IECore
def build_argparser():
    """Build the command-line argument parser for this sample.

    Returns:
        ArgumentParser: parser configured with the sample's options
        (-m/--model and -i/--input required; -l, -d, --labels, -nt optional).
    """
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group("Options")
    # -h is re-added manually because add_help=False suppresses the default.
    args.add_argument('-h', '--help', action='help', default=SUPPRESS,
                      help='Show this help message and exit.')
    args.add_argument("-m", "--model",
                      help="Required. Path to an .xml or .onnx file with a trained model.",
                      required=True, type=str)
    # nargs="+" allows a batch of image paths in one invocation.
    args.add_argument("-i", "--input", help="Required. Path to image file.",
                      required=True, type=str, nargs="+")
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. "
                           "Absolute path to a shared library with the kernels implementations.",
                      type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Optional. Specify the target device to infer on; "
                           "CPU, GPU, FPGA or MYRIAD is acceptable. "
                           "Sample will look for a suitable plugin for device specified (CPU by default)",
                      default="CPU", type=str)
    args.add_argument("--labels", help="Optional. Labels mapping file", default=None, type=str)
    args.add_argument("-nt", "--number_top", help="Optional. Number of top results", default=10, type=int)
    # Fix: without this return the caller's build_argparser().parse_args()
    # would fail with AttributeError on None.
    return parser
# NOTE(review): this chunk is a mangled extraction — every line below carries
# its original file line number as a leading token, indentation is lost, and
# many lines are elided (the `def main():` header, `ie = IECore()`,
# `model = args.model`, `data = {}`, `images_hw = []`, several loop headers,
# `res = res[out_blob]`, dict initializations, etc.).  Code is kept
# byte-identical; comments only.  This is the body of the SSD object-detection
# sample's main(): parse args, read the network, load it on a device, run one
# inference, and draw detected boxes into out.bmp.
52 log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
53 args = build_argparser().parse_args()
54 log.info("Loading Inference Engine")
57 # ---1. Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format ---
# NOTE(review): `model` and `ie` are defined in elided lines — presumably
# `model = args.model` and `ie = IECore()`; confirm against the full file.
59 log.info(f"Loading network:\n\t{model}")
60 net = ie.read_network(model=model)
# ngraph function gives access to the ordered ops, used later to locate the
# DetectionOutput layer by friendly name.
61 func = ng.function_from_cnn(net)
62 ops = func.get_ordered_ops()
63 # -----------------------------------------------------------------------------------------------------
65 # ------------- 2. Load Plugin for inference engine and extensions library if specified --------------
66 log.info("Device info:")
67 versions = ie.get_versions(args.device)
68 print("{}{}".format(" " * 8, args.device))
69 print("{}MKLDNNPlugin version ......... {}.{}".format(" " * 8, versions[args.device].major,
70 versions[args.device].minor))
71 print("{}Build ........... {}".format(" " * 8, versions[args.device].build_number))
# CPU extension library is only meaningful when the CPU plugin is in use.
73 if args.cpu_extension and "CPU" in args.device:
74 ie.add_extension(args.cpu_extension, "CPU")
75 log.info("CPU extension loaded: {}".format(args.cpu_extension))
76 # -----------------------------------------------------------------------------------------------------
78 # --------------------------- 3. Read and preprocess input --------------------------------------------
80 print("inputs number: " + str(len(net.input_info.keys())))
82 for input_key in net.input_info:
83 print("input shape: " + str(net.input_info[input_key].input_data.shape))
84 print("input key: " + input_key)
# A 4-element layout (NCHW) identifies the image input; its dims size the
# preallocated batch buffer below.
85 if len(net.input_info[input_key].input_data.layout) == 4:
86 n, c, h, w = net.input_info[input_key].input_data.shape
88 images = np.ndarray(shape=(n, c, h, w))
# NOTE(review): `images_hw = []` and the `for i in range(n):` loop header are
# elided; the lines below presumably ran once per input image.
91 image = cv2.imread(args.input[i])
# cv2.imread returns HWC; keep the original (h, w) so detections can be
# scaled back to source-image coordinates later.
92 ih, iw = image.shape[:-1]
93 images_hw.append((ih, iw))
94 log.info("File was added: ")
95 log.info(" {}".format(args.input[i]))
96 if (ih, iw) != (h, w):
97 log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
98 image = cv2.resize(image, (w, h))
99 image = image.transpose((2, 0, 1))  # Change data layout from HWC to CHW
102 # -----------------------------------------------------------------------------------------------------
104 # --------------------------- 4. Configure input & output ---------------------------------------------
105 # --------------------------- Prepare input blobs -----------------------------------------------------
106 log.info("Preparing input blobs")
107 assert (len(net.input_info.keys()) == 1 or len(
108 net.input_info.keys()) == 2), "Sample supports topologies only with 1 or 2 inputs"
109 out_blob = next(iter(net.outputs))
110 input_name, input_info_name = "", ""
# Classify inputs by layout rank: 4-D = image tensor (fed as U8),
# 2-D = image-info vector (fed as FP32), e.g. for Faster-RCNN-style models.
112 for input_key in net.input_info:
113 if len(net.input_info[input_key].layout) == 4:
114 input_name = input_key
115 net.input_info[input_key].precision = 'U8'
116 elif len(net.input_info[input_key].layout) == 2:
117 input_info_name = input_key
118 net.input_info[input_key].precision = 'FP32'
# Info input is expected to be shape [1, 3] or [1, 6]; anything else is
# reported (execution continues — error is logged, not raised).
119 if net.input_info[input_key].input_data.shape[1] != 3 and net.input_info[input_key].input_data.shape[1] != 6 or \
120 net.input_info[input_key].input_data.shape[0] != 1:
121 log.error('Invalid input info. Should be 3 or 6 values length.')
# NOTE(review): `data = {}` is elided just above this line.
124 data[input_name] = images
126 if input_info_name != "":
127 infos = np.ndarray(shape=(n, c), dtype=float)
# NOTE(review): the loop filling `infos` (presumably with h, w, scale=1) is elided.
132 data[input_info_name] = infos
134 # --------------------------- Prepare output blobs ----------------------------------------------------
135 log.info('Preparing output blobs')
137 output_name, output_info = "", net.outputs[next(iter(net.outputs.keys()))]
# Look for a DetectionOutput op whose friendly name is also a network output.
138 output_ops = {op.friendly_name : op for op in ops \
139 if op.friendly_name in net.outputs and op.get_type_name() == "DetectionOutput"}
140 if len(output_ops) != 0:
141 output_name, output_info = output_ops.popitem()
143 if output_name == "":
144 log.error("Can't find a DetectionOutput layer in the topology")
# DetectionOutput shape is [1, 1, max_proposal_count, 7].
146 output_dims = output_info.shape
147 if len(output_dims) != 4:
148 log.error("Incorrect output dimensions for SSD model")
149 max_proposal_count, object_size = output_dims[2], output_dims[3]
# NOTE(review): the `if object_size != 7:` guard is elided before this line.
152 log.error("Output item should have 7 as a last dimension")
154 output_info.precision = "FP32"
155 # -----------------------------------------------------------------------------------------------------
157 # --------------------------- Performing inference ----------------------------------------------------
158 log.info("Loading model to the device")
159 exec_net = ie.load_network(network=net, device_name=args.device)
160 log.info("Creating infer request and starting inference")
161 res = exec_net.infer(inputs=data)
162 # -----------------------------------------------------------------------------------------------------
164 # --------------------------- Read and postprocess output ---------------------------------------------
165 log.info("Processing output blobs")
167 boxes, classes = {}, {}
# NOTE(review): iterating `data` here would yield dict keys, not proposals —
# the elided lines presumably rebound the loop source to the reshaped
# detection output (`res[out_blob]`); confirm against the full file.
169 for number, proposal in enumerate(data):
# Each proposal is [image_id, label, conf, xmin, ymin, xmax, ymax] with
# coordinates normalized to [0, 1]; scale by the ORIGINAL image size.
# NOTE(review): np.int was removed in NumPy 1.24 — plain int is the
# modern equivalent.
171 imid = np.int(proposal[0])
172 ih, iw = images_hw[imid]
173 label = np.int(proposal[1])
174 confidence = proposal[2]
175 xmin = np.int(iw * proposal[3])
176 ymin = np.int(ih * proposal[4])
177 xmax = np.int(iw * proposal[5])
178 ymax = np.int(ih * proposal[6])
179 print("[{},{}] element, prob = {:.6} ({},{})-({},{}) batch id : {}" \
180 .format(number, label, confidence, xmin, ymin, xmax, ymax, imid), end="")
# Only proposals above the 0.5 confidence threshold are collected for drawing.
181 if proposal[2] > 0.5:
182 print(" WILL BE PRINTED!")
# NOTE(review): `boxes[imid] = []` / `classes[imid] = []` initializations are
# elided after each membership check below.
183 if not imid in boxes.keys():
185 boxes[imid].append([xmin, ymin, xmax, ymax])
186 if not imid in classes.keys():
188 classes[imid].append(label)
# NOTE(review): the `for imid in classes:` loop header is elided; drawing
# below presumably ran per image id.  Color (232, 35, 244) is BGR pink.
193 tmp_image = cv2.imread(args.input[imid])
194 for box in boxes[imid]:
195 cv2.rectangle(tmp_image, (box[0], box[1]), (box[2], box[3]), (232, 35, 244), 2)
196 cv2.imwrite("out.bmp", tmp_image)
197 log.info("Image out.bmp created!")
198 # -----------------------------------------------------------------------------------------------------
200 log.info("Execution successful\n")
# NOTE(review): the opening `log.info(` for this message is elided.
202 "This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool")
# Standard script entry point: exit with main()'s return status, mapping a
# None return to 0.  NOTE(review): `def main():` is not visible in this
# chunk — presumably defined in the elided lines around original line 51.
205 if __name__ == '__main__':
206 sys.exit(main() or 0)