inference-engine/ie_bridges/python/sample/benchmark_app/benchmark/utils/benchmark_utils.py
1 """
2  Copyright (C) 2018-2019 Intel Corporation
3
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7
8       http://www.apache.org/licenses/LICENSE-2.0
9
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 """
16
17 import logging
18 import argparse
19 import os
20 import cv2
21 import numpy as np
22 import sys
23
24 from glob import glob
25 from random import choice
26 from datetime import datetime
27 from fnmatch import fnmatch
28
29 from .constants import *
30
31 logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
32 logger = logging.getLogger('BenchmarkApp')
33
34
def validate_args(args):
    """Check parsed command-line arguments for obviously invalid values."""
    if args.number_iterations is not None and args.number_iterations <= 0:
        raise Exception("Number of iterations should be positive (invalid -niter option value)")
    if args.number_infer_requests <= 0:
        raise Exception("Number of inference requests should be positive (invalid -nireq option value)")
    if not fnmatch(args.path_to_model, XML_EXTENSION_PATTERN):
        raise Exception('Path {} is not an xml file.'.format(args.path_to_model))


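# Illustrative invocation of the command-line interface defined below. The entry-point
# script name and the paths are placeholders, not taken from this module:
#
#   python3 benchmark_app.py -m <path_to_model>.xml -i <path_to_images> -d CPU -api async -niter 100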
def parse_args():
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help=HELP_MESSAGES["HELP"])
    args.add_argument('-i', '--path_to_images', type=str, required=True, help=HELP_MESSAGES['IMAGE_MESSAGE'])
    args.add_argument('-m', '--path_to_model', type=str, required=True, help=HELP_MESSAGES['MODEL_MESSAGE'])
    args.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
                      help=HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    args.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
                      help=HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
    args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
                      help=HELP_MESSAGES['API_MESSAGE'])
    args.add_argument('-d', '--target_device', type=str, required=False, default="CPU",
                      help=HELP_MESSAGES['TARGET_DEVICE_MESSAGE'])
    args.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
                      help=HELP_MESSAGES['ITERATIONS_COUNT_MESSAGE'])
    args.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=2,
                      help=HELP_MESSAGES['INFER_REQUESTS_COUNT_MESSAGE'])
    args.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
                      help=HELP_MESSAGES['INFER_NUM_THREADS_MESSAGE'])
    args.add_argument('-b', '--batch_size', type=int, required=False, default=None,
                      help=HELP_MESSAGES['BATCH_SIZE_MESSAGE'])
    args.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES',
                      choices=['YES', 'NO'], help=HELP_MESSAGES['INFER_THREADS_PINNING_MESSAGE'])
    return parser.parse_args()


def get_images(path_to_images, batch_size):
    """Collect image paths to fill a batch: a single file is repeated, a directory is scanned."""
    images = list()
    if os.path.isfile(path_to_images):
        while len(images) != batch_size:
            images.append(path_to_images)
    else:
        path = os.path.join(path_to_images, '*')
        files = glob(path, recursive=True)
        for file in files:
            file_extension = file.rsplit('.').pop().upper()
            if file_extension in IMAGE_EXTENSIONS:
                images.append(file)
        if len(images) == 0:
            raise Exception("No images found in {}".format(path_to_images))
        if len(images) < batch_size:
            # Top up the batch by repeating randomly chosen images from the directory.
            while len(images) != batch_size:
                images.append(choice(images))
    return images


def get_duration_in_secs(target_device):
    """Return the benchmark duration (in seconds) configured for the given target device."""
    duration = 0
    for device in DEVICE_DURATION_IN_SECS:
        if device in target_device:
            duration = max(duration, DEVICE_DURATION_IN_SECS[device])

    if duration == 0:
        duration = DEVICE_DURATION_IN_SECS[UNKNOWN_DEVICE_TYPE]
        logger.warning("Default duration {} seconds for unknown device {} is used".format(duration, target_device))

    return duration


def fill_blob_with_image(images_path, shape):
    """Read, resize and lay out images into an NCHW blob of the given shape."""
    images = np.ndarray(shape)
    for item in range(shape[0]):
        image = cv2.imread(images_path[item])

        new_im_size = tuple(shape[2:])
        if image.shape[:-1] != new_im_size:
            logger.warning("Image {} is resized from ({}) to ({})".format(images_path[item], image.shape[:-1], new_im_size))
            # cv2.resize expects the target size as (width, height), while new_im_size is (height, width).
            image = cv2.resize(image, (new_im_size[1], new_im_size[0]))

        # HWC -> CHW to match the network input layout.
        image = image.transpose((2, 0, 1))
        images[item] = image
    return images


def sync_infer_request(exe_network, times, images):
    """Run a single synchronous inference, append its latency in seconds to `times` and return the end time."""
    iteration_start_time = datetime.now()
    exe_network.infer(images)
    current_time = datetime.now()
    times.append((current_time - iteration_start_time).total_seconds())
    return current_time
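

# A minimal sketch (not part of the original sample) of how the helpers above are
# typically combined for a synchronous run. It assumes `exe_network` is an already
# loaded ExecutableNetwork, `input_blob` is the name of its single input and
# `input_shape` is that input's NCHW shape; these names are illustrative, not
# taken from this module.
def _example_sync_run(exe_network, input_blob, input_shape, args):
    batch_size = input_shape[0]
    image_paths = get_images(args.path_to_images, batch_size)
    input_data = fill_blob_with_image(image_paths, input_shape)
    times = []
    # Fall back to a single iteration when -niter is not given; the real sample decides
    # how long to run from -niter or a device-specific duration (see get_duration_in_secs).
    for _ in range(args.number_iterations or 1):
        sync_infer_request(exe_network, times, {input_blob: input_data})
    return times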