platform/upstream/dldt.git: tools/benchmark/benchmark.py
1 """
2 Copyright (C) 2018-2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8       http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """

import datetime

import numpy

import openvino.inference_engine as ie

from ..accuracy_checker.accuracy_checker.config import ConfigReader
from ..accuracy_checker.accuracy_checker.model_evaluator import ModelEvaluator
from ..accuracy_checker.accuracy_checker.progress_reporters import PrintProgressReporter, TQDMReporter

from ..network import Network

from .configuration import Configuration
from .logging import info


class BenchmarkCallback:
    def __init__(self, configuration: Configuration, network: Network = None, iterations_count: int = 1000):
        self._latency = None
        self._configuration = configuration
        self._network = network
        self._iterations_count = iterations_count if iterations_count else 1000

    def output_callback(self, value, latency=None):
        # per-image inference results are not needed for benchmarking
        pass

    def benchmark_callback(self, network_inputs_data):
        latencies = []

        if self._network:
            ie_network = self._network.ie_network
        else:
            ie_network = ie.IENetwork(self._configuration.model, self._configuration.weights)
        plugin = ie.IEPlugin(self._configuration.device)
        if self._configuration.cpu_extension:
            plugin.add_cpu_extension(self._configuration.cpu_extension)
        exec_network = plugin.load(ie_network)

        # warm-up run: exclude one-time initialization costs from the measurement
        exec_network.infer(network_inputs_data)

        for _ in range(self._iterations_count):
            start = datetime.datetime.now()
            exec_network.infer(network_inputs_data)
            # total_seconds() covers the whole elapsed interval; the .microseconds
            # attribute alone would silently drop full seconds for slow inferences
            latencies.append((datetime.datetime.now() - start).total_seconds())
        self._latency = numpy.mean(latencies)

        # release Inference Engine objects explicitly
        del ie_network
        del exec_network
        del plugin

    @property
    def latency(self) -> float:
        return self._latency


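# A minimal sketch of driving BenchmarkCallback directly (assumptions: the key
# 'data' and the input shape below are placeholders that must match the loaded
# network's input blobs; they are not defined by this module):
#
#     callback = BenchmarkCallback(configuration, iterations_count=100)
#     callback.benchmark_callback({'data': numpy.zeros((1, 3, 224, 224), dtype=numpy.float32)})
#     info('average latency: {} s'.format(callback.latency))

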
class BenchmarkResult:
    def __init__(self, latency):
        self._latency = latency

    @property
    def latency(self) -> float:
        return self._latency


class InferOptions:
    def __init__(self, iterations_count=1000):
        self._iterations_count = iterations_count

    @property
    def iterations_count(self) -> int:
        return self._iterations_count


class Benchmark:
    def __init__(self, configuration: Configuration):
        if configuration is None:
            raise ValueError("configuration is None")

        self._configuration = configuration

    def run(
        self,
        network: Network = None,
        statistics=None,
        quantization_levels=None,
        iterations_count: int = 1000) -> BenchmarkResult:

        model = self._configuration.config['models'][0]
        launcher_config = model['launchers'][0]
        dataset_config = model['datasets'][0]

        model_evaluator = ModelEvaluator.from_configs(launcher_config, dataset_config)
        try:
            if network:
                # replace the launcher's network with the externally supplied one
                del model_evaluator.launcher.network
                del model_evaluator.launcher.exec_network
                model_evaluator.launcher.network = network.ie_network
                model_evaluator.launcher.exec_network = model_evaluator.launcher.plugin.load(network.ie_network)

            ie_network = model_evaluator.launcher.network

            if statistics:
                # attach per-layer min/max activation statistics to the network
                network_stats = {}
                for layer_name, node_statistic in statistics.items():
                    network_stats[layer_name] = ie.LayerStats(
                        min=tuple(node_statistic.min_outputs),
                        max=tuple(node_statistic.max_outputs))
                ie_network.stats.update(network_stats)

            if quantization_levels:
                # set the requested quantization level on each listed layer
                for layer_name, value in quantization_levels.items():
                    params = ie_network.layers[layer_name].params
                    params["quantization_level"] = value
                    ie_network.layers[layer_name].params = params

            if model_evaluator.dataset.size != 1:
                info("only the first image from the dataset annotation is used for the benchmark")
                model_evaluator.dataset.size = 1

            process_dataset_callback = BenchmarkCallback(
                configuration=self._configuration,
                network=network,
                iterations_count=iterations_count)

            model_evaluator.process_dataset(
                None,
                progress_reporter=None,
                output_callback=process_dataset_callback.output_callback,
                benchmark=process_dataset_callback.benchmark_callback)

            if len(model_evaluator.launcher.exec_network.requests) != 1:
                raise ValueError("unexpected infer requests count: expected exactly one")

            latency = process_dataset_callback.latency
        finally:
            model_evaluator.release()

        return BenchmarkResult(latency)
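

# Example of running a benchmark (a minimal sketch; Configuration construction
# is defined outside this module, so it is elided here):
#
#     configuration = ...  # must expose config, model, weights, device and cpu_extension
#     result = Benchmark(configuration).run(iterations_count=1000)
#     info('average latency: {:.6f} s'.format(result.latency))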