Publishing 2019 R1 content
[platform/upstream/dldt.git] / tools / calibration / layer_accuracy_drop / collector_by_image.py
1 import openvino.inference_engine as ie
2
3 from ...utils.network_info import NetworkInfo
4 from ...network import Network
5
6 from ..layer_accuracy_drop_info import LayerAccuracyDropInfo
7 from ..logging import debug
8 from ..single_layer_network import SingleLayerNetwork
9 from ..inference_result import InferenceResult
10
11
class CollectorByImage:
    """Measures the per-layer accuracy drop caused by quantization.

    For every quantizable layer a separate single-layer network is created;
    each image is then inferred through every single-layer network and the
    accuracy drop against the full-network result is accumulated per layer.
    """

    def __init__(self, configuration, plugin, normalizer):
        # configuration: calibration settings (model/weights paths, precision,
        #                batch size) -- project type, schema assumed from usage
        # plugin: inference engine plugin used to load networks
        # normalizer: helper deciding which layers can be quantized/fused
        self._configuration = configuration
        self._plugin = plugin
        self._normalizer = normalizer

    def _create_single_layer_networks(self, stat):
        '''
        Build a separate single-layer network for every layer that can be
        quantized and affects the final accuracy.

        :param stat: dict of layer name -> node statistics with
                     ``min_outputs``/``max_outputs`` sequences
        :return: dict of layer name -> SingleLayerNetwork
        :raises ValueError: if a created network has more than one input
        '''
        network = ie.IENetwork(self._configuration.model, self._configuration.weights)
        # NOTE: batch size is applied later via Network.reshape(), not via
        # network.batch_size (reshape API is required).

        try:
            network_info = NetworkInfo(self._configuration.model)

            # CVS-14302: IE Network INT8 Normalizer: scale factor calculation
            # is incorrect; a ReLU statistics-padding workaround used to live
            # here and may need to be restored when that issue is addressed.

            single_layer_networks = {}

            layer_index = 1
            for layer_to_clone in network.layers.values():
                layer_to_clone_info = network_info.get_layer(layer_to_clone.name)
                # Skip layers that cannot be quantized or whose topology is not
                # a simple single-output -> single-input chain.
                # (Original code had a misplaced parenthesis:
                # len(...inputs != 1) -> TypeError; fixed to len(...inputs) != 1.)
                if not self._normalizer.is_quantization_supported(layer_to_clone.type) or \
                        len(layer_to_clone_info.outputs) != 1 or \
                        len(layer_to_clone_info.outputs[0].layer.inputs) != 1:
                    continue

                # Fuse the following activation layer into the cloned network
                # when the normalizer supports it.
                activation_layer = network.layers[layer_to_clone_info.outputs[0].layer.name] if (
                    len(layer_to_clone_info.outputs) == 1 and
                    self._normalizer.is_quantization_fusing_supported(
                        layer_to_clone_info,
                        layer_to_clone_info.outputs[0].layer)) else None
                if activation_layer:
                    debug("create network #{} for layer {} ({}) -> {} ({})".format(
                        layer_index,
                        layer_to_clone.name,
                        layer_to_clone.type,
                        activation_layer.name,
                        activation_layer.type))
                else:
                    debug("create network #{} for layer {} ({})".format(
                        layer_index, layer_to_clone.name, layer_to_clone.type))

                layer_network, reference_output_layer_name = self._normalizer.create_network_for_layer(
                    self._configuration.weights,
                    layer_to_clone,
                    layer_to_clone_info,
                    activation_layer)

                Network.reshape(layer_network, self._configuration.batch_size)

                # TODO: initialize only necessary statistics
                network_stats = {
                    layer_name: ie.LayerStats(min=tuple(node_statistic.min_outputs),
                                              max=tuple(node_statistic.max_outputs))
                    for layer_name, node_statistic in stat.items()}
                layer_network.stats.update(network_stats)

                # Force the cloned layer to the target quantization precision.
                params = layer_network.layers[layer_to_clone.name].params
                params["quantization_level"] = \
                    'I8' if self._configuration.precision == 'INT8' else self._configuration.precision
                layer_network.layers[layer_to_clone.name].params = params

                exec_network = self._plugin.load(
                    network=layer_network,
                    config={ "EXCLUSIVE_ASYNC_REQUESTS": "YES" })

                if len(layer_network.inputs) != 1:
                    raise ValueError("created network has several inputs")

                network_input_layer_name = next(iter(layer_network.inputs.keys()))

                single_layer_networks[layer_to_clone.name] = SingleLayerNetwork(
                    network = layer_network,
                    exec_network = exec_network,
                    input_layer_name = network_input_layer_name,
                    layer_name = layer_to_clone.name,
                    output_layer_name = layer_to_clone.name + "_",
                    reference_output_layer_name = reference_output_layer_name)

                layer_index += 1

            return single_layer_networks
        finally:
            # Drop the reference to the full IENetwork promptly.
            del network

    def collect(self, statistics: dict, full_network_results: InferenceResult) -> list:
        '''
        Infer every image through every single-layer network and aggregate the
        per-image accuracy drops into one value per layer.

        :param statistics: per-layer min/max statistics for the network
        :param full_network_results: full-network inference results; its
               ``result`` attribute is iterated per image
        :return: list of LayerAccuracyDropInfo sorted by drop, descending
        '''
        single_layer_networks = self._create_single_layer_networks(statistics)

        accuracy_drop_list_by_layer_name = {}
        image_index = 1
        for full_network_result in full_network_results.result:
            # len() works for both list and numpy-array results; the original
            # .result.size() would raise on either type.
            debug("image {}/{} handling".format(image_index, len(full_network_results.result)))

            for single_layer_network_name, single_layer_network in single_layer_networks.items():
                accuracy_drop = self._normalizer.infer_single_layer_network(
                    single_layer_network, full_network_result)
                accuracy_drop_list_by_layer_name.setdefault(
                    single_layer_network_name, []).append(accuracy_drop)
            image_index += 1

        accuracy_drop_by_layer = []
        for layer_name, accuracy_drop_list in accuracy_drop_list_by_layer_name.items():
            accuracy_drop_by_layer.append(LayerAccuracyDropInfo(
                layer_name=layer_name,
                value=LayerAccuracyDropInfo.calculate(accuracy_drop_list)))

        # Release every created network; the original released only the last
        # loop variable (and raised NameError when the dict was empty).
        for single_layer_network in single_layer_networks.values():
            single_layer_network.release()
        single_layer_networks.clear()

        accuracy_drop_by_layer.sort(key=lambda accuracy_drop: accuracy_drop.value, reverse=True)
        return accuracy_drop_by_layer