tools/utils/building/layer.py
1 """
2 Copyright (C) 2018-2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8       http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16 from ..biases import Biases
17 from ..weights import Weights
18
19
20 class Layer:
21     TEMPLATE = (
22         '<layer name="{name}" type="{type}" precision="FP32" id="{id}">'
23             '{data}'
24             '{input}'
25             '{output}'
26             '{weights}'
27             '{biases}'
28         '</layer>')

    def __init__(
            self,
            id: int,
            type: str,
            name: str,
            params: dict,
            input_dims: list,
            output_dims: list,
            weights: Weights = None,
            biases: Biases = None):
        self._id = id
        self._type = type
        self._name = name
        self._params = params
        self._input_dims = input_dims
        self._output_dims = output_dims
        self._weights = weights
        self._biases = biases

    @property
    def id(self) -> int:
        return self._id

    @property
    def type(self) -> str:
        return self._type

    @property
    def name(self) -> str:
        return self._name

    @property
    def params(self) -> dict:
        return self._params

    @property
    def input_dims(self) -> list:
        return self._input_dims

    @property
    def output_dims(self) -> list:
        return self._output_dims

    @property
    def weights(self) -> Weights:
        return self._weights

    @property
    def biases(self) -> Biases:
        return self._biases

    def _output_dims_to_xml(self) -> str:
        if self._output_dims:
            if len(self._output_dims) == 2:
                output_xml = (
                    '<output>'
                        '<port id="1">'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                        '</port>'
                    '</output>').format(self._output_dims[0], self._output_dims[1])
            elif len(self._output_dims) == 4:
                output_xml = (
                    '<output>'
                        '<port id="1">'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                        '</port>'
                    '</output>').format(
                        self._output_dims[0],
                        self._output_dims[1],
                        self._output_dims[2],
                        self._output_dims[3])
            else:
                raise NotImplementedError(
                    "{} dimensions for outputs (layer name '{}', type '{}') are not supported".format(
                        len(self._output_dims),
                        self._name,
                        self._type))
        else:
            output_xml = ''
        return output_xml

    def _input_dims_to_xml(self) -> str:
        if self._input_dims:
            if len(self._input_dims) == 2:
                input_xml = (
                    '<input>'
                        '<port id="0">'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                        '</port>'
                    '</input>').format(self._input_dims[0], self._input_dims[1])
            elif len(self._input_dims) == 4:
                input_xml = (
                    '<input>'
                        '<port id="0">'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                            '<dim>{}</dim>'
                        '</port>'
                    '</input>').format(
                        self._input_dims[0],
                        self._input_dims[1],
                        self._input_dims[2],
                        self._input_dims[3])
            else:
                raise NotImplementedError(
                    "{} dimensions for inputs (layer name '{}', type '{}') are not supported".format(
                        len(self._input_dims),
                        self._name,
                        self._type))
        else:
            input_xml = ''

        return input_xml

    def __str__(self) -> str:
        if self._params:
            # Serialize layer parameters as attributes of the <data/> element.
            data_xml = '<data {}/>'.format(
                ''.join('{}="{}" '.format(key, value) for key, value in self._params.items()))
        else:
            data_xml = ''

        return self.TEMPLATE.format(
            name=self._name,
            type=self._type,
            id=self._id,
            data=data_xml,
            input=(self._input_dims_to_xml() if self._input_dims else ''),
            output=(self._output_dims_to_xml() if self._output_dims else ''),
            weights=(
                '<weights offset="{offset}" size="{size}"/>'.format(
                    offset=self._weights.offset, size=self._weights.size)
                if self._weights else ''),
            biases=(
                '<biases offset="{offset}" size="{size}"/>'.format(
                    offset=self._biases.offset, size=self._biases.size)
                if self._biases else ''))