2 Copyright (c) 2018 Intel Corporation
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
8 http://www.apache.org/licenses/LICENSE-2.0
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
24 from google.protobuf import text_format
25 from google.protobuf.internal import api_implementation
27 from mo.front.caffe.proto import caffe_pb2
28 from mo.graph.graph import Node, unique_id
29 from mo.utils.error import Error
30 from mo.utils.utils import refer_to_faq_msg
33 def parse_mean(file_path: str, in_shape: np.ndarray, mean_file_offsets: [tuple, None]):
# Parse a Caffe .binaryproto mean file and crop it to the network input size.
#   file_path: path to the mean file, read as binary and parsed into a BlobProto.
#   in_shape: input blob shape, indexed as NCHW here (in_shape[1]=channels,
#             in_shape[2]=height, in_shape[3]=width).
#   mean_file_offsets: optional 2-element (x, y) crop offset; when absent the
#             crop window is centered inside the mean image.
# NOTE(review): this listing is gapped (original lines 36-38, 40-42, 45, 48,
# 52, 54-59, 63, 66-67, 74-76, 78+ are missing), so the surrounding try/except,
# the initialization of the `mean` accumulator, the file read into `data`, and
# the return statement are not visible here — confirm against the full file.
34 blob = caffe_pb2.BlobProto()
35 with open(file_path, 'rb') as file:
# Empty mean file is rejected explicitly (rest of the raise call is in a
# missing line).
39 raise Error('Mean file "{}" is empty.' + refer_to_faq_msg(5),
43 blob.ParseFromString(data)
44 data = np.array(blob.data)
# The blob's spatial layout comes either from the legacy explicit
# channels/height/width fields, or (presumably in a missing `else:` branch)
# from the newer repeated shape.dim field — TODO confirm the branch structure.
46 if blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
47 data = data.reshape(blob.channels, blob.height, blob.width)
49 data = data.reshape(blob.shape.dim)
50 # crop mean image according to input size
# The mean image must be at least as large as the network input; otherwise an
# Error is raised (message continues in missing lines).
51 if in_shape[2] > data.shape[1] or in_shape[3] > data.shape[2]:
53 'Input image of shape {} is larger than mean image {} from file "{}". ' +
# Crop offsets: either the caller-supplied (x, y) pair, or a centered crop
# computed from the size difference (the `else:` line itself is missing).
60 if mean_file_offsets is not None and len(mean_file_offsets) == 2:
61 offset_x = mean_file_offsets[0]
62 offset_y = mean_file_offsets[1]
64 offset_x = int((data.shape[1] - in_shape[2]) / 2)
65 offset_y = int((data.shape[2] - in_shape[3]) / 2)
# Copy the cropped window channel-by-channel into flat float32 buffers,
# appending each to `mean` (its initialization is in a missing line —
# presumably `mean = []`; verify).
68 for i in range(in_shape[1]):
69 data_channel = np.zeros(in_shape[2] * in_shape[3], dtype=np.float32)
70 for x in range(in_shape[2]):
71 for y in range(in_shape[3]):
72 data_channel[x * in_shape[3] + y] = data[i, x + offset_x, y + offset_y]
73 mean.append(data_channel)
# Any failure above is re-raised as a framework Error with an FAQ pointer
# (the raise itself spans missing lines).
77 except Exception as err:
79 'While processing mean file "{}": {}. Probably mean file has incorrect format. ' +
85 def load_caffe_proto_model(proto_path: str, model_path: [str, None] = None):
# Load a Caffe topology: parse the .prototxt into a NetParameter, and — when
# model_path is given — parse the binary .caffemodel into a second
# NetParameter via an mmap'ed read.
# NOTE(review): listing is gapped (original lines 90, 98, 106, 110-113,
# 118-121 missing), so the try/except around the cpp_message import and the
# function's return statement are not visible — confirm against the full file.
86 # 1. python protobuf is used
# Warn the user that the pure-Python protobuf backend is slow, and suggest the
# cpp backend when it is detectable.
87 if api_implementation._implementation_type == 'python':
88 message = 'Please expect that Model Optimizer conversion might be slow. ' \
89 'You are currently using Python protobuf library implementation. \n'
# The import below presumably sits inside a try: whose line is missing; its
# success implies the cpp implementation is installed — TODO confirm.
91 from google.protobuf.pyext import cpp_message
92 # Check os windows and env variable PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION
93 if os.name == 'nt' and os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', default='') != 'cpp':
94 # 2. cpp implementaion is available but not used
95 message += 'However, cpp implementation is available, you can boost ' \
96 'model conversion by setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION env variable to cpp. \n' \
97 'Run: set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp \n'
99 # 3. cpp implementaion is not available
100 message += 'However you can use the C++ protobuf implementation that is supplied with the OpenVINO toolkit' \
101 'or build protobuf library from sources. \n' \
102 'Navigate to "install_prerequisites" folder and run: ' \
103 'python -m easy_install protobuf-3.5.1-py($your_python_version)-win-amd64.egg \n' \
104 'set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp'
105 print(message + '\n\n' + refer_to_faq_msg(80))
# Parse the human-readable prototxt into a NetParameter message.
107 proto = caffe_pb2.NetParameter()
108 with open(proto_path, "r") as file:
109 text_format.Merge(str(file.read()), proto)
111 # Read model layer if exists
# Binary weights are memory-mapped read-only and merged from the raw string.
# NOTE(review): the local name `map` shadows the builtin — worth renaming when
# this block can be safely edited.
114 model = caffe_pb2.NetParameter()
115 with open(model_path, "rb") as infile:
116 map = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ)
117 model.MergeFromString(map)
122 def get_layers(proto):
# Return the layer list from a NetParameter: presumably proto.layer
# (new-style) when non-empty — that branch is in missing lines 123-124 —
# otherwise proto.layers (old-style, branch body in missing lines 126-127);
# raises Error when neither is present. TODO confirm against the full file.
125 elif len(proto.layers):
128 raise Error('Invalid proto file: there is neither "layer" nor "layers" top-level messages. ' +
132 def caffe_pb_to_nx(proto, model):
134 Converts proto/model layers to a graph. Edges are restored by bottom/top attributes.
135 Graph nodes has two attributes: pb for prototxt definition and model_pb for caffemodel definition.
140 Protobuf message for NetParameter, representing .prototxt.
142 Protobuf message for NetParameter, representing .caffemodel.
147 built NX Directed graph.
# NOTE(review): the lines above are fragments of the function docstring — its
# delimiters (original lines 133, 136-139, 141, 143-146, 148) are missing from
# this listing, along with many other lines throughout the body; structural
# comments below are hedged accordingly.
149 graph = nx.MultiDiGraph()
150 # Blobs in prototxt model can be reused by inplace layer.
151 # This requires loading of pb layers in order and tracking the latest
152 # layer that writes a particular blob.
153 blob_producers = {} # maps layer blob name to the layer name and port
154 proto_layers = get_layers(proto)
# model_layers presumably guarded for model being None in missing lines
# 155-156 — TODO confirm.
157 model_layers = get_layers(model)
# --- Top-level (old "input"-field style) network inputs. Four mutually
# exclusive prototxt spellings are handled; each elif's comment block quotes
# an example in lines missing from this listing.
# Case: "input_dim" entries with multiple inputs is explicitly unsupported.
161 if len(proto.input_dim) > 0 and len(list(proto.input)) > 1:
162 # example of proto input
171 raise Error('Old-style inputs (via "input_dims") are not supported. ' +
172 'Please specify inputs via "input_shape". ' +
# Case: a single input with flat "input_dim" values.
174 elif len(list(proto.input)) == 1 and len(list(proto.input_dim)):
175 # example of proto input
181 input_dims = [np.array(list(proto.input_dim), dtype=np.int64)]
182 input_names = [proto.input[0]]
# Case: a single input with an "input_shape" message.
184 elif len(list(proto.input)) == 1 and len(list(proto.input_shape)):
185 # example of proto input
194 input_dims = [np.array(proto.input_shape[0].dim, dtype=np.int64)]
195 input_names = [proto.input[0]]
# Case: several inputs, each with its own "input_shape" entry (the
# input_dims/input_names list initializations are in missing lines).
197 elif len(proto.input_shape) > 0:
198 # example of proto input
213 for i in range(len(proto.input_shape)):
214 input_dims.append(np.array(proto.input_shape[i].dim, dtype=np.int64))
215 input_names.append(proto.input[i])
# Register every top-level input as a 'GlobalInput' node with no pb, and seed
# blob_producers so later layers can connect to it by blob name.
217 for i in range(len(input_names)):
218 input_name = input_names[i]
219 input_dim = input_dims[i]
220 # Input is defined at the top level of proto instead of distinct Input layer
221 graph.add_node(input_name, pb=None, model_pb=None, type='GlobalInput', name=input_name, shape=input_dim,
223 blob_producers[input_name] = (input_name, 0)
# --- Main pass over prototxt layers, in file order (order matters for
# in-place blob reuse).
225 for i, layer in enumerate(proto_layers):
# Pair each proto layer with its weights layer by name; the assignment of
# model_layer (and its None default) is in missing lines — TODO confirm.
230 for ml in model_layers:
231 if ml.name == layer.name:
# 'Input' layers carry their shape in input_param.shape; dims are recovered by
# string-splitting the shape message on 'dim:' (missing lines hide the raise
# for a shape-less input_param and the exact branch nesting).
234 if layer.type == 'Input':
235 if hasattr(layer, 'input_param'):
236 input_param = layer.input_param
238 raise Error('Input layer has no input dims. ' +
240 if hasattr(input_param, 'shape'):
241 # example of proto input
247 # input_param {shape: {dim: 1 dim: 3 dim: 600 dim: 1000}}
255 # input_param {shape: {dim: 1 dim: 3}}
257 dims = map(int, list(filter(None, str(list(input_param.shape)[0]).split('dim:'))))
258 input_dims.append(np.array(list(dims), dtype=np.int64))
259 input_names.append(layer.name)
261 graph.add_node(layer.name, pb=layer, model_pb=model_layer, kind='op')
263 # connect inputs based on blob_producers dictionary
# Each consumed blob ('bottom') is wired from whichever layer most recently
# produced it; edge_attrs construction spans missing lines 268-271/276.
264 for dst_port, bottom in enumerate(layer.bottom):
265 src_layer = blob_producers[bottom][0]
266 src_port = blob_producers[bottom][1]
# NOTE(review): assert is stripped under -O; an explicit raise would be safer.
267 assert (graph.has_node(src_layer))
272 'fw_tensor_debug_info': [(src_layer, bottom)], # debug anchor for a framework tensor name and port
273 'in_attrs': ['in', 'name'],
274 'out_attrs': ['out', 'name'],
275 'data_attrs': ['fw_tensor_debug_info']
277 graph.add_edge(src_layer, layer.name, **edge_attrs)
279 # update blob producers dictionary by output ports
# Overwriting an existing entry is the in-place blob reuse case (debug-logged).
280 for src_port, top in enumerate(layer.top):
281 if top in blob_producers:
282 log.debug("Detected reuse of blob {} by layer {}".format(top, layer.name))
283 blob_producers[top] = (layer.name, src_port)
285 # Find all nodes that do not have consumers.
286 # Add identity ops as a consumers for each output port for such nodes.
# Terminal nodes get one synthetic Identity consumer per declared 'top' so
# every output port has a downstream edge.
287 for node in list(graph.nodes()):
288 node = Node(graph, node)
289 if len(node.out_nodes()) == 0:
# Skip nodes without a prototxt pb (e.g. GlobalInput) — the continue is in a
# missing line, presumably line 291.
290 if not node.has_valid('pb') or not hasattr(node.pb, 'top'):
292 for port, top in enumerate(node.pb.top):
293 new_id = unique_id(graph, 'TerminalIdentity_')
294 graph.add_node(new_id, op='Identity', type='Identity', kind='op')
299 'fw_tensor_debug_info': [(node.id, top)], # debug anchor for a framework tensor name and port
300 'in_attrs': ['in', 'name'],
301 'out_attrs': ['out', 'name'],
302 'data_attrs': ['fw_tensor_debug_info']
304 graph.add_edge(node.id, new_id, **edge_attrs)
# A topology with no inputs at all is rejected.
306 if len(input_names) <= 0:
307 raise Error('The topology contains no "input" layers. ' +
308 refer_to_faq_msg(79))
# Returns the graph plus a {input_name: shape} mapping for all inputs found.
309 return graph, {name: shape for (name, shape) in zip(input_names, input_dims)}