[CoreML] Fix CoreML frontend issues with the image scaler and padding so that Mobilenet...
author Zhao Wu <wuzhaozju@gmail.com>
Mon, 19 Aug 2019 06:05:41 +0000 (14:05 +0800)
committer Tianqi Chen <tqchen@users.noreply.github.com>
Mon, 19 Aug 2019 06:05:41 +0000 (14:05 +0800)
python/tvm/relay/frontend/common.py
python/tvm/relay/frontend/coreml.py
tests/python/frontend/coreml/test_forward.py
tutorials/frontend/from_coreml.py

python/tvm/relay/frontend/common.py
index 3dab51c..4da3a52 100644 (file)
@@ -273,9 +273,15 @@ class ExprTable(object):
     def get_expr(self, name):
         return self.exprs[name]
 
-    def set_expr(self, name, expr):
+    def set_expr(self, name, expr, force_override=False):
         assert isinstance(expr, _expr.Expr)
-        if name not in self.exprs:
+        # If the name already exists, we should override the value;
+        # otherwise patterns like x = func(x) cannot work. One example
+        # is CoreML preprocessing, which overrides the input under the
+        # same name. However, the git log shows the Keras frontend
+        # depends on keeping the first binding, so we add
+        # force_override to control this.
+        if name not in self.exprs or force_override:
             self.exprs[name] = expr
 
     def has_expr(self, name):
python/tvm/relay/frontend/coreml.py
index 2f2b756..ed7e45b 100644 (file)
@@ -17,6 +17,7 @@
 # pylint: disable=invalid-name, import-self, unused-argument, unused-variable, inconsistent-return-statements
 """CoreML frontend."""
 from __future__ import absolute_import as _abs
+import math
 import numpy as np
 import tvm
 from .. import analysis
@@ -26,11 +27,13 @@ from .. import op as _op
 from ... import nd as _nd
 from ..._ffi import base as _base
 from .common import ExprTable
+from .common import infer_shape as _infer_shape
 
 __all__ = ['from_coreml']
 
 
 def _NeuralNetworkImageScaler(op, inexpr, etab):
+    # TODO: we need to support more colorspaces, such as RGB.
     # this changes the symbol
     biases = np.array([op.blueBias, op.greenBias, op.redBias]).reshape([3, 1, 1])
     bias = etab.new_const(biases)
@@ -47,11 +50,16 @@ def _NeuralNetworkMeanImage(op, inexpr, etab):
 
 def _ConvolutionLayerParams(op, inexpr, etab):
     """Convolution layer params."""
-    weights = etab.new_const(np.array(list(op.weights.floatValue)).reshape(
-        tuple([op.outputChannels, op.kernelChannels] + list(op.kernelSize))))
+    if op.isDeconvolution:
+        weights = etab.new_const(np.array(list(op.weights.floatValue)).reshape(
+            tuple([op.kernelChannels, op.outputChannels] + list(op.kernelSize))))
+    else:
+        weights = etab.new_const(np.array(list(op.weights.floatValue)).reshape(
+            tuple([op.outputChannels, op.kernelChannels] + list(op.kernelSize))))
     dilation = list(op.dilationFactor)
     if not dilation:
         dilation = [1, 1]
+    N, C, H, W = _infer_shape(inexpr)
     params = {'channels':op.outputChannels,
               'kernel_size':list(op.kernelSize),
               'strides':list(op.stride),
@@ -60,30 +68,31 @@ def _ConvolutionLayerParams(op, inexpr, etab):
 
     if op.WhichOneof('ConvolutionPaddingType') == 'valid':
         valid = op.valid
-        padding = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
-        padding2 = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
-        for i, j in zip(padding, padding2):
-            assert i == j, "Asymmetry padding not supported"
-        if padding:
-            params['padding'] = padding
+        if valid.paddingAmounts.borderAmounts:
+            assert len(valid.paddingAmounts.borderAmounts) == 2
+            pad_t = valid.paddingAmounts.borderAmounts[0].startEdgeSize
+            pad_l = valid.paddingAmounts.borderAmounts[1].startEdgeSize
+            pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize
+            pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize
+            inexpr = _op.nn.pad(data=inexpr, pad_width=((0, 0),
+                                                        (0, 0),
+                                                        (pad_t, pad_b),
+                                                        (pad_l, pad_r)))
     elif op.WhichOneof('ConvolutionPaddingType') == 'same':
+        assert op.same.asymmetryMode == 0, "Only BOTTOM_RIGHT_HEAVY mode is " \
+                                           "supported, as used by TensorFlow, Caffe, etc."
         kernel = params['kernel_size']
-        pad_h = kernel[0] - 1
-        pad_w = kernel[1] - 1
-        pad_t = pad_h // 2
-        pad_l = pad_w // 2
-        pad_b = pad_h - pad_t
-        pad_r = pad_w - pad_l
-        assert pad_t == pad_r and pad_l == pad_b, "Asymmetry padding not supported"
-        params['padding'] = [pad_t, pad_l]
+        strides = params['strides']
+        pad_t, pad_b = get_pad_value(H, kernel[0], strides[0])
+        pad_l, pad_r = get_pad_value(W, kernel[1], strides[1])
+        inexpr = _op.nn.pad(data=inexpr, pad_width=((0, 0),
+                                                    (0, 0),
+                                                    (pad_t, pad_b),
+                                                    (pad_l, pad_r)))
+
     else:
         raise NotImplementedError("Only 'valid' and 'same' convolution padding are implemented")
 
-    # consume padding layer
-    if etab.in_padding:
-        params['padding'] = [sum(x) for x in zip(params.get('padding', [0, 0]), etab.paddings)]
-        etab.clear_padding()
-
     if op.isDeconvolution:
         ret = _op.nn.conv2d_transpose(data=inexpr, weight=weights, **params)
     else:
@@ -193,11 +202,13 @@ def _PoolingLayerParams(op, inexpr, etab):
 
         if op.WhichOneof('PoolingPaddingType') == 'valid':
             valid = op.valid
-            padding = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
-            padding2 = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
-            for i, j in zip(padding, padding2):
-                assert i == j
-            params['padding'] = padding
+            if valid.paddingAmounts.borderAmounts:
+                assert len(valid.paddingAmounts.borderAmounts) == 2
+                pad_t = valid.paddingAmounts.borderAmounts[0].startEdgeSize
+                pad_l = valid.paddingAmounts.borderAmounts[1].startEdgeSize
+                pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize
+                pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize
+                params['padding'] = [pad_t, pad_l, pad_b, pad_r]
         elif op.WhichOneof('PoolingPaddingType') == 'includeLastPixel':
             # I don't know if this is correct
             valid = op.includeLastPixel
@@ -209,12 +220,6 @@ def _PoolingLayerParams(op, inexpr, etab):
             op_name = op.WhichOneof('PoolingPaddingType')
             raise tvm.error.OpAttributeUnImplemented(msg.format(op_name))
 
-        # consume padding layer
-        if etab.in_padding:
-            params['padding'] = [sum(x) for x in zip(
-                params.get('padding', [0, 0]), etab.paddings)]
-            etab.clear_padding()
-
         if op.type == 0:
             return _op.nn.max_pool2d(inexpr, **params)
         if op.type == 1:
@@ -276,21 +281,24 @@ def _FlattenLayerParams(op, inexpr, etab):
 
 
 def _PaddingLayerParams(op, inexpr, etab):
-    """Hacking for padding layer params."""
+    """Padding layer params."""
     if op.WhichOneof('PaddingType') == 'constant':
         constant = op.constant
         if constant.value != 0:
             raise tvm.error.OpAttributeUnImplemented(
                 '{} is not supported in operator Padding.'.format(constant.value))
-        padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
-        padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
-        for i, j in zip(padding, padding2):
-            assert i == j
-        etab.set_padding(padding)
+        pad_t = op.paddingAmounts.borderAmounts[0].startEdgeSize
+        pad_l = op.paddingAmounts.borderAmounts[1].startEdgeSize
+        pad_b = op.paddingAmounts.borderAmounts[0].endEdgeSize
+        pad_r = op.paddingAmounts.borderAmounts[1].endEdgeSize
+        return _op.nn.pad(data=inexpr, pad_width=((0, 0),
+                                                  (0, 0),
+                                                  (pad_t, pad_b),
+                                                  (pad_l, pad_r)))
+
     else:
         raise tvm.error.OpNotImplemented(
             'Non-constant padding is not supported in frontend CoreML.')
-    return inexpr
 
 
 def _PermuteLayerParams(op, inexpr, etab):
@@ -372,6 +380,32 @@ _convert_map = {
     'MinLayerParams': _MinLayerParams,
 }
 
+# SAME padding: https://www.tensorflow.org/api_guides/python/nn
+def get_pad_value(data, kernel, stride):
+    """Get the pad tuple of values for SAME padding
+
+    Parameters
+    ----------
+    data: int
+        input size along one spatial dimension
+
+    kernel: int
+        kernel size along that dimension
+
+    stride: int
+        stride along that dimension
+
+    Returns
+    -------
+        pad tuple of values (pad_before, pad_after)
+    """
+
+    out = int(math.ceil(float(data) / float(stride)))
+    pad = max(0, (out - 1) * stride + kernel - data)
+    pad_before = pad // 2
+    pad_after = pad - pad_before
+    return pad_before, pad_after
+
 
 def coreml_op_to_relay(op, inname, outname, etab):
     """Convert coreml layer to a Relay expression and update the expression table.
@@ -399,9 +433,7 @@ def coreml_op_to_relay(op, inname, outname, etab):
         insym = [etab.get_expr(i) for i in inname]
     ret = _convert_map[classname](op, insym, etab)
     if outname:
-        etab.set_expr(outname, ret)
-    if classname != 'PaddingLayerParams':
-        assert not etab.in_padding, "Previous padding not consumed by conv/pool"
+        etab.set_expr(outname, ret, force_override=True)
 
 
 def from_coreml(model, shape=None):
@@ -442,10 +474,19 @@ def from_coreml(model, shape=None):
     for pp in cc.preprocessing:
         whichpp = pp.WhichOneof('preprocessor')
         ppmethod = getattr(pp, whichpp)
-        # the NeuralNetworkImageScalar doesn't seem to have a featureName?
         if whichpp == 'scaler':
+            # Be careful: when there are multiple inputs we may preprocess
+            # only the one named in pp.featureName. See the unit test
+            # verify_image_scaler in test_forward.py for CoreML.
             for i in spec.description.input:
-                coreml_op_to_relay(ppmethod, i.name, i.name, etab)
+                # with multiple inputs, preprocess only the matching name
+                if len(spec.description.input) > 1:
+                    assert pp.featureName != ''
+                    if i.name == pp.featureName:
+                        coreml_op_to_relay(ppmethod, i.name, i.name, etab)
+                else:
+                    assert pp.featureName == ''
+                    coreml_op_to_relay(ppmethod, i.name, i.name, etab)
         else:
             coreml_op_to_relay(ppmethod, pp.featureName, pp.featureName, etab)
 
tests/python/frontend/coreml/test_forward.py
index 59d4dd6..a023414 100644 (file)
@@ -25,6 +25,7 @@ import topi
 import topi.testing
 from tvm import relay
 from tvm.relay.testing.config import ctx_list
+from topi.testing import conv2d_nchw_python
 
 import coremltools as cm
 import model_zoo
@@ -95,7 +96,10 @@ def run_tvm_graph(coreml_model, target, ctx, input_data, input_name, output_shap
             tvm_output_list.append(tvm_output.asnumpy())
         return tvm_output_list
     else:
-        tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype))
+        if not output_shape:
+            tvm_output = m.get_output(0)
+        else:
+            tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype))
         return tvm_output.asnumpy()
 
 def verify_AddLayerParams(input_dim, alpha=2):
@@ -330,6 +334,72 @@ def test_forward_min():
     verify_min((1, 3, 20, 20))
     verify_min((20, 20))
 
+def verify_image_scaler(input_dim, blue_bias=0.0, green_bias=0.0, red_bias=0.0, image_scale=1.0):
+    dtype = 'float32'
+    a_np = np.random.uniform(size=input_dim).astype(dtype)
+    # make sure it is valid image format CHW.
+    assert len(a_np.shape) == 3 and a_np.shape[0] == 3
+    b_np = np.zeros(a_np.shape, dtype=dtype)
+    b_np[0, :, :] = image_scale * a_np[0, :, :] + blue_bias
+    b_np[1, :, :] = image_scale * a_np[1, :, :] + green_bias
+    b_np[2, :, :] = image_scale * a_np[2, :, :] + red_bias
+    b_np = np.add(a_np, b_np)
+    inputs = [('input1', datatypes.Array(*input_dim)),
+              ('input2', datatypes.Array(*input_dim))]
+    output = [('output', datatypes.Array(*b_np.shape))]
+    builder = NeuralNetworkBuilder(inputs, output)
+    builder.set_pre_processing_parameters(image_input_names=['input1'],
+                                          is_bgr=True,
+                                          blue_bias=blue_bias,
+                                          green_bias=green_bias,
+                                          red_bias=red_bias,
+                                          image_scale=image_scale)
+    # add an elementwise Add layer to make the CoreML model format valid;
+    # the Add layer itself has been tested above.
+    builder.add_elementwise(name='add', input_names=['input1', 'input2'],
+                            output_name='output', alpha=0, mode='ADD')
+    model = cm.models.MLModel(builder.spec)
+    for target, ctx in ctx_list():
+        out = run_tvm_graph(model, target, ctx, [a_np, a_np],
+                            ['input1', 'input2'], b_np.shape, dtype)
+        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
+
+def test_forward_image_scaler():
+    verify_image_scaler((3, 224, 224), image_scale=0.17)
+    verify_image_scaler((3, 224, 224),
+                        blue_bias=-1.7669800519943237,
+                        green_bias=-1.985260009765625,
+                        red_bias=-2.102560043334961,
+                        image_scale=0.379)
+
+def verify_convolution(input_dim, filter, padding):
+    dtype = 'float32'
+    N, C, H, W = input_dim
+    OC, _, KH, KW = filter
+    a_np = np.random.uniform(size=input_dim).astype(dtype)
+    w_np = np.random.uniform(size=(OC, C, KH, KW)).astype(dtype)
+    w_np_cm = np.transpose(w_np, axes=(2, 3, 1, 0))
+    b_np = conv2d_nchw_python(a_np, w_np, [1, 1], padding)
+    inputs = [('input1', datatypes.Array(C, H, W))]
+    output = [('output', datatypes.Array(*b_np.shape))]
+    builder = NeuralNetworkBuilder(inputs, output)
+    builder.add_convolution(name='conv', kernel_channels=3, output_channels=OC,
+                            height=KH, width=KW, stride_height=1, stride_width=1,
+                            border_mode=padding.lower(), groups=1,
+                            W=w_np_cm, b=None, has_bias=False,
+                            is_deconv=False,
+                            input_name='input1',
+                            output_name='output')
+    model = cm.models.MLModel(builder.spec)
+    for target, ctx in ctx_list():
+        out = run_tvm_graph(model, target, ctx, [a_np],
+                            ['input1'], output_shape=None)
+        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
+
+def test_forward_convolution():
+    verify_convolution((1, 3, 224, 224), filter=(32, 3, 3, 3), padding='VALID')
+    verify_convolution((1, 3, 224, 224), filter=(32, 3, 3, 3), padding='SAME')
+
 if __name__ == '__main__':
     test_forward_AddLayerParams()
     test_forward_ConcatLayerParams()
@@ -342,3 +412,5 @@ if __name__ == '__main__':
     test_forward_min()
     test_mobilenet_checkonly()
     test_resnet50_checkonly()
+    test_forward_image_scaler()
+    test_forward_convolution()
tutorials/frontend/from_coreml.py
index 1bb7cbd..2f70353 100644 (file)
@@ -18,7 +18,8 @@
 Compile CoreML Models
 =====================
 **Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
-            `Kazutaka Morita <https://github.com/kazum>`_
+            `Kazutaka Morita <https://github.com/kazum>`_, \
+            `Zhao Wu <https://github.com/FrozenGene>`_
 
 This article is an introductory tutorial to deploy CoreML models with Relay.
 
@@ -58,13 +59,15 @@ mlmodel = cm.models.MLModel(model_path)
 img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
 img_path = download_testdata(img_url, 'cat.png', module='data')
 img = Image.open(img_path).resize((224, 224))
-x = np.transpose(img, (2, 0, 1))[np.newaxis, :]
+# Mobilenet.mlmodel expects its input in BGR format
+img_bgr = np.array(img)[:,:,::-1]
+x = np.transpose(img_bgr, (2, 0, 1))[np.newaxis, :]
 
 ######################################################################
 # Compile the model on Relay
 # ---------------------------
 # We should be familiar with the process right now.
-target = 'cuda'
+target = 'llvm'
 shape_dict = {'image': x.shape}
 
 # Parse CoreML model and convert into Relay computation graph
@@ -80,7 +83,7 @@ with relay.build_config(opt_level=3):
 # -------------------
 # The process is no different from other example
 from tvm.contrib import graph_runtime
-ctx = tvm.gpu(0)
+ctx = tvm.cpu(0)
 dtype = 'float32'
 m = graph_runtime.create(graph, lib, ctx)
 # set inputs
@@ -104,4 +107,5 @@ synset_name = 'imagenet1000_clsid_to_human.txt'
 synset_path = download_testdata(synset_url, synset_name, module='data')
 with open(synset_path) as f:
     synset = eval(f.read())
+# You should see the following result: Top-1 id 282 class name tiger cat
 print('Top-1 id', top1, 'class name', synset[top1])