[BUG][ConvertLayout] Fix qnn.conv2d layout conversion "too many values to unpack" ...
author lhutton1 <35535092+lhutton1@users.noreply.github.com>
Wed, 16 Sep 2020 08:55:13 +0000 (09:55 +0100)
committer GitHub <noreply@github.com>
Wed, 16 Sep 2020 08:55:13 +0000 (17:55 +0900)
This patch follows a previous bugfix in #6419. For qnn.conv2d I made a simple oversight: tinfos contains not only the data and weight type infos but also those of the qnn parameters, so unpacking it as a pair fails. data_info and weight_info therefore need to be extracted by index instead.

Change-Id: Ib0ad01f427543371380d0bb604a77b5e0ec1103d
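
For context, a minimal sketch of the failure mode (the exact tinfos contents here are an assumption based on the description above; qnn.conv2d passes type info for its qnn parameters alongside data and weight):

    # Hypothetical illustration: tinfos holds one entry per qnn.conv2d input,
    # so it is longer than two (data, weight, plus the qnn scale/zero-point params).
    tinfos = ["data_info", "weight_info", "input_zp", "kernel_zp", "input_scale", "kernel_scale"]

    # Old code -- raises "ValueError: too many values to unpack (expected 2)":
    # data_info, weight_info = tinfos

    # Fixed code -- index only the entries that are needed:
    data_info = tinfos[0]
    weight_info = tinfos[1]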

python/tvm/relay/qnn/op/layout_conversions.py
tests/python/relay/test_pass_convert_op_layout.py

index 4105172..a7c90da 100644
@@ -63,7 +63,8 @@ def convert_qnn_conv2d(attrs, inputs, tinfos, desired_layouts):
         return relay.qnn.op.conv2d(*inputs, **new_attrs)
     if desired_data_layout == "NHWC":
         # Check for depthwise convolution.
-        data_info, weight_info = tinfos
+        data_info = tinfos[0]
+        weight_info = tinfos[1]
         if is_depthwise_conv2d(
             data_info.shape,
             attrs["data_layout"],
index e4771a0..d2a1329 100644
@@ -749,6 +749,51 @@ def test_qnn_conv_add_convert_layout():
     assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
 
 
+def test_qnn_conv_nhwc_convert_layout():
+    def before():
+        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
+        weight = relay.var('weight', shape=(64, 64, 3, 3), dtype='int8')
+        y = relay.qnn.op.conv2d(x, weight,
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'float32'),
+                                relay.const(1, 'float32'),
+                                channels=64,
+                                kernel_size=(3, 3),
+                                padding=(1, 1),
+                                data_layout='NCHW',
+                                kernel_layout='OIHW')
+        y = relay.nn.relu(y)
+        y = relay.Function([x, weight], y)
+        return y
+
+    def expected():
+        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
+        weight = relay.var('weight', shape=(64, 64, 3, 3), dtype='int8')
+        x = relay.layout_transform(x, 'NCHW', 'NHWC')
+        weight = relay.layout_transform(weight, 'OIHW', 'HWIO')
+        y = relay.qnn.op.conv2d(x, weight,
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'float32'),
+                                relay.const(1, 'float32'),
+                                channels=64,
+                                kernel_size=(3, 3),
+                                padding=(1, 1),
+                                data_layout="NHWC",
+                                kernel_layout="HWIO")
+        y = relay.nn.relu(y)
+        y = relay.layout_transform(y, 'NHWC', 'NCHW')
+        y = relay.Function(relay.analysis.free_vars(y), y)
+        return y
+
+    a = before()
+    a = run_opt_pass(a, transform.ConvertLayout({'qnn.conv2d': ['NHWC', 'default']}))
+    b = run_opt_pass(expected(), transform.InferType())
+
+    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
+
+
 def test_conv_convert_kernel_layout():
     """ Check that convolution kernel layout is correctly transformed. """
 
@@ -951,6 +996,7 @@ if __name__ == "__main__":
     test_qnn_conv_requantize_convert_layout()
     test_qnn_conv_concat_convert_layout()
     test_qnn_conv_add_convert_layout()
+    test_qnn_conv_nhwc_convert_layout()
     test_conv_convert_kernel_layout()
     test_conv_transpose_convert_layout()
     test_default_keyword()
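
For reference, a sketch of how the pass can be applied to a module outside the run_opt_pass test helper (the graph construction mirrors the new test above; the PassContext settings are assumptions, not part of this patch):

    import tvm
    from tvm import relay

    # Build a small qnn.conv2d graph in NCHW/OIHW, as in the test above.
    x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
    weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="int8")
    y = relay.qnn.op.conv2d(x, weight,
                            relay.const(1, "int32"), relay.const(1, "int32"),
                            relay.const(1, "float32"), relay.const(1, "float32"),
                            channels=64, kernel_size=(3, 3), padding=(1, 1),
                            data_layout="NCHW", kernel_layout="OIHW")
    mod = tvm.IRModule.from_expr(relay.Function([x, weight], y))

    # Convert qnn.conv2d to NHWC; "default" lets the op pick its preferred kernel layout.
    with tvm.transform.PassContext(opt_level=3):
        mod = relay.transform.ConvertLayout({"qnn.conv2d": ["NHWC", "default"]})(mod)
    print(mod)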