[VTA][HotFix] Relay->VTA quantization fix (#4433)
author Thierry Moreau <moreau@uw.edu>
Wed, 27 Nov 2019 03:21:56 +0000 (19:21 -0800)
committer Yizhi Liu <liuyizhi@apache.org>
Wed, 27 Nov 2019 03:21:56 +0000 (19:21 -0800)
* relay -> vta fix

* setting optlevel to 3 for quantization to fold batchnorm

vta/scripts/tune_resnet.py
vta/tutorials/autotvm/tune_relay_vta.py
vta/tutorials/frontend/deploy_vision_on_vta.py

index 00fe1e8..18aee09 100644 (file)
@@ -125,9 +125,11 @@ def compile_network(opt, env, target):
     dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
 
     # Perform quantization in Relay
-    with relay.quantize.qconfig(global_scale=8.0,
-                                skip_conv_layers=[0]):
-        relay_prog = relay.quantize.quantize(mod["main"], params=params)
+    # Note: We set opt_level to 3 in order to fold batch norm
+    with relay.build_config(opt_level=3):
+        with relay.quantize.qconfig(global_scale=8.0,
+                                    skip_conv_layers=[0]):
+            relay_prog = relay.quantize.quantize(mod["main"], params=params)
 
     # Perform graph packing and constant folding for VTA target
     if target.device_name == "vta":
index 97dd742..a9ab6d7 100644 (file)
@@ -89,15 +89,17 @@ def compile_network(env, target, model, start_pack, stop_pack):
     dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
 
     # Perform quantization in Relay
-    with relay.quantize.qconfig(global_scale=8.0,
-                                skip_conv_layers=[0]):
-        relay_prog = relay.quantize.quantize(mod["main"], params=params)
+    # Note: We set opt_level to 3 in order to fold batch norm
+    with relay.build_config(opt_level=3):
+        with relay.quantize.qconfig(global_scale=8.0,
+                                    skip_conv_layers=[0]):
+            mod = relay.quantize.quantize(mod, params=params)
 
     # Perform graph packing and constant folding for VTA target
     if target.device_name == "vta":
         assert env.BLOCK_IN == env.BLOCK_OUT
         relay_prog = graph_pack(
-            relay_prog,
+            mod["main"],
             env.BATCH,
             env.BLOCK_OUT,
             env.WGT_WIDTH,
index a508fc4..a316986 100644 (file)
@@ -168,18 +168,20 @@ with autotvm.tophub.context(target):
 
     if target.device_name == "vta":
         # Perform quantization in Relay
-        with relay.quantize.qconfig(global_scale=8.0,
-                                    skip_conv_layers=[0]):
-            relay_prog = relay.quantize.quantize(mod["main"], params=params)
-        # Perform graph packing and constant folding for VTA target
-        assert env.BLOCK_IN == env.BLOCK_OUT
-        relay_prog = graph_pack(
-            relay_prog,
-            env.BATCH,
-            env.BLOCK_OUT,
-            env.WGT_WIDTH,
-            start_name=pack_dict[model][0],
-            stop_name=pack_dict[model][1])
+        # Note: We set opt_level to 3 in order to fold batch norm
+        with relay.build_config(opt_level=3):
+            with relay.quantize.qconfig(global_scale=8.0,
+                                        skip_conv_layers=[0]):
+                mod = relay.quantize.quantize(mod, params=params)
+            # Perform graph packing and constant folding for VTA target
+            assert env.BLOCK_IN == env.BLOCK_OUT
+            relay_prog = graph_pack(
+                mod["main"],
+                env.BATCH,
+                env.BLOCK_OUT,
+                env.WGT_WIDTH,
+                start_name=pack_dict[model][0],
+                stop_name=pack_dict[model][1])
     else:
         relay_prog = mod["main"]