Fixed onnx test failures when run on a cpu backend (#3764)
author: tristan-arm <tristan.oconnor@arm.com>
Mon, 19 Aug 2019 16:33:43 +0000 (17:33 +0100)
committer: Zhi <5145158+zhiics@users.noreply.github.com>
Mon, 19 Aug 2019 16:33:43 +0000 (09:33 -0700)
* Fixed onnx test failures when run on a cpu backend

* Updated check_torch_conversion function to include output comparison

tests/python/frontend/onnx/test_forward.py

index 87d38e0..6173362 100644 (file)
@@ -1083,8 +1083,11 @@ def check_torch_conversion(model, input_size):
     # Set verbose=True for more output
     torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False)
     onnx_model = onnx.load(file_name)
-    shapes = { '0' : input_size }
-    expr, params = relay.frontend.from_onnx(onnx_model, shape=shapes)
+    for target, ctx in ctx_list():
+        input_data = np.random.uniform(size=input_size).astype('int32')
+        c2_out = get_caffe2_output(onnx_model, input_data)
+        tvm_out = get_tvm_output(onnx_model, input_data, target, ctx)
+        tvm.testing.assert_allclose(c2_out, tvm_out)
 
 def test_resnet():
     check_torch_conversion(torchvision.models.resnet18, (1,3,224,224))