[nGraph] Use additional models logic to run ONNX Model Zoo (#2253)
author Tomasz Socha <tomasz.socha@intel.com>
Mon, 21 Sep 2020 11:55:02 +0000 (13:55 +0200)
committer GitHub <noreply@github.com>
Mon, 21 Sep 2020 11:55:02 +0000 (13:55 +0200)
.ci/openvino-onnx/Jenkinsfile
ngraph/python/tests/__init__.py
ngraph/python/tests/conftest.py
ngraph/python/tests/test_onnx/model_zoo_preprocess.sh [new file with mode: 0644]
ngraph/python/tests/test_onnx/test_additional_models.py [deleted file]
ngraph/python/tests/test_onnx/test_zoo_models.py
ngraph/python/tests/test_onnx/utils/model_importer.py
ngraph/python/tests/test_onnx/utils/model_zoo_tester.py [deleted file]
ngraph/python/tox.ini

index 3b6f136..2862759 100644 (file)
@@ -69,7 +69,7 @@ def buildDockerImage() {
 def runTests() {
     sh """
         docker run --rm --name ${DOCKER_CONTAINER_NAME} \
-        --volume ${HOME}/ONNX_CI/onnx_models/.onnx:/root/.onnx ${DOCKER_IMAGE_TAG}
+        --volume ${HOME}/ONNX_CI/models/.onnx:/root/.onnx ${DOCKER_IMAGE_TAG}
     """
 }
 
index 822d99b..408fffd 100644 (file)
@@ -20,11 +20,14 @@ import pytest
 # See `pytest_configure` hook in `conftest.py` for more details.
 BACKEND_NAME = None
 
-# test.ADDITIONAL_MODELS_DIR is a configuration variable providing the path
-# with additional ONNX models to load and test import. It's set during pytest
-# configuration time. See `pytest_configure` hook in `conftest.py` for more
+# test.MODEL_ZOO_DIR is a configuration variable providing the path
+# to the ONNX Model Zoo models to test. It's set during pytest configuration
+# time. See the `pytest_configure` hook in `conftest.py` for more
+# details.
-ADDITIONAL_MODELS_DIR = None
+MODEL_ZOO_DIR = None
+
+# test.MODEL_ZOO_XFAIL is a configuration variable which enables xfails for the model zoo tests.
+MODEL_ZOO_XFAIL = False
 
 
 def xfail_test(reason="Mark the test as expected to fail", strict=True):
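The xfail markers imported by the zoo tests (e.g. xfail_issue_35926) are built with the helper above. A minimal usage sketch, assuming the helper returns a pytest.mark.xfail marker as its use elsewhere in the suite suggests; the issue number and test body below are made up:

# Hypothetical usage of xfail_test; nothing here is part of the change itself.
xfail_issue_00000 = xfail_test(reason="Tracked by issue 00000")

@xfail_issue_00000
def test_model_with_known_issue():
    raise NotImplementedError("placeholder: a real test would import and run a model here")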
index 905b458..568cae4 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
+import os
 import pytest
 
 import tests
 
+from pathlib import Path
+
+
+def _get_default_model_zoo_dir():
+    return Path(os.getenv("ONNX_HOME", Path.home() / ".onnx/model_zoo"))
+
 
 def pytest_addoption(parser):
     parser.addoption(
@@ -26,16 +33,23 @@ def pytest_addoption(parser):
         help="Select target device",
     )
     parser.addoption(
-        "--additional_models",
-        default="",
+        "--model_zoo_dir",
+        default=_get_default_model_zoo_dir(),
         type=str,
+        help="Location of the model zoo",
+    )
+    parser.addoption(
+        "--model_zoo_xfail",
+        action="store_true",
+        help="Treat known model zoo issues as xfails instead of failures",
     )
 
 
 def pytest_configure(config):
     backend_name = config.getvalue("backend")
     tests.BACKEND_NAME = backend_name
-    tests.ADDITIONAL_MODELS_DIR = config.getvalue("additional_models")
+    tests.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir"))
+    tests.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail")
 
     # register additional markers
     config.addinivalue_line("markers", "skip_on_cpu: Skip test on CPU")
@@ -49,7 +63,8 @@ def pytest_configure(config):
 
 def pytest_collection_modifyitems(config, items):
     backend_name = config.getvalue("backend")
-    tests.ADDITIONAL_MODELS_DIR = config.getvalue("additional_models")
+    tests.MODEL_ZOO_DIR = Path(config.getvalue("model_zoo_dir"))
+    tests.MODEL_ZOO_XFAIL = config.getvalue("model_zoo_xfail")
 
     keywords = {
         "CPU": "skip_on_cpu",
diff --git a/ngraph/python/tests/test_onnx/model_zoo_preprocess.sh b/ngraph/python/tests/test_onnx/model_zoo_preprocess.sh
new file mode 100644 (file)
index 0000000..d063f60
--- /dev/null
@@ -0,0 +1,72 @@
+#!/bin/bash
+set -e
+
+MODELS_DIR=false
+
+function print_help {
+       echo "Model preprocessing options:"
+       echo "    -h display this help message"
+       echo "    -c clone ONNX models repository"
+       echo "    -m <DIR> set location of the models"
+	echo "    -f clean target directory (during clone)"
+}
+
+while getopts ":hcfm:" opt; do
+       case ${opt} in
+               h )
+                       print_help
+                       ;;
+               \? )
+                       print_help
+                       ;;
+               : )
+                       print_help
+                       ;;
+               c )
+                       CLONE=true
+                       ;;
+               m )
+                       MODELS_DIR=$OPTARG
+                       ;;
+               f )
+                       CLEAN_DIR=true
+                       ;;
+       esac
+done
+shift $((OPTIND -1))
+
+if [ $MODELS_DIR = false ] ; then
+	echo "Unknown location of the ONNX Model Zoo models"
+       exit 170
+fi
+
+if [ "$CLONE" = true ] ; then
+	if [ "$CLEAN_DIR" = true ] ; then
+               rm -rf $MODELS_DIR
+       fi
+       git clone https://github.com/onnx/models.git $MODELS_DIR
+fi
+
+cd $MODELS_DIR
+# remove already downloaded models
+git clean -f -x -d
+git checkout .
+git pull -p
+# pull models from the lfs repository
+# onnx models are included in the tar.gz archives
+git lfs pull --include="*" --exclude="*.onnx"
+find $MODELS_DIR -name "*.onnx" | while read filename; do rm "$filename"; done;
+echo "extracting tar.gz archives..."
+find $MODELS_DIR -name '*.tar.gz' -execdir sh -c 'BASEDIR=$(basename "{}" .tar.gz) && mkdir -p $BASEDIR' \; -execdir sh -c 'BASEDIR=$(basename "{}" .tar.gz) && tar -xzvf "{}" -C $BASEDIR --strip-components=1' \;
+# fix yolo v4 model
+cd $MODELS_DIR/vision/object_detection_segmentation/yolov4/model/yolov4/yolov4/test_data_set
+mv input0.pb input_0.pb
+mv input1.pb input_1.pb
+mv input2.pb input_2.pb
+mv output0.pb output_0.pb
+mv output1.pb output_1.pb
+mv output2.pb output_2.pb
+# fix roberta model
+cd $MODELS_DIR/text/machine_comprehension/roberta/model/roberta-sequence-classification-9/
+mkdir test_data_set_0
+mv *.pb test_data_set_0/
diff --git a/ngraph/python/tests/test_onnx/test_additional_models.py b/ngraph/python/tests/test_onnx/test_additional_models.py
deleted file mode 100644 (file)
index 3163813..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-# ******************************************************************************
-# Copyright 2018-2020 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ******************************************************************************
-
-import tests
-from operator import itemgetter
-from pathlib import Path
-import os
-
-from tests.test_onnx.utils import OpenVinoOnnxBackend
-from tests.test_onnx.utils.model_importer import ModelImportRunner
-
-
-def _get_default_additional_models_dir():
-    onnx_home = os.path.expanduser(os.getenv("ONNX_HOME", os.path.join("~", ".onnx")))
-    return os.path.join(onnx_home, "additional_models")
-
-
-MODELS_ROOT_DIR = tests.ADDITIONAL_MODELS_DIR
-if len(MODELS_ROOT_DIR) == 0:
-    MODELS_ROOT_DIR = _get_default_additional_models_dir()
-
-tolerance_map = {
-    "arcface_lresnet100e_opset8": {"atol": 0.001, "rtol": 0.001},
-    "fp16_inception_v1": {"atol": 0.001, "rtol": 0.001},
-    "mobilenet_opset7": {"atol": 0.001, "rtol": 0.001},
-    "resnet50_v2_opset7": {"atol": 0.001, "rtol": 0.001},
-    "test_mobilenetv2-1.0": {"atol": 0.001, "rtol": 0.001},
-    "test_resnet101v2": {"atol": 0.001, "rtol": 0.001},
-    "test_resnet18v2": {"atol": 0.001, "rtol": 0.001},
-    "test_resnet34v2": {"atol": 0.001, "rtol": 0.001},
-    "test_resnet50v2": {"atol": 0.001, "rtol": 0.001},
-    "mosaic": {"atol": 0.001, "rtol": 0.001},
-    "pointilism": {"atol": 0.001, "rtol": 0.001},
-    "rain_princess": {"atol": 0.001, "rtol": 0.001},
-    "udnie": {"atol": 0.001, "rtol": 0.001},
-    "candy": {"atol": 0.003, "rtol": 0.003},
-}
-
-zoo_models = []
-# rglob doesn't work for symlinks, so models have to be physically somwhere inside "MODELS_ROOT_DIR"
-for path in Path(MODELS_ROOT_DIR).rglob("*.onnx"):
-    mdir, file = os.path.split(str(path))
-    if not file.startswith("."):
-        mdir = str(mdir)
-        if mdir.endswith("/"):
-            mdir = mdir[:-1]
-        model = {"model_name": path, "model_file": file, "dir": mdir}
-        basedir = os.path.basename(mdir)
-        if basedir in tolerance_map:
-            # updated model looks now:
-            # {"model_name": path, "model_file": file, "dir": mdir, "atol": ..., "rtol": ...}
-            model.update(tolerance_map[basedir])
-        zoo_models.append(model)
-
-if len(zoo_models) > 0:
-    sorted(zoo_models, key=itemgetter("model_name"))
-
-    # Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones.
-    OpenVinoOnnxBackend.backend_name = tests.BACKEND_NAME
-
-    # import all test cases at global scope to make them visible to pytest
-    backend_test = ModelImportRunner(OpenVinoOnnxBackend, zoo_models, __name__)
-    test_cases = backend_test.test_cases["OnnxBackendValidationModelImportTest"]
-    del test_cases
-
-    test_cases = backend_test.test_cases["OnnxBackendValidationModelExecutionTest"]
-    del test_cases
-
-    globals().update(backend_test.enable_report().test_cases)
index dbc1f5e..a298e9f 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ******************************************************************************
-# ## Prepare a list of models from the ONNX Model Zoo
-#
-# from pathlib import Path
-# from operator import itemgetter
-# import re
-#
-# MODELS_ROOT_DIR = '/path/to/onnx/models'
-# zoo_models = []
-# for path in Path(MODELS_ROOT_DIR).rglob('*.tar.gz'):
-#     match = re.search('.*onnx\/models\/(.*\/model\/(.+)-(\d+)\.tar\.gz)', str(path))
-#     url = match.group(1)
-#     model_name = match.group(2)
-#     opset = match.group(3)
-#     zoo_models.append({'model_name': '{}_opset{}'.format(model_name.replace('-', '_'), opset), 'url': url})
-#
-# sorted(zoo_models, key=itemgetter('model_name'))
-from tests.test_onnx.utils import OpenVinoOnnxBackend
-from tests.test_onnx.utils.model_zoo_tester import ModelZooTestRunner
-from tests import (BACKEND_NAME,
-                   xfail_issue_36533,
-                   xfail_issue_36534,
-                   xfail_issue_35926,
-                   xfail_issue_36535,
-                   xfail_issue_36537,
-                   xfail_issue_36538)
-
-_GITHUB_MODELS_LTS = "https://media.githubusercontent.com/media/onnx/models/master/"
 
-zoo_models = [
-    {
-        "model_name": "FasterRCNN_opset10",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/object_detection_segmentation/faster-rcnn/model/FasterRCNN-10.tar.gz",
-    },
-    {
-        "model_name": "MaskRCNN_opset10",
-        "url": _GITHUB_MODELS_LTS + "vision/object_detection_segmentation/mask-rcnn/model/MaskRCNN-10.tar.gz",
-    },
-    {
-        "model_name": "ResNet101_DUC_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/object_detection_segmentation/duc/model/ResNet101-DUC-7.tar.gz",
-    },
-    {
-        "model_name": "arcfaceresnet100_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/body_analysis/arcface/model/arcfaceresnet100-8.tar.gz",
-    },
-    {
-        "model_name": "bertsquad_opset10",
-        "url": _GITHUB_MODELS_LTS + "text/machine_comprehension/bert-squad/model/bertsquad-10.tar.gz",
-    },
-    {
-        "model_name": "bertsquad_opset8",
-        "url": _GITHUB_MODELS_LTS + "text/machine_comprehension/bert-squad/model/bertsquad-8.tar.gz",
-    },
-    {
-        "model_name": "bidaf_opset9",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "text/machine_comprehension/bidirectional_attention_flow/model/bidaf-9.tar.gz",
-    },
-    {
-        "model_name": "bvlcalexnet_opset3",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/alexnet/model/bvlcalexnet-3.tar.gz",
-    },
-    {
-        "model_name": "bvlcalexnet_opset6",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/alexnet/model/bvlcalexnet-6.tar.gz",
-    },
-    {
-        "model_name": "bvlcalexnet_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/alexnet/model/bvlcalexnet-7.tar.gz",
-    },
-    {
-        "model_name": "bvlcalexnet_opset8",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/alexnet/model/bvlcalexnet-8.tar.gz",
-    },
-    {
-        "model_name": "bvlcalexnet_opset9",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/alexnet/model/bvlcalexnet-9.tar.gz",
-    },
-    {
-        "model_name": "caffenet_opset3",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/caffenet/model/caffenet-3.tar.gz",
-    },
-    {
-        "model_name": "caffenet_opset6",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/caffenet/model/caffenet-6.tar.gz",
-    },
-    {
-        "model_name": "caffenet_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/caffenet/model/caffenet-7.tar.gz",
-    },
-    {
-        "model_name": "caffenet_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/caffenet/model/caffenet-8.tar.gz",
-    },
-    {
-        "model_name": "caffenet_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/caffenet/model/caffenet-9.tar.gz",
-    },
-    {
-        "model_name": "candy_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/candy-8.tar.gz",
-    },
-    {
-        "model_name": "candy_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/candy-9.tar.gz",
-    },
-    {
-        "model_name": "densenet_opset3",
-        "atol": 1e-07,
-        "rtol": 0.002,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/densenet-121/model/densenet-3.tar.gz",
-    },
-    {
-        "model_name": "densenet_opset6",
-        "atol": 1e-07,
-        "rtol": 0.002,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/densenet-121/model/densenet-6.tar.gz",
-    },
-    {
-        "model_name": "densenet_opset7",
-        "atol": 1e-07,
-        "rtol": 0.002,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/densenet-121/model/densenet-7.tar.gz",
-    },
-    {
-        "model_name": "densenet_opset8",
-        "atol": 1e-07,
-        "rtol": 0.002,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/densenet-121/model/densenet-8.tar.gz",
-    },
-    {
-        "model_name": "densenet_opset9",
-        "atol": 1e-07,
-        "rtol": 0.002,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/densenet-121/model/densenet-9.tar.gz",
-    },
-    {
-        "model_name": "emotion_ferplus_opset2",
-        "url": _GITHUB_MODELS_LTS + "vision/body_analysis/emotion_ferplus/model/emotion-ferplus-2.tar.gz",
-    },
-    {
-        "model_name": "emotion_ferplus_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/body_analysis/emotion_ferplus/model/emotion-ferplus-7.tar.gz",
-    },
-    {
-        "model_name": "emotion_ferplus_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/body_analysis/emotion_ferplus/model/emotion-ferplus-8.tar.gz",
-    },
-    {
-        "model_name": "googlenet_opset3",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/googlenet/model/googlenet-3.tar.gz",
-    },
-    {
-        "model_name": "googlenet_opset6",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/googlenet/model/googlenet-6.tar.gz",
-    },
-    {
-        "model_name": "googlenet_opset7",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/googlenet/model/googlenet-7.tar.gz",
-    },
-    {
-        "model_name": "googlenet_opset8",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/googlenet/model/googlenet-8.tar.gz",
-    },
-    {
-        "model_name": "googlenet_opset9",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.tar.gz",
-    },
-    {
-        "model_name": "gpt2_opset10",
-        "url": _GITHUB_MODELS_LTS + "text/machine_comprehension/gpt-2/model/gpt2-10.tar.gz",
-    },
-    {
-        "model_name": "inception_v1_opset3",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-3.tar.gz",
-    },
-    {
-        "model_name": "inception_v1_opset6",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-6.tar.gz",
-    },
-    {
-        "model_name": "inception_v1_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-7.tar.gz",
-    },
-    {
-        "model_name": "inception_v1_opset8",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-8.tar.gz",
-    },
-    {
-        "model_name": "inception_v1_opset9",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v1/model/inception-v1-9.tar.gz",
-    },
-    {
-        "model_name": "inception_v2_opset3",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-3.tar.gz",
-    },
-    {
-        "model_name": "inception_v2_opset6",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-6.tar.gz",
-    },
-    {
-        "model_name": "inception_v2_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-7.tar.gz",
-    },
-    {
-        "model_name": "inception_v2_opset8",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-8.tar.gz",
-    },
-    {
-        "model_name": "inception_v2_opset9",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS
-        + "vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-9.tar.gz",
-    },
-    {
-        "model_name": "mnist_opset1",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/mnist/model/mnist-1.tar.gz",
-    },
-    {
-        "model_name": "mnist_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/mnist/model/mnist-7.tar.gz",
-    },
-    {
-        "model_name": "mnist_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/mnist/model/mnist-8.tar.gz",
-    },
-    {
-        "model_name": "mobilenetv2_opset7",
-        "atol": 1e-07,
-        "rtol": 0.002,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/mobilenet/model/mobilenetv2-7.tar.gz",
-    },
-    {
-        "model_name": "mosaic_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/mosaic-8.tar.gz",
-    },
-    {
-        "model_name": "mosaic_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/mosaic-9.tar.gz",
-    },
-    {
-        "model_name": "pointilism_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/pointilism-8.tar.gz",
-    },
-    {
-        "model_name": "pointilism_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/pointilism-9.tar.gz",
-    },
-    {
-        "model_name": "rain_princess_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/rain-princess-8.tar.gz",
-    },
-    {
-        "model_name": "rain_princess_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/rain-princess-9.tar.gz",
-    },
-    {
-        "model_name": "rcnn_ilsvrc13_opset3",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/rcnn_ilsvrc13/model/rcnn-ilsvrc13-3.tar.gz",
-    },
-    {
-        "model_name": "rcnn_ilsvrc13_opset6",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/rcnn_ilsvrc13/model/rcnn-ilsvrc13-6.tar.gz",
-    },
-    {
-        "model_name": "rcnn_ilsvrc13_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/rcnn_ilsvrc13/model/rcnn-ilsvrc13-7.tar.gz",
-    },
-    {
-        "model_name": "rcnn_ilsvrc13_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/rcnn_ilsvrc13/model/rcnn-ilsvrc13-8.tar.gz",
-    },
-    {
-        "model_name": "rcnn_ilsvrc13_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/rcnn_ilsvrc13/model/rcnn-ilsvrc13-9.tar.gz",
-    },
-    {
-        "model_name": "resnet101_v1_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet101-v1-7.tar.gz",
-    },
-    {
-        "model_name": "resnet101_v2_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet101-v2-7.tar.gz",
-    },
-    {
-        "model_name": "resnet152_v1_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet152-v1-7.tar.gz",
-    },
-    {
-        "model_name": "resnet152_v2_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet152-v2-7.tar.gz",
-    },
-    {
-        "model_name": "resnet18_v1_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet18-v1-7.tar.gz",
-    },
-    {
-        "model_name": "resnet18_v2_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet18-v2-7.tar.gz",
-    },
-    {
-        "model_name": "resnet34_v1_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet34-v1-7.tar.gz",
-    },
-    {
-        "model_name": "resnet34_v2_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet34-v2-7.tar.gz",
-    },
-    {
-        "model_name": "resnet50_caffe2_v1_opset3",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet50-caffe2-v1-3.tar.gz",
-    },
-    {
-        "model_name": "resnet50_caffe2_v1_opset6",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet50-caffe2-v1-6.tar.gz",
-    },
-    {
-        "model_name": "resnet50_caffe2_v1_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet50-caffe2-v1-7.tar.gz",
-    },
-    {
-        "model_name": "resnet50_caffe2_v1_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet50-caffe2-v1-8.tar.gz",
-    },
-    {
-        "model_name": "resnet50_caffe2_v1_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet50-caffe2-v1-9.tar.gz",
-    },
-    {
-        "model_name": "resnet50_v1_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet50-v1-7.tar.gz",
-    },
-    {
-        "model_name": "resnet50_v2_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/resnet/model/resnet50-v2-7.tar.gz",
-    },
-    {
-        "model_name": "shufflenet_opset3",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/shufflenet/model/shufflenet-3.tar.gz",
-    },
-    {
-        "model_name": "shufflenet_opset6",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/shufflenet/model/shufflenet-6.tar.gz",
-    },
-    {
-        "model_name": "shufflenet_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/shufflenet/model/shufflenet-7.tar.gz",
-    },
-    {
-        "model_name": "shufflenet_opset8",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/shufflenet/model/shufflenet-8.tar.gz",
-    },
-    {
-        "model_name": "shufflenet_opset9",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/shufflenet/model/shufflenet-9.tar.gz",
-    },
-    {
-        "model_name": "shufflenet_v2_opset10",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/shufflenet/model/shufflenet-v2-10.tar.gz",
-    },
-    {
-        "model_name": "squeezenet1.0_opset3",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/squeezenet/model/squeezenet1.0-3.tar.gz",
-    },
-    {
-        "model_name": "squeezenet1.0_opset6",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/squeezenet/model/squeezenet1.0-6.tar.gz",
-    },
-    {
-        "model_name": "squeezenet1.0_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/squeezenet/model/squeezenet1.0-7.tar.gz",
-    },
-    {
-        "model_name": "squeezenet1.0_opset8",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/squeezenet/model/squeezenet1.0-8.tar.gz",
-    },
-    {
-        "model_name": "squeezenet1.0_opset9",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/squeezenet/model/squeezenet1.0-9.tar.gz",
-    },
-    {
-        "model_name": "squeezenet1.1_opset7",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/classification/squeezenet/model/squeezenet1.1-7.tar.gz",
-    },
-    {
-        "model_name": "ssd_opset10",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/object_detection_segmentation/ssd/model/ssd-10.tar.gz",
-    },
-    {
-        "model_name": "super_resolution_opset10",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/super_resolution/sub_pixel_cnn_2016/model/super-resolution-10.tar.gz",
-    },
-    {
-        "model_name": "tiny_yolov3_opset11",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/object_detection_segmentation/tiny-yolov3/model/tiny-yolov3-11.tar.gz",
-    },
-    {
-        "model_name": "tinyyolov2_opset1",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/object_detection_segmentation/tiny-yolov2/model/tinyyolov2-1.tar.gz",
-    },
-    {
-        "model_name": "tinyyolov2_opset7",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/object_detection_segmentation/tiny-yolov2/model/tinyyolov2-7.tar.gz",
-    },
-    {
-        "model_name": "tinyyolov2_opset8",
-        "url": _GITHUB_MODELS_LTS
-        + "vision/object_detection_segmentation/tiny-yolov2/model/tinyyolov2-8.tar.gz",
-    },
-    {
-        "model_name": "udnie_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/udnie-8.tar.gz",
-    },
-    {
-        "model_name": "udnie_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/style_transfer/fast_neural_style/model/udnie-9.tar.gz",
-    },
-    {
-        "model_name": "vgg16_bn_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg16-bn-7.tar.gz",
-    },
-    {
-        "model_name": "vgg16_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg16-7.tar.gz",
-    },
-    {
-        "model_name": "vgg19_bn_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg19-bn-7.tar.gz",
-    },
-    {
-        "model_name": "vgg19_caffe2_opset3",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg19-caffe2-3.tar.gz",
-    },
-    {
-        "model_name": "vgg19_caffe2_opset6",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg19-caffe2-6.tar.gz",
-    },
-    {
-        "model_name": "vgg19_caffe2_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg19-caffe2-7.tar.gz",
-    },
-    {
-        "model_name": "vgg19_caffe2_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg19-caffe2-8.tar.gz",
-    },
-    {
-        "model_name": "vgg19_caffe2_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg19-caffe2-9.tar.gz",
-    },
-    {
-        "model_name": "vgg19_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/vgg/model/vgg19-7.tar.gz",
-    },
-    {
-        "model_name": "yolov3_opset10",
-        "atol": 1e-07,
-        "rtol": 0.001,
-        "url": _GITHUB_MODELS_LTS + "vision/object_detection_segmentation/yolov3/model/yolov3-10.tar.gz",
-    },
-    {
-        "model_name": "zfnet512_opset3",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/zfnet-512/model/zfnet512-3.tar.gz",
-    },
-    {
-        "model_name": "zfnet512_opset6",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/zfnet-512/model/zfnet512-6.tar.gz",
-    },
-    {
-        "model_name": "zfnet512_opset7",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/zfnet-512/model/zfnet512-7.tar.gz",
-    },
-    {
-        "model_name": "zfnet512_opset8",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/zfnet-512/model/zfnet512-8.tar.gz",
-    },
-    {
-        "model_name": "zfnet512_opset9",
-        "url": _GITHUB_MODELS_LTS + "vision/classification/zfnet-512/model/zfnet512-9.tar.gz",
-    },
-]
+import pytest
+import tests
+from operator import itemgetter
+from pathlib import Path
+import os
 
-# Set backend device name to be used instead of hardcoded by ONNX BackendTest class ones.
-OpenVinoOnnxBackend.backend_name = BACKEND_NAME
+from tests.test_onnx.utils import OpenVinoOnnxBackend
+from tests.test_onnx.utils.model_importer import ModelImportRunner
 
-# import all test cases at global scope to make them visible to pytest
-backend_test = ModelZooTestRunner(OpenVinoOnnxBackend, zoo_models, __name__)
-test_cases = backend_test.test_cases["OnnxBackendZooModelTest"]
 
-test_cases_list = [
-    test_cases.test_udnie_opset8_cpu,
-    test_cases.test_udnie_opset8_cpu,
-    test_cases.test_udnie_opset9_cpu,
-    test_cases.test_mosaic_opset8_cpu,
-    test_cases.test_vgg16_opset7_cpu,
-    test_cases.test_pointilism_opset9_cpu,
-    test_cases.test_vgg19_bn_opset7_cpu,
-    test_cases.test_candy_opset9_cpu,
-    test_cases.test_rain_princess_opset8_cpu,
-    test_cases.test_mosaic_opset9_cpu,
-    test_cases.test_pointilism_opset8_cpu,
-    test_cases.test_rain_princess_opset9_cpu,
-    test_cases.test_ssd_opset10_cpu,
-    test_cases.test_resnet152_v2_opset7_cpu,
-    test_cases.test_resnet18_v1_opset7_cpu,
-    test_cases.test_resnet18_v2_opset7_cpu,
-    test_cases.test_resnet34_v2_opset7_cpu,
-    test_cases.test_resnet101_v2_opset7_cpu,
-    test_cases.test_resnet101_v1_opset7_cpu,
-    test_cases.test_ResNet101_DUC_opset7_cpu,
-    test_cases.test_candy_opset8_cpu,
-    test_cases.test_resnet152_v1_opset7_cpu
-]
+MODELS_ROOT_DIR = tests.MODEL_ZOO_DIR
 
-xfail_issue_36534(test_cases.test_FasterRCNN_opset10_cpu)
-xfail_issue_36534(test_cases.test_MaskRCNN_opset10_cpu)
+tolerance_map = {
+    "arcface_lresnet100e_opset8": {"atol": 0.001, "rtol": 0.001},
+    "fp16_inception_v1": {"atol": 0.001, "rtol": 0.001},
+    "mobilenet_opset7": {"atol": 0.001, "rtol": 0.001},
+    "resnet50_v2_opset7": {"atol": 0.001, "rtol": 0.001},
+    "test_mobilenetv2-1.0": {"atol": 0.001, "rtol": 0.001},
+    "test_resnet101v2": {"atol": 0.001, "rtol": 0.001},
+    "test_resnet18v2": {"atol": 0.001, "rtol": 0.001},
+    "test_resnet34v2": {"atol": 0.001, "rtol": 0.001},
+    "test_resnet50v2": {"atol": 0.001, "rtol": 0.001},
+    "mosaic": {"atol": 0.001, "rtol": 0.001},
+    "pointilism": {"atol": 0.001, "rtol": 0.001},
+    "rain_princess": {"atol": 0.001, "rtol": 0.001},
+    "udnie": {"atol": 0.001, "rtol": 0.001},
+    "candy": {"atol": 0.003, "rtol": 0.003},
+    "densenet-3": {"atol": 1e-7, "rtol": 0.0011},
+    "arcfaceresnet100-8": {"atol": 0.001, "rtol": 0.001},
+    "mobilenetv2-7": {"atol": 0.001, "rtol": 0.001},
+    "resnet101-v1-7": {"atol": 0.001, "rtol": 0.001},
+    "resnet101-v2-7": {"atol": 0.001, "rtol": 0.001},
+    "resnet152-v1-7": {"atol": 1e-7, "rtol": 0.003},
+    "resnet152-v2-7": {"atol": 0.001, "rtol": 0.001},
+    "resnet18-v1-7": {"atol": 0.001, "rtol": 0.001},
+    "resnet18-v2-7": {"atol": 0.001, "rtol": 0.001},
+    "resnet34-v2-7": {"atol": 0.001, "rtol": 0.001},
+    "vgg16-7": {"atol": 0.001, "rtol": 0.001},
+    "vgg19-bn-7": {"atol": 0.001, "rtol": 0.001},
+    "tinyyolov2-7": {"atol": 0.001, "rtol": 0.001},
+    "tinyyolov2-8": {"atol": 0.001, "rtol": 0.001},
+    "candy-8": {"atol": 0.001, "rtol": 0.001},
+    "candy-9": {"atol": 0.007, "rtol": 0.001},
+    "mosaic-8": {"atol": 0.003, "rtol": 0.001},
+    "mosaic-9": {"atol": 0.001, "rtol": 0.001},
+    "pointilism-8": {"atol": 0.001, "rtol": 0.001},
+    "pointilism-9": {"atol": 0.001, "rtol": 0.001},
+    "rain-princess-8": {"atol": 0.001, "rtol": 0.001},
+    "rain-princess-9": {"atol": 0.001, "rtol": 0.001},
+    "udnie-8": {"atol": 0.001, "rtol": 0.001},
+    "udnie-9": {"atol": 0.001, "rtol": 0.001},
+}
 
-xfail_issue_35926(test_cases.test_bertsquad_opset8_cpu)
-xfail_issue_35926(test_cases.test_bertsquad_opset10_cpu)
+zoo_models = []
+# rglob doesn't work for symlinks, so models have to be physically somewhere inside "MODELS_ROOT_DIR"
+for path in Path(MODELS_ROOT_DIR).rglob("*.onnx"):
+    mdir = path.parent
+    file_name = path.name
+    if path.is_file() and not file_name.startswith("."):
+        model = {"model_name": path, "model_file": file_name, "dir": mdir}
+        basedir = mdir.stem
+        if basedir in tolerance_map:
+            # the updated model entry now looks like:
+            # {"model_name": path, "model_file": file_name, "dir": mdir, "atol": ..., "rtol": ...}
+            model.update(tolerance_map[basedir])
+        zoo_models.append(model)
 
-xfail_issue_35926(test_cases.test_gpt2_opset10_cpu)
+if len(zoo_models) > 0:
+    zoo_models.sort(key=itemgetter("model_name"))
 
-xfail_issue_36535(test_cases.test_super_resolution_opset10_cpu)
-xfail_issue_36535(test_cases.test_tinyyolov2_opset7_cpu)
-xfail_issue_36535(test_cases.test_tinyyolov2_opset8_cpu)
+    # Set the backend device name to be used instead of the one hardcoded by the ONNX BackendTest class.
+    OpenVinoOnnxBackend.backend_name = tests.BACKEND_NAME
 
-xfail_issue_36537(test_cases.test_shufflenet_v2_opset10_cpu)
-xfail_issue_36538(test_cases.test_yolov3_opset10_cpu)
-xfail_issue_36538(test_cases.test_tiny_yolov3_opset11_cpu)
+    # import all test cases at global scope to make them visible to pytest
+    backend_test = ModelImportRunner(OpenVinoOnnxBackend, zoo_models, __name__, MODELS_ROOT_DIR)
+    test_cases = backend_test.test_cases["OnnxBackendModelImportTest"]
+    # flake8: noqa: E501
+    if tests.MODEL_ZOO_XFAIL:
+        import_xfail_list = [
+            "test_onnx_model_zoo_vision_classification_mnist_model_mnist_1_model_cpu",
+            "test_onnx_model_zoo_vision_object_detection_segmentation_tiny_yolov2_model_tinyyolov2_1_model_cpu",
+            "test_onnx_model_zoo_vision_object_detection_segmentation_yolov3_model_yolov3_10_yolov3_cpu",
+            "test_onnx_model_zoo_vision_object_detection_segmentation_ssd_mobilenetv1_model_ssd_mobilenet_v1_10_ssd_mobilenet_v1_cpu",
+            "test_onnx_model_zoo_text_machine_comprehension_bidirectional_attention_flow_model_bidaf_9_bidaf_cpu",
+        ]
+        for test_case in import_xfail_list:
+            pytest.mark.xfail(getattr(test_cases, test_case))
+    del test_cases
 
-for test_case in test_cases_list:
-    xfail_issue_36533(test_case)
+    test_cases = backend_test.test_cases["OnnxBackendModelExecutionTest"]
+    if tests.MODEL_ZOO_XFAIL:
+        execution_xfail_list = [
+            "test_onnx_model_zoo_text_machine_comprehension_gpt_2_model_gpt2_10_GPT2_model_cpu",
+            "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_10_bertsquad10_cpu",
+            "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_sequence_classification_9_roberta_sequence_classification_9_cpu",
+            "test_onnx_model_zoo_text_machine_comprehension_gpt_2_model_gpt2_lm_head_10_model_cpu",
+            "test_onnx_model_zoo_vision_classification_efficientnet_lite4_model_efficientnet_lite4_11_efficientnet_lite4_cpu",
+            "test_onnx_model_zoo_text_machine_comprehension_bert_squad_model_bertsquad_8_bertsquad8_cpu",
+            "test_onnx_model_zoo_vision_object_detection_segmentation_duc_model_ResNet101_DUC_7_ResNet101_DUC_HDC_cpu",
+            "test_onnx_model_zoo_vision_object_detection_segmentation_retinanet_model_retinanet_9_retinanet_9_cpu",
+            "test_onnx_model_zoo_vision_classification_shufflenet_model_shufflenet_v2_10_test_shufflenetv2_model_cpu",
+            "test_onnx_model_zoo_vision_object_detection_segmentation_yolov4_model_yolov4_yolov4_yolov4_cpu",
+            "test_onnx_model_zoo_text_machine_comprehension_roberta_model_roberta_base_11_roberta_base_11_cpu",
+        ]
+        for test_case in import_xfail_list + execution_xfail_list:
+            pytest.mark.xfail(getattr(test_cases, test_case))
+    del test_cases
 
-del test_cases
-globals().update(backend_test.enable_report().test_cases)
+    globals().update(backend_test.enable_report().test_cases)
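Note that the tolerance_map above is keyed by the model's parent directory name on disk (mdir.stem), i.e. the directory produced when the zoo archives are extracted, not by the generated test name. A small sketch of the lookup, with a hypothetical model path and a one-entry excerpt of the map:

from pathlib import Path

tolerance_map = {"candy-9": {"atol": 0.007, "rtol": 0.001}}  # excerpt of the map above

path = Path("/data/model_zoo/vision/style_transfer/fast_neural_style/model/candy-9/candy-9.onnx")
model = {"model_name": path, "model_file": path.name, "dir": path.parent}
if path.parent.stem in tolerance_map:  # "candy-9"
    model.update(tolerance_map[path.parent.stem])
# model now carries the relaxed atol/rtol alongside its paths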
index 178515c..5c5ab86 100644 (file)
 # limitations under the License.
 # ******************************************************************************
 
-import glob
 import numpy as np
 import onnx
 import onnx.backend.test
-import os
 import unittest
 
 from collections import defaultdict
@@ -26,6 +24,7 @@ from onnx import numpy_helper, NodeProto, ModelProto
 from onnx.backend.base import Backend, BackendRep
 from onnx.backend.test.case.test_case import TestCase as OnnxTestCase
 from onnx.backend.test.runner import TestItem
+from pathlib import Path
 from tests.test_onnx.utils.onnx_helpers import import_onnx_model
 from typing import Any, Dict, List, Optional, Pattern, Set, Text, Type, Union
 
@@ -34,8 +33,9 @@ class ModelImportRunner(onnx.backend.test.BackendTest):
     def __init__(
         self,
         backend: Type[Backend],
-        models: List[Dict[str, str]],
+        models: List[Dict[str, Path]],
         parent_module: Optional[str] = None,
+        data_root: Union[Path, str] = "",
     ) -> None:
         self.backend = backend
         self._parent_module = parent_module
@@ -44,7 +44,12 @@ class ModelImportRunner(onnx.backend.test.BackendTest):
         self._test_items = defaultdict(dict)  # type: Dict[Text, Dict[Text, TestItem]]
 
         for model in models:
-            test_name = "test_{}".format(model["model_name"])
+            test_name = "test{}".format(model["model_name"]) \
+                .replace(str(data_root), "") \
+                .replace(".onnx", "") \
+                .replace("/", "_") \
+                .replace("\\", "_") \
+                .replace("-", "_")
 
             test_case = OnnxTestCase(
                 name=test_name,
@@ -57,18 +62,17 @@ class ModelImportRunner(onnx.backend.test.BackendTest):
                 rtol=model.get("rtol", 0.001),
                 atol=model.get("atol", 1e-07),
             )
-            self._add_model_import_test(test_case, "Validation")
-            self._add_model_execution_test(test_case, "Validation")
+            self._add_model_import_test(test_case)
+            self._add_model_execution_test(test_case)
 
     @staticmethod
-    def _load_onnx_model(model_dir: str, filename: str) -> ModelProto:
+    def _load_onnx_model(model_dir: Path, filename: Path) -> ModelProto:
         if model_dir is None:
             raise unittest.SkipTest("Model directory not provided")
 
-        model_pb_path = os.path.join(model_dir, filename)
-        return onnx.load(model_pb_path)
+        return onnx.load(model_dir / filename)
 
-    def _add_model_import_test(self, model_test: OnnxTestCase, kind: Text) -> None:
+    def _add_model_import_test(self, model_test: OnnxTestCase) -> None:
         # model is loaded at runtime, note sometimes it could even
         # never loaded if the test skipped
         model_marker = [None]  # type: List[Optional[Union[ModelProto, NodeProto]]]
@@ -78,44 +82,52 @@ class ModelImportRunner(onnx.backend.test.BackendTest):
             model_marker[0] = model
             assert import_onnx_model(model)
 
-        self._add_test(kind + "ModelImport", model_test.name, run_import, model_marker)
+        self._add_test("ModelImport", model_test.name, run_import, model_marker)
 
     @classmethod
     def _execute_npz_data(
         cls, model_dir: str, prepared_model: BackendRep, result_rtol: float, result_atol: float,
-    ) -> None:
-        for test_data_npz in glob.glob(os.path.join(model_dir, "test_data_*.npz")):
+    ) -> int:
+        executed_tests = 0
+        for test_data_npz in model_dir.glob("test_data_*.npz"):
             test_data = np.load(test_data_npz, encoding="bytes")
             inputs = list(test_data["inputs"])
             outputs = list(prepared_model.run(inputs))
             ref_outputs = test_data["outputs"]
             cls.assert_similar_outputs(ref_outputs, outputs, result_rtol, result_atol)
+            executed_tests = executed_tests + 1
+        return executed_tests
 
     @classmethod
     def _execute_pb_data(
         cls, model_dir: str, prepared_model: BackendRep, result_rtol: float, result_atol: float,
-    ) -> None:
-        for test_data_dir in glob.glob(os.path.join(model_dir, "test_data_set*")):
+    ) -> int:
+        executed_tests = 0
+        for test_data_dir in model_dir.glob("test_data_set*"):
             inputs = []
-            inputs_num = len(glob.glob(os.path.join(test_data_dir, "input_*.pb")))
+            inputs_num = len(list(test_data_dir.glob("input_*.pb")))
             for i in range(inputs_num):
-                input_file = os.path.join(test_data_dir, "input_{}.pb".format(i))
+                input_file = Path(test_data_dir) / "input_{}.pb".format(i)
                 tensor = onnx.TensorProto()
                 with open(input_file, "rb") as f:
                     tensor.ParseFromString(f.read())
                 inputs.append(numpy_helper.to_array(tensor))
             ref_outputs = []
-            ref_outputs_num = len(glob.glob(os.path.join(test_data_dir, "output_*.pb")))
+            ref_outputs_num = len(list(test_data_dir.glob("output_*.pb")))
             for i in range(ref_outputs_num):
-                output_file = os.path.join(test_data_dir, "output_{}.pb".format(i))
+                output_file = Path(test_data_dir) / "output_{}.pb".format(i)
                 tensor = onnx.TensorProto()
                 with open(output_file, "rb") as f:
                     tensor.ParseFromString(f.read())
                 ref_outputs.append(numpy_helper.to_array(tensor))
+            if len(inputs) == 0:
+                continue
             outputs = list(prepared_model.run(inputs))
             cls.assert_similar_outputs(ref_outputs, outputs, result_rtol, result_atol)
+            executed_tests = executed_tests + 1
+        return executed_tests
 
-    def _add_model_execution_test(self, model_test: OnnxTestCase, kind: Text) -> None:
+    def _add_model_execution_test(self, model_test: OnnxTestCase) -> None:
         # model is loaded at runtime, note sometimes it could even
         # never loaded if the test skipped
         model_marker = [None]  # type: List[Optional[Union[ModelProto, NodeProto]]]
@@ -125,13 +137,13 @@ class ModelImportRunner(onnx.backend.test.BackendTest):
             model_marker[0] = model
             prepared_model = self.backend.prepare(model, device)
             assert prepared_model is not None
-
-            ModelImportRunner._execute_npz_data(
+            executed_tests = ModelImportRunner._execute_npz_data(
                 model_test.model_dir, prepared_model, model_test.rtol, model_test.atol
             )
 
-            ModelImportRunner._execute_pb_data(
+            executed_tests = executed_tests + ModelImportRunner._execute_pb_data(
                 model_test.model_dir, prepared_model, model_test.rtol, model_test.atol
             )
 
-        self._add_test(kind + "ModelExecution", model_test.name, run_execution, model_marker)
+            assert executed_tests > 0, "This model has no test data"
+        self._add_test("ModelExecution", model_test.name, run_execution, model_marker)
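The test case names produced here (and therefore the entries in the xfail lists in test_zoo_models.py) are derived from the model path relative to data_root. A worked example; the zoo root and checkout layout are assumptions inferred from the xfail entries, not documented facts:

from pathlib import Path

data_root = Path("/root/.onnx/model_zoo")
model_path = data_root / "onnx/model_zoo/vision/classification/mnist/model/mnist-1/model.onnx"

test_name = "test{}".format(model_path) \
    .replace(str(data_root), "") \
    .replace(".onnx", "") \
    .replace("/", "_") \
    .replace("\\", "_") \
    .replace("-", "_")

assert test_name == "test_onnx_model_zoo_vision_classification_mnist_model_mnist_1_model"
# onnx.backend.test then appends the device suffix, yielding the
# "..._model_cpu" name seen in the import xfail list.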
diff --git a/ngraph/python/tests/test_onnx/utils/model_zoo_tester.py b/ngraph/python/tests/test_onnx/utils/model_zoo_tester.py
deleted file mode 100644 (file)
index eb599fd..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-# ******************************************************************************
-# Copyright 2018-2020 Intel Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ******************************************************************************
-
-import glob
-import os
-import shutil
-import tarfile
-import tempfile
-from collections import defaultdict
-from typing import Dict, List, Optional, Pattern, Set, Text, Type
-
-import onnx.backend.test
-from onnx.backend.base import Backend
-from onnx.backend.test.case.test_case import TestCase as OnnxTestCase
-from onnx.backend.test.runner import TestItem
-from retrying import retry
-from six.moves.urllib.request import urlopen, urlretrieve
-
-
-class ModelZooTestRunner(onnx.backend.test.BackendTest):
-    def __init__(self, backend, zoo_models, parent_module=None):
-        # type: (Type[Backend], List[Dict[str,str]], Optional[str]) -> None
-        self.backend = backend
-        self._parent_module = parent_module
-        self._include_patterns = set()  # type: Set[Pattern[Text]]
-        self._exclude_patterns = set()  # type: Set[Pattern[Text]]
-        self._test_items = defaultdict(dict)  # type: Dict[Text, Dict[Text, TestItem]]
-
-        for zoo_model in zoo_models:
-            test_name = "test_{}".format(zoo_model["model_name"])
-
-            test_case = OnnxTestCase(
-                name=test_name,
-                url=zoo_model["url"],
-                model_name=zoo_model["model_name"],
-                model_dir=None,
-                model=None,
-                data_sets=None,
-                kind="OnnxBackendRealModelTest",
-                rtol=zoo_model.get("rtol", 0.001),
-                atol=zoo_model.get("atol", 1e-07),
-            )
-            self._add_model_test(test_case, "Zoo")
-
-    @staticmethod
-    @retry
-    def _get_etag_for_url(url):  # type: (str) -> str
-        request = urlopen(url)
-        return request.info().get("ETag")
-
-    @staticmethod
-    def _read_etag_file(model_dir):  # type: (str) -> str
-        etag_file_path = os.path.join(model_dir, "source_tar_etag")
-        if os.path.exists(etag_file_path):
-            return open(etag_file_path).read()
-
-    @staticmethod
-    def _write_etag_file(model_dir, etag_value):  # type: (str, str) -> None
-        etag_file_path = os.path.join(model_dir, "source_tar_etag")
-        open(etag_file_path, "w").write(etag_value)
-
-    @classmethod
-    @retry
-    def prepare_model_data(cls, model_test):  # type: (OnnxTestCase) -> Text
-        onnx_home = os.path.expanduser(os.getenv("ONNX_HOME", os.path.join("~", ".onnx")))
-        models_dir = os.getenv("ONNX_MODELS", os.path.join(onnx_home, "models"))
-        model_dir = os.path.join(models_dir, model_test.model_name)  # type: Text
-        current_version_etag = ModelZooTestRunner._get_etag_for_url(model_test.url)
-
-        # If model already exists, check if it's the latest version by verifying cached Etag value
-        if os.path.exists(os.path.join(model_dir, "model.onnx")):
-            if not current_version_etag or current_version_etag == ModelZooTestRunner._read_etag_file(
-                model_dir
-            ):
-                return model_dir
-
-        # Download and extract model and data
-        download_file = tempfile.NamedTemporaryFile(delete=False)
-        temp_clean_dir = tempfile.mkdtemp()
-
-        try:
-            download_file.close()
-            print("\nStart downloading model {} from {}".format(model_test.model_name, model_test.url))
-            urlretrieve(model_test.url, download_file.name)
-            print("Done")
-
-            with tempfile.TemporaryDirectory() as temp_extract_dir:
-                with tarfile.open(download_file.name) as tar_file:
-                    tar_file.extractall(temp_extract_dir)
-
-                # Move model `.onnx` file from temp_extract_dir to temp_clean_dir
-                model_files = glob.glob(temp_extract_dir + "/**/*.onnx", recursive=True)
-                assert len(model_files) > 0, "Model file not found for {}".format(model_test.name)
-                model_file = model_files[0]
-                shutil.move(model_file, temp_clean_dir + "/model.onnx")
-
-                # Move extracted test data sets to temp_clean_dir
-                test_data_sets = glob.glob(temp_extract_dir + "/**/test_data_set_*", recursive=True)
-                test_data_sets.extend(glob.glob(temp_extract_dir + "/**/test_data_*.npz", recursive=True))
-                for test_data_set in test_data_sets:
-                    shutil.move(test_data_set, temp_clean_dir)
-
-                # Save Etag value to Etag file
-                ModelZooTestRunner._write_etag_file(temp_clean_dir, current_version_etag)
-
-                # Move temp_clean_dir to ultimate destination
-                shutil.move(temp_clean_dir, model_dir)
-
-        except Exception as e:
-            print("Failed to prepare data for model {}: {}".format(model_test.model_name, e))
-            os.remove(temp_clean_dir)
-            raise
-        finally:
-            os.remove(download_file.name)
-        return model_dir
index 888dc58..8ccb754 100644 (file)
@@ -27,7 +27,7 @@ commands=
   flake8 {posargs:src/ setup.py}
   flake8 --ignore=D100,D101,D102,D103,D104,D105,D107,W503 tests/  # ignore lack of docs in tests
   mypy --config-file=tox.ini {posargs:src/}
-  pytest --backend={env:NGRAPH_BACKEND} tests -v -n 20 -k 'not _cuda'
+  pytest --backend={env:NGRAPH_BACKEND} tests -v -n 20 -k 'not _cuda' --model_zoo_xfail
 
 [testenv:devenv]
 envdir = devenv