From 0887a7c0d6555cfa2157b78c699eb336182790a9 Mon Sep 17 00:00:00 2001
From: Nadezhda Ageeva
Date: Thu, 16 Jul 2020 13:53:43 +0300
Subject: [PATCH] Allow python benchmark_app to load onnx model (#1283)

---
 .../src/openvino/inference_engine/ie_api.pyx      |  8 ++++----
 .../ie_bridges/python/tests/conftest.py           |  4 ++++
 .../ie_bridges/python/tests/test_IECore.py        | 20 +++++++++++++++++--
 inference-engine/samples/benchmark_app/README.md  |  2 +-
 .../samples/benchmark_app/benchmark_app.hpp       |  2 +-
 inference-engine/samples/benchmark_app/main.cpp   |  2 +-
 inference-engine/tools/benchmark_tool/README.md   |  4 +++-
 tools/benchmark/benchmark.py                      | 17 +++++-----------
 tools/benchmark/main.py                           |  2 +-
 tools/benchmark/parameters.py                     | 23 ++++++++--------------
 tools/benchmark/utils/constants.py                | 14 --------------
 tools/benchmark/utils/utils.py                    | 17 ++--------------
 12 files changed, 48 insertions(+), 67 deletions(-)

diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
index 9fa91a0..fdfb1a8 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
@@ -245,7 +245,7 @@ cdef class IECore:
         return versions
 
     ## Reads a network from the Intermediate Representation (IR) and creates an `IENetwork`.
-    #  @param model: A `.xml` file of the IR or string with IR.
+    #  @param model: A `.xml`, `.onnx` or `.prototxt` model file or string with IR.
     #  @param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or
     #                  bytes with file content.
     #  @param init_from_buffer: Defines the way of how `model` and `weights` attributes are interpreted.
@@ -274,10 +274,10 @@ cdef class IECore:
             free(xml_buffer)
         else:
             weights_ = "".encode()
-            if isinstance(model, Path) and isinstance(weights, Path):
+            if isinstance(model, Path) and (isinstance(weights, Path) or not weights):
                 if not model.is_file():
                     raise Exception("Path to the model {} doesn't exist or it's a directory".format(model))
-                if model.suffix != ".onnx":
+                if model.suffix not in [".onnx", ".prototxt"]:
                     if not weights.is_file():
                         raise Exception("Path to the weights {} doesn't exist or it's a directory".format(weights))
                     weights_ = bytes(weights)
@@ -285,7 +285,7 @@ cdef class IECore:
         else:
             if not os.path.isfile(model):
                 raise Exception("Path to the model {} doesn't exist or it's a directory".format(model))
-            if not fnmatch(model, "*.onnx"):
+            if not (fnmatch(model, "*.onnx") or fnmatch(model, "*.prototxt")):
                 if not os.path.isfile(weights):
                     raise Exception("Path to the weights {} doesn't exist or it's a directory".format(weights))
                 weights_ = weights.encode()
diff --git a/inference-engine/ie_bridges/python/tests/conftest.py b/inference-engine/ie_bridges/python/tests/conftest.py
index ae86c64..3bb64d7 100644
--- a/inference-engine/ie_bridges/python/tests/conftest.py
+++ b/inference-engine/ie_bridges/python/tests/conftest.py
@@ -18,6 +18,10 @@ def model_onnx_path():
     test_onnx = os.path.join(path_to_repo, "models", "test_model", 'test_model.onnx')
     return test_onnx
 
+def model_prototxt_path():
+    path_to_repo = os.environ["MODELS_PATH"]
+    test_prototxt = os.path.join(path_to_repo, "models", "test_model", 'test_model.prototxt')
+    return test_prototxt
 
 def image_path():
     path_to_repo = os.environ["DATA_PATH"]
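
The ie_api.pyx change above is what lets callers pass an ONNX or prototxt model without a companion `.bin` weights file. A minimal sketch of the resulting API usage (the model file name is hypothetical):

    from openvino.inference_engine import IECore

    ie = IECore()
    # No 'weights' argument is needed for non-IR models: for *.onnx and
    # *.prototxt the weights-file check is skipped and an empty weights
    # path is passed through to the core.
    net = ie.read_network(model="model.onnx")
    print(net.input_info.keys())
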
diff --git a/inference-engine/ie_bridges/python/tests/test_IECore.py b/inference-engine/ie_bridges/python/tests/test_IECore.py
index 456710b..577d647 100644
--- a/inference-engine/ie_bridges/python/tests/test_IECore.py
+++ b/inference-engine/ie_bridges/python/tests/test_IECore.py
@@ -5,10 +5,12 @@ import numpy as np
 from pathlib import Path
 
 from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
-from conftest import model_path, plugins_path, model_onnx_path
+from conftest import model_path, plugins_path, model_onnx_path, model_prototxt_path
 
 test_net_xml, test_net_bin = model_path()
+test_net_onnx = model_onnx_path()
+test_net_prototxt = model_prototxt_path()
 plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
@@ -159,9 +161,23 @@ def test_read_network_as_path():
 
 def test_read_network_from_onnx():
     ie = IECore()
-    net = ie.read_network(model=model_onnx_path())
+    net = ie.read_network(model=test_net_onnx)
     assert isinstance(net, IENetwork)
 
+def test_read_network_from_onnx_as_path():
+    ie = IECore()
+    net = ie.read_network(model=Path(test_net_onnx))
+    assert isinstance(net, IENetwork)
+
+def test_read_network_from_prototxt():
+    ie = IECore()
+    net = ie.read_network(model=test_net_prototxt)
+    assert isinstance(net, IENetwork)
+
+def test_read_network_from_prototxt_as_path():
+    ie = IECore()
+    net = ie.read_network(model=Path(test_net_prototxt))
+    assert isinstance(net, IENetwork)
 
 def test_incorrect_xml():
     ie = IECore()
diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md
index 04cf851..031534c 100644
--- a/inference-engine/samples/benchmark_app/README.md
+++ b/inference-engine/samples/benchmark_app/README.md
@@ -69,7 +69,7 @@ Options:
     -h, --help                Print a usage message
     -i "<path>"               Optional. Path to a folder with images and/or binaries or to specific image or binary file.
-    -m "<path>"               Required. Path to an .xml file with a trained model.
+    -m "<path>"               Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob file with a trained compiled model.
     -d "<device>"             Optional. Specify a target device to infer on (the list of available devices is shown below). Default value is CPU.
                               Use "-d HETERO:<comma-separated_devices_list>" format to specify HETERO plugin.
                               Use "-d MULTI:<comma-separated_devices_list>" format to specify MULTI plugin.
diff --git a/inference-engine/samples/benchmark_app/benchmark_app.hpp b/inference-engine/samples/benchmark_app/benchmark_app.hpp
index 7ab2469..6f223f0 100644
--- a/inference-engine/samples/benchmark_app/benchmark_app.hpp
+++ b/inference-engine/samples/benchmark_app/benchmark_app.hpp
@@ -16,7 +16,7 @@ static const char help_message[] = "Print a usage message";
 static const char input_message[] = "Optional. Path to a folder with images and/or binaries or to specific image or binary file.";
 
 /// @brief message for model argument
-static const char model_message[] = "Required. Path to an .xml file with a trained model or to a .blob files with a trained compiled model";
+static const char model_message[] = "Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob file with a trained compiled model.";
 
 /// @brief message for execution mode
 static const char api_message[] = "Optional. Enable Sync/Async API. Default value is \"async\".";
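
The new tests also exercise `pathlib.Path` arguments, which the ie_api.pyx change accepts when no weights path is given. A sketch mirroring them (file name hypothetical):

    from pathlib import Path
    from openvino.inference_engine import IECore, IENetwork

    ie = IECore()
    # A Path object works the same as a plain string path:
    net = ie.read_network(model=Path("model.prototxt"))
    assert isinstance(net, IENetwork)
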
diff --git a/inference-engine/samples/benchmark_app/main.cpp b/inference-engine/samples/benchmark_app/main.cpp
index 87da8c5..5fc0e29 100644
--- a/inference-engine/samples/benchmark_app/main.cpp
+++ b/inference-engine/samples/benchmark_app/main.cpp
@@ -75,7 +75,7 @@ static void next_step(const std::string additional_info = "") {
         { 1, "Parsing and validating input arguments" },
         { 2, "Loading Inference Engine" },
         { 3, "Setting device configuration" },
-        { 4, "Reading the Intermediate Representation network" },
+        { 4, "Reading network files" },
         { 5, "Resizing network to match image sizes and given batch" },
         { 6, "Configuring input of the model" },
         { 7, "Loading the model to the device" },
diff --git a/inference-engine/tools/benchmark_tool/README.md b/inference-engine/tools/benchmark_tool/README.md
index 4bfed66..658f4df 100644
--- a/inference-engine/tools/benchmark_tool/README.md
+++ b/inference-engine/tools/benchmark_tool/README.md
@@ -69,7 +69,9 @@ Options:
                         Optional. Path to a folder with images and/or binaries
                         or to specific image or binary file.
   -m PATH_TO_MODEL, --path_to_model PATH_TO_MODEL
-                        Required. Path to an .xml file with a trained model.
+                        Required. Path to an .xml/.onnx/.prototxt file with a
+                        trained model or to a .blob file with a trained
+                        compiled model.
   -d TARGET_DEVICE, --target_device TARGET_DEVICE
                         Optional. Specify a target device to infer on: CPU,
                         GPU, FPGA, HDDL or MYRIAD.
diff --git a/tools/benchmark/benchmark.py b/tools/benchmark/benchmark.py
index 0764bbf..88cb6be 100644
--- a/tools/benchmark/benchmark.py
+++ b/tools/benchmark/benchmark.py
@@ -18,7 +18,7 @@ from datetime import datetime
 from statistics import median
 
 from openvino.inference_engine import IENetwork, IECore, get_version, StatusCode
-from .utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, GPU_DEVICE_NAME, BIN_EXTENSION
+from .utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
 from .utils.logging import logger
 from .utils.utils import get_duration_seconds
 from .utils.statistics_report import StatisticsReport
@@ -60,17 +60,10 @@ class Benchmark:
                 self.ie.set_config(config[device], device)
 
     def read_network(self, path_to_model: str):
-        xml_filename = os.path.abspath(path_to_model)
-        head, _ = os.path.splitext(xml_filename)
-        bin_filename = os.path.abspath(head + BIN_EXTENSION)
-
-        ie_network = self.ie.read_network(xml_filename, bin_filename)
-
-        input_info = ie_network.input_info
-
-        if not input_info:
-            raise AttributeError('No inputs info is provided')
-
+        model_filename = os.path.abspath(path_to_model)
+        head, ext = os.path.splitext(model_filename)
+        weights_filename = os.path.abspath(head + BIN_EXTENSION) if ext == XML_EXTENSION else ""
+        ie_network = self.ie.read_network(model_filename, weights_filename)
         return ie_network
 
     def load_network(self, ie_network: IENetwork, config = {}):
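
The reworked `Benchmark.read_network()` above derives a `.bin` weights path only for IR models. A standalone restatement of that extension logic (the helper name `resolve_weights` is made up for illustration):

    import os

    XML_EXTENSION = '.xml'
    BIN_EXTENSION = '.bin'

    def resolve_weights(path_to_model):
        # For IR, assume the weights live next to the .xml as a .bin file;
        # for .onnx/.prototxt/.blob, pass an empty weights path instead.
        head, ext = os.path.splitext(os.path.abspath(path_to_model))
        return head + BIN_EXTENSION if ext == XML_EXTENSION else ""

    # resolve_weights('/models/net.xml')  -> '/models/net.bin'
    # resolve_weights('/models/net.onnx') -> ''
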
diff --git a/tools/benchmark/main.py b/tools/benchmark/main.py
index 041e4c3..58c505b 100644
--- a/tools/benchmark/main.py
+++ b/tools/benchmark/main.py
@@ -5,7 +5,7 @@ from datetime import datetime
 from openvino.tools.benchmark.benchmark import Benchmark
 from openvino.tools.benchmark.parameters import parse_args
 from openvino.tools.benchmark.utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, \
-    GPU_DEVICE_NAME, MYRIAD_DEVICE_NAME, GNA_DEVICE_NAME, BIN_EXTENSION, BLOB_EXTENSION
+    GPU_DEVICE_NAME, MYRIAD_DEVICE_NAME, GNA_DEVICE_NAME, BLOB_EXTENSION
 from openvino.tools.benchmark.utils.inputs_filling import set_inputs
 from openvino.tools.benchmark.utils.logging import logger
 from openvino.tools.benchmark.utils.progress_bar import ProgressBar
diff --git a/tools/benchmark/parameters.py b/tools/benchmark/parameters.py
index abebf5e..2f6ec4a 100644
--- a/tools/benchmark/parameters.py
+++ b/tools/benchmark/parameters.py
@@ -1,7 +1,6 @@
 import sys,argparse
 from fnmatch import fnmatch
 
-from openvino.tools.benchmark.utils.constants import XML_EXTENSION_PATTERN, BLOB_EXTENSION_PATTERN
 from openvino.tools.benchmark.utils.utils import show_available_devices
 
 def str2bool(v):
@@ -12,15 +11,11 @@ def str2bool(v):
     else:
         raise argparse.ArgumentTypeError('Boolean value expected.')
 
-
-def validate_args(args):
-    if args.number_iterations is not None and args.number_iterations < 0:
-        raise Exception("Number of iterations should be positive (invalid -niter option value)")
-    if args.number_infer_requests and args.number_infer_requests < 0:
-        raise Exception("Number of inference requests should be positive (invalid -nireq option value)")
-    if not (fnmatch(args.path_to_model, XML_EXTENSION_PATTERN) or fnmatch(args.path_to_model, BLOB_EXTENSION_PATTERN)):
-        raise Exception('Path {} is not xml or blob file.')
-
+def check_positive(value):
+    ivalue = int(value)
+    if ivalue <= 0:
+        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
+    return ivalue
 
 class print_help(argparse.Action):
     def __call__(self, parser, namespace, values, option_string=None):
@@ -37,7 +32,7 @@ def parse_args():
                       help='Optional. '
                            'Path to a folder with images and/or binaries or to specific image or binary file.')
     args.add_argument('-m', '--path_to_model', type=str, required=True,
-                      help='Required. Path to an .xml file with a trained model or '
+                      help='Required. Path to an .xml/.onnx/.prototxt file with a trained model or '
                            'to a .blob file with a trained compiled model.')
     args.add_argument('-d', '--target_device', type=str, required=False, default='CPU',
                       help='Optional. Specify a target device to infer on (the list of available devices is shown below). '
@@ -52,10 +47,10 @@ def parse_args():
                            'kernels description.')
     args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
                       help='Optional. Enable using sync/async API. Default value is async.')
-    args.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
+    args.add_argument('-niter', '--number_iterations', type=check_positive, required=False, default=None,
                       help='Optional. Number of iterations. '
                            'If not specified, the number of iterations is calculated depending on a device.')
-    args.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=None,
+    args.add_argument('-nireq', '--number_infer_requests', type=check_positive, required=False, default=None,
                       help='Optional. Number of infer requests. Default value is determined automatically for device.')
     args.add_argument('-b', '--batch_size', type=int, required=False, default=0,
                       help='Optional. ' +
@@ -111,6 +106,4 @@ def parse_args():
                       help="Optional. Weight bits for quantization: 8 (I8) or 16 (I16) ")
 
     parsed_args = parser.parse_args()
-    validate_args(parsed_args)
-
     return parsed_args
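
The `check_positive` validator replaces the removed `validate_args()`: argparse now rejects non-positive `-niter`/`-nireq` values at parse time, and dropping the xml/blob extension check is what lets ONNX and prototxt paths through. A quick demonstration of the validator with a toy parser (not the tool's real parser):

    import argparse

    def check_positive(value):
        ivalue = int(value)
        if ivalue <= 0:
            raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
        return ivalue

    parser = argparse.ArgumentParser()
    parser.add_argument('-niter', type=check_positive)
    print(parser.parse_args(['-niter', '10']))  # Namespace(niter=10)
    # parser.parse_args(['-niter', '-1'])       # exits: -1 is an invalid positive int value
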
diff --git a/tools/benchmark/utils/constants.py b/tools/benchmark/utils/constants.py
index b00d2e4..1eb8255 100644
--- a/tools/benchmark/utils/constants.py
+++ b/tools/benchmark/utils/constants.py
@@ -29,9 +29,6 @@
 XML_EXTENSION = '.xml'
 BIN_EXTENSION = '.bin'
 BLOB_EXTENSION = '.blob'
 
-XML_EXTENSION_PATTERN = '*' + XML_EXTENSION
-BLOB_EXTENSION_PATTERN = '*' + BLOB_EXTENSION
-
 IMAGE_EXTENSIONS = ['JPEG', 'JPG', 'PNG', 'BMP']
 BINARY_EXTENSIONS = ['BIN']
@@ -45,14 +42,3 @@ DEVICE_DURATION_IN_SECS = {
     GNA_DEVICE_NAME: 60,
     UNKNOWN_DEVICE_TYPE: 120
 }
-
-DEVICE_NIREQ_ASYNC = {
-    CPU_DEVICE_NAME: 2,
-    GPU_DEVICE_NAME: 2,
-    VPU_DEVICE_NAME: 4,
-    MYRIAD_DEVICE_NAME: 4,
-    HDDL_DEVICE_NAME: 100,
-    FPGA_DEVICE_NAME: 3,
-    GNA_DEVICE_NAME: 1,
-    UNKNOWN_DEVICE_TYPE: 1
-}
diff --git a/tools/benchmark/utils/utils.py b/tools/benchmark/utils/utils.py
index 16c28bb..4a549b6 100644
--- a/tools/benchmark/utils/utils.py
+++ b/tools/benchmark/utils/utils.py
@@ -15,7 +15,7 @@
 """
 
 from openvino.inference_engine import IENetwork,IECore
-from .constants import DEVICE_DURATION_IN_SECS, UNKNOWN_DEVICE_TYPE, DEVICE_NIREQ_ASYNC, \
+from .constants import DEVICE_DURATION_IN_SECS, UNKNOWN_DEVICE_TYPE, \
     CPU_DEVICE_NAME, GPU_DEVICE_NAME
 from .inputs_filling import is_image
 from .logging import logger
@@ -38,7 +38,7 @@ def next_step(additional_info='', step_id=0):
         1: "Parsing and validating input arguments",
         2: "Loading Inference Engine",
         3: "Setting device configuration",
-        4: "Reading the Intermediate Representation network",
+        4: "Reading network files",
         5: "Resizing network to match image sizes and given batch",
         6: "Configuring input of the model",
         7: "Loading the model to the device",
@@ -110,19 +110,6 @@ def get_duration_in_secs(target_device):
     return duration
 
 
-def get_nireq(target_device):
-    nireq = 0
-    for device in DEVICE_NIREQ_ASYNC:
-        if device in target_device:
-            nireq = max(nireq, DEVICE_NIREQ_ASYNC[device])
-
-    if nireq == 0:
-        nireq = DEVICE_NIREQ_ASYNC[UNKNOWN_DEVICE_TYPE]
-        logger.warn('Default number of requests {} is used for unknown device {}'.format(nireq, target_device))
-
-    return nireq
-
-
 def parse_devices(device_string):
     if device_string in ['MULTI', 'HETERO']:
         return list()
-- 
2.7.4
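
Taken together, the patch lets the Python benchmark flow read a non-IR model end to end. A rough sketch of what the tool now does internally, assuming a hypothetical model file and the default CPU device:

    from openvino.inference_engine import IECore

    ie = IECore()
    net = ie.read_network("model.onnx", "")  # empty weights path for non-IR models
    exec_net = ie.load_network(net, "CPU")   # same load path as for IR models
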