return versions
## Reads a network from the Intermediate Representation (IR) and creates an `IENetwork`.
- # @param model: A `.xml` file of the IR or string with IR.
+ # @param model: A `.xml`, `.onnx` or `.prototxt` model file or string with IR.
# @param weights: A `.bin` file of the IR. Depending on `init_from_buffer` value, can be a string path or
# bytes with file content.
# @param init_from_buffer: Defines the way of how `model` and `weights` attributes are interpreted.
free(xml_buffer)
else:
weights_ = "".encode()
- if isinstance(model, Path) and isinstance(weights, Path):
+ if isinstance(model, Path) and (isinstance(weights, Path) or not weights):
if not model.is_file():
raise Exception("Path to the model {} doesn't exist or it's a directory".format(model))
- if model.suffix != ".onnx":
+ if model.suffix not in [ ".onnx", ".prototxt"]:
if not weights.is_file():
raise Exception("Path to the weights {} doesn't exist or it's a directory".format(weights))
weights_ = bytes(weights)
else:
if not os.path.isfile(model):
raise Exception("Path to the model {} doesn't exist or it's a directory".format(model))
- if not fnmatch(model, "*.onnx"):
+ if not (fnmatch(model, "*.onnx") or fnmatch(model, "*.prototxt")):
if not os.path.isfile(weights):
raise Exception("Path to the weights {} doesn't exist or it's a directory".format(weights))
weights_ = weights.encode()
test_onnx = os.path.join(path_to_repo, "models", "test_model", 'test_model.onnx')
return test_onnx
+def model_prototxt_path():
+    """Return the path to the test Caffe .prototxt model under $MODELS_PATH."""
+    path_to_repo = os.environ["MODELS_PATH"]
+    test_prototxt = os.path.join(path_to_repo, "models", "test_model", 'test_model.prototxt')
+    return test_prototxt
def image_path():
path_to_repo = os.environ["DATA_PATH"]
from pathlib import Path
from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
-from conftest import model_path, plugins_path, model_onnx_path
+from conftest import model_path, plugins_path, model_onnx_path, model_prototxt_path
test_net_xml, test_net_bin = model_path()
+test_net_onnx = model_onnx_path()
+test_net_prototxt = model_prototxt_path()
plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
def test_read_network_from_onnx():
+    """read_network() accepts an ONNX model given as a string path, no weights file."""
    ie = IECore()
-    net = ie.read_network(model=model_onnx_path())
+    net = ie.read_network(model=test_net_onnx)
    assert isinstance(net, IENetwork)
+def test_read_network_from_onnx_as_path():
+    """read_network() accepts an ONNX model given as a pathlib.Path, no weights file."""
+    ie = IECore()
+    net = ie.read_network(model=Path(test_net_onnx))
+    assert isinstance(net, IENetwork)
+
+def test_read_network_from_prototxt():
+    """read_network() accepts a Caffe .prototxt model given as a string path."""
+    ie = IECore()
+    net = ie.read_network(model=test_net_prototxt)
+    assert isinstance(net, IENetwork)
+
+def test_read_network_from_prototxt_as_path():
+    """read_network() accepts a Caffe .prototxt model given as a pathlib.Path."""
+    ie = IECore()
+    net = ie.read_network(model=Path(test_net_prototxt))
+    assert isinstance(net, IENetwork)
def test_incorrect_xml():
ie = IECore()
-h, --help Print a usage message
-i "<path>" Optional. Path to a folder with images and/or binaries or to specific image or binary file.
- -m "<path>" Required. Path to an .xml file with a trained model.
+    -m "<path>"               Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob file with a trained compiled model.
-d "<device>" Optional. Specify a target device to infer on (the list of available devices is shown below). Default value is CPU.
Use "-d HETERO:<comma-separated_devices_list>" format to specify HETERO plugin.
Use "-d MULTI:<comma-separated_devices_list>" format to specify MULTI plugin.
static const char input_message[] = "Optional. Path to a folder with images and/or binaries or to specific image or binary file.";
/// @brief message for model argument
-static const char model_message[] = "Required. Path to an .xml file with a trained model or to a .blob files with a trained compiled model";
+static const char model_message[] = "Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob files with a trained compiled model.";
/// @brief message for execution mode
static const char api_message[] = "Optional. Enable Sync/Async API. Default value is \"async\".";
{ 1, "Parsing and validating input arguments" },
{ 2, "Loading Inference Engine" },
{ 3, "Setting device configuration" },
- { 4, "Reading the Intermediate Representation network" },
+ { 4, "Reading network files" },
{ 5, "Resizing network to match image sizes and given batch" },
{ 6, "Configuring input of the model" },
{ 7, "Loading the model to the device" },
Optional. Path to a folder with images and/or binaries
or to specific image or binary file.
-m PATH_TO_MODEL, --path_to_model PATH_TO_MODEL
- Required. Path to an .xml file with a trained model.
+ Required. Path to an .xml/.onnx/.prototxt file with a
+ trained model or to a .blob file with a trained
+ compiled model.
-d TARGET_DEVICE, --target_device TARGET_DEVICE
Optional. Specify a target device to infer on: CPU,
GPU, FPGA, HDDL or MYRIAD.
from statistics import median
from openvino.inference_engine import IENetwork, IECore, get_version, StatusCode
-from .utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, GPU_DEVICE_NAME, BIN_EXTENSION
+from .utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
from .utils.logging import logger
from .utils.utils import get_duration_seconds
from .utils.statistics_report import StatisticsReport
self.ie.set_config(config[device], device)
    def read_network(self, path_to_model: str):
+        """Read a model file (.xml IR, .onnx or .prototxt) into an IENetwork.
+
+        For an .xml IR model the .bin weights file is looked up next to it
+        (same basename); for other formats no weights path is passed.
+        """
-        xml_filename = os.path.abspath(path_to_model)
-        head, _ = os.path.splitext(xml_filename)
-        bin_filename = os.path.abspath(head + BIN_EXTENSION)
-
-        ie_network = self.ie.read_network(xml_filename, bin_filename)
-
-        input_info = ie_network.input_info
-
-        if not input_info:
-            raise AttributeError('No inputs info is provided')
-
+        model_filename = os.path.abspath(path_to_model)
+        head, ext = os.path.splitext(model_filename)
+        weights_filename = os.path.abspath(head + BIN_EXTENSION) if ext == XML_EXTENSION else ""
+        ie_network = self.ie.read_network(model_filename, weights_filename)
+        # NOTE(review): this change drops the previous empty-input_info validation
+        # (AttributeError on models with no inputs) — confirm callers handle that case.
        return ie_network
def load_network(self, ie_network: IENetwork, config = {}):
from openvino.tools.benchmark.benchmark import Benchmark
from openvino.tools.benchmark.parameters import parse_args
from openvino.tools.benchmark.utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, \
- GPU_DEVICE_NAME, MYRIAD_DEVICE_NAME, GNA_DEVICE_NAME, BIN_EXTENSION, BLOB_EXTENSION
+ GPU_DEVICE_NAME, MYRIAD_DEVICE_NAME, GNA_DEVICE_NAME, BLOB_EXTENSION
from openvino.tools.benchmark.utils.inputs_filling import set_inputs
from openvino.tools.benchmark.utils.logging import logger
from openvino.tools.benchmark.utils.progress_bar import ProgressBar
import sys,argparse
from fnmatch import fnmatch
-from openvino.tools.benchmark.utils.constants import XML_EXTENSION_PATTERN, BLOB_EXTENSION_PATTERN
from openvino.tools.benchmark.utils.utils import show_available_devices
def str2bool(v):
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
-
-def validate_args(args):
- if args.number_iterations is not None and args.number_iterations < 0:
- raise Exception("Number of iterations should be positive (invalid -niter option value)")
- if args.number_infer_requests and args.number_infer_requests < 0:
- raise Exception("Number of inference requests should be positive (invalid -nireq option value)")
- if not (fnmatch(args.path_to_model, XML_EXTENSION_PATTERN) or fnmatch(args.path_to_model, BLOB_EXTENSION_PATTERN)):
- raise Exception('Path {} is not xml or blob file.')
-
+def check_positive(value):
+    """argparse ``type=`` callable: parse *value* as a strictly positive int.
+
+    Raises ArgumentTypeError for zero or negative values; a non-numeric
+    string raises ValueError, which argparse likewise reports as invalid.
+    """
+    ivalue = int(value)
+    if ivalue <= 0:
+        raise argparse.ArgumentTypeError("%s is an invalid positive int value" % value)
+    return ivalue
class print_help(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
help='Optional. '
'Path to a folder with images and/or binaries or to specific image or binary file.')
args.add_argument('-m', '--path_to_model', type=str, required=True,
- help='Required. Path to an .xml file with a trained model or '
+ help='Required. Path to an .xml/.onnx/.prototxt file with a trained model or '
'to a .blob file with a trained compiled model.')
args.add_argument('-d', '--target_device', type=str, required=False, default='CPU',
help='Optional. Specify a target device to infer on (the list of available devices is shown below). '
'kernels description.')
args.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
help='Optional. Enable using sync/async API. Default value is async.')
- args.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
+ args.add_argument('-niter', '--number_iterations', type=check_positive, required=False, default=None,
help='Optional. Number of iterations. '
'If not specified, the number of iterations is calculated depending on a device.')
- args.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=None,
+ args.add_argument('-nireq', '--number_infer_requests', type=check_positive, required=False, default=None,
help='Optional. Number of infer requests. Default value is determined automatically for device.')
args.add_argument('-b', '--batch_size', type=int, required=False, default=0,
help='Optional. ' +
help="Optional. Weight bits for quantization: 8 (I8) or 16 (I16) ")
parsed_args = parser.parse_args()
- validate_args(parsed_args)
-
return parsed_args
BIN_EXTENSION = '.bin'
BLOB_EXTENSION = '.blob'
-XML_EXTENSION_PATTERN = '*' + XML_EXTENSION
-BLOB_EXTENSION_PATTERN = '*' + BLOB_EXTENSION
-
IMAGE_EXTENSIONS = ['JPEG', 'JPG', 'PNG', 'BMP']
BINARY_EXTENSIONS = ['BIN']
GNA_DEVICE_NAME: 60,
UNKNOWN_DEVICE_TYPE: 120
}
-
-DEVICE_NIREQ_ASYNC = {
- CPU_DEVICE_NAME: 2,
- GPU_DEVICE_NAME: 2,
- VPU_DEVICE_NAME: 4,
- MYRIAD_DEVICE_NAME: 4,
- HDDL_DEVICE_NAME: 100,
- FPGA_DEVICE_NAME: 3,
- GNA_DEVICE_NAME: 1,
- UNKNOWN_DEVICE_TYPE: 1
-}
"""
from openvino.inference_engine import IENetwork,IECore
-from .constants import DEVICE_DURATION_IN_SECS, UNKNOWN_DEVICE_TYPE, DEVICE_NIREQ_ASYNC, \
+from .constants import DEVICE_DURATION_IN_SECS, UNKNOWN_DEVICE_TYPE, \
CPU_DEVICE_NAME, GPU_DEVICE_NAME
from .inputs_filling import is_image
from .logging import logger
1: "Parsing and validating input arguments",
2: "Loading Inference Engine",
3: "Setting device configuration",
- 4: "Reading the Intermediate Representation network",
+ 4: "Reading network files",
5: "Resizing network to match image sizes and given batch",
6: "Configuring input of the model",
7: "Loading the model to the device",
return duration
-def get_nireq(target_device):
- nireq = 0
- for device in DEVICE_NIREQ_ASYNC:
- if device in target_device:
- nireq = max(nireq, DEVICE_NIREQ_ASYNC[device])
-
- if nireq == 0:
- nireq = DEVICE_NIREQ_ASYNC[UNKNOWN_DEVICE_TYPE]
- logger.warn('Default number of requests {} is used for unknown device {}'.format(nireq, target_device))
-
- return nireq
-
-
def parse_devices(device_string):
if device_string in ['MULTI', 'HETERO']:
return list()