From: Mikhail Ryzhov Date: Tue, 17 Nov 2020 07:12:33 +0000 (+0300) Subject: Reduced usage of batch in python samples (#3104) X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=23e653858bc5e543afe17dcfdb6ce89047e96788;p=platform%2Fupstream%2Fdldt.git Reduced usage of batch in python samples (#3104) * Reduced usage of batch in python samples Excluded from hello_classification and object_detection samples --- diff --git a/inference-engine/ie_bridges/python/sample/hello_classification/hello_classification.py b/inference-engine/ie_bridges/python/sample/hello_classification/hello_classification.py index 091b463..0d7c011 100644 --- a/inference-engine/ie_bridges/python/sample/hello_classification/hello_classification.py +++ b/inference-engine/ie_bridges/python/sample/hello_classification/hello_classification.py @@ -30,9 +30,8 @@ def build_argparser(): args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.') args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.", required=True, type=str) - args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files", - required=True, - type=str, nargs="+") + args.add_argument("-i", "--input", help="Required. Path to image file.", + required=True, type=str) args.add_argument("-l", "--cpu_extension", help="Optional. Required for CPU custom layers. " "MKLDNN (CPU)-targeted custom layers. 
Absolute path to a shared library with the" @@ -69,7 +68,6 @@ def main(): log.info("Preparing input blobs") input_blob = next(iter(net.input_info)) out_blob = next(iter(net.outputs)) - net.batch_size = len(args.input) # Read and pre-process input images n, c, h, w = net.input_info[input_blob].input_data.shape @@ -81,7 +79,6 @@ def main(): image = cv2.resize(image, (w, h)) image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW images[i] = image - log.info("Batch size is {}".format(n)) # Loading model to the plugin log.info("Loading model to the plugin") diff --git a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py index 26bc7a6..84f3bad 100644 --- a/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py +++ b/inference-engine/ie_bridges/python/sample/ngraph_function_creation_sample/ngraph_function_creation_sample.py @@ -33,8 +33,7 @@ def build_argparser() -> ArgumentParser: args = parser.add_argument_group('Options') args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.') args.add_argument('-i', '--input', help='Required. Path to a folder with images or path to an image files', - required=True, - type=str, nargs="+") + required=True, type=str, nargs="+") args.add_argument('-m', '--model', help='Required. Path to file where weights for the network are located') args.add_argument('-d', '--device', help='Optional. 
Specify the target device to infer on; CPU, GPU, FPGA, HDDL, MYRIAD or HETERO: ' diff --git a/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py b/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py index 081a5fd..60d5e4e 100644 --- a/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py +++ b/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py @@ -112,7 +112,6 @@ def main(): for input_key in net.input_info: if len(net.input_info[input_key].layout) == 4: input_name = input_key - log.info("Batch size is {}".format(net.batch_size)) net.input_info[input_key].precision = 'U8' elif len(net.input_info[input_key].layout) == 2: input_info_name = input_key