if (help)
return -1;
- if (img_msg == NULL) {
- printf("Parameter -i is not set\n");
+ if (input_model == NULL) {
+ printf("Model is required but not set. Please set -m option.\n");
return -1;
}
- if (input_model == NULL) {
- printf("Parameter -m is not set \n");
+ if (img_msg == NULL) {
+ printf("Input is required but not set. Please set -i option.\n");
return -1;
}
/// @brief message for help argument
static const char *help_message = "Print a usage message.";
+/// @brief message for model argument
+static const char *model_message = "Required. Path to an .xml file with a trained model.";
+
/// @brief message for images argument
static const char *image_message = "Required. Path to an .bmp image.";
-/// @brief message for model argument
-static const char *model_message = "Required. Path to an .xml file with a trained model.";
-
/// @brief message for plugin argument
static const char *plugin_message = "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " \
"the sample will look for this plugin only";
printf("\nobject_detection_sample_ssd [OPTION]\n");
printf("Options:\n\n");
printf(" -h %s\n", help_message);
- printf(" -i \"<path>\" %s\n", image_message);
printf(" -m \"<path>\" %s\n", model_message);
+ printf(" -i \"<path>\" %s\n", image_message);
printf(" -l \"<absolute_path>\" %s\n", custom_cpu_library_message);
printf(" Or\n");
printf(" -c \"<absolute_path>\" %s\n", custom_cldnn_message);
optopt = c = argv[optind][sp];
if(c == ':' || (cp = strchr(opts, c)) == 0) {
ERR(": unrecognized option -- ", c);
+ showUsage();
if(argv[optind][++sp] == '\0') {
optind++;
sp = 1;
}
}
return(c);
-}
\ No newline at end of file
+}
Options:
-h, --help Print a usage message
+ -m "<path>" Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob file with a trained compiled model.
-i "<path>" Optional. Path to a folder with images and/or binaries or to specific image or binary file.
- -m "<path>" Required. Path to an .xml/.onnx/.prototxt file with a trained model or to a .blob files with a trained compiled model.
-d "<device>" Optional. Specify a target device to infer on (the list of available devices is shown below). Default value is CPU.
Use "-d HETERO:<comma-separated_devices_list>" format to specify HETERO plugin.
Use "-d MULTI:<comma-separated_devices_list>" format to specify MULTI plugin.
* On CPU:
```sh
- ./benchmark_app -m <ir_dir>/googlenet-v1.xml -d CPU -api async -i <INSTALL_DIR>/deployment_tools/demo/car.png --progress true
+ ./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/deployment_tools/demo/car.png -d CPU -api async --progress true
```
* On FPGA:
```sh
- ./benchmark_app -m <ir_dir>/googlenet-v1.xml -d HETERO:FPGA,CPU -api async -i <INSTALL_DIR>/deployment_tools/demo/car.png --progress true
+ ./benchmark_app -m <ir_dir>/googlenet-v1.xml -i <INSTALL_DIR>/deployment_tools/demo/car.png -d HETERO:FPGA,CPU -api async --progress true
```
The application outputs the number of executed iterations, total duration of execution, latency, and throughput.
std::cout << "Options:" << std::endl;
std::cout << std::endl;
std::cout << " -h, --help " << help_message << std::endl;
- std::cout << " -i \"<path>\" " << input_message << std::endl;
std::cout << " -m \"<path>\" " << model_message << std::endl;
+ std::cout << " -i \"<path>\" " << input_message << std::endl;
std::cout << " -d \"<device>\" " << target_device_message << std::endl;
std::cout << " -l \"<absolute_path>\" " << custom_cpu_library_message << std::endl;
std::cout << " Or" << std::endl;
Options:
-h Print a usage message.
- -i "<path>" Required. Path to a folder with images or path to an image files: a .ubyte file for LeNetand a .bmp file for the other networks.
-m "<path>" Required. Path to an .xml file with a trained model.
+ -i "<path>" Required. Path to a folder with images or path to image files: a .ubyte file for LeNet and a .bmp file for the other networks.
-l "<absolute_path>" Required for CPU custom layers.Absolute path to a shared library with the kernels implementation
Or
-c "<absolute_path>" Required for GPU custom kernels.Absolute path to the .xml file with kernels description
You can do inference of an image using a trained AlexNet network on FPGA with fallback to CPU using the following command:
```sh
-./classification_sample_async -i <path_to_image>/cat.bmp -m <path_to_model>/alexnet_fp32.xml -nt 5 -d HETERO:FPGA,CPU
+./classification_sample_async -m <path_to_model>/alexnet_fp32.xml -i <path_to_image>/cat.bmp -d HETERO:FPGA,CPU -nt 5
```
## Sample Output
/// @brief message for help argument
static const char help_message[] = "Print a usage message.";
+/// @brief message for model argument
+static const char model_message[] = "Required. Path to an .xml file with a trained model.";
+
/// @brief message for images argument
static const char image_message[] = "Required. Path to a folder with images or path to an image files: a .ubyte file for LeNet"\
"and a .bmp file for the other networks.";
-/// @brief message for model argument
-static const char model_message[] = "Required. Path to an .xml file with a trained model.";
-
/// @brief message for assigning cnn calculation to device
static const char target_device_message[] = "Optional. Specify the target device to infer on (the list of available devices is shown below). " \
"Default value is CPU. Sample will look for a suitable plugin for device specified.";
std::cout << "Options:" << std::endl;
std::cout << std::endl;
std::cout << " -h " << help_message << std::endl;
- std::cout << " -i \"<path>\" " << image_message << std::endl;
std::cout << " -m \"<path>\" " << model_message << std::endl;
+ std::cout << " -i \"<path>\" " << image_message << std::endl;
std::cout << " -l \"<absolute_path>\" " << custom_cpu_library_message << std::endl;
std::cout << " Or" << std::endl;
std::cout << " -c \"<absolute_path>\" " << custom_cldnn_message << std::endl;
}
slog::info << "Parsing input parameters" << slog::endl;
- if (FLAGS_i.empty()) {
- throw std::logic_error("Parameter -i is not set");
+ if (FLAGS_m.empty()) {
+ throw std::logic_error("Model is required but not set. Please set -m option.");
}
- if (FLAGS_m.empty()) {
- throw std::logic_error("Parameter -m is not set");
+ if (FLAGS_i.empty()) {
+ throw std::logic_error("Input is required but not set. Please set -i option.");
}
return true;
for (const auto& device : devices) {
std::cout << " " << device;
}
+ std::cout << std::endl;
}
try {
// ------------------------------ Parsing and validation of input args ---------------------------------
if (argc != 4) {
- tcout << "Usage : ./hello_classification <path_to_model> <path_to_image> <device_name>" << std::endl;
+ tcout << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <device_name>" << std::endl;
return EXIT_FAILURE;
}
try {
// ------------------------------ Parsing and validation of input args ---------------------------------
if (argc != 1) {
- std::cout << "Usage : ./hello_query_device" << std::endl;
+ std::cout << "Usage : " << argv[0] << std::endl;
return EXIT_FAILURE;
}
try {
// ------------------------------ Parsing and validation of input args ---------------------------------
if (argc != 5) {
- std::cout << "Usage : ./hello_reshape_ssd <path_to_model> <path_to_image> <device> <batch>"
+ std::cout << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <device> <batch>"
<< std::endl;
return EXIT_FAILURE;
}
Options:
-h Print a usage message.
- -m "<path>" Path to a .bin file with weights for the trained model
+ -m "<path>" Required. Path to a .bin file with weights for the trained model
-i "<path>" Required. Path to an image or folder with images
-d "<device>" Specify the target device to infer on it. See the list of available devices below. The sample looks for a suitable plugin for the specified device. The default value is CPU.
-nt "<integer>" Number of top results. The default value is 10.
For example, to do inference of an UByte image on a GPU run the following command:
```sh
-./ngraph_function_creation_sample -i <path_to_image> -m <path_to_weights_file> -d GPU
+./ngraph_function_creation_sample -m <path_to_weights_file> -i <path_to_image> -d GPU
```
## Sample Output
throw std::logic_error("Incorrect value for nt argument. It should be greater than 0 and less than 10.");
}
+ if (FLAGS_m.empty()) {
+ throw std::logic_error("Path to a .bin file with weights for the trained model is required but not set. Please set -m option.");
+ }
+
+ if (FLAGS_i.empty()) {
+ throw std::logic_error("Path to an image is required but not set. Please set -i option.");
+ }
+
return true;
}
static const char input_message[] = "Required. Path to image or folder with images";
/// @brief message for model argument
-static const char model_message[] = "Path to a .bin file with weights for the trained model.";
+static const char model_message[] = "Required. Path to a .bin file with weights for the trained model.";
/// @brief message for assigning cnn calculation to device
static const char target_device_message[] = "Specify the target device to infer on it . See the list of available devices below. " \
Options:
-h Print a usage message.
- -i "<path>" Required. Path to an .bmp image.
-m "<path>" Required. Path to an .xml file with a trained model.
+ -i "<path>" Required. Path to an .bmp image.
-l "<absolute_path>" Required for CPU custom layers. Absolute path to a shared library with the kernels implementations.
Or
-c "<absolute_path>" Required for GPU custom kernels. Absolute path to the .xml file with the kernels descriptions.
For example, to do inference on a CPU with the OpenVINO™ toolkit person detection SSD models, run one of the following commands:
```sh
-./object_detection_sample_ssd -i <path_to_image>/inputImage.bmp -m <path_to_model>person-detection-retail-0013.xml -d CPU
+./object_detection_sample_ssd -m <path_to_model>person-detection-retail-0013.xml -i <path_to_image>/inputImage.bmp -d CPU
```
or
```sh
-./object_detection_sample_ssd -i <path_to_image>/inputImage.jpg -m <path_to_model>person-detection-retail-0002.xml -d CPU
+./object_detection_sample_ssd -m <path_to_model>person-detection-retail-0002.xml -i <path_to_image>/inputImage.jpg -d CPU
```
## Sample Output
slog::info << "Parsing input parameters" << slog::endl;
- if (FLAGS_i.empty()) {
- throw std::logic_error("Parameter -i is not set");
+ if (FLAGS_m.empty()) {
+ throw std::logic_error("Model is required but not set. Please set -m option.");
}
- if (FLAGS_m.empty()) {
- throw std::logic_error("Parameter -m is not set");
+ if (FLAGS_i.empty()) {
+ throw std::logic_error("Input is required but not set. Please set -i option.");
}
return true;
/// @brief message for help argument
static const char help_message[] = "Print a usage message.";
-/// @brief message for images argument
-static const char image_message[] = "Required. Path to an .bmp image.";
-
/// @brief message for model argument
static const char model_message[] = "Required. Path to an .xml file with a trained model.";
+/// @brief message for images argument
+static const char image_message[] = "Required. Path to an .bmp image.";
+
/// @brief message for plugin argument
static const char plugin_message[] = "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " \
"the sample will look for this plugin only";
std::cout << "Options:" << std::endl;
std::cout << std::endl;
std::cout << " -h " << help_message << std::endl;
- std::cout << " -i \"<path>\" " << image_message << std::endl;
std::cout << " -m \"<path>\" " << model_message << std::endl;
+ std::cout << " -i \"<path>\" " << image_message << std::endl;
std::cout << " -l \"<absolute_path>\" " << custom_cpu_library_message << std::endl;
std::cout << " Or" << std::endl;
std::cout << " -c \"<absolute_path>\" " << custom_cldnn_message << std::endl;
return false;
}
- if (FLAGS_i.empty()) {
- throw std::logic_error("Parameter -i is not set");
+ if (FLAGS_m.empty()) {
+ throw std::logic_error("Model is required but not set. Please set -m option.");
}
- if (FLAGS_m.empty()) {
- throw std::logic_error("Parameter -m is not set");
+ if (FLAGS_i.empty()) {
+ throw std::logic_error("Input is required but not set. Please set -i option.");
}
return true;