iteration = 1;
}
+	MachineCapacity *Cap = GetMachineCapacity();
+	if (Cap == NULL) {
+		std::cout << "Failed to get machine capacity" << std::endl;
+		return;
+	}
+
+	// If the current machine doesn't support the inference engine then skip this test.
+	if (Cap->avaliable == false) {
+		return;
+	}
+
+	// If the current machine doesn't support OpenCL then skip inference on the GPU.
+	if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+		return;
+	}
+
std::string test_name;
switch (test_type) {
case TEST_IMAGE_CLASSIFICATION:
iteration = 1;
}
+	MachineCapacity *Cap = GetMachineCapacity();
+	if (Cap == NULL) {
+		std::cout << "Failed to get machine capacity" << std::endl;
+		return;
+	}
+
+	// If the current machine doesn't support the inference engine then skip this test.
+	if (Cap->avaliable == false) {
+		return;
+	}
+
+	// If the current machine doesn't support OpenCL then skip inference on the GPU.
+	if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+		return;
+	}
+
std::string test_name;
switch (test_type) {
case TEST_IMAGE_CLASSIFICATION:
iteration = 1;
}
+	MachineCapacity *Cap = GetMachineCapacity();
+	if (Cap == NULL) {
+		std::cout << "Failed to get machine capacity" << std::endl;
+		return;
+	}
+
+	// If the current machine doesn't support the inference engine then skip this test.
+	if (Cap->avaliable == false) {
+		return;
+	}
+
+	// If the current machine doesn't support OpenCL then skip inference on the GPU.
+	if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+		return;
+	}
+
std::string test_name;
switch (test_type) {
case TEST_IMAGE_CLASSIFICATION:
#include <unistd.h>
#include <queue>
#include <algorithm>
+#include <iostream>
+#include <fstream>
#include "gtest/gtest.h"
#include "inference_engine_common_impl.h"
#include "inference_engine_test_common.h"
+// TODO: The device tree file below is not available on the TM1 board,
+// so find another way to get the machine ID (see the sketch after the Machine_Idx table below).
+#define MACHINE_MODEL_PATH "/sys/firmware/devicetree/base/model"
+
static std::map<std::string, int> Model_Formats = {
{ "caffemodel", INFERENCE_MODEL_CAFFE }, { "pb", INFERENCE_MODEL_TF },
{ "tflite", INFERENCE_MODEL_TFLITE }, { "t7", INFERENCE_MODEL_TORCH },
{ "onnx", INFERENCE_MODEL_ONNX }
};
+static MachineCapacity MachineCap[] = {
+	{ false, false, false }, // MIN
+	{ true, false, false }, // TM1
+	{ true, true, false }, // TM4
+	{ true, false, false }, // RPI4
+	{ true, true, false }, // ODROID
+	{ true, false, true }, // VIM3
+	{ false, false, false } // MAX
+};
+
+static std::map<std::string, int> Machine_Idx = {
+	{ "Raspberry Pi 4 Model B", MACHINE_NAME_RPI4 },
+	{ "Khadas VIM3", MACHINE_NAME_VIM3 },
+	{ "Hardkernel Odroid XU4", MACHINE_NAME_ODROID },
+	{ "Samsung BEYOND1LTE EUR OPEN 22 board based on EXYNOS9820", MACHINE_NAME_TM4 }
+	// TODO.
+};
+
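+// NOTE: A rough sketch of an alternative way to resolve the machine ID on boards
+// without the device tree model file, assuming the Tizen capi-system-info API
+// (<system_info.h>) and its "http://tizen.org/system/model_name" key are available
+// to the test. The helper name below is illustrative only.
+#include <system_info.h>
+#include <cstdlib>
+
+static int GetMachineIdFromSystemInfo(void)
+{
+	char *model_name = NULL;
+	int machine_id = -1;
+
+	// system_info_get_platform_string() allocates the result, which must be free()'d.
+	if (system_info_get_platform_string("http://tizen.org/system/model_name",
+										&model_name) != SYSTEM_INFO_ERROR_NONE)
+		return -1;
+
+	std::map<std::string, int>::iterator key = Machine_Idx.find(model_name);
+	if (key != Machine_Idx.end())
+		machine_id = key->second;
+
+	free(model_name);
+
+	return machine_id;
+}
+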
+MachineCapacity *GetMachineCapacity(void)
+{
+	std::ifstream readFile;
+	int machine_id = -1;
+
+	readFile.open(MACHINE_MODEL_PATH);
+	if (readFile.is_open()) {
+		char buf[256] = { 0, };
+
+		// Map the device tree model string to a machine index. If the model is
+		// unknown, machine_id stays -1 and NULL is returned below.
+		readFile.getline(buf, 256);
+		std::string machine_name(buf);
+		std::map<std::string, int>::iterator key = Machine_Idx.find(machine_name);
+		machine_id = key != Machine_Idx.end() ? key->second : -1;
+
+		readFile.close();
+	}
+
+	if (machine_id == -1) {
+		return NULL;
+	}
+
+	// TODO: At runtime we have to check whether GPU or NPU acceleration is supported
+	// on the current platform, and then update has_gpu or has_npu of MachineCap
+	// accordingly (see the sketch after this function).
+
+	return &MachineCap[machine_id];
+}
+
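+// NOTE: A rough sketch of the runtime GPU check mentioned in the TODO above, assuming
+// the OpenCL ICD loader (<CL/cl.h>, linked with -lOpenCL) is available on the target.
+// It only probes whether at least one OpenCL GPU device is present; the helper name
+// is illustrative only.
+#include <CL/cl.h>
+#include <vector>
+
+static bool HasOpenCLGpu(void)
+{
+	cl_uint num_platforms = 0;
+
+	// Ask how many OpenCL platforms are installed.
+	if (clGetPlatformIDs(0, NULL, &num_platforms) != CL_SUCCESS || num_platforms == 0)
+		return false;
+
+	std::vector<cl_platform_id> platforms(num_platforms);
+	clGetPlatformIDs(num_platforms, platforms.data(), NULL);
+
+	// Check each platform for at least one GPU device.
+	for (cl_platform_id platform : platforms) {
+		cl_uint num_gpus = 0;
+
+		if (clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &num_gpus) == CL_SUCCESS &&
+				num_gpus > 0)
+			return true;
+	}
+
+	return false;
+}
+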
int GetModelInfo(std::vector<std::string> &model_paths,
std::vector<std::string> &models)
{