test: Perform test cases depending on current HW capability 43/235743/2
author Inki Dae <inki.dae@samsung.com>
Tue, 9 Jun 2020 07:04:40 +0000 (16:04 +0900)
committer Inki Dae <inki.dae@samsung.com>
Tue, 9 Jun 2020 09:37:27 +0000 (18:37 +0900)
This patch adds a new feature which detects the machine ID at runtime
and then runs the test cases properly depending on whether the current
platform is able to use a GPU or NPU for inference.

The Linux kernel exposes device tree sysfs files which describe HW
information, so this patch reads the /sys/firmware/devicetree/base/model
file to get the machine name at runtime.

Based on this information, the test case and profiler applications
decide whether inference should be performed on a given accelerator -
GPU or NPU - according to a pre-defined HW table which describes what
kinds of HW accelerators can be used for inference.
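
As an illustration, the sketch below shows the detection flow that the
new GetMachineCapacity() helper in inference_engine_test_common.cpp
implements: read the device tree model string, look it up in a
name-to-capability table, and let each test case skip itself when the
required accelerator is missing. This is a simplified, self-contained
sketch of the approach only; the actual table contents and struct
layout are in the diff below.

    // Simplified sketch of the runtime HW detection added by this patch.
    // Path, fields and table entries mirror the diff below; illustrative only.
    #include <fstream>
    #include <map>
    #include <string>

    struct MachineCapacity {
            bool avaliable;   // spelling follows the committed struct
            bool has_gpu;
            bool has_npu;
    };

    static std::map<std::string, MachineCapacity> capability_table = {
            { "Raspberry Pi 4 Model B", { true, false, false } },
            { "Khadas VIM3",            { true, false, true  } },
    };

    // Returns nullptr for an unknown machine, in which case a test case
    // simply returns without running the inference.
    const MachineCapacity *DetectCapacity(void)
    {
            std::ifstream model("/sys/firmware/devicetree/base/model");
            char buf[256] = { 0, };

            if (!model.is_open() || !model.getline(buf, sizeof(buf)))
                    return nullptr;

            // The kernel appends a trailing NUL; constructing a std::string
            // from the char buffer stops there, as the committed code does.
            std::string name(buf);

            auto it = capability_table.find(name);
            return it != capability_table.end() ? &it->second : nullptr;
    }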

Change-Id: If052c9860dbb72f36c73baf6eb52f627bdde08a9
Signed-off-by: Inki Dae <inki.dae@samsung.com>
test/src/inference_engine_profiler.cpp
test/src/inference_engine_tc.cpp
test/src/inference_engine_test_common.cpp
test/src/inference_engine_test_common.h

index 48be58eb7d97e195716e53694ca217281bba6322..736f1fe0fd09f8be34fec37ec8937d1da99732af 100644 (file)
@@ -65,6 +65,22 @@ TEST_P(InferenceEngineTfliteTest, Inference)
                iteration = 1;
        }
 
+       MachineCapacity *Cap = GetMachineCapacity();
+       if (Cap == NULL) {
+               std::cout << "Failed to get machine capacity" << std::endl;
+               return;
+       }
+
+       // If current machine doesn't support inference engine then skip this test.
+       if (Cap->avaliable == false) {
+               return;
+       }
+
+       // If current machine doesn't support OpenCL then skip the inference on GPU.
+       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+               return;
+       }
+
        std::string test_name;
        switch (test_type) {
        case TEST_IMAGE_CLASSIFICATION:
@@ -244,6 +260,22 @@ TEST_P(InferenceEngineCaffeTest, Inference)
                iteration = 1;
        }
 
+       MachineCapacity *Cap = GetMachineCapacity();
+       if (Cap == NULL) {
+               std::cout << "Failed to get machine capacity" << std::endl;
+               return;
+       }
+
+       // If current machine doesn't support inference engine then skip this test.
+       if (Cap->avaliable == false) {
+               return;
+       }
+
+       // If current machine doesn't support OpenCL then skip the inference on GPU.
+       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+               return;
+       }
+
        std::string test_name;
        switch (test_type) {
        case TEST_IMAGE_CLASSIFICATION:
@@ -425,6 +457,22 @@ TEST_P(InferenceEngineDldtTest, Inference)
                iteration = 1;
        }
 
+       MachineCapacity *Cap = GetMachineCapacity();
+       if (Cap == NULL) {
+               std::cout << "Failed to get machine capacity" << std::endl;
+               return;
+       }
+
+       // If current machine doesn't support inference engine then skip this test.
+       if (Cap->avaliable == false) {
+               return;
+       }
+
+       // If current machine doesn't support OpenCL then skip the inference on GPU.
+       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+               return;
+       }
+
        std::string test_name;
        switch (test_type) {
        case TEST_IMAGE_CLASSIFICATION:
index 37153ecfc5b85f339fc48b4281ca20de228af585..cdffce70957f462f09ecd43f3a1569693c556763 100644 (file)
@@ -518,6 +518,22 @@ TEST_P(InferenceEngineTestCase_G6, Inference_P)
                iteration = 1;
        }
 
+       MachineCapacity *Cap = GetMachineCapacity();
+       if (Cap == NULL) {
+               std::cout << "Failed to get machine capacity" << std::endl;
+               return;
+       }
+
+       // If current machine doesn't support inference engine then skip this test.
+       if (Cap->avaliable == false) {
+               return;
+       }
+
+       // If current machine doesn't support OpenCL then skip the inference on GPU.
+       if (target_devices == INFERENCE_TARGET_GPU && Cap->has_gpu == false) {
+               return;
+       }
+
        std::string test_name;
        switch (test_type) {
        case TEST_IMAGE_CLASSIFICATION:
index 4bc63c3e8e7eb40f381b7f89dabe9da848c410a7..1780b95139dd1dace61304e0cfb76aff0053c03b 100644 (file)
@@ -21,6 +21,8 @@
 #include <unistd.h>
 #include <queue>
 #include <algorithm>
+#include <iostream>
+#include <fstream>
 
 #include "gtest/gtest.h"
 
 #include "inference_engine_common_impl.h"
 #include "inference_engine_test_common.h"
 
+// TODO. Below device tree file is not available on TM1 board.
+//              So find another way to get Machine ID.
+#define MACHINE_MODEL_PATH     "/sys/firmware/devicetree/base/model"
+
 static std::map<std::string, int> Model_Formats = {
        { "caffemodel", INFERENCE_MODEL_CAFFE }, { "pb", INFERENCE_MODEL_TF },
        { "tflite", INFERENCE_MODEL_TFLITE },   { "t7", INFERENCE_MODEL_TORCH },
@@ -35,6 +41,51 @@ static std::map<std::string, int> Model_Formats = {
        { "onnx", INFERENCE_MODEL_ONNX }
 };
 
+static MachineCapacity MachineCap[] = {
+       { false, false, false },    // MIN
+       { true, false, false },     // TM1
+       { true, true, false },      // TM4
+       { true, false, false },     // RPI4
+       { true, true, false },      // ODROID
+       { true, false, true },      // VIM3
+       { false, false, false }     // MAX
+};
+
+static std::map<std::string, int> Machine_Idx = {
+       { "Raspberry Pi 4 Model B", MACHINE_NAME_RPI4 },
+       { "Khadas VIM3", MACHINE_NAME_VIM3 },
+       { "Hardkernel Odroid XU4", MACHINE_NAME_ODROID },
+       { "Samsung BEYOND1LTE EUR OPEN 22 board based on EXYNOS9820", MACHINE_NAME_TM4 }
+       // TODO.
+};
+
+MachineCapacity *GetMachineCapacity(void)
+{
+       std::ifstream readFile;
+       int machine_id = -1;
+
+       readFile.open(MACHINE_MODEL_PATH);
+       if (readFile.is_open()) {
+               char buf[256] = { 0, };
+
+               readFile.getline(buf, 256);
+               std::string machine_name(buf);
+               std::map<std::string, int>::iterator key = Machine_Idx.find(machine_name);
+               machine_id = key != Machine_Idx.end() ? key->second : -1;
+       }
+
+       readFile.close();
+
+       if (machine_id == -1) {
+               return NULL;
+       }
+
+       // TODO. At runtime, we have to check whether GPU or NPU acceleration is supported on the current platform,
+       //               and then update has_gpu or has_npu in MachineCap accordingly.
+
+       return &MachineCap[machine_id];
+}
+
 int GetModelInfo(std::vector<std::string> &model_paths,
                                 std::vector<std::string> &models)
 {
index b9d3263bbefaae630febbe0797e7b44ba62c6e30..1605d2e3d303e2584dbbba8abd634e26aba2fd55 100644 (file)
@@ -27,6 +27,16 @@ static std::map<int, std::string> Target_Formats = {
        { INFERENCE_TARGET_CUSTOM, "custom" }
 };
 
+enum {
+       MACHINE_NAME_MIN = 0,
+       MACHINE_NAME_TM1,
+       MACHINE_NAME_TM4,
+       MACHINE_NAME_RPI4,
+       MACHINE_NAME_ODROID,
+       MACHINE_NAME_VIM3,
+       MACHINE_NAME_MAX,
+};
+
 enum {
        TEST_IMAGE_CLASSIFICATION = 0,
        TEST_OBJECT_DETECTION,
@@ -35,6 +45,15 @@ enum {
        TEST_POSE_ESTIMATION
 };
 
+typedef struct _MachineCapacity {
+       bool avaliable;
+       bool has_gpu;
+       bool has_npu;
+       // TODO.
+} MachineCapacity;
+
+MachineCapacity *GetMachineCapacity(void);
+
 int GetModelInfo(std::vector<std::string> &model_paths,
                                 std::vector<std::string> &models);