Change file name suffix from "nnstreamer" to "mlapi"
author    Inki Dae <inki.dae@samsung.com>
          Tue, 2 Jun 2020 09:36:54 +0000 (18:36 +0900)
committer Inki Dae <inki.dae@samsung.com>
          Tue, 2 Jun 2020 09:36:54 +0000 (18:36 +0900)
Signed-off-by: Inki Dae <inki.dae@samsung.com>
CMakeLists.txt
src/inference_engine_mlapi.cpp [moved from src/inference_engine_nnstreamer.cpp with 96% similarity]
src/inference_engine_mlapi_private.h [moved from src/inference_engine_nnstreamer_private.h with 100% similarity]

index d7aab7a..010a06f 100644
@@ -1,6 +1,6 @@
 
 CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
-SET(fw_name "inference-engine-nnstreamer")
+SET(fw_name "inference-engine-mlapi")
 
 PROJECT(${fw_name})
 
similarity index 96%
rename from src/inference_engine_nnstreamer.cpp
rename to src/inference_engine_mlapi.cpp
index ddfb784..4a418ec 100644
@@ -15,7 +15,7 @@
  */
 
 #include <inference_engine_error.h>
-#include "inference_engine_nnstreamer_private.h"
+#include "inference_engine_mlapi_private.h"
 
 #include <fstream>
 #include <iostream>
@@ -159,7 +159,7 @@ int InferenceMLAPI::GetInputTensorBuffers(std::vector<inference_engine_tensor_bu
 {
     LOGI("ENTER");
 
-       // TODO. Implement this function according to a given nnstreamer backend properly.
+       // TODO. Implement this function according to a given ML Single API backend properly.
 
     LOGI("LEAVE");
 
@@ -170,7 +170,7 @@ int InferenceMLAPI::GetOutputTensorBuffers(std::vector<inference_engine_tensor_b
 {
     LOGI("ENTER");
 
-       // Output tensor buffers will be allocated by a backend plugin of nnstreamer
+       // Output tensor buffers will be allocated by a backend plugin of ML Single API of nnstreamer
        // So add a null tensor buffer object. This buffer will be updated at Run callback.
 
        // Caution. this tensor buffer will be checked by upper framework to verity if
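As a reference for the Run-callback note above, a minimal sketch (not part of this commit) of how the null output buffer could be filled once invocation through the ML Single API completes; the names mSingle, in_data and buffers are assumed members/arguments, not the actual ones:

#include <nnstreamer.h>
#include <nnstreamer-single.h>

// Assumed: mSingle is an ml_single_h, in_data an ml_tensors_data_h,
// buffers a std::vector<inference_engine_tensor_buffer>& holding one null entry.
ml_tensors_data_h out_data = NULL;
int err = ml_single_invoke(mSingle, in_data, &out_data);
if (err != ML_ERROR_NONE) {
    // map err to an inference_engine_error code here
}

void *raw = NULL;
size_t size = 0;
// Only tensor index 0 is shown; a real backend would iterate over every output tensor.
if (ml_tensors_data_get_tensor_data(out_data, 0, &raw, &size) == ML_ERROR_NONE) {
    buffers[0].buffer = raw;
    buffers[0].size = size;
}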
@@ -276,7 +276,7 @@ int InferenceMLAPI::GetOutputLayerProperty(inference_engine_layer_property &prop
 
        inference_engine_tensor_info tensor_info;
 
-       // TODO. Set tensor info from a given nnstreamer backend instead of fixed one.
+       // TODO. Set tensor info from a given ML Single API of nnstreamer backend instead of fixed one.
 
        tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16;
        tensor_info.shape = { 1, 1001 };
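A possible way to resolve the TODO above, sketched against the ML Single API rather than taken from this commit; mSingle is an assumed ml_single_h member and error handling is trimmed:

#include <nnstreamer.h>
#include <nnstreamer-single.h>

ml_tensors_info_h out_info = NULL;
if (ml_single_get_output_info(mSingle, &out_info) == ML_ERROR_NONE) {
    unsigned int count = 0;
    ml_tensors_info_get_count(out_info, &count);

    for (unsigned int i = 0; i < count; ++i) {
        ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
        ml_tensor_dimension dim;  // unsigned int[ML_TENSOR_RANK_LIMIT]

        ml_tensors_info_get_tensor_type(out_info, i, &type);
        ml_tensors_info_get_tensor_dimension(out_info, i, dim);
        // Convert type/dim to inference_engine_tensor_info here instead of
        // the fixed FLOAT16 / { 1, 1001 } values above.
    }
    ml_tensors_info_destroy(out_info);
}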
@@ -301,7 +301,7 @@ int InferenceMLAPI::SetInputLayerProperty(inference_engine_layer_property &prope
     mDesignated_inputs.clear();
     std::vector<std::string>().swap(mDesignated_inputs);
 
-       // TODO. Request input property information to a given nnstreamer backend,
+       // TODO. Request input property information to a given ML Single API of nnstreamer backend,
        // and set it instead of user-given one,
 
     mDesignated_inputs = property.layer_names;
@@ -325,7 +325,7 @@ int InferenceMLAPI::SetOutputLayerProperty(inference_engine_layer_property &prop
     mDesignated_outputs.clear();
     std::vector<std::string>().swap(mDesignated_outputs);
 
-       // TODO. Request output property information to a given nnstreamer backend,
+       // TODO. Request output property information to a given ML Single API of nnstreamer backend,
        // and set it instead of user-given one,
 
     mDesignated_outputs = property.layer_names;
@@ -345,7 +345,7 @@ int InferenceMLAPI::GetBackendCapacity(inference_engine_capacity *capacity)
         return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
     }
 
-       // TODO. flag supported accel device types according to a given nnstreamer backend.
+       // TODO. flag supported accel device types according to a given ML Single API of nnstreamer backend.
     capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM;
 
     LOGI("LEAVE");