#include <armnnTfLiteParser/ITfLiteParser.hpp>
#include <armnnCaffeParser/ICaffeParser.hpp>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api_filter.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_conf.h>
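+/* nnstreamer_log.h supplies the ml_loge/ml_logw/ml_logd macros used below.
+ * Unlike bare g_critical/g_warning, they route through NNStreamer's own
+ * logging backend (dlog on Tizen, GLib logging elsewhere) -- a best-effort
+ * summary; see the header itself for the exact mapping. */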
ArmNNCore::init (const GstTensorFilterProperties * prop)
{
if (loadModel (prop)) {
- g_critical ("Failed to load model\n");
+ ml_loge ("Failed to load model\n");
return -1;
}
if (setInputTensorProp ()) {
- g_critical ("Failed to initialize input tensor\n");
+ ml_loge ("Failed to initialize input tensor\n");
return -2;
}
if (setOutputTensorProp ()) {
- g_critical ("Failed to initialize output tensor\n");
+ ml_loge ("Failed to initialize output tensor\n");
return -3;
}
return 0;
armnn::Status status;
if (!g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
- g_critical ("the file of model_path (%s) is not valid (not regular)\n",
+ ml_loge ("the file of model_path (%s) is not valid (not regular)\n",
model_path);
return -EINVAL;
}
throw;
}
catch (const std::runtime_error & re) {
- g_critical ("Runtime error while loading the network: %s", re.what ());
+ ml_loge ("Runtime error while loading the network: %s", re.what ());
return -EINVAL;
}
catch (const std::exception & ex) {
- g_critical ("Exception while loading the network : %s", ex.what ());
+ ml_loge ("Exception while loading the network : %s", ex.what ());
return -EINVAL;
}
catch (...) {
- g_critical ("Unknown exception while loading the network");
+ ml_loge ("Unknown exception while loading the network");
return -EINVAL;
}
}
/** Supported with tf, tflite and caffe */
return _NNS_FLOAT32;
case armnn::DataType::Float16:
- g_warning ("Unsupported armnn datatype Float16.");
+ ml_logw ("Unsupported armnn datatype Float16.");
break;
case armnn::DataType::QuantisedAsymm8:
/** Supported with tflite */
return _NNS_UINT8;
case armnn::DataType::Boolean:
- g_warning ("Unsupported armnn datatype Boolean.");
+ ml_logw ("Unsupported armnn datatype Boolean.");
break;
case armnn::DataType::QuantisedSymm16:
- g_warning ("Unsupported armnn datatype QuantisedSym16.");
+ ml_logw ("Unsupported armnn datatype QuantisedSym16.");
break;
default:
- g_warning ("Unsupported armnn datatype unknown.");
+ ml_logw ("Unsupported armnn datatype unknown.");
/** @todo Support other types */
break;
}
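+/* Illustrative mapping from the cases above: Float32 -> _NNS_FLOAT32 and
+ * QuantisedAsymm8 -> _NNS_UINT8; Float16, Boolean, and QuantisedSymm16 fall
+ * through, so the caller sees _NNS_END (as the checks below suggest). */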
if (gst_info->type == _NNS_END) {
gst_info->type = getGstTensorType (arm_info.GetDataType ());
} else if (gst_info->type != getGstTensorType (arm_info.GetDataType ())) {
- g_warning ("Provided data type info does not match with model.");
+ ml_logw ("Provided data type info does not match with model.");
return -EINVAL;
}
if (gst_info->type == _NNS_END) {
- g_warning ("Data type not supported.");
+ ml_logw ("Data type not supported.");
return -EINVAL;
}
/* Set the dimensions */
int num_dim = arm_info.GetNumDimensions ();
if (num_dim > NNS_TENSOR_RANK_LIMIT) {
- g_warning ("Data rank exceeds max supported rank.");
+ ml_logw ("Data rank exceeds max supported rank.");
return -EINVAL;
}
#include <unistd.h>
#include <algorithm>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_filter.h>
Caffe2Core::init (const GstTensorFilterProperties * prop)
{
if (loadModels ()) {
- g_critical ("Failed to load model\n");
+ ml_loge ("Failed to load model\n");
return -1;
}
gst_tensors_info_copy (&outputTensorMeta, &prop->output_meta);
if (initInputTensor ()) {
- g_critical ("Failed to initialize input tensor\n");
+ ml_loge ("Failed to initialize input tensor\n");
return -2;
}
initializeTensor (int32_t);
break;
case _NNS_UINT32:
- g_critical ("invalid data type is used");
+ ml_loge ("invalid data type is used");
return -1;
case _NNS_INT16:
initializeTensor (int16_t);
initializeTensor (int64_t);
break;
case _NNS_UINT64:
- g_critical ("invalid data type is used");
+ ml_loge ("invalid data type is used");
return -1;
default:
- g_critical ("invalid data type is used");
+ ml_loge ("invalid data type is used");
return -1;
}
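+ /* initializeTensor(T) is a local helper that sets up the input blob with
+  * element type T; the unsigned 32/64-bit cases are rejected here,
+  * presumably because Caffe2 exposes no matching tensor element type. */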
gint64 start_time = g_get_real_time ();
#endif
if (!g_file_test (init_model_path, G_FILE_TEST_IS_REGULAR)) {
- g_critical ("the file of init_model_path is not valid: %s\n", init_model_path);
+ ml_loge ("the file of init_model_path is not valid: %s\n", init_model_path);
return -1;
}
if (!g_file_test (pred_model_path, G_FILE_TEST_IS_REGULAR)) {
- g_critical ("the file of pred_model_path is not valid: %s\n", pred_model_path);
+ ml_loge ("the file of pred_model_path is not valid: %s\n", pred_model_path);
return -1;
}
CAFFE_ENFORCE (ReadProtoFromFile (init_model_path, &initNet));
inputTensor->ShareExternalPointer ((int32_t*) input[i].data);
break;
case _NNS_UINT32:
- g_critical ("invalid data type is used");
+ ml_loge ("invalid data type is used");
return -1;
case _NNS_INT16:
inputTensor->ShareExternalPointer ((int16_t*) input[i].data);
inputTensor->ShareExternalPointer ((int64_t*) input[i].data);
break;
case _NNS_UINT64:
- g_critical ("invalid data type is used");
+ ml_loge ("invalid data type is used");
return -1;
default:
- g_critical ("invalid data type is used");
+ ml_loge ("invalid data type is used");
return -1;
}
}
workSpace.RunNet (predictNet.name ());
first_run = false;
} catch(const std::runtime_error& re) {
- g_critical ("Runtime error while running the model: %s", re.what());
+ ml_loge ("Runtime error while running the model: %s", re.what());
return -4;
} catch(const std::exception& ex) {
- g_critical ("Exception while running the model : %s", ex.what());
+ ml_loge ("Exception while running the model : %s", ex.what());
return -4;
} catch (...) {
- g_critical ("Unknown exception while running the model");
+ ml_loge ("Unknown exception while running the model");
return -4;
}
} else {
output[i].data = out.data<int32_t>();
break;
case _NNS_UINT32:
- g_critical ("invalid data type (uint32) is used");
+ ml_loge ("invalid data type (uint32) is used");
return -1;
case _NNS_INT16:
output[i].data = out.data<int16_t>();
output[i].data = out.data<int64_t>();
break;
case _NNS_UINT64:
- g_critical ("invalid data type (uint64) is used");
+ ml_loge ("invalid data type (uint64) is used");
return -1;
default:
- g_critical ("invalid data type is used");
+ ml_loge ("invalid data type is used");
return -1;
}
}
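+ /* Note: out.data<T>() returns a pointer into the Caffe2 workspace, so the
+  * output GstTensorMemory aliases the framework's buffer instead of copying
+  * it -- mirroring the ShareExternalPointer() calls on the input side. */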
const gchar *pred_model;
if (prop->num_models != 2) {
- g_critical ("Caffe2 requires two model files\n");
+ ml_loge ("Caffe2 requires two model files\n");
return -1;
}
core = new Caffe2Core (init_model, pred_model);
if (core == NULL) {
- g_critical ("Failed to allocate memory for filter subplugin: Caffe2\n");
+ ml_loge ("Failed to allocate memory for filter subplugin: Caffe2\n");
return -1;
}
*private_data = NULL;
delete core;
- g_critical ("failed to initialize the object: Caffe2");
+ ml_loge ("failed to initialize the object: Caffe2");
return -2;
}
#include <glib.h>
#include <gmodule.h>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api_filter.h>
#include "tensor_filter_cpp.hh"
return -EINVAL; /** Not found */
if (filters[name]->ref_count > 0) {
unsigned int cnt = filters[name]->ref_count;
- g_critical ("The reference counter of c++ filter, %s, is %u. Anyway, we are closing this because this is being closed by destructor of .so file.", name, cnt);
+ ml_loge ("The reference counter of c++ filter, %s, is %u. Anyway, we are closing this because this is being closed by destructor of .so file.", name, cnt);
}
size_t num = filters.erase (name);
if (num != 1)
#include <stdint.h>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api_filter.h>
#include <tensor_common.h>
#include <glib.h>
}
if (status != kTfLiteOk) {
- g_critical ("Failed to invoke");
+ ml_loge ("Failed to invoke");
return -1;
}
#include <glib/gstdio.h>
#include <gst/gst.h>
#include <mvnc2/mvnc.h>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api_filter.h>
#include <sys/types.h>
#include <sys/stat.h>
g_return_val_if_fail (prop->input_configured, -1);
if (prop->input_meta.num_tensors != NNS_MVNCSDK2_MAX_NUM_TENOSORS_SUPPORTED) {
- g_critical ("The number of input tensor should be one: "
+ ml_loge ("The number of input tensor should be one: "
"The MVNCSDK API supports single tensor input and output only");
goto err_destroy;
}
*/
#include <glib.h>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api_filter.h>
#include <tensor_common.h>
#ifdef __OPENVINO_CPU_EXT__
if (this->_isLoaded) {
// TODO: Can OpenVino support to replace the loaded model with a new one?
- g_critical ("The model file is already loaded onto the device.");
+ ml_loge ("The model file is already loaded onto the device.");
return RetEBusy;
}
strVector = this->_ieCore.GetAvailableDevices ();
if (strVector.size () == 0) {
- g_critical ("No devices found for the OpenVino toolkit; "
+ ml_loge ("No devices found for the OpenVino toolkit; "
"check your plugin is installed, and the device is also connected.");
return RetENoDev;
}
if (!TensorFilterOpenvino::isAcclDevSupported (strVector, hw)) {
- g_critical ("Failed to find the device (%s) or its plugin (%s)",
+ ml_loge ("Failed to find the device (%s) or its plugin (%s)",
get_accl_hw_str (hw), _nnsAcclHwToOVDevMap[hw].c_str());
return RetEInval;
}
info->num_tensors = (uint32_t) inputsDataMap->size ();
if (info->num_tensors > NNS_TENSOR_SIZE_LIMIT) {
- g_critical ("The number of input tenosrs in the model "
+ ml_loge ("The number of input tenosrs in the model "
"exceeds more than NNS_TENSOR_SIZE_LIMIT, %s",
NNS_TENSOR_SIZE_LIMIT_STR);
ret = RetEOverFlow;
eachInputTensorDesc = eachInputInfo->getTensorDesc ();
dimsSizeVec = eachInputTensorDesc.getDims ();
if (dimsSizeVec.size () > NNS_TENSOR_RANK_LIMIT) {
- g_critical ("The ranks of dimensions of InputTensor[%d] in the model "
+ ml_loge ("The ranks of dimensions of InputTensor[%d] in the model "
"exceeds NNS_TENSOR_RANK_LIMIT, %u", i, NNS_TENSOR_RANK_LIMIT);
ret = RetEOverFlow;
goto failed;
ieTensorTypeStr = eachInputInfo->getPrecision ().name ();
nnsTensorType = TensorFilterOpenvino::convertFromIETypeStr (ieTensorTypeStr);
if (nnsTensorType == _NNS_END) {
- g_critical ("The type of tensor elements, %s, "
+ ml_loge ("The type of tensor elements, %s, "
"in the model is not supported", ieTensorTypeStr.c_str ());
ret = RetEInval;
goto failed;
return TensorFilterOpenvino::RetSuccess;
failed:
- g_critical ("Failed to get dimension information about input tensor");
+ ml_loge ("Failed to get dimension information about input tensor");
return ret;
}
info->num_tensors = (uint32_t) outputsDataMap->size ();
if (info->num_tensors > NNS_TENSOR_SIZE_LIMIT) {
- g_critical ("The number of output tenosrs in the model "
+ ml_loge ("The number of output tenosrs in the model "
"exceeds more than NNS_TENSOR_SIZE_LIMIT, %s",
NNS_TENSOR_SIZE_LIMIT_STR);
ret = RetEOverFlow;
eachOutputTensorDesc = eachOutputInfo->getTensorDesc ();
dimsSizeVec = eachOutputTensorDesc.getDims ();
if (dimsSizeVec.size () > NNS_TENSOR_RANK_LIMIT) {
- g_critical ("The ranks of dimensions of OutputTensor[%d] in the model "
+ ml_loge ("The ranks of dimensions of OutputTensor[%d] in the model "
"exceeds NNS_TENSOR_RANK_LIMIT, %u", i, NNS_TENSOR_RANK_LIMIT);
ret = RetEOverFlow;
goto failed;
ieTensorTypeStr = eachOutputInfo->getPrecision ().name ();
nnsTensorType = TensorFilterOpenvino::convertFromIETypeStr (ieTensorTypeStr);
if (nnsTensorType == _NNS_END) {
- g_critical ("The type of tensor elements, %s, "
+ ml_loge ("The type of tensor elements, %s, "
"in the model is not supported", ieTensorTypeStr.c_str ());
ret = RetEInval;
goto failed;
return TensorFilterOpenvino::RetSuccess;
failed:
- g_critical ("Failed to get dimension information about output tensor");
+ ml_loge ("Failed to get dimension information about output tensor");
return ret;
}
InferenceEngine::Blob::Ptr blob = convertGstTensorMemoryToBlobPtr (
this->_inputTensorDescs[i], &(input[i]));
if (blob == nullptr) {
- g_critical ("Failed to create a blob for the input tensor: %u", i);
+ ml_loge ("Failed to create a blob for the input tensor: %u", i);
return RetEInval;
}
inBlobMap.insert (make_pair (std::string(info->name), blob));
this->_outputTensorDescs[i], &(output[i]));
outBlobMap.insert (make_pair (std::string(info->name), blob));
if (blob == nullptr) {
- g_critical ("Failed to create a blob for the output tensor: %u", i);
+ ml_loge ("Failed to create a blob for the output tensor: %u", i);
return RetEInval;
}
}
accelerator = parse_accl_hw (prop->accl_str, openvino_accl_support);
#ifndef __OPENVINO_CPU_EXT__
if (accelerator == ACCL_CPU) {
- g_critical ("Accelerating via CPU is not supported on the current platform");
+ ml_loge ("Accelerating via CPU is not supported on the current platform");
return TensorFilterOpenvino::RetEInval;
}
#endif
if (accelerator == ACCL_NONE || accelerator == ACCL_AUTO
|| accelerator == ACCL_DEFAULT) {
if (prop->accl_str != NULL) {
- g_critical("'%s' is not valid value for the 'accelerator' property",
+ ml_loge("'%s' is not valid value for the 'accelerator' property",
prop->accl_str);
}
- g_critical ("The 'accelerator' property is mandatory to use the tensor filter for OpenVino.\n"
+ ml_loge ("The 'accelerator' property is mandatory to use the tensor filter for OpenVino.\n"
"An acceptable format is as follows: 'true:[cpu|npu.movidius]'. Note that 'cpu' is only for the x86_64 architecture.");
return TensorFilterOpenvino::RetEInval;
}
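+/* Example derived from the message above (an illustrative gst-launch
+ * fragment, not taken from this patch; 'model.xml'/'model.bin' are
+ * hypothetical file names):
+ *   tensor_filter framework=openvino model=model.xml,model.bin \
+ *       accelerator=true:npu.movidius
+ */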
if (num_models_xml > 1) {
- g_critical ("Too many model files in a XML format are provided.");
+ ml_loge ("Too many model files in a XML format are provided.");
return TensorFilterOpenvino::RetEInval;
} else if (num_models_bin > 1) {
- g_critical ("Too many model files in a BIN format are provided.");
+ ml_loge ("Too many model files in a BIN format are provided.");
return TensorFilterOpenvino::RetEInval;
}
}
}
if (!g_file_test (model_path_xml.c_str (), G_FILE_TEST_IS_REGULAR)) {
- g_critical ("Failed to open the XML model file, %s",
+ ml_loge ("Failed to open the XML model file, %s",
model_path_xml.c_str ());
return TensorFilterOpenvino::RetEInval;
}
if (!g_file_test (model_path_bin.c_str (), G_FILE_TEST_IS_REGULAR)) {
- g_critical ("Failed to open the BIN model file, %s",
+ ml_loge ("Failed to open the BIN model file, %s",
model_path_bin.c_str ());
return TensorFilterOpenvino::RetEInval;
}
#include <string.h>
#include <dlfcn.h>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_filter.h>
#include <nnstreamer_conf.h>
#define PYCORE_LIB_NAME_FORMAT "libpython%d.%dm.so.1.0"
#endif
-#define Py_ERRMSG(...) do {PyErr_Print(); g_critical (__VA_ARGS__);} while (0);
+#define Py_ERRMSG(...) do {PyErr_Print(); ml_loge (__VA_ARGS__);} while (0)
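+/* The do { ... } while (0) wrapper makes the macro expand to a single
+ * statement, so a caller can safely write, e.g.:
+ *   if (PyErr_Occurred ())
+ *     Py_ERRMSG ("python error in %s", __func__);
+ * Usage sketch only; callers in this file pass their own format strings. */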
static const gchar *python_accl_support[] = {
ACCL_AUTO_STR,
Py_XDECREF(it->second);
outputArrayMap.erase (it);
} else {
- g_critical("Cannot find output data: 0x%lx", (unsigned long) data);
+ ml_loge("Cannot find output data: 0x%lx", (unsigned long) data);
}
}
Py_XINCREF(output_array);
outputArrayMap.insert (std::make_pair (output[i].data, output_array));
} else {
- g_critical ("Output tensor type/size is not matched\n");
+ ml_loge ("Output tensor type/size is not matched\n");
res = -2;
break;
}
#include <glib.h>
#include <tensor_typedef.h>
+#include <nnstreamer_log.h>
/** @brief object structure for custom Python type: TensorShape */
typedef struct
self->type = dtype;
Py_XINCREF (dtype);
} else
- g_critical ("Wrong data type");
+ ml_loge ("Wrong data type");
}
return 0;
*
*/
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_filter.h>
gst_tensors_info_copy (&outputTensorMeta, &prop->output_meta);
if (loadModel ()) {
- g_critical ("Failed to load model\n");
+ ml_loge ("Failed to load model\n");
return -1;
}
#endif
if (!g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
- g_critical ("the file of model_path (%s) is not valid (not regular).",
+ ml_loge ("the file of model_path (%s) is not valid (not regular).",
model_path);
return -1;
}
model = torch::jit::load (model_path);
if (model == nullptr) {
- g_critical ("Failed to read graph.");
+ ml_loge ("Failed to read graph.");
return -2;
}
auto tensor_shape = output.sizes ();
if (tensor_shape[0] != 0 && outputTensorMeta.num_tensors != tensor_shape[0]) {
- g_critical ("Invalid output meta: different size");
+ ml_loge ("Invalid output meta: different size");
return -1;
}
if (tensor_shape[0] == 0) {
tensor_type otype = getTensorTypeFromTorch (output.scalar_type ());
if (outputTensorMeta.info[0].type != otype) {
- g_critical ("Invalid output meta: different type");
+ ml_loge ("Invalid output meta: different type");
return -2;
}
goto done;
}
if (outputTensorMeta.info[i].type != otype) {
- g_critical ("Invalid output meta: different type");
+ ml_loge ("Invalid output meta: different type");
return -2;
}
if (num_gst_tensor != num_torch_tensor) {
- g_critical ("Invalid output meta: different element size");
+ ml_loge ("Invalid output meta: different element size");
return -3;
}
}
/* validate output tensor once */
if (!configured && validateOutputTensor (output_tensor)) {
- g_critical ("Output Tensor Information is not valid");
+ ml_loge ("Output Tensor Information is not valid");
return -1;
}
&inputTensorMeta.info[i].dimension[0] + NNS_TENSOR_RANK_LIMIT);
if (!getTensorTypeToTorch (input[i].type, &type)) {
- g_critical ("This data type is not valid: %d", input[i].type);
+ ml_loge ("This data type is not valid: %d", input[i].type);
return -1;
}
at::TensorOptions options = torch::TensorOptions ().dtype (type);
}
catch (const std::runtime_error & re)
{
- g_critical ("Runtime error while running the model: %s", re.what ());
+ ml_loge ("Runtime error while running the model: %s", re.what ());
return -4;
}
catch (const std::exception & ex) {
- g_critical ("Exception while running the model : %s", ex.what ());
+ ml_loge ("Exception while running the model : %s", ex.what ());
return -4;
}
catch (...) {
- g_critical ("Unknown exception while running the model");
+ ml_loge ("Unknown exception while running the model");
return -4;
}
} else {
if (output_value.isTensor ()) {
g_assert (outputTensorMeta.num_tensors == 1);
if (processIValue (output_value, &output[0])) {
- g_critical ("Output Tensor Information is not valid");
+ ml_loge ("Output Tensor Information is not valid");
return -2;
}
} else if (output_value.isGenericList ()) {
int idx = 0;
for (auto & ivalue_element:output_list) {
if (processIValue (ivalue_element, &output[idx++])) {
- g_critical ("Output Tensor Information is not valid");
+ ml_loge ("Output Tensor Information is not valid");
return -2;
}
}
} else {
- g_critical ("Output is not a tensor.");
+ ml_loge ("Output is not a tensor.");
return -3;
}
* This is the per-NN-framework plugin (tensorflow) for tensor_filter.
*/
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_filter.h>
TF_CloseSession (session, status);
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error during session close!! - [Code: %d] %s",
+ ml_loge ("Error during session close!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
}
TF_DeleteSession (session, status);
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error during session delete!! - [Code: %d] %s",
+ ml_loge ("Error during session delete!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
}
TF_DeleteStatus (status);
TFCore::init (const GstTensorFilterProperties * prop)
{
if (loadModel ()) {
- g_critical ("Failed to load model");
+ ml_loge ("Failed to load model");
return -1;
}
if (validateTensor (&prop->input_meta, 1)) {
- g_critical ("Failed to validate input tensor");
+ ml_loge ("Failed to validate input tensor");
return -2;
}
if (validateTensor (&prop->output_meta, 0)) {
- g_critical ("Failed to validate output tensor");
+ ml_loge ("Failed to validate output tensor");
return -3;
}
g_assert (model_path != nullptr);
if (!g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
- g_critical ("the file of model_path (%s) is not valid (not regular)\n", model_path);
+ ml_loge ("the file of model_path (%s) is not valid (not regular)\n", model_path);
return -1;
}
if (!g_file_get_contents (model_path, &content, &file_size, &file_error)) {
- g_critical ("Error reading model file!! - %s", file_error->message);
+ ml_loge ("Error reading model file!! - %s", file_error->message);
g_clear_error (&file_error);
return -2;
}
TF_DeleteBuffer (buffer);
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error deleting graph!! - [Code: %d] %s",
+ ml_loge ("Error deleting graph!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
TF_DeleteStatus (status);
TF_DeleteGraph (graph);
TF_DeleteSessionOptions (options);
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error creating Session!! - [Code: %d] %s",
+ ml_loge ("Error creating Session!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
TF_DeleteStatus (status);
TF_DeleteGraph (graph);
tf_tensor_info_s info_s;
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error Tensor validation!! - [Code: %d] %s",
+ ml_loge ("Error Tensor validation!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
TF_DeleteStatus (status);
return -1;
TF_GraphGetTensorShape (graph, output, dims.data (), num_dims, status);
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error Tensor validation!! - [Code: %d] %s",
+ ml_loge ("Error Tensor validation!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
TF_DeleteStatus (status);
return -2;
char *input_encoded = (char*) g_malloc0 (total_size);
if (input_encoded == NULL) {
- g_critical ("Failed to allocate memory for input tensor.");
+ ml_loge ("Failed to allocate memory for input tensor.");
ret = -1;
goto failed;
}
encoded_size,
status); /* fills the rest of tensor data */
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error String Encoding!! - [Code: %d] %s",
+ ml_loge ("Error String Encoding!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
g_free (input_encoded);
ret = -1;
);
if (TF_GetCode (status) != TF_OK) {
- g_critical ("Error Running Session!! - [Code: %d] %s",
+ ml_loge ("Error Running Session!! - [Code: %d] %s",
TF_GetCode (status), TF_Message (status));
ret = -2;
goto failed;
#include <limits.h>
#include <algorithm>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_plugin_api_filter.h>
#include <nnstreamer_conf.h>
#endif
if (status != kTfLiteOk) {
- g_critical ("Failed to invoke");
+ ml_loge ("Failed to invoke");
return -1;
}
model = tflite::FlatBufferModel::BuildFromFile (model_path);
if (!model) {
- g_critical ("Failed to mmap model\n");
+ ml_loge ("Failed to mmap model\n");
return -1;
}
/* If got any trouble at model, active below code. It'll be help to analyze. */
tflite::ops::builtin::BuiltinOpResolver resolver;
tflite::InterpreterBuilder (*model, resolver) (&interpreter);
if (!interpreter) {
- g_critical ("Failed to construct interpreter\n");
+ ml_loge ("Failed to construct interpreter\n");
return -2;
}
if (use_nnapi) {
nnfw_delegate.reset (new ::nnfw::tflite::NNAPIDelegate);
if (nnfw_delegate->BuildGraph (interpreter) != kTfLiteOk) {
- g_critical ("Fail to BuildGraph");
+ ml_loge ("Fail to BuildGraph");
return -3;
}
}
}
if (interpreter->AllocateTensors () != kTfLiteOk) {
- g_critical ("Failed to allocate tensors\n");
+ ml_loge ("Failed to allocate tensors\n");
return -2;
}
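+ /* TFLite call order used here: FlatBufferModel::BuildFromFile () ->
+  * InterpreterBuilder () -> optional NNAPI BuildGraph () ->
+  * AllocateTensors (); only then may Invoke () run. A failure at any
+  * stage aborts loading with a negative code. */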
#if (DBG)
case kTfLiteFloat16:
#endif
default:
- g_critical ("Not supported Tensorflow Data Type: [%d].", tfType);
+ ml_loge ("Not supported Tensorflow Data Type: [%d].", tfType);
/** @todo Support other types */
break;
}
for (unsigned int i = 0; i < tensorMeta->num_tensors; ++i) {
if (getTensorDim (tensor_idx_list[i], tensorMeta->info[i].dimension)) {
- g_critical ("failed to get the dimension of input tensors");
+ ml_loge ("failed to get the dimension of input tensors");
return -1;
}
tensorMeta->info[i].type =
TFLiteCore::init ()
{
if (loadModel ()) {
- g_critical ("Failed to load model\n");
+ ml_loge ("Failed to load model\n");
return -1;
}
if (setInputTensorProp ()) {
- g_critical ("Failed to initialize input tensor\n");
+ ml_loge ("Failed to initialize input tensor\n");
return -2;
}
if (setOutputTensorProp ()) {
- g_critical ("Failed to initialize output tensor\n");
+ ml_loge ("Failed to initialize output tensor\n");
return -3;
}
return 0;
int err;
if (!g_file_test (_model_path, G_FILE_TEST_IS_REGULAR)) {
- g_critical ("The path of model file(s), %s, to reload is invalid.",
+ ml_loge ("The path of model file(s), %s, to reload is invalid.",
_model_path);
return -EINVAL;
}
*/
err = interpreter_sub.loadModel (use_nnapi);
if (err != 0) {
- g_critical ("Failed to load model %s\n", _model_path);
+ ml_loge ("Failed to load model %s\n", _model_path);
goto out_unlock;
}
err = interpreter_sub.setInputTensorProp ();
if (err != 0) {
- g_critical ("Failed to initialize input tensor\n");
+ ml_loge ("Failed to initialize input tensor\n");
goto out_unlock;
}
err = interpreter_sub.setOutputTensorProp ();
if (err != 0) {
- g_critical ("Failed to initialize output tensor\n");
+ ml_loge ("Failed to initialize output tensor\n");
goto out_unlock;
}
!gst_tensors_info_is_equal (
interpreter.getOutputTensorsInfo (),
interpreter_sub.getOutputTensorsInfo ())) {
- g_critical ("The model has unmatched tensors info\n");
+ ml_loge ("The model has unmatched tensors info\n");
err = -EINVAL;
goto out_unlock;
}
#include <string.h>
#include <glib.h>
+#include "nnstreamer_log.h"
#include "nnstreamer_conf.h"
/**
{
if (type >= NNSCONF_PATH_END) {
/* unknown type */
- g_critical ("Failed to get sub-plugins, unknown sub-plugin type.");
+ ml_loge ("Failed to get sub-plugins, unknown sub-plugin type.");
return FALSE;
}
* Failed to get the configuration.
* Note that Android API does not use the configuration.
*/
- g_warning ("Failed to load the configuration, no config file found.");
+ ml_logw ("Failed to load the configuration, no config file found.");
}
for (t = 0; t < NNSCONF_PATH_END; t++) {
#ifndef __NNSTREAMER_LOG_H__
#define __NNSTREAMER_LOG_H__
-G_BEGIN_DECLS
-
#define TAG_NAME "nnstreamer"
#if defined(__TIZEN__)
#define nns_loge ml_loge
#define nns_logd ml_logd
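+/* On Tizen the nns_log* aliases resolve to the platform's ml_log* macros
+ * (dlog-based); other platforms fall back to GLib logging -- a rough
+ * summary, see the full header for the per-platform definitions. */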
-G_END_DECLS
#endif /* __NNSTREAMER_LOG_H__ */
#include <glib.h>
#include <gmodule.h>
+#include "nnstreamer_log.h"
#include "nnstreamer_subplugin.h"
#include "nnstreamer_conf.h"
module = g_module_open (path, 0);
/* If this is a correct subplugin, it will register itself */
if (module == NULL) {
- g_critical ("Cannot open %s(%s) with error %s.", name, path,
+ ml_loge ("Cannot open %s(%s) with error %s.", name, path,
g_module_error ());
return NULL;
}
if (spdata) {
g_ptr_array_add (handles, (gpointer) module);
} else {
- g_critical
+ ml_loge
("nnstreamer_subplugin of %s(%s) is broken. It does not call register_subplugin with its init function.",
name, path);
g_module_close (module);
/* check the sub-pugin name */
if (g_ascii_strcasecmp (name, "auto") == 0) {
- g_critical ("Failed, the name %s is not allowed.", name);
+ ml_loge ("Failed, the name %s is not allowed.", name);
return FALSE;
}
spdata = _get_subplugin_data (type, name);
if (spdata) {
/* already exists */
- g_warning ("Subplugin %s is already registered.", name);
+ ml_logw ("Subplugin %s is already registered.", name);
return FALSE;
}
spdata = g_new0 (subpluginData, 1);
if (spdata == NULL) {
- g_critical ("Failed to allocate memory for subplugin registration.");
+ ml_loge ("Failed to allocate memory for subplugin registration.");
return FALSE;
}
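+/* For reference, a subplugin registers itself from a constructor in its .so,
+ * along the lines of (sketch; see nnstreamer_subplugin.h for the real
+ * signature -- "foo" and foo_data are hypothetical names):
+ *   void init_filter_foo (void) __attribute__ ((constructor));
+ *   void init_filter_foo (void)
+ *   { register_subplugin (NNS_SUBPLUGIN_FILTER, "foo", &foo_data); }
+ */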
#include <string.h>
#include "tensor_converter.h"
#include "converter-media-info.h"
+#include <nnstreamer_log.h>
#include <nnstreamer_subplugin.h>
#include <nnstreamer_plugin_api_converter.h>
frames_dim = 3;
self->frame_size = GST_VIDEO_INFO_SIZE (&info);
} else {
- g_critical
+ ml_loge
("\n This binary does not support video type. Please build NNStreamer with disable-video-support : false\n");
return FALSE;
}
frames_dim = 1;
self->frame_size = GST_AUDIO_INFO_BPF (&info);
} else {
- g_critical
+ ml_loge
("\n This binary does not support audio type. Please build NNStreamer with disable-audio-support : false\n");
return FALSE;
}
GST_ERROR_OBJECT (self,
"Failed to get tensor info, need to update string size.");
- g_critical ("Please set the property input-dim to convert stream.\n"
+ ml_loge ("Please set the property input-dim to convert stream.\n"
"For example, input-dim=30 to handle up to 30 bytes of string per frame.");
return FALSE;
}
GST_ERROR_OBJECT (self,
"Failed to get tensor info, need to update dimension and type.");
- g_critical
+ ml_loge
("Please set the properties input-dim and input-type to convert stream.\n"
"For example, input-dim=30 input-type=unit8 to handle 30 bytes of bin data.");
return FALSE;
GST_ERROR_OBJECT (self,
"Failed to get tensor info from %s. Check the given options.",
name);
- g_critical ("Please set the options property correctly.\n");
+ ml_loge ("Please set the options property correctly.\n");
self->externalConverter = NULL;
return FALSE;
}
#include <string.h>
+#include <nnstreamer_log.h>
#include <tensor_common.h>
#include "tensor_filter_common.h"
if ((prop->model_files != NULL) && (verify_model_path == TRUE)) {
for (i = 0; i < prop->num_models; i++) {
if (!g_file_test (prop->model_files[i], G_FILE_TEST_IS_REGULAR)) {
- g_critical ("Cannot find the model file [%d]: %s\n",
+ ml_loge ("Cannot find the model file [%d]: %s\n",
i, prop->model_files[i]);
ret = FALSE;
}
if (info->name == NULL) {
status = priv->fw->getFrameworkInfo (prop, priv->privateData, info);
if (status != 0 || info->hw_list == NULL) {
- g_warning ("Unable to fetch accelerators supported by the framework.");
+ ml_logw ("Unable to fetch accelerators supported by the framework.");
return;
}
}
gst_tensor_filter_common_close_fw (priv);
priv->fw = NULL;
} else {
- g_debug ("Framework = %s\n", fw_name);
+ ml_logd ("Framework = %s\n", fw_name);
break;
}
}
if (status == 0) {
g_strfreev_const (_prop.model_files);
} else {
- g_critical ("Fail to reload model\n");
+ ml_loge ("Fail to reload model\n");
g_strfreev_const (prop->model_files);
prop->model_files = _prop.model_files;
prop->num_models = _prop.num_models;
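+ /* Rollback on failure: the previous model_files/num_models are restored
+  * into prop, so the filter keeps serving the old model instead of being
+  * left half-configured. */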
#include "tensor_filter_custom.h"
#include "nnstreamer_plugin_api_filter.h"
#include "nnstreamer_conf.h"
+#include <nnstreamer_log.h>
void init_filter_custom (void) __attribute__ ((constructor));
void fini_filter_custom (void) __attribute__ ((destructor));
ptr = *private_data = g_new0 (internal_data, 1);
if (ptr == NULL) {
- g_critical ("Failed to allocate memory for custom filter.");
+ ml_loge ("Failed to allocate memory for custom filter.");
return -1;
}
}
if (!g_module_symbol (ptr->module, "NNStreamer_custom", &custom_cls)) {
- g_critical ("tensor_filter_custom:loadlib error: %s\n", g_module_error ());
+ ml_loge ("tensor_filter_custom:loadlib error: %s\n", g_module_error ());
g_module_close (ptr->module);
g_free (ptr);
*private_data = NULL;
#include <errno.h>
#include <glib.h>
#include <tensor_filter_custom_easy.h>
+#include <nnstreamer_log.h>
#include <nnstreamer_plugin_api.h>
#include <nnstreamer_subplugin.h>
rd->model = get_subplugin (NNS_EASY_CUSTOM_FILTER, prop->model_files[0]);
if (NULL == rd->model) {
- g_critical
+ ml_loge
("Cannot find the easy-custom model, \"%s\". You should provide a valid model name of easy-custom.",
prop->model_files[0]);
g_free (rd);
#include <string.h>
#include <math.h>
+#include <nnstreamer_log.h>
#include "tensor_transform.h"
#ifdef HAVE_ORC
gchar **strv = NULL;
if (!g_regex_match_simple (REGEX_DIMCHG_OPTION, filter->option, 0, 0)) {
- g_critical
+ ml_loge
("%s: dimchg: \'%s\' is not valid option string: it should be in the form of IDX_DIM_FROM:IDX_DIM_TO: with a regex, "
REGEX_DIMCHG_OPTION "\n", filter_name, filter->option);
break;
filter->data_typecast.to = gst_tensor_get_type (filter->option);
filter->loaded = TRUE;
} else {
- g_critical
+ ml_loge
("%s: typecast: \'%s\' is not valid data type for tensor: data type of tensor should be one of %s\n",
filter_name, filter->option, GST_TENSOR_TYPE_ALL);
}
1, 0, NULL, NULL)) {
str_option = g_regex_replace (regex_option_tc, filter->option, -1, 1,
"", 0, 0);
- g_critical
+ ml_loge
("%s: arithmetic: [typecast:TYPE,] should be located at the first to prevent memory re-allocation: typecast(s) in the middle of \'%s\' will be ignored\n",
filter_name, filter->option);
} else {
g_regex_unref (regex_option_tc);
if (!g_regex_match_simple (REGEX_ARITH_OPTION, str_option, 0, 0)) {
- g_critical
+ ml_loge
("%s: arithmetic: \'%s\' is not valid option string: it should be in the form of [typecast:TYPE,]add|mul|div:NUMBER..., ...\n",
filter_name, str_option);
g_free (str_option);
gchar **strv = NULL;
if (!g_regex_match_simple (REGEX_TRANSPOSE_OPTION, filter->option, 0, 0)) {
- g_critical
+ ml_loge
("%s: transpose: \'%s\' is not valid option string: it should be in the form of NEW_IDX_DIM0:NEW_IDX_DIM1:NEW_IDX_DIM2:3 (note that the index of the last dim is alwayes fixed to 3)\n",
filter_name, filter->option);
break;
filter->data_stand.mode =
gst_tensor_transform_get_stand_mode (filter->option);
if (filter->data_stand.mode == STAND_END) {
- g_critical
+ ml_loge
("%s: stand: \'%s\' is not valid option string: it should be \'default\', currently the only supported mode.\n",
filter_name, filter->option);
break;