DefConfigFilePath = MV_CONFIG_PATH;
DefConfigFilePath += MV_ENGINE_CONFIG_FILE_NAME;
- LOGE("Default Engine config file location is %s", DefConfigFilePath.c_str());
+ LOGI("Default Engine config file location is %s", DefConfigFilePath.c_str());
/* Force load default attributes from configuration file */
cacheDictionaries(false);
for (int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
auto iter = mSupportedInferenceBackend.find(i);
- LOGE("%d: %s: %s", i, (iter->second).first.c_str(),
+ LOGI("%d: %s: %s", i, (iter->second).first.c_str(),
(iter->second).second ? "TRUE" : "FALSE");
}
/**
 * @brief Mark the inference backends listed in the engine ini file as available.
 *
 * Loads the engine configuration (ini) file, queries the list of supported
 * backend ids, and flips the availability flag (`second.second`) of the
 * matching entries in @c mSupportedInferenceBackend to true.
 *
 * No parameters, no return value; mutates @c mSupportedInferenceBackend only.
 */
void Inference::CheckSupportedInferenceBackend()
{
	LOGI("ENTER");

	InferenceInI ini;
	ini.LoadInI();

	std::vector<int> supportedBackend = ini.GetSupportedInferenceEngines();
	for (std::vector<int>::const_iterator it = supportedBackend.begin();
		 it != supportedBackend.end(); ++it) {
		LOGI("engine: %d", *it);

		auto iter = mSupportedInferenceBackend.find(*it);
		// Guard against an unknown backend id coming from the ini file:
		// dereferencing the end() iterator is undefined behavior.
		if (iter == mSupportedInferenceBackend.end()) {
			LOGE("Unknown inference backend id %d in ini file; skipped", *it);
			continue;
		}

		iter->second.second = true;
	}

	LOGI("LEAVE");
}
int Inference::ConvertEngineErrorToVisionError(int error)
.clone();
}
- LOGE("Size: w:%u, h:%u", cvSource.size().width, cvSource.size().height);
+ LOGI("Size: w:%u, h:%u", cvSource.size().width, cvSource.size().height);
if (mCh != 1 && mCh != 3) {
LOGE("Channel not supported.");
iter->second.y = height - iter->second.y;
- LOGE("(%d, %d)", iter->second.x, iter->second.y);
+ LOGI("(%d, %d)", iter->second.x, iter->second.y);
}
ret = getParts((MV_INFERENCE_HUMAN_BODY_PART_HEAD |