#define MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH "MV_SURVEILLANCE_FACE_RECOGNITION_MODEL_FILE_PATH"
/**
- * @brief Defines MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESOLD to set movement
+ * @brief Defines MV_SURVEILLANCE_MOVEMENT_DETECTION_THRESHOLD to set movement
* detection threshold. It is an attribute of the engine configuration.
* @details This value might be set in engine configuration before subscription
* on #MV_SURVEILLANCE_EVENT_TYPE_MOVEMENT_DETECTED event trigger
- `name`: name to an output tensor for score
- `index`: index to get score from the output tensor
- `top_number`: the top number of outputs
-- `threshold` : threshold to cut ouputs under the `threshold` value
+- `threshold` : threshold to cut outputs under the `threshold` value
- `score_type` : score type; `NORMAL` if score between 0 ~ 1, `SIGMOID` if score requires sigmoid
The classification meta file, thus, illustrates that the model has an input which is named `input_2`, `NHWC` shape type with `[1, 224, 224, 3]` dimensions, `MV_INFERENCE_DATA_FLOAT32` data type, and `RGB888` color space. It requires normalization with mean `[127.5, 127.5, 127.5]` and standard deviation `[127.5, 127.5, 127.5]`. But it doesn't apply quantization.
-The meta file illustrates that the model has an ouput which is named of `dense_3/Softmax`. The tensor is 2-dimensional and its' 2nd index corresponds to the score. In addition, the score is just between 0 ~ 1. The score under `threshold` 0.3 should be thrown out and the `top_number` of outputs should be given as results.
+The meta file illustrates that the model has an output which is named `dense_3/Softmax`. The tensor is 2-dimensional and its 2nd index corresponds to the score. In addition, the score is just between 0 ~ 1. The score under `threshold` 0.3 should be thrown out and the `top_number` of outputs should be given as results.
A meta file, however, for classification with quantized model is shown below.
* `value` = `value8` / `scale` + `zeropoint`
The classification meta file, thus, illustrates that the model has an input which is named `input`, `NHWC` shape type with `[1, 224, 224, 3]` dimensions, `MV_INFERENCE_DATA_UINT8` data type, and `RGB888` color space. It doesn't require any preprocess.
-The meta file illustrates that the model has an ouput which is named of `MobilenetV1/Predictions/Reshape_1`. The tensor is 2-dimensional and its' 2nd index corresponds to the score. In addition, the score is just between 0 ~ 1, but the value requires dequantization with scale and zeropoint values. The score after dequantizing under `threshold`0.3 should be thrown out and the `top_number` of outputs should be given as results.
+The meta file illustrates that the model has an output which is named `MobilenetV1/Predictions/Reshape_1`. The tensor is 2-dimensional and its 2nd index corresponds to the score. In addition, the score is just between 0 ~ 1, but the value requires dequantization with scale and zeropoint values. The score after dequantizing under `threshold` 0.3 should be thrown out and the `top_number` of outputs should be given as results.
To show how to apply meta files to well-known models, we provide example meta files which support google hosted models for image classification as:
static gpointer DfsThreadLoop(gpointer data);
#ifdef MV_3D_POINTCLOUD_IS_AVAILABLE
- PointCloudPtr GetPointcloudFromSource(DfsInputData &intput, DfsOutputData &depthData);
+ PointCloudPtr GetPointcloudFromSource(DfsInputData &input, DfsOutputData &depthData);
#endif
public:
Mv3d();
/**
* @brief Configure mv3d handle.
- * @sicne_tizen 7.0
+ * @since_tizen 7.0
*/
int mv3dConfigure(mv_3d_h mv3d, mv_engine_config_h engine_config);
/**
* @brief Set depth callback to mv3d handle.
- * @sicne_tizen 7.0
+ * @since_tizen 7.0
*/
int mv3dSetDepthCallback(mv_3d_h mv3d, mv_3d_depth_cb depth_cb, void *user_data);
/**
* @brief Set pointcloud callback to mv3d handle.
- * @sicne_tizen 7.0
+ * @since_tizen 7.0
*/
int mv3dSetPointcloudCallback(mv_3d_h mv3d, mv_3d_pointcloud_cb pointcloud_cb, void *user_data);
}
if (!depth_cb) {
- LOGE("Callbakc is NULL");
+ LOGE("Callback is NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
}
if (!pointcloud_cb) {
- LOGE("Callbakc is NULL");
+ LOGE("Callback is NULL");
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
case MEDIA_VISION_COLORSPACE_Y800:
channelsNumber = 1;
conversionType = -1; /* Type of conversion from given colorspace to gray */
- /* Without convertion */
+ /* Without conversion */
break;
case MEDIA_VISION_COLORSPACE_I420:
channelsNumber = 1;
faceLocations.clear();
- cv::Mat intrestingRegion = image;
+ cv::Mat interestingRegion = image;
bool roiIsUsed = false;
if (roi.x >= 0 && roi.y >= 0 && roi.width > 0 && roi.height > 0 && (roi.x + roi.width) <= image.cols &&
(roi.y + roi.height) <= image.rows) {
- intrestingRegion = intrestingRegion(roi);
+ interestingRegion = interestingRegion(roi);
roiIsUsed = true;
}
try {
- m_faceCascade.detectMultiScale(intrestingRegion, faceLocations, 1.1, 3, 0, minSize);
+ m_faceCascade.detectMultiScale(interestingRegion, faceLocations, 1.1, 3, 0, minSize);
} catch (cv::Exception &e) {
return false;
}
cv::Mat eyeEqualized;
cv::equalizeHist(eye, eyeEqualized);
- const int thresold = 20;
- eyeEqualized = eyeEqualized < thresold;
+ const int threshold = 20;
+ eyeEqualized = eyeEqualized < threshold;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
const int width = eyeEqualized.cols / 2.5;
const int height = eyeEqualized.rows / 2.5;
- const cv::Rect boundThresold(xCenter - width, yCenter - height, 2 * width, 2 * height);
+ const cv::Rect boundThreshold(xCenter - width, yCenter - height, 2 * width, 2 * height);
const int widthHeightRatio = 3;
const double areaRatio = 0.005;
const cv::Rect currentRect = cv::boundingRect(contours[i]);
const double currentArea = cv::contourArea(contours[i]);
- if (boundThresold.contains(currentRect.br()) && boundThresold.contains(currentRect.tl()) &&
- currentArea > areaRatio * boundThresold.area() && currentRect.width < widthHeightRatio * currentRect.height)
+ if (boundThreshold.contains(currentRect.br()) && boundThreshold.contains(currentRect.tl()) &&
+ currentArea > areaRatio * boundThreshold.area() &&
+ currentRect.width < widthHeightRatio * currentRect.height)
isOpen = MV_FACE_EYES_OPEN;
- else if (boundThresold.contains(currentRect.br()) && boundThresold.contains(currentRect.tl()) &&
- currentArea > areaSmallRatio * boundThresold.area())
+ else if (boundThreshold.contains(currentRect.br()) && boundThreshold.contains(currentRect.tl()) &&
+ currentArea > areaSmallRatio * boundThreshold.area())
++rectanglesInsideCount;
}
dstAlg->set(paramNames[i], srcAlg->getAlgorithm(paramNames[i]));
break;
default:
- LOGE("While copying algorothm parameters unsupported parameter "
+ LOGE("While copying algorithm parameters unsupported parameter "
"%s was found.", paramNames[i].c_str());
return MEDIA_VISION_ERROR_NOT_SUPPORTED;
int error = MediaVision::Common::convertSourceMV2GrayCV(source, image);
if (error != MEDIA_VISION_ERROR_NONE) {
- LOGE("Convertion mv_source_h to gray failed");
+ LOGE("Conversion mv_source_h to gray failed");
return error;
}
error);
}
- /* Ser roi to be detected */
+ /* Set roi to be detected */
error = mv_engine_config_get_int_attribute_c(engine_cfg, MV_FACE_DETECTION_ROI_X, &roi.x);
if (error != MEDIA_VISION_ERROR_NONE)
LOGE("Error occurred during face detection roi (x) receiving."
int ret = MediaVision::Common::convertSourceMV2GrayCV(source, grayImage);
if (MEDIA_VISION_ERROR_NONE != ret) {
- LOGE("Convertion mv_source_h to gray failed");
+ LOGE("Conversion mv_source_h to gray failed");
return ret;
}
int ret = MediaVision::Common::convertSourceMV2GrayCV(source, grayImage);
if (MEDIA_VISION_ERROR_NONE != ret) {
- LOGE("Convertion mv_source_h to gray failed");
+ LOGE("Conversion mv_source_h to gray failed");
return ret;
}
int error = MediaVision::Common::convertSourceMV2GrayCV(source, image);
if (error != MEDIA_VISION_ERROR_NONE) {
- LOGE("Convertion mv_source_h to gray failed");
+ LOGE("Conversion mv_source_h to gray failed");
return error;
}
error = FaceEyeCondition::recognizeEyeCondition(image, face_location, &eye_condition);
if (error != MEDIA_VISION_ERROR_NONE) {
- LOGE("eye contition recognition failed");
+ LOGE("eye condition recognition failed");
return error;
}
int error = MediaVision::Common::convertSourceMV2GrayCV(source, image);
if (error != MEDIA_VISION_ERROR_NONE) {
- LOGE("Convertion mv_source_h to gray failed");
+ LOGE("Conversion mv_source_h to gray failed");
return error;
}
error = FaceExpressionRecognizer::recognizeFaceExpression(image, face_location, &expression);
if (error != MEDIA_VISION_ERROR_NONE) {
- LOGE("eye contition recognition failed");
+ LOGE("eye condition recognition failed");
return error;
}
cv::Mat image;
int ret = MediaVision::Common::convertSourceMV2GrayCV(source, image);
if (MEDIA_VISION_ERROR_NONE != ret) {
- LOGE("Convertion mv_source_h to gray failed");
+ LOGE("Conversion mv_source_h to gray failed");
return ret;
}
cv::Mat image;
int ret = MediaVision::Common::convertSourceMV2GrayCV(source, image);
if (MEDIA_VISION_ERROR_NONE != ret) {
- LOGE("Convertion mv_source_h to gray failed");
+ LOGE("Conversion mv_source_h to gray failed");
return ret;
}
* rectangle from {0,0} to @a maxSize
*
* @since_tizen 3.0
- * @param [in] rectange Rectangle which will be cut
+ * @param [in] rectangle Rectangle which will be cut
* @param [in] maxSize Maximum values of needed rectangle
*/
-void catRect(cv::Rect &rectange, const cv::Size &maxSize);
+void catRect(cv::Rect &rectangle, const cv::Size &maxSize);
/**
* @brief Resizes a region.
return insideFlag;
}
-void catRect(cv::Rect &rectange, const cv::Size &maxSize)
+void catRect(cv::Rect &rectangle, const cv::Size &maxSize)
{
- if (rectange.width < 0) {
- rectange.x += rectange.width;
- rectange.width *= -1;
+ if (rectangle.width < 0) {
+ rectangle.x += rectangle.width;
+ rectangle.width *= -1;
}
- if (rectange.height < 0) {
- rectange.y += rectange.height;
- rectange.height *= -1;
+ if (rectangle.height < 0) {
+ rectangle.y += rectangle.height;
+ rectangle.height *= -1;
}
- if (rectange.x > maxSize.width || rectange.y > maxSize.height) {
- rectange.x = 0;
- rectange.y = 0;
- rectange.width = 0;
- rectange.height = 0;
+ if (rectangle.x > maxSize.width || rectangle.y > maxSize.height) {
+ rectangle.x = 0;
+ rectangle.y = 0;
+ rectangle.width = 0;
+ rectangle.height = 0;
return;
}
- if (rectange.x < 0) {
- rectange.width += rectange.x;
- rectange.x = 0;
+ if (rectangle.x < 0) {
+ rectangle.width += rectangle.x;
+ rectangle.x = 0;
}
- if (rectange.y < 0) {
- rectange.height += rectange.y;
- rectange.y = 0;
+ if (rectangle.y < 0) {
+ rectangle.height += rectangle.y;
+ rectangle.y = 0;
}
- if (rectange.x + rectange.width > maxSize.width)
- rectange.width = maxSize.width - rectange.x;
+ if (rectangle.x + rectangle.width > maxSize.width)
+ rectangle.width = maxSize.width - rectangle.x;
- if (rectange.y + rectange.height > maxSize.height)
- rectange.height = maxSize.height - rectange.y;
+ if (rectangle.y + rectangle.height > maxSize.height)
+ rectangle.height = maxSize.height - rectangle.y;
}
std::vector<cv::Point2f> contourResize(const std::vector<cv::Point2f> &roi, float scalingCoefficient)
/* Parameters of cascade tracker */
- const float recognitionBasedTrackerPriotity = 1.0f;
- const float featureSubstitutionTrackerPriotity = 0.6f;
- const float medianFlowTrackerPriotity = 0.1f;
+ const float recognitionBasedTrackerPriority = 1.0f;
+ const float featureSubstitutionTrackerPriority = 0.6f;
+ const float medianFlowTrackerPriority = 0.1f;
/* Parameters of stabilization */
if (asyncRecogTracker == NULL)
LOGE("Failed to create Async Recognition Tracker");
- mainTracker->enableTracker(asyncRecogTracker, recognitionBasedTrackerPriotity);
+ mainTracker->enableTracker(asyncRecogTracker, recognitionBasedTrackerPriority);
/* Adding asynchronous feature substitution based tracker */
if (asyncSubstitutionTracker == NULL)
LOGE("Failed to create Async Substitution Tracker");
- mainTracker->enableTracker(asyncSubstitutionTracker, featureSubstitutionTrackerPriotity);
+ mainTracker->enableTracker(asyncSubstitutionTracker, featureSubstitutionTrackerPriority);
/* Adding median flow tracker */
if (mfTracker == NULL)
LOGE("Failed to create MFTracker");
- mainTracker->enableTracker(mfTracker, medianFlowTrackerPriotity);
+ mainTracker->enableTracker(mfTracker, medianFlowTrackerPriority);
__tracker = mainTracker;
__target = target;
std::vector<int> mSupportedInferenceBackend;
std::string mIniDefaultPath;
std::string mDefaultBackend;
- std::string mDelimeter;
+ std::string mDelimiter;
};
} /* Inference */
TensorBuffer mTensorBuffer;
OutputMetadata mMeta;
int mBoxOffset;
- int mNumberOfOjects;
+ int mNumberOfObjects;
float mScaleW;
float mScaleH;
Boxes mResultBoxes;
: mTensorBuffer(buffer)
, mMeta(metaData)
, mBoxOffset(boxOffset)
- , mNumberOfOjects(numberOfObjects)
+ , mNumberOfObjects(numberOfObjects)
, mScaleW(scaleW)
, mScaleH(scaleH)
, mResultBoxes() {};
// In case of object detection,
// a model may apply post-process but others may not.
- // Thus, those cases should be hanlded separately.
+ // Thus, those cases should be handled separately.
float *boxes = nullptr;
float *classes = nullptr;
if (outputMeta.GetBoxDecodingType() != INFERENCE_BOX_DECODING_TYPE_BYPASS) {
std::vector<int> scoreIndexes = outputMeta.GetScoreDimInfo().GetValidIndexAll();
if (scoreIndexes.size() != 1) {
- LOGE("Invaid dim size. It should be 1");
+ LOGE("Invalid dim size. It should be 1");
return MEDIA_VISION_ERROR_INVALID_OPERATION;
}
numberOfFaces = mOutputLayerProperty.layers[outputMeta.GetScoreName()].shape[scoreIndexes[0]];
PoseDecoder poseDecoder(mOutputTensorBuffers, outputMeta, heatMapWidth, heatMapHeight, heatMapChannel,
number_of_landmarks);
- // initialize decorder queue with landmarks to be decoded.
+ // initialize decoder queue with landmarks to be decoded.
int ret = poseDecoder.init();
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to init poseDecoder");
LOGI("number of landmarks per pose: %d", poseResult->number_of_landmarks_per_pose);
if (poseResult->number_of_landmarks_per_pose >= MAX_NUMBER_OF_LANDMARKS_PER_POSE) {
- LOGE("Exceeded maxinum number of landmarks per pose(%d >= %d).", poseResult->number_of_landmarks_per_pose,
+ LOGE("Exceeded maximum number of landmarks per pose(%d >= %d).", poseResult->number_of_landmarks_per_pose,
MAX_NUMBER_OF_LANDMARKS_PER_POSE);
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
PoseDecoder poseDecoder(mOutputTensorBuffers, outputMeta, heatMapWidth, heatMapHeight, heatMapChannel,
poseResult->number_of_landmarks_per_pose);
- // initialize decorder queue with landmarks to be decoded.
+ // initialize decoder queue with landmarks to be decoded.
int ret = poseDecoder.init();
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to init poseDecoder");
poseResult->number_of_landmarks_per_pose = outputTensorInfo.dimInfo[0][3];
if (poseResult->number_of_landmarks_per_pose >= MAX_NUMBER_OF_LANDMARKS_PER_POSE) {
- LOGE("Exeeded maxinum number of landmarks per pose(%d >= %d).", poseResult->number_of_landmarks_per_pose,
+ LOGE("Exeeded maximum number of landmarks per pose(%d >= %d).", poseResult->number_of_landmarks_per_pose,
MAX_NUMBER_OF_LANDMARKS_PER_POSE);
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
return ltrim(rtrim(s, t), t);
}
-InferenceInI::InferenceInI() : mIniDefaultPath(SYSCONFDIR), mDefaultBackend("OPENCV"), mDelimeter(",")
+InferenceInI::InferenceInI() : mIniDefaultPath(SYSCONFDIR), mDefaultBackend("OPENCV"), mDelimiter(",")
{
mIniDefaultPath += INFERENCE_INI_FILENAME;
}
iniparser_getstring(dict, "inference backend:supported backend types", (char *) mDefaultBackend.c_str()));
size_t pos = 0;
- while ((pos = list.find(mDelimeter)) != std::string::npos) {
+ while ((pos = list.find(mDelimiter)) != std::string::npos) {
std::string tmp = list.substr(0, pos);
mSupportedInferenceBackend.push_back(atoi(tmp.c_str()));
- list.erase(0, pos + mDelimeter.length());
+ list.erase(0, pos + mDelimiter.length());
}
mSupportedInferenceBackend.push_back(atoi(list.c_str()));
std::map<std::string, LayerInfo>().swap(layer);
// TODO: handling error
- // FIXEME: LayerInfo.set()??
+ // FIXME: LayerInfo.set()??
LayerInfo info;
info.name = static_cast<const char *>(json_object_get_string_member(object, "name"));
// mNumberOfObjects is set again if INFERENCE_BOX_DECODING_TYPE_BYPASS.
// Otherwise it is set already within ctor.
- mNumberOfOjects = mTensorBuffer.getValue<int>(mMeta.GetNumberName(), indexes[0]);
+ mNumberOfObjects = mTensorBuffer.getValue<int>(mMeta.GetNumberName(), indexes[0]);
} else if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR) {
if (mMeta.GetBoxDecodeInfo().IsAnchorBoxEmpty()) {
LOGE("Anchor boxes are required but empty.");
BoxesList boxList;
Boxes boxes;
int ret = MEDIA_VISION_ERROR_NONE;
- int totalIdx = mNumberOfOjects;
+ int totalIdx = mNumberOfObjects;
for (int idx = 0; idx < totalIdx; ++idx) {
if (mMeta.GetBoxDecodingType() == INFERENCE_BOX_DECODING_TYPE_BYPASS) {
for (auto &anchorBox : mMeta.GetBoxDecodeInfo().GetAnchorBoxAll()) {
anchorIdx++;
- float score = decodeScore(anchorIdx * mNumberOfOjects + idx);
+ float score = decodeScore(anchorIdx * mNumberOfObjects + idx);
if (score <= 0.0f)
continue;
box::AnchorParam &yoloAnchor = decodeInfo.anchorParam;
//offsetAnchors is 3 which is number of BOX
- mNumberOfOjects = mBoxOffset / yoloAnchor.offsetAnchors - 5;
- boxesList.resize(mNumberOfOjects);
+ mNumberOfObjects = mBoxOffset / yoloAnchor.offsetAnchors - 5;
+ boxesList.resize(mNumberOfObjects);
for (auto strideIdx = 0; strideIdx < yoloAnchor.offsetAnchors; strideIdx++) {
auto &stride = yoloAnchor.strides[strideIdx];
//for each BOX
//handle order is (H,W,A)
float boxScore =
- decodeYOLOScore(anchorIdx * mBoxOffset + (mNumberOfOjects + 5) * offset + 4, strideIdx);
+ decodeYOLOScore(anchorIdx * mBoxOffset + (mNumberOfObjects + 5) * offset + 4, strideIdx);
auto anchorBox = decodeInfo.vAnchorBoxes[strideIdx][anchorIdx * yoloAnchor.offsetAnchors + offset];
- for (int objIdx = 0; objIdx < mNumberOfOjects; ++objIdx) { //each box to every object
+ for (int objIdx = 0; objIdx < mNumberOfObjects; ++objIdx) { //each box to every object
float objScore = decodeYOLOScore(
- anchorIdx * mBoxOffset + (mNumberOfOjects + 5) * offset + 5 + objIdx, strideIdx);
+ anchorIdx * mBoxOffset + (mNumberOfObjects + 5) * offset + 5 + objIdx, strideIdx);
if (boxScore * objScore < mMeta.GetScoreThreshold())
continue;
- Box box = decodeYOLOBox(anchorIdx, objScore, objIdx, (mNumberOfOjects + 5) * offset, strideIdx);
+ Box box = decodeYOLOBox(anchorIdx, objScore, objIdx, (mNumberOfObjects + 5) * offset, strideIdx);
if (!decodeInfo.vAnchorBoxes.empty()) {
box.location.x = (box.location.x * 2 + anchorBox.x) * stride / mScaleW;
return ret;
}
- // addtional parsing is required according to decoding type
+ // additional parsing is required according to decoding type
if (box.GetDecodingType() != INFERENCE_BOX_DECODING_TYPE_BYPASS) {
int ret = box.ParseDecodeInfo(object);
if (ret != MEDIA_VISION_ERROR_NONE) {
float Posture::cosineSimilarity(std::vector<cv::Vec2f> vec1, std::vector<cv::Vec2f> vec2, int size)
{
- float numer = 0.0f;
+ float numerator = 0.0f;
float denom1 = 0.0f;
float denom2 = 0.0f;
float value = 0.0f;
for (int k = 0; k < size; ++k) {
- numer = denom1 = denom2 = 0.0f;
+ numerator = denom1 = denom2 = 0.0f;
for (int dim = 0; dim < 2; ++dim) {
- numer += (vec1[k][dim] * vec2[k][dim]);
+ numerator += (vec1[k][dim] * vec2[k][dim]);
denom1 += (vec1[k][dim] * vec1[k][dim]);
denom2 += (vec2[k][dim] * vec2[k][dim]);
}
- LOGI("similarity: %f", numer / sqrt(denom1 * denom2));
- value += numer / sqrt(denom1 * denom2);
+ LOGI("similarity: %f", numerator / sqrt(denom1 * denom2));
+ value += numerator / sqrt(denom1 * denom2);
}
return value;
{
// In default, Mobilenet v1 ssd model will be used.
// If other model is set by user then strategy pattern will be used
- // to create its corresponding concerte class by calling create().
+ // to create its corresponding concrete class by calling create().
_landmark_detection = make_unique<FldTweakCnn>(LandmarkDetectionTaskType::FLD_TWEAK_CNN);
}
{
// In default, Mobilenet v1 ssd model will be used.
// If other model is set by user then strategy pattern will be used
- // to create its corresponding concerte class by calling create().
+ // to create its corresponding concrete class by calling create().
_landmark_detection = make_unique<PldCpm>(LandmarkDetectionTaskType::PLD_CPM);
}
/**
* @brief parse postprocess node from a given meta file.
- * This is a pure virtual funcation so each derived class
+ * This is a pure virtual function so each derived class
* should implement this function properly.
*
* @param metaInfo A MetaInfo object to output tensor.
{
// In default, FD Mobilenet v1 ssd model will be used.
// If other model is set by user then strategy pattern will be used
- // to create its corresponding concerte class by calling create().
+ // to create its corresponding concrete class by calling create().
_object_detection = make_unique<MobilenetV1Ssd>(ObjectDetectionTaskType::FD_MOBILENET_V1_SSD);
}
{
// In default, Mobilenet v1 ssd model will be used.
// If other model is set by user then strategy pattern will be used
- // to create its corresponding concerte class by calling create().
+ // to create its corresponding concrete class by calling create().
_object_detection = make_unique<MobilenetV1Ssd>(ObjectDetectionTaskType::MOBILENET_V1_SSD);
}
resized.convertTo(floatSrc, CV_32FC3);
- cv::Mat meaned = cv::Mat(floatSrc.size(), CV_32FC3, cv::Scalar(127.5f, 127.5f, 127.5f));
+ cv::Mat meanMat = cv::Mat(floatSrc.size(), CV_32FC3, cv::Scalar(127.5f, 127.5f, 127.5f));
- cv::subtract(floatSrc, meaned, dst);
+ cv::subtract(floatSrc, meanMat, dst);
dst /= 127.5f;
vec.assign((float *) dst.data, (float *) dst.data + dst.total() * dst.channels());
resized.convertTo(floatSrc, CV_32FC3);
- cv::Mat meaned = cv::Mat(floatSrc.size(), CV_32FC3, cv::Scalar(127.5f, 127.5f, 127.5f));
+ cv::Mat meanMat = cv::Mat(floatSrc.size(), CV_32FC3, cv::Scalar(127.5f, 127.5f, 127.5f));
cv::Mat dst;
- cv::subtract(floatSrc, meaned, dst);
+ cv::subtract(floatSrc, meanMat, dst);
dst /= 127.5f;
vec.assign((float *) dst.data, (float *) dst.data + dst.total() * dst.channels());
cv::Mat split_rgbx[4];
cv::split(argb, split_rgbx);
- cv::Mat splitted[] = { split_rgbx[0], split_rgbx[1], split_rgbx[2] };
+ cv::Mat rgbChannels[] = { split_rgbx[0], split_rgbx[1], split_rgbx[2] };
cv::Mat rgb;
- cv::merge(splitted, 3, rgb);
+ cv::merge(rgbChannels, 3, rgb);
cv::Mat resized;
resized.convertTo(floatSrc, CV_32FC3);
- cv::Mat meaned = cv::Mat(floatSrc.size(), CV_32FC3, cv::Scalar(127.5f, 127.5f, 127.5f));
+ cv::Mat meanMat = cv::Mat(floatSrc.size(), CV_32FC3, cv::Scalar(127.5f, 127.5f, 127.5f));
cv::Mat dst;
- cv::subtract(floatSrc, meaned, dst);
+ cv::subtract(floatSrc, meanMat, dst);
dst /= 127.5f;
vec.assign((float *) dst.data, (float *) dst.data + dst.total() * dst.channels());
* @param [in] eventType Type of the event
* @param [in] videoStreamId Video stream identifier
* @param [in] engineCfg The engine configuration for event trigger
- * @param [in] callback The callback to be called if event will be occured
+ * @param [in] callback The callback to be called if the event occurs
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
- * @param [in] roi The intput array with ROI points
+ * @param [in] roi The input array with ROI points
* @param [in] isInternal Interpretation event as internal in surveillance
* @return @c 0 on success, otherwise a negative error value
*/
* @param [in] eventTrigger The event trigger to be register (NULL if internal)
* @param [in] triggerId Unique event trigger identifier to be register
* @param [in] videoStreamId Video stream identifier
- * @param [in] callback The callback to be called if event will be occured
+ * @param [in] callback The callback to be called if the event occurs
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
- * @param [in] roi The intput array with ROI points
+ * @param [in] roi The input array with ROI points
* @param [in] isInternal Interpretation event as internal in surveillance
*/
EventTrigger(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
* @brief Checks if callback with the identifier is subscribed.
*
* @since_tizen 3.0
- * @return true if suscribed, false otherwise
+ * @return true if subscribed, false otherwise
*/
bool isCallbackSubscribed(long int triggerId) const;
* @since_tizen 3.0
* @param [in] eventTrigger The event trigger to be register (NULL if internal)
* @param [in] triggerId Unique event trigger identifier to be subscribed
- * @param [in] callback The callback to be called if event will be occured
+ * @param [in] callback The callback to be called if the event occurs
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
- * @param [in] roi The intput array with ROI points
+ * @param [in] roi The input array with ROI points
* @param [in] isInternal Interpretation event as internal in surveillance
* @return @c true on success, false otherwise
*/
* @param [in, out] image The input image where ROI will be applied
* @param [in] imageWidth The input image width
* @param [in] imageHeight The input image height
- * @param [in] scalePoints True if ROI points must be scaled, false oterwise
+ * @param [in] scalePoints True if ROI points must be scaled, false otherwise
* @param [in] scaleX The scale for X ROI point coordinate
* @param [in] scaleY The scale for Y ROI point coordinate
* @return @c true on success, false otherwise
* @param [in] eventTrigger The event trigger to be register (NULL if internal)
* @param [in] triggerId Unique event trigger identifier to be register
* @param [in] videoStreamId Video stream identifier
- * @param [in] callback The callback to be called if event will be occured
+ * @param [in] callback The callback to be called if the event occurs
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
- * @param [in] roi The intput array with ROI points
+ * @param [in] roi The input array with ROI points
* @param [in] isInternal Interpretation event as internal in surveillance
*/
EventTriggerMovementDetection(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
/**
* @file EventTriggerPersonAppearance.h
- * @brief This file contains interface for person appeared / disapeared events.
+ * @brief This file contains interface for person appeared / disappeared events.
*/
#include "EventTrigger.h"
{
/**
* @class EventResultPersonAppearance
- * @brief This class contains person appeared / disapeared event results.
+ * @brief This class contains person appeared / disappeared event results.
*
* @since_tizen 3.0
*/
/**
* @class EventTriggerPersonAppearance
- * @brief This class contains person appeared / disapeared events.
+ * @brief This class contains person appeared / disappeared events.
*
* @since_tizen 3.0
*/
* @param [in] eventTrigger The event trigger to be register (NULL if internal)
* @param [in] triggerId Unique event trigger identifier to be register
* @param [in] videoStreamId Video stream identifier
- * @param [in] callback The callback to be called if event will be occured
+ * @param [in] callback The callback to be called if the event occurs
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
- * @param [in] roi The intput array with ROI points
+ * @param [in] roi The input array with ROI points
* @param [in] isInternal Interpretation event as internal in surveillance
*/
EventTriggerPersonAppearance(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
namespace surveillance
{
/**
- * @class EventResultPersonRecogniton
+ * @class EventResultPersonRecognition
* @brief This class contains person recognized event results.
*
* @since_tizen 3.0
public:
MVRectangles __locations; /**< Persons locations */
- IntVector __faceLabels; /**< Persons face lables */
+ IntVector __faceLabels; /**< Persons face labels */
DoubleVector __confidences; /**< Persons face recognition confidences */
};
* @param [in] eventTrigger The event trigger to be register (NULL if internal)
* @param [in] triggerId Unique event trigger identifier to be register
* @param [in] videoStreamId Video stream identifier
- * @param [in] callback The callback to be called if event will be occured
+ * @param [in] callback The callback to be called if the event occurs
* @param [in] user_data The user data to be passed to the callback function
* @param [in] numberOfPoints The number of ROI points
- * @param [in] roi The intput array with ROI points
+ * @param [in] roi The input array with ROI points
* @param [in] isInternal Interpretation event as internal in surveillance
*/
EventTriggerPersonRecognition(mv_surveillance_event_trigger_h eventTrigger, long int triggerId, int videoStreamId,
*
* @since_tizen 3.0
* @param [in] mvSource The input media source handle
- * @param [out] cvSource The outut matrix with gray scaled image
+ * @param [out] cvSource The output matrix with gray scaled image
* @return @c 0 on success, otherwise a negative error value
*/
static int convertSourceMVRGB2GrayCVNeon(mv_source_h mvSource, cv::Mat &cvSource);
/**
* @brief Gets mask buffer from buffer with known size.
- * @details Mask buffer values: 0 ouside polygon and 255 inside polygon.
+ * @details Mask buffer values: 0 outside polygon and 255 inside polygon.
*
* @since_tizen 3.0
* @param [in] buffer_width The buffer width
// LCOV_EXCL_START
using namespace cv;
-static const int MAX_VALUE_NAME_LENGHT = 255;
+static const int MAX_VALUE_NAME_LENGTH = 255;
static const int DEFAULT_SKIP_FRAMES_COUNT = 6;
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
}
- if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+ if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_NUMBER, MAX_VALUE_NAME_LENGTH) == 0) {
size_t *const numberOfAppearedPersons = (size_t *) value;
*numberOfAppearedPersons = __appearedLocations.size();
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_APPEARED_LOCATIONS, MAX_VALUE_NAME_LENGTH) == 0) {
mv_rectangle_s *const appearedLocations = (mv_rectangle_s *) value;
const size_t numberOfAppearedPersons = __appearedLocations.size();
for (size_t i = 0u; i < numberOfAppearedPersons; ++i)
appearedLocations[i] = __appearedLocations[i];
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_NUMBER, MAX_VALUE_NAME_LENGTH) == 0) {
size_t *const numberOfTrackedPersons = (size_t *) value;
*numberOfTrackedPersons = __trackedLocations.size();
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_TRACKED_LOCATIONS, MAX_VALUE_NAME_LENGTH) == 0) {
mv_rectangle_s *const trackedLocations = (mv_rectangle_s *) value;
const size_t numberOfTrackedPersons = __trackedLocations.size();
for (size_t i = 0u; i < numberOfTrackedPersons; ++i)
trackedLocations[i] = __trackedLocations[i];
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_NUMBER, MAX_VALUE_NAME_LENGTH) == 0) {
size_t *const numberOfDisappearedPersons = (size_t *) value;
*numberOfDisappearedPersons = __disappearedLocations.size();
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_DISAPPEARED_LOCATIONS, MAX_VALUE_NAME_LENGTH) == 0) {
mv_rectangle_s *const disappearedLocations = (mv_rectangle_s *) value;
const size_t numberOfDisappearedPersons = __disappearedLocations.size();
{
namespace surveillance
{
-static const int MAX_VALUE_NAME_LENGHT = 255;
+static const int MAX_VALUE_NAME_LENGTH = 255;
namespace
{
const size_t numberOfPersons = __locations.size();
- if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, MAX_VALUE_NAME_LENGHT) == 0) {
+ if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_NUMBER, MAX_VALUE_NAME_LENGTH) == 0) {
size_t *outNumberOfPersons = (size_t *) value;
*outNumberOfPersons = numberOfPersons;
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, MAX_VALUE_NAME_LENGTH) == 0) {
mv_rectangle_s *locations = (mv_rectangle_s *) value;
for (size_t i = 0; i < numberOfPersons; ++i)
locations[i] = __locations[i];
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, MAX_VALUE_NAME_LENGTH) == 0) {
int *labels = (int *) value;
for (size_t i = 0; i < numberOfPersons; ++i)
labels[i] = __faceLabels[i];
- } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, MAX_VALUE_NAME_LENGHT) == 0) {
+ } else if (strncmp(valueName, MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, MAX_VALUE_NAME_LENGTH) == 0) {
double *confidences = (double *) value;
for (size_t i = 0; i < numberOfPersons; ++i)
### Why gtest?
I tried libcheck, but since the Tizen API is C/C++, it is hard to cover all of the C++ modules (the internal API relies more on C++).
-libcheck also needs runtime dependancy.
\ No newline at end of file
+libcheck also needs runtime dependency.
\ No newline at end of file
void print_action_result(const char *action_name, int action_return_value, notification_type_e notification_type_e)
{
switch (notification_type_e) {
- case FAIL_OR_SUCCESSS:
+ case FAIL_OR_SUCCESS:
if (MEDIA_VISION_ERROR_NONE != action_return_value)
print_fail_result(action_name, action_return_value);
else
extern "C" {
#endif /* __cplusplus */
-typedef enum { FAIL_OR_SUCCESSS, FAIL_OR_DONE } notification_type_e;
+typedef enum { FAIL_OR_SUCCESS, FAIL_OR_DONE } notification_type_e;
/**
* @brief Prints success result of action.
unsigned long buffer_size;
} mv_video_writer_s;
-/* video reader internal funcitons */
+/* video reader internal functions */
static int _mv_video_reader_create_internals(mv_video_reader_s *reader);
static int _mv_video_reader_link_internals(mv_video_reader_s *reader);
static int _mv_video_reader_state_change(mv_video_reader_s *reader, GstState state);
-/* video writer internal funciton */
+/* video writer internal function */
static int _mv_video_writer_create_internals(mv_video_writer_s *writer);
static int _mv_video_writer_link_internals(mv_video_writer_s *writer);
static int _mv_video_writer_state_change(mv_video_writer_s *writer, GstState state);
ret = eglMakeCurrent(s_dpy, s_sfc, s_sfc, s_ctx);
if (ret != EGL_TRUE) {
- LOGE("Falied to call eglMakeCurrent()");
+ LOGE("Failed to call eglMakeCurrent()");
return MEDIA_VISION_ERROR_INTERNAL;
}
printf(TEXT_RED "Can't read from specified directory (%s)\n" TEXT_RESET, in_file_name);
}
} else {
- *notification_type = FAIL_OR_SUCCESSS;
+ *notification_type = FAIL_OR_SUCCESS;
mv_rectangle_s roi;
err = add_single_example(model, in_file_name, &roi, NULL);
}
while (!sel_opt) {
sel_opt = show_menu("Select action:", options, names, 11);
- notification_type_e notification_type = FAIL_OR_SUCCESSS;
+ notification_type_e notification_type = FAIL_OR_SUCCESS;
switch (sel_opt) {
case 1:
while (!sel_opt) {
sel_opt = show_menu("Select action:", options, names, 6);
- notification_type_e notification_type = FAIL_OR_SUCCESSS;
+ notification_type_e notification_type = FAIL_OR_SUCCESS;
switch (sel_opt) {
case 1:
"/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/OD/tflite/od_yolo_v5_320x320.tflite"
#define OD_TFLITE_META_YOLO_V5_320_PATH \
"/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/OD/tflite/od_yolo_v5_320x320.json"
-#define OD_LABLE_YOLO_V5_320_PATH \
+#define OD_LABEL_YOLO_V5_320_PATH \
"/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/OD/tflite/od_yolo_v5_label.txt"
//Face Detection
/*
* Hosted models
*/
-#define FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH \
+#define FLD_TFLITE_WEIGHT_TWEAKCNN_128_PATH \
"/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/FLD/tflite/fld_tweakcnn_128x128.tflite"
#define FLD_TFLITE_META_TWEAKCNN_128_PATH \
"/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/FLD/tflite/fld_tweakcnn_128x128.json"
-#define FLD_TFLITE_WIEGHT_MEDIAPIPE_192_PATH \
+#define FLD_TFLITE_WEIGHT_MEDIAPIPE_192_PATH \
"/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/FLD/tflite/fld_mediapipe_192x192.tflite"
#define FLD_TFLITE_META_MEDIAPIPE_192_PATH \
"/opt/usr/home/owner/media/Others/mv_test/open_model_zoo/models/FLD/tflite/fld_mediapipe_192x192.json"
} break;
case 7: {
err = engine_config_user_hosted_tflite_cpu(engine_cfg, OD_TFLITE_WEIGHT_YOLO_V5_320_PATH,
- OD_LABLE_YOLO_V5_320_PATH, OD_TFLITE_META_YOLO_V5_320_PATH);
+ OD_LABEL_YOLO_V5_320_PATH, OD_TFLITE_META_YOLO_V5_320_PATH);
} break;
}
if (err != MEDIA_VISION_ERROR_NONE) {
err = perform_opencv_cnncascade(engine_cfg);
} break;
case 3: {
- err = engine_config_hosted_tflite_cpu(engine_cfg, FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH,
+ err = engine_config_hosted_tflite_cpu(engine_cfg, FLD_TFLITE_WEIGHT_TWEAKCNN_128_PATH,
FLD_TFLITE_META_TWEAKCNN_128_PATH);
} break;
case 4: {
- err = engine_config_hosted_tflite_cpu(engine_cfg, FLD_TFLITE_WIEGHT_MEDIAPIPE_192_PATH,
+ err = engine_config_hosted_tflite_cpu(engine_cfg, FLD_TFLITE_WEIGHT_MEDIAPIPE_192_PATH,
FLD_TFLITE_META_MEDIAPIPE_192_PATH);
} break;
}
TEST_RES_PATH \
"/res/inference/images/faceLandmark.jpg"
-#define FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH \
+#define FLD_TFLITE_WEIGHT_TWEAKCNN_128_PATH \
TEST_RES_PATH \
"/open_model_zoo/models/FLD/tflite/fld_tweakcnn_128x128.tflite"
#define FLD_TFLITE_META_TWEAKCNN_128_PATH \
TEST_P(TestFaceLandmarkDetectionTflite, TweakCNN)
{
- engine_config_hosted_tflite_model(engine_cfg, FLD_TFLITE_WIEGHT_TWEAKCNN_128_PATH,
+ engine_config_hosted_tflite_model(engine_cfg, FLD_TFLITE_WEIGHT_TWEAKCNN_128_PATH,
FLD_TFLITE_META_TWEAKCNN_128_PATH, _use_json_parser, _target_device_type);
if (_use_json_parser) {
inferenceFaceLandmark();
{
const int error = mv_surveillance_event_trigger_destroy(trigger);
if (MEDIA_VISION_ERROR_NONE != error) {
- PRINT_E("Error with code %d was occured when try to destroy "
+ PRINT_E("Error with code %d was occurred when try to destroy "
"event trigger.",
error);
return false;
const int error = mv_surveillance_unsubscribe_event_trigger(event_trigger, video_streams_ids[trigger_id]);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in unsubscribe event.", error);
+ PRINT_E("Error with code %d was occurred in unsubscribe event.", error);
return;
}
void detect_person_appeared_cb(mv_surveillance_event_trigger_h handle, mv_source_h source, int video_stream_id,
mv_surveillance_result_h event_result, void *user_data)
{
- PRINT_G("Person appeared / disappeared event was occured");
+ PRINT_G("Person appeared / disappeared event was occurred");
if (save_results_to_image)
PRINT_G("Output image will be saved to /tmp/person_app.jpg.\n"
"Appeared locations - green;\n"
&number_of_appeared_persons);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting number of "
+ PRINT_E("Error with code %d was occurred in getting number of "
"appeared persons.",
error);
if (out_buffer_copy != NULL)
appeared_locations);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting locations of "
+ PRINT_E("Error with code %d was occurred in getting locations of "
"appeared persons.",
error);
&number_of_tracked_persons);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting number of "
+ PRINT_E("Error with code %d was occurred in getting number of "
"tracked persons.",
error);
tracked_locations);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting locations of "
+ PRINT_E("Error with code %d was occurred in getting locations of "
"tracked persons.",
error);
&number_of_disappeared_persons);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting number of "
+ PRINT_E("Error with code %d was occurred in getting number of "
"disappeared persons.",
error);
disappeared_locations);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting locations of "
+ PRINT_E("Error with code %d was occurred in getting locations of "
"disappeared persons.",
error);
&number_of_persons);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting number of persons.", error);
+ PRINT_E("Error with code %d was occurred in getting number of persons.", error);
return;
}
error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LOCATIONS, locations);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting locations of persons.", error);
+ PRINT_E("Error with code %d was occurred in getting locations of persons.", error);
if (locations != NULL)
free(locations);
error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_LABELS, labels);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting labels of persons.", error);
+ PRINT_E("Error with code %d was occurred in getting labels of persons.", error);
if (locations != NULL)
free(locations);
error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_PERSONS_RECOGNIZED_CONFIDENCES, confidences);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting confidences of persons.", error);
+ PRINT_E("Error with code %d was occurred in getting confidences of persons.", error);
if (locations != NULL)
free(locations);
void movement_detected_cb(mv_surveillance_event_trigger_h event_trigger, mv_source_h source, int video_stream_id,
mv_surveillance_result_h event_result, void *user_data)
{
- PRINT_G("Movement detected event was occured");
+ PRINT_G("Movement detected event was occurred");
if (save_results_to_image)
PRINT_G("Output image will be saved to /tmp/move_detect.jpg.\n"
"Movement detected locations - blue.");
&number_of_movement_regions);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting number of "
+ PRINT_E("Error with code %d was occurred in getting number of "
"movement regions.",
error);
error = mv_surveillance_get_result_value(event_result, MV_SURVEILLANCE_MOVEMENT_REGIONS, movement_regions);
if (error != MEDIA_VISION_ERROR_NONE) {
- PRINT_E("Error with code %d was occured in getting movement regions.", error);
+ PRINT_E("Error with code %d was occurred in getting movement regions.", error);
if (movement_regions != NULL)
free(movement_regions);