GazeEstimator returns axes of a head pose.
The origin of the axes is the nose tip of the detected face.
Then, XYZ axes from the origin are given and the direction of the axes
indicate the head pose.
The test_focusfinder_on_screen testcase is added; it works when
enable_visualizer is set to 1 in the spec file.
Note that this patch uses the FLD_U2NET model for face landmarks with mediavision,
which provides 68 landmarks. With other models it may return unexpected results.
Change-Id: If22c1434031b8b2577a0fe405e79d37de8626c11
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
~NoData() final = default;
};
+// Thrown when a cast between service data types fails (e.g. a dynamic_cast
+// to a concrete result type). The message is prefixed with "Bad Casting: "
+// and the error code defaults to SINGLEO_ERROR_EFAULT.
+class BadCast : public BaseException
+{
+public:
+	BadCast(std::string msg, int errorType = SINGLEO_ERROR_EFAULT)
+			: BaseException("Bad Casting: " + msg, errorType)
+	{}
+	~BadCast() final = default;
+};
}; // exception
}; // singleo
{
// TODO
-enum class ServiceResultType { NONE, AUTO_ZOOM };
+enum class ServiceResultType { NONE, AUTO_ZOOM, FOCUS_FINDER };
struct ServiceBaseResultType {
ServiceResultType _result_type { ServiceResultType::NONE };
focus_finder/src/CameraParamManager.cpp
focus_finder/src/FaceShapeModelManager.cpp
focus_finder/src/HeadPoseEstimator.cpp
+ focus_finder/src/FocusMapper.cpp
)
LIST(APPEND SERVICE_LIBRARY_LIST ${SERVICE_LIBRARY_LIST})
SET(SINGLEO_SERVICE_RESOURCE_FILES ${SINGLEO_SERVICE_RESOURCE_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/focus_finder/res/pdm.txt)
SET(SINGLEO_SERVICE_RESOURCE_FILES ${SINGLEO_SERVICE_RESOURCE_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/focus_finder/res/camera_param_dummy.yaml)
+SET(SINGLEO_SERVICE_RESOURCE_FILES ${SINGLEO_SERVICE_RESOURCE_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/focus_finder/res/camera_param_rs2_640.yaml)
INSTALL(FILES ${SINGLEO_SERVICE_RESOURCE_FILES} DESTINATION /usr/share/singleo)
\ No newline at end of file
#include "IInputObserver.h"
#include "ServiceDataType.h"
#include "GazeEstimator.h"
+#include "AsyncManager.h"
+#include "FocusFinderResult.h"
+#include "FocusMapper.h"
namespace singleo
{
static bool _registered;
std::unique_ptr<singleo::input::IInputService> _data_feeder;
std::unique_ptr<GazeEstimator> _gaze_estimator;
+ std::unique_ptr<IPreprocessor> _preprocessor;
+ std::unique_ptr<FocusMapper> _postprocessor;
std::queue<std::shared_ptr<BaseDataType> > _input_q;
std::mutex _input_mutex;
+ FocusFinderResult _result;
+ std::map<std::string, FocusFinderResultType> _result_keys = { { "X", FocusFinderResultType::X },
+ { "Y", FocusFinderResultType::Y } };
+ std::unique_ptr<AsyncManager<ImageDataType, FocusFinderResult> > _async_manager;
+
singleo_user_cb_t _user_cb {};
void *_user_data {};
+ bool isKeyValid(std::string key);
+ void updateResult(const std::vector<Point> &axes);
+
public:
FocusFinder();
virtual ~FocusFinder();
--- /dev/null
+/**
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FOCUS_FINDER_RESULT_H__
+#define __FOCUS_FINDER_RESULT_H__
+
+#include "SingleoCommonTypes.h"
+#include "ServiceDataType.h"
+
+namespace singleo
+{
+namespace services
+{
+namespace focusfinder
+{
+// Keys for scalar result lookup (focus point coordinates).
+enum class FocusFinderResultType { X, Y };
+
+// Result payload of the FocusFinder service.
+// Tagged with ServiceResultType::FOCUS_FINDER so postprocessors can verify
+// the incoming result type before down-casting.
+struct FocusFinderResult : public ServiceBaseResultType {
+	FocusFinderResult() : ServiceBaseResultType(ServiceResultType::FOCUS_FINDER)
+	{}
+	// Frame the result was computed for. NOTE(review): never assigned in this
+	// patch — confirm it is filled in by a later change.
+	unsigned int frame_number {};
+	// True when head axes were estimated for at least one face.
+	bool is_focused { false };
+	// Estimated focus point on screen. TODO in FocusFinder::updateResult —
+	// currently always (0, 0).
+	Point focus;
+	// Projected head-pose axes: origin (nose tip) plus X/Y/Z end points.
+	std::vector<Point> head_axes;
+};
+}
+}
+}
+
+#endif
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __FOCUS_MAPPER_H__
+#define __FOCUS_MAPPER_H__
+
+#include <vector>
+#include <opencv2/opencv.hpp>
+#include <mutex>
+
+#include "IPostprocessor.h"
+#include "SingleoCommonTypes.h"
+#include "ServiceDataType.h"
+#include "FocusFinderResult.h"
+
+namespace singleo
+{
+namespace services
+{
+namespace focusfinder
+{
+// Postprocessor that draws the estimated head-pose axes onto a frame.
+// addInput() stores the latest FocusFinderResult; invoke() copies the given
+// image and overlays the axes; getOutput() returns the annotated image.
+// _mutex guards _result against concurrent addInput()/invoke() calls.
+class FocusMapper : public IPostprocessor
+{
+private:
+	FocusFinderResult _result;
+	// Deep copy of the last input frame with the overlay drawn on it.
+	cv::Mat _cv_image;
+	// View onto _cv_image (ptr aliases _cv_image.data) — valid only until
+	// the next invoke() call.
+	ImageDataType _image_data;
+	std::mutex _mutex;
+
+public:
+	FocusMapper() = default;
+	virtual ~FocusMapper() = default;
+
+	void addInput(ServiceBaseResultType &data) override;
+	void invoke(BaseDataType &data) override;
+	BaseDataType &getOutput() override;
+	bool isWorking() override;
+};
+
+}
+}
+}
+
+#endif
\ No newline at end of file
private:
std::shared_ptr<IFaceTaskManager> _face_tasker;
std::unique_ptr<HeadPoseEstimator> _head_pose_estimator;
+ std::vector<Point> _head_axes;
public:
GazeEstimator();
~GazeEstimator();
- void run(BaseDataType &input);
+ std::vector<Point> run(BaseDataType &input);
};
} // focusfinder
} // services
cv::Mat _rotation_matrix;
std::vector<cv::Point3f> _pose_axes_3d;
- std::vector<cv::Point2f> _pose_axes;
+ std::vector<Point> _pose_axes;
bool _isInitialized;
bool validRotationMatrix(cv::Mat &matrix);
HeadPoseEstimator(const std::vector<FaceComponents> &components);
~HeadPoseEstimator();
- std::tuple<cv::Point3f, cv::Point3f> estimate(const std::vector<singleo::Point> &landmark2d);
- std::vector<cv::Point2f> &getPoseAxes();
+ std::tuple<cv::Point3f, cv::Point3f> estimate(const FaceResult &face);
+ std::vector<Point> &getPoseAxes();
std::vector<int> &getFaceComponentsIndieces();
};
} // smartpointer
--- /dev/null
+%YAML:1.0
+camera_matrix: !!opencv-matrix
+ rows: 3
+ cols: 3
+ dt: f
+ data: [379.787, 0.0, 320.610,
+ 0.0, 379.787, 233.605,
+ 0.0, 0.0, 1.0]
+dist_coeff: !!opencv-matrix
+ rows: 5
+ cols: 1
+ dt: f
+ data: [0.0, 0.0, 0.0, 0.0, 0.0]
\ No newline at end of file
// Builds the fixed processing pipeline: gaze estimation plus image pre/post
// processing. Input feeding and the async manager are set up later.
FocusFinder::FocusFinder()
{
	_gaze_estimator = make_unique<GazeEstimator>();
+	_preprocessor = make_unique<ImagePreprocessor>();
+	_postprocessor = make_unique<FocusMapper>();
}
FocusFinder::~FocusFinder()
-{}
+{
+	// _async_manager exists only after performAsync(), which also requires
+	// _data_feeder — so streamOff() is safe to call here.
+	if(_async_manager) {
+		_data_feeder->streamOff();
+		_async_manager->destroy();
+	}
+}
void FocusFinder::configure(InputConfigBase &config)
{
}
// Camera feed callback: preprocesses the frame, forwards a display copy to
// the user callback (with the focus overlay when available), and queues the
// preprocessed frame for async gaze estimation.
void FocusFinder::inputFeedCb(BaseDataType &data)
-{}
+{
+	_preprocessor->addInput(data);
+
+	// Original image not preprocessed is used to display on screen.
+	auto originalImg = dynamic_pointer_cast<ImageDataType>(_preprocessor->getOutput().clone());
+
+	_preprocessor->invoke();
+
+	auto preprocessed = dynamic_pointer_cast<ImageDataType>(_preprocessor->getOutput().clone());
+
+	// Both clones are expected to be image data; drop the frame instead of
+	// dereferencing a null pointer if the cast ever fails.
+	if (!originalImg || !preprocessed) {
+		SINGLEO_LOGE("preprocessor output is not an image.");
+		return;
+	}
+
+	// Set is_owned to false to prevent from releasing preprocessed image data
+	// because the owner of preprocessed is FocusFinder so the preprocessed will be released
+	// in performAsync function.
+	preprocessed->is_owned = false;
+
+	if (_user_cb) {
+		if (_result.is_focused) {
+			// Overlay the head-pose axes on the original frame first.
+			_postprocessor->invoke(*originalImg);
+			ImageDataType focused_data = dynamic_cast<ImageDataType &>(_postprocessor->getOutput());
+			_user_cb(focused_data.ptr, focused_data.width, focused_data.height, focused_data.byte_per_pixel,
+					 _user_data);
+		} else {
+			_user_cb(originalImg->ptr, originalImg->width, originalImg->height, originalImg->byte_per_pixel,
+					 _user_data);
+		}
+	}
+
+	// Release the buffer if the async queue did not take ownership. The feed
+	// can start before performAsync() finishes creating _async_manager, so
+	// guard against null as well. Array delete matches the array allocation
+	// done by clone() — assumes clone() uses new[]; TODO confirm.
+	if (!_async_manager || _async_manager->pushInput(*preprocessed) != SINGLEO_ERROR_NONE)
+		delete [] preprocessed->ptr;
+}
+
+// A key is valid when it names one of the published result fields
+// (currently "X" and "Y").
+bool FocusFinder::isKeyValid(std::string key)
+{
+	return _result_keys.count(key) != 0;
+}
void FocusFinder::add_input(BaseDataType &input_data)
{
void FocusFinder::perform()
{
- ImagePreprocessor preprocessor;
-
// If input service is not set, input data comes from user.
// In this case, get input data from _inputs.
if (!_data_feeder) {
std::lock_guard<std::mutex> lock(_input_mutex);
- preprocessor.addInput(*_input_q.front());
+ _preprocessor->addInput(*_input_q.front());
_input_q.pop();
} else {
ImageDataType input_data;
_data_feeder->capture(input_data);
- preprocessor.addInput(input_data);
+ _preprocessor->addInput(input_data);
}
- preprocessor.invoke();
+ _preprocessor->invoke();
+
+ auto result = _gaze_estimator->run(dynamic_cast<ImageDataType &>(_preprocessor->getOutput()));
- _gaze_estimator->run(dynamic_cast<ImageDataType &>(preprocessor.getOutput()));
+ updateResult(result);
}
// Starts asynchronous processing: frames arrive via inputFeedCb() and are
// consumed by the async manager's invoke callback below.
// Throws InvalidOperation when no input feed service is configured.
void FocusFinder::performAsync()
{
-	throw FuncNotImpl("performAsync Not Supported Yet");
+	if (!_data_feeder) {
+		SINGLEO_LOGE("This API is valid only the case that input feed service is used.");
+		throw InvalidOperation("Invalid API request.");
+	}
+
+	_async_manager = make_unique<AsyncManager<ImageDataType, FocusFinderResult> >();
+	_async_manager->registerInvokeCb(this, [this](IService *service, BaseDataType &data) {
+		auto focus_finder = static_cast<FocusFinder *>(service);
+
+		auto result = focus_finder->_gaze_estimator->run(data);
+
+		// Bind by reference — a copy is not needed just to inspect
+		// is_owned and free the buffer.
+		auto &imageData = dynamic_cast<ImageDataType &>(data);
+
+		focus_finder->updateResult(result);
+
+		// A given imageData - allocated by inputFeedCb function - has been used so release it here.
+		// Array delete matches the array allocation — assumes clone() uses
+		// new[]; TODO confirm.
+		if (!imageData.is_owned)
+			delete [] imageData.ptr;
+	});
+
+	// Turn the stream on only after the async manager is fully set up;
+	// otherwise inputFeedCb could observe a null _async_manager.
+	_data_feeder->streamOn();
+}
+
+// Packages the head-pose axes into a FocusFinderResult and publishes it:
+// to the async output queue in async mode, otherwise to _result directly.
+// is_focused simply means "axes were estimated for this frame".
+void FocusFinder::updateResult(const vector<Point> &axes)
+{
+	FocusFinderResult result;
+	// TODO: Implement focus point calculation based on head axes.
+	result.focus = Point { .x = 0, .y = 0 };
+
+	result.head_axes = axes;
+	if (!result.head_axes.empty()) {
+		result.is_focused = true;
+	}
+
+	if (_async_manager)
+		_async_manager->pushOutput(result);
+	else
+		_result = result;
+}
// Reports how many results are available (0 or 1) and hands the latest
// result to the postprocessor for overlay drawing.
void FocusFinder::getResultCnt(unsigned int *cnt)
-{}
+{
+	// NOTE(review): popOutput() presumably blocks or throws when no output
+	// is queued yet — confirm AsyncManager's contract.
+	if (_async_manager)
+		_result = _async_manager->popOutput();
+
+	*cnt = _result.is_focused ? 1 : 0;
+
+	// The result is forwarded even when not focused so the postprocessor
+	// always holds the most recent state.
+	_postprocessor->addInput(_result);
+}
void FocusFinder::getResultInt(unsigned int idx, std::string key, unsigned int *value)
{}
--- /dev/null
+/**
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SingleoException.h"
+#include "SingleoLog.h"
+#include "FocusMapper.h"
+
+using namespace std;
+using namespace cv;
+using namespace singleo::exception;
+
+namespace singleo
+{
+namespace services
+{
+namespace focusfinder
+{
+// Stores the latest focus finder result for use by invoke().
+// Throws InvalidParameter when the result is not a FOCUS_FINDER result and
+// BadCast when the down-cast fails despite a matching type tag.
+void FocusMapper::addInput(ServiceBaseResultType &data)
+{
+	if (data._result_type != ServiceResultType::FOCUS_FINDER) {
+		SINGLEO_LOGE("invalid result data type.");
+		throw InvalidParameter("invalid result data type.");
+	}
+
+	// Guarded by _mutex: addInput() and invoke() may run on different threads.
+	std::lock_guard<std::mutex> lock(_mutex);
+	try {
+		_result = dynamic_cast<FocusFinderResult &>(data);
+	} catch (const std::bad_cast &e) {
+		SINGLEO_LOGE("bad casting.");
+		throw BadCast("bad casting.");
+	}
+}
+
+// Copies the given image and draws the head-pose axes stored by addInput()
+// onto it. The annotated image is then available via getOutput().
+// Throws InvalidParameter when the input is not image data.
+void FocusMapper::invoke(BaseDataType &data)
+{
+	if (data._data_type != DataType::IMAGE) {
+		SINGLEO_LOGE("invalid data type.");
+		throw InvalidParameter("invalid data type.");
+	}
+
+	std::lock_guard<std::mutex> lock(_mutex);
+	ImageDataType &image_data = dynamic_cast<ImageDataType &>(data);
+
+	// Deep copy so the caller's buffer is never modified.
+	_cv_image = cv::Mat(cv::Size(image_data.width, image_data.height), CV_MAKETYPE(CV_8U, 3), image_data.ptr).clone();
+
+	// The overlay indexes head_axes[0..3] (origin, X, Y, Z end points);
+	// guard against shorter vectors to avoid out-of-bounds access when a
+	// result without axes reaches this point.
+	if (_result.head_axes.size() >= 4) {
+		const Point2i origin(_result.head_axes[0].x, _result.head_axes[0].y);
+
+		line(_cv_image, origin, Point2i(_result.head_axes[1].x, _result.head_axes[1].y), Scalar(0, 0, 255), 2); // x - red
+		line(_cv_image, origin, Point2i(_result.head_axes[2].x, _result.head_axes[2].y), Scalar(0, 255, 0), 2); // y - green
+		line(_cv_image, origin, Point2i(_result.head_axes[3].x, _result.head_axes[3].y), Scalar(255, 0, 0), 2); // z - blue
+	} else {
+		SINGLEO_LOGE("head_axes has %zu points but 4 are required. Skip drawing.", _result.head_axes.size());
+	}
+
+	// Publish the annotated frame. ptr aliases _cv_image's buffer, so the
+	// output is only valid until the next invoke() call.
+	_image_data.width = _cv_image.cols;
+	_image_data.height = _cv_image.rows;
+	_image_data.byte_per_pixel = _cv_image.channels();
+	_image_data.ptr = _cv_image.data;
+}
+
+// Returns the image annotated by the last invoke() call. The returned
+// reference's ptr aliases the internal cv::Mat buffer — it is invalidated
+// by the next invoke().
+BaseDataType &FocusMapper::getOutput()
+{
+	return _image_data;
+}
+
+// FocusMapper works synchronously inside invoke(), so it never reports a
+// pending in-flight operation.
+bool FocusMapper::isWorking()
+{
+	return false;
+}
+
+} // focusfinder
+} // services
+} // singleo
\ No newline at end of file
#include "SingleoLog.h"
using namespace std;
+using namespace cv;
namespace singleo
{
_face_tasker = make_shared<FaceDetector>();
_face_tasker->setNext(make_shared<FaceLandmarker>());
- const vector<FaceComponents> _faceComponents { FaceComponents::NOSE_RIDGE, FaceComponents::NOSE_BASE,
+ const vector<FaceComponents> faceComponents { FaceComponents::NOSE_RIDGE, FaceComponents::NOSE_BASE,
FaceComponents::LEFT_EYE_CORNERS,
FaceComponents::RIGHT_EYE_CORNERS };
- _head_pose_estimator = make_unique<HeadPoseEstimator>(_faceComponents);
+ _head_pose_estimator = make_unique<HeadPoseEstimator>(faceComponents);
}
GazeEstimator::~GazeEstimator()
{}
-void GazeEstimator::run(BaseDataType &input)
+vector<Point> GazeEstimator::run(BaseDataType &input)
{
FaceResult result;
_face_tasker->handle(input, result);
+ SINGLEO_LOGI("FaceResult: faces %zu", result._rects.size());
- SINGLEO_LOGI("FaceResult: faces %zu, landmarks %zu-%zu", result._rects.size(), result._landmarks.size(),
- result._landmarks[0].size());
+ _head_axes.clear();
+ if (!result._landmarks.empty()) {
+ SINGLEO_LOGI("landmarks %zu-%zu", result._landmarks.size(), result._landmarks[0].size());
- // TODO: use multiple faces
- auto head_pose = _head_pose_estimator->estimate(result._landmarks[0]);
- SINGLEO_LOGI("head pose: \n\t rotation(x,y,z): %.3f, %.3f, %.3f\n\t translation(x,y,z): %.3f, %.3f, %.3f\n",
- get<0>(head_pose).x, get<0>(head_pose).y, get<0>(head_pose).z, get<1>(head_pose).x,
- get<1>(head_pose).y, get<1>(head_pose).z);
+ auto head_pose = _head_pose_estimator->estimate(result);
+ SINGLEO_LOGI("head pose: \n\t rotation(x,y,z): %.3f, %.3f, %.3f\n\t translation(x,y,z): %.3f, %.3f, %.3f\n",
+ get<0>(head_pose).x, get<0>(head_pose).y, get<0>(head_pose).z, get<1>(head_pose).x,
+ get<1>(head_pose).y, get<1>(head_pose).z);
+
+ _head_axes = _head_pose_estimator->getPoseAxes();
+ }
+
+ return _head_axes;
}
-} // smartpointer
+} // focusfinder
} // services
} // singleo
#include "HeadPoseEstimator.h"
#include "SingleoLog.h"
-#include <iostream>
-
using namespace std;
using namespace cv;
_rotation_vector = Mat(Size(3, 1), CV_32FC1);
_translation_vector = Mat(Size(3, 1), CV_32FC1);
- CameraParamManager cam_dummy("/usr/share/singleo/camera_param_dummy.yaml");
+ CameraParamManager cam_dummy("/usr/share/singleo/camera_param_rs2_640.yaml");
_camera_matrix = Mat::zeros(Size(3, 3), CV_32FC1);
tuple<float, float> focalLength = cam_dummy.getFocalLengthXY();
SINGLEO_LOGD("%.3f, %.3f, %.3f", vec[0], vec[1], vec[2]);
}
-tuple<Point3f, Point3f> HeadPoseEstimator::estimate(const vector<Point> &landmark2d)
+tuple<Point3f, Point3f> HeadPoseEstimator::estimate(const FaceResult &face)
{
// TODO: parallel??, c++17 structured binding??
- for (size_t idx = 0; idx < landmark2d.size(); ++idx) {
- _landmarks_2d[idx].x = landmark2d[idx].x;
- _landmarks_2d[idx].y = landmark2d[idx].y;
+ // TODO: use multiple faces
+ size_t idx = 0;
+ for (auto &compIdx : _faceComponentIndices) {
+ _landmarks_2d[idx].x = face._landmarks[0][compIdx].x + face._rects[0].left;
+ _landmarks_2d[idx].y = face._landmarks[0][compIdx].y + face._rects[0].top;
+ idx++;
}
-
solvePnPRansac(_landmarks_3d, _landmarks_2d, _camera_matrix, _camera_dist_coeff, _rotation_vector,
_translation_vector, false, 100, 3.0F, 0.99, noArray(), SOLVEPNP_EPNP);
static_cast<float>(_translation_vector.at<double>(2)) });
}
-vector<Point2f> &HeadPoseEstimator::getPoseAxes()
+vector<Point> &HeadPoseEstimator::getPoseAxes()
{
vector<Point2f> poseAxes2d;
projectPoints(_pose_axes_3d, _rotation_vector, _translation_vector, _camera_matrix, _camera_dist_coeff, poseAxes2d);
for (size_t i = 0; i < poseAxes2d.size(); i++) {
- _pose_axes[i].x = poseAxes2d[i].x;
- _pose_axes[i].y = poseAxes2d[i].y;
+ _pose_axes[i].x = static_cast<int>(poseAxes2d[i].x);
+ _pose_axes[i].y = static_cast<int>(poseAxes2d[i].y);
}
return _pose_axes;
} catch (const std::bad_alloc &e) {
SINGLEO_LOGE("A new node creation failed.(%s)", e.what());
throw BadAddress("A new node creation failed.");
- } catch(const BaseException &e) {
+ } catch (const BaseException &e) {
SINGLEO_LOGE("Node type not supported.");
throw e;
}
IF (${USE_VISUALIZER})
SET(TEST_SOURCE_ON_SCREEN_LIST test_autozoom_on_screen.cpp)
+ SET(TEST_SOURCE_ON_SCREEN_LIST ${TEST_SOURCE_ON_SCREEN_LIST} test_focusfinder_on_screen.cpp)
ADD_EXECUTABLE(test_singleo_on_screen ${TEST_SOURCE_ON_SCREEN_LIST})
TARGET_COMPILE_DEFINITIONS(test_singleo_on_screen PRIVATE -DTEST_RES_PATH="${TEST_RES_PATH}")
--- /dev/null
+/**
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <thread>
+#include <opencv2/opencv.hpp>
+
+#include "gtest/gtest.h"
+#include "singleo_native_capi_internal.h"
+#include "singleo_util_visualizer_2d.h"
+#include "singleo_error.h"
+
+using namespace testing;
+using namespace std;
+
+struct Context {
+ singleo_service_h handle {};
+};
+
+// Polls the service for result counts on a worker thread, stopping after 10
+// frames or on the first API error.
+static bool focusfinder_callback(void *user_data)
+{
+	Context *context = static_cast<Context *>(user_data);
+	singleo_service_h handle = static_cast<singleo_service_h>(context->handle);
+	bool is_loop_exit = false;
+	unsigned long frame_number = 0;
+
+	while (!is_loop_exit) {
+		unsigned int cnt = 1;
+
+		int ret = singleo_service_get_result_cnt(handle, &cnt);
+		// gtest ASSERT_* macros are only usable in void-returning functions,
+		// so exit the polling loop on failure instead of asserting here.
+		if (ret != SINGLEO_ERROR_NONE)
+			break;
+
+		cout << "cnt = " << cnt << " frame number = " << frame_number << endl;
+		if (++frame_number > 10)
+			is_loop_exit = true;
+	}
+
+	return false;
+}
+
+// Renders each frame delivered by the service on screen.
+// Assumes a 3-channel BGR buffer; bytes_per_pixel and user_data are unused.
+static void user_callback(unsigned char *buffer, unsigned int width, unsigned int height, unsigned int bytes_per_pixel,
+						  void *user_data)
+{
+	cv::Mat result(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer);
+	cv::cvtColor(result, result, cv::COLOR_BGR2RGB);
+
+	singleo_util_visualizer_2d(result, NULL);
+}
+
+// End-to-end on-screen test: creates a camera-fed FocusFinder service,
+// registers the display callback, performs a request, and polls results on
+// a worker thread before tearing the service down.
+TEST(FocusFinderSyncOnScreenTest, RequestWithCameraShouldBeOk)
+{
+	Context context {};
+
+	int ret = singleo_service_create("service=FocusFinder, input_feed=camera, camera_id=4, fps=30, async=1",
+									 &context.handle);
+	ASSERT_EQ(ret, SINGLEO_ERROR_NONE);
+
+	ret = singleo_service_register_user_callback(context.handle, user_callback, &context);
+	ASSERT_EQ(ret, SINGLEO_ERROR_NONE);
+
+	ret = singleo_service_perform(context.handle);
+	ASSERT_EQ(ret, SINGLEO_ERROR_NONE);
+
+	// Poll result counts on a separate thread and wait for it to finish.
+	thread poller(&focusfinder_callback, static_cast<void *>(&context));
+	poller.join();
+
+	ret = singleo_service_unregister_user_callback(context.handle);
+	ASSERT_EQ(ret, SINGLEO_ERROR_NONE);
+
+	ret = singleo_service_destroy(context.handle);
+	ASSERT_EQ(ret, SINGLEO_ERROR_NONE);
+}