return mProcessor.isStarted();
}
-void Client::stop()
+void Client::stop(bool wait)
{
if (!mProcessor.isStarted()) {
return;
}
LOGS("Client stop");
+ // Forward the wait flag: wait == true blocks until the processor has
+ // fully stopped; false only requests the stop.
- mProcessor.stop();
+ mProcessor.stop(wait);
}
void Client::handle(const FileDescriptor fd, const epoll::Events pollEvents)
/**
* Stops processing
+ *
+ * @param wait does it block waiting for all internals to stop
*/
- void stop();
+ void stop(bool wait = true);
/**
* Set the callback called for each new connection to a peer
} // namespace ipc
-#endif // COMMON_IPC_INTERNALS_FINISH_REQUEST_HPP
+#endif // COMMON_IPC_INTERNALS_FINISH_REQUEST_HPP
\ No newline at end of file
{
LOGS(mLogPrefix + "Processor Destructor");
try {
- stop();
+ stop(false);
} catch (std::exception& e) {
LOGE(mLogPrefix + "Error in Processor's destructor: " << e.what());
}
}
}
-void Processor::stop()
+void Processor::stop(bool wait)
{
LOGS(mLogPrefix + "Processor stop");
mRequestQueue.pushBack(Event::FINISH, request);
}
- LOGD(mLogPrefix + "Waiting for the Processor to stop");
+ if(wait){
+ LOGD(mLogPrefix + "Waiting for the Processor to stop");
- // Wait till the FINISH request is served
- Lock lock(mStateMutex);
- conditionPtr->wait(lock, [this]() {
- return !mIsRunning;
- });
- assert(mPeerInfo.empty());
+ // Wait till the FINISH request is served
+ Lock lock(mStateMutex);
+ conditionPtr->wait(lock, [this]() {
+ return !mIsRunning;
+ });
+ assert(mPeerInfo.empty());
+ }
}
}
mRequestQueue.pushBack(Event::ADD_PEER, requestPtr);
LOGI(mLogPrefix + "Add Peer Request. Id: " << requestPtr->peerID
- << ", fd: " << socketPtr->getFD());
+ << ", fd: " << socketPtr->getFD());
return requestPtr->peerID;
}
mEventPoll.removeFD(mRequestQueue.getFD());
mIsRunning = false;
- requestFinisher.conditionPtr->notify_all();
+ requestFinisher.conditionPtr->notify_all();
return true;
}
/**
* Stops the processing thread.
* No incoming data will be handled after.
+ *
+ * @param wait does it block waiting for all internals to stop
*/
- void stop();
+ void stop(bool wait);
/**
* Set the callback called for each new connection to a peer
bool onAddPeerRequest(AddPeerRequest& request);
bool onRemovePeerRequest(RemovePeerRequest& request);
bool onSendResultRequest(SendResultRequest& request);
- bool onFinishRequest(FinishRequest& requestFinisher);
+ bool onFinishRequest(FinishRequest& request);
bool onReturnValue(Peers::iterator& peerIt,
const MessageID messageID);
return mProcessor.isStarted();
}
-void Service::stop()
+void Service::stop(bool wait)
{
if (!mProcessor.isStarted()) {
return;
}
LOGS("Service stop");
+ // Forward the wait flag: wait == true blocks until the processor has
+ // fully stopped; false only requests the stop.
- mProcessor.stop();
+ mProcessor.stop(wait);
}
void Service::handle(const FileDescriptor fd, const epoll::Events pollEvents)
/**
* Stops all working threads
+ *
+ * @param wait does it block waiting for all internals to stop
*/
- void stop();
+ void stop(bool wait = true);
/**
* Set the callback called for each new connection to a peer
LOGD("Connected");
}
+// Stops the wrapped IPC service.
+// @param wait when true, blocks until the service has fully stopped
+void HostIPCConnection::stop(bool wait)
+{
+ LOGT("Stopping IPC");
+ mService->stop(wait);
+}
+
+// Reports whether the wrapped IPC service is still started.
+// NOTE(review): "running" is defined here as Service::isStarted() — confirm
+// the two notions are meant to be equivalent.
+bool HostIPCConnection::isRunning()
+{
+ return mService->isStarted();
+}
+
void HostIPCConnection::setLockQueueCallback(const Method<api::Void>::type& callback)
{
typedef IPCMethodWrapper<api::Void> Callback;
~HostIPCConnection();
void start();
+ void stop(bool wait);
void signalZoneConnectionState(const api::ConnectionState& connectionState);
+ bool isRunning();
private:
void setLockQueueCallback(const Method<api::Void>::type& callback);
// TODO: SIGTERM used by lxc, get rid of this
utils::signalIgnore({SIGTERM});
+ LOGI("Starting daemon...");
Server server(CONFIG_PATH);
server.run(runAsRoot);
server.reloadIfRequired(argv);
+ LOGI("Daemon stopped");
} catch (std::exception& e) {
LOGE("Unexpected: " << utils::getTypeName(e) << ": " << e.what());
#include "config.hpp"
#include "server.hpp"
-#include "zones-manager.hpp"
#include "exception.hpp"
#include "config/manager.hpp"
#include "logger/logger.hpp"
-#include "utils/glib-loop.hpp"
#include "utils/environment.hpp"
#include "utils/fs.hpp"
#include "utils/signal.hpp"
#include <csignal>
#include <cerrno>
#include <string>
+#include <functional>
#include <cstring>
#include <unistd.h>
#include <pwd.h>
namespace vasum {
Server::Server(const std::string& configPath)
- : mIsUpdate(false),
+ : mIsRunning(true),
+ mIsUpdate(false),
mConfigPath(configPath),
- mSignalFD(mDispatcher.getPoll())
+ mSignalFD(mEventPoll),
+ mZonesManager(mEventPoll, mConfigPath),
+ // NOTE(review): records the *constructing* thread; terminate() signals
+ // this thread, which assumes run() is later called on the same thread.
+ mDispatchingThread(::pthread_self())
{
- mSignalFD.setHandlerAndBlock(SIGUSR1, [this] (int) {
- LOGD("Received SIGUSR1 - triggering update.");
- mIsUpdate = true;
- mStopLatch.set();
- });
-
- mSignalFD.setHandlerAndBlock(SIGINT, [this](int) {
- mStopLatch.set();
- });
-
- mSignalFD.setHandler(SIGTERM, [this] (int) {
- mStopLatch.set();
- });
+ // std::bind is used because the functor it returns silently discards the
+ // extra signal-number argument the SignalFD handler is invoked with.
+ mSignalFD.setHandlerAndBlock(SIGUSR1, std::bind(&Server::handleUpdate, this));
+ mSignalFD.setHandlerAndBlock(SIGINT, std::bind(&Server::handleStop, this));
+ mSignalFD.setHandler(SIGTERM, std::bind(&Server::handleStop, this));
+}
+
+// SIGUSR1 handler: prepares a self-update.  Zones are detached so they keep
+// running across the re-exec; stop(false) requests a non-blocking manager
+// stop (presumably to avoid blocking the dispatching loop that runs this
+// handler — confirm); clearing mIsRunning lets run() exit so
+// reloadIfRequired() can re-exec the binary.
+void Server::handleUpdate()
+{
+ LOGD("Received SIGUSR1 - triggering update.");
+ mZonesManager.setZonesDetachOnExit();
+ mZonesManager.stop(false);
+ mIsUpdate = true;
+ mIsRunning = false;
+}
+
+// SIGINT/SIGTERM handler: requests a non-blocking manager stop and makes the
+// run() loop terminate.  run() keeps dispatching until
+// ZonesManager::isRunning() also turns false, so shutdown completes there.
+void Server::handleStop()
+{
+ LOGD("Stopping Server");
+ mZonesManager.stop(false);
+ mIsRunning = false;
+}
void Server::run(bool asRoot)
throw ServerException("Environment setup failed");
}
- LOGI("Starting daemon...");
- {
- utils::ScopedGlibLoop loop;
- ZonesManager manager(mDispatcher.getPoll(), mConfigPath);
+ mZonesManager.start();
- // Do not restore zones state at Vasum start
- LOGI("Daemon started");
-
- mStopLatch.wait();
-
- // Detach zones if we triggered an update
- if (mIsUpdate) {
- manager.setZonesDetachOnExit();
- }
-
- LOGI("Stopping daemon...");
+ while(mIsRunning || mZonesManager.isRunning()) {
+ mEventPoll.dispatchIteration(-1);
}
- LOGI("Daemon stopped");
}
void Server::reloadIfRequired(char* argv[])
{
if (mIsUpdate) {
- execve(argv[0], argv, environ);
+ // ::execve replaces the process image and only returns on failure,
+ // so reaching the LOGE below is always an error path.
+ ::execve(argv[0], argv, environ);
LOGE("Failed to reload " << argv[0] << ": " << getSystemErrorMessage());
}
}
void Server::terminate()
{
LOGI("Terminating server");
- mStopLatch.set();
+ // Deliver SIGINT to the dispatching thread recorded at construction;
+ // the SignalFD handler installed in the constructor (handleStop) then
+ // performs the actual shutdown from inside the dispatch loop.
+ int ret = ::pthread_kill(mDispatchingThread, SIGINT);
+ if(ret != 0) {
+ const std::string msg = utils::getSystemErrorMessage(ret);
+ LOGE("Error during Server termination: " << msg);
+ throw ServerException("Error during Server termination: " + msg);
+ }
}
bool Server::checkEnvironment()
#ifndef SERVER_SERVER_HPP
#define SERVER_SERVER_HPP
+#include "zones-manager.hpp"
+
#include "utils/latch.hpp"
#include "utils/signalfd.hpp"
-#include "ipc/epoll/thread-dispatcher.hpp"
+#include "utils/glib-loop.hpp"
+#include "ipc/epoll/event-poll.hpp"
#include <atomic>
#include <string>
+#include <pthread.h>
namespace vasum {
static bool checkEnvironment();
private:
- std::atomic_bool mIsUpdate;
+ bool mIsRunning;
+ bool mIsUpdate;
std::string mConfigPath;
- utils::Latch mStopLatch;
- ipc::epoll::ThreadDispatcher mDispatcher;
+ utils::ScopedGlibLoop loop;
+ ipc::epoll::EventPoll mEventPoll;
utils::SignalFD mSignalFD;
+ ZonesManager mZonesManager;
+ ::pthread_t mDispatchingThread;
+
/**
* Set needed caps, groups and drop root privileges.
*/
static bool prepareEnvironment(const std::string& configPath, bool runAsRoot);
+ void handleUpdate();
+ void handleStop();
+
};
ZonesManager::ZonesManager(ipc::epoll::EventPoll& eventPoll, const std::string& configPath)
- : mWorker(utils::Worker::create())
- , mHostIPCConnection(eventPoll, this)
+ : mIsRunning(true)
+ , mWorker(utils::Worker::create())
, mDetachOnExit(false)
, mExclusiveIDLock(INVALID_CONNECTION_ID)
+ , mHostIPCConnection(eventPoll, this)
#ifdef DBUS_CONNECTION
, mHostDbusConnection(this)
#endif
configPath,
mDynamicConfig,
getVasumDbPrefix());
+}
+
+// Blocking shutdown: stop(true) waits for all internals to finish.
+// NOTE(review): stop() can throw (it reaches Processor::stop via the IPC
+// connection); consider wrapping it in try/catch here, as Processor's own
+// destructor does, so the destructor never throws.
+ZonesManager::~ZonesManager()
+{
+ LOGD("Destroying ZonesManager object...");
+ stop(true);
+}
+
+void ZonesManager::start()
+{
+ Lock lock(mMutex);
+
+ LOGD("Starting ZonesManager");
+
+ mIsRunning = true;
#ifdef DBUS_CONNECTION
using namespace std::placeholders;
mProxyCallPolicy.reset(new ProxyCallPolicy(mConfig.proxyCallRules));
- mHostDbusConnection.setProxyCallCallback(bind(&ZonesManager::handleProxyCall,
- this, HOST_ID, _1, _2, _3, _4, _5, _6, _7));
+ mHostDbusConnection.setProxyCallCallback(std::bind(&ZonesManager::handleProxyCall,
+ this, HOST_ID, _1, _2, _3, _4, _5, _6, _7));
#endif //DBUS_CONNECTION
for (const auto& zoneId : mDynamicConfig.zoneIds) {
mHostIPCConnection.start();
}
-ZonesManager::~ZonesManager()
+void ZonesManager::stop(bool wait)
{
- LOGD("Destroying ZonesManager object...");
+ Lock lock(mMutex);
+ LOGD("Stopping ZonesManager");
+
+ if(!mIsRunning) {
+ return;
+ }
if (!mDetachOnExit) {
try {
LOGE("Failed to shutdown all of the zones");
}
}
+
// wait for all tasks to complete
mWorker.reset();
+ mHostIPCConnection.stop(wait);
+ mIsRunning = false;
+}
- LOGD("ZonesManager object destroyed");
+// True while the manager or its host IPC connection is still active.
+// Server::run() polls this to keep dispatching events until a non-blocking
+// stop() has fully completed.
+bool ZonesManager::isRunning()
+{
+ Lock lock(mMutex);
+ return mIsRunning || mHostIPCConnection.isRunning();
}
ZonesManager::Zones::iterator ZonesManager::findZone(const std::string& id)
~ZonesManager();
/**
+ * Request stopping the manager.
+ *
+ * @param wait does it block waiting for all internals to stop
+ */
+ void stop(bool wait);
+
+ /**
+ * Starts the manager
+ */
+ void start();
+
+ /**
+ * If ZoneManager is running it needs the external polling loop to operate.
+ * @return is manager still running
+ */
+ bool isRunning();
+
+ /**
* Create new zone.
*
* @param zoneId id of new zone
typedef std::recursive_mutex Mutex;
typedef std::unique_lock<Mutex> Lock;
+ bool mIsRunning;
utils::Worker::Pointer mWorker;
Mutex mMutex; // used to protect mZones
ZonesManagerConfig mConfig; //TODO make it const
ZonesManagerDynamicConfig mDynamicConfig;
- HostIPCConnection mHostIPCConnection;
// to hold InputMonitor pointer to monitor if zone switching sequence is recognized
std::unique_ptr<InputMonitor> mSwitchingSequenceMonitor;
// like set but keep insertion order
void insertZone(const std::string& zoneId, const std::string& templatePath);
void tryAddTask(const utils::Worker::Task& task, api::MethodResultBuilder::Pointer result, bool wait);
+ HostIPCConnection mHostIPCConnection;
#ifdef DBUS_CONNECTION
HostDbusConnection mHostDbusConnection;
std::unique_ptr<ProxyCallPolicy> mProxyCallPolicy;
, mRunGuard("/tmp/ut-run")
, cm(new ZonesManager(mDispatcher.getPoll(), TEST_CONFIG_PATH))
{
+ cm->start();
cm->createZone("zone1", TEMPLATE_NAME);
cm->createZone("zone2", TEMPLATE_NAME);
cm->createZone("zone3", TEMPLATE_NAME);
#include "utils/glib-loop.hpp"
#include "utils/scoped-dir.hpp"
#include "logger/logger.hpp"
+#include "ipc/epoll/thread-dispatcher.hpp"
#include <string>
#include <future>
struct Fixture {
utils::ScopedDir mZonesPathGuard;
- ipc::epoll::ThreadDispatcher mDispatcher;
Fixture()
: mZonesPathGuard(ZONES_PATH)
void prepare()
{
ScopedGlibLoop loop;
+ ipc::epoll::ThreadDispatcher mDispatcher;
ZonesManager manager(mDispatcher.getPoll(), TEST_CONFIG_PATH);
+ manager.start();
manager.createZone("zone1", TEMPLATE_NAME);
manager.createZone("zone2", TEMPLATE_NAME);
manager.restoreAll();
+ manager.stop(true);
}
};
} // namespace
BOOST_AUTO_TEST_CASE(RunTerminate)
{
Server s(TEST_CONFIG_PATH);
- std::future<void> runFuture = std::async(std::launch::async, [&] {s.run(AS_ROOT);});
+ std::future<void> runFuture = std::async(std::launch::async, [&] {
+ // give a chance to run
+ std::this_thread::sleep_for(std::chrono::milliseconds(200));
+ s.terminate();
+ });
- // give a chance to run a thread
- std::this_thread::sleep_for(std::chrono::milliseconds(200));
-
- s.terminate();
+ s.run(AS_ROOT);
runFuture.wait();
// a potential exception from std::async thread will be delegated to this thread
BOOST_AUTO_TEST_CASE(Create)
{
ZonesManager cm(dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
}
BOOST_AUTO_TEST_CASE(StartStop)
{
ZonesManager cm(dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
{
{
ZonesManager cm(dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.restoreAll();
}
{
ZonesManager cm(dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.restoreAll();
BOOST_CHECK_EQUAL(cm.getRunningForegroundZoneId(), "zone1");
}
BOOST_AUTO_TEST_CASE(Focus)
{
ZonesManager cm(dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(SwitchToDefault, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(AllowSwitchToDefault, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(ProxyCall, F, DbusFixture)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(GetZoneIds, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(GetActiveZoneId, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(SetActiveZone, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
const std::string zone3 = "test3";
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.restoreAll();
BOOST_CHECK_EQUAL(cm.getRunningForegroundZoneId(), "");
auto getZoneIds = [this]() -> std::vector<std::string> {
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.restoreAll();
typename F::HostAccessory host;
// create zone
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
typename F::HostAccessory host;
host.callAsyncMethodCreateZone(zone, SIMPLE_TEMPLATE, resultCallback);
BOOST_REQUIRE(callDone.wait(EVENT_TIMEOUT));
// destroy zone
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
typename F::HostAccessory host;
host.callAsyncMethodDestroyZone(zone, resultCallback);
BOOST_REQUIRE(callDone.wait(EVENT_TIMEOUT));
// firts run
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
typename F::HostAccessory host;
// zone1 - created
// second run
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.restoreAll();
BOOST_CHECK(cm.isRunning(zone1)); // because the default json value
const std::string zone2 = "zone2";
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone(zone1, SIMPLE_TEMPLATE);
cm.createZone(zone2, SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(LockUnlockZone, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.createZone("zone2", SIMPLE_TEMPLATE);
cm.createZone("zone3", SIMPLE_TEMPLATE);
MULTI_FIXTURE_TEST_CASE(CreateFile, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.restoreAll();
MULTI_FIXTURE_TEST_CASE(CreateWriteReadFile, F, ACCESSORS)
{
ZonesManager cm(F::dispatcher.getPoll(), TEST_CONFIG_PATH);
+ cm.start();
cm.createZone("zone1", SIMPLE_TEMPLATE);
cm.restoreAll();