[Feature] Reload SCS without turning containers off when the binary is updated.
[Cause] When updating SCS we don't want to restart the containers, only SCS itself.
[Solution] Add SIGUSR1 handling that tells SCS to keep containers alive when exiting.
Add a check in ContainerConnectionTransport that skips remounting the tmpfs
when it is already mounted (i.e. when containers are still running).
[Verification] Build, install, reboot the target. Test the following while SCS is running with
containers active:
* Call "systemctl stop security-containers". Both SCS and the containers
should shut down. Then call "systemctl start security-containers"; SCS and the
containers should start up again.
* Simulate an update by calling "kill -USR1 `pidof security-containers-server`". SCS
should reload properly while the containers stay running (the best way to verify
this is to check the logs in journalctl).
Change-Id: I3a6d0fb25a4579208ad0f6d0de00e2755548230e
Signed-off-by: Lukasz Kostyra <l.kostyra@samsung.com>
return true;
}
+bool isMountPoint(const std::string& path, bool& result)
+{
+ struct stat pathStat, parentStat;
+ std::string parentPath = dirName(path);
+
+ if (::stat(path.c_str(), &pathStat)) {
+ LOGD("Failed to get stat of " << path << ": " << strerror(errno));
+ return false;
+ }
+
+ if (::stat(parentPath.c_str(), &parentStat)) {
+ LOGD("Failed to get stat of " << parentPath << ": " << strerror(errno));
+ return false;
+ }
+
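+ // A path is a mount point if it resides on a different device than its parent directory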
+ result = (pathStat.st_dev != parentStat.st_dev);
+ return true;
+}
+
} // namespace utils
} // namespace security_containers
*/
bool umount(const std::string& path);
+/**
+ * Checks whether the given path is a mount point.
+ * Returns false if the underlying stat() calls fail; on success 'result' is set.
+ */
+bool isMountPoint(const std::string& path, bool& result);
+
} // namespace utils
} // namespace security_containers
: mConfig(config),
mDom(utils::readFileContent(mConfig.config)),
mId(getDomainName(mDom.get())),
+ mDetachOnExit(false),
mLifecycleCallbackId(-1),
mRebootCallbackId(-1),
mNextIdForListener(0)
ContainerAdmin::~ContainerAdmin()
{
+ LOGD(mId << ": Destroying ContainerAdmin object...");
+
// Deregister callbacks
if (mLifecycleCallbackId >= 0) {
virConnectDomainEventDeregisterAny(virDomainGetConnect(mDom.get()),
}
// Try to forcefully stop
- LOGD(mId << ": Destroying ContainerAdmin object...");
- try {
- destroy();
- } catch (ServerException&) {
- LOGE(mId << ": Failed to destroy the container");
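+ // When detaching (e.g. during a daemon update) leave the domain running instead of destroying it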
+ if (!mDetachOnExit) {
+ try {
+ destroy();
+ } catch (ServerException&) {
+ LOGE(mId << ": Failed to destroy the container");
+ }
}
LOGD(mId << ": ContainerAdmin object destroyed");
return;
}
- // Autodestroyed when connection pointer released
- // Any managed save file for this domain is discarded,
- // and the domain boots from scratch
- u_int flags = VIR_DOMAIN_START_AUTODESTROY;
+ // In order to update the daemon without shutting down the containers,
+ // the autodestroy flag must NOT be set, so the domain is created
+ // without any flags.
+ u_int flags = VIR_DOMAIN_NONE;
if (virDomainCreateWithFlags(mDom.get(), flags) < 0) {
LOGE(mId << ": Failed to start the container\n"
}
}
+void ContainerAdmin::setDetachOnExit()
+{
+ mDetachOnExit = true;
+}
std::int64_t ContainerAdmin::getSchedulerQuota()
{
void setSchedulerLevel(SchedulerLevel sched);
/**
+ * Marks the container to be detached (left running, not destroyed) on exit.
+ */
+ void setDetachOnExit();
+
+ /**
* @return Scheduler CFS quota,
* TODO: this function is only for UNIT TESTS
*/
ContainerConfig& mConfig;
libvirt::LibvirtDomain mDom;
const std::string mId;
+ bool mDetachOnExit;
int getState(); // get the libvirt's domain state
void setSchedulerParams(std::uint64_t cpuShares, std::uint64_t vcpuPeriod, std::int64_t vcpuQuota);
ContainerConnectionTransport::ContainerConnectionTransport(const std::string& runMountPoint)
- : mRunMountPoint(runMountPoint)
+ : mRunMountPoint(runMountPoint), mDetachOnExit(false)
{
if (runMountPoint.empty()) {
return;
throw ContainerConnectionException("Could not create: " + runMountPoint);
}
- // try to umount if already mounted
- utils::umount(runMountPoint);
+ bool isMount = false;
+ if (!utils::isMountPoint(runMountPoint, isMount)) {
+ LOGE("Failed to check if " << runMountPoint << " is a mount point.");
+ throw ContainerConnectionException("Could not check if " + runMountPoint +
+ " is a mount point.");
+ }
+
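+ // Reuse an already mounted tmpfs (e.g. left over from a previous instance that
+ // detached on update) so running containers keep their transport sockets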
+ if (!isMount) {
+ LOGD(runMountPoint << " not mounted - mounting.");
- if (!utils::mountTmpfs(runMountPoint)) {
- LOGE("Initialization failed: could not mount " << runMountPoint);
- throw ContainerConnectionException("Could not mount: " + runMountPoint);
+ if (!utils::mountTmpfs(runMountPoint)) {
+ LOGE("Initialization failed: could not mount " << runMountPoint);
+ throw ContainerConnectionException("Could not mount: " + runMountPoint);
+ }
}
// if there is no systemd in the container this dir won't be created automatically
ContainerConnectionTransport::~ContainerConnectionTransport()
{
- if (!mRunMountPoint.empty()) {
- if (!utils::umount(mRunMountPoint)) {
- LOGE("Deinitialization failed: could not umount " << mRunMountPoint);
+ if (!mDetachOnExit) {
+ if (!mRunMountPoint.empty()) {
+ if (!utils::umount(mRunMountPoint)) {
+ LOGE("Deinitialization failed: could not umount " << mRunMountPoint);
+ }
}
}
}
return "unix:path=" + dbusPath;
}
+void ContainerConnectionTransport::setDetachOnExit()
+{
+ mDetachOnExit = true;
+}
} // namespace security_containers
*/
std::string acquireAddress();
+ /**
+ * Sets the object to detach from the transport filesystem on exit (leaves the tmpfs mounted)
+ */
+ void setDetachOnExit();
+
private:
std::string mRunMountPoint;
+ bool mDetachOnExit;
};
mAdmin->setSchedulerLevel(SchedulerLevel::BACKGROUND);
}
+void Container::setDetachOnExit()
+{
+ mAdmin->setDetachOnExit();
+ mConnectionTransport->setDetachOnExit();
+}
+
bool Container::isRunning()
{
return mAdmin->isRunning();
void goBackground();
/**
+ * Sets the container to be detached on exit.
+ *
+ * This passes the detach flag to the ContainerAdmin object and disables
+ * unmounting the tmpfs in ContainerConnectionTransport.
+ */
+ void setDetachOnExit();
+
+ /**
* @return Is the container running?
*/
bool isRunning();
namespace security_containers {
-ContainersManager::ContainersManager(const std::string& managerConfigPath)
+ContainersManager::ContainersManager(const std::string& managerConfigPath)
+ : mDetachOnExit(false)
{
LOGD("Instantiating ContainersManager object...");
mConfig.parseFile(managerConfigPath);
ContainersManager::~ContainersManager()
{
LOGD("Destroying ContainersManager object...");
- try {
- stopAll();
- } catch (ServerException&) {
- LOGE("Failed to stop all of the containers");
+
+ if (!mDetachOnExit) {
+ try {
+ stopAll();
+ } catch (ServerException&) {
+ LOGE("Failed to stop all of the containers");
+ }
}
+
LOGD("ContainersManager object destroyed");
}
}
+void ContainersManager::setContainersDetachOnExit()
+{
+ mDetachOnExit = true;
+
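+ // Propagate the detach flag to every container (admin and connection transport)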
+ for (auto& container : mContainers) {
+ container.second->setDetachOnExit();
+ }
+}
+
} // namespace security_containers
*/
std::string getRunningForegroundContainerId();
+ /**
+ * Sets ContainersManager to detach (rather than stop) containers on exit
+ */
+ void setContainersDetachOnExit();
+
private:
ContainersManagerConfig mConfig;
typedef std::unordered_map<std::string, std::unique_ptr<Container>> ContainerMap;
ContainerMap mContainers; // map of containers, id is the key
+ bool mDetachOnExit;
};
try {
Server server(configPath);
server.run();
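+ // If an update was requested via SIGUSR1, re-exec the (possibly updated) binary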
+ server.reloadIfRequired(argv);
} catch (std::exception& e) {
LOGE("Unexpected: " << utils::getTypeName(e) << ": " << e.what());
#include "utils/glib-loop.hpp"
#include <csignal>
+#include <cerrno>
#include <string>
+#include <cstring>
+#include <atomic>
+#include <unistd.h>
+
+extern char** environ;
namespace security_containers {
namespace {
-utils::Latch signalLatch;
+
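+// Set from the signal handler and read by the main thread, hence atomic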
+std::atomic_bool gUpdateTriggered(false);
+utils::Latch gSignalLatch;
+
void signalHandler(const int sig)
{
LOGI("Got signal " << sig);
- signalLatch.set();
-}
+
+ if (sig == SIGUSR1) {
+ LOGD("Received SIGUSR1 - triggering update.");
+ gUpdateTriggered = true;
+ }
+
+ gSignalLatch.set();
}
+} // namespace
+
void Server::run()
{
signal(SIGINT, signalHandler);
signal(SIGTERM, signalHandler);
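+ // SIGUSR1 requests a reload that leaves containers running (e.g. when the binary is updated)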
+ signal(SIGUSR1, signalHandler);
LOGI("Starting daemon...");
{
manager.startAll();
LOGI("Daemon started");
- signalLatch.wait();
+ gSignalLatch.wait();
+
+ // Detach containers if we triggered an update
+ if (gUpdateTriggered) {
+ manager.setContainersDetachOnExit();
+ }
LOGI("Stopping daemon...");
// manager.stopAll() will be called in destructor
LOGI("Daemon stopped");
}
+void Server::reloadIfRequired(char* argv[])
+{
+ if (gUpdateTriggered) {
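+ // Replace the current process image with the binary we were started from;
+ // on success execve() does not return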
+ execve(argv[0], argv, environ);
+
+ LOGE("Failed to reload " << argv[0] << ": " << strerror(errno));
+ }
+}
+
void Server::terminate()
{
LOGI("Terminating server");
- signalLatch.set();
+ gSignalLatch.set();
}
} // namespace security_containers
virtual ~Server();
/**
- * Starts all the containers and blocks until SIGINT or SIGTERM
+ * Starts all the containers and blocks until SIGINT, SIGTERM or SIGUSR1 is received
*/
void run();
/**
+ * Reloads the server by calling execve on itself if SIGUSR1 was received.
+ */
+ void reloadIfRequired(char* argv[]);
+
+ /**
* Terminates the server.
* Equivalent of sending SIGINT or SIGTERM signal
*/