/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2013 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include "path-util.h"
#include "cgroup-util.h"
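/* The CFS period programmed into cpu.cfs_period_us whenever a
 * per-second CPU quota is configured: quotas are always applied
 * relative to a fixed 100ms scheduling period. */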
#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
void cgroup_context_init(CGroupContext *c) {

        /* Initialize everything to the kernel defaults, assuming the
         * structure is preinitialized to 0 */

        c->cpu_shares = (unsigned long) -1;
        c->startup_cpu_shares = (unsigned long) -1;
        c->memory_limit = (uint64_t) -1;
        c->blockio_weight = (unsigned long) -1;
        c->startup_blockio_weight = (unsigned long) -1;

        c->cpu_quota_per_sec_usec = USEC_INFINITY;
void cgroup_context_free_device_allow(CGroupContext *c, CGroupDeviceAllow *a) {
        LIST_REMOVE(device_allow, c->device_allow, a);

void cgroup_context_free_blockio_device_weight(CGroupContext *c, CGroupBlockIODeviceWeight *w) {
        LIST_REMOVE(device_weights, c->blockio_device_weights, w);

void cgroup_context_free_blockio_device_bandwidth(CGroupContext *c, CGroupBlockIODeviceBandwidth *b) {
        LIST_REMOVE(device_bandwidths, c->blockio_device_bandwidths, b);

void cgroup_context_done(CGroupContext *c) {
        while (c->blockio_device_weights)
                cgroup_context_free_blockio_device_weight(c, c->blockio_device_weights);

        while (c->blockio_device_bandwidths)
                cgroup_context_free_blockio_device_bandwidth(c, c->blockio_device_bandwidths);

        while (c->device_allow)
                cgroup_context_free_device_allow(c, c->device_allow);
void cgroup_context_dump(CGroupContext *c, FILE* f, const char *prefix) {
        CGroupBlockIODeviceBandwidth *b;
        CGroupBlockIODeviceWeight *w;
        char u[FORMAT_TIMESPAN_MAX];

        prefix = strempty(prefix);

                "%sCPUAccounting=%s\n"
                "%sBlockIOAccounting=%s\n"
                "%sMemoryAccounting=%s\n"
                "%sStartupCPUShares=%lu\n"
                "%sCPUQuotaPerSecSec=%s\n"
                "%sBlockIOWeight=%lu\n"
                "%sStartupBlockIOWeight=%lu\n"
                "%sMemoryLimit=%" PRIu64 "\n"
                "%sDevicePolicy=%s\n",
                prefix, yes_no(c->cpu_accounting),
                prefix, yes_no(c->blockio_accounting),
                prefix, yes_no(c->memory_accounting),
                prefix, c->cpu_shares,
                prefix, c->startup_cpu_shares,
                prefix, strna(format_timespan(u, sizeof(u), c->cpu_quota_per_sec_usec, 1)),
                prefix, c->blockio_weight,
                prefix, c->startup_blockio_weight,
                prefix, c->memory_limit,
                prefix, cgroup_device_policy_to_string(c->device_policy));

        LIST_FOREACH(device_allow, a, c->device_allow)
                        "%sDeviceAllow=%s %s%s%s\n",
                        a->r ? "r" : "", a->w ? "w" : "", a->m ? "m" : "");

        LIST_FOREACH(device_weights, w, c->blockio_device_weights)
                        "%sBlockIODeviceWeight=%s %lu\n",
        LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {
                char buf[FORMAT_BYTES_MAX];

                        b->read ? "BlockIOReadBandwidth" : "BlockIOWriteBandwidth",
                        format_bytes(buf, sizeof(buf), b->bandwidth));
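/* Resolve a path to the dev_t to use for blkio attributes: for a
 * block device node that is the device itself, for anything else the
 * (whole-disk) block device backing the file system it lives on. */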
static int lookup_blkio_device(const char *p, dev_t *dev) {
                log_warning("Couldn't stat device %s: %m", p);

        if (S_ISBLK(st.st_mode))
        else if (major(st.st_dev) != 0) {
                /* If this is not a device node then find the block
                 * device this file is stored on */

                /* If this is a partition, try to get the originating
                 * block device */
                block_get_whole_disk(*dev, dev);

                log_warning("%s is not a block device and the file system block device cannot be determined or is not local.", p);
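/* Whitelist a single device node in the devices cgroup of 'path' by
 * writing a "<type> <major>:<minor> <acc>" entry to devices.allow,
 * where acc is some combination of "rwm". */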
static int whitelist_device(const char *path, const char *node, const char *acc) {
        char buf[2+DECIMAL_STR_MAX(dev_t)*2+2+4];

        if (stat(node, &st) < 0) {
                log_warning("Couldn't stat device %s: %m", node);
        if (!S_ISCHR(st.st_mode) && !S_ISBLK(st.st_mode)) {
                log_warning("%s is not a device.", node);

                S_ISCHR(st.st_mode) ? 'c' : 'b',
                major(st.st_rdev), minor(st.st_rdev),

        r = cg_set_attribute("devices", path, "devices.allow", buf);
                log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set devices.allow on %s: %s", path, strerror(-r));
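/* Whitelist all devices of a given type ('c' for character, 'b' for
 * block) whose driver name in /proc/devices matches the fnmatch()
 * pattern 'name', using the access string 'acc'. */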
static int whitelist_major(const char *path, const char *name, char type, const char *acc) {
        _cleanup_fclose_ FILE *f = NULL;

        assert(type == 'b' || type == 'c');

        f = fopen("/proc/devices", "re");
                log_warning("Cannot open /proc/devices to resolve %s (%c): %m", name, type);

        FOREACH_LINE(line, f, goto fail) {
                char buf[2+DECIMAL_STR_MAX(unsigned)+3+4], *p, *w;

                if (type == 'c' && streq(line, "Character devices:")) {

                if (type == 'b' && streq(line, "Block devices:")) {

                w = strpbrk(p, WHITESPACE);

                r = safe_atou(p, &maj);

                w += strspn(w, WHITESPACE);

                if (fnmatch(name, w, 0) != 0)

                r = cg_set_attribute("devices", path, "devices.allow", buf);
                        log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set devices.allow on %s: %s", path, strerror(-r));

        log_warning("Failed to read /proc/devices: %m");
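/* Apply the settings of the cgroup context 'c' to the cgroup 'path',
 * restricted to the controllers included in 'mask'. */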
void cgroup_context_apply(CGroupContext *c, CGroupControllerMask mask, const char *path, ManagerState state) {

        /* Some cgroup attributes are not supported on the root
         * cgroup, hence silently ignore them */
        is_root = isempty(path) || path_equal(path, "/");
        if ((mask & CGROUP_CPU) && !is_root) {
                char buf[MAX(DECIMAL_STR_MAX(unsigned long), DECIMAL_STR_MAX(usec_t)) + 1];

                sprintf(buf, "%lu\n",
                        state == MANAGER_STARTING && c->startup_cpu_shares != (unsigned long) -1 ? c->startup_cpu_shares :
                        c->cpu_shares != (unsigned long) -1 ? c->cpu_shares : 1024);
                r = cg_set_attribute("cpu", path, "cpu.shares", buf);
                        log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set cpu.shares on %s: %s", path, strerror(-r));

                sprintf(buf, USEC_FMT "\n", CGROUP_CPU_QUOTA_PERIOD_USEC);
                r = cg_set_attribute("cpu", path, "cpu.cfs_period_us", buf);
                        log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set cpu.cfs_period_us on %s: %s", path, strerror(-r));
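                /* c->cpu_quota_per_sec_usec is the permitted CPU time
                 * per wall-clock second; scale it to the fixed 100ms
                 * CFS period before writing cpu.cfs_quota_us. */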
                if (c->cpu_quota_per_sec_usec != USEC_INFINITY) {
                        sprintf(buf, USEC_FMT "\n", c->cpu_quota_per_sec_usec * CGROUP_CPU_QUOTA_PERIOD_USEC / USEC_PER_SEC);
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", buf);
                        r = cg_set_attribute("cpu", path, "cpu.cfs_quota_us", "-1");
                        log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set cpu.cfs_quota_us on %s: %s", path, strerror(-r));
        if (mask & CGROUP_BLKIO) {
                char buf[MAX3(DECIMAL_STR_MAX(unsigned long)+1,
                              DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(unsigned long)*1,
                              DECIMAL_STR_MAX(dev_t)*2+2+DECIMAL_STR_MAX(uint64_t)+1)];
                CGroupBlockIODeviceWeight *w;
                CGroupBlockIODeviceBandwidth *b;

                sprintf(buf, "%lu\n", state == MANAGER_STARTING && c->startup_blockio_weight != (unsigned long) -1 ? c->startup_blockio_weight :
                        c->blockio_weight != (unsigned long) -1 ? c->blockio_weight : 1000);
                r = cg_set_attribute("blkio", path, "blkio.weight", buf);
                        log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set blkio.weight on %s: %s", path, strerror(-r));

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_weights, w, c->blockio_device_weights) {

                        r = lookup_blkio_device(w->path, &dev);

                        sprintf(buf, "%u:%u %lu", major(dev), minor(dev), w->weight);
                        r = cg_set_attribute("blkio", path, "blkio.weight_device", buf);
                                log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set blkio.weight_device on %s: %s", path, strerror(-r));

                /* FIXME: no way to reset this list */
                LIST_FOREACH(device_bandwidths, b, c->blockio_device_bandwidths) {

                        r = lookup_blkio_device(b->path, &dev);

                        a = b->read ? "blkio.throttle.read_bps_device" : "blkio.throttle.write_bps_device";

                        sprintf(buf, "%u:%u %" PRIu64 "\n", major(dev), minor(dev), b->bandwidth);
                        r = cg_set_attribute("blkio", path, a, buf);
                                log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set %s on %s: %s", a, path, strerror(-r));
        if (mask & CGROUP_MEMORY) {
                if (c->memory_limit != (uint64_t) -1) {
                        char buf[DECIMAL_STR_MAX(uint64_t) + 1];

                        sprintf(buf, "%" PRIu64 "\n", c->memory_limit);
                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", buf);
                        r = cg_set_attribute("memory", path, "memory.limit_in_bytes", "-1");

                        log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to set memory.limit_in_bytes on %s: %s", path, strerror(-r));
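        /* Device access policy: unless the policy is "auto" with no
         * explicit entries configured, start from a default-deny list
         * and then whitelist a basic set of device nodes plus whatever
         * was configured explicitly. */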
        if ((mask & CGROUP_DEVICE) && !is_root) {
                CGroupDeviceAllow *a;

                if (c->device_allow || c->device_policy != CGROUP_AUTO)
                        r = cg_set_attribute("devices", path, "devices.deny", "a");
                        r = cg_set_attribute("devices", path, "devices.allow", "a");
                        log_full(r == -ENOENT ? LOG_DEBUG : LOG_WARNING, "Failed to reset devices.list on %s: %s", path, strerror(-r));

                if (c->device_policy == CGROUP_CLOSED ||
                    (c->device_policy == CGROUP_AUTO && c->device_allow)) {
                        static const char auto_devices[] =
                                "/dev/null\0" "rwm\0"
                                "/dev/zero\0" "rwm\0"
                                "/dev/full\0" "rwm\0"
                                "/dev/random\0" "rwm\0"
                                "/dev/urandom\0" "rwm\0"
                                "/dev/pts/ptmx\0" "rw\0"; /* /dev/pts/ptmx may not be duplicated, but accessed */

                        NULSTR_FOREACH_PAIR(x, y, auto_devices)
                                whitelist_device(path, x, y);

                        whitelist_major(path, "pts", 'c', "rw");
                        whitelist_major(path, "kdbus", 'c', "rw");
                        whitelist_major(path, "kdbus/*", 'c', "rw");
                LIST_FOREACH(device_allow, a, c->device_allow) {

                        if (startswith(a->path, "/dev/"))
                                whitelist_device(path, a->path, acc);
                        else if (startswith(a->path, "block-"))
                                whitelist_major(path, a->path + 6, 'b', acc);
                        else if (startswith(a->path, "char-"))
                                whitelist_major(path, a->path + 5, 'c', acc);
                                log_debug("Ignoring device %s while writing cgroup attribute.", a->path);
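/* Compute which cgroup controllers a context actually needs, based on
 * which of its settings deviate from the kernel defaults. */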
CGroupControllerMask cgroup_context_get_mask(CGroupContext *c) {
        CGroupControllerMask mask = 0;

        /* Figure out which controllers we need */

        if (c->cpu_accounting ||
            c->cpu_shares != (unsigned long) -1 ||
            c->startup_cpu_shares != (unsigned long) -1 ||
            c->cpu_quota_per_sec_usec != USEC_INFINITY)
                mask |= CGROUP_CPUACCT | CGROUP_CPU;

        if (c->blockio_accounting ||
            c->blockio_weight != (unsigned long) -1 ||
            c->startup_blockio_weight != (unsigned long) -1 ||
            c->blockio_device_weights ||
            c->blockio_device_bandwidths)
                mask |= CGROUP_BLKIO;

        if (c->memory_accounting ||
            c->memory_limit != (uint64_t) -1)
                mask |= CGROUP_MEMORY;

        if (c->device_allow || c->device_policy != CGROUP_AUTO)
                mask |= CGROUP_DEVICE;
CGroupControllerMask unit_get_cgroup_mask(Unit *u) {
        c = unit_get_cgroup_context(u);

        return cgroup_context_get_mask(c);
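/* The combined controller mask needed by all units contained in this
 * unit (i.e. the members of a slice), cached in cgroup_members_mask. */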
CGroupControllerMask unit_get_members_mask(Unit *u) {

        if (u->cgroup_members_mask_valid)
                return u->cgroup_members_mask;

        u->cgroup_members_mask = 0;

        if (u->type == UNIT_SLICE) {

                SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {

                        if (UNIT_DEREF(member->slice) != u)

                        u->cgroup_members_mask |=
                                unit_get_cgroup_mask(member) |
                                unit_get_members_mask(member);

        u->cgroup_members_mask_valid = true;
        return u->cgroup_members_mask;
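/* The controller mask needed by all units that live on the same
 * cgroup level as this one, i.e. all members of our parent slice. */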
CGroupControllerMask unit_get_siblings_mask(Unit *u) {

        if (UNIT_ISSET(u->slice))
                return unit_get_members_mask(UNIT_DEREF(u->slice));

        return unit_get_cgroup_mask(u) | unit_get_members_mask(u);
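/* The mask we actually have to realize for this unit: what the unit
 * itself, its members and its siblings need, limited to the
 * controllers the kernel actually supports. */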
CGroupControllerMask unit_get_target_mask(Unit *u) {
        CGroupControllerMask mask;

        mask = unit_get_cgroup_mask(u) | unit_get_members_mask(u) | unit_get_siblings_mask(u);
        mask &= u->manager->cgroup_supported;
/* Recurse from a unit up through its containing slices, propagating
 * mask bits upward. A unit is also a member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
        CGroupControllerMask m;

        /* Calculate subtree mask */
        m = unit_get_cgroup_mask(u) | unit_get_members_mask(u);

        /* See if anything changed from the previous invocation. If
         * not, we're done. */
        if (u->cgroup_subtree_mask_valid && m == u->cgroup_subtree_mask)

                u->cgroup_subtree_mask_valid &&
                ((m & ~u->cgroup_subtree_mask) != 0) &&
                ((~m & u->cgroup_subtree_mask) == 0);

        u->cgroup_subtree_mask = m;
        u->cgroup_subtree_mask_valid = true;

        if (UNIT_ISSET(u->slice)) {
                Unit *s = UNIT_DEREF(u->slice);

                        /* There's more set now than before. We
                         * propagate the new mask to the parent's mask
                         * (not caring if it actually was valid or
                         * not). */
                        s->cgroup_members_mask |= m;

                        /* There's less set now than before (or we
                         * don't know), we need to recalculate
                         * everything, so let's invalidate the
                         * parent's members mask */
                        s->cgroup_members_mask_valid = false;

                /* And now make sure that this change also hits our
                 * grandparents. */
                unit_update_cgroup_members_masks(s);
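/* Callback for cg_migrate_everywhere(): return the cgroup path of this
 * unit, or of the closest ancestor slice, that already has all the
 * requested controllers realized. */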
static const char *migrate_callback(CGroupControllerMask mask, void *userdata) {

        if (u->cgroup_path &&
            u->cgroup_realized &&
            (u->cgroup_realized_mask & mask) == mask)
                return u->cgroup_path;

        u = UNIT_DEREF(u->slice);
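/* Create the unit's cgroup in all hierarchies listed in 'mask',
 * register the path in the manager's cgroup-path-to-unit map, and
 * migrate any processes that already belong to the unit into it. */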
static int unit_create_cgroups(Unit *u, CGroupControllerMask mask) {
        _cleanup_free_ char *path = NULL;

        path = unit_default_cgroup_path(u);

        r = hashmap_put(u->manager->cgroup_unit, path, u);
                log_error(r == -EEXIST ? "cgroup %s exists already: %s" : "hashmap_put failed for %s: %s", path, strerror(-r));

        u->cgroup_path = path;

        /* First, create our own group */
        r = cg_create_everywhere(u->manager->cgroup_supported, mask, u->cgroup_path);
                log_error("Failed to create cgroup %s: %s", u->cgroup_path, strerror(-r));

        /* Keep track that this is now realized */
        u->cgroup_realized = true;
        u->cgroup_realized_mask = mask;

        /* Then, possibly move things over */
        r = cg_migrate_everywhere(u->manager->cgroup_supported, u->cgroup_path, u->cgroup_path, migrate_callback, u);
                log_warning("Failed to migrate cgroup %s: %s", u->cgroup_path, strerror(-r));
static bool unit_has_mask_realized(Unit *u, CGroupControllerMask mask) {
        return u->cgroup_realized && u->cgroup_realized_mask == mask;

/* Check if necessary controllers and attributes for a unit are in place.
 *
 * If not, create paths, move processes over, and set attributes.
 *
 * Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
        CGroupControllerMask mask;

        if (u->in_cgroup_queue) {
                LIST_REMOVE(cgroup_queue, u->manager->cgroup_queue, u);
                u->in_cgroup_queue = false;

        mask = unit_get_target_mask(u);

        if (unit_has_mask_realized(u, mask))

        /* First, realize parents */
        if (UNIT_ISSET(u->slice)) {
                r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);

        /* And then do the real work */
        r = unit_create_cgroups(u, mask);

        /* Finally, apply the necessary attributes. */
        cgroup_context_apply(unit_get_cgroup_context(u), mask, u->cgroup_path, state);

static void unit_add_to_cgroup_queue(Unit *u) {

        if (u->in_cgroup_queue)

        LIST_PREPEND(cgroup_queue, u->manager->cgroup_queue, u);
        u->in_cgroup_queue = true;
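/* Drain the queue of units waiting for cgroup realization, realizing
 * each of them for the manager's current state. */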
unsigned manager_dispatch_cgroup_queue(Manager *m) {

        state = manager_state(m);

        while ((i = m->cgroup_queue)) {
                assert(i->in_cgroup_queue);

                r = unit_realize_cgroup_now(i, state);
                        log_warning("Failed to realize cgroups for queued unit %s: %s", i->id, strerror(-r));
static void unit_queue_siblings(Unit *u) {

        /* This adds the siblings of the specified unit and the
         * siblings of all parent units to the cgroup queue. (But
         * neither the specified unit itself nor the parents.) */

        while ((slice = UNIT_DEREF(u->slice))) {

                SET_FOREACH(m, slice->dependencies[UNIT_BEFORE], i) {

                        /* Skip units that have a dependency on the slice
                         * but aren't actually in it. */
                        if (UNIT_DEREF(m->slice) != slice)

                        /* No point in doing cgroup application for units
                         * without active processes. */
                        if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(m)))
                        /* If the unit doesn't need any new controllers
                         * and has current ones realized, it doesn't need
                         * any changes. */
                        if (unit_has_mask_realized(m, unit_get_target_mask(m)))

                        unit_add_to_cgroup_queue(m);
int unit_realize_cgroup(Unit *u) {

        c = unit_get_cgroup_context(u);
        /* So, here's the deal: when realizing the cgroups for this
         * unit we need to first create all parents, but there's more
         * to it: for the weight-based controllers we also need to
         * make sure that all our siblings (i.e. units that are in the
         * same slice as we are) have cgroups, too. Otherwise, things
         * would become very uneven as each of their processes would
         * get as much resources as all of our processes together.
         * This call will synchronously create the parent cgroups, but
         * will defer work on the siblings to the next event loop
         * iteration. */

        /* Add all sibling slices to the cgroup queue. */
        unit_queue_siblings(u);

        /* And realize this one now (and apply the values) */
        return unit_realize_cgroup_now(u, manager_state(u->manager));
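/* Tear down the unit's cgroup: trim it in all hierarchies, drop it
 * from the manager's cgroup map and forget the realization state. */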
void unit_destroy_cgroup(Unit *u) {

        r = cg_trim_everywhere(u->manager->cgroup_supported, u->cgroup_path, !unit_has_name(u, SPECIAL_ROOT_SLICE));
                log_debug("Failed to destroy cgroup %s: %s", u->cgroup_path, strerror(-r));

        hashmap_remove(u->manager->cgroup_unit, u->cgroup_path);

        free(u->cgroup_path);
        u->cgroup_path = NULL;
        u->cgroup_realized = false;
        u->cgroup_realized_mask = 0;
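/* Guess the main PID of a unit by scanning its cgroup: only processes
 * that are direct children of the manager are considered, and if more
 * than one such (daemonized) process is found we give up. */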
pid_t unit_search_main_pid(Unit *u) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid = 0, npid, mypid;

        if (cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, &f) < 0)

        while (cg_read_pid(f, &npid) > 0) {

                /* Ignore processes that aren't our kids */
                if (get_parent_of_pid(npid, &ppid) >= 0 && ppid != mypid)

                        /* Dang, there's more than one daemonized PID
                         * in this group, so we don't know what process
                         * is the main process. */
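/* One-time cgroup setup for the manager: determine the cgroup we run
 * in, find the cgroupfs mount point, install the release agent, move
 * ourselves into the root cgroup, pin the hierarchy so it cannot be
 * unmounted, and detect which controllers are usable. */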
int manager_setup_cgroup(Manager *m) {
        _cleanup_free_ char *path = NULL;

        /* 1. Determine hierarchy */
        free(m->cgroup_root);
        m->cgroup_root = NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 0, &m->cgroup_root);
                log_error("Cannot determine cgroup we are running in: %s", strerror(-r));

        /* LEGACY: Already in /system.slice? If so, let's cut this
         * off. This is to support live upgrades from older systemd
         * versions where PID 1 was moved there. */
        if (m->running_as == SYSTEMD_SYSTEM) {

                e = endswith(m->cgroup_root, "/" SPECIAL_SYSTEM_SLICE);
                        e = endswith(m->cgroup_root, "/system");
        /* And make sure to store away the root value without trailing
         * slash, even for the root dir, so that we can easily prepend
         * it everywhere. */
        if (streq(m->cgroup_root, "/"))
                m->cgroup_root[0] = 0;
        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, NULL, &path);
                log_error("Cannot find cgroup mount point: %s", strerror(-r));

        log_debug("Using cgroup controller " SYSTEMD_CGROUP_CONTROLLER ". File system hierarchy is at %s.", path);

        /* 3. Install agent */
        if (m->running_as == SYSTEMD_SYSTEM) {
                r = cg_install_release_agent(SYSTEMD_CGROUP_CONTROLLER, SYSTEMD_CGROUP_AGENT_PATH);
                        log_warning("Failed to install release agent, ignoring: %s", strerror(-r));
                        log_debug("Installed release agent.");
                        log_debug("Release agent already installed.");

        /* 4. Make sure we are in the root cgroup */
        r = cg_create_and_attach(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, 0);
                log_error("Failed to create root cgroup hierarchy: %s", strerror(-r));

        /* 5. And pin it, so that it cannot be unmounted */
        safe_close(m->pin_cgroupfs_fd);

        m->pin_cgroupfs_fd = open(path, O_RDONLY|O_CLOEXEC|O_DIRECTORY|O_NOCTTY|O_NONBLOCK);
        if (m->pin_cgroupfs_fd < 0) {
                log_error("Failed to open pin file: %m");
        /* 6. Always enable hierarchical support if it exists... */
        cg_set_attribute("memory", "/", "memory.use_hierarchy", "1");

        /* 7. Figure out which controllers are supported */
        m->cgroup_supported = cg_mask_supported();
void manager_shutdown_cgroup(Manager *m, bool delete) {

        /* We can't really delete the group, since we are in it. But
         * we can trim it. */
        if (delete && m->cgroup_root)
                cg_trim(SYSTEMD_CGROUP_CONTROLLER, m->cgroup_root, false);

        m->pin_cgroupfs_fd = safe_close(m->pin_cgroupfs_fd);

        free(m->cgroup_root);
        m->cgroup_root = NULL;
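/* Map a cgroup path to the unit that owns it. If no unit is registered
 * for the exact path, walk up towards the root and return the unit of
 * the closest registered ancestor cgroup. */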
Unit* manager_get_unit_by_cgroup(Manager *m, const char *cgroup) {

        u = hashmap_get(m->cgroup_unit, cgroup);

                u = hashmap_get(m->cgroup_unit, p);
Unit *manager_get_unit_by_pid(Manager *m, pid_t pid) {
        _cleanup_free_ char *cgroup = NULL;

        r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &cgroup);

        return manager_get_unit_by_cgroup(m, cgroup);
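/* Handle a "cgroup is empty" notification: find the owning unit,
 * double-check that its cgroup is really empty (recursively), let the
 * unit type react to it and queue the unit for GC. */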
int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {

        u = manager_get_unit_by_cgroup(m, cgroup);

        r = cg_is_empty_recursive(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, true);

        if (UNIT_VTABLE(u)->notify_cgroup_empty)
                UNIT_VTABLE(u)->notify_cgroup_empty(u);

        unit_add_to_gc_queue(u);
static const char* const cgroup_device_policy_table[_CGROUP_DEVICE_POLICY_MAX] = {
        [CGROUP_AUTO] = "auto",
        [CGROUP_CLOSED] = "closed",
        [CGROUP_STRICT] = "strict",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_device_policy, CGroupDevicePolicy);