/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

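/*
 * The cftype->private value of each control file packs two fields: the
 * hstate index in the upper 16 bits and the RES_* attribute being read
 * or written in the lower 16 bits.
 */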
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

#define hugetlb_cgroup_from_counter(counter, idx)                   \
	container_of(counter, struct hugetlb_cgroup, hugepage[idx])

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
				     bool rsvd)
{
	if (rsvd)
		return &h_cg->rsvd_hugepage[idx];
	return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
	return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

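/*
 * True while any fault or reservation counter of this cgroup still
 * holds a charge; css_offline() below loops until this returns false.
 */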
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(
			    hugetlb_cgroup_counter_from_cgroup(h_cg, idx)) ||
		    page_counter_read(hugetlb_cgroup_counter_from_cgroup_rsvd(
			    h_cg, idx))) {
			return true;
		}
	}
	return false;
}

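/*
 * Initialize the fault and reservation counters for every hstate,
 * parenting each counter to the matching counter of the parent cgroup
 * so that charges propagate up the hierarchy. Counter maxima are
 * rounded down to a whole number of huge pages.
 */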
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *fault_parent = NULL;
		struct page_counter *rsvd_parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup) {
			fault_parent = hugetlb_cgroup_counter_from_cgroup(
				parent_h_cgroup, idx);
			rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
				parent_h_cgroup, idx);
		}
		page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
								     idx),
				  fault_parent);
		page_counter_init(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			rsvd_parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));

		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
		ret = page_counter_set_max(
			hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
			limit);
		VM_BUG_ON(ret);
	}
}

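/*
 * Allocate the per-cgroup state. A NULL parent_css means this is the
 * root cgroup, which is remembered in root_h_cgroup for the is-root
 * checks and the reparenting done at offline time.
 */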
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we hold hugetlb_lock, pages cannot be moved off the active
 * list or uncharged from the cgroup, so there is no need to take a
 * page reference and test for page-active here. This function cannot
 * fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list that are not attached to
	 * any cgroup, i.e. hugepages of fewer than 3 pages. We can
	 * safely ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx;

	do {
		idx = 0;
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

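/*
 * Record a memory event: events_local counts only events originating
 * in this cgroup, while the plain events counter is also propagated to
 * every ancestor below the root.
 */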
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

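/*
 * Charge nr_pages against the current task's cgroup, retrying the
 * lookup if the css reference cannot be taken (e.g. the task is being
 * migrated). On failure a HUGETLB_MAX event is recorded and -ENOMEM
 * returned; on success *ptr points to the charged cgroup.
 */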
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					  struct hugetlb_cgroup **ptr,
					  bool rsvd)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(
		    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
		    nr_pages, &counter)) {
		ret = -ENOMEM;
		hugetlb_event(h_cg, idx, HUGETLB_MAX);
		css_put(&h_cg->css);
		goto done;
	}
	/* Reservations take a reference to the css because they do not get
	 * reparented.
	 */
	if (!rsvd)
		css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
				      struct hugetlb_cgroup **ptr)
{
	return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg,
					   struct page *page, bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	__set_hugetlb_cgroup(page, h_cg, rsvd);
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
					   struct page *page, bool rsvd)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = __hugetlb_cgroup_from_page(page, rsvd);
	if (unlikely(!h_cg))
		return;
	__set_hugetlb_cgroup(page, NULL, rsvd);

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
}

void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
				       struct page *page)
{
	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
}

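/*
 * Uncharge a cgroup directly, with no page involved; used by callers
 * that took a charge which was never committed to a page.
 */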
static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup *h_cg,
					     bool rsvd)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
								   rsvd),
			      nr_pages);

	if (rsvd)
		css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg)
{
	__hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

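/*
 * Uncharge the reservation counter of a resv_map for the range
 * [start, end) and drop the css reference the reservation held.
 */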
void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
				     unsigned long end)
{
	if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
	    !resv->css)
		return;

	page_counter_uncharge(resv->reservation_counter,
			      (end - start) * resv->pages_per_hpage);
	css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
					 struct file_region *rg,
					 unsigned long nr_pages)
{
	if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
		return;

	if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&
	    !resv->reservation_counter) {
		page_counter_uncharge(rg->reservation_counter,
				      nr_pages * resv->pages_per_hpage);
	}
}

enum {
	RES_USAGE,
	RES_RSVD_USAGE,
	RES_LIMIT,
	RES_RSVD_LIMIT,
	RES_MAX_USAGE,
	RES_RSVD_MAX_USAGE,
	RES_FAILCNT,
	RES_RSVD_FAILCNT,
};

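/* Read one resource value for a cgroup-v1 (legacy) control file. */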
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct page_counter *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_RSVD_USAGE:
		return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_RSVD_LIMIT:
		return (u64)rsvd_counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_RSVD_MAX_USAGE:
		return (u64)rsvd_counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_RSVD_FAILCNT:
		return rsvd_counter->failcnt;
	default:
		BUG();
	}
}

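/*
 * Show usage or limit for a cgroup-v2 control file. A limit equal to
 * the rounded-down PAGE_COUNTER_MAX is printed as "max", mirroring the
 * token accepted by hugetlb_cgroup_write_dfl().
 */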
static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_RSVD_USAGE:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_RSVD_LIMIT:
		counter = &h_cg->rsvd_hugepage[idx];
		fallthrough;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

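/*
 * Parse and apply a new limit. The unlimited token is "max" on the
 * default hierarchy and "-1" on the legacy one; values are rounded
 * down to a whole number of huge pages.
 */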
static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
	bool rsvd = false;

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_RSVD_LIMIT:
		rsvd = true;
		fallthrough;
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(
			__hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
			nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

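/*
 * Writes to the legacy max_usage_in_bytes and failcnt files reset the
 * watermark or the failure counter, respectively.
 */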
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter, *rsvd_counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
	rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_RSVD_MAX_USAGE:
		page_counter_reset_watermark(rsvd_counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	case RES_RSVD_FAILCNT:
		rsvd_counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

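/* Format a huge page size as a human-readable string such as "2MB". */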
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

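/*
 * Create the per-hstate control files for the default (v2) hierarchy,
 * e.g. "2MB.max" and "2MB.current" for a 2MB hstate (the cgroup core
 * prepends the "hugetlb." subsystem prefix).
 */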
static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current reservation usage file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[6];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}

static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, sizeof(buf), huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the reservation limit file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation usage file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files_legacy[4];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX reservation usage file */
	cft = &h->cgroup_files_legacy[5];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[6];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the reservation failcnt file */
	cft = &h->cgroup_files_legacy[7];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[8];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hugetlb_cgroup *h_cg_rsvd;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	h_cg_rsvd = hugetlb_cgroup_from_page_rsvd(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);
	set_hugetlb_cgroup_rsvd(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};

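/*
 * Illustrative usage (assuming a 2MB hstate and cgroup v2 mounted at
 * /sys/fs/cgroup; "grp" is a hypothetical cgroup):
 *
 *	echo 1G > /sys/fs/cgroup/grp/hugetlb.2MB.max
 *	cat /sys/fs/cgroup/grp/hugetlb.2MB.current
 *
 * Charges beyond the limit fail and bump the "max" count shown in
 * hugetlb.2MB.events.
 */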