/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
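/*
 * cft->private packs an hstate index and a resource attribute into one
 * value: e.g. MEMFILE_PRIVATE(1, RES_LIMIT) stores the index in the upper
 * 16 bits and the attribute in the lower 16 bits, so MEMFILE_IDX() recovers
 * 1 and MEMFILE_ATTR() recovers RES_LIMIT.
 */
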
static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
                                     bool rsvd)
{
        if (rsvd)
                return &h_cg->rsvd_hugepage[idx];
        return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
        return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
        return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
        return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
        return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
        return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
        return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (page_counter_read(
                    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
                        return true;
        }
        return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
                                struct hugetlb_cgroup *parent_h_cgroup)
{
        int idx;

        for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
                struct page_counter *fault_parent = NULL;
                struct page_counter *rsvd_parent = NULL;
                unsigned long limit;
                int ret;

                if (parent_h_cgroup) {
                        fault_parent = hugetlb_cgroup_counter_from_cgroup(
                                parent_h_cgroup, idx);
                        rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
                                parent_h_cgroup, idx);
                }
                page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
                                                                     idx),
                                  fault_parent);
                page_counter_init(
                        hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
                        rsvd_parent);

                limit = round_down(PAGE_COUNTER_MAX,
                                   pages_per_huge_page(&hstates[idx]));

                ret = page_counter_set_max(
                        hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
                        limit);
                VM_BUG_ON(ret);
                ret = page_counter_set_max(
                        hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
                        limit);
                VM_BUG_ON(ret);
        }
}
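/*
 * A worked example of the rounding above: assuming 4KiB base pages, a 2MiB
 * hstate has pages_per_huge_page == 512, so each counter's initial max is
 * PAGE_COUNTER_MAX rounded down to a multiple of 512 pages.
 */
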
static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
        int node;

        for_each_node(node)
                kfree(h_cgroup->nodeinfo[node]);
        kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
        struct hugetlb_cgroup *h_cgroup;
        int node;

        h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
                           GFP_KERNEL);

        if (!h_cgroup)
                return ERR_PTR(-ENOMEM);

        if (!parent_h_cgroup)
                root_h_cgroup = h_cgroup;

        /*
         * TODO: this routine can waste much memory for nodes which will
         * never be onlined. It's better to use a memory hotplug callback
         * function.
         */
        for_each_node(node) {
                /* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
                int node_to_alloc =
                        node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
                h_cgroup->nodeinfo[node] =
                        kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
                                     GFP_KERNEL, node_to_alloc);
                if (!h_cgroup->nodeinfo[node])
                        goto fail_alloc_nodeinfo;
        }

        hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
        return &h_cgroup->css;

fail_alloc_nodeinfo:
        hugetlb_cgroup_free(h_cgroup);
        return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
        hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved from the
 * active list or uncharged from the cgroup, so there is no need to take a
 * page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
                                       struct page *page)
{
        unsigned int nr_pages;
        struct page_counter *counter;
        struct hugetlb_cgroup *page_hcg;
        struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
        struct folio *folio = page_folio(page);

        page_hcg = hugetlb_cgroup_from_folio(folio);
        /*
         * We can have pages in the active list without any cgroup,
         * i.e. hugepages with fewer than 3 base pages. We can safely
         * ignore those pages.
         */
        if (!page_hcg || page_hcg != h_cg)
                goto out;

        nr_pages = compound_nr(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
                page_counter_charge(&parent->hugepage[idx], nr_pages);
        }
        counter = &h_cg->hugepage[idx];
        /* Take the pages off the local counter */
        page_counter_cancel(counter, nr_pages);

        set_hugetlb_cgroup(page, parent);
out:
        return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
        struct hstate *h;
        struct page *page;

        do {
                for_each_hstate(h) {
                        spin_lock_irq(&hugetlb_lock);
                        list_for_each_entry(page, &h->hugepage_activelist, lru)
                                hugetlb_cgroup_move_parent(hstate_index(h),
                                                           h_cg, page);

                        spin_unlock_irq(&hugetlb_lock);
                }
                cond_resched();
        } while (hugetlb_cgroup_have_usage(h_cg));
}

static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
                                 enum hugetlb_memory_event event)
{
        atomic_long_inc(&hugetlb->events_local[idx][event]);
        cgroup_file_notify(&hugetlb->events_local_file[idx]);

        do {
                atomic_long_inc(&hugetlb->events[idx][event]);
                cgroup_file_notify(&hugetlb->events_file[idx]);
        } while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
                 !hugetlb_cgroup_is_root(hugetlb));
}
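/*
 * Note on the loop above: events_local is only bumped in the cgroup where
 * the event occurred, while events propagates up the hierarchy, stopping
 * before the root, so a cgroup's events count includes events from all of
 * its descendants.
 */
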
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                          struct hugetlb_cgroup **ptr,
                                          bool rsvd)
{
        int ret = 0;
        struct page_counter *counter;
        struct hugetlb_cgroup *h_cg = NULL;

        if (hugetlb_cgroup_disabled())
                goto done;
        /*
         * We don't charge any cgroup if the compound page has fewer
         * than 3 pages.
         */
        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                goto done;
again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
        if (!css_tryget(&h_cg->css)) {
                rcu_read_unlock();
                goto again;
        }
        rcu_read_unlock();

        if (!page_counter_try_charge(
                    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
                    nr_pages, &counter)) {
                ret = -ENOMEM;
                hugetlb_event(h_cg, idx, HUGETLB_MAX);
                css_put(&h_cg->css);
                goto done;
        }
        /* Reservations take a reference to the css because they do not get
         * reparented.
         */
        if (!rsvd)
                css_put(&h_cg->css);
done:
        *ptr = h_cg;
        return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                 struct hugetlb_cgroup **ptr)
{
        return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
                                      struct hugetlb_cgroup **ptr)
{
        return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                           struct hugetlb_cgroup *h_cg,
                                           struct page *page, bool rsvd)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        __set_hugetlb_cgroup(page_folio(page), h_cg, rsvd);
        if (!rsvd) {
                unsigned long usage =
                        h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
                /*
                 * This write is not atomic due to fetching usage and writing
                 * to it, but that's fine because we call this with
                 * hugetlb_lock held anyway.
                 */
                WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
                           usage + nr_pages);
        }
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
                                  struct page *page)
{
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                       struct hugetlb_cgroup *h_cg,
                                       struct page *page)
{
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                           struct page *page, bool rsvd)
{
        struct hugetlb_cgroup *h_cg;
        struct folio *folio = page_folio(page);

        if (hugetlb_cgroup_disabled())
                return;
        lockdep_assert_held(&hugetlb_lock);
        h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
        if (unlikely(!h_cg))
                return;
        __set_hugetlb_cgroup(folio, NULL, rsvd);

        page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
                                                                   rsvd),
                              nr_pages);

        if (rsvd)
                css_put(&h_cg->css);
        else {
                unsigned long usage =
                        h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
                /*
                 * This write is not atomic due to fetching usage and writing
                 * to it, but that's fine because we call this with
                 * hugetlb_lock held anyway.
                 */
                WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
                           usage - nr_pages);
        }
}

void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                  struct page *page)
{
        __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
}

void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
                                       struct page *page)
{
        __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                             struct hugetlb_cgroup *h_cg,
                                             bool rsvd)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                return;

        page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
                                                                   rsvd),
                              nr_pages);

        if (rsvd)
                css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                    struct hugetlb_cgroup *h_cg)
{
        __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
                                         struct hugetlb_cgroup *h_cg)
{
        __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
                                     unsigned long end)
{
        if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
            !resv->css)
                return;

        page_counter_uncharge(resv->reservation_counter,
                              (end - start) * resv->pages_per_hpage);
        css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                         struct file_region *rg,
                                         unsigned long nr_pages,
                                         bool region_del)
{
        if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
                return;

        if (rg->reservation_counter && resv->pages_per_hpage &&
            !resv->reservation_counter) {
                page_counter_uncharge(rg->reservation_counter,
                                      nr_pages * resv->pages_per_hpage);
                /*
                 * Only do css_put(rg->css) when we delete the entire region
                 * because one file_region must hold exactly one css reference.
                 */
                if (region_del)
                        css_put(rg->css);
        }
}

static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
        int nid;
        struct cftype *cft = seq_cft(seq);
        int idx = MEMFILE_IDX(cft->private);
        bool legacy = MEMFILE_ATTR(cft->private);
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
        struct cgroup_subsys_state *css;
        unsigned long usage;

        if (legacy) {
                /* Add up usage across all nodes for the non-hierarchical total. */
                usage = 0;
                for_each_node_state(nid, N_MEMORY)
                        usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
                seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

                /* Simply print the per-node usage for the non-hierarchical total. */
                for_each_node_state(nid, N_MEMORY)
                        seq_printf(seq, " N%d=%lu", nid,
                                   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
                                           PAGE_SIZE);
                seq_putc(seq, '\n');
        }

        /*
         * The hierarchical total is pretty much the value recorded by the
         * counter, so use that.
         */
        seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
                   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

        /*
         * For each node, traverse the css tree to obtain the hierarchical
         * node usage.
         */
        for_each_node_state(nid, N_MEMORY) {
                usage = 0;
                rcu_read_lock();
                css_for_each_descendant_pre(css, &h_cg->css) {
                        usage += READ_ONCE(hugetlb_cgroup_from_css(css)
                                                   ->nodeinfo[nid]
                                                   ->usage[idx]);
                }
                rcu_read_unlock();
                seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
        }

        seq_putc(seq, '\n');

        return 0;
}
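/*
 * Resulting format: on cgroup v2 (legacy == false) a single line
 * "total=<hierarchical bytes> N0=<bytes> N1=<bytes> ..."; on cgroup v1 a
 * non-hierarchical "total= N0= ..." line followed by a
 * "hierarchical_total= N0= ..." line, as built above.
 */
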
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
                                   struct cftype *cft)
{
        struct page_counter *counter;
        struct page_counter *rsvd_counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

        counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
        rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

        switch (MEMFILE_ATTR(cft->private)) {
        case RES_USAGE:
                return (u64)page_counter_read(counter) * PAGE_SIZE;
        case RES_RSVD_USAGE:
                return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
        case RES_LIMIT:
                return (u64)counter->max * PAGE_SIZE;
        case RES_RSVD_LIMIT:
                return (u64)rsvd_counter->max * PAGE_SIZE;
        case RES_MAX_USAGE:
                return (u64)counter->watermark * PAGE_SIZE;
        case RES_RSVD_MAX_USAGE:
                return (u64)rsvd_counter->watermark * PAGE_SIZE;
        case RES_FAILCNT:
                return counter->failcnt;
        case RES_RSVD_FAILCNT:
                return rsvd_counter->failcnt;
        default:
                BUG();
        }
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
        int idx;
        u64 val;
        struct cftype *cft = seq_cft(seq);
        unsigned long limit;
        struct page_counter *counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

        idx = MEMFILE_IDX(cft->private);
        counter = &h_cg->hugepage[idx];

        limit = round_down(PAGE_COUNTER_MAX,
                           pages_per_huge_page(&hstates[idx]));

        switch (MEMFILE_ATTR(cft->private)) {
        case RES_RSVD_USAGE:
                counter = &h_cg->rsvd_hugepage[idx];
                fallthrough;
        case RES_USAGE:
                val = (u64)page_counter_read(counter);
                seq_printf(seq, "%llu\n", val * PAGE_SIZE);
                break;
        case RES_RSVD_LIMIT:
                counter = &h_cg->rsvd_hugepage[idx];
                fallthrough;
        case RES_LIMIT:
                val = (u64)counter->max;
                if (val == limit)
                        seq_puts(seq, "max\n");
                else
                        seq_printf(seq, "%llu\n", val * PAGE_SIZE);
                break;
        default:
                BUG();
        }

        return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off,
                                    const char *max)
{
        int ret, idx;
        unsigned long nr_pages;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
        bool rsvd = false;

        if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
                return -EINVAL;

        buf = strstrip(buf);
        ret = page_counter_memparse(buf, max, &nr_pages);
        if (ret)
                return ret;

        idx = MEMFILE_IDX(of_cft(of)->private);
        nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

        switch (MEMFILE_ATTR(of_cft(of)->private)) {
        case RES_RSVD_LIMIT:
                rsvd = true;
                fallthrough;
        case RES_LIMIT:
                mutex_lock(&hugetlb_limit_mutex);
                ret = page_counter_set_max(
                        __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
                        nr_pages);
                mutex_unlock(&hugetlb_limit_mutex);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}
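/*
 * Example usage from userspace (file names are created later in this file),
 * assuming a 2MB hstate: "echo 1G > hugetlb.2MB.limit_in_bytes" on cgroup v1,
 * or "echo max > hugetlb.2MB.max" on cgroup v2. Requested values are rounded
 * down to a multiple of the huge page size by the round_down() above.
 */
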
static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
                                           char *buf, size_t nbytes, loff_t off)
{
        return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
                                        char *buf, size_t nbytes, loff_t off)
{
        return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        int ret = 0;
        struct page_counter *counter, *rsvd_counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

        counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
        rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

        switch (MEMFILE_ATTR(of_cft(of)->private)) {
        case RES_MAX_USAGE:
                page_counter_reset_watermark(counter);
                break;
        case RES_RSVD_MAX_USAGE:
                page_counter_reset_watermark(rsvd_counter);
                break;
        case RES_FAILCNT:
                counter->failcnt = 0;
                break;
        case RES_RSVD_FAILCNT:
                rsvd_counter->failcnt = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
        if (hsize >= SZ_1G)
                snprintf(buf, size, "%luGB", hsize / SZ_1G);
        else if (hsize >= SZ_1M)
                snprintf(buf, size, "%luMB", hsize / SZ_1M);
        else
                snprintf(buf, size, "%luKB", hsize / SZ_1K);
        return buf;
}
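/* e.g. mem_fmt(buf, sizeof(buf), SZ_2M) yields "2MB" (SZ_2M is from <linux/sizes.h>). */
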
static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
        int idx;
        long max;
        struct cftype *cft = seq_cft(seq);
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

        idx = MEMFILE_IDX(cft->private);

        if (local)
                max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
        else
                max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

        seq_printf(seq, "max %lu\n", max);

        return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
        return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
        return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, sizeof(buf), huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files_dfl[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->write = hugetlb_cgroup_write_dfl;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the reservation limit file */
        cft = &h->cgroup_files_dfl[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->write = hugetlb_cgroup_write_dfl;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the current usage file */
        cft = &h->cgroup_files_dfl[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the current reservation usage file */
        cft = &h->cgroup_files_dfl[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the events file */
        cft = &h->cgroup_files_dfl[4];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_events_show;
        cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the events.local file */
        cft = &h->cgroup_files_dfl[5];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_events_local_show;
        cft->file_offset = offsetof(struct hugetlb_cgroup,
                                    events_local_file[idx]);
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the numa stat file */
        cft = &h->cgroup_files_dfl[6];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_cgroup_read_numa_stat;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files_dfl[7];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
                                       h->cgroup_files_dfl));
}
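/*
 * For a 2MB hstate the code above creates hugetlb.2MB.max,
 * hugetlb.2MB.rsvd.max, hugetlb.2MB.current, hugetlb.2MB.rsvd.current,
 * hugetlb.2MB.events, hugetlb.2MB.events.local and hugetlb.2MB.numa_stat
 * in every non-root cgroup directory.
 */
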
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, sizeof(buf), huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files_legacy[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
        cft->write = hugetlb_cgroup_write_legacy;

        /* Add the reservation limit file */
        cft = &h->cgroup_files_legacy[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
        cft->write = hugetlb_cgroup_write_legacy;

        /* Add the usage file */
        cft = &h->cgroup_files_legacy[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the reservation usage file */
        cft = &h->cgroup_files_legacy[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the MAX usage file */
        cft = &h->cgroup_files_legacy[4];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the MAX reservation usage file */
        cft = &h->cgroup_files_legacy[5];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the failcnt file */
        cft = &h->cgroup_files_legacy[6];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the reservation failcnt file */
        cft = &h->cgroup_files_legacy[7];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the numa stat file */
        cft = &h->cgroup_files_legacy[8];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
        cft->private = MEMFILE_PRIVATE(idx, 1);
        cft->seq_show = hugetlb_cgroup_read_numa_stat;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files_legacy[9];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
                                          h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
        __hugetlb_cgroup_file_dfl_init(idx);
        __hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /*
                 * Add cgroup control files only if the huge page consists
                 * of more than two normal pages. This is because we use
                 * page[2].private for storing cgroup details.
                 */
                if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
                        __hugetlb_cgroup_file_init(hstate_index(h));
        }
}

/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
        struct hugetlb_cgroup *h_cg;
        struct hugetlb_cgroup *h_cg_rsvd;
        struct hstate *h = page_hstate(oldhpage);
        struct folio *old_folio = page_folio(oldhpage);

        if (hugetlb_cgroup_disabled())
                return;

        spin_lock_irq(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_folio(old_folio);
        h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
        set_hugetlb_cgroup(oldhpage, NULL);
        set_hugetlb_cgroup_rsvd(oldhpage, NULL);

        /* move the h_cg details to new cgroup */
        set_hugetlb_cgroup(newhpage, h_cg);
        set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
        list_move(&newhpage->lru, &h->hugepage_activelist);
        spin_unlock_irq(&hugetlb_lock);
        return;
}

static struct cftype hugetlb_files[] = {
        {} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
        .css_alloc = hugetlb_cgroup_css_alloc,
        .css_offline = hugetlb_cgroup_css_offline,
        .css_free = hugetlb_cgroup_css_free,
        .dfl_cftypes = hugetlb_files,
        .legacy_cftypes = hugetlb_files,
};