mm/hugetlb_cgroup: convert hugetlb_cgroup_from_page() to folios
mm/hugetlb_cgroup.c
/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

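/*
 * cft->private for each control file packs two values: MEMFILE_PRIVATE()
 * stores the hstate index in the upper bits and a RES_* attribute (or a
 * legacy flag for numa_stat) in the low 16 bits; MEMFILE_IDX() and
 * MEMFILE_ATTR() unpack them again.
 */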
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_IDX(val)        (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline struct page_counter *
__hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx,
                                     bool rsvd)
{
        if (rsvd)
                return &h_cg->rsvd_hugepage[idx];
        return &h_cg->hugepage[idx];
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup(struct hugetlb_cgroup *h_cg, int idx)
{
        return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, false);
}

static inline struct page_counter *
hugetlb_cgroup_counter_from_cgroup_rsvd(struct hugetlb_cgroup *h_cg, int idx)
{
        return __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, true);
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
        return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
        return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
        return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
        return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (page_counter_read(
                    hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
                        return true;
        }
        return false;
}

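/*
 * Initialise the fault and reserved-page counters for every hstate,
 * parenting each one to the matching counter in @parent_h_cgroup (NULL
 * for the root cgroup) and defaulting every limit to the largest value
 * that is still a whole multiple of the huge page size.
 */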
static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
                                struct hugetlb_cgroup *parent_h_cgroup)
{
        int idx;

        for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
                struct page_counter *fault_parent = NULL;
                struct page_counter *rsvd_parent = NULL;
                unsigned long limit;
                int ret;

                if (parent_h_cgroup) {
                        fault_parent = hugetlb_cgroup_counter_from_cgroup(
                                parent_h_cgroup, idx);
                        rsvd_parent = hugetlb_cgroup_counter_from_cgroup_rsvd(
                                parent_h_cgroup, idx);
                }
                page_counter_init(hugetlb_cgroup_counter_from_cgroup(h_cgroup,
                                                                     idx),
                                  fault_parent);
                page_counter_init(
                        hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
                        rsvd_parent);

                limit = round_down(PAGE_COUNTER_MAX,
                                   pages_per_huge_page(&hstates[idx]));

                ret = page_counter_set_max(
                        hugetlb_cgroup_counter_from_cgroup(h_cgroup, idx),
                        limit);
                VM_BUG_ON(ret);
                ret = page_counter_set_max(
                        hugetlb_cgroup_counter_from_cgroup_rsvd(h_cgroup, idx),
                        limit);
                VM_BUG_ON(ret);
        }
}

static void hugetlb_cgroup_free(struct hugetlb_cgroup *h_cgroup)
{
        int node;

        for_each_node(node)
                kfree(h_cgroup->nodeinfo[node]);
        kfree(h_cgroup);
}

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
        struct hugetlb_cgroup *h_cgroup;
        int node;

        h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids),
                           GFP_KERNEL);

        if (!h_cgroup)
                return ERR_PTR(-ENOMEM);

        if (!parent_h_cgroup)
                root_h_cgroup = h_cgroup;

        /*
         * TODO: this routine can waste a lot of memory for nodes which will
         * never be onlined. It would be better to use a memory hotplug
         * callback function.
         */
        for_each_node(node) {
                /* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
                int node_to_alloc =
                        node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
                h_cgroup->nodeinfo[node] =
                        kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
                                     GFP_KERNEL, node_to_alloc);
                if (!h_cgroup->nodeinfo[node])
                        goto fail_alloc_nodeinfo;
        }

        hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
        return &h_cgroup->css;

fail_alloc_nodeinfo:
        hugetlb_cgroup_free(h_cgroup);
        return ERR_PTR(-ENOMEM);
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
        hugetlb_cgroup_free(hugetlb_cgroup_from_css(css));
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot be moved off the
 * active list or uncharged from the cgroup, so there is no need to
 * take a page reference or test whether the page is active here.
 * This function cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
                                       struct page *page)
{
        unsigned int nr_pages;
        struct page_counter *counter;
        struct hugetlb_cgroup *page_hcg;
        struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
        struct folio *folio = page_folio(page);

        page_hcg = hugetlb_cgroup_from_folio(folio);
        /*
         * We can have pages on the active list that carry no cgroup,
         * i.e. huge pages consisting of fewer than 3 base pages. We
         * can safely ignore those pages.
         */
        if (!page_hcg || page_hcg != h_cg)
                goto out;

        nr_pages = compound_nr(page);
        if (!parent) {
                parent = root_h_cgroup;
                /* root has no limit */
                page_counter_charge(&parent->hugepage[idx], nr_pages);
        }
        counter = &h_cg->hugepage[idx];
        /* Take the pages off the local counter */
        page_counter_cancel(counter, nr_pages);

        set_hugetlb_cgroup(page, parent);
out:
        return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
        struct hstate *h;
        struct page *page;

        do {
                for_each_hstate(h) {
                        spin_lock_irq(&hugetlb_lock);
                        list_for_each_entry(page, &h->hugepage_activelist, lru)
                                hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);

                        spin_unlock_irq(&hugetlb_lock);
                }
                cond_resched();
        } while (hugetlb_cgroup_have_usage(h_cg));
}

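/*
 * Record a hugetlb memory event. The local counter covers only this
 * cgroup; the hierarchical counter is also bumped in every ancestor
 * below the root.
 */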
static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
                                 enum hugetlb_memory_event event)
{
        atomic_long_inc(&hugetlb->events_local[idx][event]);
        cgroup_file_notify(&hugetlb->events_local_file[idx]);

        do {
                atomic_long_inc(&hugetlb->events[idx][event]);
                cgroup_file_notify(&hugetlb->events_file[idx]);
        } while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
                 !hugetlb_cgroup_is_root(hugetlb));
}

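/*
 * Charging happens in two steps: this routine reserves space in the
 * current task's cgroup counter (taking a css reference in the
 * reservation case, since reservations are not reparented), and the
 * caller later binds the cgroup to the page via
 * hugetlb_cgroup_commit_charge*() with hugetlb_lock held.
 */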
static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                          struct hugetlb_cgroup **ptr,
                                          bool rsvd)
{
        int ret = 0;
        struct page_counter *counter;
        struct hugetlb_cgroup *h_cg = NULL;

        if (hugetlb_cgroup_disabled())
                goto done;
        /*
         * We don't charge any cgroup if the compound page has fewer
         * than 3 pages.
         */
        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                goto done;
again:
        rcu_read_lock();
        h_cg = hugetlb_cgroup_from_task(current);
        if (!css_tryget(&h_cg->css)) {
                rcu_read_unlock();
                goto again;
        }
        rcu_read_unlock();

        if (!page_counter_try_charge(
                    __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
                    nr_pages, &counter)) {
                ret = -ENOMEM;
                hugetlb_event(h_cg, idx, HUGETLB_MAX);
                css_put(&h_cg->css);
                goto done;
        }
        /*
         * Reservations take a reference to the css because they do not
         * get reparented.
         */
        if (!rsvd)
                css_put(&h_cg->css);
done:
        *ptr = h_cg;
        return ret;
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                 struct hugetlb_cgroup **ptr)
{
        return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, false);
}

int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
                                      struct hugetlb_cgroup **ptr)
{
        return __hugetlb_cgroup_charge_cgroup(idx, nr_pages, ptr, true);
}

/* Should be called with hugetlb_lock held */
static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                           struct hugetlb_cgroup *h_cg,
                                           struct page *page, bool rsvd)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        __set_hugetlb_cgroup(page_folio(page), h_cg, rsvd);
        if (!rsvd) {
                unsigned long usage =
                        h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
                /*
                 * This write is not atomic due to fetching usage and writing
                 * to it, but that's fine because we call this with
                 * hugetlb_lock held anyway.
                 */
                WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
                           usage + nr_pages);
        }
}

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
                                  struct hugetlb_cgroup *h_cg,
                                  struct page *page)
{
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
                                       struct hugetlb_cgroup *h_cg,
                                       struct page *page)
{
        __hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
}

/*
 * Should be called with hugetlb_lock held
 */
static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                           struct page *page, bool rsvd)
{
        struct hugetlb_cgroup *h_cg;
        struct folio *folio = page_folio(page);

        if (hugetlb_cgroup_disabled())
                return;
        lockdep_assert_held(&hugetlb_lock);
        h_cg = __hugetlb_cgroup_from_folio(folio, rsvd);
        if (unlikely(!h_cg))
                return;
        __set_hugetlb_cgroup(folio, NULL, rsvd);

        page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
                                                                   rsvd),
                              nr_pages);

        if (rsvd)
                css_put(&h_cg->css);
        else {
                unsigned long usage =
                        h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
                /*
                 * This write is not atomic due to fetching usage and writing
                 * to it, but that's fine because we call this with
                 * hugetlb_lock held anyway.
                 */
                WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
                           usage - nr_pages);
        }
}

void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
                                  struct page *page)
{
        __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
}

void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
                                       struct page *page)
{
        __hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
}

static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                             struct hugetlb_cgroup *h_cg,
                                             bool rsvd)
{
        if (hugetlb_cgroup_disabled() || !h_cg)
                return;

        if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
                return;

        page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
                                                                   rsvd),
                              nr_pages);

        if (rsvd)
                css_put(&h_cg->css);
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
                                    struct hugetlb_cgroup *h_cg)
{
        __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, false);
}

void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
                                         struct hugetlb_cgroup *h_cg)
{
        __hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg, true);
}

void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
                                     unsigned long end)
{
        if (hugetlb_cgroup_disabled() || !resv || !resv->reservation_counter ||
            !resv->css)
                return;

        page_counter_uncharge(resv->reservation_counter,
                              (end - start) * resv->pages_per_hpage);
        css_put(resv->css);
}

void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                         struct file_region *rg,
                                         unsigned long nr_pages,
                                         bool region_del)
{
        if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
                return;

        if (rg->reservation_counter && resv->pages_per_hpage &&
            !resv->reservation_counter) {
                page_counter_uncharge(rg->reservation_counter,
                                      nr_pages * resv->pages_per_hpage);
                /*
                 * Only do css_put(rg->css) when we delete the entire region
                 * because one file_region must hold exactly one css reference.
                 */
                if (region_del)
                        css_put(rg->css);
        }
}

enum {
        RES_USAGE,
        RES_RSVD_USAGE,
        RES_LIMIT,
        RES_RSVD_LIMIT,
        RES_MAX_USAGE,
        RES_RSVD_MAX_USAGE,
        RES_FAILCNT,
        RES_RSVD_FAILCNT,
};

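/*
 * Print per-node usage. For the legacy (v1) file a non-hierarchical
 * summary line is emitted first, so the output has the shape (values
 * purely illustrative):
 *
 *   total=4194304 N0=4194304 N1=0
 *   hierarchical_total=8388608 N0=8388608 N1=0
 *
 * On cgroup v2 only the hierarchical line is printed, without the
 * "hierarchical_" prefix.
 */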
static int hugetlb_cgroup_read_numa_stat(struct seq_file *seq, void *dummy)
{
        int nid;
        struct cftype *cft = seq_cft(seq);
        int idx = MEMFILE_IDX(cft->private);
        bool legacy = MEMFILE_ATTR(cft->private);
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));
        struct cgroup_subsys_state *css;
        unsigned long usage;

        if (legacy) {
                /* Add up usage across all nodes for the non-hierarchical total. */
                usage = 0;
                for_each_node_state(nid, N_MEMORY)
                        usage += READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]);
                seq_printf(seq, "total=%lu", usage * PAGE_SIZE);

                /* Simply print the per-node usage for the non-hierarchical total. */
                for_each_node_state(nid, N_MEMORY)
                        seq_printf(seq, " N%d=%lu", nid,
                                   READ_ONCE(h_cg->nodeinfo[nid]->usage[idx]) *
                                           PAGE_SIZE);
                seq_putc(seq, '\n');
        }

        /*
         * The hierarchical total is pretty much the value recorded by the
         * counter, so use that.
         */
        seq_printf(seq, "%stotal=%lu", legacy ? "hierarchical_" : "",
                   page_counter_read(&h_cg->hugepage[idx]) * PAGE_SIZE);

        /*
         * For each node, traverse the css tree to obtain the hierarchical
         * node usage.
         */
        for_each_node_state(nid, N_MEMORY) {
                usage = 0;
                rcu_read_lock();
                css_for_each_descendant_pre(css, &h_cg->css) {
                        usage += READ_ONCE(hugetlb_cgroup_from_css(css)
                                                   ->nodeinfo[nid]
                                                   ->usage[idx]);
                }
                rcu_read_unlock();
                seq_printf(seq, " N%d=%lu", nid, usage * PAGE_SIZE);
        }

        seq_putc(seq, '\n');

        return 0;
}

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
                                   struct cftype *cft)
{
        struct page_counter *counter;
        struct page_counter *rsvd_counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

        counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];
        rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(cft->private)];

        switch (MEMFILE_ATTR(cft->private)) {
        case RES_USAGE:
                return (u64)page_counter_read(counter) * PAGE_SIZE;
        case RES_RSVD_USAGE:
                return (u64)page_counter_read(rsvd_counter) * PAGE_SIZE;
        case RES_LIMIT:
                return (u64)counter->max * PAGE_SIZE;
        case RES_RSVD_LIMIT:
                return (u64)rsvd_counter->max * PAGE_SIZE;
        case RES_MAX_USAGE:
                return (u64)counter->watermark * PAGE_SIZE;
        case RES_RSVD_MAX_USAGE:
                return (u64)rsvd_counter->watermark * PAGE_SIZE;
        case RES_FAILCNT:
                return counter->failcnt;
        case RES_RSVD_FAILCNT:
                return rsvd_counter->failcnt;
        default:
                BUG();
        }
}

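/*
 * cgroup v2 read handler: usage and limits are reported in bytes, and a
 * limit still equal to the hugepage-aligned PAGE_COUNTER_MAX default is
 * displayed as "max".
 */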
static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
        int idx;
        u64 val;
        struct cftype *cft = seq_cft(seq);
        unsigned long limit;
        struct page_counter *counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

        idx = MEMFILE_IDX(cft->private);
        counter = &h_cg->hugepage[idx];

        limit = round_down(PAGE_COUNTER_MAX,
                           pages_per_huge_page(&hstates[idx]));

        switch (MEMFILE_ATTR(cft->private)) {
        case RES_RSVD_USAGE:
                counter = &h_cg->rsvd_hugepage[idx];
                fallthrough;
        case RES_USAGE:
                val = (u64)page_counter_read(counter);
                seq_printf(seq, "%llu\n", val * PAGE_SIZE);
                break;
        case RES_RSVD_LIMIT:
                counter = &h_cg->rsvd_hugepage[idx];
                fallthrough;
        case RES_LIMIT:
                val = (u64)counter->max;
                if (val == limit)
                        seq_puts(seq, "max\n");
                else
                        seq_printf(seq, "%llu\n", val * PAGE_SIZE);
                break;
        default:
                BUG();
        }

        return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off,
                                    const char *max)
{
        int ret, idx;
        unsigned long nr_pages;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));
        bool rsvd = false;

        if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
                return -EINVAL;

        buf = strstrip(buf);
        ret = page_counter_memparse(buf, max, &nr_pages);
        if (ret)
                return ret;

        idx = MEMFILE_IDX(of_cft(of)->private);
        nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx]));

        switch (MEMFILE_ATTR(of_cft(of)->private)) {
        case RES_RSVD_LIMIT:
                rsvd = true;
                fallthrough;
        case RES_LIMIT:
                mutex_lock(&hugetlb_limit_mutex);
                ret = page_counter_set_max(
                        __hugetlb_cgroup_counter_from_cgroup(h_cg, idx, rsvd),
                        nr_pages);
                mutex_unlock(&hugetlb_limit_mutex);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
                                           char *buf, size_t nbytes, loff_t off)
{
        return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
                                        char *buf, size_t nbytes, loff_t off)
{
        return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

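/*
 * Legacy reset handler: writing any value to *.max_usage_in_bytes or
 * *.failcnt clears the recorded watermark or failure count.
 */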
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
                                    char *buf, size_t nbytes, loff_t off)
{
        int ret = 0;
        struct page_counter *counter, *rsvd_counter;
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

        counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];
        rsvd_counter = &h_cg->rsvd_hugepage[MEMFILE_IDX(of_cft(of)->private)];

        switch (MEMFILE_ATTR(of_cft(of)->private)) {
        case RES_MAX_USAGE:
                page_counter_reset_watermark(counter);
                break;
        case RES_RSVD_MAX_USAGE:
                page_counter_reset_watermark(rsvd_counter);
                break;
        case RES_FAILCNT:
                counter->failcnt = 0;
                break;
        case RES_RSVD_FAILCNT:
                rsvd_counter->failcnt = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret ?: nbytes;
}

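/* Format a huge page size in bytes as a short string, e.g. "2MB" or "1GB". */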
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
        if (hsize >= SZ_1G)
                snprintf(buf, size, "%luGB", hsize / SZ_1G);
        else if (hsize >= SZ_1M)
                snprintf(buf, size, "%luMB", hsize / SZ_1M);
        else
                snprintf(buf, size, "%luKB", hsize / SZ_1K);
        return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
        int idx;
        long max;
        struct cftype *cft = seq_cft(seq);
        struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

        idx = MEMFILE_IDX(cft->private);

        if (local)
                max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
        else
                max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

        seq_printf(seq, "max %lu\n", max);

        return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
        return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
        return __hugetlb_events_show(seq, true);
}

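/*
 * Create the cgroup v2 control files for one hstate. With 2MB huge
 * pages, for example, this yields hugetlb.2MB.max, hugetlb.2MB.rsvd.max,
 * hugetlb.2MB.current, hugetlb.2MB.rsvd.current, hugetlb.2MB.events,
 * hugetlb.2MB.events.local and hugetlb.2MB.numa_stat (the "hugetlb."
 * prefix is added by the cgroup core). A limit can then be set with,
 * say:
 *
 *   echo 1G > /sys/fs/cgroup/<group>/hugetlb.2MB.max
 *
 * where <group> is an illustrative cgroup path; page_counter_memparse()
 * accepts "max" as well as K/M/G suffixes.
 */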
static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, sizeof(buf), huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files_dfl[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->write = hugetlb_cgroup_write_dfl;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the reservation limit file */
        cft = &h->cgroup_files_dfl[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->write = hugetlb_cgroup_write_dfl;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the current usage file */
        cft = &h->cgroup_files_dfl[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the current reservation usage file */
        cft = &h->cgroup_files_dfl[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.current", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
        cft->seq_show = hugetlb_cgroup_read_u64_max;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the events file */
        cft = &h->cgroup_files_dfl[4];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_events_show;
        cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the events.local file */
        cft = &h->cgroup_files_dfl[5];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_events_local_show;
        cft->file_offset = offsetof(struct hugetlb_cgroup,
                                    events_local_file[idx]);
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* Add the numa stat file */
        cft = &h->cgroup_files_dfl[6];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
        cft->private = MEMFILE_PRIVATE(idx, 0);
        cft->seq_show = hugetlb_cgroup_read_numa_stat;
        cft->flags = CFTYPE_NOT_ON_ROOT;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files_dfl[7];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
                                       h->cgroup_files_dfl));
}

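/*
 * Create the cgroup v1 control files for one hstate; with 2MB huge
 * pages, for example, these are named hugetlb.2MB.limit_in_bytes,
 * hugetlb.2MB.usage_in_bytes, hugetlb.2MB.max_usage_in_bytes,
 * hugetlb.2MB.failcnt, their .rsvd. counterparts, and
 * hugetlb.2MB.numa_stat.
 */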
static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
        char buf[32];
        struct cftype *cft;
        struct hstate *h = &hstates[idx];

        /* format the size */
        mem_fmt(buf, sizeof(buf), huge_page_size(h));

        /* Add the limit file */
        cft = &h->cgroup_files_legacy[0];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
        cft->write = hugetlb_cgroup_write_legacy;

        /* Add the reservation limit file */
        cft = &h->cgroup_files_legacy[1];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.limit_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_LIMIT);
        cft->read_u64 = hugetlb_cgroup_read_u64;
        cft->write = hugetlb_cgroup_write_legacy;

        /* Add the usage file */
        cft = &h->cgroup_files_legacy[2];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the reservation usage file */
        cft = &h->cgroup_files_legacy[3];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_USAGE);
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the MAX usage file */
        cft = &h->cgroup_files_legacy[4];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the MAX reservation usage file */
        cft = &h->cgroup_files_legacy[5];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.max_usage_in_bytes", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_MAX_USAGE);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the failcnt file */
        cft = &h->cgroup_files_legacy[6];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the reservation failcnt file */
        cft = &h->cgroup_files_legacy[7];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.rsvd.failcnt", buf);
        cft->private = MEMFILE_PRIVATE(idx, RES_RSVD_FAILCNT);
        cft->write = hugetlb_cgroup_reset;
        cft->read_u64 = hugetlb_cgroup_read_u64;

        /* Add the numa stat file */
        cft = &h->cgroup_files_legacy[8];
        snprintf(cft->name, MAX_CFTYPE_NAME, "%s.numa_stat", buf);
        cft->private = MEMFILE_PRIVATE(idx, 1);
        cft->seq_show = hugetlb_cgroup_read_numa_stat;

        /* NULL terminate the last cft */
        cft = &h->cgroup_files_legacy[9];
        memset(cft, 0, sizeof(*cft));

        WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
                                          h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
        __hugetlb_cgroup_file_dfl_init(idx);
        __hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /*
                 * Add cgroup control files only if the huge page consists
                 * of more than two normal pages. This is because we use
                 * page[2].private for storing cgroup details.
                 */
                if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
                        __hugetlb_cgroup_file_init(hstate_index(h));
        }
}

/*
 * hugetlb_lock makes sure a parallel cgroup rmdir won't happen while we
 * migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
        struct hugetlb_cgroup *h_cg;
        struct hugetlb_cgroup *h_cg_rsvd;
        struct hstate *h = page_hstate(oldhpage);
        struct folio *old_folio = page_folio(oldhpage);

        if (hugetlb_cgroup_disabled())
                return;

        spin_lock_irq(&hugetlb_lock);
        h_cg = hugetlb_cgroup_from_folio(old_folio);
        h_cg_rsvd = hugetlb_cgroup_from_folio_rsvd(old_folio);
        set_hugetlb_cgroup(oldhpage, NULL);
        set_hugetlb_cgroup_rsvd(oldhpage, NULL);

        /* move the h_cg details to new cgroup */
        set_hugetlb_cgroup(newhpage, h_cg);
        set_hugetlb_cgroup_rsvd(newhpage, h_cg_rsvd);
        list_move(&newhpage->lru, &h->hugepage_activelist);
        spin_unlock_irq(&hugetlb_lock);
        return;
}

static struct cftype hugetlb_files[] = {
        {} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
        .css_alloc      = hugetlb_cgroup_css_alloc,
        .css_offline    = hugetlb_cgroup_css_offline,
        .css_free       = hugetlb_cgroup_css_free,
        .dfl_cftypes    = hugetlb_files,
        .legacy_cftypes = hugetlb_files,
};