/*
 * Resource counters for control groups (cgroups)
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 */

#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/res_counter.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

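/*
 * Initialize a counter: no limits by default (RESOURCE_MAX) and, when
 * @parent is non-NULL, link it into the counter hierarchy.
 */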
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
	spin_lock_init(&counter->lock);
	counter->limit = RESOURCE_MAX;
	counter->soft_limit = RESOURCE_MAX;
	counter->parent = parent;
}

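/*
 * Charge @val to a single counter.  Returns -ENOMEM and bumps failcnt
 * if the new usage would exceed the hard limit; otherwise updates usage
 * and the max_usage watermark.  The caller must hold counter->lock.
 */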
int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
{
	if (counter->usage + val > counter->limit) {
		counter->failcnt++;
		return -ENOMEM;
	}

	counter->usage += val;
	if (counter->usage > counter->max_usage)
		counter->max_usage = counter->usage;
	return 0;
}

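/*
 * Charge @val to @counter and to every ancestor in the hierarchy.  If
 * any level rejects the charge, the counters charged so far are rolled
 * back and the failing counter is reported through @limit_fail_at.
 *
 * A minimal usage sketch (the 'grp' structure is hypothetical, for
 * illustration only):
 *
 *	struct res_counter *fail;
 *
 *	if (res_counter_charge(&grp->res, PAGE_SIZE, &fail))
 *		return -ENOMEM;		(or reclaim against 'fail' and retry)
 *	...
 *	res_counter_uncharge(&grp->res, PAGE_SIZE);
 */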
int res_counter_charge(struct res_counter *counter, unsigned long val,
			struct res_counter **limit_fail_at)
{
	int ret;
	unsigned long flags;
	struct res_counter *c, *u;

	*limit_fail_at = NULL;
	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		ret = res_counter_charge_locked(c, val);
		spin_unlock(&c->lock);
		if (ret < 0) {
			*limit_fail_at = c;
			goto undo;
		}
	}
	ret = 0;
	goto done;
undo:
	for (u = counter; u != c; u = u->parent) {
		spin_lock(&u->lock);
		res_counter_uncharge_locked(u, val);
		spin_unlock(&u->lock);
	}
done:
	local_irq_restore(flags);
	return ret;
}

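/*
 * Remove @val from a single counter.  Uncharging more than is currently
 * charged is a bug; warn and clamp usage to zero in that case.  The
 * caller must hold counter->lock.
 */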
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
{
	if (WARN_ON(counter->usage < val))
		val = counter->usage;

	counter->usage -= val;
}

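/*
 * Uncharge @val from @counter and from every ancestor in the hierarchy.
 */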
void res_counter_uncharge(struct res_counter *counter, unsigned long val)
{
	unsigned long flags;
	struct res_counter *c;

	local_irq_save(flags);
	for (c = counter; c != NULL; c = c->parent) {
		spin_lock(&c->lock);
		res_counter_uncharge_locked(c, val);
		spin_unlock(&c->lock);
	}
	local_irq_restore(flags);
}

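/*
 * Map a RES_* member id onto the address of the corresponding counter
 * field; an unknown member is a bug.
 */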
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
{
	switch (member) {
	case RES_USAGE:
		return &counter->usage;
	case RES_MAX_USAGE:
		return &counter->max_usage;
	case RES_LIMIT:
		return &counter->limit;
	case RES_FAILCNT:
		return &counter->failcnt;
	case RES_SOFT_LIMIT:
		return &counter->soft_limit;
	}

	BUG();
	return NULL;
}

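/*
 * Format one counter member into a small kernel buffer, either with the
 * caller-supplied @read_strategy or as a plain decimal, and copy the
 * result to userspace.
 */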
ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *userbuf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *st_buf))
{
	unsigned long long *val;
	char buf[64], *s;

	s = buf;
	val = res_counter_member(counter, member);
	if (read_strategy)
		s += read_strategy(*val, s);
	else
		s += sprintf(s, "%llu\n", *val);
	return simple_read_from_buffer((void __user *)userbuf, nbytes,
			pos, buf, s - buf);
}

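/*
 * Return the raw value of one counter member.
 */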
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}

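/*
 * Parse a user-supplied limit string: "-1" means unlimited
 * (RESOURCE_MAX); anything else goes through memparse(), so K/M/G
 * suffixes are accepted, and is then rounded up to a page boundary.
 */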
int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res)
{
	char *end;

	/* return RESOURCE_MAX (unlimited) if "-1" is specified */
	if (*buf == '-') {
		*res = simple_strtoull(buf + 1, &end, 10);
		if (*res != 1 || *end != '\0')
			return -EINVAL;
		*res = RESOURCE_MAX;
		return 0;
	}

	/* FIXME - make memparse() take const char* args */
	*res = memparse((char *)buf, &end);
	if (*end != '\0')
		return -EINVAL;

	*res = PAGE_ALIGN(*res);
	return 0;
}

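/*
 * Write a new value into one counter member, parsing @buf either with
 * the caller-supplied @write_strategy or as a plain decimal number.
 */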
int res_counter_write(struct res_counter *counter, int member,
		      const char *buf, write_strategy_fn write_strategy)
{
	char *end;
	unsigned long flags;
	unsigned long long tmp, *val;

	if (write_strategy) {
		if (write_strategy(buf, &tmp))
			return -EINVAL;
	} else {
		tmp = simple_strtoull(buf, &end, 10);
		if (*end != '\0')
			return -EINVAL;
	}
	spin_lock_irqsave(&counter->lock, flags);
	val = res_counter_member(counter, member);
	*val = tmp;
	spin_unlock_irqrestore(&counter->lock, flags);
	return 0;
}