// SPDX-License-Identifier: GPL-2.0-only

#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/user_namespace.h>

struct ucounts init_ucounts = {
	.ns    = &init_user_ns,
	.uid   = GLOBAL_ROOT_UID,
	.count = ATOMIC_INIT(1),
};

#define UCOUNTS_HASHTABLE_BITS 10
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
static DEFINE_SPINLOCK(ucounts_lock);

#define ucounts_hashfn(ns, uid)						\
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid)					\
	(ucounts_hashtable + ucounts_hashfn(ns, uid))

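/*
 * Each user namespace exposes its UCOUNT_* limits through its own "user"
 * sysctl directory; the helpers below wire that directory into the
 * namespace's ctl_table_set.
 */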
#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
{
	return &current_user_ns()->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &current_user_ns()->set == set;
}

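/*
 * sysctl permission check: users capable in the owning namespace get the
 * owner bits, everyone else at most read-only, replicated across
 * user/group/other.
 */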
static int set_permissions(struct ctl_table_header *head,
			   struct ctl_table *table)
{
	struct user_namespace *user_ns =
		container_of(head->set, struct user_namespace, set);
	int mode;

	/* Allow users with CAP_SYS_RESOURCE unrestrained access */
	if (ns_capable(user_ns, CAP_SYS_RESOURCE))
		mode = (table->mode & S_IRWXU) >> 6;
	else
		/* Allow all others at most read-only access */
		mode = table->mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = set_permissions,
};

static long ue_zero = 0;
static long ue_int_max = INT_MAX;

#define UCOUNT_ENTRY(name)				\
	{						\
		.procname	= name,			\
		.maxlen		= sizeof(long),		\
		.mode		= 0644,			\
		.proc_handler	= proc_doulongvec_minmax, \
		.extra1		= &ue_zero,		\
		.extra2		= &ue_int_max,		\
	}
static struct ctl_table user_table[] = {
	UCOUNT_ENTRY("max_user_namespaces"),
	UCOUNT_ENTRY("max_pid_namespaces"),
	UCOUNT_ENTRY("max_uts_namespaces"),
	UCOUNT_ENTRY("max_ipc_namespaces"),
	UCOUNT_ENTRY("max_net_namespaces"),
	UCOUNT_ENTRY("max_mnt_namespaces"),
	UCOUNT_ENTRY("max_cgroup_namespaces"),
	UCOUNT_ENTRY("max_time_namespaces"),
#ifdef CONFIG_INOTIFY_USER
	UCOUNT_ENTRY("max_inotify_instances"),
	UCOUNT_ENTRY("max_inotify_watches"),
#endif
#ifdef CONFIG_FANOTIFY
	UCOUNT_ENTRY("max_fanotify_groups"),
	UCOUNT_ENTRY("max_fanotify_marks"),
#endif
	{ }
};
#endif /* CONFIG_SYSCTL */

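/*
 * Register the per-namespace "user" sysctl table, pointing each entry's
 * ->data at the namespace's ucount_max[] limit.
 */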
bool setup_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	BUILD_BUG_ON(ARRAY_SIZE(user_table) != UCOUNT_COUNTS + 1);
	setup_sysctl_set(&ns->set, &set_root, set_is_seen);
	tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
	if (tbl) {
		int i;
		for (i = 0; i < UCOUNT_COUNTS; i++) {
			tbl[i].data = &ns->ucount_max[i];
		}
		ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl);
	}
	if (!ns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&ns->set);
		return false;
	}
#endif
	return true;
}

void retire_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = ns->sysctls->ctl_table_arg;
	unregister_sysctl_table(ns->sysctls);
	retire_sysctl_set(&ns->set);
	kfree(tbl);
#endif
}

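/*
 * Look up the ucounts entry for (ns, uid) in its hash chain.
 * Must be called with ucounts_lock held.
 */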
static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
{
	struct ucounts *ucounts;

	hlist_for_each_entry(ucounts, hashent, node) {
		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
			return ucounts;
	}
	return NULL;
}

static void hlist_add_ucounts(struct ucounts *ucounts)
{
	struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);

	spin_lock_irq(&ucounts_lock);
	hlist_add_head(&ucounts->node, hashent);
	spin_unlock_irq(&ucounts_lock);
}

static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
{
	/* Returns true on a successful get, false if the count wraps. */
	return !atomic_add_negative(1, &ucounts->count);
}

struct ucounts *get_ucounts(struct ucounts *ucounts)
{
	if (!get_ucounts_or_wrap(ucounts)) {
		put_ucounts(ucounts);
		ucounts = NULL;
	}
	return ucounts;
}

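/*
 * Find the ucounts entry for (ns, uid), or allocate and hash a new one,
 * and take a reference on it.  Returns NULL on allocation failure or if
 * the reference count would wrap.
 */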
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
	struct ucounts *ucounts, *new;
	bool wrapped;

	spin_lock_irq(&ucounts_lock);
	ucounts = find_ucounts(ns, uid, hashent);
	if (!ucounts) {
		spin_unlock_irq(&ucounts_lock);

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		new->ns = ns;
		new->uid = uid;
		atomic_set(&new->count, 1);

		spin_lock_irq(&ucounts_lock);
		ucounts = find_ucounts(ns, uid, hashent);
		if (ucounts) {
			kfree(new);
		} else {
			hlist_add_head(&new->node, hashent);
			get_user_ns(new->ns);
			spin_unlock_irq(&ucounts_lock);
			return new;
		}
	}
	wrapped = !get_ucounts_or_wrap(ucounts);
	spin_unlock_irq(&ucounts_lock);
	if (wrapped) {
		put_ucounts(ucounts);
		return NULL;
	}
	return ucounts;
}

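/*
 * Drop a reference; on the final put the entry is unhashed, the user
 * namespace reference is released and the ucounts struct is freed.
 */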
void put_ucounts(struct ucounts *ucounts)
{
	unsigned long flags;

	if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
		hlist_del_init(&ucounts->node);
		spin_unlock_irqrestore(&ucounts_lock, flags);
		put_user_ns(ucounts->ns);
		kfree(ucounts);
	}
}

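/*
 * Increment *v unless that would make it reach or exceed the limit u.
 * Returns true if the increment happened.
 */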
static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
{
	long c, old;

	c = atomic_long_read(v);
	for (;;) {
		if (unlikely(c >= u))
			return false;
		old = atomic_long_cmpxchg(v, c, c+1);
		if (likely(old == c))
			return true;
		c = old;
	}
}

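/*
 * Charge one UCOUNT_* item against (ns, uid) and every ancestor level,
 * checking each level's ucount_max limit.  On failure the partial
 * charges are rolled back and NULL is returned.
 */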
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
			   enum ucount_type type)
{
	struct ucounts *ucounts, *iter, *bad;
	struct user_namespace *tns;

	ucounts = alloc_ucounts(ns, uid);
	for (iter = ucounts; iter; iter = tns->ucounts) {
		long max;

		tns = iter->ns;
		max = READ_ONCE(tns->ucount_max[type]);
		if (!atomic_long_inc_below(&iter->ucount[type], max))
			goto fail;
	}
	return ucounts;
fail:
	bad = iter;
	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
		atomic_long_dec(&iter->ucount[type]);

	put_ucounts(ucounts);
	return NULL;
}

void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
{
	struct ucounts *iter;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_dec_if_positive(&iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
	}
	put_ucounts(ucounts);
}

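/*
 * Add v to the rlimit counter at every level of the hierarchy.  Returns
 * the new value at the bottom level, or LONG_MAX if any level overflowed
 * its limit.
 */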
long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)
{
	struct ucounts *iter;
	long max = LONG_MAX;
	long ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long new = atomic_long_add_return(v, &iter->rlimit[type]);
		if (new < 0 || new > max)
			ret = LONG_MAX;
		else if (iter == ucounts)
			ret = new;
		max = get_userns_rlimit_max(iter->ns, type);
	}
	return ret;
}

bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)
{
	struct ucounts *iter;
	long new = -1; /* Silence compiler warning */

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_sub_return(v, &iter->rlimit[type]);
		WARN_ON_ONCE(dec < 0);
		if (iter == ucounts)
			new = dec;
	}
	return (new == 0);
}

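/*
 * Walk from ucounts up to (but not including) last, undoing one rlimit
 * charge per level and dropping the level's reference when its counter
 * hits zero.
 */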
static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
				struct ucounts *last, enum rlimit_type type)
{
	struct ucounts *iter, *next;

	for (iter = ucounts; iter != last; iter = next) {
		long dec = atomic_long_sub_return(1, &iter->rlimit[type]);
		WARN_ON_ONCE(dec < 0);
		next = iter->ns->ucounts;
		if (dec == 0)
			put_ucounts(iter);
	}
}

void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type)
{
	do_dec_rlimit_put_ucounts(ucounts, NULL, type);
}

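/*
 * Charge one rlimit unit at every level, taking an extra reference on a
 * level whose counter goes 0 -> 1.  Returns the new count at the bottom
 * level, or 0 if any level would exceed its limit (all charges and
 * references taken so far are then unwound).
 */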
long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type)
{
	/* Caller must hold a reference to ucounts */
	struct ucounts *iter;
	long max = LONG_MAX;
	long dec, ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long new = atomic_long_add_return(1, &iter->rlimit[type]);
		if (new < 0 || new > max)
			goto unwind;
		if (iter == ucounts)
			ret = new;
		max = get_userns_rlimit_max(iter->ns, type);
		/*
		 * Grab an extra ucount reference for the caller when
		 * the rlimit count was previously 0.
		 */
		if (new != 1)
			continue;
		if (!get_ucounts(iter))
			goto dec_unwind;
	}
	return ret;
dec_unwind:
	dec = atomic_long_sub_return(1, &iter->rlimit[type]);
	WARN_ON_ONCE(dec < 0);
unwind:
	do_dec_rlimit_put_ucounts(ucounts, iter, type);
	return 0;
}

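/*
 * Walk up the user namespace hierarchy and return true if any level's
 * rlimit count exceeds the limit that applies at that level.
 */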
bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long rlimit)
{
	struct ucounts *iter;
	long max = rlimit;

	if (rlimit > LONG_MAX)
		max = LONG_MAX;
	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long val = get_rlimit_value(iter, type);
		if (val < 0 || val > max)
			return true;
		max = get_userns_rlimit_max(iter->ns, type);
	}
	return false;
}

static __init int user_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	static struct ctl_table_header *user_header;
	static struct ctl_table empty[1];

	/*
	 * It is necessary to register the user directory in the
	 * default set so that registrations in the child sets work
	 * properly.
	 */
	user_header = register_sysctl("user", empty);
	kmemleak_ignore(user_header);
	BUG_ON(!user_header);
	BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
	hlist_add_ucounts(&init_ucounts);
	inc_rlimit_ucounts(&init_ucounts, UCOUNT_RLIMIT_NPROC, 1);
	return 0;
}
subsys_initcall(user_namespace_sysctl_init);