/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 8)
#define UIDHASH_SZ              (1 << UIDHASH_BITS)
#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)       (uidhash_table + __uidhashfn((uid)))
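
/*
 * Worked example (a sketch, assuming CONFIG_BASE_SMALL is 0, so
 * UIDHASH_BITS is 8 and UIDHASH_MASK is 255): uid 1000 hashes to
 * ((1000 >> 8) + 1000) & 255 = 235, while uid 1256 hashes to 236.
 * Without folding the high bits in, both would land in bucket 232,
 * since 1000 and 1256 share their low eight bits.
 */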

static kmem_cache_t *uid_cachep;
static struct list_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and
 * running local_bh_enable() with local interrupts disabled is an error:
 * it would run softirq callbacks, which can unconditionally enable
 * interrupts, and the caller of free_uid() did not expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
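
/*
 * A minimal sketch of that rule (hypothetical caller, not code from
 * this file): take the lock with the irq-safe variants rather than
 * spin_lock_bh(), so it is safe even with interrupts already off:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... manipulate uidhash_table ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 */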

struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .mq_bytes       = 0,
        .locked_shm     = 0,
#ifdef CONFIG_KEYS
        .uid_keyring    = &root_user_keyring,
        .session_keyring = &root_session_keyring,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
{
        list_add(&up->uidhash_list, hashent);
}

static inline void uid_hash_remove(struct user_struct *up)
{
        list_del(&up->uidhash_list);
}

static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
{
        struct list_head *up;

        list_for_each(up, hashent) {
                struct user_struct *user;

                user = list_entry(up, struct user_struct, uidhash_list);

                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
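
/*
 * Example usage (a sketch, not code from this file): look up a uid
 * and drop the reference once done with it.
 *
 *	struct user_struct *up = find_user(1000);
 *
 *	if (up) {
 *		printk("uid 1000 has %d processes\n",
 *		       atomic_read(&up->processes));
 *		free_uid(up);
 *	}
 */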
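
/*
 * Drop a reference on a user_struct and free it on the final put.
 * atomic_dec_and_lock() decrements __count and only takes
 * uidhash_lock when the count reaches zero, so the common case is a
 * plain atomic decrement. Interrupts are disabled by hand so that
 * the lock, when taken, is acquired irq-safely (see the locking
 * rules above).
 */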
void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                spin_unlock_irqrestore(&uidhash_lock, flags);
                key_put(up->uid_keyring);
                key_put(up->session_keyring);
                kmem_cache_free(uid_cachep, up);
        } else {
                local_irq_restore(flags);
        }
}

struct user_struct *alloc_uid(uid_t uid)
{
        struct list_head *hashent = uidhashentry(uid);
        struct user_struct *up;

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                struct user_struct *new;

                new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
                if (!new)
                        return NULL;
                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);
                atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
                atomic_set(&new->inotify_watches, 0);
                atomic_set(&new->inotify_devs, 0);
#endif

                new->mq_bytes = 0;
                new->locked_shm = 0;

                if (alloc_uid_keyring(new, current) < 0) {
                        kmem_cache_free(uid_cachep, new);
                        return NULL;
                }

                /*
                 * Before adding this, check whether we raced with
                 * another task adding the same user.
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }
        return up;
}
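
/*
 * Example usage (a sketch modelled on the setuid() paths, not code
 * from this file): charge the current process to a different uid.
 *
 *	struct user_struct *new_user = alloc_uid(new_ruid);
 *
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 */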

void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over its NPROC rlimit?  We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it.  -DaveM
         */
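        /*
         * A hypothetical sketch of such a check (not implemented
         * here; switch_uid() returns void, so a real version would
         * need an error path):
         *
         *	if (atomic_read(&new_user->processes) >=
         *	    current->signal->rlim[RLIMIT_NPROC].rlim_cur)
         *		return -EAGAIN;
         */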
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;
        free_uid(old_user);
        suid_keys(current);
}

static int __init uid_cache_init(void)
{
        int n;
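
        /*
         * SLAB_PANIC makes kmem_cache_create() panic if the cache
         * cannot be created, so the result needs no NULL check; the
         * two trailing NULLs are the constructor/destructor slots of
         * this era's kmem_cache_create() signature.
         */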
        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_LIST_HEAD(uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);