// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#include "services.h"
struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[];
};

#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid) ((sid) - (SECINITSID_NUM + 1))
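
/*
 * Note on numbering: only dynamically allocated entries are stored in the
 * tree, at indices 0, 1, 2, ...; index_to_sid()/sid_to_index() shift these
 * by SECINITSID_NUM + 1, so tree index 0 corresponds to the first SID after
 * the initial SIDs.
 */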

int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->frozen = false;
	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}
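
/*
 * Look up the SID mapped to @context in the context_to_sid hashtable.
 * The search runs under RCU without taking the sidtab spinlock; a return
 * value of 0 means no mapping exists (yet).
 */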
static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
		if (entry->hash != hash)
			continue;
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}

int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	u32 hash;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	hash = context_compute_hash(context);
	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context, hash)) {
		isid->entry.sid = sid;
		isid->entry.hash = hash;
		hash_add(s->context_to_sid, &isid->entry.list, hash);
	}

	return 0;
}
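
/*
 * Report occupancy statistics of the context_to_sid hashtable (number of
 * entries, buckets used, longest bucket chain) into @page; used to expose
 * sidtab hash statistics for debugging via selinuxfs.
 */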
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			chain_len = 0;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}
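
/*
 * A leaf holds SIDTAB_LEAF_ENTRIES entries and every inner level multiplies
 * the capacity by 2^SIDTAB_INNER_SHIFT, i.e. a tree of a given level can
 * hold SIDTAB_LEAF_ENTRIES << (level * SIDTAB_INNER_SHIFT) entries. Return
 * the smallest level whose capacity is at least @count.
 */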
static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}
	return 0;
}
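
/*
 * Find (and, if @alloc is non-zero, create) the entry slot for @index.
 * The leaf number (index / SIDTAB_LEAF_ENTRIES) is consumed
 * SIDTAB_INNER_SHIFT bits at a time while descending the inner levels,
 * and index % SIDTAB_LEAF_ENTRIES selects the slot inside the leaf node.
 * Allocations use GFP_ATOMIC because this can run under the sidtab
 * spinlock.
 */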
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (alloc)
				entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (alloc)
			entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}

static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}
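
/*
 * Map @context to a SID, allocating a new entry on first use. The common
 * case is the lock-free hashtable hit; only a miss takes the sidtab
 * spinlock, re-searches and inserts. If a policy reload has frozen this
 * sidtab, -ESTALE is returned and the caller is expected to retry against
 * the new policy's sidtab; roughly (caller-side sketch, not verbatim):
 *
 *	rc = sidtab_context_to_sid(sidtab, &ctx, sid);
 *	if (rc == -ESTALE)
 *		goto retry;	(re-read the policy and try again)
 */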
int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count, hash = context_compute_hash(context);
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context, hash);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context, hash);
	if (*sid)
		goto out_unlock;

	if (unlikely(s->frozen)) {
		/*
		 * This sidtab is now frozen - tell the caller to abort and
		 * get the new one.
		 */
		rc = -ESTALE;
		goto out_unlock;
	}

	count = s->count;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);
	dst->hash = hash;

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;
	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	convert = s->convert;
	if (convert) {
		struct sidtab *target = convert->target;

		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = services_convert_context(convert->args,
					      context, &dst_convert->context,
					      GFP_ATOMIC);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		dst_convert->hash = context_compute_hash(&dst_convert->context);
		target->count = count + 1;

		hash_add_rcu(target->context_to_sid,
			     &dst_convert->list, dst_convert->hash);
	}
	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}
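
/*
 * Populate the (new) sidtab's context_to_sid hashtable from its first
 * @count tree entries. Hashes are recomputed here because the contexts
 * were rewritten under the new policy during conversion.
 */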
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);
		entry->hash = context_compute_hash(&entry->context);

		hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
	}
}

static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = services_convert_context(convert->args,
					&esrc->ptr_leaf->entries[i].context,
					&edst->ptr_leaf->entries[i].context,
					GFP_KERNEL);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}
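
/*
 * Convert the whole sidtab to the new policy in two cooperating parts:
 * entries added while the conversion is running are converted "live" by
 * sidtab_context_to_sid() once s->convert is set, while the existing tree
 * is walked by sidtab_convert_tree() outside the spinlock so the bulk work
 * can sleep (GFP_KERNEL, cond_resched()).
 */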
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}

void sidtab_cancel_convert(struct sidtab *s)
{
	unsigned long flags;

	/* cancelling policy load - disable live convert of sidtab */
	spin_lock_irqsave(&s->lock, flags);
	s->convert = NULL;
	spin_unlock_irqrestore(&s->lock, flags);
}
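
/*
 * Freezing is presumably used around the policy swap on reload: with the
 * sidtab lock held and s->frozen set, concurrent sidtab_context_to_sid()
 * callers back off with -ESTALE instead of inserting into the old table.
 */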
void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
{
	spin_lock_irqsave(&s->lock, *flags);
	s->frozen = true;
	s->convert = NULL;
}
void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
{
	spin_unlock_irqrestore(&s->lock, *flags);
}

static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}

void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
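
/*
 * Optional LRU cache of SID -> context-string translations, bounded by
 * CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE entries. Lookups are RCU
 * protected; updates serialize on s->cache_lock and evict from the LRU
 * tail once all slots are in use.
 */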
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
	if (!cache)
		goto out_unlock;
	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}

int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}
	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */