(klist)->keys[index], \
rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
+#define MAX_KEYRING_LINKS \
+ min_t(size_t, USHRT_MAX - 1, \
+ ((PAGE_SIZE - sizeof(struct keyring_list)) / sizeof(struct key *)))
+
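The new MAX_KEYRING_LINKS bound keeps a keyring's link array within a single page and within the range of the unsigned short counters in struct keyring_list. A minimal user-space sketch of the same arithmetic, assuming 4096-byte pages, 8-byte pointers and a stand-in header (the real struct keyring_list has a different layout):

    /* Stand-alone sketch of the MAX_KEYRING_LINKS arithmetic.  The page
     * size, pointer size and header layout below are assumptions made for
     * illustration; the kernel derives the real value from its own
     * definitions of PAGE_SIZE and struct keyring_list. */
    #include <stdio.h>
    #include <stddef.h>
    #include <limits.h>

    #define FAKE_PAGE_SIZE 4096             /* assumed page size */

    struct fake_keyring_list {              /* stand-in header, not the real layout */
        unsigned short maxkeys;             /* capacity of keys[] */
        unsigned short nkeys;               /* links currently in use */
        unsigned short delkey;              /* slot marked for displacement */
        void *keys[];                       /* the links themselves */
    };

    int main(void)
    {
        size_t fit_in_page = (FAKE_PAGE_SIZE - sizeof(struct fake_keyring_list))
                             / sizeof(void *);
        size_t max_links = fit_in_page < USHRT_MAX - 1 ? fit_in_page
                                                       : USHRT_MAX - 1;

        /* With these assumptions the page limit dominates: ~500 links */
        printf("links per keyring capped at %zu\n", max_links);
        return 0;
    }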
#define KEY_LINK_FIXQUOTA 1UL
/*
bool no_state_check)
{
struct {
+ /* Need a separate keylist pointer for RCU purposes */
+ struct key *keyring;
struct keyring_list *keylist;
int kix;
} stack[KEYRING_SEARCH_MAX_DEPTH];
continue;
/* stack the current position */
+ stack[sp].keyring = keyring;
stack[sp].keylist = keylist;
stack[sp].kix = kix;
sp++;
if (sp > 0) {
/* resume the processing of a keyring higher up in the tree */
sp--;
+ keyring = stack[sp].keyring;
keylist = stack[sp].keylist;
kix = stack[sp].kix + 1;
goto ascend;
/* we found a viable match */
found:
atomic_inc(&key->usage);
+ key->last_used_at = now.tv_sec;
+ keyring->last_used_at = now.tv_sec;
+ while (sp > 0)
+ stack[--sp].keyring->last_used_at = now.tv_sec;
key_check(key);
key_ref = make_key_ref(key, possessed);
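Recording the keyring itself in the search stack, not just its keylist, is what allows the code at the found label to walk back up and refresh last_used_at on every keyring along the path to the match. A rough stand-alone sketch of that touch-on-unwind pattern, with an invented node type and depth limit standing in for the kernel structures:

    /* Sketch of the touch-on-unwind pattern: an iterative depth-first
     * descent keeps the parent at each level so the whole path can be
     * timestamped when a match is found.  Types and the depth limit are
     * illustrative, not the kernel's. */
    #include <stdio.h>
    #include <time.h>

    #define MAX_DEPTH 6                     /* cf. KEYRING_SEARCH_MAX_DEPTH */

    struct node {
        const char *name;
        time_t last_used_at;
        struct node *child;                 /* single child keeps the sketch short */
        int is_match;
    };

    /* Returns the matching node, refreshing last_used_at along the path. */
    static struct node *search(struct node *root, time_t now)
    {
        struct node *stack[MAX_DEPTH];
        int sp = 0;
        struct node *n = root;

        while (n) {
            if (n->is_match) {
                n->last_used_at = now;
                while (sp > 0)              /* touch every ancestor too */
                    stack[--sp]->last_used_at = now;
                return n;
            }
            if (n->child && sp < MAX_DEPTH)
                stack[sp++] = n;            /* remember the path down */
            n = n->child;
        }
        return NULL;
    }

    int main(void)
    {
        struct node leaf = { "leaf", 0, NULL, 1 };
        struct node mid  = { "mid", 0, &leaf, 0 };
        struct node top  = { "top", 0, &mid, 0 };

        search(&top, time(NULL));
        printf("%s touched at %ld\n", top.name, (long)top.last_used_at);
        return 0;
    }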
error_2:
found:
atomic_inc(&key->usage);
+ keyring->last_used_at = key->last_used_at =
+ current_kernel_time().tv_sec;
rcu_read_unlock();
return make_key_ref(key, possessed);
}
* (ie. it has a zero usage count) */
if (!atomic_inc_not_zero(&keyring->usage))
continue;
+ keyring->last_used_at = current_kernel_time().tv_sec;
goto out;
}
}
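find_keyring_by_name only bumps the timestamp after atomic_inc_not_zero() succeeds, so a keyring that is already being torn down (usage count at zero) is skipped rather than resurrected. A minimal sketch of that try-get idiom in C11 atomics; the type and helper name are invented for the example:

    /* Sketch of the "increment unless zero" try-get used above, written
     * with C11 atomics.  obj/obj_tryget are illustrative names, not
     * kernel API. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj {
        atomic_int usage;
    };

    /* Take a reference only if the object is still live (usage > 0). */
    static bool obj_tryget(struct obj *o)
    {
        int old = atomic_load(&o->usage);

        while (old != 0) {
            if (atomic_compare_exchange_weak(&o->usage, &old, old + 1))
                return true;                /* got a reference */
            /* old was reloaded by the failed CAS; retry */
        }
        return false;                       /* object already dying, skip it */
    }

    int main(void)
    {
        struct obj live = { 1 };
        struct obj dead = { 0 };

        printf("live: %d, dead: %d\n", obj_tryget(&live), obj_tryget(&dead));
        return 0;
    }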
struct keyring_list *klist, *nklist;
unsigned long prealloc;
unsigned max;
+ time_t lowest_lru;
size_t size;
- int loop, ret;
+ int loop, lru, ret;
kenter("%d,%s,%s,", key_serial(keyring), type->name, description);
klist = rcu_dereference_locked_keyring(keyring);
/* see if there's a matching key we can displace */
+ lru = -1;
if (klist && klist->nkeys > 0) {
+ lowest_lru = TIME_T_MAX;
for (loop = klist->nkeys - 1; loop >= 0; loop--) {
struct key *key = rcu_deref_link_locked(klist, loop,
keyring);
prealloc = 0;
goto done;
}
+ if (key->last_used_at < lowest_lru) {
+ lowest_lru = key->last_used_at;
+ lru = loop;
+ }
}
}
+ /* If the keyring is full then do an LRU discard */
+ if (klist &&
+ klist->nkeys == klist->maxkeys &&
+ klist->maxkeys >= MAX_KEYRING_LINKS) {
+ kdebug("LRU discard %d\n", lru);
+ klist->delkey = lru;
+ prealloc = 0;
+ goto done;
+ }
+
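The displacement scan now doubles as an LRU scan: while looking for a same-type, same-description key to replace, it also remembers the slot with the oldest last_used_at, and once the keyring has hit MAX_KEYRING_LINKS that slot is handed to the existing delkey displacement machinery instead of failing the link. A small stand-alone sketch of the selection step, with an invented link type and a fixed capacity standing in for MAX_KEYRING_LINKS:

    /* Sketch of the LRU slot selection: scan the links, track the lowest
     * last_used_at, and when the list is full reuse that slot for the new
     * link.  The fixed CAP and the link type are illustrative only. */
    #include <stdio.h>
    #include <time.h>

    #define CAP 4                           /* stands in for MAX_KEYRING_LINKS */

    struct link {
        const char *desc;
        time_t last_used_at;
    };

    /* Returns the index to store the new link at: a free slot if there is
     * one, otherwise the least recently used slot (LRU discard). */
    static int pick_slot(struct link *links, int nkeys)
    {
        time_t lowest_lru;
        int lru, i;

        if (nkeys < CAP)
            return nkeys;                   /* room left: append */

        lowest_lru = links[0].last_used_at;
        lru = 0;
        for (i = 1; i < nkeys; i++) {
            if (links[i].last_used_at < lowest_lru) {
                lowest_lru = links[i].last_used_at;
                lru = i;
            }
        }
        return lru;                         /* full: displace the oldest link */
    }

    int main(void)
    {
        struct link links[CAP] = {
            { "a", 100 }, { "b", 40 }, { "c", 70 }, { "d", 90 },
        };

        printf("discard slot %d\n", pick_slot(links, CAP));  /* prints 1 */
        return 0;
    }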
/* check that we aren't going to overrun the user's quota */
ret = key_payload_reserve(keyring,
keyring->datalen + KEYQUOTA_LINK_BYTES);
} else {
/* grow the key list */
max = 4;
- if (klist)
+ if (klist) {
max += klist->maxkeys;
+ if (max > MAX_KEYRING_LINKS)
+ max = MAX_KEYRING_LINKS;
+ BUG_ON(max <= klist->maxkeys);
+ }
- ret = -ENFILE;
- if (max > USHRT_MAX - 1)
- goto error_quota;
size = sizeof(*klist) + sizeof(struct key *) * max;
- if (size > PAGE_SIZE)
- goto error_quota;
ret = -ENOMEM;
nklist = kmalloc(size, GFP_KERNEL);
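Growth of the link array is now clamped to MAX_KEYRING_LINKS up front, replacing the old ENFILE checks against USHRT_MAX and PAGE_SIZE; since a keyring that is already at the limit takes the LRU-discard path above, the grow path should never be asked for a capacity it cannot deliver, which is what the BUG_ON asserts. A rough sketch of the capped growth calculation, with an assumed limit value in place of MAX_KEYRING_LINKS:

    /* Sketch of the capped growth step: start at 4 slots, otherwise add 4
     * to the current capacity, and never exceed the per-keyring limit.
     * The limit value here is an assumption for the example. */
    #include <assert.h>
    #include <stdio.h>

    #define LINK_LIMIT 511                  /* stands in for MAX_KEYRING_LINKS */

    static unsigned next_capacity(unsigned curmax)
    {
        unsigned max = 4;

        if (curmax) {
            max += curmax;                  /* grow by four slots each time */
            if (max > LINK_LIMIT)
                max = LINK_LIMIT;
            /* caller must not ask to grow an already-full keyring */
            assert(max > curmax);
        }
        return max;
    }

    int main(void)
    {
        printf("%u %u %u\n", next_capacity(0), next_capacity(4),
               next_capacity(508));         /* prints: 4 8 511 */
        return 0;
    }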
klist = rcu_dereference_locked_keyring(keyring);
atomic_inc(&key->usage);
+ keyring->last_used_at = key->last_used_at =
+ current_kernel_time().tv_sec;
/* there's a matching key we can displace or an empty slot in a newly
* allocated list we can fill */
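All of the above hinges on every link and lookup stamping both the key and its keyring with the current second, so the later LRU comparison has something meaningful to compare. A toy sketch of that touch-on-link rule, with time() standing in for current_kernel_time() and invented types:

    /* Sketch of the touch-on-link rule: whenever a key is linked, both the
     * key and the keyring holding it are stamped with the current second.
     * time() approximates current_kernel_time(); types are illustrative. */
    #include <stdio.h>
    #include <time.h>

    struct skey {
        const char *desc;
        time_t last_used_at;
    };

    struct skeyring {
        struct skey *links[8];
        int nkeys;
        time_t last_used_at;
    };

    static void link_key(struct skeyring *kr, struct skey *key)
    {
        if (kr->nkeys < 8) {
            kr->links[kr->nkeys++] = key;
            /* both ends of the link count as "used" right now */
            kr->last_used_at = key->last_used_at = time(NULL);
        }
    }

    int main(void)
    {
        struct skeyring kr = { .nkeys = 0 };
        struct skey k = { "example", 0 };

        link_key(&kr, &k);
        printf("keyring %ld, key %ld\n",
               (long)kr.last_used_at, (long)k.last_used_at);
        return 0;
    }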