// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */
/**
 * DOC: blk-crypto profiles
 *
 * 'struct blk_crypto_profile' contains all generic inline encryption-related
 * state for a particular inline encryption device.  blk_crypto_profile serves
 * as the way that drivers for inline encryption hardware expose their crypto
 * capabilities and certain functions (e.g., functions to program and evict
 * keys) to upper layers.  Device drivers that want to support inline encryption
 * construct a crypto profile, then associate it with the disk's request_queue.
 *
 * If the device has keyslots, then its blk_crypto_profile also handles managing
 * these keyslots in a device-independent way, using the driver-provided
 * functions to program and evict keys as needed.  This includes keeping track
 * of which key and how many I/O requests are using each keyslot, getting
 * keyslots for I/O requests, and handling key eviction requests.
 *
 * For more information, see Documentation/block/inline-encryption.rst.
 */
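/*
 * Illustrative sketch only (not part of this file): a driver for inline
 * encryption hardware typically defines its low-level operations, initializes
 * a profile, fills in its capabilities, and then registers the profile with
 * the disk's request_queue.  The driver-side names below (struct foo_device,
 * foo_keyslot_program, foo_keyslot_evict, foo_setup_crypto) are hypothetical;
 * only the blk_crypto_* APIs and blk_crypto_profile fields are real.  Each
 * entry of modes_supported[] is a bitmask of supported data unit sizes, so
 * "|= 4096" advertises a 4096-byte data unit size for AES-256-XTS.
 *
 *	static const struct blk_crypto_ll_ops foo_crypto_ll_ops = {
 *		.keyslot_program	= foo_keyslot_program,
 *		.keyslot_evict		= foo_keyslot_evict,
 *	};
 *
 *	static int foo_setup_crypto(struct foo_device *foo,
 *				    struct request_queue *q)
 *	{
 *		int err = blk_crypto_profile_init(&foo->crypto_profile,
 *						  foo->num_keyslots);
 *		if (err)
 *			return err;
 *		foo->crypto_profile.ll_ops = foo_crypto_ll_ops;
 *		foo->crypto_profile.dev = foo->dev;
 *		foo->crypto_profile.modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 4096;
 *		foo->crypto_profile.max_dun_bytes_supported = 8;
 *		if (!blk_crypto_register(&foo->crypto_profile, q)) {
 *			blk_crypto_profile_destroy(&foo->crypto_profile);
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */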
#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/blk-crypto-profile.h>
#include <linux/device.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
struct blk_crypto_keyslot {
	atomic_t slot_refs;
	struct list_head idle_slot_node;
	struct hlist_node hash_node;
	const struct blk_crypto_key *key;
	struct blk_crypto_profile *profile;
};
static inline void blk_crypto_hw_enter(struct blk_crypto_profile *profile)
{
	/*
	 * Calling into the driver requires profile->lock held and the device
	 * resumed.  But we must resume the device first, since that can acquire
	 * and release profile->lock via blk_crypto_reprogram_all_keys().
	 */
	if (profile->dev)
		pm_runtime_get_sync(profile->dev);
	down_write(&profile->lock);
}

static inline void blk_crypto_hw_exit(struct blk_crypto_profile *profile)
{
	up_write(&profile->lock);
	if (profile->dev)
		pm_runtime_put_sync(profile->dev);
}
/**
 * blk_crypto_profile_init() - Initialize a blk_crypto_profile
 * @profile: the blk_crypto_profile to initialize
 * @num_slots: the number of keyslots
 *
 * Storage drivers must call this when starting to set up a blk_crypto_profile,
 * before filling in additional fields.
 *
 * Return: 0 on success, or else a negative error code.
 */
int blk_crypto_profile_init(struct blk_crypto_profile *profile,
			    unsigned int num_slots)
{
	unsigned int slot;
	unsigned int i;
	unsigned int slot_hashtable_size;

	memset(profile, 0, sizeof(*profile));
	init_rwsem(&profile->lock);

	if (num_slots == 0)
		return 0;

	/* Initialize keyslot management data. */

	profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]),
				  GFP_KERNEL);
	if (!profile->slots)
		return -ENOMEM;

	profile->num_slots = num_slots;

	init_waitqueue_head(&profile->idle_slots_wait_queue);
	INIT_LIST_HEAD(&profile->idle_slots);

	for (slot = 0; slot < num_slots; slot++) {
		profile->slots[slot].profile = profile;
		list_add_tail(&profile->slots[slot].idle_slot_node,
			      &profile->idle_slots);
	}

	spin_lock_init(&profile->idle_slots_lock);

	slot_hashtable_size = roundup_pow_of_two(num_slots);
	/*
	 * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2
	 * buckets.  This only makes a difference when there is only 1 keyslot.
	 */
	if (slot_hashtable_size < 2)
		slot_hashtable_size = 2;

	profile->log_slot_ht_size = ilog2(slot_hashtable_size);
	profile->slot_hashtable =
		kvmalloc_array(slot_hashtable_size,
			       sizeof(profile->slot_hashtable[0]), GFP_KERNEL);
	if (!profile->slot_hashtable)
		goto err_destroy;
	for (i = 0; i < slot_hashtable_size; i++)
		INIT_HLIST_HEAD(&profile->slot_hashtable[i]);

	return 0;

err_destroy:
	blk_crypto_profile_destroy(profile);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_crypto_profile_init);
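/*
 * Illustrative sketch only: a driver that uses blk_crypto_profile_init()
 * directly (rather than the devm_ variant below) is responsible for calling
 * blk_crypto_profile_destroy() once upper layers can no longer reach the
 * profile, e.g. in its removal path.  struct foo_device and
 * foo_remove_crypto() are hypothetical names.
 *
 *	static void foo_remove_crypto(struct foo_device *foo)
 *	{
 *		blk_crypto_profile_destroy(&foo->crypto_profile);
 *	}
 */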
static void blk_crypto_profile_destroy_callback(void *profile)
{
	blk_crypto_profile_destroy(profile);
}
/**
 * devm_blk_crypto_profile_init() - Resource-managed blk_crypto_profile_init()
 * @dev: the device which owns the blk_crypto_profile
 * @profile: the blk_crypto_profile to initialize
 * @num_slots: the number of keyslots
 *
 * Like blk_crypto_profile_init(), but causes blk_crypto_profile_destroy() to be
 * called automatically on driver detach.
 *
 * Return: 0 on success, or else a negative error code.
 */
int devm_blk_crypto_profile_init(struct device *dev,
				 struct blk_crypto_profile *profile,
				 unsigned int num_slots)
{
	int err = blk_crypto_profile_init(profile, num_slots);

	if (err)
		return err;

	return devm_add_action_or_reset(dev,
					blk_crypto_profile_destroy_callback,
					profile);
}
EXPORT_SYMBOL_GPL(devm_blk_crypto_profile_init);
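/*
 * Illustrative sketch only: with the resource-managed variant, the profile is
 * torn down automatically on driver detach, so no explicit
 * blk_crypto_profile_destroy() call is needed.  foo_probe(), struct
 * foo_device, and foo_crypto_ll_ops are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *		int err;
 *
 *		err = devm_blk_crypto_profile_init(dev, &foo->crypto_profile,
 *						   foo->num_keyslots);
 *		if (err)
 *			return err;
 *		foo->crypto_profile.ll_ops = foo_crypto_ll_ops;
 *		return 0;
 *	}
 */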
static inline struct hlist_head *
blk_crypto_hash_bucket_for_key(struct blk_crypto_profile *profile,
			       const struct blk_crypto_key *key)
{
	return &profile->slot_hashtable[
			hash_ptr(key, profile->log_slot_ht_size)];
}
static void
blk_crypto_remove_slot_from_lru_list(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile = slot->profile;
	unsigned long flags;

	spin_lock_irqsave(&profile->idle_slots_lock, flags);
	list_del(&slot->idle_slot_node);
	spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
}
static struct blk_crypto_keyslot *
blk_crypto_find_keyslot(struct blk_crypto_profile *profile,
			const struct blk_crypto_key *key)
{
	const struct hlist_head *head =
		blk_crypto_hash_bucket_for_key(profile, key);
	struct blk_crypto_keyslot *slotp;

	hlist_for_each_entry(slotp, head, hash_node) {
		if (slotp->key == key)
			return slotp;
	}
	return NULL;
}
static struct blk_crypto_keyslot *
blk_crypto_find_and_grab_keyslot(struct blk_crypto_profile *profile,
				 const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;

	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot)
		return NULL;
	if (atomic_inc_return(&slot->slot_refs) == 1) {
		/* Took first reference to this slot; remove it from LRU list */
		blk_crypto_remove_slot_from_lru_list(slot);
	}
	return slot;
}
/**
 * blk_crypto_keyslot_index() - Get the index of a keyslot
 * @slot: a keyslot that blk_crypto_get_keyslot() returned
 *
 * Return: the 0-based index of the keyslot within the device's keyslots.
 */
unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot)
{
	return slot - slot->profile->slots;
}
EXPORT_SYMBOL_GPL(blk_crypto_keyslot_index);
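/*
 * Illustrative sketch only: drivers typically call blk_crypto_keyslot_index()
 * when building a hardware command for a request, to translate the opaque
 * keyslot pointer into the slot number the hardware expects.  This assumes
 * the request's ->crypt_keyslot pointer, which the block layer sets when
 * inline encryption is in use; struct foo_cmd (with an int key_slot field)
 * and foo_prepare_crypto() are hypothetical.
 *
 *	static void foo_prepare_crypto(struct request *rq, struct foo_cmd *cmd)
 *	{
 *		if (rq->crypt_keyslot)
 *			cmd->key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot);
 *		else
 *			cmd->key_slot = -1;
 *	}
 */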
/**
 * blk_crypto_get_keyslot() - Get a keyslot for a key, if needed.
 * @profile: the crypto profile of the device the key will be used on
 * @key: the key that will be used
 * @slot_ptr: If a keyslot is allocated, an opaque pointer to the keyslot struct
 *	      will be stored here; otherwise NULL will be stored here.
 *
 * If the device has keyslots, this gets a keyslot that's been programmed with
 * the specified key.  If the key is already in a slot, this reuses it;
 * otherwise this waits for a slot to become idle and programs the key into it.
 *
 * This must be paired with a call to blk_crypto_put_keyslot().
 *
 * Context: Process context. Takes and releases profile->lock.
 * Return: BLK_STS_OK on success, meaning that either a keyslot was allocated or
 *	   one wasn't needed; or a blk_status_t error on failure.
 */
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr)
{
	struct blk_crypto_keyslot *slot;
	unsigned int slot_idx;
	int err;

	*slot_ptr = NULL;

	/*
	 * If the device has no concept of "keyslots", then there is no need to
	 * get one.
	 */
	if (profile->num_slots == 0)
		return BLK_STS_OK;

	/* Try to find an existing keyslot with the key */
	down_read(&profile->lock);
	slot = blk_crypto_find_and_grab_keyslot(profile, key);
	up_read(&profile->lock);
	if (slot)
		goto success;

	for (;;) {
		blk_crypto_hw_enter(profile);
		slot = blk_crypto_find_and_grab_keyslot(profile, key);
		if (slot) {
			blk_crypto_hw_exit(profile);
			goto success;
		}

		/*
		 * If we're here, that means there wasn't a slot that was
		 * already programmed with the key.  So try to program it.
		 */
		if (!list_empty(&profile->idle_slots))
			break;

		blk_crypto_hw_exit(profile);
		wait_event(profile->idle_slots_wait_queue,
			   !list_empty(&profile->idle_slots));
	}

	slot = list_first_entry(&profile->idle_slots, struct blk_crypto_keyslot,
				idle_slot_node);
	slot_idx = blk_crypto_keyslot_index(slot);

	err = profile->ll_ops.keyslot_program(profile, key, slot_idx);
	if (err) {
		wake_up(&profile->idle_slots_wait_queue);
		blk_crypto_hw_exit(profile);
		return errno_to_blk_status(err);
	}

	/* Move this slot to the hash list for the new key. */
	if (slot->key)
		hlist_del(&slot->hash_node);
	slot->key = key;
	hlist_add_head(&slot->hash_node,
		       blk_crypto_hash_bucket_for_key(profile, key));

	atomic_set(&slot->slot_refs, 1);
	blk_crypto_remove_slot_from_lru_list(slot);
	blk_crypto_hw_exit(profile);
success:
	*slot_ptr = slot;
	return BLK_STS_OK;
}
/**
 * blk_crypto_put_keyslot() - Release a reference to a keyslot
 * @slot: The keyslot to release the reference of (may be NULL).
 *
 * Context: Any context.
 */
void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile;
	unsigned long flags;

	if (!slot)
		return;
	profile = slot->profile;

	if (atomic_dec_and_lock_irqsave(&slot->slot_refs,
					&profile->idle_slots_lock, flags)) {
		list_add_tail(&slot->idle_slot_node, &profile->idle_slots);
		spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
		wake_up(&profile->idle_slots_wait_queue);
	}
}
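/*
 * Illustrative sketch only: blk_crypto_get_keyslot() and
 * blk_crypto_put_keyslot() are used as a pair around the I/O that needs the
 * key.  The helper name foo_do_crypto_io() is hypothetical, and the real call
 * sites live in the blk-crypto core rather than in drivers.
 *
 *	static blk_status_t foo_do_crypto_io(struct blk_crypto_profile *profile,
 *					     const struct blk_crypto_key *key)
 *	{
 *		struct blk_crypto_keyslot *slot;
 *		blk_status_t status;
 *
 *		status = blk_crypto_get_keyslot(profile, key, &slot);
 *		if (status != BLK_STS_OK)
 *			return status;
 *		// ... issue the I/O that uses the key, e.g. via
 *		// blk_crypto_keyslot_index(slot) if slot is non-NULL ...
 *		blk_crypto_put_keyslot(slot);	// NULL slot is allowed here
 *		return BLK_STS_OK;
 *	}
 */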
/**
 * __blk_crypto_cfg_supported() - Check whether the given crypto profile
 *				  supports the given crypto configuration.
 * @profile: the crypto profile to check
 * @cfg: the crypto configuration to check for
 *
 * Return: %true if @profile supports the given @cfg.
 */
bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg)
{
	if (!profile)
		return false;
	if (!(profile->modes_supported[cfg->crypto_mode] & cfg->data_unit_size))
		return false;
	if (profile->max_dun_bytes_supported < cfg->dun_bytes)
		return false;
	return true;
}
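/*
 * Illustrative sketch only: callers describe the crypto mode, data unit size,
 * and DUN size a key will use in a struct blk_crypto_config, then check it
 * against the queue's profile.  The helper name foo_key_usable() and the
 * particular values are just an example.
 *
 *	static bool foo_key_usable(struct request_queue *q)
 *	{
 *		const struct blk_crypto_config cfg = {
 *			.crypto_mode	= BLK_ENCRYPTION_MODE_AES_256_XTS,
 *			.data_unit_size	= 4096,
 *			.dun_bytes	= 8,
 *		};
 *
 *		return __blk_crypto_cfg_supported(q->crypto_profile, &cfg);
 *	}
 */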
/**
 * __blk_crypto_evict_key() - Evict a key from a device.
 * @profile: the crypto profile of the device
 * @key: the key to evict.  It must not still be used in any I/O.
 *
 * If the device has keyslots, this finds the keyslot (if any) that contains the
 * specified key and calls the driver's keyslot_evict function to evict it.
 *
 * Otherwise, this just calls the driver's keyslot_evict function if it is
 * implemented, passing just the key (without any particular keyslot).  This
 * allows layered devices to evict the key from their underlying devices.
 *
 * Context: Process context. Takes and releases profile->lock.
 * Return: 0 on success or if there's no keyslot with the specified key, -EBUSY
 *	   if the keyslot is still in use, or another -errno value on other
 *	   error.
 */
int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;
	int err = 0;

	if (profile->num_slots == 0) {
		if (profile->ll_ops.keyslot_evict) {
			blk_crypto_hw_enter(profile);
			err = profile->ll_ops.keyslot_evict(profile, key, -1);
			blk_crypto_hw_exit(profile);
			return err;
		}
		return 0;
	}

	blk_crypto_hw_enter(profile);
	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot)
		goto out_unlock;
	if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) {
		err = -EBUSY;
		goto out_unlock;
	}
	err = profile->ll_ops.keyslot_evict(profile, key,
					    blk_crypto_keyslot_index(slot));
	if (err)
		goto out_unlock;

	hlist_del(&slot->hash_node);
	slot->key = NULL;
	err = 0;
out_unlock:
	blk_crypto_hw_exit(profile);
	return err;
}
/**
 * blk_crypto_reprogram_all_keys() - Re-program all keyslots.
 * @profile: The crypto profile
 *
 * Re-program all keyslots that are supposed to have a key programmed.  This is
 * intended only for use by drivers for hardware that loses its keys on reset.
 *
 * Context: Process context. Takes and releases profile->lock.
 */
void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile)
{
	unsigned int slot;

	if (profile->num_slots == 0)
		return;

	/* This is for device initialization, so don't resume the device */
	down_write(&profile->lock);
	for (slot = 0; slot < profile->num_slots; slot++) {
		const struct blk_crypto_key *key = profile->slots[slot].key;
		int err;

		if (!key)
			continue;
		err = profile->ll_ops.keyslot_program(profile, key, slot);
		WARN_ON(err);
	}
	up_write(&profile->lock);
}
EXPORT_SYMBOL_GPL(blk_crypto_reprogram_all_keys);
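/*
 * Illustrative sketch only: a driver whose hardware loses keys across a reset
 * or power cycle can restore them from its reset/resume path once the keyslot
 * registers are accessible again.  struct foo_device and foo_reset_done() are
 * hypothetical.
 *
 *	static void foo_reset_done(struct foo_device *foo)
 *	{
 *		blk_crypto_reprogram_all_keys(&foo->crypto_profile);
 *	}
 */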
void blk_crypto_profile_destroy(struct blk_crypto_profile *profile)
{
	if (!profile)
		return;
	kvfree(profile->slot_hashtable);
	kvfree_sensitive(profile->slots,
			 sizeof(profile->slots[0]) * profile->num_slots);
	memzero_explicit(profile, sizeof(*profile));
}
EXPORT_SYMBOL_GPL(blk_crypto_profile_destroy);
bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q)
{
	if (blk_integrity_queue_supports_integrity(q)) {
		pr_warn("Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
		return false;
	}
	q->crypto_profile = profile;
	return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_register);
/**
 * blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
 *					 by child device
 * @parent: the crypto profile for the parent device
 * @child: the crypto profile for the child device, or NULL
 *
 * This clears all crypto capabilities in @parent that aren't set in @child.  If
 * @child is NULL, then this clears all parent capabilities.
 *
 * Only use this when setting up the crypto profile for a layered device, before
 * the device has been exposed to upper layers.
 */
void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
				       const struct blk_crypto_profile *child)
{
	if (child) {
		unsigned int i;

		parent->max_dun_bytes_supported =
			min(parent->max_dun_bytes_supported,
			    child->max_dun_bytes_supported);
		for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++)
			parent->modes_supported[i] &= child->modes_supported[i];
	} else {
		parent->max_dun_bytes_supported = 0;
		memset(parent->modes_supported, 0,
		       sizeof(parent->modes_supported));
	}
}
EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities);
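/*
 * Illustrative sketch only: a stacked driver can compute its own capabilities
 * as the intersection of its underlying devices' capabilities while setting
 * itself up, starting from a parent profile populated with everything it
 * could support.  struct foo_stacked and its members are hypothetical; a
 * child without a crypto profile clears the parent's capabilities entirely.
 *
 *	static void foo_compute_crypto_caps(struct foo_stacked *foo)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < foo->num_children; i++)
 *			blk_crypto_intersect_capabilities(&foo->crypto_profile,
 *				bdev_get_queue(foo->children[i])->crypto_profile);
 *	}
 */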
/**
 * blk_crypto_has_capabilities() - Check whether @target supports at least all
 *				   the crypto capabilities that @reference does.
 * @target: the target profile
 * @reference: the reference profile
 *
 * Return: %true if @target supports all the crypto capabilities of @reference.
 */
bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
				 const struct blk_crypto_profile *reference)
{
	int i;

	if (!reference)
		return true;
	if (!target)
		return false;

	for (i = 0; i < ARRAY_SIZE(target->modes_supported); i++) {
		if (reference->modes_supported[i] & ~target->modes_supported[i])
			return false;
	}

	if (reference->max_dun_bytes_supported >
	    target->max_dun_bytes_supported)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities);
/**
 * blk_crypto_update_capabilities() - Update the capabilities of a crypto
 *				      profile to match those of another crypto
 *				      profile.
 * @dst: The crypto profile whose capabilities to update.
 * @src: The crypto profile whose capabilities this function will update @dst's
 *	 capabilities to.
 *
 * Blk-crypto requires that crypto capabilities that were
 * advertised when a bio was created continue to be supported by the
 * device until that bio is ended.  This in turn means that a device cannot
 * shrink its advertised crypto capabilities without any explicit
 * synchronization with upper layers.  So if there's no such explicit
 * synchronization, @src must support all the crypto capabilities that
 * @dst does (i.e. we need blk_crypto_has_capabilities(@src, @dst)).
 *
 * Note also that as long as the crypto capabilities are being expanded, the
 * order of updates becoming visible is not important because it's alright
 * for blk-crypto to see stale values - they only cause blk-crypto to
 * believe that a crypto capability isn't supported when it actually is (which
 * might result in blk-crypto-fallback being used if available, or the bio being
 * failed).
 */
void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
				    const struct blk_crypto_profile *src)
{
	memcpy(dst->modes_supported, src->modes_supported,
	       sizeof(dst->modes_supported));

	dst->max_dun_bytes_supported = src->max_dun_bytes_supported;
}
EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities);
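/*
 * Illustrative sketch only: a stacked driver that later reconfigures its
 * underlying devices can refresh its advertised capabilities, but per the
 * constraint documented above it should only do so when the new capabilities
 * are a superset of the old ones (or after explicit synchronization with
 * upper layers).  foo_refresh_crypto_caps() and the foo_stacked fields are
 * hypothetical.
 *
 *	static void foo_refresh_crypto_caps(struct foo_stacked *foo,
 *					    const struct blk_crypto_profile *new)
 *	{
 *		// Only expand: require the new profile to support everything
 *		// the currently advertised profile does.
 *		if (blk_crypto_has_capabilities(new, &foo->crypto_profile))
 *			blk_crypto_update_capabilities(&foo->crypto_profile, new);
 *	}
 */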