// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
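
/*
 * Usage sketch (an illustration under assumed context, not code from this
 * file): an upper layer such as a filesystem would attach an encryption
 * context to a bio before submitting it, e.g.:
 *
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { first_data_unit_index };
 *
 *	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
 *	submit_bio(bio);
 *
 * where first_data_unit_index is a hypothetical value chosen by the caller.
 * GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the allocation cannot fail.
 */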

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
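
/*
 * Worked example (illustrative only): incrementing dun = { U64_MAX, 0, ... }
 * by 1 makes limb 0 wrap to 0, the dun[i] < inc test then sets the carry to
 * 1, and the carry is added into limb 1, giving dun = { 0, 1, ... }.
 */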

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
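
/*
 * Worked example (illustrative only): with 4096-byte data units and the
 * remaining limbs zero, bc_dun = { U64_MAX, 0 } followed by 4096 bytes is
 * contiguous with next_dun = { 0, 1 }: the initial carry is 4096 >> 12 == 1,
 * limb 0 matches because U64_MAX + 1 wraps to 0, the overflow carries 1 into
 * limb 1 (which also matches), and the final carry is 0.
 */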

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}
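
/*
 * Example (illustrative only): with the same key and 4096-byte data units,
 * a context at DUN 10 followed by bc1_bytes == 8192 (i.e. two data units)
 * is mergeable with a context at DUN 12; a context at DUN 13 would be
 * compatible but not mergeable.
 */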

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}
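
/*
 * For example (illustrative only): with a 4096-byte data unit size, a
 * segment with bv_len == 8192 and bv_offset == 0 is accepted, while one
 * with bv_len == 4096 and bv_offset == 512 is rejected, since bv_len and
 * bv_offset must both be multiples of the data unit size.
 */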

blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}

void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	if (blk_crypto_config_supported_natively(bio->bi_bdev,
						 &bc_key->crypto_cfg))
		return true;
	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(bio);
	return false;
}
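
/*
 * Caller-side sketch (an assumed usage pattern; the real call site is the
 * bio submission path, not this file):
 *
 *	if (bio_has_crypt_ctx(bio) && !__blk_crypto_bio_prep(&bio))
 *		return;		// bio_endio() has already been called
 *	// continue submitting bio, which may now be a bounce bio
 */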

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
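
/*
 * Usage sketch (an illustration under assumed values, not code from this
 * file): preparing a 64-byte raw key for AES-256-XTS with 8-byte DUNs and
 * 4096-byte data units:
 *
 *	struct blk_crypto_key key;
 *	int err;
 *
 *	err = blk_crypto_init_key(&key, raw_key,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  8, 4096);
 *
 * Here raw_key is a hypothetical caller-provided buffer of 64 bytes.
 */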

bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
					  cfg);
}

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       blk_crypto_config_supported_natively(bdev, cfg);
}
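
/*
 * Probe sketch (illustrative only): an upper layer can check support before
 * committing to inline encryption:
 *
 *	struct blk_crypto_config cfg = {
 *		.crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS,
 *		.data_unit_size = 4096,
 *		.dun_bytes = 8,
 *	};
 *
 *	if (!blk_crypto_config_supported(bdev, &cfg))
 *		use_fs_layer_encryption();	// hypothetical fallback path
 */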

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
 * @bdev: a block_device on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given block_device, this function removes the given blk_crypto_key from
 * the keyslot management structures and evicts it from any underlying hardware
 * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key.  It must be
 * called for every block_device the key may have been used on.  The key must
 * no longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct block_device *bdev,
			  const struct blk_crypto_key *key)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int err;

	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		err = __blk_crypto_evict_key(q->crypto_profile, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug).  Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away.  There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
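
/*
 * End-to-end sketch (a hedged outline of the expected key lifecycle, not
 * code from this file):
 *
 *	blk_crypto_init_key(&key, raw_key, mode, dun_bytes, data_unit_size);
 *	blk_crypto_start_using_key(bdev, &key);
 *	// ... attach the key to bios with bio_crypt_set_ctx() and do I/O ...
 *	blk_crypto_evict_key(bdev, &key);
 *	memzero_explicit(&key, sizeof(key));
 */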