// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-cgroup.h"
#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because bio might be split before being
	 * resubmitted
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes.  However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_crypto_profile blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];

static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}

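/*
 * Program @key into the fallback keyslot @slot: set the raw key bytes on the
 * skcipher tfm that was preallocated for the key's crypto mode.  If the slot
 * currently holds a key for a different mode, that key is cleared first, so a
 * keyslot only ever has one usable key at a time, like real hardware.
 */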
static int
blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_fallback_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_fallback_evict_keyslot(slot);
		return err;
	}
	return 0;
}

static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile,
					     const struct blk_crypto_key *key,
					     unsigned int slot)
{
	blk_crypto_fallback_evict_keyslot(slot);
	return 0;
}

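/*
 * The blk-crypto-profile core invokes these operations when it programs a key
 * into, or evicts a key from, one of the fallback's virtual keyslots; they are
 * the software stand-in for keyslot programming on real inline encryption
 * hardware.
 */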
static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
	.keyslot_program        = blk_crypto_fallback_keyslot_program,
	.keyslot_evict          = blk_crypto_fallback_keyslot_evict,
};

static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}

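/*
 * Allocate a bounce bio that references the same pages as @bio_src (the pages
 * themselves are not copied).  The encrypt path then swaps each page for a
 * freshly allocated bounce page before writing ciphertext into it.
 */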
static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
	if (!bio)
		return NULL;
	bio->bi_bdev		= bio_src->bi_bdev;
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	return bio;
}

static bool
blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
				     struct skcipher_request **ciph_req_ret,
				     struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_fallback_keyslot *slotp;
	int keyslot_idx = blk_crypto_keyslot_index(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}

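/*
 * The bounce bio used for encryption can hold at most BIO_MAX_VECS bvecs, each
 * backed by a single bounce page.  If the source bio covers more data than
 * that, split off a front part that fits and resubmit the remainder
 * separately.
 */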
static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}

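/*
 * The IV passed to the skcipher is simply the data unit number (DUN): the u64
 * words of the DUN converted to little-endian and laid out contiguously in the
 * IV buffer.
 */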
union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the crypto
 * API, and replace *bio_ptr with the bounce bio. May split input bio if it's
 * too large. Returns true on success. Returns false and sets bio->bi_status on
 * error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_crypto_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for single page bvec */
	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_crypto_put_keyslot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_crypto_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_crypto_put_keyslot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If bio is doing a WRITE operation, this splits the bio into two parts if it's
 * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
 * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
					&bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
}

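/*
 * How the fallback gets used: when CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK is
 * enabled, blk_crypto_start_using_key() (see block/blk-crypto.c) calls
 * blk_crypto_fallback_start_using_mode() for keys used on devices without
 * suitable inline encryption hardware, __blk_crypto_bio_prep() then routes
 * such bios through blk_crypto_fallback_bio_prep() above, and key eviction
 * funnels into blk_crypto_fallback_evict_key().
 */
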
static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;
	struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;

	if (blk_crypto_fallback_inited)
		return 0;

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
	if (err)
		goto fail_free_bioset;
	err = -ENOMEM;

	profile->ll_ops = blk_crypto_fallback_ll_ops;
	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		profile->modes_supported[i] = 0xFFFFFFFF;
	profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_destroy_profile;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
	blk_crypto_profile_destroy(profile);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_fallback_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}