// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 *
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt
#include <crypto/skcipher.h>
#include <linux/blk-cgroup.h>
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/keyslot-manager.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>

#include "blk-crypto-internal.h"
static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");
struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because the bio might be split before
	 * being resubmitted.
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;
/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes.  However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];
static struct blk_crypto_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_keyslot_manager blk_crypto_ksm;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;
/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_KEY_SIZE];
static void blk_crypto_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}
static int blk_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
				      const struct blk_crypto_key *key,
				      unsigned int slot)
{
	struct blk_crypto_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
						key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_evict_keyslot(slot);
		return err;
	}
	return 0;
}
static int blk_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	blk_crypto_evict_keyslot(slot);
	return 0;
}
/*
 * The crypto API fallback KSM ops - only used for a bio when it specifies a
 * blk_crypto_key that was not supported by the device's inline encryption
 * hardware.
 */
static const struct blk_ksm_ll_ops blk_crypto_ksm_ll_ops = {
	.keyslot_program	= blk_crypto_keyslot_program,
	.keyslot_evict		= blk_crypto_keyslot_evict,
};
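/*
 * These ops are not called directly from this file; the keyslot manager
 * invokes them from blk_ksm_get_slot_for_key() / blk_ksm_evict_key() on
 * blk_crypto_ksm whenever a fallback keyslot has to be (re)programmed or
 * cleared.
 */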
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_endio(src_bio);
}
static struct bio *blk_crypto_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
	if (!bio)
		return NULL;
	bio->bi_bdev		= bio_src->bi_bdev;
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);

	return bio;
}
static bool blk_crypto_alloc_cipher_req(struct blk_ksm_keyslot *slot,
					struct skcipher_request **ciph_req_ret,
					struct crypto_wait *wait)
{
	struct skcipher_request *ciph_req;
	const struct blk_crypto_keyslot *slotp;
	int keyslot_idx = blk_ksm_get_slot_idx(slot);

	slotp = &blk_crypto_keyslots[keyslot_idx];
	ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
					  GFP_NOIO);
	if (!ciph_req)
		return false;

	skcipher_request_set_callback(ciph_req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, wait);
	*ciph_req_ret = ciph_req;

	return true;
}
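/*
 * The request is set up for synchronous use: crypto_req_done() completes
 * *wait, and the callers below drive each request with
 * crypto_wait_req(crypto_skcipher_encrypt(ciph_req), &wait) (or the decrypt
 * equivalent), sleeping until the crypto API reports completion.
 */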
static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}
	return true;
}
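/*
 * The bounce bio allocated by blk_crypto_fallback_encrypt_bio() can hold at
 * most BIO_MAX_VECS single-page bvecs, so a bigger bio is split: the first
 * BIO_MAX_VECS pages' worth replaces *bio_ptr and the chained remainder is
 * resubmitted. As a rough example (values illustrative only), with 4 KiB
 * pages and BIO_MAX_VECS == 256, writes larger than 1 MiB get split here.
 */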
union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}
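/*
 * In other words, the IV is just the DUN serialized as little-endian 64-bit
 * words. For example (illustrative), a 64-bit DUN of 5 yields iv->bytes of
 * 05 00 00 00 00 00 00 00 followed by zeroes, of which the cipher consumes
 * its own IV size (16 bytes for AES-XTS).
 */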
/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using the
 * crypto API, and replace *bio_ptr with the bounce bio. May split the input
 * bio if it's too large. Returns true on success. Returns false and sets
 * bio->bi_status on error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_ksm_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for a single-page-bvec bounce bio */
	if (!blk_crypto_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;
	ret = true;

	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_ksm_put_slot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

	return ret;
}
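/*
 * Note on the loops above: the src/dst scatterlists always describe exactly
 * one data unit, and the DUN is incremented by 1 per data unit, mirroring
 * what inline encryption hardware would do. E.g. (illustrative) with a
 * 4096-byte data_unit_size and 4 KiB pages, each bounce page is encrypted by
 * a single skcipher request.
 */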
/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_ksm_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Use the crypto API fallback keyslot manager to get a crypto_skcipher
	 * for the algorithm and key specified for this bio.
	 */
	blk_st = blk_ksm_get_slot_for_key(&blk_crypto_ksm, bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_ksm_put_slot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}
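/*
 * This work item runs on blk_crypto_wq (queued from
 * blk_crypto_fallback_decrypt_endio() below), so it executes in process
 * context where the synchronous skcipher requests are allowed to sleep,
 * unlike the bio completion path that queued it.
 */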
/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function may be called from atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}
/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If bio is doing a WRITE operation, this splits the bio into two parts if it's
 * too big (see blk_crypto_split_bio_if_needed()). It then allocates a bounce
 * bio for the first part, encrypts it, and updates *bio_ptr to point to the
 * bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!blk_ksm_crypto_cfg_supported(&blk_crypto_ksm,
					  &bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}
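/*
 * Rough sketch of how the blk-crypto core is expected to reach this function
 * (see blk-crypto.c; names abridged and approximate):
 *
 *	if (blk_ksm_crypto_cfg_supported(bdev_keyslot_manager,
 *					 &bc->bc_key->crypto_cfg))
 *		return true;	// real inline encryption hardware handles it
 *	return blk_crypto_fallback_bio_prep(bio_ptr);
 */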
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return blk_ksm_evict_key(&blk_crypto_ksm, key);
}
static bool blk_crypto_fallback_inited;
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	err = blk_ksm_init(&blk_crypto_ksm, blk_crypto_num_keyslots);
	if (err)
		goto fail_free_bioset;
	err = -ENOMEM;

	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_ksm.crypto_modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_free_ksm;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_free_ksm:
	blk_ksm_destroy(&blk_crypto_ksm);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}
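/*
 * blk_crypto_fallback_init() is deliberately lazy: none of the resources
 * above are allocated at boot. It runs, under tfms_init_lock, the first time
 * blk_crypto_fallback_start_using_mode() is called for any mode.
 */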
/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}
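/*
 * The smp_load_acquire() fast path above pairs with the smp_store_release()
 * of tfms_inited[mode_num]: once a reader observes tfms_inited[mode_num] as
 * true, all of the tfm pointers written in the loop are guaranteed to be
 * visible as well.
 */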