blk-crypto: dynamically allocate fallback profile
author:    Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Thu, 17 Aug 2023 14:15:56 +0000 (10:15 -0400)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 23 Aug 2023 15:52:39 +0000 (17:52 +0200)
commit c984ff1423ae9f70b1f28ce811856db0d9c99021 upstream.

blk_crypto_profile_init() calls lockdep_register_key(), which warns and
does not register if the provided memory is a static object.
blk-crypto-fallback currently has a static blk_crypto_profile and calls
blk_crypto_profile_init() on it, resulting in the warning and failure to
register.

Fortunately, it is simple enough to switch to a dynamically allocated
profile and let lockdep function correctly.
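
For context, the warning comes from lockdep refusing to register a key
that lives in static storage. A simplified sketch of the check (based on
kernel/locking/lockdep.c; the exact code varies by kernel version):

	/* Register a dynamically allocated key -- simplified sketch. */
	void lockdep_register_key(struct lock_class_key *key)
	{
		/* A key in static storage must not be registered
		 * dynamically; warn once and bail out without registering. */
		if (WARN_ON_ONCE(static_obj(key)))
			return;

		/* ... otherwise insert the key into lockdep's hash of
		 * dynamically registered keys ... */
	}

Since blk_crypto_profile_init() registers a lock_class_key embedded in
the profile, the profile itself must not be a static object.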

Fixes: 2fb48d88e77f ("blk-crypto: use dynamic lock class for blk_crypto_profile::lock")
Cc: stable@vger.kernel.org
Signed-off-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20230817141615.15387-1-sweettea-kernel@dorminy.me
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index ad9844c..e6468ea 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
        struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
 
-static struct blk_crypto_profile blk_crypto_fallback_profile;
+static struct blk_crypto_profile *blk_crypto_fallback_profile;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
 static struct bio_set crypto_bio_split;
@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
         * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
         * this bio's algorithm and key.
         */
-       blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+       blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
                                        bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                src_bio->bi_status = blk_st;
@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
         * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
         * this bio's algorithm and key.
         */
-       blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+       blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
                                        bc->bc_key, &slot);
        if (blk_st != BLK_STS_OK) {
                bio->bi_status = blk_st;
@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
                return false;
        }
 
-       if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
+       if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
                                        &bc->bc_key->crypto_cfg)) {
                bio->bi_status = BLK_STS_NOTSUPP;
                return false;
@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 {
-       return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
+       return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
 }
 
 static bool blk_crypto_fallback_inited;
@@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void)
 {
        int i;
        int err;
-       struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
 
        if (blk_crypto_fallback_inited)
                return 0;
@@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void)
        if (err)
                goto out;
 
-       err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
-       if (err)
+       /* Dynamic allocation is needed because of lockdep_register_key(). */
+       blk_crypto_fallback_profile =
+               kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+       if (!blk_crypto_fallback_profile) {
+               err = -ENOMEM;
                goto fail_free_bioset;
+       }
+
+       err = blk_crypto_profile_init(blk_crypto_fallback_profile,
+                                     blk_crypto_num_keyslots);
+       if (err)
+               goto fail_free_profile;
        err = -ENOMEM;
 
-       profile->ll_ops = blk_crypto_fallback_ll_ops;
-       profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+       blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
+       blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
 
        /* All blk-crypto modes have a crypto API fallback. */
        for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-               profile->modes_supported[i] = 0xFFFFFFFF;
-       profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+               blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
+       blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
 
        blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
                                        WQ_UNBOUND | WQ_HIGHPRI |
@@ -597,7 +605,9 @@ fail_free_keyslots:
 fail_free_wq:
        destroy_workqueue(blk_crypto_wq);
 fail_destroy_profile:
-       blk_crypto_profile_destroy(profile);
+       blk_crypto_profile_destroy(blk_crypto_fallback_profile);
+fail_free_profile:
+       kfree(blk_crypto_fallback_profile);
 fail_free_bioset:
        bioset_exit(&crypto_bio_split);
 out: