dm: prefer kmap_local_page() instead of deprecated kmap_atomic()
author	Heinz Mauelshagen <heinzm@redhat.com>
	Tue, 7 Feb 2023 19:22:58 +0000 (20:22 +0100)
committer	Mike Snitzer <snitzer@kernel.org>
	Tue, 14 Feb 2023 19:23:06 +0000 (14:23 -0500)
Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
drivers/md/dm-crypt.c
drivers/md/dm-log-writes.c
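
The conversion pattern is identical in every hunk: each kmap_atomic()/kunmap_atomic() pair becomes kmap_local_page()/kunmap_local(). kmap_local_page() creates a thread-local mapping without disabling preemption or pagefaults (which is why kmap_atomic() is deprecated); the mapping is valid only in the mapping context and nested mappings must be released in reverse order of acquisition. A minimal sketch of the pattern as applied to scatterlist pages, using a hypothetical copy_to_sg_page() helper purely for illustration:

/*
 * Illustrative sketch only: copy_to_sg_page() is a hypothetical helper,
 * not part of this patch or of the kernel tree.
 */
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void copy_to_sg_page(struct scatterlist *sg, const void *src, size_t len)
{
	/* Previously: void *dst = kmap_atomic(sg_page(sg)); */
	void *dst = kmap_local_page(sg_page(sg));

	memcpy(dst + sg->offset, src, len);

	/* Previously: kunmap_atomic(dst); */
	kunmap_local(dst);
}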

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7f48256db9b254309fe0849d2aedb3a3101bbb95..e892e52e961faafc5c302776c2b1ce3a00ee1700 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -532,9 +532,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
 
        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
                sg = crypt_get_sg_data(cc, dmreq->sg_in);
-               src = kmap_atomic(sg_page(sg));
+               src = kmap_local_page(sg_page(sg));
                r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
-               kunmap_atomic(src);
+               kunmap_local(src);
        } else
                memset(iv, 0, cc->iv_size);
 
@@ -552,14 +552,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
                return 0;
 
        sg = crypt_get_sg_data(cc, dmreq->sg_out);
-       dst = kmap_atomic(sg_page(sg));
+       dst = kmap_local_page(sg_page(sg));
        r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
 
        /* Tweak the first block of plaintext sector */
        if (!r)
                crypto_xor(dst + sg->offset, iv, cc->iv_size);
 
-       kunmap_atomic(dst);
+       kunmap_local(dst);
        return r;
 }
 
@@ -682,9 +682,9 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
        /* Remove whitening from ciphertext */
        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
                sg = crypt_get_sg_data(cc, dmreq->sg_in);
-               src = kmap_atomic(sg_page(sg));
+               src = kmap_local_page(sg_page(sg));
                r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
-               kunmap_atomic(src);
+               kunmap_local(src);
        }
 
        /* Calculate IV */
@@ -708,9 +708,9 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
 
        /* Apply whitening on ciphertext */
        sg = crypt_get_sg_data(cc, dmreq->sg_out);
-       dst = kmap_atomic(sg_page(sg));
+       dst = kmap_local_page(sg_page(sg));
        r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
-       kunmap_atomic(dst);
+       kunmap_local(dst);
 
        return r;
 }
@@ -975,15 +975,15 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
                goto out;
 
        sg = crypt_get_sg_data(cc, dmreq->sg_out);
-       data = kmap_atomic(sg_page(sg));
+       data = kmap_local_page(sg_page(sg));
        data_offset = data + sg->offset;
 
        /* Cannot modify original bio, copy to sg_out and apply Elephant to it */
        if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
                sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
-               data2 = kmap_atomic(sg_page(sg2));
+               data2 = kmap_local_page(sg_page(sg2));
                memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
-               kunmap_atomic(data2);
+               kunmap_local(data2);
        }
 
        if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
@@ -1003,7 +1003,7 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
                diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
        }
 
-       kunmap_atomic(data);
+       kunmap_local(data);
 out:
        kfree_sensitive(ks);
        kfree_sensitive(es);
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 7c3b83536481c9ba42b43f8e9c2e0b56597257a4..b126263f814cf21ed8f8c3ff70a2f17845fc657e 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -232,13 +232,13 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
                goto error;
        }
 
-       ptr = kmap_atomic(page);
+       ptr = kmap_local_page(page);
        memcpy(ptr, entry, entrylen);
        if (datalen)
                memcpy(ptr + entrylen, data, datalen);
        memset(ptr + entrylen + datalen, 0,
               lc->sectorsize - entrylen - datalen);
-       kunmap_atomic(ptr);
+       kunmap_local(ptr);
 
        ret = bio_add_page(bio, page, lc->sectorsize, 0);
        if (ret != lc->sectorsize) {
@@ -287,11 +287,11 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
                                goto error_bio;
                        }
 
-                       ptr = kmap_atomic(page);
+                       ptr = kmap_local_page(page);
                        memcpy(ptr, data, pg_datalen);
                        if (pg_sectorlen > pg_datalen)
                                memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
-                       kunmap_atomic(ptr);
+                       kunmap_local(ptr);
 
                        ret = bio_add_page(bio, page, pg_sectorlen, 0);
                        if (ret != pg_sectorlen) {
@@ -743,9 +743,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
                        return DM_MAPIO_KILL;
                }
 
-               dst = kmap_atomic(page);
+               dst = kmap_local_page(page);
                memcpy_from_bvec(dst, &bv);
-               kunmap_atomic(dst);
+               kunmap_local(dst);
                block->vecs[i].bv_page = page;
                block->vecs[i].bv_len = bv.bv_len;
                block->vec_cnt++;