/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	const char *name; /* name of this mode, shown in sysfs */
	const char *cipher_str; /* crypto API name (for fallback case) */
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

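/*
 * For reference, an entry of this table (defined in blk-crypto.c) looks
 * roughly like the following; the exact values live in blk-crypto.c and may
 * differ between kernel versions:
 *
 *	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 *		.name		= "AES-256-XTS",
 *		.cipher_str	= "xts(aes)",
 *		.keysize	= 64,
 *		.ivsize		= 16,
 *	},
 */
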
#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct request_queue *q);

void blk_crypto_sysfs_unregister(struct request_queue *q);

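/*
 * Note: the DUN is a multi-word (BLK_CRYPTO_DUN_ARRAY_SIZE x 64-bit) integer.
 * bio_crypt_dun_increment() adds @inc to it, carrying between the 64-bit
 * words as needed.
 */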
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

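/*
 * In the merge helpers below, "mergeable" means that the two encryption
 * contexts are either both absent, or both present, using the same key, with
 * data unit numbers that are contiguous across the merge point.
 */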
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

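/*
 * Keyslot management: blk_crypto_get_keyslot() programs @key into a keyslot
 * of @profile (waiting for a slot to become free if necessary) and returns a
 * reference to it in *@slot_ptr; blk_crypto_put_keyslot() releases that
 * reference.
 */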
blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    struct blk_crypto_keyslot **slot_ptr);

void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot);

int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key);

bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct request_queue *q)
{
	return 0;
}

static inline void blk_crypto_sysfs_unregister(struct request_queue *q) { }

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

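/*
 * bio_crypt_advance() keeps a bio's encryption context in sync when the bio
 * is partially completed or split: it advances the DUN by the number of data
 * units covered by @bytes.
 */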
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

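/*
 * When a bio is merged at the front of a request, the request's starting DUN
 * must be updated to the bio's DUN, since the bio's data now comes first.
 */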
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

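/*
 * blk_crypto_bio_prep() is called from the bio submission path.  It returns
 * true if the bio may proceed (possibly after being redirected to the crypto
 * API fallback), and false if the bio was already ended with an error, in
 * which case the caller must not submit it.
 */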
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

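/*
 * blk_crypto_init_request() acquires a keyslot for an encrypted request from
 * its queue's crypto profile; it is a no-op for requests without an
 * encryption context.
 */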
blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *			    is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					 gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
	return 0;
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

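/*
 * The fallback performs en/decryption in software via the kernel crypto API
 * when the underlying device does not support the bio's encryption context
 * in hardware.
 */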
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	pr_warn_once("crypto API fallback is disabled\n");
	return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	pr_warn_once("crypto API fallback disabled; failing request.\n");
	(*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
	return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */