/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,	/* caller wants page/offset, no kmap */
	BLKCIPHER_WALK_SLOW = 1 << 1,	/* block bounced through walk->buffer */
	BLKCIPHER_WALK_COPY = 1 << 2,	/* chunk copied via a temporary page */
	BLKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
};
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}
/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
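/*
 * Worked example (a sketch, assuming 4 KiB pages): for start == 0x1ff8 and
 * len == 16, the last byte would land at 0x2007, so end_page is 0x2000 and
 * the spot is bumped up to 0x2000, keeping all 16 bytes inside one page.
 * For start == 0x2000 the range already fits and start is returned as-is.
 */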
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	/* The caller passes in the number of leftover bytes; convert it
	 * into the number of bytes actually processed in this chunk. */
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
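/*
 * A minimal sketch of how a mode implementation is expected to drive the
 * walker above, modelled on the in-tree ECB loop.  example_cipher_block()
 * is a hypothetical stand-in for a real single-block primitive; it merely
 * copies the block so that the sketch stays self-contained.
 */
static inline void example_cipher_block(struct crypto_blkcipher *tfm,
					u8 *dst, const u8 *src)
{
	memcpy(dst, src, crypto_blkcipher_blocksize(tfm));
}

static inline int example_encrypt(struct blkcipher_desc *desc,
				  struct scatterlist *dst,
				  struct scatterlist *src,
				  unsigned int nbytes)
{
	unsigned int bsize = crypto_blkcipher_blocksize(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	/* Each iteration sees one contiguous, aligned chunk; the leftover
	 * byte count (less than a block) is reported back to the walker. */
	while ((nbytes = walk.nbytes)) {
		u8 *wsrc = walk.src.virt.addr;
		u8 *wdst = walk.dst.virt.addr;

		do {
			example_cipher_block(desc->tfm, wdst, wsrc);
			wsrc += bsize;
			wdst += bsize;
		} while ((nbytes -= bsize) >= bsize);

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}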
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	/* Size the buffer on the aligned block size so it cannot come up
	 * short when bsize is not a multiple of alignmask + 1. */
	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}
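/*
 * Pick a strategy for the next chunk: operate directly on the mapped
 * scatterlist pages when both sides are suitably aligned (the fast path),
 * bounce the chunk through a temporary page when they are not
 * (BLKCIPHER_WALK_COPY), and fall back to a block-sized bounce buffer when
 * a block straddles scatterlist entries (BLKCIPHER_WALK_SLOW).
 */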
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
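/*
 * A sketch of the buffer blkcipher_copy_iv() lays out: two aligned,
 * non-straddling block-sized spots (scratch space that the slow path
 * reuses, since walk->buffer is then already non-NULL) followed by a spot
 * for the IV copy itself, each kept within a single page:
 *
 *	| align pad | block 0 | block 1 | IV |
 */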
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	/* Wipe the aligned copy so no key material is left behind. */
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	/* (type ^ ASYNC) & (mask & ASYNC) is non-zero exactly when the user
	 * asked for the synchronous interface; only then does the IV live
	 * in the context, directly behind the algorithm's own state. */
	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if ((type & mask) && cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}
static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	crt->ivsize = alg->ivsize;

	return 0;
}
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	/* Point crt->iv at the space reserved by crypto_blkcipher_ctxsize(),
	 * aligned directly behind the algorithm's context. */
	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}
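/*
 * A minimal user-side sketch, assuming the "cbc(aes)" algorithm is
 * available; buf, key and iv are hypothetical caller-supplied names.
 * Masking out CRYPTO_ALG_ASYNC requests a synchronous tfm, which takes
 * the crt_blkcipher path set up above.
 */
static inline int example_encrypt_buffer(void *buf, unsigned int len,
					 const u8 *key, unsigned int keylen,
					 const u8 *iv)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, buf, len);
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

out:
	crypto_free_blkcipher(tfm);
	return err;
}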
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	type ^= CRYPTO_ALG_ASYNC;
	mask &= CRYPTO_ALG_ASYNC;
	if (type & mask)
		return crypto_init_blkcipher_ops_sync(tfm);

	return crypto_init_blkcipher_ops_async(tfm);
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}
const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
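/*
 * A sketch of how an algorithm plugs into this type; the names are
 * hypothetical, and a real driver would supply its own setkey/encrypt/
 * decrypt (for instance a walker loop like example_encrypt() above):
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name	= "example(cipher)",
 *		.cra_flags	= CRYPTO_ALG_TYPE_BLKCIPHER,
 *		.cra_blocksize	= 16,
 *		.cra_type	= &crypto_blkcipher_type,
 *		.cra_module	= THIS_MODULE,
 *		.cra_u		= { .blkcipher = {
 *			.min_keysize	= 16,
 *			.max_keysize	= 32,
 *			.ivsize		= 16,
 *			.setkey		= example_setkey,
 *			.encrypt	= example_encrypt,
 *			.decrypt	= example_decrypt,
 *		} },
 *	};
 *
 * followed by crypto_register_alg(&example_alg).
 */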
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");