/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"

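/*
 * Per-step walk state: PHYS means the caller asked for page/offset pairs
 * rather than mapped virtual addresses, SLOW means the current block is
 * bounced through a temporary buffer because it straddles a scatterlist
 * entry, COPY means misaligned data is staged in walk->page, and DIFF
 * means source and destination needed separate mappings.
 */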
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

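/*
 * Return a pointer at which len bytes fit without crossing a page
 * boundary: start itself if possible, otherwise the beginning of the
 * page that start + len spills into.
 */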
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	if (offset_in_page(start + len) < len)
		return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
	return start;
}

static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	n = walk->nbytes - n;

	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		blkcipher_unmap_src(walk);
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

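/*
 * Finish the current step of a walk.  On entry err is the number of
 * bytes the cipher left unprocessed in this chunk (or a negative error).
 * Data staged in temporary buffers is copied back out to the destination
 * scatterlist, both scatterlists are advanced, and if anything remains
 * the next chunk is prepared via blkcipher_walk_next().  Once the walk
 * ends, a bounced IV is copied back to desc->info and the scratch
 * buffers are freed.
 */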
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int bsize = crypto_blkcipher_blocksize(tfm);
		unsigned int n;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, err);
		else
			n = blkcipher_done_slow(tfm, walk, bsize);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

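/*
 * Slow path: fewer than one full block is contiguous in the source or
 * destination scatterlist, so a single block is bounced through a scratch
 * buffer large enough to hold an aligned source and destination copy.
 * blkcipher_done_slow() later writes the result back out.
 */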
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
						 bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

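/*
 * Prepare the next chunk of the walk: clamp the byte count to what is
 * contiguous in both scatterlists, then take the fast path (map the data
 * in place), the copy path (stage misaligned data in walk->page) or the
 * slow path (bounce a single block through a scratch buffer).
 */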
static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize = crypto_blkcipher_blocksize(tfm);
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < bsize)) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}

	return err;
}

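/*
 * The IV supplied by the caller is not aligned to the algorithm's
 * alignment mask, so allocate a scratch buffer and work on an aligned
 * copy instead.  The buffer is laid out as two block-sized slots followed
 * by the IV, each positioned via blkcipher_get_spot() so it does not
 * straddle a page boundary; blkcipher_next_slow() reuses the leading
 * slots for its bounce blocks.
 */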
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

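/*
 * Typical use by a chaining-mode implementation (illustrative sketch;
 * crypto_xyz_process_chunk() stands in for the mode's own helper and is
 * not part of this file).  The mode initialises a walk over the source
 * and destination scatterlists, processes each mapped chunk, and reports
 * the number of bytes it left unprocessed to blkcipher_walk_done():
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while ((nbytes = walk.nbytes)) {
 *		nbytes = crypto_xyz_process_chunk(desc, &walk);
 *		err = blkcipher_walk_done(desc, &walk, nbytes);
 *	}
 *	return err;
 */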
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

static int setkey(struct crypto_tfm *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if (cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

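/*
 * Instantiate the blkcipher operations for a new transform.  The IV, if
 * any, lives directly after the aligned algorithm context; the extra room
 * for it is reserved by crypto_blkcipher_ctxsize() above.
 */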
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");