/* LRW: as defined by Cyril Guyot in
 * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf
 *
 * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
/* This implementation is checked against the test vectors in the above
 * document and by a test vector provided by Ken Buchanan at
 * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html
 *
 * The test vectors are included in the testing module tcrypt.[ch] */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define LRW_BUFFER_SIZE 128u

#define LRW_BLOCK_SIZE 16
struct priv {
	struct crypto_skcipher *child;

	/* optimizes multiplying a random (non-incrementing, as at the
	 * start of a new sector) value with key2; we could also have
	 * used 4k optimization tables or no optimization at all, but
	 * in the latter case we would have to store key2 here. */
	struct gf128mul_64k *table;

	/* stores:
	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 },
	 *  key2*{ 0,0,...1,1,1,1,1 }, etc.
	 * needed for optimized multiplication of incrementing values
	 * with key2; we could have used more tables, but this way we
	 * save large amounts of memory. */
	be128 mulinc[128];
};
struct rctx {
	be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
	be128 t;
	be128 *ext;
	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;
	unsigned int left;
	struct skcipher_request subreq;
};
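/* Note (added for illustration, not in the original source): in the
 * request context above, buf stashes the tweak T of every block in the
 * chunk currently being processed so post_crypt() can XOR it back into
 * the child cipher's output; ext optionally points at a larger
 * kmalloc'd stash, t is the running tweak, and left counts the bytes
 * of req->cryptlen still to be processed. */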
static inline void setbit128_bbe(void *b, int bit)
{
	__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
			 BITS_PER_LONG
#else
			 BITS_PER_BYTE
#endif
			), b);
}
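/* Note (added for illustration, not in the original source): a quick
 * sanity check of the index mapping on a little-endian machine:
 * bit 0 becomes __set_bit(0 ^ (0x80 - 8), b) == __set_bit(120, b),
 * i.e. the lowest bit of byte 15 -- the x^0 coefficient in the
 * big-endian ("bbe") representation that gf128mul_*_bbe() expects. */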
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err, bsize = LRW_BLOCK_SIZE;
	const u8 *tweak = key + keylen - bsize;
	be128 tmp = { 0 };
	int i;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen - bsize);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	if (ctx->table)
		gf128mul_free_64k(ctx->table);

	/* initialize multiplication table for Key2 */
	ctx->table = gf128mul_init_64k_bbe((be128 *)tweak);
	if (!ctx->table)
		return -ENOMEM;

	/* initialize optimization table */
	for (i = 0; i < 128; i++) {
		setbit128_bbe(&tmp, i);
		ctx->mulinc[i] = tmp;
		gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table);
	}

	return 0;
}
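/* Note (added for illustration, not in the original source): the key
 * passed to setkey() is the concatenation key1 || key2. The final
 * LRW_BLOCK_SIZE (16) bytes are key2, the GF(2^128) tweak key; the
 * rest is handed to the child cipher as key1. For "lrw(aes)" with
 * AES-256, for example, that is a 48-byte key: 32 bytes of AES key
 * followed by 16 bytes of key2. */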
static inline void inc(be128 *iv)
{
	be64_add_cpu(&iv->b, 1);
	if (!iv->b)
		be64_add_cpu(&iv->a, 1);
}
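/* Note (added for illustration, not in the original source): inc() is
 * a 128-bit big-endian increment. If the low 64-bit half wraps to
 * zero, the carry propagates into the high half, e.g.
 * { .a = 0, .b = ~0 } increments to { .a = 1, .b = 0 }. */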
/* this returns the number of consecutive 1 bits starting
 * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
static inline int get_index128(be128 *block)
{
	int x;
	__be32 *p = (__be32 *) block;

	for (p += 3, x = 0; x < 128; p--, x += 32) {
		u32 val = be32_to_cpup(p);

		if (!~val)	/* all 32 bits set: keep scanning */
			continue;
		return x + ffz(val);
	}
	return x;
}
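/* Note (added for illustration, not in the original source): tracing
 * the example in the comment above, the last 32-bit word is
 * 0x000010FB; ~val != 0, and ffz(0x10FB) scans ...0001000011111011
 * from the LSB for the first zero bit, finding it at position 2 --
 * exactly the mulinc[] index needed for the next tweak update. */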
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wdst;

		wdst = w.dst.virt.addr;
		do {
			be128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;
	if (err || !rctx->left)
		goto out;

	rctx->dst = rctx->dstbuf;
	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;
out:
	return err;
}
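/* Note (added for illustration, not in the original source):
 * post_crypt() finishes C = E_key1(P ^ T) ^ T for the chunk just
 * processed: pre_crypt() stashed each block's T in buf[] and XORed it
 * into the plaintext, the child cipher encrypted in place, and this
 * pass XORs the same T values into the result. If bytes remain, the
 * dst scatterlist is re-chained to resume where the walk stopped. */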
static int pre_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rctx *rctx = skcipher_request_ctx(req);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	be128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = LRW_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	be128 *iv;
	bool more;
	int err;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, tfm);

	cryptlen = subreq->cryptlen;
	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, req->iv);

	err = skcipher_walk_virt(&w, subreq, false);
	iv = w.iv;

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		be128 *wsrc;
		be128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;
			be128_xor(wdst++, &rctx->t, wsrc++);

			/* T <- I*Key2, using the optimization
			 * discussed in the specification */
			be128_xor(&rctx->t, &rctx->t,
				  &ctx->mulinc[get_index128(iv)]);
			inc(iv);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	rctx->src = rctx->srcbuf;
	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;
out:
	return err;
}
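/* Note (added for illustration, not in the original source): per chunk
 * the request is driven through a three-stage pipeline,
 *
 *	pre_crypt:  PP_j = P_j ^ T_j,  T_j = key2 * (I + j)
 *	child:      CC_j = E_key1(PP_j)   (plain ECB, in place)
 *	post_crypt: C_j  = CC_j ^ T_j
 *
 * where * is GF(2^128) multiplication and I is the 128-bit counter
 * taken from req->iv. Only the first tweak needs a real multiplication
 * (done in init_crypt()); every later one is a single table XOR. */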
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = LRW_BUFFER_SIZE;
	if (req->cryptlen > LRW_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	memcpy(&rctx->t, req->iv, sizeof(rctx->t));

	/* T <- I*Key2 */
	gf128mul_64k_bbe(&rctx->t, ctx->table);

	return 0;
}
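/* Note (added for illustration, not in the original source): the chunk
 * size is bounded by how many tweaks can be stashed: LRW_BUFFER_SIZE
 * (128) bytes via the inline rctx->buf, or up to PAGE_SIZE when the
 * kmalloc() succeeds. An allocation failure is not an error; the code
 * simply falls back to the small buffer and loops more often. */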
static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;
	if (rctx->ext)
		kzfree(rctx->ext);
}
static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;
	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}
static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (err == -EINPROGRESS || err == -EBUSY)
		return;
out:
	skcipher_request_complete(req, err);
}
static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}
static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;
	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS || err == -EBUSY)
			return err;
	}

	exit_crypt(req);
	return err;
}
static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);
	if (err == -EINPROGRESS) {
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (err == -EINPROGRESS || err == -EBUSY)
		return;
out:
	skcipher_request_complete(req, err);
}
static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}
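/* Note (added for illustration, not in the original source): the
 * decrypt path reuses the identical pre/post whitening because XOR is
 * its own inverse; the only difference from encryption is that the
 * child transform runs crypto_skcipher_decrypt() instead of
 * crypto_skcipher_encrypt(). */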
static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) +
					 sizeof(struct rctx));
	return 0;
}
static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->table)
		gf128mul_free_64k(ctx->table);
	crypto_free_skcipher(ctx->child);
}
static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_skcipher_spawn *spawn;
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct skcipher_alg *alg;
	const char *cipher_name;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);
	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = skcipher_instance_ctx(inst);
	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0,
				   crypto_requires_sync(algt->type,
							algt->mask));
	if (err == -ENOENT) {
		/* no skcipher by that name: try the bare cipher in ecb() */
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(spawn, ecb_name, 0,
					   crypto_requires_sync(algt->type,
								algt->mask));
	}
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != LRW_BLOCK_SIZE)
		goto err_drop_spawn;
	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
		if (len < 2 || len >= sizeof(ecb_name))
			goto err_drop_spawn;
		if (ecb_name[len - 1] != ')')
			goto err_drop_spawn;
		ecb_name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) {
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = LRW_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				LRW_BLOCK_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				LRW_BLOCK_SIZE;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;
	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;
out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}
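/* Note (added for illustration, not in the original source): a minimal
 * usage sketch of the template that create() registers, with error
 * handling elided and the request plumbing abbreviated:
 *
 *	struct crypto_skcipher *tfm;
 *	u8 key[48];	// hypothetical: 32-byte AES key1 || 16-byte key2
 *
 *	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	// ...allocate a request, set src/dst/iv, then call
 *	// crypto_skcipher_encrypt(req);
 *	crypto_free_skcipher(tfm);
 */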
static struct crypto_template crypto_tmpl = {
	.name = "lrw",
	.create = create,
	.module = THIS_MODULE,
};
static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LRW block cipher mode");
MODULE_ALIAS_CRYPTO("lrw");