/* XTS: as defined in IEEE 1619/D16
 *	http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *	(sector sizes which are not a multiple of 16 bytes are,
 *	however, currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#define XTS_BUFFER_SIZE 128u

/* Per-transform context: the two sub-ciphers derived from the XTS key. */
struct priv {
	struct crypto_skcipher *child;	/* data cipher (Key1), run in ECB */
	struct crypto_cipher *tweak;	/* tweak cipher (Key2) */
};

struct xts_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	char name[CRYPTO_MAX_ALG_NAME];
};

/* Per-request context. */
struct rctx {
	le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];	/* saved tweak values */

	le128 t;		/* current tweak T */

	le128 *ext;		/* optional kmalloc'd tweak buffer; else buf */

	struct scatterlist srcbuf[2];
	struct scatterlist dstbuf[2];
	struct scatterlist *src;
	struct scatterlist *dst;

	unsigned int left;	/* bytes still to be processed */

	struct skcipher_request subreq;
};
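/*
 * Overview of the mode as implemented below (per IEEE 1619): the initial
 * tweak is T_0 = E_Key2(IV); each 16-byte block is then handled as
 *
 *	C_i = E_Key1(P_i xor T_i) xor T_i,   T_(i+1) = T_i * x in GF(2^128)
 *
 * The skcipher path splits this into pre_crypt() (first XOR plus tweak
 * bookkeeping), a plain ECB request on the child cipher, and post_crypt()
 * (second XOR).
 */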
static int setkey(struct crypto_skcipher *parent, const u8 *key,
		  unsigned int keylen)
{
	struct priv *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;
	int err;

	err = xts_verify_key(parent, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* we need two cipher instances: one to compute the initial 'tweak'
	 * by encrypting the IV (usually the 'plain' iv) and the other
	 * one to encrypt and decrypt the data */

	/* tweak cipher, uses Key2 i.e. the second half of *key */
	tweak = ctx->tweak;
	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
					  CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	/* data cipher, uses Key1 i.e. the first half of *key */
	child = ctx->child;
	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}
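/*
 * Request processing pipeline: init_crypt() computes T_0 and sizes the
 * subrequest, then do_encrypt()/do_decrypt() loop over the data in chunks.
 * Each chunk goes through pre_crypt(), the child ECB request and
 * post_crypt().  pre_crypt() records the tweak used for every block so that
 * post_crypt() can apply the second XOR after the child cipher has run.
 */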
static int post_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	le128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned offset;
	int err;

	subreq = &rctx->subreq;
	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wdst;

		wdst = w.dst.virt.addr;

		do {
			le128_xor(wdst, buf++, wdst);
			wdst++;
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	rctx->left -= subreq->cryptlen;

	if (err || !rctx->left)
		goto out;

	/* re-anchor the destination scatterlist at the next chunk */
	rctx->dst = rctx->dstbuf;

	scatterwalk_done(&w.out, 0, 1);
	sg = w.out.sg;
	offset = w.out.offset;

	if (rctx->dst != sg) {
		rctx->dst[0] = *sg;
		sg_unmark_end(rctx->dst);
		scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
	}
	rctx->dst[0].length -= offset - sg->offset;
	rctx->dst[0].offset = offset;

out:
	return err;
}
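/*
 * pre_crypt() performs the first half of the XEX construction for one chunk:
 * it XORs the running tweak into each source block, saves that tweak in
 * buf[]/ext[] for post_crypt(), and advances the tweak.  It then redirects
 * the subrequest so the child cipher runs in place over rctx->dst.
 */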
static int pre_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	le128 *buf = rctx->ext ?: rctx->buf;
	struct skcipher_request *subreq;
	const int bs = XTS_BLOCK_SIZE;
	struct skcipher_walk w;
	struct scatterlist *sg;
	unsigned cryptlen;
	unsigned offset;
	bool more;
	int err;

	subreq = &rctx->subreq;
	cryptlen = subreq->cryptlen;

	more = rctx->left > cryptlen;
	if (!more)
		cryptlen = rctx->left;

	skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
				   cryptlen, NULL);

	err = skcipher_walk_virt(&w, subreq, false);

	while (w.nbytes) {
		unsigned int avail = w.nbytes;
		le128 *wsrc;
		le128 *wdst;

		wsrc = w.src.virt.addr;
		wdst = w.dst.virt.addr;

		do {
			*buf++ = rctx->t;	/* save T_i for post_crypt() */
			le128_xor(wdst++, &rctx->t, wsrc++);
			gf128mul_x_ble(&rctx->t, &rctx->t);
		} while ((avail -= bs) >= bs);

		err = skcipher_walk_done(&w, avail);
	}

	/* the child cipher runs in place on the XORed data */
	skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
				   cryptlen, NULL);

	if (err || !more)
		goto out;

	/* re-anchor the source scatterlist at the next chunk */
	rctx->src = rctx->srcbuf;

	scatterwalk_done(&w.in, 0, 1);
	sg = w.in.sg;
	offset = w.in.offset;

	if (rctx->src != sg) {
		rctx->src[0] = *sg;
		sg_unmark_end(rctx->src);
		scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
	}
	rctx->src[0].length -= offset - sg->offset;
	rctx->src[0].offset = offset;

out:
	return err;
}
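/*
 * Note on the tweak update: gf128mul_x_ble() treats the 16-byte tweak as a
 * little-endian 128-bit value and multiplies it by x modulo the XTS
 * polynomial x^128 + x^7 + x^2 + x + 1.  Concretely, that is a left shift
 * by one bit, XORing the constant 0x87 into the lowest byte whenever bit
 * 127 was set before the shift.
 */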
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;
	gfp_t gfp;

	subreq = &rctx->subreq;
	skcipher_request_set_tfm(subreq, ctx->child);
	skcipher_request_set_callback(subreq, req->base.flags, done, req);

	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
							   GFP_ATOMIC;
	rctx->ext = NULL;

	subreq->cryptlen = XTS_BUFFER_SIZE;
	if (req->cryptlen > XTS_BUFFER_SIZE) {
		unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);

		rctx->ext = kmalloc(n, gfp);
		if (rctx->ext)
			subreq->cryptlen = n;
	}

	rctx->src = req->src;
	rctx->dst = req->dst;
	rctx->left = req->cryptlen;

	/* calculate first value of T */
	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

	return 0;
}
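/*
 * Chunk sizing: requests up to XTS_BUFFER_SIZE bytes use the fixed buf[]
 * array in struct rctx for tweak storage.  Larger requests try to kmalloc()
 * up to PAGE_SIZE of tweak storage; if that allocation fails, processing
 * silently falls back to the small buffer and simply takes more iterations
 * of the do_encrypt()/do_decrypt() loop.
 */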
static void exit_crypt(struct skcipher_request *req)
{
	struct rctx *rctx = skcipher_request_ctx(req);

	rctx->left = 0;

	if (rctx->ext)
		kzfree(rctx->ext);
}
static int do_encrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_encrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}
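/*
 * Completion handling: -EINPROGRESS (or -EBUSY when the caller allows
 * backlogging) means the child cipher will finish asynchronously and
 * processing resumes in encrypt_done()/decrypt_done().  The callbacks mask
 * the subrequest flags down to CRYPTO_TFM_REQ_MAY_BACKLOG, since the
 * remaining chunks run from (atomic) callback context where sleeping is
 * not allowed.
 */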
static void encrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		/* backlogged request entered processing; only propagate
		 * the notification for the very first chunk */
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_encrypt(req, err ?: post_crypt(req));
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	skcipher_request_complete(req, err);
}
static int encrypt(struct skcipher_request *req)
{
	return do_encrypt(req, init_crypt(req, encrypt_done));
}
static int do_decrypt(struct skcipher_request *req, int err)
{
	struct rctx *rctx = skcipher_request_ctx(req);
	struct skcipher_request *subreq;

	subreq = &rctx->subreq;

	while (!err && rctx->left) {
		err = pre_crypt(req) ?:
		      crypto_skcipher_decrypt(subreq) ?:
		      post_crypt(req);

		if (err == -EINPROGRESS ||
		    (err == -EBUSY &&
		     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return err;
	}

	exit_crypt(req);
	return err;
}
static void decrypt_done(struct crypto_async_request *areq, int err)
{
	struct skcipher_request *req = areq->data;
	struct skcipher_request *subreq;
	struct rctx *rctx;

	rctx = skcipher_request_ctx(req);

	if (err == -EINPROGRESS) {
		/* same first-chunk rule as in encrypt_done() */
		if (rctx->left != req->cryptlen)
			return;
		goto out;
	}

	subreq = &rctx->subreq;
	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

	err = do_decrypt(req, err ?: post_crypt(req));
	if (err == -EINPROGRESS || err == -EBUSY)
		return;

out:
	skcipher_request_complete(req, err);
}
static int decrypt(struct skcipher_request *req)
{
	return do_decrypt(req, init_crypt(req, decrypt_done));
}
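/*
 * xts_crypt() below is the legacy helper for the old blkcipher interface.
 * It is exported for architecture-specific XTS implementations (e.g. the
 * x86 AES/Serpent/Twofish glue code), which supply their own tweak and
 * block-crypt callbacks plus a tweak buffer through struct xts_crypt_req.
 */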
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
	      struct scatterlist *ssrc, unsigned int nbytes,
	      struct xts_crypt_req *req)
{
	const unsigned int bsize = XTS_BLOCK_SIZE;
	const unsigned int max_blks = req->tbuflen / bsize;
	struct blkcipher_walk walk;
	unsigned int nblocks;
	le128 *src, *dst, *t;
	le128 *t_buf = req->tbuf;
	int err, i;

	BUG_ON(max_blks < 1);

	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	nblocks = min(nbytes / bsize, max_blks);
	src = (le128 *)walk.src.virt.addr;
	dst = (le128 *)walk.dst.virt.addr;

	/* calculate first value of T */
	req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

	/* enter the loop with the freshly computed T_0 (skip the multiply) */
	i = 0;
	goto first;

	for (;;) {
		do {
			for (i = 0; i < nblocks; i++) {
				gf128mul_x_ble(&t_buf[i], t);
first:
				t = &t_buf[i];

				/* PP <- T xor P */
				le128_xor(dst + i, t, src + i);
			}

			/* CC <- E(Key1,PP) */
			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
				      nblocks * bsize);

			/* C <- T xor CC */
			for (i = 0; i < nblocks; i++)
				le128_xor(dst + i, dst + i, &t_buf[i]);

			src += nblocks;
			dst += nblocks;
			nbytes -= nblocks * bsize;
			nblocks = min(nbytes / bsize, max_blks);
		} while (nblocks > 0);

		*(le128 *)walk.iv = *t;

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
		if (!nbytes)
			break;

		nblocks = min(nbytes / bsize, max_blks);
		src = (le128 *)walk.src.virt.addr;
		dst = (le128 *)walk.dst.virt.addr;
	}

	return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);
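/*
 * Worked example of the batching above (the tbuflen value is an assumption
 * for illustration): with req->tbuflen == 256 (so max_blks == 16) and a
 * 512-byte sector, the inner do-loop runs twice.  Each pass precomputes
 * sixteen consecutive tweaks T_i, XORs them into the plaintext, encrypts
 * the whole 256-byte span with a single crypt_fn() call, and XORs the same
 * tweaks into the result.
 */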
static int init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct priv *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child;
	struct crypto_cipher *tweak;

	child = crypto_spawn_skcipher(&ictx->spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = child;

	tweak = crypto_alloc_cipher(ictx->name, 0, 0);
	if (IS_ERR(tweak)) {
		crypto_free_skcipher(ctx->child);
		return PTR_ERR(tweak);
	}

	ctx->tweak = tweak;

	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
					 sizeof(struct rctx));

	return 0;
}
static void exit_tfm(struct crypto_skcipher *tfm)
{
	struct priv *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
	crypto_free_cipher(ctx->tweak);
}
static void free(struct skcipher_instance *inst)
{
	crypto_drop_skcipher(skcipher_instance_ctx(inst));
	kfree(inst);
}
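/*
 * Template instantiation: create() accepts either a raw cipher name (as in
 * "xts(aes)") or a full skcipher name; if grabbing the name as given fails
 * with -ENOENT it retries with "ecb(<name>)".  The underlying algorithm
 * must have a 16-byte block size and no IV of its own, and any "ecb(...)"
 * wrapper is stripped again when composing the instance's cra_name.
 */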
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_attr_type *algt;
	struct xts_instance_ctx *ctx;
	struct skcipher_alg *alg;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));

	mask = crypto_requires_off(algt->type, algt->mask,
				   CRYPTO_ALG_NEED_FALLBACK |
				   CRYPTO_ALG_ASYNC);

	err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0, mask);
	if (err == -ENOENT) {
		err = -ENAMETOOLONG;
		if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(&ctx->spawn);

	err = -EINVAL;
	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
		goto err_drop_spawn;

	if (crypto_skcipher_alg_ivsize(alg))
		goto err_drop_spawn;

	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
				  &alg->base);
	if (err)
		goto err_drop_spawn;

	err = -EINVAL;
	cipher_name = alg->base.cra_name;

	/* Alas we screwed up the naming so we have to mangle the
	 * cipher name.
	 */
	if (!strncmp(cipher_name, "ecb(", 4)) {
		unsigned len;

		len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
		if (len < 2 || len >= sizeof(ctx->name))
			goto err_drop_spawn;

		if (ctx->name[len - 1] != ')')
			goto err_drop_spawn;

		ctx->name[len - 1] = 0;

		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
			/* don't leak inst: fail through the cleanup path */
			err = -ENAMETOOLONG;
			goto err_drop_spawn;
		}
	} else
		goto err_drop_spawn;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
				       (__alignof__(u64) - 1);

	inst->alg.ivsize = XTS_BLOCK_SIZE;
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

	inst->alg.base.cra_ctxsize = sizeof(struct priv);

	inst->alg.init = init_tfm;
	inst->alg.exit = exit_tfm;

	inst->alg.setkey = setkey;
	inst->alg.encrypt = encrypt;
	inst->alg.decrypt = decrypt;

	inst->free = free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(&ctx->spawn);
err_free_inst:
	kfree(inst);
	goto out;
}
static struct crypto_template crypto_tmpl = {
	.name = "xts",
	.create = create,
	.module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
	return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
	crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
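/*
 * Usage sketch (an illustrative, non-authoritative example; the key,
 * buffer and sector values are assumptions of the example, and error
 * handling is omitted):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *r;
 *	struct scatterlist sg;
 *	u8 iv[XTS_BLOCK_SIZE] = { 0 };	// sector number, little endian
 *	u8 key[64];			// Key1 || Key2 for xts(aes), AES-256
 *	u8 data[512];			// one sector, encrypted in place
 *
 *	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	r = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, sizeof(data));
 *	skcipher_request_set_crypt(r, &sg, &sg, sizeof(data), iv);
 *	crypto_skcipher_encrypt(r);	// may return -EINPROGRESS if async
 *	skcipher_request_free(r);
 *	crypto_free_skcipher(tfm);
 */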