/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text. This algorithm requires that the block size be equal
 * to the IV size. It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
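
/*
 * Illustrative usage (example only, not part of the original header): the
 * template is normally instantiated indirectly, for instance via
 *
 *	crypto_alloc_aead("echainiv(authenc(hmac(sha256),cbc(aes)))", 0, 0)
 *
 * so that echainiv generates and prepends the IV for the wrapped authenc
 * instance.
 */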

#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define MAX_IV_SIZE 16

static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);

/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
	u32 *a = (u32 *)dst;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		*a++ = this_cpu_read(*b);
		b++;
	}
}

static void echainiv_write_iv(const u8 *src, unsigned size)
{
	const u32 *a = (const u32 *)src;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		this_cpu_write(*b, *a);
		a++;
		b++;
	}
}
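
/*
 * Completion handling: save the IV that was just used into the per-cpu
 * chain state, and if an aligned copy of the IV was allocated by
 * echainiv_encrypt(), propagate it back to the request and free it.
 */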
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kfree(subreq->iv);
}

static void echainiv_encrypt_complete(struct crypto_async_request *base,
				      int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}
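
/*
 * Encryption: if source and destination differ, the associated data and
 * plaintext are first copied across with the null cipher.  The request IV
 * is xored with the salt and written to the head of the destination, where
 * it gets encrypted along with the plaintext, while the child transform is
 * invoked with the current per-cpu chain value as its IV.
 */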
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);

		skcipher_request_set_tfm(nreq, ctx->sknull);
		skcipher_request_set_callback(nreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(nreq, req->src, req->dst,
					   req->assoclen + req->cryptlen,
					   NULL);

		err = crypto_skcipher_encrypt(nreq);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen, info);
	aead_request_set_ad(subreq, req->assoclen);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}
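
/*
 * Decryption: the generated IV travels at the front of the ciphertext, so
 * it is copied out of the source into req->iv and counted as associated
 * data; only the remaining cryptlen - ivsize bytes are decrypted.
 */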
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}
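
/*
 * Instance creation: wrap the underlying AEAD with the generic geniv
 * helpers and reject IV sizes that are not a multiple of 32 bits or that
 * exceed MAX_IV_SIZE.
 */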
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

	inst->free = aead_geniv_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

	return 0;

free_inst:
	aead_geniv_free(inst);
	return err;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}

static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_aead_create,
	.free = echainiv_free,
	.module = THIS_MODULE,
};

static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");