/*
 * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
 * including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/fpu/api.h>
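/* The SIMD asm routines expect the ChaCha state to be 16-byte aligned. */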
#define CHACHA_STATE_ALIGN 16
asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
					unsigned int len, int nrounds);
asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);
#ifdef CONFIG_AS_AVX2
asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
				       unsigned int len, int nrounds);
static bool chacha_use_avx2;
#ifdef CONFIG_AS_AVX512
asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
					   unsigned int len, int nrounds);
static bool chacha_use_avx512vl;
#endif
#endif
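/*
 * Number of ChaCha blocks consumed for 'len' bytes, capped at 'maxblocks';
 * used to advance the block counter in state[12]. A partial trailing block
 * still consumes a full block of keystream, hence the round_up().
 */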
static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
{
	len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
	return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
}
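/*
 * XOR 'bytes' bytes of src into dst with ChaCha keystream, using the widest
 * SIMD routine available (AVX-512VL, then AVX2, then SSSE3) and the largest
 * block multiple that still covers the remaining length. Must be called with
 * the FPU enabled, i.e. between kernel_fpu_begin() and kernel_fpu_end().
 */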
static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
			  unsigned int bytes, int nrounds)
{
#ifdef CONFIG_AS_AVX2
#ifdef CONFIG_AS_AVX512
	if (chacha_use_avx512vl) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
			src += CHACHA_BLOCK_SIZE * 8;
			dst += CHACHA_BLOCK_SIZE * 8;
			state[12] += 8;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 4) {
			chacha_8block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 8);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 2) {
			chacha_4block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 4);
			return;
		}
		if (bytes) {
			chacha_2block_xor_avx512vl(state, dst, src, bytes,
						   nrounds);
			state[12] += chacha_advance(bytes, 2);
			return;
		}
	}
#endif
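	/* AVX2 path: up to eight blocks per call; a final partial or single
	 * block falls through to the SSSE3 routine below. */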
	if (chacha_use_avx2) {
		while (bytes >= CHACHA_BLOCK_SIZE * 8) {
			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
			bytes -= CHACHA_BLOCK_SIZE * 8;
			src += CHACHA_BLOCK_SIZE * 8;
			dst += CHACHA_BLOCK_SIZE * 8;
			state[12] += 8;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 4) {
			chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 8);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE * 2) {
			chacha_4block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 4);
			return;
		}
		if (bytes > CHACHA_BLOCK_SIZE) {
			chacha_2block_xor_avx2(state, dst, src, bytes, nrounds);
			state[12] += chacha_advance(bytes, 2);
			return;
		}
	}
#endif
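	/* SSSE3 path; also handles any single-block remainder left over from
	 * the AVX2 path. */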
	while (bytes >= CHACHA_BLOCK_SIZE * 4) {
		chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
		bytes -= CHACHA_BLOCK_SIZE * 4;
		src += CHACHA_BLOCK_SIZE * 4;
		dst += CHACHA_BLOCK_SIZE * 4;
		state[12] += 4;
	}
	if (bytes > CHACHA_BLOCK_SIZE) {
		chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
		state[12] += chacha_advance(bytes, 4);
		return;
	}
	if (bytes) {
		chacha_block_xor_ssse3(state, dst, src, bytes, nrounds);
		state[12]++;
	}
}
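/*
 * Walk the request data and XOR it with ChaCha keystream, periodically
 * yielding the FPU so that preemption is not kept disabled for too long.
 * Must be called between kernel_fpu_begin() and kernel_fpu_end().
 */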
static int chacha_simd_stream_xor(struct skcipher_walk *walk,
				  struct chacha_ctx *ctx, u8 *iv)
{
	u32 *state, state_buf[16 + 2] __aligned(8);
	int next_yield = 4096; /* bytes until next FPU yield */
	int err = 0;

	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
	crypto_chacha_init(state, ctx, iv);

	while (walk->nbytes > 0) {
		unsigned int nbytes = walk->nbytes;

		if (nbytes < walk->total) {
			nbytes = round_down(nbytes, walk->stride);
			next_yield -= nbytes;
		}

		chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
			      nbytes, ctx->nrounds);

		if (next_yield <= 0) {
			/* temporarily allow preemption */
			kernel_fpu_end();
			kernel_fpu_begin();
			next_yield = 4096;
		}

		err = skcipher_walk_done(walk, walk->nbytes - nbytes);
	}

	return err;
}
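/* Encrypt/decrypt entry point for "chacha20"; falls back to the generic C
 * implementation for short requests or when the FPU cannot be used here. */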
static int chacha_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
		return crypto_chacha_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	kernel_fpu_begin();
	err = chacha_simd_stream_xor(&walk, ctx, req->iv);
	kernel_fpu_end();

	return err;
}
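/* Entry point for "xchacha20"/"xchacha12": derive the one-time subkey with
 * HChaCha, then run the regular ChaCha stream XOR with the counter and the
 * remaining nonce bytes. */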
static int xchacha_simd(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	struct chacha_ctx subctx;
	u32 *state, state_buf[16 + 2] __aligned(8);
	u8 real_iv[16];
	int err;

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
		return crypto_xchacha_crypt(req);

	err = skcipher_walk_virt(&walk, req, true);
	if (err)
		return err;

	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
	crypto_chacha_init(state, ctx, req->iv);

	kernel_fpu_begin();
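	/* Derive the subkey from the key and the first 128 bits of the
	 * extended nonce. */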
	hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
	subctx.nrounds = ctx->nrounds;

	memcpy(&real_iv[0], req->iv + 24, 8);
	memcpy(&real_iv[8], req->iv + 16, 8);
	err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
	kernel_fpu_end();

	return err;
}
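/* Registered at priority 300 so these drivers are preferred over the
 * generic C implementations. */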
static struct skcipher_alg algs[] = {
	{
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= chacha_simd,
		.decrypt		= chacha_simd,
	}, {
		.base.cra_name		= "xchacha20",
		.base.cra_driver_name	= "xchacha20-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= xchacha_simd,
		.decrypt		= xchacha_simd,
	}, {
		.base.cra_name		= "xchacha12",
		.base.cra_driver_name	= "xchacha12-simd",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha12_setkey,
		.encrypt		= xchacha_simd,
		.decrypt		= xchacha_simd,
	},
};
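/* SSSE3 is the required baseline; AVX2/AVX-512VL use is decided at module
 * load from CPU features and, for AVX2, XSAVE support for the YMM state. */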
static int __init chacha_simd_mod_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_SSSE3))
		return -ENODEV;

#ifdef CONFIG_AS_AVX2
	chacha_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
			  boot_cpu_has(X86_FEATURE_AVX2) &&
			  cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
#ifdef CONFIG_AS_AVX512
	chacha_use_avx512vl = chacha_use_avx2 &&
			      boot_cpu_has(X86_FEATURE_AVX512VL) &&
			      boot_cpu_has(X86_FEATURE_AVX512BW); /* kmovq */
#endif
#endif
	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static void __exit chacha_simd_mod_fini(void)
{
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-simd");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-simd");