/*
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/cpu_device_id.h>
#include <asm/i387.h>	/* irq_ts_save()/irq_ts_restore() */

struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

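/*
 * On these cores the XSHA instructions hash a complete message in one go,
 * so partial updates are delegated to a software fallback shash; its
 * exported intermediate state is then replayed into the hardware by the
 * finup routines below. (Summary inferred from the code paths in this file.)
 */
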
static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}

static inline void padlock_output_block(uint32_t *src,
			uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

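/*
 * padlock_output_block() byte-swaps each 32-bit state word: the hardware
 * leaves the digest as native-endian words in the aligned result buffer,
 * while the crypto API expects the big-endian digest byte stream. (This
 * reading is inferred from the swab32() above.)
 */
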
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* Prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : /* no outputs */
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

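/*
 * Register usage for REP XSHA1/XSHA256 as driven above: ESI points at the
 * remaining input, EDI at the 128-byte aligned buffer preloaded with the
 * intermediate state, EAX carries the byte count already folded into that
 * state and ECX the total message length; the microcode appends the final
 * padding itself. (Described from the asm constraints in this file, not
 * from the PadLock programming guide.)
 */
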
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* Prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : /* no outputs */
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

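/*
 * Note that descsize grows here by the fallback's descsize: the shash_desc
 * embedded in struct padlock_sha_desc must have room for the fallback's
 * per-request state as well.
 */
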
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = padlock_sha_init,
	.update = padlock_sha_update,
	.finup = padlock_sha1_finup,
	.final = padlock_sha1_final,
	.export = padlock_sha_export,
	.import = padlock_sha_import,
	.descsize = sizeof(struct padlock_sha_desc),
	.statesize = sizeof(struct sha1_state),
	.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sha1-padlock",
		.cra_priority = PADLOCK_CRA_PRIORITY,
		.cra_flags = CRYPTO_ALG_TYPE_SHASH |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct padlock_sha_ctx),
		.cra_module = THIS_MODULE,
		.cra_init = padlock_cra_init,
		.cra_exit = padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init = padlock_sha_init,
	.update = padlock_sha_update,
	.finup = padlock_sha256_finup,
	.final = padlock_sha256_final,
	.export = padlock_sha_export,
	.import = padlock_sha_import,
	.descsize = sizeof(struct padlock_sha_desc),
	.statesize = sizeof(struct sha256_state),
	.base = {
		.cra_name = "sha256",
		.cra_driver_name = "sha256-padlock",
		.cra_priority = PADLOCK_CRA_PRIORITY,
		.cra_flags = CRYPTO_ALG_TYPE_SHASH |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct padlock_sha_ctx),
		.cra_module = THIS_MODULE,
		.cra_init = padlock_cra_init,
		.cra_exit = padlock_cra_exit,
	}
};

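/*
 * Usage sketch (illustrative only, not part of this driver): once the algs
 * above are registered, a kernel caller picks them up by name; cra_priority
 * makes "sha1"/"sha256" resolve to the PadLock implementations when they
 * outrank the generic ones. Roughly, with data/len being the caller's
 * buffer:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	struct shash_desc *sdesc;
 *	u8 digest[SHA1_DIGEST_SIZE];
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	sdesc = kmalloc(sizeof(*sdesc) + crypto_shash_descsize(tfm),
 *			GFP_KERNEL);
 *	sdesc->tfm = tfm;
 *	sdesc->flags = 0;
 *	crypto_shash_digest(sdesc, data, len, digest);
 *	kfree(sdesc);
 *	crypto_free_shash(tfm);
 */
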
/*
 * Two more shash_alg instances for the hardware multi-part hash
 * supported by the VIA Nano processor.
 */
static int padlock_sha1_init_nano(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};
	return 0;
}

static int padlock_sha1_update_nano(struct shash_desc *desc,
			const u8 *data, unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);

	if ((partial + len) >= SHA1_BLOCK_SIZE) {

		/* Append bytes from the state's buffer to fill a block and handle it first. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buffer + partial, data,
				done + SHA1_BLOCK_SIZE);
			src = sctx->buffer;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA1_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining full blocks of the input data. */
		if (len - done >= SHA1_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}

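/*
 * In the Nano update paths the XSHA opcodes are invoked with EAX = -1 and
 * ECX set to a whole number of blocks, which (as used here) makes the
 * hardware compress exactly those blocks without appending the final
 * padding, so the digest state in *dst can be carried across calls. This
 * reading is taken from the asm constraints above, not from VIA
 * documentation.
 */
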
static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha1_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap the digest to output byte order */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);

	return 0;
}

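/*
 * Worked example of the "56 mod 64" computation used by both final_nano
 * routines: a 3-byte message has partial = 3, so padlen = 56 - 3 = 53 and
 * 3 + 53 + 8 length bytes fill exactly one 64-byte block; a 60-byte message
 * has partial = 60, so padlen = (64 + 56) - 60 = 60 and the padding spills
 * into a second block, as SHA-1/SHA-256 padding requires.
 */
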
static int padlock_sha256_init_nano(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
			   SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 },
	};
	return 0;
}

static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes and 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);

	if ((partial + len) >= SHA256_BLOCK_SIZE) {

		/* Append bytes from the state's buffer to fill a block and handle it first. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buf + partial, data,
				done + SHA256_BLOCK_SIZE);
			src = sctx->buf;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA256_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining full blocks of the input data. */
		if (len - done >= SHA256_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA256_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA256_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}

static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *state =
		(struct sha256_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha256_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap the digest to output byte order */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);

	return 0;
}

static int padlock_sha_export_nano(struct shash_desc *desc,
				void *out)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, statesize);
	return 0;
}

static int padlock_sha_import_nano(struct shash_desc *desc,
				const void *in)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, statesize);
	return 0;
}

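/*
 * On the Nano variants the descriptor is just the raw sha1/sha256 state
 * (descsize == statesize in the algs below), so export/import reduce to a
 * memcpy; the fallback-based variants above must go through the fallback's
 * own export/import instead.
 */
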
static struct shash_alg sha1_alg_nano = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = padlock_sha1_init_nano,
	.update = padlock_sha1_update_nano,
	.final = padlock_sha1_final_nano,
	.export = padlock_sha_export_nano,
	.import = padlock_sha_import_nano,
	.descsize = sizeof(struct sha1_state),
	.statesize = sizeof(struct sha1_state),
	.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sha1-padlock-nano",
		.cra_priority = PADLOCK_CRA_PRIORITY,
		.cra_flags = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
	}
};

static struct shash_alg sha256_alg_nano = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init = padlock_sha256_init_nano,
	.update = padlock_sha256_update_nano,
	.final = padlock_sha256_final_nano,
	.export = padlock_sha_export_nano,
	.import = padlock_sha_import_nano,
	.descsize = sizeof(struct sha256_state),
	.statesize = sizeof(struct sha256_state),
	.base = {
		.cra_name = "sha256",
		.cra_driver_name = "sha256-padlock-nano",
		.cra_priority = PADLOCK_CRA_PRIORITY,
		.cra_flags = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
	}
};

static struct x86_cpu_id padlock_sha_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_PHE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);

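/*
 * The x86cpu device table lets userspace autoload this module on CPUs that
 * advertise the PHE (PadLock Hash Engine) feature flag; padlock_init()
 * below still verifies that the engine is actually enabled.
 */
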
static int __init padlock_init(void)
{
	int rc = -ENODEV;
	struct cpuinfo_x86 *c = &cpu_data(0);
	struct shash_alg *sha1;
	struct shash_alg *sha256;

	if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
		return -ENODEV;

	/* Register the Nano-specific algorithms when running on a VIA Nano
	 * processor; otherwise register the original ones. */
	if (c->x86_model < 0x0f) {
		sha1 = &sha1_alg;
		sha256 = &sha256_alg;
	} else {
		sha1 = &sha1_alg_nano;
		sha256 = &sha256_alg_nano;
	}

	rc = crypto_register_shash(sha1);
	if (rc)
		goto out;
	rc = crypto_register_shash(sha256);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
	return 0;

out_unreg1:
	crypto_unregister_shash(sha1);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_model >= 0x0f) {
		crypto_unregister_shash(&sha1_alg_nano);
		crypto_unregister_shash(&sha256_alg_nano);
	} else {
		crypto_unregister_shash(&sha1_alg);
		crypto_unregister_shash(&sha256_alg);
	}
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");