lib/raid6/recov_neon_inner.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <arm_neon.h>

#ifdef CONFIG_ARM
/*
 * AArch32 does not provide this intrinsic natively because it does not
 * implement the underlying instruction. AArch32 only provides a 64-bit
 * wide vtbl.8 instruction, so use that instead.
 */
static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
	union {
		uint8x16_t	val;
		uint8x8x2_t	pair;
	} __a = { a };

	return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
			   vtbl2_u8(__a.pair, vget_high_u8(b)));
}
#endif

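/*
 * Inner loop for recovering two failed data blocks from the P and Q
 * syndromes. pbmul and qmul each point to a 32-byte GF(256) lookup
 * table: the first 16 bytes give the product of the multiplier with
 * every possible low nibble, the second 16 bytes with every possible
 * high nibble.
 */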
void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
			      uint8_t *dq, const uint8_t *pbmul,
			      const uint8_t *qmul)
{
	uint8x16_t pm0 = vld1q_u8(pbmul);
	uint8x16_t pm1 = vld1q_u8(pbmul + 16);
	uint8x16_t qm0 = vld1q_u8(qmul);
	uint8x16_t qm1 = vld1q_u8(qmul + 16);
	uint8x16_t x0f = vdupq_n_u8(0x0f);

	/*
	 * while ( bytes-- ) {
	 *	uint8_t px, qx, db;
	 *
	 *	px    = *p ^ *dp;
	 *	qx    = qmul[*q ^ *dq];
	 *	*dq++ = db = pbmul[px] ^ qx;
	 *	*dp++ = db ^ px;
	 *	p++; q++;
	 * }
	 */

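	/*
	 * Vectorised equivalent of the loop above, 16 bytes per iteration
	 * (bytes is assumed to be a multiple of 16). A GF(256) multiplication
	 * by a constant is done nibble-wise: the low and high nibble of each
	 * byte are looked up in the 16-byte tables qm0/qm1 (or pm0/pm1) with
	 * vqtbl1q_u8 and the two partial products are XORed together.
	 */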
	while (bytes) {
		uint8x16_t vx, vy, px, qx, db;

		px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
		vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));

		vy = vshrq_n_u8(vx, 4);
		vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
		vy = vqtbl1q_u8(qm1, vy);
		qx = veorq_u8(vx, vy);

		vy = vshrq_n_u8(px, 4);
		vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
		vy = vqtbl1q_u8(pm1, vy);
		vx = veorq_u8(vx, vy);
		db = veorq_u8(vx, qx);

		vst1q_u8(dq, db);
		vst1q_u8(dp, veorq_u8(db, px));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
	}
}

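/*
 * Inner loop for recovering one data block together with the P block:
 * the data is rebuilt from Q via the qmul table, and the recovered
 * bytes are then XORed into p to complete the parity.
 */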
void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
			      const uint8_t *qmul)
{
	uint8x16_t qm0 = vld1q_u8(qmul);
	uint8x16_t qm1 = vld1q_u8(qmul + 16);
	uint8x16_t x0f = vdupq_n_u8(0x0f);

	/*
	 * while (bytes--) {
	 *	*p++ ^= *dq = qmul[*q ^ *dq];
	 *	q++; dq++;
	 * }
	 */

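	/*
	 * Vectorised form of the loop above, 16 bytes per iteration, using
	 * the same nibble-wise table lookup to multiply by the qmul constant
	 * in GF(256).
	 */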
	while (bytes) {
		uint8x16_t vx, vy;

		vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));

		vy = vshrq_n_u8(vx, 4);
		vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
		vy = vqtbl1q_u8(qm1, vy);
		vx = veorq_u8(vx, vy);
		vy = veorq_u8(vx, vld1q_u8(p));

		vst1q_u8(dq, vx);
		vst1q_u8(p, vy);

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
	}
}