// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x,go1.11,!gccgo,!appengine

// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction.
GLOBL ·keyMask<>(SB), RODATA, $16
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f

GLOBL ·bswapMask<>(SB), RODATA, $16
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100

GLOBL ·constants<>(SB), RODATA, $48
DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+8(SB)/8, $0x0000050403020100
DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+24(SB)/8, $0x00000a0908070605
DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f
DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b

GLOBL ·c<>(SB), RODATA, $48
DATA ·c<>+0(SB)/8, $0x0000050403020100
DATA ·c<>+8(SB)/8, $0x0000151413121110
DATA ·c<>+16(SB)/8, $0x00000a0908070605
DATA ·c<>+24(SB)/8, $0x00001a1918171615
DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b
DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b

GLOBL ·reduce<>(SB), RODATA, $32
DATA ·reduce<>+0(SB)/8, $0x0
DATA ·reduce<>+8(SB)/8, $0xfffffffffff
DATA ·reduce<>+16(SB)/8, $0x0
DATA ·reduce<>+24(SB)/8, $0x3ffffffffff
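
// h and r are kept in unsigned little-endian limbs of 44, 44 and 42 bits
// (44+44+42 = 130); the values above clear all but the low 44 or 42 bits
// of a limb. Because 2**130 == 5 (mod 2**130-5), bits carried out of the
// top limb fold back into the bottom limb multiplied by 5. A minimal Go
// sketch of that fold, with illustrative uint64 limb names l0, l1, l2:
//
//	c := l2 >> 42       // bits at weight 2**130 and above
//	l2 &= 0x3ffffffffff // keep the low 42 bits
//	l0 += c * 5         // since 2**130 == 5 (mod 2**130-5)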
// h = (f*g) % (2**130-5) [partial reduction]
// uses T_0...T_9 temporary registers
// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2
#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
	\ // Eliminate the dependency for the last 2 VMSLs
	VMSLG m02_0, r_2, m4_2, m4_2 \
	VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined
	VMSLG m02_0, r_0, m4_0, m4_0 \
	VMSLG m02_1, r5_2, V0, T_0 \
	VMSLG m02_0, r_1, m4_1, m4_1 \
	VMSLG m02_1, r_0, V0, T_1 \
	VMSLG m02_1, r_1, V0, T_2 \
	VMSLG m02_2, r5_1, V0, T_3 \
	VMSLG m02_2, r5_2, V0, T_4 \
	VMSLG m13_0, r_0, m5_0, m5_0 \
	VMSLG m13_1, r5_2, V0, T_5 \
	VMSLG m13_0, r_1, m5_1, m5_1 \
	VMSLG m13_1, r_0, V0, T_6 \
	VMSLG m13_1, r_1, V0, T_7 \
	VMSLG m13_2, r5_1, V0, T_8 \
	VMSLG m13_2, r5_2, V0, T_9 \
	VMSLG m02_2, r_0, m4_2, m4_2 \
	VMSLG m13_2, r_0, m5_2, m5_2 \
	VAQ m4_0, T_0, m02_0 \
	VAQ m4_1, T_1, m02_1 \
	VAQ m5_0, T_5, m13_0 \
	VAQ m5_1, T_6, m13_1 \
	VAQ m02_0, T_3, m02_0 \
	VAQ m02_1, T_4, m02_1 \
	VAQ m13_0, T_8, m13_0 \
	VAQ m13_1, T_9, m13_1 \
	VAQ m4_2, T_2, m02_2 \
	VAQ m5_2, T_7, m13_2
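
// Per limb column the macro computes, in effect (a Go-flavoured sketch of
// the arithmetic only, ignoring the [even, odd] lane layout and the
// additive m4/m5 inputs):
//
//	h0 = f0*g0 + f1*(20*g2) + f2*(20*g1)
//	h1 = f0*g1 + f1*g0 + f2*(20*g2)
//	h2 = f0*g2 + f1*g1 + f2*g0
//
// r5_1 and r5_2 hold 20*g1 and 20*g2: a product overflowing into weight
// 2**132 folds back as 2**132 = 4*2**130 == 4*5 = 20 (mod 2**130-5).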
// SQUARE uses three limbs of r and 20*r_2 to output the square of r
// uses T_1, T_5 and T_7 temporary registers
// input: r_0, r_1, r_2, r5_2
// temp: TEMP0, TEMP1, TEMP2
// output: p0, p1, p2
#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \
	VMSLG r_0, r_0, p0, p0 \
	VMSLG r_1, r5_2, V0, TEMP0 \
	VMSLG r_2, r5_2, p1, p1 \
	VMSLG r_0, r_1, V0, TEMP1 \
	VMSLG r_1, r_1, p2, p2 \
	VMSLG r_0, r_2, V0, TEMP2 \
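
// Squaring needs only six distinct limb products; the cross terms in
// TEMP0..TEMP2 are doubled as the macro accumulates them. In sketch form:
//
//	p0 = r0*r0 + 2*(r1*(20*r2))
//	p1 = r2*(20*r2) + 2*(r0*r1)
//	p2 = r1*r1 + 2*(r0*r2)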
// carry h0->h1->h2->h0 || h3->h4->h5->h3
// uses T_2, T_4, T_5, T_7, T_8, T_9 for the macro temps
//      t6, t7, t8, t9, t10, t11
// input: h0, h1, h2, h3, h4, h5
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11
// output: h0, h1, h2, h3, h4, h5
#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
	VLM (R12), t6, t7 \ // 44 and 42 bit clear masks
	VLEIB $7, $0x28, t10 \ // 5 byte shift mask
	VREPIB $4, t8 \ // 4 bit shift mask
	VREPIB $2, t11 \ // 2 bit shift mask
	VSRLB t10, h0, t0 \ // h0 byte shift
	VSRLB t10, h1, t1 \ // h1 byte shift
	VSRLB t10, h2, t2 \ // h2 byte shift
	VSRLB t10, h3, t3 \ // h3 byte shift
	VSRLB t10, h4, t4 \ // h4 byte shift
	VSRLB t10, h5, t5 \ // h5 byte shift
	VSRL t8, t0, t0 \ // h0 bit shift
	VSRL t8, t1, t1 \ // h1 bit shift
	VSRL t11, t2, t2 \ // h2 bit shift
	VSRL t8, t3, t3 \ // h3 bit shift
	VSRL t8, t4, t4 \ // h4 bit shift
	VESLG $2, t2, t9 \ // h2 carry x5
	VSRL t11, t5, t5 \ // h5 bit shift
	VN t6, h0, h0 \ // h0 clear carry
	VAQ t2, t9, t2 \ // h2 carry x5
	VESLG $2, t5, t9 \ // h5 carry x5
	VN t6, h1, h1 \ // h1 clear carry
	VN t7, h2, h2 \ // h2 clear carry
	VAQ t5, t9, t5 \ // h5 carry x5
	VN t6, h3, h3 \ // h3 clear carry
	VN t6, h4, h4 \ // h4 clear carry
	VN t7, h5, h5 \ // h5 clear carry
	VAQ t0, h1, h1 \ // h0->h1
	VAQ t3, h4, h4 \ // h3->h4
	VAQ t1, h2, h2 \ // h1->h2
	VAQ t4, h5, h5 \ // h4->h5
	VAQ t2, h0, h0 \ // h2->h0
	VAQ t5, h3, h3 \ // h5->h3
	VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves
	VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5]
	VSLDB $8, h1, h1, h1 \
	VSLDB $8, h2, h2, h2 \
	VESRLG $44, h3, t0 \ // 44 bit shift right
	VN t6, h3, h3 \ // clear carry bits
	VESLG $2, t2, t9 \ // multiply carry by 5
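
// Both 128-bit register halves carry independently, so one pass reduces
// the h0..h2 and h3..h5 accumulators together. Per lane the carry ring
// is, in sketch form (m44 and m42 are the masks from ·reduce<>):
//
//	c0, c1, c2 = h0>>44, h1>>44, h2>>42
//	h0, h1, h2 = h0&m44, h1&m44, h2&m42
//	h1, h2, h0 = h1+c0, h2+c1, h0+5*c2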
// carry h0->h1->h2->h0
// input: h0, h1, h2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8
// output: h0, h1, h2
#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \
	VLEIB $7, $0x28, t3 \ // 5 byte shift mask
	VREPIB $4, t4 \ // 4 bit shift mask
	VREPIB $2, t7 \ // 2 bit shift mask
	VGBM $0x003F, t5 \ // mask to clear carry bits
	VESRLG $4, t5, t5 \ // 44 bit clear mask
	VESRLG $2, t5, t6 \ // 42 bit clear mask
// expands two message blocks into the lower halves of the d registers
// moves the contents of the d registers into the upper halves
// input: in1, in2, d0, d1, d2, d3, d4, d5
// temp: TEMP0, TEMP1, TEMP2, TEMP3
// output: d0, d1, d2, d3, d4, d5
#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \
	VGBM $0xff3f, TEMP0 \
	VGBM $0xff1f, TEMP1 \
	VESLG $4, d1, TEMP2 \
	VESLG $4, d4, TEMP3 \
	VESRLG $4, TEMP0, TEMP0 \
	VPERM in1, d0, EX0, d0 \
	VPERM in2, d3, EX0, d3 \
	VPERM in1, d2, EX2, d2 \
	VPERM in2, d5, EX2, d5 \
	VPERM in1, TEMP2, EX1, d1 \
	VPERM in2, TEMP3, EX1, d4 \
// expands one message block into the lower halves of the d registers
// moves the contents of the d registers into the upper halves
// input: in, d0, d1, d2
// temp: TEMP0, TEMP1, TEMP2
// output: d0, d1, d2
#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \
	VGBM $0xff3f, TEMP0 \
	VESLG $4, d1, TEMP2 \
	VGBM $0xff1f, TEMP1 \
	VPERM in, d0, EX0, d0 \
	VESRLG $4, TEMP0, TEMP0 \
	VPERM in, d2, EX2, d2 \
	VPERM in, TEMP2, EX1, d1 \
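
// Both EXPACC variants split 16-byte blocks into the same limb format as
// h. As a hedged Go sketch, with lo and hi the little-endian 64-bit
// halves of a block:
//
//	m0 = lo & 0xfffffffffff
//	m1 = (lo>>44 | hi<<20) & 0xfffffffffff
//	m2 = hi>>24 | 1<<40 // the 2**128 bit appended to every full block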
// pack h2:h0 into h1:h0 (no carry)
// input: h0, h1, h2
// output: h0, h1, h2
#define PACK(h0, h1, h2) \
	VMRLG h1, h2, h2 \ // copy h1 to upper half of h2
	VESLG $44, h1, h1 \ // shift limb 1 left by 44 bits, keeping 20
	VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1
	VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1
	VLEIG $1, $0, h1 \ // clear limb 2 bits from lower half of h1
	VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1)
	VLEIG $0, $0, h2 \ // clear upper half of h2
	VESRLG $40, h2, h1 \ // h1 now has upper two bits of result
	VLEIB $7, $88, h1 \ // for byte shift (11 bytes)
	VSLB h1, h2, h2 \ // shift h2 11 bytes to the left
	VO h0, h2, h0 \ // combine h0 with the low 40 bits of limb 2
	VLEIG $0, $0, h1 // clear upper half of h1
// if h > 2**130-5 then h -= 2**130-5
// input: h0, h1
// temp: t0, t1, t2
// output: h0
#define MOD(h0, h1, t0, t1, t2) \
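
// In Go, this conditional final subtraction could be sketched as
// (assuming, as after PACK, that h1:h0 holds h with h1 the top 2 bits):
//
//	t = h + 5                // 131-bit sum
//	if t>>130 != 0 {         // h + 5 >= 2**130, i.e. h > 2**130-5
//		h = t & (1<<130 - 1) // h + 5 - 2**130 == h - (2**130-5)
//	}
//
// The macro computes this selection with vector operations.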
// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
TEXT ·poly1305vmsl(SB), $0-32
	// This code processes 6 blocks up front and then up to 4 blocks
	// (64 bytes) per iteration, using the algorithm described in:
	// NEON crypto, Daniel J. Bernstein & Peter Schwabe
	// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
	// and as modified for VMSL as described in:
	// Accelerating Poly1305 Cryptographic Message Authentication on the z14
	// O'Farrell et al, CASCON 2017, p48-55
	// https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht
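
	// The speedup comes from evaluating the polynomial over several
	// blocks at once. For four blocks c1..c4 the key identity is:
	//
	//	h = (((h+c1)*r + c2)*r + c3)*r + c4          (sequential)
	//	h = (h+c1)*r**4 + c2*r**3 + c3*r**2 + c4*r   (batched)
	//
	// so two vector lanes can each carry half of the blocks using r**2
	// and r**4, and the lanes are recombined at the end with a final
	// multiply by [r**2, r].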
	LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
	// load EX0, EX1 and EX2
	MOVD $·constants<>(SB), R5
	VLM (R5), EX0, EX2

	MOVD $·keyMask<>(SB), R6

	VZERO T_2 // limbs for r
	EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7)

	// T_2, T_3, T_4: [0, r]

	VLEIG $1, $20, T_0 // T_0: [0, 20]
	VMSLG T_0, T_3, T_5, T_5
	VMSLG T_0, T_4, T_6, T_6
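
	// 20*r is kept alongside r so that the wrapped products in MULTIPLY
	// and SQUARE (which need the factor 20 = 4*5, see the note after
	// MULTIPLY) cost a single VMSL each.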
	// store r for final block in GR
	VLGVG $1, T_2, RSAVE_0
	VLGVG $1, T_3, RSAVE_1
	VLGVG $1, T_4, RSAVE_2
	VLGVG $1, T_5, R5SAVE_1
	VLGVG $1, T_6, R5SAVE_2
	// initialize pointer for reduce constants
	MOVD $·reduce<>(SB), R12

	// calculate r**2 and 20*(r**2)
	SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7)
	REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1)

	VMSLG T_0, R_1, R5_1, R5_1
	VMSLG T_0, R_2, R5_2, R5_2
	// skip r**4 calculation if 3 blocks or fewer

	// calculate r**4 and 20*(r**4)
	SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7)
	REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1)

	VMSLG T_0, T_9, T_2, T_2
	VMSLG T_0, T_10, T_3, T_3

	// put r**2 to the right and r**4 to the left of R_0, R_1, R_2
	VSLDB $8, T_8, T_8, T_8
	VSLDB $8, T_9, T_9, T_9
	VSLDB $8, T_10, T_10, T_10
	VSLDB $8, T_2, T_2, T_2
	VSLDB $8, T_3, T_3, T_3
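
	// With r**2 in the right half and r**4 in the left half of each
	// register, one VMSL advances the even-block and odd-block
	// accumulators at the same time.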
	CMPBLE R3, $80, load // less than or equal to 5 blocks in message

	CMPBGE R3, $16, 2(PC)

	EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
	EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)

	EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3)

	CMPBLT R3, $16, 2(PC)

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)

	CMPBLE R3, $0, square

	// load EX0, EX1 and EX2

	CMPBLE R3, $64, add // last 4 or fewer blocks left
	// next 4 full blocks
	REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9)

	// EXPACC inlined to create [m2, m3] limbs
	VGBM $0x3f3f, T_0 // 44 bit clear mask
	VGBM $0x1f1f, T_1 // 40 bit clear mask
	VPERM M2, M3, EX0, T_3
	VESRLG $4, T_0, T_0 // 44 bit clear mask ready
	VPERM M2, M3, EX1, T_4
	VPERM M2, M3, EX2, T_5

	VMRHG H0_1, T_3, H0_0
	VMRHG H1_1, T_4, H1_0
	VMRHG H2_1, T_5, H2_0
	VMRLG H0_1, T_3, H0_1
	VMRLG H1_1, T_4, H1_1
	VMRLG H2_1, T_5, H2_1

	VPERM M4, M5, EX0, T_3
	VPERM M4, M5, EX1, T_4
	VPERM M4, M5, EX2, T_5

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)

	REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
	// load EX0, EX1, EX2
	MOVD $·constants<>(SB), R5

	// h may be >= 2*(2**130-5) so we need to reduce it again
	// M0...M4 are used as temps here
	REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
next: // carry h1->h2

	// clear h1 carry bits

	// h is now < 2*(2**130-5)
	// pack h into h1 (hi) and h0 (lo)
	PACK(H0_0, H1_0, H2_0)

	// if h > 2**130-5 then h -= 2**130-5
	MOD(H0_0, H1_0, T_0, T_1, T_2)

	MOVD $·bswapMask<>(SB), R5

	VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big endian)

	VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little endian)
	// load EX0, EX1, EX2
	MOVD $·constants<>(SB), R5

	REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)

	CMPBLE R3, $48, b3 // 3 blocks or fewer

	// 4 (3+1) blocks remaining
	CMPBEQ R3, $16, 2(PC)

	EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)

	EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3)

	CMPBNE R3, $16, 2(PC)

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)

	CMPBLE R3, $0, square // this condition must always hold true!
	// 3 blocks remaining

	VSLDB $8, R_0, R_0, R_0
	VSLDB $8, R_1, R_1, R_1
	VSLDB $8, R_2, R_2, R_2
	VSLDB $8, R5_1, R5_1, R5_1
	VSLDB $8, R5_2, R5_2, R5_2

	VLVGG $1, RSAVE_0, R_0
	VLVGG $1, RSAVE_1, R_1
	VLVGG $1, RSAVE_2, R_2
	VLVGG $1, R5SAVE_1, R5_1
	VLVGG $1, R5SAVE_2, R5_2
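
	// R_0 .. R5_2 now hold [r**2, r]: the rotates moved r**2 into the
	// upper halves and the VLVGGs reload r from the GPR copies into the
	// lower halves.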
	VSLDB $8, H0_0, H0_0, H0_0
	VSLDB $8, H1_0, H1_0, H1_0
	VSLDB $8, H2_0, H2_0, H2_0

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5)
	CMPBEQ R3, $16, 2(PC)

	EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9)

	EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6)

	REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)

	// [H, m2] * [r**2, r]
	EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3)
	CMPBNE R3, $16, 2(PC)

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10)

	CMPBLE R3, $0, next // this condition must always hold true!
	// 2 blocks remaining

	VSLDB $8, R_0, R_0, R_0
	VSLDB $8, R_1, R_1, R_1
	VSLDB $8, R_2, R_2, R_2
	VSLDB $8, R5_1, R5_1, R5_1
	VSLDB $8, R5_2, R5_2, R5_2

	VLVGG $1, RSAVE_0, R_0
	VLVGG $1, RSAVE_1, R_1
	VLVGG $1, RSAVE_2, R_2
	VLVGG $1, R5SAVE_1, R5_1
	VLVGG $1, R5SAVE_2, R5_2

	VSLDB $8, H0_0, H0_0, H0_0
	VSLDB $8, H1_0, H1_0, H1_0
	VSLDB $8, H2_0, H2_0, H2_0

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
	// move h to the left and zeros to the right
	VSLDB $8, H0_0, H0_0, H0_0
	VSLDB $8, H1_0, H1_0, H1_0
	VSLDB $8, H2_0, H2_0, H2_0

	// get message blocks and append the 1 bit (2**128) to each
	CMPBEQ R3, $16, 2(PC)

	EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3)
	EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3)

	CMPBNE R3, $16, 2(PC)

	// at this point R_0 .. R5_2 look like [r**2, r]
	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
	VSLDB $8, R_0, R_0, R_0
	VSLDB $8, R_1, R_1, R_1
	VSLDB $8, R_2, R_2, R_2
	VSLDB $8, R5_1, R5_1, R5_1
	VSLDB $8, R5_2, R5_2, R5_2

	VLVGG $1, RSAVE_0, R_0
	VLVGG $1, RSAVE_1, R_1
	VLVGG $1, RSAVE_2, R_2
	VLVGG $1, R5SAVE_1, R5_1
	VLVGG $1, R5SAVE_2, R5_2

	VSLDB $8, H0_0, H0_0, H0_0
	VSLDB $8, H1_0, H1_0, H1_0
	VSLDB $8, H2_0, H2_0, H2_0

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
	// set up [0, m0] limbs
	CMPBEQ R3, $16, 2(PC)

	EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) // limbs: [0, m]
	CMPBNE R3, $16, 2(PC)

	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
	VSLDB $8, R_0, R_0, R_0
	VSLDB $8, R_1, R_1, R_1
	VSLDB $8, R_2, R_2, R_2
	VSLDB $8, R5_1, R5_1, R5_1
	VSLDB $8, R5_2, R5_2, R5_2

	VLVGG $1, RSAVE_0, R_0
	VLVGG $1, RSAVE_1, R_1
	VLVGG $1, RSAVE_2, R_2
	VLVGG $1, R5SAVE_1, R5_1
	VLVGG $1, R5SAVE_2, R5_2

	VSLDB $8, H0_0, H0_0, H0_0
	VSLDB $8, H1_0, H1_0, H1_0
	VSLDB $8, H2_0, H2_0, H2_0

	// (h0*r**2) + (h1*r)
	MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
	REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)