/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-offsets.h>
#include <linux/errno.h>

#ifdef CONFIG_PPC_FPU

#define STKFRM	(PPC_MIN_STKFRM + 16)
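
/*
 * The extra 16 bytes on top of the minimum frame give the helpers
 * below a quadword-aligned scratch slot at STKFRM-16(r1), used to
 * save and restore fr0 (and, presumably, vr0/vsr0 in the VMX/VSX
 * paths) around the emulated access.
 */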

	.macro	extab	instr,handler
	.section	__ex_table,"a"
	PPC_LONG	\instr,\handler
	.previous
	.endm
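
/*
 * Each extab invocation records one exception-table entry pairing a
 * possibly-faulting instruction with its fixup address, so that a bad
 * address turns into an error return rather than an oops.  A rough
 * usage sketch (the labels and the r9 status register are
 * illustrative, not taken from this file):
 *
 *	li	r9,-EFAULT	# assume failure
 * 2:	lfs	fr0,0(r4)	# may fault on the address in r4
 *	li	r9,0		# reached only if the load succeeded
 * 3:	...			# a fault at 2: resumes here, r9 = -EFAULT
 *	extab	2b,3b
 */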

/* Get the contents of frN into fr0; N is in r3. */
	blr			/* fr0 is already in fr0 */
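
/*
 * The register number N is only known at run time, but frN must be
 * encoded statically in an instruction, so this helper dispatches into
 * a table of 32 two-instruction stubs generated with .rept; the blr
 * above is stub 0, which has nothing to copy.  A hedged sketch of the
 * technique (register choices and the label are assumptions):
 *
 *	mflr	r0		# keep the real return address
 *	rlwinm	r3,r3,3,0xf8	# r3 = N * 8; each stub is 8 bytes
 *	bcl	20,31,1f	# branch always with link: LR = table base
 *	blr			# stub 0: fr0 is already fr0
 *	nop
 * reg = 1
 *	.rept	31
 *	fmr	fr0,reg		# stub N copies frN into fr0 ...
 *	blr			# ... and returns to the original caller
 * reg = reg + 1
 *	.endr
 * 1:	mflr	r5		# table base recorded by the bcl
 *	add	r5,r3,r5	# address of stub N
 *	mtctr	r5
 *	mtlr	r0		# so each stub's blr returns to the caller
 *	bctr
 *
 * The "put" helper below is the mirror image, copying fr0 into frN.
 */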

/* Put the contents of fr0 into frN; N is in r3. */
	blr			/* fr0 is already in fr0 */

/* Load FP reg N from float at *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	stfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
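
/*
 * The FP load helpers share one skeleton; a hedged outline, with the
 * MSR handling and the r9 status register being assumptions built
 * around the fragments visible here:
 *
 *  - make a stack frame and save LR (PPC_STLU/PPC_STL above);
 *  - set MSR_FP in the kernel's MSR (mfmsr, or with MSR_FP, mtmsr,
 *    isync) so the FP instructions below can execute;
 *  - if N != 0, park fr0 in the STKFRM-16(r1) slot (the stfd above);
 *  - do the access (lfs here) under an extab entry, leaving 0 or
 *    -EFAULT in r9;
 *  - move fr0 into frN with the "put" helper, restore fr0, LR and the
 *    original MSR, and return the status in r3.
 *
 * The double-precision loader that follows is the same except that
 * the lfs becomes an lfd.
 */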

/* Load FP reg N from double at *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	stfd	fr0,STKFRM-16(r1)
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)

/* Store FP reg N to float at *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	stfd	fr0,STKFRM-16(r1)
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
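
/*
 * The FP store helpers run the same skeleton in reverse (again a
 * hedged outline): fetch frN into fr0 with the "get" helper first,
 * then perform the extab-protected stfs/stfd to *p, and return 0 or
 * -EFAULT in r3.
 */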

/* Store FP reg N to double at *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
	stfd	fr0,STKFRM-16(r1)
	lfd	fr0,STKFRM-16(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)

#ifdef CONFIG_ALTIVEC
/* Get the contents of vrN into vr0; N is in r3. */
	blr			/* vr0 is already in vr0 */
	vor	vr0,reg,reg	/* assembler doesn't know vmr? */

/* Put the contents of vr0 into vrN; N is in r3. */
	blr			/* vr0 is already in vr0 */
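
/*
 * The VMX get/put pair reuses the 8-byte-stub dispatch sketched above
 * for the FP case; vor vr0,vrN,vrN stands in for a register move
 * because not every assembler accepts vmr.
 */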

/* Load vector reg N from *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
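
/*
 * The vector load/store helpers follow the FP skeleton (hedged as
 * above) with MSR_VEC enabled instead of MSR_FP and vr0 parked in the
 * 16-byte slot via stvx/lvx, which is why that slot is kept quadword
 * aligned.  lvx and stvx ignore the low four bits of the effective
 * address, so unaligned accesses remain the caller's problem.
 */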

/* Store vector reg N to *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)

#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Get the contents of vsrN into vsr0; N is in r3. */
	blr			/* vsr0 is already in vsr0 */

/* Put the contents of vsr0 into vsrN; N is in r3. */
	blr			/* vsr0 is already in vsr0 */
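
/*
 * VSX has 64 registers, so (as a hedged extension of the FP sketch
 * above) the stub table doubles in size: the index mask grows from
 * 0xf8 to 0x1f8 and each stub copies via xxlor, since VSX has no
 * plain register move.
 */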

/* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)
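
/*
 * The VSX load/store helpers again reuse the FP skeleton (described
 * above by analogy only), enabling MSR_VSX and moving all 128 bits of
 * the register with a vector-doubleword load/store (lxvd2x/stxvd2x);
 * the return value in r3 is 0 or -EFAULT as before.
 */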

/* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */
	PPC_STLU r1,-STKFRM(r1)
	PPC_STL	r0,STKFRM+PPC_LR_STKOFF(r1)
4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)

#endif /* CONFIG_VSX */

#endif /* CONFIG_PPC_FPU */