#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* void do_load_up_transact_altivec(struct thread_struct *thread)
 *
 * This is similar to load_up_altivec but for the transactional version of the
 * vector regs.  It doesn't mess with the task MSR or valid flags.
 * Furthermore, VEC laziness is not supported with TM currently.
 */
_GLOBAL(do_load_up_transact_altivec)
	mfmsr	r6
	oris	r5,r6,MSR_VEC@h
	MTMSRD(r5)
	isync

	li	r4,1
	stw	r4,THREAD_USED_VR(r3)

	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
	lvx	vr0,r10,r3
	mtvscr	vr0
	addi	r10,r3,THREAD_TRANSACT_VRSTATE
	REST_32VRS(0,r4,r10)

	/* Disable VEC again. */
	MTMSRD(r6)
	isync

	blr
#endif
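
/*
 * Roughly, in C (a sketch only; restore_vr() is a made-up name for
 * the VSCR + REST_32VRS sequence above, not a real kernel helper):
 *
 *	void do_load_up_transact_altivec(struct thread_struct *thread)
 *	{
 *		thread->used_vr = 1;
 *		restore_vr(&thread->transact_vr);  // VSCR, then VR0-VR31
 *	}
 */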

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	vr0,r4,r3
	mtvscr	vr0
	REST_32VRS(0,r4,r3)
	blr

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	vr0
	li	r4, VRSTATE_VSCR
	stvx	vr0, r4, r3
	blr
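
/*
 * Seen from C, each routine takes a single pointer (a sketch; the
 * authoritative declarations live in the C headers):
 *
 *	void load_vr_state(struct thread_vr_state *vr);
 *	void store_vr_state(struct thread_vr_state *vr);
 */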

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code.  Note that we could rely on the vrsave value to
 * eventually avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f

	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	addi	r6,r4,THREAD_VRSTATE
	SAVE_32VRS(0,r5,r6)
	mfvscr	vr0
	li	r10,VRSTATE_VSCR
	stvx	vr0,r10,r6
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
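
/*
 * The UP-only block above is, in rough C (illustration only; the real
 * code runs on physical addresses via toreal() and is limited to the
 * exception-safe registers):
 *
 *	if (last_task_used_altivec) {
 *		struct thread_struct *t = &last_task_used_altivec->thread;
 *		store_vr_state(&t->vr_state);	// VR0-VR31 plus VSCR
 *		t->regs->msr &= ~MSR_VEC;	// owner must trap to reload
 *	}
 */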

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
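	/*
	 * VRSAVE is a 32-bit register that, by ABI convention, is a
	 * bitmask (bit i set means VRi is in use); writing -1 claims
	 * every vector register, so a context switch can never discard
	 * live state for applications that never maintain VRSAVE.
	 */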
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	addi	r6,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r6
	mtvscr	vr0
	REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr
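
/*
 * Note: load_up_altivec is reached from the AltiVec-unavailable
 * exception, so "return" here means unwinding back through the
 * exception exit path with MSR_VEC set in the saved MSR; the vector
 * instruction that trapped is then retried and succeeds.
 */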

_GLOBAL(giveup_altivec_notask)
	mfmsr	r3
	andis.	r4,r3,MSR_VEC@h
	bnelr				/* Already enabled? */
	oris	r3,r3,MSR_VEC@h
	SYNC
	MTMSRD(r3)			/* enable use of VMX now */
	isync
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r7)
	mfvscr	vr0
	li	r4,VRSTATE_VSCR
	stvx	vr0,r4,r7
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr
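
/*
 * Typical use from C (sketch only; the real call sites are in the
 * context-switch and signal/ptrace paths):
 *
 *	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_VEC))
 *		giveup_altivec(tsk);	// flush VMX state to thread_struct
 */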

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */
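
	/*
	 * Note the beql idiom above: branch-and-link is taken only when
	 * the preceding andi./andis. result was zero, i.e. the unit is
	 * still disabled, so each load_up_* helper runs at most once
	 * and returns straight back here.
	 */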

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return
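
/*
 * Design note: VSX has no register file of its own; VSR0-31 overlay
 * the FPRs and VSR32-63 overlay the VRs.  That is why load_up_vsx
 * only has to pull in the FP and VMX images and then set MSR_VSX.
 */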

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
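/*
 * No register image is written out here because of the overlay noted
 * above: the callers save the FP and VMX state separately, and that
 * already captures every VSX register.
 */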
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */

/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
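/*
 * A caller is expected to provide that guarantee itself, e.g.
 * (sketch only, not a real call site):
 *
 *	preempt_disable();
 *	vaddfp(dst, a, b);	// or any other emulation helper below
 *	preempt_enable();
 */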
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */
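
/*
 * IEEE 754 single precision: sign(1) | exponent(8, bias 127) | fraction(23).
 * 0x3f800000 has exponent 127 -> 2^(127-127) * 1.0 = 1.0, and
 * 0x3f000000 has exponent 126 -> 2^(126-127) * 1.0 = 0.5.
 */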

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */
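
/*
 * Likewise for double precision: sign(1) | exponent(11, bias 1023) |
 * fraction(52); exponent 0x3ff = 1023 encodes 2^0 (1.0) and
 * 0x3fe = 1022 encodes 2^-1 (0.5).
 */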

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
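
/*
 * Note: this emulation uses a true divide, so it is more accurate
 * than the hardware vrefp estimate is required to be (the VMX
 * architecture permits a relative error of up to 1/4096 there).
 */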

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0			/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0		/* r * s */
	fmuls	fr2,fr1,fr5		/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4		/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1		/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0		/* r * s */
	fmuls	fr2,fr1,fr5		/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4		/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1		/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable
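
/*
 * Why this converges: for f(r) = 1/r^2 - s, Newton-Raphson gives
 *
 *	r' = r - f(r)/f'(r) = r + (r/2) * (1 - s*r*r)
 *
 * which is exactly the fnmsubs/fmadds pair above.  Each iteration
 * roughly doubles the number of correct bits, so two passes over
 * frsqrte's ~12-bit estimate reach single-precision accuracy.
 */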