// SPDX-License-Identifier: GPL-2.0
/*
 * In-kernel vector facility support functions
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <asm/fpu/types.h>
#include <asm/fpu/api.h>

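/*
 * asm/vx-insn.h provides assembler macros for the vector instruction
 * mnemonics (VLM, VSTM, ...) used in the inline assembly below, so that
 * the code also builds with assemblers that lack vector facility support.
 */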
asm(".include \"asm/vx-insn.h\"\n");

void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
{
	/*
	 * Limit the save to the FPU/vector registers already
	 * in use by the previous context
	 */
	flags &= state->mask;

	if (flags & KERNEL_FPC)
		/* Save floating point control */
		asm volatile("stfpc %0" : "=Q" (state->fpc));

	if (!MACHINE_HAS_VX) {
		if (flags & KERNEL_VXR_V0V7) {
			/* Save floating-point registers */
			asm volatile("std 0,%0" : "=Q" (state->fprs[0]));
			asm volatile("std 1,%0" : "=Q" (state->fprs[1]));
			asm volatile("std 2,%0" : "=Q" (state->fprs[2]));
			asm volatile("std 3,%0" : "=Q" (state->fprs[3]));
			asm volatile("std 4,%0" : "=Q" (state->fprs[4]));
			asm volatile("std 5,%0" : "=Q" (state->fprs[5]));
			asm volatile("std 6,%0" : "=Q" (state->fprs[6]));
			asm volatile("std 7,%0" : "=Q" (state->fprs[7]));
			asm volatile("std 8,%0" : "=Q" (state->fprs[8]));
			asm volatile("std 9,%0" : "=Q" (state->fprs[9]));
			asm volatile("std 10,%0" : "=Q" (state->fprs[10]));
			asm volatile("std 11,%0" : "=Q" (state->fprs[11]));
			asm volatile("std 12,%0" : "=Q" (state->fprs[12]));
			asm volatile("std 13,%0" : "=Q" (state->fprs[13]));
			asm volatile("std 14,%0" : "=Q" (state->fprs[14]));
			asm volatile("std 15,%0" : "=Q" (state->fprs[15]));
		}
		return;
	}

	/* Test and save vector registers */
	asm volatile(
		/*
		 * Test if any vector register must be saved and, if so,
		 * test if all registers can be saved.
		 */
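		/*
		 * Branch conditions below: after "tmll" the condition code
		 * reflects the tested mask bits.  "jz" branches when none
		 * of them is set, "jo" when all of them are set, and
		 * "brc 2" when they are mixed with the leftmost tested bit
		 * set; the fall-through case is mixed with the leftmost
		 * tested bit clear.
		 */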
57 " la 1,%[vxrs]\n" /* load save area */
58 " tmll %[m],30\n" /* KERNEL_VXR */
59 " jz 7f\n" /* no work -> done */
60 " jo 5f\n" /* -> save V0..V31 */
		/*
		 * Test for special case KERNEL_FPU_MID only. In this
		 * case a vstm V8..V23 is the best instruction
		 */
		"	chi	%[m],12\n"	/* KERNEL_VXR_MID */
		"	jne	0f\n"		/* -> save V8..V23 */
		"	VSTM	8,23,128,1\n"	/* vstm %v8,%v23,128(%r1) */
		"	j	7f\n"
		/* Test and save the first half of 16 vector registers */
		"0:	tmll	%[m],6\n"	/* KERNEL_VXR_LOW */
		"	jz	3f\n"		/* -> KERNEL_VXR_HIGH */
		"	jo	2f\n"		/* 11 -> save V0..V15 */
		"	brc	2,1f\n"		/* 10 -> save V8..V15 */
		"	VSTM	0,7,0,1\n"	/* vstm %v0,%v7,0(%r1) */
		"	j	3f\n"
		"1:	VSTM	8,15,128,1\n"	/* vstm %v8,%v15,128(%r1) */
		"	j	3f\n"
		"2:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		/* Test and save the second half of 16 vector registers */
		"3:	tmll	%[m],24\n"	/* KERNEL_VXR_HIGH */
		"	jz	7f\n"
		"	jo	6f\n"		/* 11 -> save V16..V31 */
		"	brc	2,4f\n"		/* 10 -> save V24..V31 */
		"	VSTM	16,23,256,1\n"	/* vstm %v16,%v23,256(%r1) */
		"	j	7f\n"
		"4:	VSTM	24,31,384,1\n"	/* vstm %v24,%v31,384(%r1) */
		"	j	7f\n"
		"5:	VSTM	0,15,0,1\n"	/* vstm %v0,%v15,0(%r1) */
		"6:	VSTM	16,31,256,1\n"	/* vstm %v16,%v31,256(%r1) */
		"7:"
		: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
		: [m] "d" (flags)
		: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_begin);

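/*
 * Illustrative sketch only (not part of the original file): in-kernel
 * users are expected to go through the kernel_fpu_begin() and
 * kernel_fpu_end() wrappers declared in <asm/fpu/api.h>, passing a mask
 * of the register ranges they intend to clobber.  The function below is
 * a hypothetical example, not an existing kernel API.
 */
#if 0
static void vx_example(void)
{
	struct kernel_fpu vxstate;

	kernel_fpu_begin(&vxstate, KERNEL_VXR_LOW);	/* may clobber V0..V15 */
	/* ... inline assembly using %v0..%v15 goes here ... */
	kernel_fpu_end(&vxstate, KERNEL_VXR_LOW);	/* restore V0..V15 */
}
#endif
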
void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
{
	/*
	 * Limit the restore to the FPU/vector registers of the
	 * previous context that have been overwritten by the
	 * current context
	 */
	flags &= state->mask;

	if (flags & KERNEL_FPC)
		/* Restore floating-point controls */
		asm volatile("lfpc %0" : : "Q" (state->fpc));

	if (!MACHINE_HAS_VX) {
		if (flags & KERNEL_VXR_V0V7) {
			/* Restore floating-point registers */
			asm volatile("ld 0,%0" : : "Q" (state->fprs[0]));
			asm volatile("ld 1,%0" : : "Q" (state->fprs[1]));
			asm volatile("ld 2,%0" : : "Q" (state->fprs[2]));
			asm volatile("ld 3,%0" : : "Q" (state->fprs[3]));
			asm volatile("ld 4,%0" : : "Q" (state->fprs[4]));
			asm volatile("ld 5,%0" : : "Q" (state->fprs[5]));
			asm volatile("ld 6,%0" : : "Q" (state->fprs[6]));
			asm volatile("ld 7,%0" : : "Q" (state->fprs[7]));
			asm volatile("ld 8,%0" : : "Q" (state->fprs[8]));
			asm volatile("ld 9,%0" : : "Q" (state->fprs[9]));
			asm volatile("ld 10,%0" : : "Q" (state->fprs[10]));
			asm volatile("ld 11,%0" : : "Q" (state->fprs[11]));
			asm volatile("ld 12,%0" : : "Q" (state->fprs[12]));
			asm volatile("ld 13,%0" : : "Q" (state->fprs[13]));
			asm volatile("ld 14,%0" : : "Q" (state->fprs[14]));
			asm volatile("ld 15,%0" : : "Q" (state->fprs[15]));
		}
		return;
	}

	/* Test and restore (load) vector registers */
	asm volatile(
		/*
		 * Test if any vector register must be loaded and, if so,
		 * test if all registers can be loaded at once.
		 */
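		/*
		 * The branches below follow the same tmll condition-code
		 * scheme as in __kernel_fpu_begin() above.
		 */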
139 " la 1,%[vxrs]\n" /* load restore area */
140 " tmll %[m],30\n" /* KERNEL_VXR */
141 " jz 7f\n" /* no work -> done */
142 " jo 5f\n" /* -> restore V0..V31 */
		/*
		 * Test for special case KERNEL_FPU_MID only. In this
		 * case a vlm V8..V23 is the best instruction
		 */
		"	chi	%[m],12\n"	/* KERNEL_VXR_MID */
		"	jne	0f\n"		/* -> restore V8..V23 */
		"	VLM	8,23,128,1\n"	/* vlm %v8,%v23,128(%r1) */
		"	j	7f\n"
		/* Test and restore the first half of 16 vector registers */
		"0:	tmll	%[m],6\n"	/* KERNEL_VXR_LOW */
		"	jz	3f\n"		/* -> KERNEL_VXR_HIGH */
		"	jo	2f\n"		/* 11 -> restore V0..V15 */
		"	brc	2,1f\n"		/* 10 -> restore V8..V15 */
		"	VLM	0,7,0,1\n"	/* vlm %v0,%v7,0(%r1) */
		"	j	3f\n"
		"1:	VLM	8,15,128,1\n"	/* vlm %v8,%v15,128(%r1) */
		"	j	3f\n"
		"2:	VLM	0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
		/* Test and restore the second half of 16 vector registers */
		"3:	tmll	%[m],24\n"	/* KERNEL_VXR_HIGH */
		"	jz	7f\n"
		"	jo	6f\n"		/* 11 -> restore V16..V31 */
		"	brc	2,4f\n"		/* 10 -> restore V24..V31 */
		"	VLM	16,23,256,1\n"	/* vlm %v16,%v23,256(%r1) */
		"	j	7f\n"
		"4:	VLM	24,31,384,1\n"	/* vlm %v24,%v31,384(%r1) */
		"	j	7f\n"
		"5:	VLM	0,15,0,1\n"	/* vlm %v0,%v15,0(%r1) */
		"6:	VLM	16,31,256,1\n"	/* vlm %v16,%v31,256(%r1) */
		"7:"
		: [vxrs] "=Q" (*(struct vx_array *) &state->vxrs)
		: [m] "d" (flags)
		: "1", "cc");
}
EXPORT_SYMBOL(__kernel_fpu_end);

void __load_fpu_regs(void)
{
	struct fpu *state = &current->thread.fpu;
	unsigned long *regs = current->thread.fpu.regs;

184 asm volatile("lfpc %0" : : "Q" (state->fpc));
185 if (likely(MACHINE_HAS_VX)) {
186 asm volatile("lgr 1,%0\n"
191 : "1", "cc", "memory");
193 asm volatile("ld 0,%0" : : "Q" (regs[0]));
194 asm volatile("ld 1,%0" : : "Q" (regs[1]));
195 asm volatile("ld 2,%0" : : "Q" (regs[2]));
196 asm volatile("ld 3,%0" : : "Q" (regs[3]));
197 asm volatile("ld 4,%0" : : "Q" (regs[4]));
198 asm volatile("ld 5,%0" : : "Q" (regs[5]));
199 asm volatile("ld 6,%0" : : "Q" (regs[6]));
200 asm volatile("ld 7,%0" : : "Q" (regs[7]));
201 asm volatile("ld 8,%0" : : "Q" (regs[8]));
202 asm volatile("ld 9,%0" : : "Q" (regs[9]));
203 asm volatile("ld 10,%0" : : "Q" (regs[10]));
204 asm volatile("ld 11,%0" : : "Q" (regs[11]));
205 asm volatile("ld 12,%0" : : "Q" (regs[12]));
206 asm volatile("ld 13,%0" : : "Q" (regs[13]));
207 asm volatile("ld 14,%0" : : "Q" (regs[14]));
208 asm volatile("ld 15,%0" : : "Q" (regs[15]));
	}
	clear_cpu_flag(CIF_FPU);
}
EXPORT_SYMBOL(__load_fpu_regs);

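/*
 * __load_fpu_regs() must run with interrupts disabled so that the
 * register reload and the clearing of CIF_FPU cannot be interleaved
 * with an interrupt that itself saves or loads the FPU state.
 */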
214 void load_fpu_regs(void)
216 raw_local_irq_disable();
218 raw_local_irq_enable();
EXPORT_SYMBOL(load_fpu_regs);

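/*
 * save_fpu_regs() flushes the current user-space FPU/vector state to
 * current->thread.fpu and sets CIF_FPU.  While CIF_FPU is set, the
 * register contents of the CPU may be clobbered by in-kernel users;
 * the saved state is reloaded, and the flag cleared again, by
 * load_fpu_regs() or by the return-to-user path.
 */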
void save_fpu_regs(void)
{
	unsigned long flags, *regs;
	struct fpu *state;

	local_irq_save(flags);

	if (test_cpu_flag(CIF_FPU))
		goto out;

	state = &current->thread.fpu;
	regs = current->thread.fpu.regs;

235 asm volatile("stfpc %0" : "=Q" (state->fpc));
236 if (likely(MACHINE_HAS_VX)) {
237 asm volatile("lgr 1,%0\n"
242 : "1", "cc", "memory");
244 asm volatile("std 0,%0" : "=Q" (regs[0]));
245 asm volatile("std 1,%0" : "=Q" (regs[1]));
246 asm volatile("std 2,%0" : "=Q" (regs[2]));
247 asm volatile("std 3,%0" : "=Q" (regs[3]));
248 asm volatile("std 4,%0" : "=Q" (regs[4]));
249 asm volatile("std 5,%0" : "=Q" (regs[5]));
250 asm volatile("std 6,%0" : "=Q" (regs[6]));
251 asm volatile("std 7,%0" : "=Q" (regs[7]));
252 asm volatile("std 8,%0" : "=Q" (regs[8]));
253 asm volatile("std 9,%0" : "=Q" (regs[9]));
254 asm volatile("std 10,%0" : "=Q" (regs[10]));
255 asm volatile("std 11,%0" : "=Q" (regs[11]));
256 asm volatile("std 12,%0" : "=Q" (regs[12]));
257 asm volatile("std 13,%0" : "=Q" (regs[13]));
258 asm volatile("std 14,%0" : "=Q" (regs[14]));
259 asm volatile("std 15,%0" : "=Q" (regs[15]));
	}
	set_cpu_flag(CIF_FPU);
out:
	local_irq_restore(flags);
}
EXPORT_SYMBOL(save_fpu_regs);

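/*
 * Illustrative sketch only (hypothetical helper, not an existing kernel
 * function): code that inspects the calling task's floating-point state
 * must call save_fpu_regs() first so that current->thread.fpu is up to
 * date before it is read.
 */
#if 0
static u32 current_fpc(void)
{
	save_fpu_regs();	/* flush live registers to thread.fpu */
	return current->thread.fpu.fpc;
}
#endif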