/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/dsemul.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}
void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
	if (unlikely(args->fn)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;

		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = (unsigned long)args->fn;
		p->thread.reg17 = (unsigned long)args->fn_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
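
/*
 * With CONFIG_STACKPROTECTOR the compiler emits prologue/epilogue code
 * that stores this global canary below the return address on function
 * entry and compares it on exit, calling __stack_chk_fail() on a
 * mismatch.
 */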
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
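
/*
 * Worked example (illustrative): a "j" instruction at pc 0x80123450
 * with a 26-bit target field of 0x012345 resolves to
 * (0x80000000 | (0x012345 << 2)) = 0x80048d14 - the upper four bits
 * of the pc are kept and the instruction index is scaled to a byte
 * address.
 */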
static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;
			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}
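
/*
 * Taken together, the helpers above recognize a classic MIPS prologue
 * such as (illustrative):
 *
 *	addiu	sp, sp, -32	# is_sp_move_ins(): frame_size = 32
 *	sw	ra, 28(sp)	# is_ra_save_ins(): pc_offset = 28/sizeof(ulong)
 *
 * which is all get_frame_info() below needs in order to locate the
 * caller's return address relative to the callee's stack pointer.
 */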
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for jump instructions until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
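
/*
 * To summarize the return convention used above: 0 means a normal
 * nested function (frame_size and pc_offset are both valid), 1 means
 * a leaf function (nothing was saved on the stack), and -1 means the
 * prologue could not be analyzed.
 */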
static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and __get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);
/*
 * Return the saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START,
	 * task stacks at THREAD_SIZE - 32.
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task.
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
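
/*
 * A typical caller walks the whole stack by iterating until the
 * unwinder returns 0, along the lines of (sketch, not verbatim from
 * show_backtrace()):
 *
 *	pc = regs->cp0_epc;
 *	sp = regs->regs[29];
 *	ra = regs->regs[31];
 *	do {
 *		print_ip_sym(KERN_DEFAULT, pc);
 *		pc = unwind_stack(task, &sp, pc, &ra);
 *	} while (pc);
 */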
/*
 * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= prandom_u32_max(PAGE_SIZE);

	return sp & ALMASK;
}
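
/*
 * Example (assuming 4 KB pages): up to PAGE_SIZE - 1 bytes are shaved
 * off the initial stack pointer, after which the "& ALMASK" above
 * restores the ABI-required alignment by rounding down.
 */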
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}
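
/*
 * For example, a task whose threads run with FR=1 and FRE=0 reports
 * PR_FP_MODE_FR here; this is what prctl(PR_GET_FP_MODE) returns to
 * userland.
 */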
static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	cpus_read_lock();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	cpus_read_unlock();

	return 0;
}
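
/*
 * Userland drives this through prctl(2); e.g. an o32 process built for
 * FP64 could request (illustrative):
 *
 *	prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE, 0, 0, 0);
 */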
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */