/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/delay.h>
#include <linux/context_tracking.h>
#include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h>
#include <asm/syscalls.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
#include <arch/chip.h>
#include <arch/abi.h>
#include <arch/sim_def.h>
/*
 * Use the (x86) "idle=poll" option to prefer low latency when leaving the
 * idle loop over low power while in the idle loop, e.g. if we have
 * one thread per core and we want to get threads out of futex waits fast.
 */
static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads\n");
                cpu_idle_poll_ctrl(true);
                return 0;
        } else if (!strcmp(str, "halt")) {
                return 0;
        }
        return -1;
}
early_param("idle", idle_setup);
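
/*
 * Usage sketch (illustrative, not kernel code): to favor wakeup latency
 * over idle power, add "idle=poll" to the kernel command line, e.g.:
 *
 *     console=ttyS0 root=/dev/sda1 idle=poll
 *
 * "idle=halt" is also accepted and keeps the default low-power idle;
 * any other value causes idle_setup() above to report failure.
 */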
void arch_cpu_idle(void)
{
        __this_cpu_write(irq_stat.idle_timestamp, jiffies);
        _cpu_idle();
}

/*
 * Release a thread_info structure
 */
void arch_release_thread_stack(unsigned long *stack)
{
        struct thread_info *info = (void *)stack;
        struct single_step_state *step_state = info->step_state;

        if (step_state) {

                /*
                 * FIXME: we don't munmap step_state->buffer
                 * because the mm_struct for this process (info->task->mm)
                 * has already been zeroed in exit_mm().  Keeping a
                 * reference to it here seems like a bad move, so this
                 * means we can't munmap() the buffer, and therefore if we
                 * ptrace multiple threads in a process, we will slowly
                 * leak user memory.  (Note that as soon as the last
                 * thread in a process dies, we will reclaim all user
                 * memory including single-step buffers in the usual way.)
                 * We should either assign a kernel VA to this buffer
                 * somehow, or we should associate the buffer(s) with the
                 * mm itself so we can clean them up that way.
                 */
                kfree(step_state);
        }
}
static void save_arch_state(struct thread_struct *t);

int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);
        unsigned long ksp;
        unsigned long *callee_regs;

        /*
         * Set up the stack and stack pointer appropriately for the
         * new child to find itself woken up in __switch_to().
         * The callee-saved registers must be on the stack to be read;
         * the new task will then jump to assembly support to handle
         * calling schedule_tail(), etc., and (for userspace tasks)
         * returning to the context set up in the pt_regs.
         */
        ksp = (unsigned long) childregs;
        ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
        ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
        ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
        callee_regs = (unsigned long *)ksp;
        ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
        ((long *)ksp)[0] = ((long *)ksp)[1] = 0;
        p->thread.ksp = ksp;
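
        /*
         * Sketch of the layout just built at the top of the child's
         * kernel stack (higher addresses first; sizes symbolic):
         *
         *     [ struct pt_regs (childregs)                   ]
         *     [ C_ABI_SAVE_AREA_SIZE interrupt-entry area    ]
         *     [ CALLEE_SAVED_REGS_COUNT words (callee_regs)  ]
         *     [ C_ABI_SAVE_AREA_SIZE __switch_to() area      ]  <- ksp
         *
         * p->thread.ksp is the sp the child runs with when it is
         * first switched in.
         */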

        /* Record the pid of the task that created this one. */
        p->thread.creator_pid = current->pid;

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                memset(&callee_regs[2], 0,
                       (CALLEE_SAVED_REGS_COUNT - 2) * sizeof(unsigned long));
                callee_regs[0] = sp;   /* r30 = function */
                callee_regs[1] = arg;  /* r31 = arg */
                p->thread.pc = (unsigned long) ret_from_kernel_thread;
                return 0;
        }

        /*
         * Start new thread in ret_from_fork so it schedules properly
         * and then return from interrupt like the parent.
         */
        p->thread.pc = (unsigned long) ret_from_fork;

        /*
         * Do not clone step state from the parent; each thread
         * must make its own lazily.
         */
        task_thread_info(p)->step_state = NULL;

#ifdef __tilegx__
        /*
         * Do not clone unalign jit fixup from the parent; each thread
         * must allocate its own on demand.
         */
        task_thread_info(p)->unalign_jit_base = NULL;
#endif

        /*
         * Copy the registers onto the kernel stack so the
         * return-from-interrupt code will reload it into registers.
         */
        *childregs = *current_pt_regs();
        childregs->regs[0] = 0;         /* return value is zero */
        if (sp)
                childregs->sp = sp;  /* override with new user stack pointer */
        memcpy(callee_regs, &childregs->regs[CALLEE_SAVED_FIRST_REG],
               CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));

        /* Save user stack top pointer so we can ID the stack vm area later. */
        p->thread.usp0 = childregs->sp;

        /*
         * If CLONE_SETTLS is set, set "tp" in the new task to "r4",
         * which is passed in as arg #5 to sys_clone().
         */
        if (clone_flags & CLONE_SETTLS)
                childregs->tp = childregs->regs[4];
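
        /*
         * Illustrative userspace view (assumed glibc-style wrapper, not
         * kernel code): a threading library reaches the CLONE_SETTLS
         * path above with something like
         *
         *     clone(fn, stack, flags | CLONE_SETTLS, arg, &ptid, tls, &ctid);
         *
         * where the syscall stub marshals "tls" into r4, which is what
         * shows up here as childregs->regs[4].
         */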

#if CHIP_HAS_TILE_DMA()
        /*
         * No DMA in the new thread.  We model this on the fact that
         * fork() clears the pending signals, alarms, and aio for the child.
         */
        memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
        memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

        /* New thread has its miscellaneous processor state bits clear. */
        p->thread.proc_status = 0;

#ifdef CONFIG_HARDWALL
        /* New thread does not own any networks. */
        memset(&p->thread.hardwall[0], 0,
               sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif

        /*
         * Start the new thread with the current architecture state
         * (user interrupt masks, etc.).
         */
        save_arch_state(&p->thread);

        return 0;
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        task_thread_info(tsk)->align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(task_thread_info(tsk)->align_ctl,
                        (unsigned int __user *)adr);
}
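
/*
 * These two hooks back the generic prctl(2) unaligned-access controls
 * (PR_SET_UNALIGN / PR_GET_UNALIGN in kernel/sys.c).  Illustrative
 * userspace usage:
 *
 *     prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);   // silently fix up
 *     prctl(PR_GET_UNALIGN, (unsigned long)&val);  // read current mode
 */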
static struct task_struct corrupt_current = { .comm = "<corrupt>" };

/*
 * Return "current" if it looks plausible, or else a pointer to a dummy.
 * This can be helpful if we are just trying to emit a clean panic.
 */
struct task_struct *validate_current(void)
{
        struct task_struct *tsk = current;
        if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
                     (high_memory && (void *)tsk > high_memory) ||
                     ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
                pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
                tsk = &corrupt_current;
        }
        return tsk;
}
/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
        struct task_struct *tsk = current;
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
                     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
                     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
        return prev;
}

int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *ptregs = task_pt_regs(tsk);
        elf_core_copy_regs(regs, ptregs);
        return 1;
}
#if CHIP_HAS_TILE_DMA()

/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
#endif
}

/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
#if CONFIG_KERNEL_PL == 2
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_2, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_2, 1);
#else
        __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
        __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
#endif
}
/* Pause the DMA engine, then save off its state registers. */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
        unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
        unsigned long post_suspend_state;

        /* If we're running, suspend the engine. */
        if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

        /*
         * Wait for the engine to idle, then save regs.  Note that we
         * want to record the "running" bit from before suspension,
         * and the "done" bit from after, so that we can properly
         * distinguish a case where the user suspended the engine from
         * the case where the kernel suspended as part of the context
         * swap.
         */
        do {
                post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
        } while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

        dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
        dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
        dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
        dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
        dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
        dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
        dma->byte = __insn_mfspr(SPR_DMA_BYTE);
        dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
                (post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}
/* Restart a DMA that was running before we were context-switched out. */
static void restore_tile_dma_state(struct thread_struct *t)
{
        const struct tile_dma_state *dma = &t->tile_dma_state;

        /*
         * The only way to restore the done bit is to run a zero
         * length transaction.
         */
        if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
            !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
                __insn_mtspr(SPR_DMA_BYTE, 0);
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
                while (__insn_mfspr(SPR_DMA_USER_STATUS) &
                       SPR_DMA_STATUS__BUSY_MASK)
                        ;
        }

        __insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
        __insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
        __insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
        __insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
        __insn_mtspr(SPR_DMA_STRIDE, dma->strides);
        __insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
        __insn_mtspr(SPR_DMA_BYTE, dma->byte);

        /*
         * Restart the engine if we were running and not done.
         * Clear a pending async DMA fault that we were waiting on return
         * to user space to execute, since we expect the DMA engine
         * to regenerate those faults for us now.  Note that we don't
         * try to clear the TIF_ASYNC_TLB flag, since it's relatively
         * harmless if set, and it covers both DMA and the SN processor.
         */
        if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
                t->dma_async_tlb.fault_num = 0;
                __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
        }
}

#endif /* CHIP_HAS_TILE_DMA() */
static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
        t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
                ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
        t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
        t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
        t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
        t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
        t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
        t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
        t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
        t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
        t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#if !CHIP_HAS_FIXED_INTVEC_BASE()
        t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
#endif
        t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
#if CHIP_HAS_DSTREAM_PF()
        t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
#endif
}
static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
        __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
        __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
        __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
        __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
        __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
        __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
        __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
        __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#if !CHIP_HAS_FIXED_INTVEC_BASE()
        __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
#endif
        __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
#if CHIP_HAS_DSTREAM_PF()
        __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
#endif
}

void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_TILE_DMA()
        struct tile_dma_state *dma = &current->thread.tile_dma_state;
        if (dma->enabled)
                save_tile_dma_state(dma);
#endif
}
struct task_struct *__sched _switch_to(struct task_struct *prev,
                                       struct task_struct *next)
{
        /* DMA state is already saved; save off other arch state. */
        save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
        /*
         * Restore DMA in new task if desired.
         * Note that it is only safe to restart here since interrupts
         * are disabled, so we can't take any DMATLB miss or access
         * interrupts before we have finished switching stacks.
         */
        if (next->thread.tile_dma_state.enabled) {
                restore_tile_dma_state(&next->thread);
                grant_dma_mpls();
        } else {
                restrict_dma_mpls();
        }
#endif

        /* Restore other arch state. */
        restore_arch_state(&next->thread);

#ifdef CONFIG_HARDWALL
        /* Enable or disable access to the network registers appropriately. */
        hardwall_switch_tasks(prev, next);
#endif

        /* Notify the simulator of task exit. */
        if (unlikely(prev->state == TASK_DEAD))
                __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
                             (prev->pid << _SIM_CONTROL_OPERATOR_BITS));

        /*
         * Switch kernel SP, PC, and callee-saved registers.
         * In the context of the new task, return the old task pointer
         * (i.e. the task that actually called __switch_to).
         * Pass the value to use for SYSTEM_SAVE_K_0 when we reset our sp.
         */
        return __switch_to(prev, next, next_current_ksp0(next));
}
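
/*
 * Call-flow sketch for a context switch here (assuming the generic
 * scheduler entry points and this arch's switch_to.h macros):
 *
 *     schedule()
 *       -> context_switch()
 *            prepare_arch_switch(next)   -> _prepare_arch_switch()
 *                                           (suspend/save user DMA)
 *            switch_to(prev, next, last) -> _switch_to()
 *                                           (SPRs, DMA, hardwall)
 *                                             -> __switch_to() in asm
 *                                                (sp/pc/callee-saved regs)
 */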
/*
 * This routine is called on return from interrupt if any of the
 * TIF_ALLWORK_MASK flags are set in thread_info->flags.  It is
 * entered with interrupts disabled so we don't miss an event that
 * modified the thread_info flags.  We loop until all the tested flags
 * are clear.  Note that the function is called on certain conditions
 * that are not listed in the loop condition here (e.g. SINGLESTEP)
 * which guarantees we will do those things once, and redo them if any
 * of the other work items is re-done, but won't continue looping if
 * all the other work is done.
 */
void prepare_exit_to_usermode(struct pt_regs *regs, u32 thread_info_flags)
{
        if (WARN_ON(!user_mode(regs)))
                return;

        do {
                local_irq_enable();

                if (thread_info_flags & _TIF_NEED_RESCHED)
                        schedule();

#if CHIP_HAS_TILE_DMA()
                if (thread_info_flags & _TIF_ASYNC_TLB)
                        do_async_page_fault(regs);
#endif

                if (thread_info_flags & _TIF_SIGPENDING)
                        do_signal(regs);

                if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
                }

                local_irq_disable();
                thread_info_flags = READ_ONCE(current_thread_info()->flags);

        } while (thread_info_flags & _TIF_WORK_MASK);

        if (thread_info_flags & _TIF_SINGLESTEP) {
                single_step_once(regs);
#ifndef __tilegx__
                /*
                 * FIXME: on tilepro, since we enable interrupts in
                 * this routine, it's possible that we miss a signal
                 * or other asynchronous event.
                 */
                local_irq_disable();
#endif
        }

        user_enter();
}
unsigned long get_wchan(struct task_struct *p)
{
        struct KBacktraceIterator kbt;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        for (KBacktraceIterator_init(&kbt, p, NULL);
             !KBacktraceIterator_end(&kbt);
             KBacktraceIterator_next(&kbt)) {
                if (!in_sched_functions(kbt.it.pc))
                        return kbt.it.pc;
        }

        return 0;
}
/* Flush thread state. */
void flush_thread(void)
{
        /* Nothing */
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
#ifdef CONFIG_HARDWALL
        /*
         * Remove the task from the list of tasks that are associated
         * with any live hardwalls.  (If the task that is exiting held
         * the last reference to a hardwall fd, it would already have
         * been released and deactivated at this point.)
         */
        hardwall_deactivate_all(tsk);
#endif
}
void tile_show_regs(struct pt_regs *regs)
{
        int i;
#ifdef __tilegx__
        for (i = 0; i < 17; i++)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+18, regs->regs[i+18],
                       i+36, regs->regs[i+36]);
        pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
               regs->regs[17], regs->regs[35], regs->tp);
        pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
#else
        for (i = 0; i < 13; i++)
                pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
                       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
                       i, regs->regs[i], i+14, regs->regs[i+14],
                       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
        pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
               regs->regs[13], regs->tp, regs->sp, regs->lr);
#endif
        pr_err(" pc : "REGFMT" ex1: %ld     faultnum: %ld flags:%s%s%s%s\n",
               regs->pc, regs->ex1, regs->faultnum,
               is_compat_task() ? " compat" : "",
               (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
               !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
               (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
}

void show_regs(struct pt_regs *regs)
{
        struct KBacktraceIterator kbt;

        show_regs_print_info(KERN_DEFAULT);
        tile_show_regs(regs);

        KBacktraceIterator_init(&kbt, NULL, regs);
        tile_show_stack(&kbt);
}
#ifdef __tilegx__
void nmi_raise_cpu_backtrace(struct cpumask *in_mask)
{
        struct cpumask mask;
        HV_Coord tile;
        unsigned int timeout;
        int cpu;
        HV_NMI_Info info[NR_CPUS];

        /* Tentatively dump stack on remote tiles via NMI. */
        timeout = 100;
        cpumask_copy(&mask, in_mask);
        while (!cpumask_empty(&mask) && timeout) {
                for_each_cpu(cpu, &mask) {
                        tile.x = cpu_x(cpu);
                        tile.y = cpu_y(cpu);
                        info[cpu] = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
                        if (info[cpu].result == HV_NMI_RESULT_OK)
                                cpumask_clear_cpu(cpu, &mask);
                }

                mdelay(10);
                touch_softlockup_watchdog();
                timeout--;
        }

        /* Warn about cpus stuck in ICS. */
        if (!cpumask_empty(&mask)) {
                for_each_cpu(cpu, &mask) {

                        /* Clear the bit as if nmi_cpu_backtrace() ran. */
                        cpumask_clear_cpu(cpu, in_mask);

                        switch (info[cpu].result) {
                        case HV_NMI_RESULT_FAIL_ICS:
                                pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
                                        cpu, info[cpu].pc);
                                break;
                        case HV_NMI_RESULT_FAIL_HV:
                                pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
                                        cpu);
                                break;
                        case HV_ENOSYS:
                                WARN_ONCE(1, "Hypervisor too old to allow remote stack dumps.\n");
                                break;
                        default:  /* should not happen */
                                pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
                                        cpu, info[cpu].result, info[cpu].pc);
                                break;
                        }
                }
        }
}
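
/*
 * Note: arch_trigger_cpumask_backtrace() below is reached from generic
 * helpers such as trigger_all_cpu_backtrace() (e.g. via the sysrq 'l'
 * handler); nmi_trigger_cpumask_backtrace() does the bookkeeping and
 * calls back into nmi_raise_cpu_backtrace() above to deliver the NMIs
 * through the hypervisor.
 */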
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self,
                                      nmi_raise_cpu_backtrace);
}

#endif /* __tilegx__ */