1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
5 * kernel entry points (interruptions, system call wrappers)
6 * Copyright (C) 1999,2000 Philipp Rumpf
7 * Copyright (C) 1999 SuSE GmbH Nuernberg
8 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
12 #include <asm/asm-offsets.h>
14 /* we have the following possibilities to act on an interruption:
15 * - handle in assembly and use shadowed registers only
16 * - save registers to kernel stack and handle in assembly or C */
20 #include <asm/cache.h> /* for L1_CACHE_SHIFT */
21 #include <asm/assembly.h> /* for LDREG/STREG defines */
22 #include <asm/signal.h>
23 #include <asm/unistd.h>
25 #include <asm/traps.h>
26 #include <asm/thread_info.h>
27 #include <asm/alternative.h>
28 #include <asm/spinlock_types.h>
30 #include <linux/linkage.h>
31 #include <linux/pgtable.h>
39 /* Get aligned page_table_lock address for this mm from cr28/tr4 */
44 /* space_to_prot macro creates a prot id from a space id */
46 #if (SPACEID_SHIFT) == 0
47 .macro space_to_prot spc prot
	/* SPACEID_SHIFT == 0: form the protection id by a
	 * zeroing left-deposit of the space id. */
48 depd,z \spc,62,31,\prot
	/* NOTE(review): the .endm / #else / #endif lines of this
	 * macro pair are missing from this excerpt. */
51 .macro space_to_prot spc prot
	/* SPACEID_SHIFT != 0: extract the space id into the prot id. */
52 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
56 * The "get_stack" macros are responsible for determining the
60 * Already using a kernel stack, so call the
61 * get_stack_use_r30 macro to push a pt_regs structure
62 * on the stack, and store registers there.
64 * Need to set up a kernel stack, so call the
65 * get_stack_use_cr30 macro to set up a pointer
66 * to the pt_regs structure contained within the
67 * task pointer pointed to by cr30. Load the stack
68 * pointer from the task structure.
70 * Note that we use shadowed registers for temps until
71 * we can save %r26 and %r29. %r26 is used to preserve
72 * %r8 (a shadowed register) which temporarily contained
73 * either the fault type ("code") or the eirr. We need
74 * to use a non-shadowed register to carry the value over
75 * the rfir in virt_map. We use %r26 since this value winds
76 * up being passed as the argument to either do_cpu_irq_mask
77 * or handle_interruption. %r29 is used to hold a pointer
78 * to the register save area, and once again, it needs
79 * be a non-shadowed register so that it survives the rfir.
82 .macro get_stack_use_cr30

	/* Switch to the kernel stack found via the task struct (cr30)
	 * and save the shadowed temporaries into that task's pt_regs.
	 * NOTE(review): some lines of this macro (including the mfctl
	 * that presumably loads %r1 from %cr30) are missing from this
	 * excerpt. */
84 /* we save the registers in the task struct */

88 tophys %r1,%r9 /* task_struct */
89 LDREG TASK_STACK(%r9),%r30 /* kernel stack from task struct */
90 ldo PT_SZ_ALGN(%r30),%r30 /* advance sp past the pt_regs area */
91 mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */

	/* Save the shadowed temporaries into pt_regs.  %r17 presumably
	 * holds the interrupted %r30 -- its copy is not visible here. */
93 ldo TASK_REGS(%r9),%r9
94 STREG %r17,PT_GR30(%r9)
95 STREG %r29,PT_GR29(%r9)
96 STREG %r26,PT_GR26(%r9)
97 STREG %r16,PT_SR7(%r9)
101 .macro get_stack_use_r30

	/* Already on a kernel stack: push a pt_regs frame at %r30 and
	 * save the shadowed temporaries there.
	 * NOTE(review): the instruction that computes %r9 (the pt_regs
	 * pointer) is missing from this excerpt; %r1 presumably holds
	 * the pre-push %r30. */
103 /* we put a struct pt_regs on the stack and save the registers there */

107 ldo PT_SZ_ALGN(%r30),%r30 /* advance sp past the new pt_regs */
108 STREG %r1,PT_GR30(%r9)
109 STREG %r29,PT_GR29(%r9)
110 STREG %r26,PT_GR26(%r9)
111 STREG %r16,PT_SR7(%r9)
116 LDREG PT_GR1(%r29), %r1
117 LDREG PT_GR30(%r29),%r30
118 LDREG PT_GR29(%r29),%r29
121 /* default interruption handler
122 * (calls traps.c:handle_interruption) */
129 /* Interrupt interruption handler
130 * (calls irq.c:do_cpu_irq_mask) */
137 .import os_hpmc, code
141 nop /* must be a NOP, will be patched later */
142 load32 PA(os_hpmc), %r3
145 .word 0 /* checksum (will be patched) */
146 .word 0 /* address of handler */
147 .word 0 /* length of handler */
151 * Performance Note: Instructions will be moved up into
152 * this part of the code later on, once we are sure
153 * that the tlb miss handlers are close to final form.
156 /* Register definitions for tlb miss handler macros */
158 va = r8 /* virtual address for which the trap occurred */
159 spc = r24 /* space for which the trap occurred */
164 * itlb miss interruption handler (parisc 1.1 - 32 bit)
178 * itlb miss interruption handler (parisc 2.0)
195 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
198 .macro naitlb_11 code
209 * naitlb miss interruption handler (parisc 2.0)
212 .macro naitlb_20 code
227 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
241 * dtlb miss interruption handler (parisc 2.0)
258 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
260 .macro nadtlb_11 code
270 /* nadtlb miss interruption handler (parisc 2.0) */
272 .macro nadtlb_20 code
287 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
301 * dirty bit trap interruption handler (parisc 2.0)
317 /* In LP64, the space contains part of the upper 32 bits of the
318 * fault. We have to extract this and place it in the va,
319 * zeroing the corresponding bits in the space register */
320 .macro space_adjust spc,va,tmp
	/* Move the low SPACEID_SHIFT bits of the space id into \tmp,
	 * clear them in \spc, then deposit them into the upper bits
	 * of \va -- i.e. fold the fault's high address bits, which the
	 * hardware reports in the space register, back into the VA.
	 * NOTE(review): the surrounding #ifdef CONFIG_64BIT / .endm
	 * lines are missing from this excerpt. */
322 extrd,u \spc,63,SPACEID_SHIFT,\tmp
323 depd %r0,63,SPACEID_SHIFT,\spc
324 depd \tmp,31,SPACEID_SHIFT,\va
328 .import swapper_pg_dir,code
330 /* Get the pgd. For faults on space zero (kernel space), this
331 * is simply swapper_pg_dir. For user space faults, the
332 * pgd is stored in %cr25 */
333 .macro get_pgd spc,reg
	/* Default to the kernel pgd (physical address of
	 * swapper_pg_dir) ... */
334 ldil L%PA(swapper_pg_dir),\reg
335 ldo R%PA(swapper_pg_dir)(\reg),\reg
	/* ... and nullify the next instruction when \spc == 0 (kernel
	 * space).  NOTE(review): the nullified instruction -- presumably
	 * loading the user pgd from %cr25 -- is not visible in this
	 * excerpt. */
336 or,COND(=) %r0,\spc,%r0
341 space_check(spc,tmp,fault)
343 spc - The space we saw the fault with.
344 tmp - The place to store the current space.
345 fault - Function to call on failure.
347 Only allow faults on different spaces from the
348 currently active one if we're the kernel
351 .macro space_check spc,tmp,fault
	/* Branch to \fault unless the faulting space matches the
	 * current space (\tmp), or we are executing as the kernel.
	 * NOTE(review): the mfsp that loads \tmp is missing from this
	 * excerpt. */
353 /* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
354 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
355 * as kernel, so defeat the space
358 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
359 cmpb,COND(<>),n \tmp,\spc,\fault
362 /* Look up a PTE in a 2-Level scheme (faulting at each
363 * level if the entry isn't present
365 * NOTE: we use ldw even for LP64, since the short pointers
366 * can address up to 1TB
368 .macro L2_ptep pmd,pte,index,va,fault
	/* Walk the lower two levels of the page table: index \pmd by
	 * the pmd/pgd bits of \va, check the present bit (branch to
	 * \fault if clear), then index the resulting table by the pte
	 * bits of \va.  NOTE(review): the #else / #endif lines of the
	 * two CONFIG_PGTABLE_LEVELS conditionals and the final .endm
	 * are missing from this excerpt. */
369 #if CONFIG_PGTABLE_LEVELS == 3
370 extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
372 extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
374 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
375 #if CONFIG_PGTABLE_LEVELS < 3
	/* short (32-bit) pointer load -- works even on LP64 since the
	 * tables live in the low part of the address space */
378 ldw,s \index(\pmd),\pmd
379 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
380 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
381 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
382 extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
383 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
384 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
387 /* Look up PTE in a 3-Level scheme. */
388 .macro L3_ptep pgd,pte,index,va,fault
	/* Top level of a 3-level walk: index the pgd by the high VA
	 * bits, check the present bit (branch to \fault if clear),
	 * then hand off to L2_ptep for the remaining levels.
	 * NOTE(review): the #endif / .endm lines are missing from this
	 * excerpt. */
389 #if CONFIG_PGTABLE_LEVELS == 3
391 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
392 ldw,s \index(\pgd),\pgd
393 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
394 shld \pgd,PxD_VALUE_SHIFT,\pgd
396 L2_ptep \pgd,\pte,\index,\va,\fault
399 /* Acquire page_table_lock and check page is present. */
400 .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault
	/* Take the page_table_lock (spinning on LDCW) for user-space
	 * faults, reload the pte under the lock, and branch to \fault
	 * if the page is not present.  Kernel faults (\spc == 0) skip
	 * the lock via label 2.  The 98:/99: pair lets ALTERNATIVE
	 * NOP the whole locked path out on non-SMP kernels.
	 * NOTE(review): several lines (the lock-address load into \tmp,
	 * label 3:, the #endif and .endm) are missing from this
	 * excerpt. */
401 #ifdef CONFIG_TLB_PTLOCK
402 98: cmpib,COND(=),n 0,\spc,2f
404 1: LDCW 0(\tmp),\tmp1
405 cmpib,COND(=) 0,\tmp1,1b
408 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
411 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
413 2: LDREG 0(\ptp),\pte
414 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
418 /* Release page_table_lock without reloading lock address.
419 We use an ordered store to ensure all prior accesses are
420 performed prior to releasing the lock. */
421 .macro ptl_unlock0 spc,tmp,tmp2
	/* Release the page_table_lock (skipped when \spc == 0, i.e. a
	 * kernel fault that never took it).  NOTE(review): the ordered
	 * store that actually writes \tmp2 to the lock word is missing
	 * from this excerpt, as are the #endif / .endm lines. */
422 #ifdef CONFIG_TLB_PTLOCK
423 98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
424 or,COND(=) %r0,\spc,%r0
426 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
430 /* Release page_table_lock. */
431 .macro ptl_unlock1 spc,tmp,tmp2
	/* Release the page_table_lock, reloading the lock address
	 * first (delegates the store to ptl_unlock0).
	 * NOTE(review): the 98: label / lock-address load and the
	 * #endif / .endm lines are missing from this excerpt. */
432 #ifdef CONFIG_TLB_PTLOCK
434 ptl_unlock0 \spc,\tmp,\tmp2
435 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
439 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
440 * don't needlessly dirty the cache line if it was already set */
441 .macro update_accessed ptp,pte,tmp,tmp1
	/* Set _PAGE_ACCESSED in the pte, but only store back when the
	 * bit was clear (the and,COND(<>) nullifies the store when the
	 * bit is already set, avoiding a needless cache-line dirty).
	 * NOTE(review): the or + STREG lines and .endm are missing
	 * from this excerpt. */
442 ldi _PAGE_ACCESSED,\tmp1
444 and,COND(<>) \tmp1,\pte,%r0
448 /* Set the dirty bit (and accessed bit). No need to be
449 * clever, this is only used from the dirty fault */
450 .macro update_dirty ptp,pte,tmp
	/* Unconditionally set _PAGE_ACCESSED|_PAGE_DIRTY in the pte
	 * (only used from the dirty-bit fault, so no cleverness
	 * needed).  NOTE(review): the or + STREG lines and .endm are
	 * missing from this excerpt. */
451 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
456 /* We have (depending on the page size):
457 * - 38 to 52-bit Physical Page Number
458 * - 12 to 26-bit page offset
460 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
461 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
462 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
463 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
465 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
466 .macro convert_for_tlb_insert20 pte,tmp
	/* Strip the software prot bits from \pte and shift the PFN
	 * into the position iitlbt/idtlbt expect, then deposit the
	 * page-size encoding in the low bits.  With hugepage support,
	 * the huge encoding is deposited instead when _PAGE_HPAGE is
	 * set (the extrd,u,*= nullifies the following depdi pair).
	 * NOTE(review): the copy of \pte into \tmp and the trailing
	 * #endif / .endm lines are missing from this excerpt. */
467 #ifdef CONFIG_HUGETLB_PAGE
469 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
470 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
472 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
473 (63-58)+PAGE_ADD_SHIFT,\pte
474 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
475 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
476 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
477 #else /* Huge pages disabled */
478 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
479 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
480 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
481 (63-58)+PAGE_ADD_SHIFT,\pte
485 /* Convert the pte and prot to tlb insertion values. How
486 * this happens is quite subtle, read below */
487 .macro make_insert_tlb spc,pte,prot,tmp
	/* Build the PA 2.0 TLB-insert values: \prot from the space id
	 * plus access-rights/PL bits derived from the pte, and \pte
	 * converted to insert format via convert_for_tlb_insert20.
	 * NOTE(review): several depd lines that deposit the rights
	 * bits into \prot (the nullified targets of the extrd,u,*=
	 * tests below) and the final .endm are missing from this
	 * excerpt. */
488 space_to_prot \spc \prot /* create prot id from space */
489 /* The following is the real subtlety. This is depositing
490 * T <-> _PAGE_REFTRAP
492 * B <-> _PAGE_DMB (memory break)
494 * Then incredible subtlety: The access rights are
495 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
496 * See 3-14 of the parisc 2.0 manual
498 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
499 * trigger an access rights trap in user space if the user
500 * tries to read an unreadable page */
501 #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
502 /* need to drop DMB bit, as it's used as SPECIAL flag */
503 depi 0,_PAGE_SPECIAL_BIT,1,\pte
507 /* PAGE_USER indicates the page can be read with user privileges,
508 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
509 * contains _PAGE_READ) */
510 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
512 /* If we're a gateway page, drop PL2 back to zero for promotion
513 * to kernel privilege (so we can execute the page as kernel).
514 * Any privilege promotion page always denies read and write */
515 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
516 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
518 /* Enforce uncacheable pages.
519 * This should ONLY be used for MMIO on PA 2.0 machines.
520 * Memory/DMA is cache coherent on all PA2.0 machines we support
521 * (that means T-class is NOT supported) and the memory controllers
522 * on most of those machines only handle cache transactions.
524 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
527 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
528 convert_for_tlb_insert20 \pte \tmp
531 /* Identical macro to make_insert_tlb above, except it
532 * makes the tlb entry for the differently formatted pa11
533 * insertion instructions */
534 .macro make_insert_tlb_11 spc,pte,prot
	/* PA 1.1 variant of make_insert_tlb: build \prot from the
	 * space id and pte bits, then shift \pte into the format the
	 * 32-bit iitlba/idtlba inserts expect.
	 * NOTE(review): the depi targets nullified by the extru,=
	 * tests (and the final .endm) are partly missing from this
	 * excerpt. */
535 #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
536 /* need to drop DMB bit, as it's used as SPECIAL flag */
537 depi 0,_PAGE_SPECIAL_BIT,1,\pte
539 zdep \spc,30,15,\prot
541 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
543 extru,= \pte,_PAGE_USER_BIT,1,%r0
544 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
545 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
546 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
548 /* Get rid of prot bits and convert to page addr for iitlba */
550 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
551 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
554 /* This is for ILP32 PA2.0 only. The TLB insertion needs
555 * to extend into I/O space if the address is 0xfXXXXXXX
556 * so we extend the f's into the top word of the pte in
558 .macro f_extend pte,tmp
	/* ILP32 PA2.0 only: sign-extend the pte so 0xfXXXXXXX
	 * addresses reach into I/O space (the f's propagate into the
	 * top word).  NOTE(review): the line between the two extrd,s
	 * instructions and the .endm are missing from this excerpt. */
559 extrd,s \pte,42,4,\tmp
561 extrd,s \pte,63,25,\pte
564 /* The alias region is comprised of a pair of 4 MB regions
565 * aligned to 8 MB. It is used to clear/copy/flush user pages
566 * using kernel virtual addresses congruent with the user
569 * To use the alias page, you set %r26 up with the to TLB
570 * entry (identifying the physical page) and %r23 up with
571 * the from tlb entry (or nothing if only a to entry---for
572 * clear_user_page_asm) */
573 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
	/* Handle a fault in the tmpalias region: verify the VA is in
	 * range (else branch to \fault), pick the "from" (%r23) or
	 * "to" (%r26) physical page based on a VA bit, and build
	 * \pte/\prot for a TLB insert.  \patype selects the 2.0 (depd)
	 * or 1.1 (depw) deposit of \prot.
	 * NOTE(review): several lines -- the .ifc \patype dispatch,
	 * copy of %r26 into \pte, and the .endm -- are missing from
	 * this excerpt. */
574 cmpib,COND(<>),n 0,\spc,\fault
575 ldil L%(TMPALIAS_MAP_START),\tmp
577 depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1
578 cmpb,COND(<>),n \tmp,\tmp1,\fault
579 mfctl %cr19,\tmp /* iir */
580 /* get the opcode (first six bits) into \tmp */
581 extrw,u \tmp,5,6,\tmp
583 * Only setting the T bit prevents data cache movein
584 * Setting access rights to zero prevents instruction cache movein
586 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
587 * to type field and _PAGE_READ goes to top bit of PL1
589 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
591 * so if the opcode is one (i.e. this is a memory management
592 * instruction) nullify the next load so \prot is only T.
593 * Otherwise this is a normal data operation
595 cmpiclr,= 0x01,\tmp,%r0
596 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
598 depd,z \prot,8,7,\prot
601 depw,z \prot,8,7,\prot
603 .error "undefined PA type to do_alias"
607 * OK, it is in the temp alias region, check whether "from" or "to".
608 * Check "subtle" note in pacache.S re: r23/r26.
610 extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0
611 or,COND(tr) %r23,%r0,\pte
614 /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
615 SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
616 depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
621 * Fault_vectors are architecturally required to be aligned on a 2K
628 ENTRY(fault_vector_20)
629 /* First vector is invalid (0) */
630 .ascii "cows can fly"
639 itlb_20 PARISC_ITLB_TRAP
671 ENTRY(fault_vector_11)
672 /* First vector is invalid (0) */
673 .ascii "cows can fly"
682 itlb_11 PARISC_ITLB_TRAP
711 /* Fault vector is separately protected and *must* be on its own page */
714 .import handle_interruption,code
715 .import do_cpu_irq_mask,code
720 * copy_thread moved args into task save area.
723 ENTRY(ret_from_kernel_thread)
	/* First schedule off the previous task, then load the thread
	 * function pointer and argument that copy_thread stashed in
	 * the task save area, and join the common child-return path.
	 * NOTE(review): a few interior lines (including the delay-slot
	 * instructions of the branches) are missing from this
	 * excerpt. */
724 /* Call schedule_tail first though */
725 BL schedule_tail, %r2
728 mfctl %cr30,%r1 /* task_struct */
729 LDREG TASK_PT_GR25(%r1), %r26 /* thread-fn argument */
731 LDREG TASK_PT_GR27(%r1), %r27
733 LDREG TASK_PT_GR26(%r1), %r1 /* thread-fn pointer */
736 b finish_child_return
738 END(ret_from_kernel_thread)
742 * struct task_struct *_switch_to(struct task_struct *prev,
743 * struct task_struct *next)
745 * switch kernel stacks and return prev */
746 ENTRY_CFI(_switch_to)
	/* struct task_struct *_switch_to(prev=%r26, next=%r25):
	 * save prev's continuation PC and kernel SP into its thread
	 * area, load next's, and resume next at _switch_to_ret.
	 * NOTE(review): interior lines (callee-save spill/reload,
	 * the mtctl of %r25 and the return copy into %r28) are
	 * missing from this excerpt. */
747 STREG %r2, -RP_OFFSET(%r30)
752 load32 _switch_to_ret, %r2
754 STREG %r2, TASK_PT_KPC(%r26) /* prev resumes at _switch_to_ret */
755 LDREG TASK_PT_KPC(%r25), %r2 /* next's saved continuation PC */
757 STREG %r30, TASK_PT_KSP(%r26)
758 LDREG TASK_PT_KSP(%r25), %r30 /* switch kernel stacks */
762 ENTRY(_switch_to_ret)
763 mtctl %r0, %cr0 /* Needed for single stepping */
767 LDREG -RP_OFFSET(%r30), %r2
770 ENDPROC_CFI(_switch_to)
773 * Common rfi return path for interruptions, kernel execve, and
774 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
775 * return via this path if the signal was received when the process
776 * was running; if the process was blocked on a syscall then the
777 * normal syscall_exit path is used. All syscalls for traced
778 processes exit via intr_restore.
780 * XXX If any syscalls that change a processes space id ever exit
781 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
788 ENTRY_CFI(syscall_exit_rfi)
	/* Common rfi return path: force iaoq and PSW to user-mode
	 * values (the user had access to this context via sigcontext),
	 * reset the space registers, then run the reschedule/signal
	 * checks before restoring state and doing the rfi.
	 * NOTE(review): many interior lines are missing from this
	 * excerpt, including the labels this code branches to
	 * (intr_do_resched, intr_check_sig, intr_restore, intr_return,
	 * intr_do_preempt) and the external-interrupt entry points --
	 * the visible instructions therefore belong to several
	 * distinct paths that are fused together below. */
789 mfctl %cr30,%r16 /* task_struct */
790 ldo TASK_REGS(%r16),%r16
791 /* Force iaoq to userspace, as the user has had access to our current
792 * context via sigcontext. Also Filter the PSW for the same reason.
794 LDREG PT_IAOQ0(%r16),%r19
795 depi PRIV_USER,31,2,%r19
796 STREG %r19,PT_IAOQ0(%r16)
797 LDREG PT_IAOQ1(%r16),%r19
798 depi PRIV_USER,31,2,%r19
799 STREG %r19,PT_IAOQ1(%r16)
800 LDREG PT_PSW(%r16),%r19
801 load32 USER_PSW_MASK,%r1
803 load32 USER_PSW_HI_MASK,%r20
806 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
808 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
809 STREG %r19,PT_PSW(%r16)
812 * If we aren't being traced, we never saved space registers
813 * (we don't store them in the sigcontext), so set them
814 * to "proper" values now (otherwise we'll wind up restoring
815 * whatever was last stored in the task structure, which might
816 * be inconsistent if an interrupt occurred while on the gateway
817 * page). Note that we may be "trashing" values the user put in
818 * them, but we don't support the user changing them.
	/* %r19 presumably holds the user space id here -- its load is
	 * not visible in this excerpt. */
821 STREG %r0,PT_SR2(%r16)
823 STREG %r19,PT_SR0(%r16)
824 STREG %r19,PT_SR1(%r16)
825 STREG %r19,PT_SR3(%r16)
826 STREG %r19,PT_SR4(%r16)
827 STREG %r19,PT_SR5(%r16)
828 STREG %r19,PT_SR6(%r16)
829 STREG %r19,PT_SR7(%r16)
832 /* check for reschedule */
834 LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
835 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
837 .import do_notify_resume,code
841 LDREG TASK_TI_FLAGS(%r1),%r19
842 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
843 and,COND(<>) %r19, %r20, %r0
844 b,n intr_restore /* skip past if we've nothing to do */
846 /* This check is critical to having LWS
847 * working. The IASQ is zero on the gateway
848 * page and we cannot deliver any signals until
849 * we get off the gateway page.
851 * Only do signals if we are returning to user space
853 LDREG PT_IASQ0(%r16), %r20
854 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
855 LDREG PT_IASQ1(%r16), %r20
856 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
858 copy %r0, %r25 /* long in_syscall = 0 */
860 ldo -16(%r30),%r29 /* Reference param save area */
863 /* NOTE: We need to enable interrupts if we have to deliver
864 * signals. We used to do this earlier but it caused kernel
865 * stack overflows. */
868 BL do_notify_resume,%r2
869 copy %r16, %r26 /* struct pt_regs *regs */
875 ldo PT_FR31(%r29),%r1
879 /* inverse of virt_map */
881 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
884 /* Restore space id's and special cr's from PT_REGS
885 * structure pointed to by r29
889 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
890 * It also restores r1 and r30.
897 #ifndef CONFIG_PREEMPTION
898 # define intr_do_preempt intr_restore
899 #endif /* !CONFIG_PREEMPTION */
901 .import schedule,code
903 /* Only call schedule on return to userspace. If we're returning
904 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
905 * we jump back to intr_restore.
907 LDREG PT_IASQ0(%r16), %r20
908 cmpib,COND(=) 0, %r20, intr_do_preempt
910 LDREG PT_IASQ1(%r16), %r20
911 cmpib,COND(=) 0, %r20, intr_do_preempt
914 /* NOTE: We need to enable interrupts if we schedule. We used
915 * to do this earlier but it caused kernel stack overflows. */
919 ldo -16(%r30),%r29 /* Reference param save area */
922 ldil L%intr_check_sig, %r2
926 load32 schedule, %r20
929 ldo R%intr_check_sig(%r2), %r2
931 /* preempt the current task on returning to kernel
932 * mode from an interrupt, iff need_resched is set,
933 * and preempt_count is 0. otherwise, we continue on
934 * our merry way back to the current running task.
936 #ifdef CONFIG_PREEMPTION
937 .import preempt_schedule_irq,code
939 rsm PSW_SM_I, %r0 /* disable interrupts */
941 /* current_thread_info()->preempt_count */
943 ldw TI_PRE_COUNT(%r1), %r19
944 cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */
945 nop /* prev insn branched backwards */
947 /* check if we interrupted a critical path */
948 LDREG PT_PSW(%r16), %r20
949 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
952 /* ssm PSW_SM_I done later in intr_restore */
953 #ifdef CONFIG_MLONGCALLS
954 ldil L%intr_restore, %r2
955 load32 preempt_schedule_irq, %r1
957 ldo R%intr_restore(%r2), %r2
959 ldil L%intr_restore, %r1
960 BL preempt_schedule_irq, %r2
961 ldo R%intr_restore(%r1), %r2
963 #endif /* CONFIG_PREEMPTION */
966 * External interrupts.
970 cmpib,COND(=),n 0,%r16,1f
982 ldo PT_FR0(%r29), %r24
987 copy %r29, %r26 /* arg0 is pt_regs */
988 copy %r29, %r16 /* save pt_regs */
990 ldil L%intr_return, %r2
993 ldo -16(%r30),%r29 /* Reference param save area */
997 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
998 ENDPROC_CFI(syscall_exit_rfi)
1001 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1003 ENTRY_CFI(intr_save) /* for os_hpmc */
	/* Generic interruption entry: set up a pt_regs save area,
	 * save/adjust isr/ior (skipped for itlb misses, whose isr/ior
	 * are undefined), then branch to C handle_interruption with
	 * intr_check_sig as the return point.
	 * NOTE(review): many interior lines are missing from this
	 * excerpt -- the get_stack/save_specials/save_general calls,
	 * the labels 1:, skip_save_ior and intr_save2, and the mfctl
	 * instructions that load %r16/%r17 -- so the visible
	 * instructions are a fused subset of the full handler. */
1005 cmpib,COND(=),n 0,%r16,1f
1017 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1018 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
1022 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1028 * If the interrupted code was running with W bit off (32 bit),
1029 * clear the b bits (bits 0 & 1) in the ior.
1030 * save_specials left ipsw value in r8 for us to test.
1032 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1035 /* adjust isr/ior: get high bits from isr and deposit in ior */
1036 space_adjust %r16,%r17,%r1
1038 STREG %r16, PT_ISR(%r29)
1039 STREG %r17, PT_IOR(%r29)
1041 #if 0 && defined(CONFIG_64BIT)
1042 /* Revisit when we have 64-bit code above 4Gb */
1046 /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
1047 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1050 extrd,u,* %r8,PSW_W_BIT,1,%r1
1051 cmpib,COND(=),n 1,%r1,intr_save2
1052 LDREG PT_IASQ0(%r29), %r16
1053 LDREG PT_IAOQ0(%r29), %r17
1054 /* adjust iasq/iaoq */
1055 space_adjust %r16,%r17,%r1
1056 STREG %r16, PT_IASQ0(%r29)
1057 STREG %r17, PT_IAOQ0(%r29)
1066 ldo PT_FR0(%r29), %r25
1071 copy %r29, %r25 /* arg1 is pt_regs */
1073 ldo -16(%r30),%r29 /* Reference param save area */
1076 ldil L%intr_check_sig, %r2
1077 copy %r25, %r16 /* save pt_regs */
1079 b handle_interruption
1080 ldo R%intr_check_sig(%r2), %r2
1081 ENDPROC_CFI(intr_save)
1085 * Note for all tlb miss handlers:
1087 * cr24 contains a pointer to the kernel address space
1090 * cr25 contains a pointer to the current user address
1091 * space page directory.
1093 * sr3 will contain the space id of the user address space
1094 * of the current running thread while that thread is
1095 * running in the kernel.
1099 * register number allocations. Note that these are all
1100 * in the shadowed registers
1103 t0 = r1 /* temporary register 0 */
1104 va = r8 /* virtual address for which the trap occurred */
1105 t1 = r9 /* temporary register 1 */
1106 pte = r16 /* pte/phys page # */
1107 prot = r17 /* prot bits */
1108 spc = r24 /* space for which the trap occurred */
1109 ptp = r25 /* page directory/page table pointer */
1114 space_adjust spc,va,t0
1116 space_check spc,t0,dtlb_fault
1118 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1120 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1121 update_accessed ptp,pte,t0,t1
1123 make_insert_tlb spc,pte,prot,t1
1127 ptl_unlock1 spc,t0,t1
1131 dtlb_check_alias_20w:
1132 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1140 space_adjust spc,va,t0
1142 space_check spc,t0,nadtlb_fault
1144 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1146 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1147 update_accessed ptp,pte,t0,t1
1149 make_insert_tlb spc,pte,prot,t1
1153 ptl_unlock1 spc,t0,t1
1157 nadtlb_check_alias_20w:
1158 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1170 space_check spc,t0,dtlb_fault
1172 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1174 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1175 update_accessed ptp,pte,t0,t1
1177 make_insert_tlb_11 spc,pte,prot
1179 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1182 idtlba pte,(%sr1,va)
1183 idtlbp prot,(%sr1,va)
1185 mtsp t1, %sr1 /* Restore sr1 */
1187 ptl_unlock1 spc,t0,t1
1191 dtlb_check_alias_11:
1192 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1203 space_check spc,t0,nadtlb_fault
1205 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1207 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1208 update_accessed ptp,pte,t0,t1
1210 make_insert_tlb_11 spc,pte,prot
1212 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1215 idtlba pte,(%sr1,va)
1216 idtlbp prot,(%sr1,va)
1218 mtsp t1, %sr1 /* Restore sr1 */
1220 ptl_unlock1 spc,t0,t1
1224 nadtlb_check_alias_11:
1225 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1234 space_adjust spc,va,t0
1236 space_check spc,t0,dtlb_fault
1238 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1240 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1241 update_accessed ptp,pte,t0,t1
1243 make_insert_tlb spc,pte,prot,t1
1249 ptl_unlock1 spc,t0,t1
1253 dtlb_check_alias_20:
1254 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1264 space_check spc,t0,nadtlb_fault
1266 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1268 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1269 update_accessed ptp,pte,t0,t1
1271 make_insert_tlb spc,pte,prot,t1
1277 ptl_unlock1 spc,t0,t1
1281 nadtlb_check_alias_20:
1282 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1294 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
1295 * probei instructions. The kernel no longer faults doing flushes.
1296 * Use of lpa and probe instructions is rare. Given the issue
1297 * with shadow registers, we defer everything to the "slow" path.
1305 * I miss is a little different, since we allow users to fault
1306 * on the gateway page which is in the kernel address space.
1309 space_adjust spc,va,t0
1311 space_check spc,t0,itlb_fault
1313 L3_ptep ptp,pte,t0,va,itlb_fault
1315 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1316 update_accessed ptp,pte,t0,t1
1318 make_insert_tlb spc,pte,prot,t1
1322 ptl_unlock1 spc,t0,t1
1329 * I miss is a little different, since we allow users to fault
1330 * on the gateway page which is in the kernel address space.
1333 space_adjust spc,va,t0
1335 space_check spc,t0,naitlb_fault
1337 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1339 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1340 update_accessed ptp,pte,t0,t1
1342 make_insert_tlb spc,pte,prot,t1
1346 ptl_unlock1 spc,t0,t1
1350 naitlb_check_alias_20w:
1351 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1363 space_check spc,t0,itlb_fault
1365 L2_ptep ptp,pte,t0,va,itlb_fault
1367 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1368 update_accessed ptp,pte,t0,t1
1370 make_insert_tlb_11 spc,pte,prot
1372 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1375 iitlba pte,(%sr1,va)
1376 iitlbp prot,(%sr1,va)
1378 mtsp t1, %sr1 /* Restore sr1 */
1380 ptl_unlock1 spc,t0,t1
1387 space_check spc,t0,naitlb_fault
1389 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1391 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1392 update_accessed ptp,pte,t0,t1
1394 make_insert_tlb_11 spc,pte,prot
1396 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1399 iitlba pte,(%sr1,va)
1400 iitlbp prot,(%sr1,va)
1402 mtsp t1, %sr1 /* Restore sr1 */
1404 ptl_unlock1 spc,t0,t1
1408 naitlb_check_alias_11:
1409 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1411 iitlba pte,(%sr0, va)
1412 iitlbp prot,(%sr0, va)
1421 space_check spc,t0,itlb_fault
1423 L2_ptep ptp,pte,t0,va,itlb_fault
1425 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1426 update_accessed ptp,pte,t0,t1
1428 make_insert_tlb spc,pte,prot,t1
1434 ptl_unlock1 spc,t0,t1
1441 space_check spc,t0,naitlb_fault
1443 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1445 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1446 update_accessed ptp,pte,t0,t1
1448 make_insert_tlb spc,pte,prot,t1
1454 ptl_unlock1 spc,t0,t1
1458 naitlb_check_alias_20:
1459 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1471 space_adjust spc,va,t0
1473 space_check spc,t0,dbit_fault
1475 L3_ptep ptp,pte,t0,va,dbit_fault
1477 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1478 update_dirty ptp,pte,t1
1480 make_insert_tlb spc,pte,prot,t1
1484 ptl_unlock0 spc,t0,t1
1493 space_check spc,t0,dbit_fault
1495 L2_ptep ptp,pte,t0,va,dbit_fault
1497 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1498 update_dirty ptp,pte,t1
1500 make_insert_tlb_11 spc,pte,prot
1502 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1505 idtlba pte,(%sr1,va)
1506 idtlbp prot,(%sr1,va)
1508 mtsp t1, %sr1 /* Restore sr1 */
1510 ptl_unlock0 spc,t0,t1
1517 space_check spc,t0,dbit_fault
1519 L2_ptep ptp,pte,t0,va,dbit_fault
1521 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1522 update_dirty ptp,pte,t1
1524 make_insert_tlb spc,pte,prot,t1
1530 ptl_unlock0 spc,t0,t1
1535 .import handle_interruption,code
1539 ldi 31,%r8 /* Use an unused code */
1547 ldi PARISC_ITLB_TRAP,%r8
1561 /* Register saving semantics for system calls:
1563 %r1 clobbered by system call macro in userspace
1564 %r2 saved in PT_REGS by gateway page
1565 %r3 - %r18 preserved by C code (saved by signal code)
1566 %r19 - %r20 saved in PT_REGS by gateway page
1567 %r21 - %r22 non-standard syscall args
1568 stored in kernel stack by gateway page
1569 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1570 %r27 - %r30 saved in PT_REGS by gateway page
1571 %r31 syscall return pointer
1574 /* Floating point registers (FIXME: what do we do with these?)
1576 %fr0 - %fr3 status/exception, not preserved
1577 %fr4 - %fr7 arguments
1578 %fr8 - %fr11 not preserved by C code
1579 %fr12 - %fr21 preserved by C code
1580 %fr22 - %fr31 not preserved by C code
1583 .macro reg_save regs
	/* Save the callee-saved registers %r3-%r18 into the pt_regs
	 * pointed to by \regs (used so a sigcontext can see them).
	 * NOTE(review): the closing .endm is missing from this
	 * excerpt. */
1584 STREG %r3, PT_GR3(\regs)
1585 STREG %r4, PT_GR4(\regs)
1586 STREG %r5, PT_GR5(\regs)
1587 STREG %r6, PT_GR6(\regs)
1588 STREG %r7, PT_GR7(\regs)
1589 STREG %r8, PT_GR8(\regs)
1590 STREG %r9, PT_GR9(\regs)
1591 STREG %r10,PT_GR10(\regs)
1592 STREG %r11,PT_GR11(\regs)
1593 STREG %r12,PT_GR12(\regs)
1594 STREG %r13,PT_GR13(\regs)
1595 STREG %r14,PT_GR14(\regs)
1596 STREG %r15,PT_GR15(\regs)
1597 STREG %r16,PT_GR16(\regs)
1598 STREG %r17,PT_GR17(\regs)
1599 STREG %r18,PT_GR18(\regs)
1602 .macro reg_restore regs
	/* Reload %r3-%r18 from the pt_regs pointed to by \regs
	 * (inverse of reg_save).  NOTE(review): the closing .endm is
	 * missing from this excerpt. */
1603 LDREG PT_GR3(\regs), %r3
1604 LDREG PT_GR4(\regs), %r4
1605 LDREG PT_GR5(\regs), %r5
1606 LDREG PT_GR6(\regs), %r6
1607 LDREG PT_GR7(\regs), %r7
1608 LDREG PT_GR8(\regs), %r8
1609 LDREG PT_GR9(\regs), %r9
1610 LDREG PT_GR10(\regs),%r10
1611 LDREG PT_GR11(\regs),%r11
1612 LDREG PT_GR12(\regs),%r12
1613 LDREG PT_GR13(\regs),%r13
1614 LDREG PT_GR14(\regs),%r14
1615 LDREG PT_GR15(\regs),%r15
1616 LDREG PT_GR16(\regs),%r16
1617 LDREG PT_GR17(\regs),%r17
1618 LDREG PT_GR18(\regs),%r18
1621 .macro fork_like name
	/* Generate sys_<name>_wrapper: stash state in the task's
	 * pt_regs, then tail-branch to the C sys_<name> via %sr4
	 * (the STREG executes in the branch delay slot).
	 * NOTE(review): the lines between ENTRY_CFI and the first ldo
	 * (presumably loading %r1 from %cr30) and the .endm are
	 * missing from this excerpt. */
1622 ENTRY_CFI(sys_\name\()_wrapper)
1624 ldo TASK_REGS(%r1),%r1
1627 ldil L%sys_\name, %r31
1628 be R%sys_\name(%sr4,%r31)
1629 STREG %r28, PT_CR27(%r1)
1630 ENDPROC_CFI(sys_\name\()_wrapper)
1638 /* Set the return value for the child */
1640 BL schedule_tail, %r2
1642 finish_child_return:
1644 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1646 LDREG PT_CR27(%r1), %r3
1653 ENTRY_CFI(sys_rt_sigreturn_wrapper)
	/* Call C sys_rt_sigreturn with the task's pt_regs, then
	 * return via the r2 that sigreturn placed in pt_regs (either
	 * syscall_exit or syscall_exit_rfi, see comment below).
	 * NOTE(review): the two BL sys_rt_sigreturn sequences below
	 * are the 64-bit and 32-bit arms of an #ifdef whose
	 * #ifdef/#else/#endif lines are missing from this excerpt;
	 * other interior lines (e.g. the reload of %r1 from %cr30 and
	 * the reload of %r2) are missing as well. */
1655 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1656 /* Don't save regs, we are going to restore them from sigcontext. */
1657 STREG %r2, -RP_OFFSET(%r30)
1659 ldo FRAME_SIZE(%r30), %r30
1660 BL sys_rt_sigreturn,%r2
1661 ldo -16(%r30),%r29 /* Reference param save area */
1663 BL sys_rt_sigreturn,%r2
1664 ldo FRAME_SIZE(%r30), %r30
1667 ldo -FRAME_SIZE(%r30), %r30
1668 LDREG -RP_OFFSET(%r30), %r2
1670 /* FIXME: I think we need to restore a few more things here. */
1672 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1675 /* If the signal was received while the process was blocked on a
1676 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1677 * take us to syscall_exit_rfi and on to intr_return.
1680 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1681 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
/*
 * syscall_exit: common syscall return path (some lines elided in this
 * excerpt).  Stores the syscall return value into pt_regs, then checks
 * the thread flags for reschedule and pending signal/work before
 * restoring user state.
 */
1684 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1685 * via syscall_exit_rfi if the signal was received while the process
1689 /* save return value now */
1691 STREG %r28,TASK_PT_GR28(%r1)
/*
 * Original author's warning retained below: %r27 (dp, the global data
 * pointer) may be stale if the syscall path crossed a module boundary.
 */
1693 /* Seems to me that dp could be wrong here, if the syscall involved
1694 * calling a module, and nothing got round to restoring dp on return.
1698 syscall_check_resched:
1700 /* check for reschedule */
1702 LDREG TASK_TI_FLAGS(%r19),%r19 /* long */
1703 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1705 .import do_signal,code
1708 LDREG TASK_TI_FLAGS(%r19),%r19 /* re-read thread flags (may have changed across resched) */
1709 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26 /* pending-work bits other than resched */
1710 and,COND(<>) %r19, %r26, %r0 /* nonzero result nullifies the branch below, so pending work is handled */
1711 b,n syscall_restore /* skip past if we've nothing to do */
/*
 * Pending-signal / notify-resume path: callee-saved registers are saved
 * so the sigcontext is complete, then do_notify_resume(regs, 1) is
 * called and control loops back to re-check for further pending signals.
 * NOTE(review): the reg_save invocation and the loop-back branch target
 * setup are partly elided from this excerpt.
 */
1714 /* Save callee-save registers (for sigcontext).
1715 * FIXME: After this point the process structure should be
1716 * consistent with all the relevant state of the process
1717 * before the syscall. We need to verify this.
1720 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1724 ldo -16(%r30),%r29 /* Reference param save area */
1727 BL do_notify_resume,%r2
1728 ldi 1, %r25 /* long in_syscall = 1 */
1731 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1734 b,n syscall_check_sig
/*
 * syscall_restore: reload user general registers, SAR and space
 * registers from pt_regs and return straight to user space with an
 * external branch.  Single-/block-stepped (ptraced) processes are
 * diverted to syscall_restore_rfi, which returns via rfi so the PSW
 * trace bits can take effect.
 */
1739 /* Are we being ptraced? */
1740 LDREG TASK_TI_FLAGS(%r1),%r19
1741 ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
1742 and,COND(=) %r19,%r2,%r0 /* no step bits set => nullify the branch below */
1743 b,n syscall_restore_rfi /* stepping: must return via rfi (PSW R/T bits) */
1745 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1748 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
1751 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1752 LDREG TASK_PT_GR19(%r1),%r19
1753 LDREG TASK_PT_GR20(%r1),%r20
1754 LDREG TASK_PT_GR21(%r1),%r21
1755 LDREG TASK_PT_GR22(%r1),%r22
1756 LDREG TASK_PT_GR23(%r1),%r23
1757 LDREG TASK_PT_GR24(%r1),%r24
1758 LDREG TASK_PT_GR25(%r1),%r25
1759 LDREG TASK_PT_GR26(%r1),%r26
1760 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1761 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1762 LDREG TASK_PT_GR29(%r1),%r29
1763 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1765 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1766 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1768 copy %r1,%r30 /* Restore user sp */
1769 mfsp %sr3,%r1 /* Get user space id */
1770 mtsp %r1,%sr7 /* Restore sr7 */
1773 /* Set sr2 to zero for userspace syscalls to work. */
1775 mtsp %r1,%sr4 /* Restore sr4 */
1776 mtsp %r1,%sr5 /* Restore sr5 */
1777 mtsp %r1,%sr6 /* Restore sr6 */
1779 depi PRIV_USER,31,2,%r31 /* ensure return to user mode. */
1782 /* decide whether to reset the wide mode bit
1784 * For a syscall, the W bit is stored in the lowest bit
1785 * of sp. Extract it and reset W if it is zero */
1786 extrd,u,*<> %r30,63,1,%r1
1788 /* now reset the lowest bit of sp if it was set */
1791 be,n 0(%sr3,%r31) /* return to user space */
/*
 * syscall_restore_rfi: slow-path return for single-/block-stepped
 * (ptraced) processes.  Builds a fresh PSW with the R (recovery
 * counter) and/or T (taken-branch trap) bits set according to the TIF
 * step flags, fills in the space registers and IAOQ in pt_regs, and —
 * if the old PSW D bit shows registers were never fully saved on entry
 * (TRACEME / late attach) — saves them now so the common
 * intr_restore/rfi path can be used.
 */
1793 /* We have to return via an RFI, so that PSW T and R bits can be set
1795 * This sets up pt_regs so we can return via intr_restore, which is not
1796 * the most efficient way of doing things, but it works.
1798 syscall_restore_rfi:
1799 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1800 mtctl %r2,%cr0 /* for immediate trap */
1801 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1802 ldi 0x0b,%r20 /* Create new PSW */
1803 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1805 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1806 * set in thread_info.h and converted to PA bitmap
1807 * numbers in asm-offsets.c */
1809 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1810 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1811 depi -1,27,1,%r20 /* R bit */
1813 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1814 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1815 depi -1,7,1,%r20 /* T bit */
1817 STREG %r20,TASK_PT_PSW(%r1) /* new PSW takes effect at the rfi */
1819 /* Always store space registers, since sr3 can be changed (e.g. fork) */
1822 STREG %r25,TASK_PT_SR3(%r1)
1823 STREG %r25,TASK_PT_SR4(%r1)
1824 STREG %r25,TASK_PT_SR5(%r1)
1825 STREG %r25,TASK_PT_SR6(%r1)
1826 STREG %r25,TASK_PT_SR7(%r1)
1827 STREG %r25,TASK_PT_IASQ0(%r1)
1828 STREG %r25,TASK_PT_IASQ1(%r1)
1831 /* Now if old D bit is clear, it means we didn't save all registers
1832 * on syscall entry, so do that now. This only happens on TRACEME
1833 * calls, or if someone attached to us while we were on a syscall.
1834 * We could make this more efficient by not saving r3-r18, but
1835 * then we wouldn't be able to use the common intr_restore path.
1836 * It is only for traced processes anyway, so performance is not
1839 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1840 ldo TASK_REGS(%r1),%r25
1841 reg_save %r25 /* Save r3 to r18 */
1843 /* Save the current sr */
1845 STREG %r2,TASK_PT_SR0(%r1)
1847 /* Save the scratch sr */
1849 STREG %r2,TASK_PT_SR1(%r1)
1851 /* sr2 should be set to zero for userspace syscalls */
1852 STREG %r0,TASK_PT_SR2(%r1)
1854 LDREG TASK_PT_GR31(%r1),%r2
1855 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1856 STREG %r2,TASK_PT_IAOQ0(%r1)
1858 STREG %r2,TASK_PT_IAOQ1(%r1)
1863 LDREG TASK_PT_IAOQ0(%r1),%r2
1864 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1865 STREG %r2,TASK_PT_IAOQ0(%r1)
1866 LDREG TASK_PT_IAOQ1(%r1),%r2
1867 depi PRIV_USER,31,2,%r2
1868 STREG %r2,TASK_PT_IAOQ1(%r1)
/*
 * syscall_do_resched: tail-call schedule().  %r2 (rp) is preloaded with
 * syscall_check_resched so that schedule() "returns" there and the
 * flags check is rerun from the top.
 */
1873 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1874 load32 schedule,%r19
1875 bv %r0(%r19) /* jumps to schedule() */
1877 ldo -16(%r30),%r29 /* Reference param save area */
/*
 * _mcount: profiling entry stub (CONFIG_FUNCTION_TRACER).  Branches to
 * ftrace_function_trampoline, passing the caller's original %sp in
 * %arg2 (delay slot).  The layout keeps the mcount and ftrace_stub
 * entry instructions within one L1 cacheline.
 */
1884 #ifdef CONFIG_FUNCTION_TRACER
1886 .import ftrace_function_trampoline,code
1887 .align L1_CACHE_BYTES
1888 ENTRY_CFI(mcount, caller)
1890 .export _mcount,data
1892 * The 64bit mcount() function pointer needs 4 dwords, of which the
1893 * first two are free. We optimize it here and put 2 instructions for
1894 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1895 * have all on one L1 cacheline.
1898 b ftrace_function_trampoline
1899 copy %r3, %arg2 /* caller original %sp */
1902 .type ftrace_stub, @function
1911 .dword 0 /* code in head.S puts value of global gp here */
/*
 * ftrace_caller: dynamic-ftrace entry point.  Saves the caller-visible
 * general registers into an FTRACE_FRAME_SIZE stack frame, calls
 * ftrace_function_trampoline with no pt_regs (%r23 = 0), restores the
 * registers, and returns to the traced function.
 * NOTE(review): the #else/#endif of the frame-size conditional, the
 * trampoline argument setup, and the final return branch are elided
 * from this excerpt.
 */
1915 #ifdef CONFIG_DYNAMIC_FTRACE
1918 #define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1920 #define FTRACE_FRAME_SIZE FRAME_SIZE
1922 ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1924 .global ftrace_caller
1926 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp) /* save callee-saved %r3; slot 0 is reserved for %r1 */
1927 ldo -FTRACE_FRAME_SIZE(%sp), %r3 /* %r3 = base of the save frame */
1928 STREG %rp, -RP_OFFSET(%r3)
1930 /* Offset 0 is already allocated for %r1 */
1931 STREG %r23, 2*REG_SZ(%r3)
1932 STREG %r24, 3*REG_SZ(%r3)
1933 STREG %r25, 4*REG_SZ(%r3)
1934 STREG %r26, 5*REG_SZ(%r3)
1935 STREG %r28, 6*REG_SZ(%r3)
1936 STREG %r29, 7*REG_SZ(%r3)
1938 STREG %r19, 8*REG_SZ(%r3)
1939 STREG %r20, 9*REG_SZ(%r3)
1940 STREG %r21, 10*REG_SZ(%r3)
1941 STREG %r22, 11*REG_SZ(%r3)
1942 STREG %r27, 12*REG_SZ(%r3)
1943 STREG %r31, 13*REG_SZ(%r3)
1950 ldi 0, %r23 /* no pt_regs */
1951 b,l ftrace_function_trampoline, %rp
1954 LDREG -RP_OFFSET(%r3), %rp
1955 LDREG 2*REG_SZ(%r3), %r23
1956 LDREG 3*REG_SZ(%r3), %r24
1957 LDREG 4*REG_SZ(%r3), %r25
1958 LDREG 5*REG_SZ(%r3), %r26
1959 LDREG 6*REG_SZ(%r3), %r28
1960 LDREG 7*REG_SZ(%r3), %r29
1962 LDREG 8*REG_SZ(%r3), %r19
1963 LDREG 9*REG_SZ(%r3), %r20
1964 LDREG 10*REG_SZ(%r3), %r21
1965 LDREG 11*REG_SZ(%r3), %r22
1966 LDREG 12*REG_SZ(%r3), %r27
1967 LDREG 13*REG_SZ(%r3), %r31
1969 LDREG 1*REG_SZ(%r3), %r3 /* restore callee-saved %r3 last (frame pointer no longer needed) */
1971 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 /* pop the frame and reload %r1 */
1972 /* Adjust return point to jump back to beginning of traced function */
1976 ENDPROC_CFI(ftrace_caller)
/*
 * ftrace_regs_caller (HAVE_DYNAMIC_FTRACE_WITH_REGS): like
 * ftrace_caller, but additionally snapshots every general register into
 * a struct pt_regs on the stack so the tracer can inspect and modify
 * full register state.
 * NOTE(review): several lines (IAOQ/SAR setup before the PT_SAR store,
 * the post-call redirect handling, and the final return branch) are
 * elided from this excerpt — confirm against the full file.
 */
1978 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
1979 ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
1980 CALLS,SAVE_RP,SAVE_SP)
1982 .global ftrace_regs_caller
1984 ldo -FTRACE_FRAME_SIZE(%sp), %r1
1985 STREG %rp, -RP_OFFSET(%r1)
1988 ldo PT_SZ_ALGN(%sp), %sp /* carve out stack space for the pt_regs snapshot */
1990 STREG %rp, PT_GR2(%r1)
1991 STREG %r3, PT_GR3(%r1)
1992 STREG %r4, PT_GR4(%r1)
1993 STREG %r5, PT_GR5(%r1)
1994 STREG %r6, PT_GR6(%r1)
1995 STREG %r7, PT_GR7(%r1)
1996 STREG %r8, PT_GR8(%r1)
1997 STREG %r9, PT_GR9(%r1)
1998 STREG %r10, PT_GR10(%r1)
1999 STREG %r11, PT_GR11(%r1)
2000 STREG %r12, PT_GR12(%r1)
2001 STREG %r13, PT_GR13(%r1)
2002 STREG %r14, PT_GR14(%r1)
2003 STREG %r15, PT_GR15(%r1)
2004 STREG %r16, PT_GR16(%r1)
2005 STREG %r17, PT_GR17(%r1)
2006 STREG %r18, PT_GR18(%r1)
2007 STREG %r19, PT_GR19(%r1)
2008 STREG %r20, PT_GR20(%r1)
2009 STREG %r21, PT_GR21(%r1)
2010 STREG %r22, PT_GR22(%r1)
2011 STREG %r23, PT_GR23(%r1)
2012 STREG %r24, PT_GR24(%r1)
2013 STREG %r25, PT_GR25(%r1)
2014 STREG %r26, PT_GR26(%r1)
2015 STREG %r27, PT_GR27(%r1)
2016 STREG %r28, PT_GR28(%r1)
2017 STREG %r29, PT_GR29(%r1)
2018 STREG %r30, PT_GR30(%r1)
2019 STREG %r31, PT_GR31(%r1)
2021 STREG %r26, PT_SAR(%r1) /* NOTE(review): %r26 presumably reloaded with a traced-address value just before this (elided) — confirm */
2024 LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2026 ldo -FTRACE_FRAME_SIZE(%r1), %arg2
2027 b,l ftrace_function_trampoline, %rp
2028 copy %r1, %arg3 /* struct pt_regs */
2030 ldo -PT_SZ_ALGN(%sp), %r1 /* %r1 = pt_regs base again after the call */
2032 LDREG PT_SAR(%r1), %rp
/* restore the full register snapshot (the tracer may have modified it) */
2035 LDREG PT_GR2(%r1), %rp
2036 LDREG PT_GR3(%r1), %r3
2037 LDREG PT_GR4(%r1), %r4
2038 LDREG PT_GR5(%r1), %r5
2039 LDREG PT_GR6(%r1), %r6
2040 LDREG PT_GR7(%r1), %r7
2041 LDREG PT_GR8(%r1), %r8
2042 LDREG PT_GR9(%r1), %r9
2043 LDREG PT_GR10(%r1),%r10
2044 LDREG PT_GR11(%r1),%r11
2045 LDREG PT_GR12(%r1),%r12
2046 LDREG PT_GR13(%r1),%r13
2047 LDREG PT_GR14(%r1),%r14
2048 LDREG PT_GR15(%r1),%r15
2049 LDREG PT_GR16(%r1),%r16
2050 LDREG PT_GR17(%r1),%r17
2051 LDREG PT_GR18(%r1),%r18
2052 LDREG PT_GR19(%r1),%r19
2053 LDREG PT_GR20(%r1),%r20
2054 LDREG PT_GR21(%r1),%r21
2055 LDREG PT_GR22(%r1),%r22
2056 LDREG PT_GR23(%r1),%r23
2057 LDREG PT_GR24(%r1),%r24
2058 LDREG PT_GR25(%r1),%r25
2059 LDREG PT_GR26(%r1),%r26
2060 LDREG PT_GR27(%r1),%r27
2061 LDREG PT_GR28(%r1),%r28
2062 LDREG PT_GR29(%r1),%r29
2063 LDREG PT_GR30(%r1),%r30
2064 LDREG PT_GR31(%r1),%r31
2066 ldo -PT_SZ_ALGN(%sp), %sp /* release the pt_regs area */
2067 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1 /* pop the frame and reload %r1 */
2068 /* Adjust return point to jump back to beginning of traced function */
2072 ENDPROC_CFI(ftrace_regs_caller)
/*
 * return_to_handler (CONFIG_FUNCTION_GRAPH_TRACER): trampoline
 * substituted by the graph tracer for a traced function's return
 * address.  Calls ftrace_return_to_handler(0) to retrieve the real
 * return address, then returns there with the original return values
 * restored.
 * NOTE(review): the save/restore of the return-value registers and the
 * final branch through the recovered address are elided from this
 * excerpt.
 */
2077 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2080 .export parisc_return_to_handler,data
2081 parisc_return_to_handler:
2083 STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
2085 STREGM %r1,FRAME_SIZE(%sp)
2093 /* call ftrace_return_to_handler(0) */
2094 .import ftrace_return_to_handler,code
2095 load32 ftrace_return_to_handler,%ret0
2096 load32 .Lftrace_ret,%r2 /* manual return linkage for the indirect call */
2098 ldo -16(%sp),%ret1 /* Reference param save area */
2107 /* restore original return values */
2111 /* return from function */
2117 LDREGM -FRAME_SIZE(%sp),%r3
2118 ENDPROC_CFI(return_to_handler)
2120 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2122 #endif /* CONFIG_FUNCTION_TRACER */
/*
 * call_on_stack: invoke func(param1) on a different (IRQ) stack, then
 * switch back.  Wide and narrow variants differ in how the function
 * pointer is resolved (64-bit: always a function descriptor; 32-bit:
 * descriptor only if the PLABEL bit is set).
 */
2124 #ifdef CONFIG_IRQSTACKS
2125 /* void call_on_stack(unsigned long param1, void *func,
2126 unsigned long new_stack) */
2127 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2128 ENTRY(_call_on_stack)
2131 /* Regarding the HPPA calling conventions for function pointers,
2132 we assume the PIC register is not changed across call. For
2133 CONFIG_64BIT, the argument pointer is left to point at the
2134 argument region allocated for the call to call_on_stack. */
2136 /* Switch to new stack. We allocate two frames. */
2137 ldo 2*FRAME_SIZE(%arg2), %sp /* %arg2 = base of the new stack */
2138 # ifdef CONFIG_64BIT
2139 /* Save previous stack pointer and return pointer in frame marker */
2140 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2141 /* Calls always use function descriptor */
2142 LDREG 16(%arg1), %arg1 /* fetch the code address from the OPD/function descriptor */
2144 STREG %r1, -FRAME_SIZE-REG_SZ(%sp) /* NOTE(review): %r1 presumably holds the old %sp (copy elided) — confirm */
2145 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2147 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp /* switch back to the original stack */
2149 /* Save previous stack pointer and return pointer in frame marker */
2150 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2151 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2152 /* Calls use function descriptor if PLABEL bit is set */
2153 bb,>=,n %arg1, 30, 1f
2155 LDREG 0(%arg1), %arg1 /* PLABEL set: load target address from the descriptor */
2157 be,l 0(%sr4,%arg1), %sr0, %r31 /* call func; link in %r31 */
2159 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2161 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp /* restore the original stack */
2162 # endif /* CONFIG_64BIT */
2163 ENDPROC_CFI(call_on_stack)
2164 #endif /* CONFIG_IRQSTACKS */
/*
 * get_register: copy the general register selected by %r8 into %r1 and
 * return via %r25.  Shadowed registers yield -1 instead, because the
 * rfir would restore their original values anyway (a genuine -1 value
 * merely forces the slow path, which is safe).
 * NOTE(review): the blr-based dispatch and each entry's delay-slot
 * copy/ldi instruction are elided from this excerpt.
 */
2166 ENTRY_CFI(get_register)
2168 * get_register is used by the non access tlb miss handlers to
2169 * copy the value of the general register specified in r8 into
2170 * r1. This routine can't be used for shadowed registers, since
2171 * the rfir will restore the original value. So, for the shadowed
2172 * registers we put a -1 into r1 to indicate that the register
2173 * should not be used (the register being copied could also have
2174 * a -1 in it, but that is OK, it just means that we will have
2175 * to use the slow path instead).
2179 bv %r0(%r25) /* r0 */
2181 bv %r0(%r25) /* r1 - shadowed */
2183 bv %r0(%r25) /* r2 */
2185 bv %r0(%r25) /* r3 */
2187 bv %r0(%r25) /* r4 */
2189 bv %r0(%r25) /* r5 */
2191 bv %r0(%r25) /* r6 */
2193 bv %r0(%r25) /* r7 */
2195 bv %r0(%r25) /* r8 - shadowed */
2197 bv %r0(%r25) /* r9 - shadowed */
2199 bv %r0(%r25) /* r10 */
2201 bv %r0(%r25) /* r11 */
2203 bv %r0(%r25) /* r12 */
2205 bv %r0(%r25) /* r13 */
2207 bv %r0(%r25) /* r14 */
2209 bv %r0(%r25) /* r15 */
2211 bv %r0(%r25) /* r16 - shadowed */
2213 bv %r0(%r25) /* r17 - shadowed */
2215 bv %r0(%r25) /* r18 */
2217 bv %r0(%r25) /* r19 */
2219 bv %r0(%r25) /* r20 */
2221 bv %r0(%r25) /* r21 */
2223 bv %r0(%r25) /* r22 */
2225 bv %r0(%r25) /* r23 */
2227 bv %r0(%r25) /* r24 - shadowed */
2229 bv %r0(%r25) /* r25 - shadowed */
2231 bv %r0(%r25) /* r26 */
2233 bv %r0(%r25) /* r27 */
2235 bv %r0(%r25) /* r28 */
2237 bv %r0(%r25) /* r29 */
2239 bv %r0(%r25) /* r30 */
2241 bv %r0(%r25) /* r31 */
2243 ENDPROC_CFI(get_register)
/*
 * set_register: copy %r1 into the general register selected by %r8 and
 * return via %r25 (inverse of get_register, used by the non-access TLB
 * miss handlers).
 * NOTE(review): the blr-based dispatch and each entry's delay-slot copy
 * instruction are elided from this excerpt.
 */
2246 ENTRY_CFI(set_register)
2248 * set_register is used by the non access tlb miss handlers to
2249 * copy the value of r1 into the general register specified in
2254 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2256 bv %r0(%r25) /* r1 */
2258 bv %r0(%r25) /* r2 */
2260 bv %r0(%r25) /* r3 */
2262 bv %r0(%r25) /* r4 */
2264 bv %r0(%r25) /* r5 */
2266 bv %r0(%r25) /* r6 */
2268 bv %r0(%r25) /* r7 */
2270 bv %r0(%r25) /* r8 */
2272 bv %r0(%r25) /* r9 */
2274 bv %r0(%r25) /* r10 */
2276 bv %r0(%r25) /* r11 */
2278 bv %r0(%r25) /* r12 */
2280 bv %r0(%r25) /* r13 */
2282 bv %r0(%r25) /* r14 */
2284 bv %r0(%r25) /* r15 */
2286 bv %r0(%r25) /* r16 */
2288 bv %r0(%r25) /* r17 */
2290 bv %r0(%r25) /* r18 */
2292 bv %r0(%r25) /* r19 */
2294 bv %r0(%r25) /* r20 */
2296 bv %r0(%r25) /* r21 */
2298 bv %r0(%r25) /* r22 */
2300 bv %r0(%r25) /* r23 */
2302 bv %r0(%r25) /* r24 */
2304 bv %r0(%r25) /* r25 */
2306 bv %r0(%r25) /* r26 */
2308 bv %r0(%r25) /* r27 */
2310 bv %r0(%r25) /* r28 */
2312 bv %r0(%r25) /* r29 */
2314 bv %r0(%r25) /* r30 */
2316 bv %r0(%r25) /* r31 */
2318 ENDPROC_CFI(set_register)