1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
5 * kernel entry points (interruptions, system call wrappers)
6 * Copyright (C) 1999,2000 Philipp Rumpf
7 * Copyright (C) 1999 SuSE GmbH Nuernberg
8 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
12 #include <asm/asm-offsets.h>
14 /* we have the following possibilities to act on an interruption:
15 * - handle in assembly and use shadowed registers only
16 * - save registers to kernel stack and handle in assembly or C */
20 #include <asm/cache.h> /* for L1_CACHE_SHIFT */
21 #include <asm/assembly.h> /* for LDREG/STREG defines */
22 #include <asm/signal.h>
23 #include <asm/unistd.h>
25 #include <asm/traps.h>
26 #include <asm/thread_info.h>
27 #include <asm/alternative.h>
29 #include <linux/linkage.h>
30 #include <linux/pgtable.h>
38 /* Get aligned page_table_lock address for this mm from cr28/tr4 */
43 /* space_to_prot macro creates a prot id from a space id */
45 #if (SPACEID_SHIFT) == 0
/* space_to_prot: build a TLB protection ID in \prot from space id \spc.
 * With SPACEID_SHIFT == 0 a single 64-bit shift-and-deposit suffices.
 * NOTE(review): the .endm / #else / #endif lines are elided in this
 * excerpt; the second .macro below is the SPACEID_SHIFT != 0 variant. */
46 .macro space_to_prot spc prot
47 depd,z \spc,62,31,\prot
/* SPACEID_SHIFT != 0: extract the (shifted) space id into \prot instead */
50 .macro space_to_prot spc prot
51 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
55 * The "get_stack" macros are responsible for determining the
59 * Already using a kernel stack, so call the
60 * get_stack_use_r30 macro to push a pt_regs structure
61 * on the stack, and store registers there.
63 * Need to set up a kernel stack, so call the
64 * get_stack_use_cr30 macro to set up a pointer
65 * to the pt_regs structure contained within the
66 * task pointer pointed to by cr30. Load the stack
67 * pointer from the task structure.
69 * Note that we use shadowed registers for temps until
70 * we can save %r26 and %r29. %r26 is used to preserve
71 * %r8 (a shadowed register) which temporarily contained
72 * either the fault type ("code") or the eirr. We need
73 * to use a non-shadowed register to carry the value over
74 * the rfir in virt_map. We use %r26 since this value winds
75 * up being passed as the argument to either do_cpu_irq_mask
76 * or handle_interruption. %r29 is used to hold a pointer to
77 * the register save area, and once again, it needs to
78 * be a non-shadowed register so that it survives the rfir.
81 .macro get_stack_use_cr30
83 /* we save the registers in the task struct */
/* Interruption arrived while on a user stack: switch to the kernel
 * stack of the task pointed to by %cr30 and save the initial register
 * set into the pt_regs area embedded in the task struct.
 * NOTE(review): several instructions (including the mfctl of %cr30
 * into %r1/%r9) are elided in this excerpt. */
87 tophys %r1,%r9 /* task_struct */
88 LDREG TASK_STACK(%r9),%r30
89 ldo PT_SZ_ALGN(%r30),%r30 /* %r30 = kernel sp above pt_regs area */
90 mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */
92 ldo TASK_REGS(%r9),%r9 /* %r9 = &task->pt_regs */
93 STREG %r17,PT_GR30(%r9)
94 STREG %r29,PT_GR29(%r9)
95 STREG %r26,PT_GR26(%r9)
96 STREG %r16,PT_SR7(%r9)
100 .macro get_stack_use_r30
102 /* we put a struct pt_regs on the stack and save the registers there */
/* Already on a kernel stack: allocate a pt_regs frame directly on the
 * current stack (%r30) and save the initial registers into it.
 * NOTE(review): the instructions establishing %r9 (frame base) are
 * elided in this excerpt. */
106 ldo PT_SZ_ALGN(%r30),%r30 /* advance sp past the new pt_regs */
107 STREG %r1,PT_GR30(%r9)
108 STREG %r29,PT_GR29(%r9)
109 STREG %r26,PT_GR26(%r9)
110 STREG %r16,PT_SR7(%r9)
115 LDREG PT_GR1(%r29), %r1
116 LDREG PT_GR30(%r29),%r30
117 LDREG PT_GR29(%r29),%r29
120 /* default interruption handler
121 * (calls traps.c:handle_interruption) */
128 /* Interrupt interruption handler
129 * (calls irq.c:do_cpu_irq_mask) */
136 .import os_hpmc, code
140 nop /* must be a NOP, will be patched later */
141 load32 PA(os_hpmc), %r3
144 .word 0 /* checksum (will be patched) */
145 .word 0 /* address of handler */
146 .word 0 /* length of handler */
150 * Performance Note: Instructions will be moved up into
151 * this part of the code later on, once we are sure
152 * that the tlb miss handlers are close to final form.
155 /* Register definitions for tlb miss handler macros */
157 va = r8 /* virtual address for which the trap occurred */
158 spc = r24 /* space for which the trap occurred */
163 * itlb miss interruption handler (parisc 1.1 - 32 bit)
177 * itlb miss interruption handler (parisc 2.0)
194 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
197 .macro naitlb_11 code
208 * naitlb miss interruption handler (parisc 2.0)
211 .macro naitlb_20 code
226 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
240 * dtlb miss interruption handler (parisc 2.0)
257 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
259 .macro nadtlb_11 code
269 /* nadtlb miss interruption handler (parisc 2.0) */
271 .macro nadtlb_20 code
286 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
300 * dirty bit trap interruption handler (parisc 2.0)
316 /* In LP64, the space contains part of the upper 32 bits of the
317 * fault. We have to extract this and place it in the va,
318 * zeroing the corresponding bits in the space register */
319 .macro space_adjust spc,va,tmp
/* LP64 only: the low SPACEID_SHIFT bits of the space register carry
 * the upper bits of the 64-bit fault address. Move them into \va and
 * clear them from \spc (see comment block above). */
321 extrd,u \spc,63,SPACEID_SHIFT,\tmp /* grab the address bits from spc */
322 depd %r0,63,SPACEID_SHIFT,\spc /* zero them in the space id */
323 depd \tmp,31,SPACEID_SHIFT,\va /* deposit into top of va */
327 .import swapper_pg_dir,code
329 /* Get the pgd. For faults on space zero (kernel space), this
330 * is simply swapper_pg_dir. For user space faults, the
331 * pgd is stored in %cr25 */
332 .macro get_pgd spc,reg
/* Load \reg with the pgd base: swapper_pg_dir for kernel space (spc==0).
 * The or,COND(=) nullifies the following insn when \spc is zero;
 * NOTE(review): the user-space path (pgd from %cr25) is elided in this
 * excerpt. */
333 ldil L%PA(swapper_pg_dir),\reg
334 ldo R%PA(swapper_pg_dir)(\reg),\reg
335 or,COND(=) %r0,\spc,%r0 /* nullify next insn if kernel space */
340 space_check(spc,tmp,fault)
342 spc - The space we saw the fault with.
343 tmp - The place to store the current space.
344 fault - Function to call on failure.
346 Only allow faults on different spaces from the
347 currently active one if we're the kernel
/* Branch to \fault if the faulting space \spc differs from the current
 * space, unless we are executing as the kernel (see comment at L113-118). */
350 .macro space_check spc,tmp,fault
352 /* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
353 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
354 * as kernel, so defeat the space
357 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
358 cmpb,COND(<>),n \tmp,\spc,\fault
361 /* Look up a PTE in a 2-Level scheme (faulting at each
362 * level if the entry isn't present
364 * NOTE: we use ldw even for LP64, since the short pointers
365 * can address up to 1TB
/* Two-level page-table walk: index the pmd by \va, branch to \fault if
 * the entry is not present, then compute the pte address into \pmd.
 * Uses ldw even on LP64 because short pointers address up to 1TB (see
 * NOTE above). NOTE(review): #else/#endif lines of the two #if blocks
 * are elided in this excerpt. */
367 .macro L2_ptep pmd,pte,index,va,fault
368 #if CONFIG_PGTABLE_LEVELS == 3
369 extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
371 extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
373 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
374 #if CONFIG_PGTABLE_LEVELS < 3
377 ldw,s \index(\pmd),\pmd /* short-pointer indexed load of pmd entry */
378 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault /* not present -> fault */
379 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
380 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd /* entry value -> page table address */
381 extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
382 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
383 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
386 /* Look up PTE in a 3-Level scheme. */
/* Three-level walk: resolve the pgd level, then delegate the remaining
 * two levels to L2_ptep. Faults to \fault on a non-present pgd entry. */
387 .macro L3_ptep pgd,pte,index,va,fault
388 #if CONFIG_PGTABLE_LEVELS == 3
390 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
391 ldw,s \index(\pgd),\pgd /* short-pointer indexed load of pgd entry */
392 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault /* not present -> fault */
393 shld \pgd,PxD_VALUE_SHIFT,\pgd /* entry value -> pmd base address */
395 L2_ptep \pgd,\pte,\index,\va,\fault
398 /* Acquire page_table_lock and check page is present. */
399 .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault
400 #ifdef CONFIG_TLB_PTLOCK
/* Spin on the page_table_lock with LDCW (reads 0 while held), then load
 * the pte and branch to \fault if it is not present. The 98:/99: region
 * is ALTERNATIVE-patched to NOPs on non-SMP kernels.
 * NOTE(review): some instructions (lock-address load, 3: label, #endif)
 * are elided in this excerpt. */
401 98: cmpib,COND(=),n 0,\spc,2f /* kernel space: skip the lock */
403 1: LDCW 0(\tmp),\tmp1 /* atomic load-and-clear of lock word */
404 cmpib,COND(=) 0,\tmp1,1b /* 0 = locked by someone else: spin */
407 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
410 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
412 2: LDREG 0(\ptp),\pte /* unlocked path: just load the pte */
413 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
417 /* Release page_table_lock without reloading lock address.
418 Note that the values in the register spc are limited to
419 NR_SPACE_IDS (262144). Thus, the stw instruction always
420 stores a nonzero value even when register spc is 64 bits.
421 We use an ordered store to ensure all prior accesses are
422 performed prior to releasing the lock. */
/* Release the page_table_lock without reloading its address (see the
 * comment block above: spc is always nonzero when stored, and an
 * ordered store orders prior accesses before the release).
 * NOTE(review): the store instruction and #endif are elided here. */
423 .macro ptl_unlock0 spc,tmp
424 #ifdef CONFIG_TLB_PTLOCK
425 98: or,COND(=) %r0,\spc,%r0 /* nullify release if kernel space */
427 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
431 /* Release page_table_lock. */
/* Release the page_table_lock, reloading the lock address first.
 * NOTE(review): the address-reload instruction(s) between 433 and 435
 * are elided in this excerpt; the actual release is ptl_unlock0. */
432 .macro ptl_unlock1 spc,tmp
433 #ifdef CONFIG_TLB_PTLOCK
435 ptl_unlock0 \spc,\tmp
436 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
440 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
441 * don't needlessly dirty the cache line if it was already set */
/* Set _PAGE_ACCESSED in the pte, skipping the store when the bit is
 * already set so the cache line is not dirtied needlessly (see comment
 * above). NOTE(review): the or/store instructions are elided here. */
442 .macro update_accessed ptp,pte,tmp,tmp1
443 ldi _PAGE_ACCESSED,\tmp1
445 and,COND(<>) \tmp1,\pte,%r0 /* nullify update if bit already set */
449 /* Set the dirty bit (and accessed bit). No need to be
450 * clever, this is only used from the dirty fault */
/* Set _PAGE_DIRTY and _PAGE_ACCESSED unconditionally; only reached from
 * the dirty-bit fault path so no already-set optimization is needed.
 * NOTE(review): the or/store instructions are elided in this excerpt. */
451 .macro update_dirty ptp,pte,tmp
452 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
457 /* We have (depending on the page size):
458 * - 38 to 52-bit Physical Page Number
459 * - 12 to 26-bit page offset
461 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
462 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
463 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
464 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
466 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
/* Strip protection bits from \pte and convert it to the 4k-based page
 * frame format that iitlbt/idtlbt expect, depositing the page-size
 * encoding in the low bits. With CONFIG_HUGETLB_PAGE, a huge-page pte
 * (tested via _PAGE_HPAGE_BIT) gets the huge-page size encoding instead.
 * NOTE(review): the copy of \pte into \tmp before line 470 and the
 * closing #endif are elided in this excerpt. */
467 .macro convert_for_tlb_insert20 pte,tmp
468 #ifdef CONFIG_HUGETLB_PAGE
470 extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
471 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
473 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
474 (63-58)+PAGE_ADD_SHIFT,\pte
475 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
476 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
477 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
478 #else /* Huge pages disabled */
479 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
480 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
481 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
482 (63-58)+PAGE_ADD_SHIFT,\pte
486 /* Convert the pte and prot to tlb insertion values. How
487 * this happens is quite subtle, read below */
/* Build the prot word and convert \pte for a PA 2.0 TLB insert
 * (iitlbt/idtlbt). See the extensive inline commentary below.
 * NOTE(review): several deposit instructions between the commented
 * steps are elided in this excerpt, as is the closing .endm. */
488 .macro make_insert_tlb spc,pte,prot,tmp
489 space_to_prot \spc \prot /* create prot id from space */
490 /* The following is the real subtlety. This is depositing
491 * T <-> _PAGE_REFTRAP
493 * B <-> _PAGE_DMB (memory break)
495 * Then incredible subtlety: The access rights are
496 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
497 * See 3-14 of the parisc 2.0 manual
499 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
500 * trigger an access rights trap in user space if the user
501 * tries to read an unreadable page */
502 #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
503 /* need to drop DMB bit, as it's used as SPECIAL flag */
504 depi 0,_PAGE_SPECIAL_BIT,1,\pte
508 /* PAGE_USER indicates the page can be read with user privileges,
509 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
510 * contains _PAGE_READ) */
511 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
513 /* If we're a gateway page, drop PL2 back to zero for promotion
514 * to kernel privilege (so we can execute the page as kernel).
515 * Any privilege promotion page always denies read and write */
516 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
517 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
519 /* Enforce uncacheable pages.
520 * This should ONLY be used for MMIO on PA 2.0 machines.
521 * Memory/DMA is cache coherent on all PA2.0 machines we support
522 * (that means T-class is NOT supported) and the memory controllers
523 * on most of those machines only handles cache transactions.
525 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
528 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
529 convert_for_tlb_insert20 \pte \tmp
532 /* Identical macro to make_insert_tlb above, except it
533 * makes the tlb entry for the differently formatted pa11
534 * insertion instructions */
/* PA 1.1 counterpart of make_insert_tlb: builds \prot and converts
 * \pte for the 32-bit iitlba/idtlba insertion format (see comment
 * above). NOTE(review): some deposit instructions between the tests
 * are elided in this excerpt, as is the closing .endm. */
535 .macro make_insert_tlb_11 spc,pte,prot
536 #if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
537 /* need to drop DMB bit, as it's used as SPECIAL flag */
538 depi 0,_PAGE_SPECIAL_BIT,1,\pte
540 zdep \spc,30,15,\prot /* prot id from space, pa11 layout */
542 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0 /* test uncacheable */
544 extru,= \pte,_PAGE_USER_BIT,1,%r0 /* test user-readable */
545 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
546 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0 /* test gateway page */
547 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
549 /* Get rid of prot bits and convert to page addr for iitlba */
551 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
552 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
555 /* This is for ILP32 PA2.0 only. The TLB insertion needs
556 * to extend into I/O space if the address is 0xfXXXXXXX
557 * so we extend the f's into the top word of the pte in
/* ILP32 PA 2.0 only: sign-extend an 0xfXXXXXXX address into the top
 * word of the pte so the TLB insertion reaches I/O space (see comment
 * above). NOTE(review): the conditional test between the two extracts
 * is elided in this excerpt. */
559 .macro f_extend pte,tmp
560 extrd,s \pte,42,4,\tmp
562 extrd,s \pte,63,25,\pte /* sign-extend the low word upward */
565 /* The alias region is comprised of a pair of 4 MB regions
566 * aligned to 8 MB. It is used to clear/copy/flush user pages
567 * using kernel virtual addresses congruent with the user
570 * To use the alias page, you set %r26 up with the to TLB
571 * entry (identifying the physical page) and %r23 up with
572 * the from tlb entry (or nothing if only a to entry---for
573 * clear_user_page_asm) */
/* Handle a fault inside the tmpalias region (kernel-virtual aliases of
 * user pages used by clear/copy/flush, see comment above): verify the
 * address is in range, pick the "from" (%r23) or "to" (%r26) physical
 * page, and build \pte/\prot for insertion. \patype selects PA 2.0 (20)
 * vs PA 1.1 (11) deposit forms. NOTE(review): the .ifc/.else directives
 * selecting on \patype and several other lines are elided here. */
574 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
575 cmpib,COND(<>),n 0,\spc,\fault /* user space fault: not an alias */
576 ldil L%(TMPALIAS_MAP_START),\tmp
578 depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1 /* round va down to region */
579 cmpb,COND(<>),n \tmp,\tmp1,\fault /* outside tmpalias: real fault */
580 mfctl %cr19,\tmp /* iir */
581 /* get the opcode (first six bits) into \tmp */
582 extrw,u \tmp,5,6,\tmp
584 * Only setting the T bit prevents data cache movein
585 * Setting access rights to zero prevents instruction cache movein
587 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
588 * to type field and _PAGE_READ goes to top bit of PL1
590 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
592 * so if the opcode is one (i.e. this is a memory management
593 * instruction) nullify the next load so \prot is only T.
594 * Otherwise this is a normal data operation
596 cmpiclr,= 0x01,\tmp,%r0
597 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
599 depd,z \prot,8,7,\prot /* PA 2.0 prot placement */
602 depw,z \prot,8,7,\prot /* PA 1.1 prot placement */
604 .error "undefined PA type to do_alias"
608 * OK, it is in the temp alias region, check whether "from" or "to".
609 * Check "subtle" note in pacache.S re: r23/r26.
611 extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0
612 or,COND(tr) %r23,%r0,\pte /* "from" half: take %r23 */
615 /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
616 SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
617 depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
622 * Fault_vectors are architecturally required to be aligned on a 2K
629 ENTRY(fault_vector_20)
630 /* First vector is invalid (0) */
631 .ascii "cows can fly"
640 itlb_20 PARISC_ITLB_TRAP
672 ENTRY(fault_vector_11)
673 /* First vector is invalid (0) */
674 .ascii "cows can fly"
683 itlb_11 PARISC_ITLB_TRAP
712 /* Fault vector is separately protected and *must* be on its own page */
715 .import handle_interruption,code
716 .import do_cpu_irq_mask,code
721 * copy_thread moved args into task save area.
/* First return path of a freshly forked kernel thread: run
 * schedule_tail, reload the thread function/argument that copy_thread
 * placed in the task save area (see comment above), then fall into
 * finish_child_return. NOTE(review): the call through the loaded
 * function pointer is elided in this excerpt. */
724 ENTRY(ret_from_kernel_thread)
725 /* Call schedule_tail first though */
726 BL schedule_tail, %r2
729 mfctl %cr30,%r1 /* task_struct */
730 LDREG TASK_PT_GR25(%r1), %r26 /* thread argument */
732 LDREG TASK_PT_GR27(%r1), %r27 /* kernel dp */
734 LDREG TASK_PT_GR26(%r1), %r1 /* thread function */
737 b finish_child_return
739 END(ret_from_kernel_thread)
743 * struct task_struct *_switch_to(struct task_struct *prev,
744 * struct task_struct *next)
746 * switch kernel stacks and return prev */
/* struct task_struct *_switch_to(prev=%r26, next=%r25)
 * Save prev's continuation PC and kernel SP into its thread area, load
 * next's, and resume next at _switch_to_ret; returns prev (per the
 * comment above). NOTE(review): callee-save register save/restore and
 * the cr30 update are elided in this excerpt. */
747 ENTRY_CFI(_switch_to)
748 STREG %r2, -RP_OFFSET(%r30) /* save return pointer on stack */
753 load32 _switch_to_ret, %r2
755 STREG %r2, TASK_PT_KPC(%r26) /* prev resumes at _switch_to_ret */
756 LDREG TASK_PT_KPC(%r25), %r2 /* next's saved continuation PC */
758 STREG %r30, TASK_PT_KSP(%r26) /* save prev's kernel sp */
759 LDREG TASK_PT_KSP(%r25), %r30 /* switch to next's kernel sp */
763 ENTRY(_switch_to_ret)
764 mtctl %r0, %cr0 /* Needed for single stepping */
768 LDREG -RP_OFFSET(%r30), %r2 /* reload return pointer */
771 ENDPROC_CFI(_switch_to)
774 * Common rfi return path for interruptions, kernel execve, and
775 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
776 * return via this path if the signal was received when the process
777 * was running; if the process was blocked on a syscall then the
778 * normal syscall_exit path is used. All syscalls for traced
779 * processes exit via intr_restore.
781 * XXX If any syscalls that change a processes space id ever exit
782 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
/* Common rfi return path (see comment block above). This entry portion
 * sanitizes the user-visible state in pt_regs before the rfi: force the
 * iaoq privilege bits to user, filter the PSW, and reset the space
 * registers. NOTE(review): a few instructions between the numbered
 * lines are elided in this excerpt (e.g. the load that puts the user
 * space id in %r19 before the PT_SR* stores). */
789 ENTRY_CFI(syscall_exit_rfi)
790 mfctl %cr30,%r16 /* task_struct */
791 ldo TASK_REGS(%r16),%r16 /* %r16 = &task->pt_regs */
792 /* Force iaoq to userspace, as the user has had access to our current
793 * context via sigcontext. Also Filter the PSW for the same reason.
795 LDREG PT_IAOQ0(%r16),%r19
796 depi PRIV_USER,31,2,%r19 /* force user privilege level */
797 STREG %r19,PT_IAOQ0(%r16)
798 LDREG PT_IAOQ1(%r16),%r19
799 depi PRIV_USER,31,2,%r19
800 STREG %r19,PT_IAOQ1(%r16)
801 LDREG PT_PSW(%r16),%r19
802 load32 USER_PSW_MASK,%r1
804 load32 USER_PSW_HI_MASK,%r20
807 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
809 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
810 STREG %r19,PT_PSW(%r16)
813 * If we aren't being traced, we never saved space registers
814 * (we don't store them in the sigcontext), so set them
815 * to "proper" values now (otherwise we'll wind up restoring
816 * whatever was last stored in the task structure, which might
817 * be inconsistent if an interrupt occurred while on the gateway
818 * page). Note that we may be "trashing" values the user put in
819 * them, but we don't support the user changing them.
822 STREG %r0,PT_SR2(%r16) /* sr2 = 0 so the gateway page works */
824 STREG %r19,PT_SR0(%r16)
825 STREG %r19,PT_SR1(%r16)
826 STREG %r19,PT_SR3(%r16)
827 STREG %r19,PT_SR4(%r16)
828 STREG %r19,PT_SR5(%r16)
829 STREG %r19,PT_SR6(%r16)
830 STREG %r19,PT_SR7(%r16)
833 /* check for reschedule */
835 LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
836 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
838 .import do_notify_resume,code
842 LDREG TASK_TI_FLAGS(%r1),%r19
843 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
844 and,COND(<>) %r19, %r20, %r0
845 b,n intr_restore /* skip past if we've nothing to do */
847 /* This check is critical to having LWS
848 * working. The IASQ is zero on the gateway
849 * page and we cannot deliver any signals until
850 * we get off the gateway page.
852 * Only do signals if we are returning to user space
854 LDREG PT_IASQ0(%r16), %r20
855 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
856 LDREG PT_IASQ1(%r16), %r20
857 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
859 copy %r0, %r25 /* long in_syscall = 0 */
861 ldo -16(%r30),%r29 /* Reference param save area */
864 /* NOTE: We need to enable interrupts if we have to deliver
865 * signals. We used to do this earlier but it caused kernel
866 * stack overflows. */
869 BL do_notify_resume,%r2
870 copy %r16, %r26 /* struct pt_regs *regs */
876 ldo PT_FR31(%r29),%r1
880 /* inverse of virt_map */
882 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
885 /* Restore space id's and special cr's from PT_REGS
886 * structure pointed to by r29
890 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
891 * It also restores r1 and r30.
898 #ifndef CONFIG_PREEMPTION
899 # define intr_do_preempt intr_restore
900 #endif /* !CONFIG_PREEMPTION */
902 .import schedule,code
904 /* Only call schedule on return to userspace. If we're returning
905 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
906 * we jump back to intr_restore.
908 LDREG PT_IASQ0(%r16), %r20
909 cmpib,COND(=) 0, %r20, intr_do_preempt
911 LDREG PT_IASQ1(%r16), %r20
912 cmpib,COND(=) 0, %r20, intr_do_preempt
915 /* NOTE: We need to enable interrupts if we schedule. We used
916 * to do this earlier but it caused kernel stack overflows. */
920 ldo -16(%r30),%r29 /* Reference param save area */
923 ldil L%intr_check_sig, %r2
927 load32 schedule, %r20
930 ldo R%intr_check_sig(%r2), %r2
932 /* preempt the current task on returning to kernel
933 * mode from an interrupt, iff need_resched is set,
934 * and preempt_count is 0. otherwise, we continue on
935 * our merry way back to the current running task.
937 #ifdef CONFIG_PREEMPTION
938 .import preempt_schedule_irq,code
940 rsm PSW_SM_I, %r0 /* disable interrupts */
942 /* current_thread_info()->preempt_count */
944 ldw TI_PRE_COUNT(%r1), %r19
945 cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */
946 nop /* prev insn branched backwards */
948 /* check if we interrupted a critical path */
949 LDREG PT_PSW(%r16), %r20
950 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
953 /* ssm PSW_SM_I done later in intr_restore */
954 #ifdef CONFIG_MLONGCALLS
955 ldil L%intr_restore, %r2
956 load32 preempt_schedule_irq, %r1
958 ldo R%intr_restore(%r2), %r2
960 ldil L%intr_restore, %r1
961 BL preempt_schedule_irq, %r2
962 ldo R%intr_restore(%r1), %r2
964 #endif /* CONFIG_PREEMPTION */
967 * External interrupts.
971 cmpib,COND(=),n 0,%r16,1f
983 ldo PT_FR0(%r29), %r24
988 copy %r29, %r26 /* arg0 is pt_regs */
989 copy %r29, %r16 /* save pt_regs */
991 ldil L%intr_return, %r2
994 ldo -16(%r30),%r29 /* Reference param save area */
998 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
999 ENDPROC_CFI(syscall_exit_rfi)
1002 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
/* Generic interruption entry: save state, capture isr/ior (skipped for
 * itlb misses, which have no data address), then branch to the C
 * handle_interruption with the return continuation set to
 * intr_check_sig. NOTE(review): numerous instructions (stack setup,
 * the mfctl reads of isr/ior, skip_save_ior label, save_fr call) are
 * elided in this excerpt. */
1004 ENTRY_CFI(intr_save) /* for os_hpmc */
1006 cmpib,COND(=),n 0,%r16,1f /* on kernel stack already? */
1018 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1019 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
1023 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1029 * If the interrupted code was running with W bit off (32 bit),
1030 * clear the b bits (bits 0 & 1) in the ior.
1031 * save_specials left ipsw value in r8 for us to test.
1033 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1036 /* adjust isr/ior: get high bits from isr and deposit in ior */
1037 space_adjust %r16,%r17,%r1
1039 STREG %r16, PT_ISR(%r29)
1040 STREG %r17, PT_IOR(%r29)
1042 #if 0 && defined(CONFIG_64BIT)
1043 /* Revisit when we have 64-bit code above 4Gb */
1047 /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
1048 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1051 extrd,u,* %r8,PSW_W_BIT,1,%r1
1052 cmpib,COND(=),n 1,%r1,intr_save2
1053 LDREG PT_IASQ0(%r29), %r16
1054 LDREG PT_IAOQ0(%r29), %r17
1055 /* adjust iasq/iaoq */
1056 space_adjust %r16,%r17,%r1
1057 STREG %r16, PT_IASQ0(%r29)
1058 STREG %r17, PT_IAOQ0(%r29)
1067 ldo PT_FR0(%r29), %r25 /* fp save area for save_fp */
1072 copy %r29, %r25 /* arg1 is pt_regs */
1074 ldo -16(%r30),%r29 /* Reference param save area */
1077 ldil L%intr_check_sig, %r2
1078 copy %r25, %r16 /* save pt_regs */
1080 b handle_interruption /* C handler; returns to intr_check_sig */
1081 ldo R%intr_check_sig(%r2), %r2
1082 ENDPROC_CFI(intr_save)
1086 * Note for all tlb miss handlers:
1088 * cr24 contains a pointer to the kernel address space
1091 * cr25 contains a pointer to the current user address
1092 * space page directory.
1094 * sr3 will contain the space id of the user address space
1095 * of the current running thread while that thread is
1096 * running in the kernel.
1100 * register number allocations. Note that these are all
1101 * in the shadowed registers
1104 t0 = r1 /* temporary register 0 */
1105 va = r8 /* virtual address for which the trap occurred */
1106 t1 = r9 /* temporary register 1 */
1107 pte = r16 /* pte/phys page # */
1108 prot = r17 /* prot bits */
1109 spc = r24 /* space for which the trap occurred */
1110 ptp = r25 /* page directory/page table pointer */
1115 space_adjust spc,va,t0
1117 space_check spc,t0,dtlb_fault
1119 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1121 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1122 update_accessed ptp,pte,t0,t1
1124 make_insert_tlb spc,pte,prot,t1
1132 dtlb_check_alias_20w:
1133 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1141 space_adjust spc,va,t0
1143 space_check spc,t0,nadtlb_fault
1145 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1147 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1148 update_accessed ptp,pte,t0,t1
1150 make_insert_tlb spc,pte,prot,t1
1158 nadtlb_check_alias_20w:
1159 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1171 space_check spc,t0,dtlb_fault
1173 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1175 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1176 update_accessed ptp,pte,t0,t1
1178 make_insert_tlb_11 spc,pte,prot
1180 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1183 idtlba pte,(%sr1,va)
1184 idtlbp prot,(%sr1,va)
1186 mtsp t1, %sr1 /* Restore sr1 */
1192 dtlb_check_alias_11:
1193 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1204 space_check spc,t0,nadtlb_fault
1206 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1208 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1209 update_accessed ptp,pte,t0,t1
1211 make_insert_tlb_11 spc,pte,prot
1213 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1216 idtlba pte,(%sr1,va)
1217 idtlbp prot,(%sr1,va)
1219 mtsp t1, %sr1 /* Restore sr1 */
1225 nadtlb_check_alias_11:
1226 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1235 space_adjust spc,va,t0
1237 space_check spc,t0,dtlb_fault
1239 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1241 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1242 update_accessed ptp,pte,t0,t1
1244 make_insert_tlb spc,pte,prot,t1
1254 dtlb_check_alias_20:
1255 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1265 space_check spc,t0,nadtlb_fault
1267 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1269 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1270 update_accessed ptp,pte,t0,t1
1272 make_insert_tlb spc,pte,prot,t1
1282 nadtlb_check_alias_20:
1283 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1295 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
1296 * probei instructions. The kernel no longer faults doing flushes.
1297 * Use of lpa and probe instructions is rare. Given the issue
1298 * with shadow registers, we defer everything to the "slow" path.
1306 * I miss is a little different, since we allow users to fault
1307 * on the gateway page which is in the kernel address space.
1310 space_adjust spc,va,t0
1312 space_check spc,t0,itlb_fault
1314 L3_ptep ptp,pte,t0,va,itlb_fault
1316 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1317 update_accessed ptp,pte,t0,t1
1319 make_insert_tlb spc,pte,prot,t1
1330 * I miss is a little different, since we allow users to fault
1331 * on the gateway page which is in the kernel address space.
1334 space_adjust spc,va,t0
1336 space_check spc,t0,naitlb_fault
1338 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1340 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1341 update_accessed ptp,pte,t0,t1
1343 make_insert_tlb spc,pte,prot,t1
1351 naitlb_check_alias_20w:
1352 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1364 space_check spc,t0,itlb_fault
1366 L2_ptep ptp,pte,t0,va,itlb_fault
1368 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1369 update_accessed ptp,pte,t0,t1
1371 make_insert_tlb_11 spc,pte,prot
1373 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1376 iitlba pte,(%sr1,va)
1377 iitlbp prot,(%sr1,va)
1379 mtsp t1, %sr1 /* Restore sr1 */
1388 space_check spc,t0,naitlb_fault
1390 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1392 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1393 update_accessed ptp,pte,t0,t1
1395 make_insert_tlb_11 spc,pte,prot
1397 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1400 iitlba pte,(%sr1,va)
1401 iitlbp prot,(%sr1,va)
1403 mtsp t1, %sr1 /* Restore sr1 */
1409 naitlb_check_alias_11:
1410 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1412 iitlba pte,(%sr0, va)
1413 iitlbp prot,(%sr0, va)
1422 space_check spc,t0,itlb_fault
1424 L2_ptep ptp,pte,t0,va,itlb_fault
1426 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1427 update_accessed ptp,pte,t0,t1
1429 make_insert_tlb spc,pte,prot,t1
1442 space_check spc,t0,naitlb_fault
1444 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1446 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1447 update_accessed ptp,pte,t0,t1
1449 make_insert_tlb spc,pte,prot,t1
1459 naitlb_check_alias_20:
1460 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1472 space_adjust spc,va,t0
1474 space_check spc,t0,dbit_fault
1476 L3_ptep ptp,pte,t0,va,dbit_fault
1478 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1479 update_dirty ptp,pte,t1
1481 make_insert_tlb spc,pte,prot,t1
1494 space_check spc,t0,dbit_fault
1496 L2_ptep ptp,pte,t0,va,dbit_fault
1498 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1499 update_dirty ptp,pte,t1
1501 make_insert_tlb_11 spc,pte,prot
1503 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1506 idtlba pte,(%sr1,va)
1507 idtlbp prot,(%sr1,va)
1509 mtsp t1, %sr1 /* Restore sr1 */
1518 space_check spc,t0,dbit_fault
1520 L2_ptep ptp,pte,t0,va,dbit_fault
1522 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1523 update_dirty ptp,pte,t1
1525 make_insert_tlb spc,pte,prot,t1
1536 .import handle_interruption,code
1540 ldi 31,%r8 /* Use an unused code */
1548 ldi PARISC_ITLB_TRAP,%r8
1562 /* Register saving semantics for system calls:
1564 %r1 clobbered by system call macro in userspace
1565 %r2 saved in PT_REGS by gateway page
1566 %r3 - %r18 preserved by C code (saved by signal code)
1567 %r19 - %r20 saved in PT_REGS by gateway page
1568 %r21 - %r22 non-standard syscall args
1569 stored in kernel stack by gateway page
1570 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1571 %r27 - %r30 saved in PT_REGS by gateway page
1572 %r31 syscall return pointer
1575 /* Floating point registers (FIXME: what do we do with these?)
1577 %fr0 - %fr3 status/exception, not preserved
1578 %fr4 - %fr7 arguments
1579 %fr8 - %fr11 not preserved by C code
1580 %fr12 - %fr21 preserved by C code
1581 %fr22 - %fr31 not preserved by C code
/* Save the C-callee-saved general registers %r3-%r18 into the pt_regs
 * area pointed to by \regs (used before signal delivery; see the
 * register-saving semantics comment above). */
1584 .macro reg_save regs
1585 STREG %r3, PT_GR3(\regs)
1586 STREG %r4, PT_GR4(\regs)
1587 STREG %r5, PT_GR5(\regs)
1588 STREG %r6, PT_GR6(\regs)
1589 STREG %r7, PT_GR7(\regs)
1590 STREG %r8, PT_GR8(\regs)
1591 STREG %r9, PT_GR9(\regs)
1592 STREG %r10,PT_GR10(\regs)
1593 STREG %r11,PT_GR11(\regs)
1594 STREG %r12,PT_GR12(\regs)
1595 STREG %r13,PT_GR13(\regs)
1596 STREG %r14,PT_GR14(\regs)
1597 STREG %r15,PT_GR15(\regs)
1598 STREG %r16,PT_GR16(\regs)
1599 STREG %r17,PT_GR17(\regs)
1600 STREG %r18,PT_GR18(\regs)
/* Inverse of reg_save: reload %r3-%r18 from the pt_regs area \regs. */
1603 .macro reg_restore regs
1604 LDREG PT_GR3(\regs), %r3
1605 LDREG PT_GR4(\regs), %r4
1606 LDREG PT_GR5(\regs), %r5
1607 LDREG PT_GR6(\regs), %r6
1608 LDREG PT_GR7(\regs), %r7
1609 LDREG PT_GR8(\regs), %r8
1610 LDREG PT_GR9(\regs), %r9
1611 LDREG PT_GR10(\regs),%r10
1612 LDREG PT_GR11(\regs),%r11
1613 LDREG PT_GR12(\regs),%r12
1614 LDREG PT_GR13(\regs),%r13
1615 LDREG PT_GR14(\regs),%r14
1616 LDREG PT_GR15(\regs),%r15
1617 LDREG PT_GR16(\regs),%r16
1618 LDREG PT_GR17(\regs),%r17
1619 LDREG PT_GR18(\regs),%r18
/* Generate a sys_<name>_wrapper that stashes state in the task's
 * pt_regs and tail-branches to the real sys_\name via %sr4 (external
 * branch, so the STREG in the delay slot executes first).
 * NOTE(review): some instructions between 1625 and 1628 are elided in
 * this excerpt, as is the closing .endm. */
1622 .macro fork_like name
1623 ENTRY_CFI(sys_\name\()_wrapper)
1625 ldo TASK_REGS(%r1),%r1 /* %r1 = &task->pt_regs */
1628 ldil L%sys_\name, %r31
1629 be R%sys_\name(%sr4,%r31) /* tail-call the real syscall */
1630 STREG %r28, PT_CR27(%r1) /* delay slot */
1631 ENDPROC_CFI(sys_\name\()_wrapper)
1639 /* Set the return value for the child */
1641 BL schedule_tail, %r2
1643 finish_child_return:
1645 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1647 LDREG PT_CR27(%r1), %r3
/* Wrapper for rt_sigreturn: call the C sys_rt_sigreturn (which rebuilds
 * pt_regs from the sigcontext), then reload r2/r28 so the return goes
 * through syscall_exit or syscall_exit_rfi as appropriate (see the
 * comment near the end). NOTE(review): the two BL sequences at 1661/1664
 * are the two halves of an elided #ifdef (64-bit vs 32-bit frame
 * ordering); other instructions are elided in this excerpt. */
1654 ENTRY_CFI(sys_rt_sigreturn_wrapper)
1656 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1657 /* Don't save regs, we are going to restore them from sigcontext. */
1658 STREG %r2, -RP_OFFSET(%r30)
1660 ldo FRAME_SIZE(%r30), %r30
1661 BL sys_rt_sigreturn,%r2
1662 ldo -16(%r30),%r29 /* Reference param save area */
1664 BL sys_rt_sigreturn,%r2
1665 ldo FRAME_SIZE(%r30), %r30
1668 ldo -FRAME_SIZE(%r30), %r30
1669 LDREG -RP_OFFSET(%r30), %r2 /* reload saved return pointer */
1671 /* FIXME: I think we need to restore a few more things here. */
1673 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1676 /* If the signal was received while the process was blocked on a
1677 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1678 * take us to syscall_exit_rfi and on to intr_return.
1681 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1682 ENDPROC_CFI(sys_rt_sigreturn_wrapper)
/*
 * Common syscall exit path (the syscall_exit label itself is elided
 * from this excerpt).  Stores the syscall return value into pt_regs,
 * then loops over exit work: reschedule first, then pending signals /
 * notify-resume work, before falling through to syscall_restore.
 */
1685 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1686 * via syscall_exit_rfi if the signal was received while the process
 */
1690 /* save return value now */
1692 STREG %r28,TASK_PT_GR28(%r1)
1694 /* Seems to me that dp could be wrong here, if the syscall involved
1695 * calling a module, and nothing got round to restoring dp on return.
 */
1699 syscall_check_resched:
1701 /* check for reschedule */
1703 LDREG TASK_TI_FLAGS(%r19),%r19 /* long */
1704 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
/*
 * Any other pending user work?  Mask out NEED_RESCHED (handled above)
 * and skip straight to the restore path if nothing else is set.
 */
1706 .import do_signal,code
1709 LDREG TASK_TI_FLAGS(%r19),%r19
1710 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1711 and,COND(<>) %r19, %r26, %r0
1712 b,n syscall_restore /* skip past if we've nothing to do */
1715 /* Save callee-save registers (for sigcontext).
1716 * FIXME: After this point the process structure should be
1717 * consistent with all the relevant state of the process
1718 * before the syscall. We need to verify this.
 */
/* Call do_notify_resume(regs, in_syscall=1), then re-check for work. */
1721 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1725 ldo -16(%r30),%r29 /* Reference param save area */
1728 BL do_notify_resume,%r2
1729 ldi 1, %r25 /* long in_syscall = 1 */
1732 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1735 b,n syscall_check_sig
/*
 * syscall_restore (label elided from this excerpt): fast path back to
 * userspace.  A task being single-stepped or block-stepped by ptrace
 * must instead return via a full RFI (syscall_restore_rfi below) so
 * the PSW R/T bits can be set.
 */
1740 /* Are we being ptraced? */
1741 LDREG TASK_TI_FLAGS(%r1),%r19
1742 ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
1743 and,COND(=) %r19,%r2,%r0
1744 b,n syscall_restore_rfi
/*
 * Restore FP state, SAR and the caller-visible general registers from
 * pt_regs.  NOTE(review): some intervening instructions are elided
 * from this excerpt.
 */
1746 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1749 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
1752 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1753 LDREG TASK_PT_GR19(%r1),%r19
1754 LDREG TASK_PT_GR20(%r1),%r20
1755 LDREG TASK_PT_GR21(%r1),%r21
1756 LDREG TASK_PT_GR22(%r1),%r22
1757 LDREG TASK_PT_GR23(%r1),%r23
1758 LDREG TASK_PT_GR24(%r1),%r24
1759 LDREG TASK_PT_GR25(%r1),%r25
1760 LDREG TASK_PT_GR26(%r1),%r26
1761 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1762 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1763 LDREG TASK_PT_GR29(%r1),%r29
1764 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1766 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1767 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1769 copy %r1,%r30 /* Restore user sp */
1770 mfsp %sr3,%r1 /* Get user space id */
1771 mtsp %r1,%sr7 /* Restore sr7 */
/* Point the user data space registers at the user space id. */
1774 /* Set sr2 to zero for userspace syscalls to work. */
1776 mtsp %r1,%sr4 /* Restore sr4 */
1777 mtsp %r1,%sr5 /* Restore sr5 */
1778 mtsp %r1,%sr6 /* Restore sr6 */
1780 depi PRIV_USER,31,2,%r31 /* ensure return to user mode. */
1783 /* decide whether to reset the wide mode bit
1785 * For a syscall, the W bit is stored in the lowest bit
1786 * of sp. Extract it and reset W if it is zero */
1787 extrd,u,*<> %r30,63,1,%r1
1789 /* now reset the lowest bit of sp if it was set */
1792 be,n 0(%sr3,%r31) /* return to user space */
1794 /* We have to return via an RFI, so that PSW T and R bits can be set
1796 * This sets up pt_regs so we can return via intr_restore, which is not
1797 * the most efficient way of doing things, but it works.
 */
1799 syscall_restore_rfi:
/*
 * Slow return path for single-/block-stepped tasks: rebuild a user PSW
 * with the appropriate R (recovery/single-step) and T (taken-branch)
 * bits, make pt_regs fully consistent, and return via intr_restore.
 */
1800 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1801 mtctl %r2,%cr0 /* for immediate trap */
1802 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1803 ldi 0x0b,%r20 /* Create new PSW */
1804 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1806 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1807 * set in thread_info.h and converted to PA bitmap
1808 * numbers in asm-offsets.c */
1810 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1811 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1812 depi -1,27,1,%r20 /* R bit */
1814 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1815 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1816 depi -1,7,1,%r20 /* T bit */
1818 STREG %r20,TASK_PT_PSW(%r1)
1820 /* Always store space registers, since sr3 can be changed (e.g. fork) */
/* %r25 presumably holds the user space id here (load elided). */
1823 STREG %r25,TASK_PT_SR3(%r1)
1824 STREG %r25,TASK_PT_SR4(%r1)
1825 STREG %r25,TASK_PT_SR5(%r1)
1826 STREG %r25,TASK_PT_SR6(%r1)
1827 STREG %r25,TASK_PT_SR7(%r1)
1828 STREG %r25,TASK_PT_IASQ0(%r1)
1829 STREG %r25,TASK_PT_IASQ1(%r1)
1832 /* Now if old D bit is clear, it means we didn't save all registers
1833 * on syscall entry, so do that now. This only happens on TRACEME
1834 * calls, or if someone attached to us while we were on a syscall.
1835 * We could make this more efficient by not saving r3-r18, but
1836 * then we wouldn't be able to use the common intr_restore path.
1837 * It is only for traced processes anyway, so performance is not
 */
1840 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1841 ldo TASK_REGS(%r1),%r25
1842 reg_save %r25 /* Save r3 to r18 */
1844 /* Save the current sr */
1846 STREG %r2,TASK_PT_SR0(%r1)
1848 /* Save the scratch sr */
1850 STREG %r2,TASK_PT_SR1(%r1)
1852 /* sr2 should be set to zero for userspace syscalls */
1853 STREG %r0,TASK_PT_SR2(%r1)
1855 LDREG TASK_PT_GR31(%r1),%r2
1856 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1857 STREG %r2,TASK_PT_IAOQ0(%r1)
1859 STREG %r2,TASK_PT_IAOQ1(%r1)
/*
 * pt_regs_ok path (label elided from this excerpt): pt_regs already
 * complete; just force the IAOQ entries back to user privilege.
 */
1864 LDREG TASK_PT_IAOQ0(%r1),%r2
1865 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1866 STREG %r2,TASK_PT_IAOQ0(%r1)
1867 LDREG TASK_PT_IAOQ1(%r1),%r2
1868 depi PRIV_USER,31,2,%r2
1869 STREG %r2,TASK_PT_IAOQ1(%r1)
/*
 * syscall_do_resched (label elided from this excerpt): call schedule()
 * with the return pointer aimed back at syscall_check_resched, so all
 * exit work is re-checked after rescheduling.
 */
1874 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1875 load32 schedule,%r19
1876 bv %r0(%r19) /* jumps to schedule() */
/* NOTE(review): this ldo presumably fills the branch delay slot on one
 * arm of an elided #ifdef CONFIG_64BIT pair -- confirm. */
1878 ldo -16(%r30),%r29 /* Reference param save area */
1885 #ifdef CONFIG_FUNCTION_TRACER
1887 .import ftrace_function_trampoline,code
/*
 * mcount: classic (non-dynamic) ftrace entry point, called by
 * compiler-inserted instrumentation.  It tail-branches to
 * ftrace_function_trampoline.  Cache-line aligned so the mcount and
 * ftrace_stub entry sequences share one L1 line (see comment below).
 */
1888 .align L1_CACHE_BYTES
1889 ENTRY_CFI(mcount, caller)
1891 .export _mcount,data
/*
1893 * The 64bit mcount() function pointer needs 4 dwords, of which the
1894 * first two are free. We optimize it here and put 2 instructions for
1895 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1896 * have all on one L1 cacheline.
 */
1899 b ftrace_function_trampoline
1900 copy %r3, %arg2 /* caller original %sp */
1903 .type ftrace_stub, @function
1912 .dword 0 /* code in head.S puts value of global gp here */
/*
 * ftrace_caller: dynamic-ftrace entry.  Saves the caller-saved general
 * registers into an FTRACE_FRAME_SIZE frame anchored at %r3, calls
 * ftrace_function_trampoline (with %r23 = NULL meaning "no pt_regs"),
 * then restores everything and returns to the traced function.
 * NOTE(review): the #else/#endif lines of the frame-size selection and
 * a few intervening instructions are elided from this excerpt.
 */
1916 #ifdef CONFIG_DYNAMIC_FTRACE
1919 #define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
1921 #define FTRACE_FRAME_SIZE FRAME_SIZE
1923 ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
1925 .global ftrace_caller
/* Slot 1 holds the caller's %r3; %r3 then anchors the new frame. */
1927 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
1928 ldo -FTRACE_FRAME_SIZE(%sp), %r3
1929 STREG %rp, -RP_OFFSET(%r3)
1931 /* Offset 0 is already allocated for %r1 */
1932 STREG %r23, 2*REG_SZ(%r3)
1933 STREG %r24, 3*REG_SZ(%r3)
1934 STREG %r25, 4*REG_SZ(%r3)
1935 STREG %r26, 5*REG_SZ(%r3)
1936 STREG %r28, 6*REG_SZ(%r3)
1937 STREG %r29, 7*REG_SZ(%r3)
1939 STREG %r19, 8*REG_SZ(%r3)
1940 STREG %r20, 9*REG_SZ(%r3)
1941 STREG %r21, 10*REG_SZ(%r3)
1942 STREG %r22, 11*REG_SZ(%r3)
1943 STREG %r27, 12*REG_SZ(%r3)
1944 STREG %r31, 13*REG_SZ(%r3)
1951 ldi 0, %r23 /* no pt_regs */
1952 b,l ftrace_function_trampoline, %rp
/* Restore everything saved above, in the reverse pattern. */
1955 LDREG -RP_OFFSET(%r3), %rp
1956 LDREG 2*REG_SZ(%r3), %r23
1957 LDREG 3*REG_SZ(%r3), %r24
1958 LDREG 4*REG_SZ(%r3), %r25
1959 LDREG 5*REG_SZ(%r3), %r26
1960 LDREG 6*REG_SZ(%r3), %r28
1961 LDREG 7*REG_SZ(%r3), %r29
1963 LDREG 8*REG_SZ(%r3), %r19
1964 LDREG 9*REG_SZ(%r3), %r20
1965 LDREG 10*REG_SZ(%r3), %r21
1966 LDREG 11*REG_SZ(%r3), %r22
1967 LDREG 12*REG_SZ(%r3), %r27
1968 LDREG 13*REG_SZ(%r3), %r31
1970 LDREG 1*REG_SZ(%r3), %r3
1972 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
1973 /* Adjust return point to jump back to beginning of traced function */
1977 ENDPROC_CFI(ftrace_caller)
1979 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
/*
 * ftrace_regs_caller: like ftrace_caller, but additionally builds a
 * full struct pt_regs on the stack so tracers (kprobes on ftrace, live
 * patching) can inspect and modify the complete register state of the
 * traced function.  %arg3 carries the pt_regs pointer into
 * ftrace_function_trampoline; everything is restored on return.
 */
1980 ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
1981 CALLS,SAVE_RP,SAVE_SP)
1983 .global ftrace_regs_caller
1985 ldo -FTRACE_FRAME_SIZE(%sp), %r1
1986 STREG %rp, -RP_OFFSET(%r1)
/* Allocate the pt_regs area above the ftrace frame. */
1989 ldo PT_SZ_ALGN(%sp), %sp
/* Save all general registers into pt_regs (%r1 = regs base). */
1991 STREG %rp, PT_GR2(%r1)
1992 STREG %r3, PT_GR3(%r1)
1993 STREG %r4, PT_GR4(%r1)
1994 STREG %r5, PT_GR5(%r1)
1995 STREG %r6, PT_GR6(%r1)
1996 STREG %r7, PT_GR7(%r1)
1997 STREG %r8, PT_GR8(%r1)
1998 STREG %r9, PT_GR9(%r1)
1999 STREG %r10, PT_GR10(%r1)
2000 STREG %r11, PT_GR11(%r1)
2001 STREG %r12, PT_GR12(%r1)
2002 STREG %r13, PT_GR13(%r1)
2003 STREG %r14, PT_GR14(%r1)
2004 STREG %r15, PT_GR15(%r1)
2005 STREG %r16, PT_GR16(%r1)
2006 STREG %r17, PT_GR17(%r1)
2007 STREG %r18, PT_GR18(%r1)
2008 STREG %r19, PT_GR19(%r1)
2009 STREG %r20, PT_GR20(%r1)
2010 STREG %r21, PT_GR21(%r1)
2011 STREG %r22, PT_GR22(%r1)
2012 STREG %r23, PT_GR23(%r1)
2013 STREG %r24, PT_GR24(%r1)
2014 STREG %r25, PT_GR25(%r1)
2015 STREG %r26, PT_GR26(%r1)
2016 STREG %r27, PT_GR27(%r1)
2017 STREG %r28, PT_GR28(%r1)
2018 STREG %r29, PT_GR29(%r1)
2019 STREG %r30, PT_GR30(%r1)
2020 STREG %r31, PT_GR31(%r1)
/* NOTE(review): the mfctl of SAR into %r26 appears to be elided here. */
2022 STREG %r26, PT_SAR(%r1)
2025 LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
2027 ldo -FTRACE_FRAME_SIZE(%r1), %arg2
2028 b,l ftrace_function_trampoline, %rp
2029 copy %r1, %arg3 /* struct pt_regs */
/* Recompute the pt_regs base and restore the full register state. */
2031 ldo -PT_SZ_ALGN(%sp), %r1
2033 LDREG PT_SAR(%r1), %rp
2036 LDREG PT_GR2(%r1), %rp
2037 LDREG PT_GR3(%r1), %r3
2038 LDREG PT_GR4(%r1), %r4
2039 LDREG PT_GR5(%r1), %r5
2040 LDREG PT_GR6(%r1), %r6
2041 LDREG PT_GR7(%r1), %r7
2042 LDREG PT_GR8(%r1), %r8
2043 LDREG PT_GR9(%r1), %r9
2044 LDREG PT_GR10(%r1),%r10
2045 LDREG PT_GR11(%r1),%r11
2046 LDREG PT_GR12(%r1),%r12
2047 LDREG PT_GR13(%r1),%r13
2048 LDREG PT_GR14(%r1),%r14
2049 LDREG PT_GR15(%r1),%r15
2050 LDREG PT_GR16(%r1),%r16
2051 LDREG PT_GR17(%r1),%r17
2052 LDREG PT_GR18(%r1),%r18
2053 LDREG PT_GR19(%r1),%r19
2054 LDREG PT_GR20(%r1),%r20
2055 LDREG PT_GR21(%r1),%r21
2056 LDREG PT_GR22(%r1),%r22
2057 LDREG PT_GR23(%r1),%r23
2058 LDREG PT_GR24(%r1),%r24
2059 LDREG PT_GR25(%r1),%r25
2060 LDREG PT_GR26(%r1),%r26
2061 LDREG PT_GR27(%r1),%r27
2062 LDREG PT_GR28(%r1),%r28
2063 LDREG PT_GR29(%r1),%r29
2064 LDREG PT_GR30(%r1),%r30
2065 LDREG PT_GR31(%r1),%r31
/* Release the pt_regs area and the ftrace frame, then return. */
2067 ldo -PT_SZ_ALGN(%sp), %sp
2068 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2069 /* Adjust return point to jump back to beginning of traced function */
2073 ENDPROC_CFI(ftrace_regs_caller)
2078 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * return_to_handler: function-graph tracer return trampoline.  The
 * traced function "returns" here; we save its return values, call
 * ftrace_return_to_handler() to recover the real return address, then
 * restore the return values and jump back to the original caller.
 * NOTE(review): several interior instructions are elided from this
 * excerpt.
 */
2080 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
2081 .export parisc_return_to_handler,data
2082 parisc_return_to_handler:
2084 STREG %r0,-RP_OFFSET(%sp) /* store 0 as %rp */
2086 STREGM %r1,FRAME_SIZE(%sp)
2094 /* call ftrace_return_to_handler(0) */
2095 .import ftrace_return_to_handler,code
2096 load32 ftrace_return_to_handler,%ret0
2097 load32 .Lftrace_ret,%r2
2099 ldo -16(%sp),%ret1 /* Reference param save area */
2108 /* restore original return values */
2112 /* return from function */
2118 LDREGM -FRAME_SIZE(%sp),%r3
2119 ENDPROC_CFI(return_to_handler)
2121 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2123 #endif /* CONFIG_FUNCTION_TRACER */
2125 #ifdef CONFIG_IRQSTACKS
2126 /* void call_on_stack(unsigned long param1, void *func,
2127 unsigned long new_stack) */
/*
 * Switch to the supplied (IRQ) stack, call func(param1), then restore
 * the original stack pointer and return pointer.
 * NOTE(review): "FRAME=" differs in case from the "frame=" keyword used
 * by the other ENTRY_CFI invocations in this file; GAS macro keyword
 * arguments are case-sensitive -- confirm against assembly.h.
 */
2128 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2129 ENTRY(_call_on_stack)
2132 /* Regarding the HPPA calling conventions for function pointers,
2133 we assume the PIC register is not changed across call. For
2134 CONFIG_64BIT, the argument pointer is left to point at the
2135 argument region allocated for the call to call_on_stack. */
2137 /* Switch to new stack. We allocate two frames. */
2138 ldo 2*FRAME_SIZE(%arg2), %sp
2139 # ifdef CONFIG_64BIT
2140 /* Save previous stack pointer and return pointer in frame marker */
2141 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2142 /* Calls always use function descriptor */
2143 LDREG 16(%arg1), %arg1
/* After the call: recover old %rp and the saved original %sp. */
2145 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2146 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2148 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2150 /* Save previous stack pointer and return pointer in frame marker */
2151 STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
2152 STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
2153 /* Calls use function descriptor if PLABEL bit is set */
2154 bb,>=,n %arg1, 30, 1f
2156 LDREG 0(%arg1), %arg1
2158 be,l 0(%sr4,%arg1), %sr0, %r31
/* Restore the caller's return pointer and stack pointer. */
2160 LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
2162 LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
2163 # endif /* CONFIG_64BIT */
2164 ENDPROC_CFI(call_on_stack)
2165 #endif /* CONFIG_IRQSTACKS */
2167 ENTRY_CFI(get_register)
/*
2169 * get_register is used by the non access tlb miss handlers to
2170 * copy the value of the general register specified in r8 into
2171 * r1. This routine can't be used for shadowed registers, since
2172 * the rfir will restore the original value. So, for the shadowed
2173 * registers we put a -1 into r1 to indicate that the register
2174 * should not be used (the register being copied could also have
2175 * a -1 in it, but that is OK, it just means that we will have
2176 * to use the slow path instead).
 */
/*
 * Jump-table body: indexed by register number, each entry branches
 * back via %r25.  NOTE(review): the delay-slot instruction after each
 * bv (the copy into %r1, or ldi -1 for shadowed registers, and the
 * blr dispatch) is elided from this excerpt -- confirm against the
 * full source.
 */
2180 bv %r0(%r25) /* r0 */
2182 bv %r0(%r25) /* r1 - shadowed */
2184 bv %r0(%r25) /* r2 */
2186 bv %r0(%r25) /* r3 */
2188 bv %r0(%r25) /* r4 */
2190 bv %r0(%r25) /* r5 */
2192 bv %r0(%r25) /* r6 */
2194 bv %r0(%r25) /* r7 */
2196 bv %r0(%r25) /* r8 - shadowed */
2198 bv %r0(%r25) /* r9 - shadowed */
2200 bv %r0(%r25) /* r10 */
2202 bv %r0(%r25) /* r11 */
2204 bv %r0(%r25) /* r12 */
2206 bv %r0(%r25) /* r13 */
2208 bv %r0(%r25) /* r14 */
2210 bv %r0(%r25) /* r15 */
2212 bv %r0(%r25) /* r16 - shadowed */
2214 bv %r0(%r25) /* r17 - shadowed */
2216 bv %r0(%r25) /* r18 */
2218 bv %r0(%r25) /* r19 */
2220 bv %r0(%r25) /* r20 */
2222 bv %r0(%r25) /* r21 */
2224 bv %r0(%r25) /* r22 */
2226 bv %r0(%r25) /* r23 */
2228 bv %r0(%r25) /* r24 - shadowed */
2230 bv %r0(%r25) /* r25 - shadowed */
2232 bv %r0(%r25) /* r26 */
2234 bv %r0(%r25) /* r27 */
2236 bv %r0(%r25) /* r28 */
2238 bv %r0(%r25) /* r29 */
2240 bv %r0(%r25) /* r30 */
2242 bv %r0(%r25) /* r31 */
2244 ENDPROC_CFI(get_register)
2247 ENTRY_CFI(set_register)
/*
2249 * set_register is used by the non access tlb miss handlers to
2250 * copy the value of r1 into the general register specified in
 * r8 (comment continuation elided from this excerpt).
 */
/*
 * Jump-table body: indexed by register number, each entry branches
 * back via %r25.  NOTE(review): the delay-slot instruction after each
 * bv (the copy from %r1 into the target register, and the blr
 * dispatch) is elided from this excerpt -- confirm against the full
 * source.
 */
2255 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2257 bv %r0(%r25) /* r1 */
2259 bv %r0(%r25) /* r2 */
2261 bv %r0(%r25) /* r3 */
2263 bv %r0(%r25) /* r4 */
2265 bv %r0(%r25) /* r5 */
2267 bv %r0(%r25) /* r6 */
2269 bv %r0(%r25) /* r7 */
2271 bv %r0(%r25) /* r8 */
2273 bv %r0(%r25) /* r9 */
2275 bv %r0(%r25) /* r10 */
2277 bv %r0(%r25) /* r11 */
2279 bv %r0(%r25) /* r12 */
2281 bv %r0(%r25) /* r13 */
2283 bv %r0(%r25) /* r14 */
2285 bv %r0(%r25) /* r15 */
2287 bv %r0(%r25) /* r16 */
2289 bv %r0(%r25) /* r17 */
2291 bv %r0(%r25) /* r18 */
2293 bv %r0(%r25) /* r19 */
2295 bv %r0(%r25) /* r20 */
2297 bv %r0(%r25) /* r21 */
2299 bv %r0(%r25) /* r22 */
2301 bv %r0(%r25) /* r23 */
2303 bv %r0(%r25) /* r24 */
2305 bv %r0(%r25) /* r25 */
2307 bv %r0(%r25) /* r26 */
2309 bv %r0(%r25) /* r27 */
2311 bv %r0(%r25) /* r28 */
2313 bv %r0(%r25) /* r29 */
2315 bv %r0(%r25) /* r30 */
2317 bv %r0(%r25) /* r31 */
2319 ENDPROC_CFI(set_register)