/*
 * arch/powerpc/kernel/misc64.S
 *
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
/*
 * Returns (address we are running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
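/*
 * Illustrative use (a sketch, not from this file): early boot code
 * running before the mapping at KERNELBASE is active can correct a
 * linked address by hand; "early_var" is a hypothetical symbol.
 *
 *	unsigned long p;
 *	p = add_reloc_offset((unsigned long)&early_var);
 */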
_GLOBAL(add_reloc_offset)
#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
	/* stdu stores the old r1 into the new stack as the back chain
	 * and leaves r3 pointing at the new frame; -112 reserves a
	 * minimal stack frame there. */
	stdu	r1,THREAD_SIZE-112(r3)

_GLOBAL(call_handle_IRQ_event)
	stdu	r1,THREAD_SIZE-112(r6)
#endif /* CONFIG_IRQSTACKS */
/*
 * To be called by C code which needs to do some operations with MMU
 * disabled. Note that interrupts have to be disabled by the caller
 * prior to calling us. The code called _MUST_ be in the RMO of course
 * and part of the linear mapping as we don't attempt to translate the
 * stack pointer at all. The function is called with the stack switched
 * to this CPU's emergency stack.
 *
 * prototype is void *call_with_mmu_off(void *func, void *data);
 *
 * the called function is expected to be of the form
 *
 * void *called(void *data);
 */
_GLOBAL(call_with_mmu_off)
	mflr	r0			/* get link, save it on stackframe */
	mr	r5,r1			/* save old stack ptr */
	ld	r1,PACAEMERGSP(r13)	/* get emerg. stack */
	subi	r1,r1,STACK_FRAME_OVERHEAD
	std	r0,16(r1)		/* save link on emerg. stack */
	std	r5,0(r1)		/* save old stack ptr in backchain */
	ld	r3,0(r3)		/* get to real function ptr (assume same TOC) */
	bl	2f			/* we need LR to return, continue at label 2 */

	ld	r0,16(r1)		/* we return here from the call, get LR and */
	ld	r1,0(r1)		/* .. old stack ptr */
	mtspr	SPRN_SRR0,r0		/* and get back to virtual mode with these */
	ori	r4,r4,MSR_IR|MSR_DR

2:	mtspr	SPRN_SRR0,r3		/* coming from above, enter real mode */
	mr	r3,r4			/* get parameter */
	ori	r0,r0,MSR_IR|MSR_DR
	xori	r0,r0,MSR_IR|MSR_DR
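/*
 * Illustrative caller (a sketch, not from this file): the called
 * function must live in the RMO/linear mapping, interrupts must be
 * off, and "probe_hw" is a made-up name; r3 takes a function
 * descriptor, which a C function pointer already is on ppc64:
 *
 *	void *probe_hw(void *data);	-- runs with the MMU off
 *
 *	local_irq_save(flags);
 *	ret = call_with_mmu_off(probe_hw, data);
 *	local_irq_restore(flags);
 */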
	.tc	ppc64_caches[TC],ppc64_caches
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *	flush all bytes from start through stop-1 inclusive
 */
_KPROBE(__flush_icache_range)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get cache line size */
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
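/*
 * Illustrative use (a sketch, not from this file): after storing an
 * instruction to memory it must be flushed from the d-cache and
 * invalidated from the i-cache before it is executed; "addr" is
 * hypothetical:
 *
 *	*(u32 *)addr = new_insn;
 *	flush_icache_range(addr, addr + 4);
 */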
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *	flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_range)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
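/*
 * Illustrative use (a sketch, not from this file): push a buffer out
 * to memory before a device that does not snoop the d-cache reads it;
 * buf and len are hypothetical:
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */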
/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups! It also assumes real mode
 * is cacheable. Used for flushing out the DART before using
 * it as uncacheable memory.
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *	flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	mtmsr	r5			/* Re-enable MMU Data Relocation */
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT	/* Page align */
	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */

/* Now invalidate the icache */

	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
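/*
 * Illustrative caller (a sketch, not from this file): make a freshly
 * written page visible to instruction fetch, e.g. after copying a
 * page of user text:
 *
 *	__flush_dcache_icache(page_address(page));
 */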
/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
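/*
 * Illustrative use (a sketch, not from this file): read 256 16-bit
 * words from a device data port into a buffer; io_port is
 * hypothetical:
 *
 *	u16 buf[256];
 *	insw(io_port, buf, 256);
 */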
/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */

/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
	lfd	0,0(r5)		/* load up fpscr value */
	mffs	0		/* save new fpscr value */
	lfd	0,0(r5)		/* load up fpscr value */
	mffs	0		/* save new fpscr value */
/*
 * identify_cpu: find the cpu_specs entry that matches this CPU's PVR
 * and call its setup_cpu function.
 * In:	r3 = base of the cpu_specs array
 *	r4 = address of cur_cpu_spec
 *	r5 = relocation offset
 */
_GLOBAL(identify_cpu)
	lwz	r8,CPU_SPEC_PVR_MASK(r3)	/* mask for this entry */
	lwz	r9,CPU_SPEC_PVR_VALUE(r3)	/* expected (PVR & mask) */
	addi	r3,r3,CPU_SPEC_ENTRY_SIZE	/* no match, try next entry */
	ld	r4,CPU_SPEC_SETUP(r3)		/* setup function for this cpu */
	/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
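/*
 * Rough C equivalent of the PVR match loop above (a sketch; the field
 * names follow struct cpu_spec, whose offsets are the CPU_SPEC_*
 * constants used here):
 *
 *	struct cpu_spec *s;
 *	for (s = cpu_specs; ; s++)
 *		if ((mfspr(SPRN_PVR) & s->pvr_mask) == s->pvr_value)
 *			break;
 *	*cur_cpu_spec = s;
 *	s->cpu_setup(offset, s);
 */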
/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nops over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	LOADADDR(r6,cur_cpu_spec)
	ld	r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	LOADADDR(r6,__start___ftr_fixup)
	LOADADDR(r7,__stop___ftr_fixup)
	ld	r8,-32(r6)	/* mask */
	ld	r9,-24(r6)	/* value */
	ld	r8,-16(r6)	/* section begin */
	ld	r9,-8(r6)	/* section end */
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	lis	r0,0x60000000@h	/* nop */
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	dcbst	0,r8		/* suboptimal, but simpler */
	sync			/* additional sync needed on g4 */
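/*
 * The -32/-24/-16/-8 loads above walk 32-byte fixup records, with r6
 * pointing just past the current record. Each record, as emitted by
 * the BEGIN_FTR_SECTION/END_FTR_SECTION macros, looks roughly like
 * this (a sketch, not the authoritative definition):
 *
 *	struct ftr_fixup_entry {
 *		unsigned long mask;	-- CPU feature bits to test
 *		unsigned long value;	-- required value of those bits
 *		unsigned long start;	-- first insn of the alt section
 *		unsigned long end;	-- one past the last insn
 *	};
 */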
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
 * Do an IO access in real mode
 */

/*
 * Do an IO access in real mode
 */
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	stdu	r1,-STACK_FRAME_OVERHEAD(r1)
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,(CLONE_UNTRACED>>16)
	li	r4,0		/* new sp (unused) */
	cmpdi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
	mtlr	r29		/* fn addr in lr */
	mr	r3,r30		/* load arg and call fn */
	li	r0,__NR_exit	/* exit after child exits */
1:	addi	r1,r1,STACK_FRAME_OVERHEAD
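/*
 * Illustrative caller (a sketch, not from this file): returns the new
 * thread's pid in the parent and never returns in the child;
 * "worker_fn" is a hypothetical function:
 *
 *	pid = kernel_thread(worker_fn, NULL, CLONE_FS | CLONE_FILES);
 */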
/*
 * disable_kernel_fp()
 * Disable kernel use of the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3			/* current MSR */
	/* rotate the FP bit into the MSB, clear it with the rldicl
	 * mask, then rotate it back into place: a branch-free way to
	 * clear a single MSR bit */
	rldicl	r0,r3,(63-MSR_FP_LG),1
	rldicl	r3,r0,(MSR_FP_LG+1),0
	mtmsrd	r3			/* disable use of fpu now */
#ifdef CONFIG_ALTIVEC

#if 0 /* this has no callers for now */
/*
 * disable_kernel_altivec()
 */
_GLOBAL(disable_kernel_altivec)
	/* same rotate-and-mask trick as disable_kernel_fp above */
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3			/* disable use of VMX now */
/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mtmsrd	r5			/* enable use of VMX now */
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	andc	r4,r4,r3		/* disable VMX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	ld	r4,last_task_used_altivec@got(r2)
#endif /* CONFIG_SMP */

#endif /* CONFIG_ALTIVEC */
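/*
 * Illustrative caller (a sketch, not from this file): flush a task's
 * vector state to its thread_struct before touching it, roughly what
 * flush_altivec_to_thread() does:
 *
 *	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_VEC))
 *		giveup_altivec(tsk);
 */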
_GLOBAL(__setup_cpu_power3)
/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves.
 *
 * Physical (hardware) cpu id should be in r3.
 */
	addi	r5,r5,kexec_flag-1b
#ifdef CONFIG_KEXEC		/* use no memory without kexec */

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * set paca id to -1 to say we got here
 * switch to real mode
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
	mflr	r11		/* return address to SRR0 */
/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
_GLOBAL(kexec_sequence)
	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-112(r3)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */

	/* save args into preserved regs */
	mr	r31,r3		/* newstack (both) */
	mr	r30,r4		/* start (real) */
	mr	r29,r5		/* image (virt) */
	mr	r28,r6		/* control, unused */
	mr	r27,r7		/* clear_all() fn desc */
	mr	r26,r8		/* spare */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */
	/* disable interrupts, we are overwriting kernel data next */

	/* copy dest pages, flush whole dest image */
	bl	.kexec_copy_flush	/* (image) */

	/* clear out hardware hash page table and tlb */
	ld	r5,0(r27)		/* deref function descriptor */
	bctrl				/* ppc_md.hash_clear_all(void); */
/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *		slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *		start(phys_cpu_id, start, 0);
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 *   boot block will have all cpus scanning device tree to see if they
 *   are the boot cpu ?????
 *   other device tree differences (prop sizes, va vs pa, etc)...
 */
	/* copy 0x100 bytes starting at start to 0 */
	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	stw	r6,kexec_flag-1b(5)
	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	blr	/* image->start(physid, image->start, 0); */
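/*
 * Illustrative caller (a sketch, approximating what the machine_kexec
 * code passes in; names may differ by kernel version):
 *
 *	kexec_sequence(&kexec_stack, image->start, image,
 *		       page_address(image->control_code_page),
 *		       ppc_md.hpte_clear_all);
 */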
#endif /* CONFIG_KEXEC */