* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1999 by Helge Deller
+ * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
* Copyright 1999 SuSE GmbH (Philipp Rumpf)
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
#include <asm/assembly.h>
#include <asm/pgtable.h>
+#include <linux/linkage.h>
+
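/*
 * The <linux/linkage.h> helpers used below replace the old .export/label
 * pairs.  A rough sketch of the generic definitions (the exact expansion
 * is architecture-dependent):
 *
 *     ENTRY(name)    ->   .globl name ; ALIGN ; name:
 *     END(name)      ->   .size name, . - name
 *     ENDPROC(name)  ->   .type name, @function ; END(name)
 */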
.level LEVEL
.data
-
- .export boot_args
-boot_args:
+ENTRY(boot_args)
.word 0 /* arg0 */
.word 0 /* arg1 */
.word 0 /* arg2 */
.word 0 /* arg3 */
+END(boot_args)
.text
.align 4
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
.import $global$ /* forward declaration */
#endif /*!CONFIG_64BIT*/
- .export stext
	.export _stext,data /* Kernel wants it this way! */
_stext:
-stext:
+ENTRY(stext)
.proc
.callinfo
.procend
#endif /* CONFIG_SMP */
+
+ENDPROC(stext)
+
#ifndef CONFIG_64BIT
.data
#include <asm/assembly.h>
#include <asm/pdc.h>
+#include <linux/linkage.h>
+
/*
* stack for os_hpmc, the HPMC handler.
* buffer for IODC procedures (for the HPMC handler).
#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
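/*
 * HPMC is the PA-RISC High Priority Machine Check; the buffer below holds
 * the Processor Internal Memory (PIM) state that firmware records for such
 * a check (typically read back through the PDC_PIM firmware call), sized
 * here for the full architected PA 2.0 state.
 */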
- .export hpmc_pim_data, data
.align 8
-hpmc_pim_data:
+ENTRY(hpmc_pim_data)
.block HPMC_PIM_DATA_SIZE
+END(hpmc_pim_data)
.text
- .export os_hpmc, code
.import intr_save, code
-
-os_hpmc:
+ENTRY(os_hpmc)
/*
* registers modified:
b .
nop
+ENDPROC(os_hpmc)
/* this label used to compute os_hpmc checksum */
-
- .export os_hpmc_end, code
-
-os_hpmc_end:
+ENTRY(os_hpmc_end)
nop
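/*
 * The handler's length and checksum are derived from the span between
 * os_hpmc and os_hpmc_end: the HPMC slot of the interruption vector is
 * expected to carry the handler's address, length and checksum, which the
 * kernel fills in when it installs the vector.
 */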
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
+#include <linux/linkage.h>
.text
.align 128
- .export flush_tlb_all_local,code
-
-flush_tlb_all_local:
+ENTRY(flush_tlb_all_local)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_tlb_all_local)
- .export flush_instruction_cache_local,code
.import cache_info,data
-flush_instruction_cache_local:
+ENTRY(flush_instruction_cache_local)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_instruction_cache_local)
- .export flush_data_cache_local, code
- .import cache_info, data
-flush_data_cache_local:
+ .import cache_info, data
+ENTRY(flush_data_cache_local)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_data_cache_local)
- .export copy_user_page_asm,code
.align 16
-copy_user_page_asm:
+ENTRY(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(copy_user_page_asm)
/*
* NOTE: Code in clear_user_page has a hard coded dependency on the
* lobby for such a change.
*/
- .export copy_user_page_asm,code
-
-copy_user_page_asm:
+ENTRY(copy_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(copy_user_page_asm)
#endif
- .export __clear_user_page_asm,code
-
-__clear_user_page_asm:
+ENTRY(__clear_user_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(__clear_user_page_asm)
- .export flush_kernel_dcache_page_asm
-
-flush_kernel_dcache_page_asm:
+ENTRY(flush_kernel_dcache_page_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_kernel_dcache_page_asm)
- .export flush_user_dcache_page
-
-flush_user_dcache_page:
+ENTRY(flush_user_dcache_page)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_user_dcache_page)
- .export flush_user_icache_page
-
-flush_user_icache_page:
+ENTRY(flush_user_icache_page)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_user_icache_page)
- .export purge_kernel_dcache_page
-
-purge_kernel_dcache_page:
+ENTRY(purge_kernel_dcache_page)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(purge_kernel_dcache_page)
#if 0
/* Currently not used, but it still is a possible alternate
* solution.
*/
- .export flush_alias_page
-
-flush_alias_page:
+ENTRY(flush_alias_page)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_alias_page)
- .export flush_kernel_dcache_range_asm
-
-flush_kernel_dcache_range_asm:
+ENTRY(flush_kernel_dcache_range_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_kernel_dcache_range_asm)
- .export flush_user_icache_range_asm
-
-flush_user_icache_range_asm:
+ENTRY(flush_user_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_user_icache_range_asm)
- .export flush_kernel_icache_page
-
-flush_kernel_icache_page:
+ENTRY(flush_kernel_icache_page)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(flush_kernel_icache_page)
- .export flush_kernel_icache_range_asm
-
-flush_kernel_icache_range_asm:
+ENTRY(flush_kernel_icache_range_asm)
.proc
.callinfo NO_CALLS
.entry
nop
.exit
.procend
+ENDPROC(flush_kernel_icache_range_asm)
/* align should cover use of rfi in disable_sr_hashing_asm and
* srdis_done.
*/
.align 256
- .export disable_sr_hashing_asm,code
-
-disable_sr_hashing_asm:
+ENTRY(disable_sr_hashing_asm)
.proc
.callinfo NO_CALLS
.entry
.exit
.procend
+ENDPROC(disable_sr_hashing_asm)
.end
*/
#include <asm/assembly.h>
+#include <linux/linkage.h>
#ifdef CONFIG_64BIT
.level 2.0w
; starting/stopping the coprocessor with the pmenb/pmdis.
;
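;
; A descriptive note: the disable path below writes %ccr (cr10), the
; coprocessor configuration register, with "mtctl %r26,ccr" to turn the
; performance coprocessor off.
;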
.text
- .align 32
- .export perf_intrigue_enable_perf_counters,code
-perf_intrigue_enable_perf_counters:
+ENTRY(perf_intrigue_enable_perf_counters)
.proc
.callinfo frame=0,NO_CALLS
.entry
nop
.exit
.procend
+ENDPROC(perf_intrigue_enable_perf_counters)
- .export perf_intrigue_disable_perf_counters,code
-perf_intrigue_disable_perf_counters:
+ENTRY(perf_intrigue_disable_perf_counters)
.proc
.callinfo frame=0,NO_CALLS
.entry
mtctl %r26,ccr ; turn off performance coprocessor
.exit
.procend
+ENDPROC(perf_intrigue_disable_perf_counters)
;***********************************************************************
;*
;*
;***********************************************************************
- .export perf_rdr_shift_in_W,code
-perf_rdr_shift_in_W:
+ENTRY(perf_rdr_shift_in_W)
.proc
.callinfo frame=0,NO_CALLS
.entry
.exit
MTDIAG_2 (24) ; restore DR2
.procend
+ENDPROC(perf_rdr_shift_in_W)
;***********************************************************************
;*
;***********************************************************************
- .export perf_rdr_shift_out_W,code
-perf_rdr_shift_out_W:
+ENTRY(perf_rdr_shift_out_W)
.proc
.callinfo frame=0,NO_CALLS
.entry
.exit
MTDIAG_2 (23) ; restore DR2
.procend
+ENDPROC(perf_rdr_shift_out_W)
;***********************************************************************
;*
;***********************************************************************
- .export perf_rdr_shift_in_U,code
-perf_rdr_shift_in_U:
+ENTRY(perf_rdr_shift_in_U)
.proc
.callinfo frame=0,NO_CALLS
.entry
.exit
MTDIAG_2 (24) ; restore DR2
.procend
+ENDPROC(perf_rdr_shift_in_U)
;***********************************************************************
;*
;*
;***********************************************************************
- .export perf_rdr_shift_out_U,code
-perf_rdr_shift_out_U:
+ENTRY(perf_rdr_shift_out_U)
.proc
.callinfo frame=0,NO_CALLS
.entry
.exit
MTDIAG_2 (23) ; restore DR2
.procend
+ENDPROC(perf_rdr_shift_out_U)
#include <asm/psw.h>
#include <asm/assembly.h>
+#include <linux/linkage.h>
+
.section .bss
.export real_stack
.export real32_stack
.text
- .export real32_call_asm
-
/* unsigned long real32_call_asm(unsigned int *sp,
* unsigned int *arg0p,
* unsigned int iodc_fn)
* iodc_fn is the IODC function to call
*/
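/*
 * A hypothetical C-side sketch of a caller, matching the signature in the
 * comment above (variable names invented for illustration):
 *
 *     unsigned int args[8];                        // argument block
 *     ret = real32_call_asm(real_mode_sp, args, iodc_entry);
 */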
-real32_call_asm:
+ENTRY(real32_call_asm)
STREG %rp, -RP_OFFSET(%sp) /* save RP */
#ifdef CONFIG_64BIT
callee_save
LDREG -RP_OFFSET(%sp), %rp /* restore RP */
bv 0(%rp)
nop
+ENDPROC(real32_call_asm)
# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
/************************ 64-bit real-mode calls ***********************/
/* This is only usable in wide kernels right now and will probably stay so */
.text
- .export real64_call_asm
/* unsigned long real64_call_asm(unsigned long *sp,
* unsigned long *arg0p,
* unsigned long fn)
* arg0p points to where saved arg values may be found
* iodc_fn is the IODC function to call
*/
-real64_call_asm:
+ENTRY(real64_call_asm)
std %rp, -0x10(%sp) /* save RP */
std %sp, -8(%arg0) /* save SP on real-mode stack */
copy %arg0, %sp /* adopt the real-mode SP */
ldd -0x10(%sp), %rp /* restore RP */
bv 0(%rp)
nop
+ENDPROC(real64_call_asm)
#endif
- .export __canonicalize_funcptr_for_compare
.text
/* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
** GCC 3.3 and later has a new function in libgcc.a for
** comparing function pointers.
*/
-__canonicalize_funcptr_for_compare:
+ENTRY(__canonicalize_funcptr_for_compare)
#ifdef CONFIG_64BIT
bve (%r2)
#else
bv %r0(%r2)
#endif
copy %r26,%r28
+ENDPROC(__canonicalize_funcptr_for_compare)
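/*
 * GCC emits calls to this helper when two function pointers are compared,
 * roughly (illustrative only):
 *
 *     if (__canonicalize_funcptr_for_compare(a) ==
 *         __canonicalize_funcptr_for_compare(b)) ...
 *
 * The kernel version simply returns its argument unchanged: %r26 (arg0) is
 * copied into %r28 (the return value) in the branch's delay slot.
 */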
+
#include <asm/errno.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
-
#include <asm/assembly.h>
#include <asm/processor.h>
+#include <linux/linkage.h>
+
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
* userspace application.
.level 1.1
#endif
+/* On 64-bit kernels, pad table entries out to 64-bit values */
+#ifdef CONFIG_64BIT
+#define ULONG_WORD(x) .word 0, x
+#else
+#define ULONG_WORD(x) .word x
+#endif
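/*
 * Example: on a 64-bit kernel
 *
 *     ULONG_WORD(3b - linux_gateway_page)
 *
 * assembles to ".word 0, 3b - linux_gateway_page", a zero upper half plus
 * the 32-bit offset, so each table entry occupies the size of an unsigned
 * long on either kernel width.
 */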
+
+
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
- .export linux_gateway_page
/* Linux gateway page is aliased to virtual page 0 in the kernel
* address space. Since it is a gateway page it cannot be
*/
.align ASM_PAGE_SIZE
-linux_gateway_page:
+ENTRY(linux_gateway_page)
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
.rept 44
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
.section __ex_table,"aw"
-#ifdef CONFIG_64BIT
- /* Pad the address calculation */
- .word 0,(2b - linux_gateway_page)
- .word 0,(3b - linux_gateway_page)
-#else
- .word (2b - linux_gateway_page)
- .word (3b - linux_gateway_page)
-#endif
+ ULONG_WORD(2b - linux_gateway_page)
+ ULONG_WORD(3b - linux_gateway_page)
.previous
.section __ex_table,"aw"
-#ifdef CONFIG_64BIT
- /* Pad the address calculation */
- .word 0,(1b - linux_gateway_page)
- .word 0,(3b - linux_gateway_page)
-#else
- .word (1b - linux_gateway_page)
- .word (3b - linux_gateway_page)
-#endif
+ ULONG_WORD(1b - linux_gateway_page)
+ ULONG_WORD(3b - linux_gateway_page)
.previous
end_compare_and_swap:
/* Make sure nothing else is placed on this page */
.align ASM_PAGE_SIZE
- .export end_linux_gateway_page
-end_linux_gateway_page:
+END(linux_gateway_page)
+ENTRY(end_linux_gateway_page)
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
-#ifdef CONFIG_64BIT
- /* FIXME: The code will always be on the gateay page
- and thus it will be on the first 4k, the
- assembler seems to think that the final
- subtraction result is only a word in
- length, so we pad the value.
- */
-#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
-#else
-#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
-#endif
+
+#define LWS_ENTRY(_name_) ULONG_WORD(lws_##_name_ - linux_gateway_page)
.section .rodata,"a"
.align ASM_PAGE_SIZE
/* Light-weight-syscall table */
/* Start of lws table. */
- .export lws_table
-.Llws_table:
-lws_table:
+ENTRY(lws_table)
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
+END(lws_table)
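/*
 * Each lws_table entry is the offset of its handler from the start of the
 * gateway page (see LWS_ENTRY above); with the gateway page mapped at
 * virtual address 0, that offset is also the address userspace branches to
 * for the corresponding light-weight syscall.
 */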
/* End of lws table */
.align ASM_PAGE_SIZE
- .export sys_call_table
-.Lsys_call_table:
-sys_call_table:
+ENTRY(sys_call_table)
#include "syscall_table.S"
+END(sys_call_table)
#ifdef CONFIG_64BIT
.align ASM_PAGE_SIZE
- .export sys_call_table64
-.Lsys_call_table64:
-sys_call_table64:
+ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
+END(sys_call_table64)
#endif
#ifdef CONFIG_SMP
*/
.section .data
.align 4096
- .export lws_lock_start
-.Llws_lock_start:
-lws_lock_start:
+ENTRY(lws_lock_start)
/* lws locks */
.align 16
.rept 16
.word 0
.word 0
.endr
+END(lws_lock_start)
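/*
 * A descriptive note: this provides 16 lock slots, each beginning on a
 * 16-byte boundary, which the light-weight-syscall handlers use on SMP to
 * serialize their load/compare/store sequences; the choice of slot is made
 * in the handler code.
 */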
.previous
#endif
/* CONFIG_SMP for lws_lock_start */