Use the ENTRY/ENDPROC annotations from linux/linkage.h instead of
open-coding the .globl/.type/.size directives by hand.
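
For reference, ENTRY()/ENDPROC() from <linux/linkage.h> expand to roughly
the sequence those sites currently spell out by hand (a sketch of the
generic definitions; the exact ALIGN value and the @function vs. #function
type spelling are per-arch assembler details):

	#define ENTRY(name)	\
		.globl name;	\
		ALIGN;		\
		name:

	#define END(name)	\
		.size name, .-name

	#define ENDPROC(name)		\
		.type name, @function;	\
		END(name)

So, for example, ENTRY(atomic_add) ... ENDPROC(atomic_add) provides the
.globl, alignment, label, .type and .size annotations in one place.
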
Signed-off-by: David S. Miller <davem@davemloft.net>
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
+#include <linux/linkage.h>
+
.text
- .align 4
- .globl __ashldi3
-__ashldi3:
+ENTRY(__ashldi3)
cmp %o2, 0
be 9f
mov 0x20, %g2
9:
retl
nop
+ENDPROC(__ashldi3)
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/linkage.h>
+
.text
- .align 4
- .globl __ashrdi3
-__ashrdi3:
+ENTRY(__ashrdi3)
tst %o2
be 3f
or %g0, 32, %g2
3:
jmpl %o7 + 8, %g0
nop
+ENDPROC(__ashrdi3)
* Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
* memory barriers, and a second which returns
* a value and does the barriers.
*/
- .globl atomic_add
- .type atomic_add,#function
-atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic_add, .-atomic_add
+ENDPROC(atomic_add)
- .globl atomic_sub
- .type atomic_sub,#function
-atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic_sub, .-atomic_sub
+ENDPROC(atomic_sub)
- .globl atomic_add_ret
- .type atomic_add_ret,#function
-atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
retl
sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic_add_ret, .-atomic_add_ret
+ENDPROC(atomic_add_ret)
- .globl atomic_sub_ret
- .type atomic_sub_ret,#function
-atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
retl
sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic_sub_ret, .-atomic_sub_ret
+ENDPROC(atomic_sub_ret)
- .globl atomic64_add
- .type atomic64_add,#function
-atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
add %g1, %o0, %g7
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic64_add, .-atomic64_add
+ENDPROC(atomic64_add)
- .globl atomic64_sub
- .type atomic64_sub,#function
-atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
sub %g1, %o0, %g7
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic64_sub, .-atomic64_sub
+ENDPROC(atomic64_sub)
- .globl atomic64_add_ret
- .type atomic64_add_ret,#function
-atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
add %g1, %o0, %g7
retl
add %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic64_add_ret, .-atomic64_add_ret
+ENDPROC(atomic64_add_ret)
- .globl atomic64_sub_ret
- .type atomic64_sub_ret,#function
-atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
sub %g1, %o0, %g7
retl
sub %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
- .size atomic64_sub_ret, .-atomic64_sub_ret
+ENDPROC(atomic64_sub_ret)
* Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
.text
- .globl test_and_set_bit
- .type test_and_set_bit,#function
-test_and_set_bit: /* %o0=nr, %o1=addr */
+ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
- .size test_and_set_bit, .-test_and_set_bit
+ENDPROC(test_and_set_bit)
- .globl test_and_clear_bit
- .type test_and_clear_bit,#function
-test_and_clear_bit: /* %o0=nr, %o1=addr */
+ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
- .size test_and_clear_bit, .-test_and_clear_bit
+ENDPROC(test_and_clear_bit)
- .globl test_and_change_bit
- .type test_and_change_bit,#function
-test_and_change_bit: /* %o0=nr, %o1=addr */
+ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
- .size test_and_change_bit, .-test_and_change_bit
+ENDPROC(test_and_change_bit)
- .globl set_bit
- .type set_bit,#function
-set_bit: /* %o0=nr, %o1=addr */
+ENTRY(set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
- .size set_bit, .-set_bit
+ENDPROC(set_bit)
- .globl clear_bit
- .type clear_bit,#function
-clear_bit: /* %o0=nr, %o1=addr */
+ENTRY(clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
- .size clear_bit, .-clear_bit
+ENDPROC(clear_bit)
- .globl change_bit
- .type change_bit,#function
-change_bit: /* %o0=nr, %o1=addr */
+ENTRY(change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
- .size change_bit, .-change_bit
+ENDPROC(change_bit)
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/linkage.h>
#include <asm/page.h>
/* Zero out 64 bytes of memory at (buf + offset).
*/
.text
- .align 4
- .globl bzero_1page, __copy_1page
-
-bzero_1page:
+ENTRY(bzero_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = buf */
retl
nop
+ENDPROC(bzero_1page)
-__copy_1page:
+ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = dst, %o1 = src */
retl
nop
+ENDPROC(__copy_1page)
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
+#include <linux/linkage.h>
+
.text
- .globl memset
- .type memset, #function
-memset: /* %o0=buf, %o1=pat, %o2=len */
+ENTRY(memset) /* %o0=buf, %o1=pat, %o2=len */
and %o1, 0xff, %o3
mov %o2, %o1
sllx %o3, 8, %g1
ba,pt %xcc, 1f
or %g1, %o2, %o2
- .globl __bzero
- .type __bzero, #function
-__bzero: /* %o0=buf, %o1=len */
+ENTRY(__bzero) /* %o0=buf, %o1=len */
clr %o2
1: mov %o0, %o3
brz,pn %o1, __bzero_done
__bzero_done:
retl
mov %o3, %o0
- .size __bzero, .-__bzero
- .size memset, .-memset
+ENDPROC(__bzero)
+ENDPROC(memset)
#define EX_ST(x,y) \
98: x,y; \
.text; \
.align 4;
- .globl __clear_user
- .type __clear_user, #function
-__clear_user: /* %o0=buf, %o1=len */
+ENTRY(__clear_user) /* %o0=buf, %o1=len */
brz,pn %o1, __clear_user_done
cmp %o1, 16
bl,pn %icc, __clear_user_tiny
__clear_user_done:
retl
clr %o0
- .size __clear_user, .-__clear_user
+ENDPROC(__clear_user)
+#include <linux/linkage.h>
+
.text
- .align 32
- .globl ip_fast_csum
- .type ip_fast_csum,#function
-ip_fast_csum: /* %o0 = iph, %o1 = ihl */
+ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
sub %o1, 4, %g7
lduw [%o0 + 0x00], %o2
lduw [%o0 + 0x04], %g2
set 0xffff, %o1
retl
and %o2, %o1, %o0
- .size ip_fast_csum, .-ip_fast_csum
+ENDPROC(ip_fast_csum)
+#include <linux/linkage.h>
- .globl __lshrdi3
-__lshrdi3:
+ENTRY(__lshrdi3)
cmp %o2, 0
be 3f
mov 0x20, %g2
3:
retl
nop
+ENDPROC(__lshrdi3)
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/linkage.h>
+
.text
- .align 32
- .globl memmove
- .type memmove,#function
-memmove: /* o0=dst o1=src o2=len */
+ENTRY(memmove) /* o0=dst o1=src o2=len */
mov %o0, %g1
cmp %o0, %o1
bleu,pt %xcc, memcpy
retl
mov %g1, %o0
- .size memmove, .-memmove
+ENDPROC(memmove)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/linkage.h>
#include <asm/asi.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
.align 4
- .global __strlen_user, __strnlen_user
-__strlen_user:
+ENTRY(__strlen_user)
sethi %hi(32768), %o1
-__strnlen_user:
+ENTRY(__strnlen_user)
mov %o1, %g1
mov %o0, %o1
andcc %o0, 3, %g0
mov 2, %o0
23: retl
mov 3, %o0
+ENDPROC(__strlen_user)
+ENDPROC(__strnlen_user)
.section .fixup,#alloc,#execinstr
.align 4
* generic strncmp routine.
*/
+#include <linux/linkage.h>
+
.text
- .align 4
- .global strncmp
-strncmp:
+ENTRY(strncmp)
mov %o0, %g3
mov 0, %o3
and %g2, 0xff, %o0
retl
sub %o3, %o0, %o0
+ENDPROC(strncmp)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/linkage.h>
#include <asm/asi.h>
.text
- .align 32
- .globl strncmp
- .type strncmp,#function
-strncmp:
+ENTRY(strncmp)
brlez,pn %o2, 3f
lduba [%o0] (ASI_PNF), %o3
1:
3:
retl
clr %o0
- .size strncmp, .-strncmp
+ENDPROC(strncmp)
* Copyright(C) 1996 David S. Miller
*/
+#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
.text
- .align 4
/* Must return:
*
* bytes copied if we hit a null byte
*/
- .globl __strncpy_from_user
-__strncpy_from_user:
+ENTRY(__strncpy_from_user)
/* %o0=dest, %o1=src, %o2=count */
mov %o2, %o3
1:
add %o2, 1, %o0
retl
sub %o3, %o0, %o0
+ENDPROC(__strncpy_from_user)
.section .fixup,#alloc,#execinstr
.align 4
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/errno.h>
0: .xword 0x0101010101010101
.text
- .align 32
/* Must return:
*
* and average length is 18 or so.
*/
- .globl __strncpy_from_user
- .type __strncpy_from_user,#function
-__strncpy_from_user:
+ENTRY(__strncpy_from_user)
/* %o0=dest, %o1=src, %o2=count */
andcc %o1, 7, %g0 ! IEU1 Group
bne,pn %icc, 30f ! CTI
mov %o2, %o0
2: retl
add %o2, %o3, %o0
- .size __strncpy_from_user, .-__strncpy_from_user
+ENDPROC(__strncpy_from_user)
.section __ex_table,"a"
.align 4
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
+#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
* !(len & 127) && len >= 256
*/
.text
- .align 32
/* VIS versions. */
- .globl xor_vis_2
- .type xor_vis_2,#function
-xor_vis_2:
+ENTRY(xor_vis_2)
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
be,pt %icc, 0f
wr %g1, %g0, %asi
retl
wr %g0, 0, %fprs
- .size xor_vis_2, .-xor_vis_2
+ENDPROC(xor_vis_2)
- .globl xor_vis_3
- .type xor_vis_3,#function
-xor_vis_3:
+ENTRY(xor_vis_3)
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
be,pt %icc, 0f
wr %g1, %g0, %asi
retl
wr %g0, 0, %fprs
- .size xor_vis_3, .-xor_vis_3
+ENDPROC(xor_vis_3)
- .globl xor_vis_4
- .type xor_vis_4,#function
-xor_vis_4:
+ENTRY(xor_vis_4)
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
be,pt %icc, 0f
wr %g1, %g0, %asi
retl
wr %g0, 0, %fprs
- .size xor_vis_4, .-xor_vis_4
+ENDPROC(xor_vis_4)
- .globl xor_vis_5
- .type xor_vis_5,#function
-xor_vis_5:
+ENTRY(xor_vis_5)
save %sp, -192, %sp
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
wr %g0, 0, %fprs
ret
restore
- .size xor_vis_5, .-xor_vis_5
+ENDPROC(xor_vis_5)
/* Niagara versions. */
- .globl xor_niagara_2
- .type xor_niagara_2,#function
-xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */
+ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
save %sp, -192, %sp
prefetch [%i1], #n_writes
prefetch [%i2], #one_read
wr %g7, 0x0, %asi
ret
restore
- .size xor_niagara_2, .-xor_niagara_2
+ENDPROC(xor_niagara_2)
- .globl xor_niagara_3
- .type xor_niagara_3,#function
-xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
+ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
save %sp, -192, %sp
prefetch [%i1], #n_writes
prefetch [%i2], #one_read
wr %g7, 0x0, %asi
ret
restore
- .size xor_niagara_3, .-xor_niagara_3
+ENDPROC(xor_niagara_3)
- .globl xor_niagara_4
- .type xor_niagara_4,#function
-xor_niagara_4: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
+ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
save %sp, -192, %sp
prefetch [%i1], #n_writes
prefetch [%i2], #one_read
wr %g7, 0x0, %asi
ret
restore
- .size xor_niagara_4, .-xor_niagara_4
+ENDPROC(xor_niagara_4)
- .globl xor_niagara_5
- .type xor_niagara_5,#function
-xor_niagara_5: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
+ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
save %sp, -192, %sp
prefetch [%i1], #n_writes
prefetch [%i2], #one_read
wr %g7, 0x0, %asi
ret
restore
- .size xor_niagara_5, .-xor_niagara_5
+ENDPROC(xor_niagara_5)