Patch from Nicolas Pitre
For assembly labels to actually be local they must start with ".L",
not merely "."; otherwise they remain visible in the final link,
clutter kallsyms needlessly, and can make for unclear symbolic
backtraces. This patch simply inserts an "L" where appropriate. The
code itself is unchanged.
Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
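
(Illustration only, not part of the patch.) A minimal GNU as sketch of
the difference, using a made-up counting loop; the symbol names here are
hypothetical. On ELF targets a label that starts with a bare "." is an
ordinary symbol and lands in the object file's symbol table, while a
".L" prefix marks it assembler-local so it is discarded before the
final link:

	.text
@ ".loop" is merely dot-prefixed: nm still lists it, it shows up in
@ kallsyms, and it can muddy a symbolic backtrace.
.loop:	subs	r0, r0, #1
	bne	.loop
@ ".Lloop" is assembler-local: GNU as never emits it as a symbol.
.Lloop:	subs	r0, r0, #1
	bne	.Lloop

Assembling this and running nm on the object shows .loop listed as a
local text symbol and .Lloop absent entirely.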
td2 .req r5 @ save before use
td3 .req lr
add sp, sp, #4
ldr pc, [sp], #4
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
-.less8: teq len, #0 @ check for zero count
- beq .zero
+.Lless8: teq len, #0 @ check for zero count
+ beq .Lzero
/* we must have at least one byte. */
tst buf, #1 @ odd address?
subne len, len, #1
adcnes sum, sum, td0, put_byte_1
-.less4: tst len, #6
- beq .less8_byte
+.Lless4: tst len, #6
+ beq .Lless8_byte
/* we are now half-word aligned */
#if __LINUX_ARM_ARCH__ >= 4
ldrh td0, [buf], #2
sub len, len, #2
#endif
adcs sum, sum, td0
tst len, #6
-.less8_byte: tst len, #1 @ odd number of bytes
+.Lless8_byte: tst len, #1 @ odd number of bytes
ldrneb td0, [buf], #1 @ include last byte
adcnes sum, sum, td0, put_byte_0 @ update checksum
-.done: adc r0, sum, #0 @ collect up the last carry
+.Ldone: adc r0, sum, #0 @ collect up the last carry
ldr td0, [sp], #4
tst td0, #1 @ check buffer alignment
movne r0, r0, ror #8 @ rotate checksum by 8 bits
ldr pc, [sp], #4 @ return
-.not_aligned: tst buf, #1 @ odd address
+.Lnot_aligned: tst buf, #1 @ odd address
ldrneb td0, [buf], #1 @ make even
subne len, len, #1
adcnes sum, sum, td0, put_byte_1 @ update checksum
ENTRY(csum_partial)
stmfd sp!, {buf, lr}
cmp len, #8 @ Ensure that we have at least
- blo .less8 @ 8 bytes to copy.
+ blo .Lless8 @ 8 bytes to copy.
tst buf, #1
movne sum, sum, ror #8
adds sum, sum, #0 @ C = 0
tst buf, #3 @ Test destination alignment
- blne .not_aligned @ aligh destination, return here
+ blne .Lnot_aligned @ align destination, return here
1: bics ip, len, #31
beq 3f
ldmfd sp!, {r4 - r5}
3: tst len, #0x1c @ should not change C
4: ldr td0, [buf], #4
sub len, len, #4
adcs sum, sum, td0
tst len, #0x1c
bne 4b
* the length. Note that the source pointer hasn't been
* aligned yet.
*/
-.dst_unaligned: tst dst, #1
- beq .dst_16bit
+.Ldst_unaligned:
+ tst dst, #1
+ beq .Ldst_16bit
load1b ip
sub len, len, #1
tst dst, #2
moveq pc, lr @ dst is now 32bit aligned
-.dst_16bit: load2b r8, ip
+.Ldst_16bit: load2b r8, ip
sub len, len, #2
adcs sum, sum, r8, put_byte_0
strb r8, [dst], #1
* Handle 0 to 7 bytes, with any alignment of source and
* destination pointers. Note that when we get here, C = 0
*/
-.less8: teq len, #0 @ check for zero count
- beq .zero
+.Lless8: teq len, #0 @ check for zero count
+ beq .Lzero
/* we must have at least one byte. */
tst dst, #1 @ dst 16-bit aligned
/* Align dst */
load1b ip
adcs sum, sum, ip, put_byte_1 @ update checksum
strb ip, [dst], #1
tst len, #6
1: load2b r8, ip
sub len, len, #2
strb r8, [dst], #1
adcs sum, sum, ip, put_byte_1
strb ip, [dst], #1
-.less8_aligned: tst len, #6
+.Lless8_aligned:
+ tst len, #6
load1b r8
adcs sum, sum, r8, put_byte_0 @ update checksum
strb r8, [dst], #1
sub fp, ip, #4
cmp len, #8 @ Ensure that we have at least
- blo .less8 @ 8 bytes to copy.
+ blo .Lless8 @ 8 bytes to copy.
adds sum, sum, #0 @ C = 0
tst dst, #3 @ Test destination alignment
- blne .dst_unaligned @ align destination, return here
+ blne .Ldst_unaligned @ align destination, return here
/*
* Ok, the dst pointer is now 32bit aligned, and we know
*/
tst src, #3 @ Test source alignment
/* Routine for src & dst aligned */
adcs sum, sum, r4
4: ands len, len, #3
load1l r4
tst len, #2
mov r5, r4, get_byte_0
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
strneb r5, [dst], #1
andne r5, r5, #255
adcnes sum, sum, r5, put_byte_0
* the inefficient byte manipulations in the
* architecture independent code.
*/
ldr sum, [sp, #0] @ dst
tst sum, #1
movne r0, r0, ror #8
load_regs ea
adc sum, sum, #0 @ include C from dst alignment
and ip, src, #3
bic src, src, #3
load1l r5
cmp ip, #2
- beq .src2_aligned
- bhi .src3_aligned
+ beq .Lsrc2_aligned
+ bhi .Lsrc3_aligned
mov r4, r5, pull #8 @ C = 0
bics ip, len, #15
beq 2f
adcs sum, sum, r4
mov r4, r5, pull #8
4: ands len, len, #3
mov r5, r4, get_byte_0
tst len, #2
adcs sum, sum, r4, push #16
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
mov r5, r4, get_byte_2
-.src2_aligned: mov r4, r5, pull #16
+.Lsrc2_aligned: mov r4, r5, pull #16
adds sum, sum, #0
bics ip, len, #15
beq 2f
adcs sum, sum, r4
mov r4, r5, pull #16
4: ands len, len, #3
mov r5, r4, get_byte_0
tst len, #2
adcs sum, sum, r4
strb r5, [dst], #1
mov r5, r4, get_byte_1
strb r5, [dst], #1
tst len, #1
-.src3_aligned: mov r4, r5, pull #24
+.Lsrc3_aligned: mov r4, r5, pull #24
adds sum, sum, #0
bics ip, len, #15
beq 2f
adcs sum, sum, r4
mov r4, r5, pull #24
4: ands len, len, #3
mov r5, r4, get_byte_0
tst len, #2
strb r5, [dst], #1
adcs sum, sum, r4
load1l r4
strb r5, [dst], #1
adcs sum, sum, r4, push #24
mov r5, r4, get_byte_1
#include <asm/assembler.h>
.text
-LC0: .word loops_per_jiffy
+.LC0: .word loops_per_jiffy
orr r2, r2, #0x00db
mul r0, r2, r0
ENTRY(__const_udelay) @ 0 <= r0 <= 0x01ffffff
ldr r2, [r2] @ max = 0x0fffffff
mov r0, r0, lsr #11 @ max = 0x00003fff
mov r2, r2, lsr #11 @ max = 0x0003ffff
mov r2, #0
1: ldrb r3, [r0, r2, lsr #3]
eors r3, r3, #0xff @ invert bits
- bne .found @ any now set - found zero bit
+ bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
ldrb r3, [r0, r2, lsr #3]
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
mov r2, #0
1: ldrb r3, [r0, r2, lsr #3]
movs r3, r3
- bne .found @ any now set - found zero bit
+ bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
beq 1b @ If new byte, goto old routine
ldrb r3, [r0, r2, lsr #3]
movs r3, r3, lsr ip @ shift off unused bits
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
1: eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
eors r3, r3, #0xff @ invert bits
- bne .found @ any now set - found zero bit
+ bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
ldrb r3, [r0, r3, lsr #3]
eor r3, r3, #0xff @ now looking for a 1 bit
movs r3, r3, lsr ip @ shift off unused bits
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
1: eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
movs r3, r3
- bne .found @ any now set - found zero bit
+ bne .L_found @ any now set - found zero bit
add r2, r2, #8 @ next bit pointer
2: cmp r2, r1 @ any more?
blo 1b
eor r3, r2, #0x18 @ big endian byte ordering
ldrb r3, [r0, r3, lsr #3]
movs r3, r3, lsr ip @ shift off unused bits
orr r2, r2, #7 @ if zero, then no bits here
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
/*
* One or more bits in the LSB of r3 are assumed to be set.
*/
#if __LINUX_ARM_ARCH__ >= 5
rsb r1, r3, #0
and r3, r3, r1