aarch64: fix strcpy and strnlen for big-endian [BZ #25824]
authorLexi Shao <shaolexi@huawei.com>
Fri, 15 May 2020 10:48:59 +0000 (18:48 +0800)
committerSzabolcs Nagy <szabolcs.nagy@arm.com>
Fri, 15 May 2020 11:15:56 +0000 (12:15 +0100)
This patch fixes the optimized implementation of strcpy and strnlen
on a big-endian arm64 machine.

The optimized method uses NEON, which can process 128 bits with one
instruction. On a big-endian machine, the byte order should be reversed
across the whole 128-bit double word. But the instruction
rev64 datav.16b, datav.16b
only reverses the 64 bits within each of the two halves rather than
reversing the whole 128 bits. There is no rev128 instruction to reverse
all 128 bits, but we can fix this by loading the data registers accordingly.

Fixes 0237b61526e7("aarch64: Optimized implementation of strcpy") and
2911cb68ed3d("aarch64: Optimized implementation of strnlen").

Signed-off-by: Lexi Shao <shaolexi@huawei.com>
Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
sysdeps/aarch64/strcpy.S
sysdeps/aarch64/strnlen.S

index 548130e..a8ff52c 100644 (file)
@@ -234,8 +234,13 @@ L(entry_no_page_cross):
 #endif
        /* calculate the loc value */
        cmeq    datav.16b, datav.16b, #0
+#ifdef __AARCH64EB__
+       mov     data1, datav.d[1]
+       mov     data2, datav.d[0]
+#else
        mov     data1, datav.d[0]
        mov     data2, datav.d[1]
+#endif
        cmp     data1, 0
        csel    data1, data1, data2, ne
        mov     pos, 8
index 5981247..086a5c7 100644 (file)
@@ -154,8 +154,13 @@ L(loop_end):
           byte.  */
 
        cmeq    datav.16b, datav.16b, #0
+#ifdef __AARCH64EB__
+       mov     data1, datav.d[1]
+       mov     data2, datav.d[0]
+#else
        mov     data1, datav.d[0]
        mov     data2, datav.d[1]
+#endif
        cmp     data1, 0
        csel    data1, data1, data2, ne
        sub     len, src, srcin