Commit 6a8ec449 authored by Andrey Konovalov

Merge branch 'tracking-cortex-strings-arm64' into merge-linux-linaro

parents 1e847a47 8c7fa436
......@@ -22,6 +22,18 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c);
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *);
#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t);
#define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
#define __HAVE_ARCH_STRNLEN
extern __kernel_size_t strnlen(const char *, __kernel_size_t);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
......@@ -34,4 +46,7 @@ extern void *memchr(const void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
#define __HAVE_ARCH_MEMCMP
extern int memcmp(const void *, const void *, size_t);
#endif
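Each __HAVE_ARCH_* define above tells the generic string code not to build its C fallback for that routine. For reference, lib/string.c wraps each generic implementation in a guard roughly like the following (a sketch of the pattern, shown here with strcmp):

#ifndef __HAVE_ARCH_STRCMP
/* Generic byte-at-a-time fallback, compiled only when the architecture
 * does not provide an optimised strcmp. */
int strcmp(const char *cs, const char *ct)
{
	unsigned char c1, c2;

	while (1) {
		c1 = *cs++;
		c2 = *ct++;
		if (c1 != c2)
			return c1 < c2 ? -1 : 1;
		if (!c1)
			break;
	}
	return 0;
}
EXPORT_SYMBOL(strcmp);
#endif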
......@@ -44,10 +44,15 @@ EXPORT_SYMBOL(memstart_addr);
/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(memcmp);
/* atomic bitops */
EXPORT_SYMBOL(set_bit);
......
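With the EXPORT_SYMBOL() entries in place, loadable modules can link against the new assembly routines directly. A minimal, purely hypothetical test module (the strdemo_* names are illustrative, not part of the patch) might look like:

#include <linux/module.h>
#include <linux/printk.h>
#include <linux/string.h>

static int __init strdemo_init(void)
{
	static const char a[] = "cortex";
	static const char b[] = "cortex";

	/* These calls resolve against the arm64 assembly implementations
	 * exported above once the module is loaded. */
	pr_info("memcmp=%d strnlen=%zu\n",
		memcmp(a, b, sizeof(a)), strnlen(a, sizeof(a)));
	return 0;
}

static void __exit strdemo_exit(void)
{
}

module_init(strdemo_init);
module_exit(strdemo_exit);
MODULE_LICENSE("GPL");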
lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o
ifeq ($(COMPILER),clang)
......
/*
* Copyright (C) 2013 ARM Ltd.
* Copyright (C) 2013 Linaro.
*
* This code is based on glibc cortex strings work originally authored by Linaro
* and re-licensed under GPLv2 for the Linux kernel. The original code can
* be found @
*
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
* files/head:/src/aarch64/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* Compare two memory areas. When the two areas have different alignment
* offsets, the misaligned accesses are handled by the hardware.
*
* Parameters:
* x0 - pointer to const memory area 1
* x1 - pointer to const memory area 2
* x2 - the maximum number of bytes to compare
* Returns:
* x0 - the compare result: less than, equal to, or greater than zero
*/
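For reference, the contract described above is the standard memcmp() one. A plain C model of what the assembly computes, as a sketch for understanding only (memcmp_model is an illustrative name, not part of the patch):

#include <stddef.h>

/* Reference model: byte-wise unsigned comparison of up to 'limit' bytes. */
static int memcmp_model(const void *s1, const void *s2, size_t limit)
{
	const unsigned char *p1 = s1, *p2 = s2;

	while (limit--) {
		if (*p1 != *p2)
			return *p1 - *p2;
		p1++;
		p2++;
	}
	return 0;
}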
/* Parameters and result. */
src1 .req x0
src2 .req x1
limit .req x2
result .req x0
/* Internal variables. */
data1 .req x3
data1w .req w3
data2 .req x4
data2w .req w4
has_nul .req x5
diff .req x6
endloop .req x7
tmp1 .req x8
tmp2 .req x9
tmp3 .req x10
pos .req x11
limit_wd .req x12
mask .req x13
ENTRY(memcmp)
cbz limit, .Lret0
eor tmp1, src1, src2
tst tmp1, #7
b.ne .Lmisaligned8
ands tmp1, src1, #7
b.ne .Lmutual_align
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
/*
* The input source addresses are at an alignment boundary.
* Compare eight bytes at a time.
*/
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
subs limit_wd, limit_wd, #1
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, cs /* Last Dword or differences. */
cbz endloop, .Lloop_aligned
/* Not reached the limit, must have found a diff. */
tbz limit_wd, #63, .Lnot_limit
/* Limit % 8 == 0 => the diff is in the last 8 bytes. */
ands limit, limit, #7
b.eq .Lnot_limit
/*
* Fewer than 8 bytes remain; extract the valid data from the last
* eight bytes of the intended memory range.
*/
lsl limit, limit, #3 /* Bytes -> bits. */
mov mask, #~0
CPU_BE( lsr mask, mask, limit )
CPU_LE( lsl mask, mask, limit )
bic data1, data1, mask
bic data2, data2, mask
orr diff, diff, mask
b .Lnot_limit
.Lmutual_align:
/*
* Sources are mutually aligned, but are not currently at an
* alignment boundary. Round down the addresses and then mask off
* the bytes that precede the start point.
*/
bic src1, src1, #7
bic src2, src2, #7
ldr data1, [src1], #8
ldr data2, [src2], #8
/*
* We cannot simply add tmp1 to limit here, since the addition could
* overflow limit; compute limit_wd piecewise instead.
*/
sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
add tmp3, tmp3, tmp1
add limit_wd, limit_wd, tmp3, lsr #3
add limit, limit, tmp1 /* Adjust the limit for the extra bytes. */
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
neg tmp1, tmp1 /* Bits to alignment -64. */
mov tmp2, #~0
/* Mask off the unwanted bytes before the start address. */
CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Big-endian: early bytes are at the MSB. */
/* Little-endian: early bytes are at the LSB. */
CPU_LE( lsr tmp2, tmp2, tmp1 )
orr data1, data1, tmp2
orr data2, data2, tmp2
b .Lstart_realigned
/* src1 and src2 have different alignment offsets. */
.Lmisaligned8:
cmp limit, #8
b.lo .Ltiny8proc /* limit < 8: compare byte by byte. */
and tmp1, src1, #7
neg tmp1, tmp1
add tmp1, tmp1, #8 /* Valid bytes in the first 8 bytes of src1. */
and tmp2, src2, #7
neg tmp2, tmp2
add tmp2, tmp2, #8 /* Valid bytes in the first 8 bytes of src2. */
subs tmp3, tmp1, tmp2
csel pos, tmp1, tmp2, hi /* Choose the maximum. */
sub limit, limit, pos
/* Compare the leading bytes in the first 8-byte segment. */
.Ltinycmp:
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs pos, pos, #1
ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
b.eq .Ltinycmp
cbnz pos, 1f /*diff occurred before the last byte.*/
cmp data1w, data2w
b.eq .Lstart_align
1:
sub result, data1, data2
ret
.Lstart_align:
lsr limit_wd, limit, #3
cbz limit_wd, .Lremain8
ands xzr, src1, #7
b.eq .Lrecal_offset
/* Process more leading bytes to make src1 aligned. */
add src1, src1, tmp3 /* Move src1 back to its alignment boundary. */
add src2, src2, tmp3
sub limit, limit, tmp3
lsr limit_wd, limit, #3
cbz limit_wd, .Lremain8
/* Load 8 bytes from the now-aligned src1. */
ldr data1, [src1], #8
ldr data2, [src2], #8
subs limit_wd, limit_wd, #1
eor diff, data1, data2 /* Non-zero if differences found. */
csinv endloop, diff, xzr, ne
cbnz endloop, .Lunequal_proc
/* How far is the current src2 from its alignment boundary? */
and tmp3, tmp3, #7
.Lrecal_offset: /* src1 is aligned now. */
neg pos, tmp3
.Lloopcmp_proc:
/*
* Divide the eight bytes into two parts. First, step src2 back to an
* alignment boundary, load eight bytes and compare starting from the
* src2 alignment boundary. If all 8 bytes are equal, move on to the
* second part of the comparison; otherwise finish the comparison here.
* This special handling guarantees that all accesses stay within the
* thread/task address space.
*/
ldr data1, [src1,pos]
ldr data2, [src2,pos]
eor diff, data1, data2 /* Non-zero if differences found. */
cbnz diff, .Lnot_limit
/* The second part of the comparison. */
ldr data1, [src1], #8
ldr data2, [src2], #8
eor diff, data1, data2 /* Non-zero if differences found. */
subs limit_wd, limit_wd, #1
csinv endloop, diff, xzr, ne /* If limit_wd is 0, the comparison ends. */
cbz endloop, .Lloopcmp_proc
.Lunequal_proc:
cbz diff, .Lremain8
/* A difference occurred in the latest comparison. */
.Lnot_limit:
/*
* For little-endian, byte-reverse the data so that the low-order (equal)
* bits move to the most significant end; the CLZ that follows can then
* count how many equal bits there are.
*/
CPU_LE( rev diff, diff )
CPU_LE( rev data1, data1 )
CPU_LE( rev data2, data2 )
/*
* The MS-non-zero bit of DIFF marks either the first bit
* that is different, or the end of the significant data.
* Shifting left now will bring the critical information into the
* top bits.
*/
clz pos, diff
lsl data1, data1, pos
lsl data2, data2, pos
/*
* We need to zero-extend (char is unsigned) the value and then
* perform a signed subtraction.
*/
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
.Lremain8:
/* Limit % 8 == 0 => all data are equal. */
ands limit, limit, #7
b.eq .Lret0
.Ltiny8proc:
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs limit, limit, #1
ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
b.eq .Ltiny8proc
sub result, data1, data2
ret
.Lret0:
mov result, #0
ret
ENDPROC(memcmp)
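The tail-masking step near .Lnot_limit (building a mask from the number of remaining bytes and clearing the bytes beyond the limit) can be illustrated in C. This sketch shows only the little-endian masking idea with a hypothetical helper; the assembly additionally ORs the mask into diff so that the clz-based search stops at the end of the valid data:

#include <stdint.h>

/* Little-endian illustration of the final-word masking above: with
 * 'remain' valid bytes left (1..7), ignore the upper bytes of the last
 * 8-byte words when looking for a difference. */
static uint64_t masked_diff_le(uint64_t data1, uint64_t data2, unsigned remain)
{
	uint64_t mask = ~UINT64_C(0) << (remain * 8); /* lsl mask, limit*8 */

	data1 &= ~mask;         /* bic data1, data1, mask */
	data2 &= ~mask;         /* bic data2, data2, mask */
	return data1 ^ data2;   /* non-zero only if the valid bytes differ */
}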
/*
* Copyright (C) 2013 ARM Ltd.
* Copyright (C) 2013 Linaro.
*
* This code is based on glibc cortex strings work originally authored by Linaro
* and re-licensed under GPLv2 for the Linux kernel. The original code can
* be found @
*
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
* files/head:/src/aarch64/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -27,27 +35,164 @@
* Returns:
* x0 - dest
*/
dstin .req x0
src .req x1
count .req x2
tmp1 .req x3
tmp1w .req w3
tmp2 .req x4
tmp2w .req w4
tmp3 .req x5
tmp3w .req w5
dst .req x6
A_l .req x7
A_h .req x8
B_l .req x9
B_h .req x10
C_l .req x11
C_h .req x12
D_l .req x13
D_h .req x14
ENTRY(memcpy)
mov x4, x0
subs x2, x2, #8
b.mi 2f
1: ldr x3, [x1], #8
subs x2, x2, #8
str x3, [x4], #8
b.pl 1b
2: adds x2, x2, #4
b.mi 3f
ldr w3, [x1], #4
sub x2, x2, #4
str w3, [x4], #4
3: adds x2, x2, #2
b.mi 4f
ldrh w3, [x1], #2
sub x2, x2, #2
strh w3, [x4], #2
4: adds x2, x2, #1
b.mi 5f
ldrb w3, [x1]
strb w3, [x4]
5: ret
mov dst, dstin
cmp count, #16
/* When the length is less than 16, the accesses are not aligned. */
b.lo .Ltiny15
neg tmp2, src
ands tmp2, tmp2, #15/* Bytes to reach alignment. */
b.eq .LSrcAligned
sub count, count, tmp2
/*
* Copy the leading data from src to dst in increasing address order.
* This way, the risk of overwriting the source data is eliminated when
* src and dst are at least 16 bytes apart. After this, src is aligned
* for the accesses that follow.
*/
tbz tmp2, #0, 1f
ldrb tmp1w, [src], #1
strb tmp1w, [dst], #1
1:
tbz tmp2, #1, 2f
ldrh tmp1w, [src], #2
strh tmp1w, [dst], #2
2:
tbz tmp2, #2, 3f
ldr tmp1w, [src], #4
str tmp1w, [dst], #4
3:
tbz tmp2, #3, .LSrcAligned
ldr tmp1, [src],#8
str tmp1, [dst],#8
.LSrcAligned:
cmp count, #64
b.ge .Lcpy_over64
/*
* Deal with small copies quickly by dropping straight into the
* exit block.
*/
.Ltail63:
/*
* Copy up to 48 bytes of data. At this point we only need the
* bottom 6 bits of count to be accurate.
*/
ands tmp1, count, #0x30
b.eq .Ltiny15
cmp tmp1w, #0x20
b.eq 1f
b.lt 2f
ldp A_l, A_h, [src], #16
stp A_l, A_h, [dst], #16
1:
ldp A_l, A_h, [src], #16
stp A_l, A_h, [dst], #16
2:
ldp A_l, A_h, [src], #16
stp A_l, A_h, [dst], #16
.Ltiny15:
/*
* To make memmove simpler, we prefer to break one ldp/stp into several
* smaller loads/stores that access memory in increasing address order,
* rather than loading/storing 16 bytes at once as the old cortex memcpy
* did. The original cortex code required the src address to be at least
* 16 bytes greater than the dst address, otherwise some source data
* would be overwritten.
*/
tbz count, #3, 1f
ldr tmp1, [src], #8
str tmp1, [dst], #8
1:
tbz count, #2, 2f
ldr tmp1w, [src], #4
str tmp1w, [dst], #4
2:
tbz count, #1, 3f
ldrh tmp1w, [src], #2
strh tmp1w, [dst], #2
3:
tbz count, #0, .Lexitfunc
ldrb tmp1w, [src]
strb tmp1w, [dst]
.Lexitfunc:
ret
.Lcpy_over64:
subs count, count, #128
b.ge .Lcpy_body_large
/*
* Less than 128 bytes to copy, so handle 64 here and then jump
* to the tail.
*/
ldp A_l, A_h, [src],#16
stp A_l, A_h, [dst],#16
ldp B_l, B_h, [src],#16
ldp C_l, C_h, [src],#16
stp B_l, B_h, [dst],#16
stp C_l, C_h, [dst],#16
ldp D_l, D_h, [src],#16
stp D_l, D_h, [dst],#16
tst count, #0x3f
b.ne .Ltail63
ret
/*
* Critical loop. Start at a new cache line boundary. Assuming
* 64 bytes per line this ensures the entire loop is in one line.
*/
.p2align 6
.Lcpy_body_large:
/* Preload the first 64 bytes of data. */
ldp A_l, A_h, [src],#16
ldp B_l, B_h, [src],#16
ldp C_l, C_h, [src],#16
ldp D_l, D_h, [src],#16
1:
/*
* Interleave loading the next 64-byte block with storing the
* previously loaded 64 bytes of data.
*/
stp A_l, A_h, [dst],#16
ldp A_l, A_h, [src],#16
stp B_l, B_h, [dst],#16
ldp B_l, B_h, [src],#16
stp C_l, C_h, [dst],#16
ldp C_l, C_h, [src],#16
stp D_l, D_h, [dst],#16
ldp D_l, D_h, [src],#16
subs count, count, #64
b.ge 1b
stp A_l, A_h, [dst],#16
stp B_l, B_h, [dst],#16
stp C_l, C_h, [dst],#16
stp D_l, D_h, [dst],#16
tst count, #0x3f
b.ne .Ltail63
ret
ENDPROC(memcpy)
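The overall strategy of the new memcpy (copy head bytes until src is 16-byte aligned, stream 64-byte blocks, then finish a tail of up to 63 bytes) can be summarised in C. This is an illustrative sketch of the control flow under those assumptions, with byte loops standing in for the ldp/stp pairs; memcpy_sketch is a hypothetical name, not a replacement for the assembly:

#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch of the copy strategy used above. */
static void *memcpy_sketch(void *dstin, const void *srcin, size_t count)
{
	unsigned char *dst = dstin;
	const unsigned char *src = srcin;
	size_t i;

	if (count >= 16) {
		/* Head: bytes needed to bring src to 16-byte alignment,
		 * mirroring "neg tmp2, src; ands tmp2, tmp2, #15". */
		size_t head = (-(uintptr_t)src) & 15;

		for (i = 0; i < head; i++)
			*dst++ = *src++;
		count -= head;
	}

	/* Main body: the assembly handles this in 64-byte blocks
	 * (.Lcpy_over64 / .Lcpy_body_large), pipelining loads and stores. */
	while (count >= 64) {
		for (i = 0; i < 64; i++)
			*dst++ = *src++;
		count -= 64;
	}

	/* Tail: up to 63 bytes, handled by .Ltail63 / .Ltiny15 above. */
	while (count--)
		*dst++ = *src++;

	return dstin;
}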
/*
* Copyright (C) 2013 ARM Ltd.
* Copyright (C) 2013 Linaro.
*
* This code is based on glibc cortex strings work originally authored by Linaro
* and re-licensed under GPLv2 for the Linux kernel. The original code can
* be found @
*
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
* files/head:/src/aarch64/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -28,30 +36,161 @@
* Returns:
* x0 - dest
*/
dstin .req x0
src .req x1
count .req x2
tmp1 .req x3
tmp1w .req w3
tmp2 .req x4
tmp2w .req w4
tmp3 .req x5
tmp3w .req w5
dst .req x6
A_l .req x7
A_h .req x8
B_l .req x9
B_h .req x10
C_l .req x11
C_h .req x12
D_l .req x13
D_h .req x14
ENTRY(memmove)
cmp x0, x1
b.ls memcpy
add x4, x0, x2
add x1, x1, x2
subs x2, x2, #8
b.mi 2f
1: ldr x3, [x1, #-8]!
subs x2, x2, #8
str x3, [x4, #-8]!
b.pl 1b
2: adds x2, x2, #4
b.mi 3f
ldr w3, [x1, #-4]!
sub x2, x2, #4
str w3, [x4, #-4]!
3: adds x2, x2, #2
b.mi 4f
ldrh w3, [x1, #-2]!
sub x2, x2, #2
strh w3, [x4, #-2]!
4: adds x2, x2, #1
b.mi 5f
ldrb w3, [x1, #-1]
strb w3, [x4, #-1]
5: ret
cmp dstin, src
b.lo memcpy
add tmp1, src, count
cmp dstin, tmp1
b.hs memcpy /* No overlap. */
add dst, dstin, count
add src, src, count
cmp count, #16
b.lo .Ltail15 /* Probably unaligned accesses. */
ands tmp2, src, #15 /* Bytes to reach alignment. */
b.eq .LSrcAligned
sub count, count, tmp2
/*
* Handle the bytes needed to bring src to an alignment boundary first.
* The cost of these extra instructions is acceptable, and the accesses
* that follow are then aligned.
*/
tbz tmp2, #0, 1f
ldrb tmp1w, [src, #-1]!
strb tmp1w, [dst, #-1]!
1:
tbz tmp2, #1, 2f
ldrh tmp1w, [src, #-2]!
strh tmp1w, [dst, #-2]!
2:
tbz tmp2, #2, 3f
ldr tmp1w, [src, #-4]!
str tmp1w, [dst, #-4]!
3:
tbz tmp2, #3, .LSrcAligned
ldr tmp1, [src, #-8]!
str tmp1, [dst, #-8]!
.LSrcAligned:
cmp count, #64
b.ge .Lcpy_over64
/*
* Deal with small copies quickly by dropping straight into the
* exit block.
*/
.Ltail63:
/*
* Copy up to 48 bytes of data. At this point we only need the
* bottom 6 bits of count to be accurate.
*/
ands tmp1, count, #0x30
b.eq .Ltail15
cmp tmp1w, #0x20
b.eq 1f
b.lt 2f
ldp A_l, A_h, [src, #-16]!
stp A_l, A_h, [dst, #-16]!
1:
ldp A_l, A_h, [src, #-16]!
stp A_l, A_h, [dst, #-16]!
2:
ldp A_l, A_h, [src, #-16]!
stp A_l, A_h, [dst, #-16]!
.Ltail15:
tbz count, #3, 1f
ldr tmp1, [src, #-8]!
str tmp1, [dst, #-8]!
1:
tbz count, #2, 2f
ldr tmp1w, [src, #-4]!
str tmp1w, [dst, #-4]!
2:
tbz count, #1, 3f
ldrh tmp1w, [src, #-2]!
strh tmp1w, [dst, #-2]!
3:
tbz count, #0, .Lexitfunc
ldrb tmp1w, [src, #-1]
strb tmp1w, [dst, #-1]
.Lexitfunc:
ret
.Lcpy_over64:
subs count, count, #128
b.ge .Lcpy_body_large
/*
* Less than 128 bytes to copy, so handle 64 here and then jump
* to the tail.
*/
ldp A_l, A_h, [src, #-16]
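The dispatch at the top of the new memmove (b.lo / b.hs to memcpy, otherwise a descending copy) is the usual overlap test. In C terms it is roughly the following illustrative model (memmove_model is a hypothetical name, not part of the patch):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative model of the forward/backward dispatch in memmove above:
 * copy forward (plain memcpy) when that cannot clobber the source,
 * otherwise copy backwards from the end. */
static void *memmove_model(void *dst, const void *src, size_t count)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	if ((uintptr_t)d < (uintptr_t)s ||          /* b.lo memcpy */
	    (uintptr_t)d >= (uintptr_t)s + count)   /* b.hs memcpy: no overlap */
		return memcpy(dst, src, count);

	/* Overlapping, with dst above src: copy from the end, descending. */
	while (count--)
		d[count] = s[count];

	return dst;
}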