author     Wilco Dijkstra <wilco.dijkstra@arm.com>  2023-01-10 14:24:53 +0000
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>    2023-01-10 14:25:04 +0000
commit     10589b2c95e4d482f09ac6a705918fdc32c8421a (patch)
tree       b528f3e1eb044580cad874499d4c85c80bf70683
parent     268217a15421eb9dfa42d3f8b8c078dd6de06b61 (diff)
download   arm-optimized-routines-10589b2c95e4d482f09ac6a705918fdc32c8421a.tar.gz
string: Improve strrchr-mte
Use shrn for narrowing the mask, which simplifies the code. Unroll the search loop, which improves performance on large strings.
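The old code built the mask constants with mov/dup pairs (0x3003, 0xf00f), masked the match vector with and, and compressed it with addp; shrn by 4 narrows each 16-bit lane in one instruction instead, leaving one nibble of syndrome per input byte so a whole 16-byte chunk fits the 64-bit register read out by fmov. A scalar C model of that syndrome (an illustrative sketch only; the helper name syndrome is mine, not part of the patch):

#include <stdint.h>

/* Model of the 4-bits-per-byte syndrome built by cmeq + bit + shrn:
   after the 0x33 repmask insert, each byte of vhas_nul carries the
   char match in bits 0x33 and the NUL match in bits 0xcc, and
   shrn #4 keeps exactly one nibble per input byte.  */
static uint64_t
syndrome (const unsigned char *chunk, unsigned char c)
{
	uint64_t synd = 0;
	for (int i = 0; i < 16; i++)
	{
		uint64_t nib = 0;
		if (chunk[i] == c)
			nib |= 0x3;		/* char bits: 0x3333... overall.  */
		if (chunk[i] == 0)
			nib |= 0xc;		/* NUL bits: 0xcccc... overall.  */
		synd |= nib << (4 * i);		/* nibble i <-> byte i.  */
	}
	return synd;
}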
-rw-r--r--  string/aarch64/strrchr-mte.S  |  52
1 file changed, 31 insertions(+), 21 deletions(-)
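In the unrolled loop in the patch below, the test for "terminator or matching character" is fused into one compare: cmeq turns matching bytes into 0xff, and cmhs vhas_nul, vhas_chr, vdata is then true for a byte exactly when it matched (0xff >= anything) or is NUL (0x00 >= byte only when the byte is 0). A scalar C sketch of that per-chunk test and of the two-chunks-per-iteration shape of L(loop1) (the helper name has_nul_or_chr and the loop framing are mine, not from the patch):

/* Nonzero iff the 16-byte chunk contains NUL or the char c; models
   cmeq (match -> 0xff) followed by cmhs against the original data.  */
static int
has_nul_or_chr (const unsigned char *p, unsigned char c)
{
	for (int i = 0; i < 16; i++)
	{
		unsigned char has_chr = (p[i] == c) ? 0xff : 0x00;
		if (has_chr >= p[i])	/* char match, or p[i] == 0.  */
			return 1;
	}
	return 0;
}

/* Shape of the unrolled L(loop1): check two chunks per iteration and
   leave src pointing at the chunk that produced the hit, matching the
   sub/add fixups around L(loop1_end).  The string terminator
   guarantees the loop exits.  */
static const unsigned char *
search (const unsigned char *src, unsigned char c)
{
	for (;;)
	{
		if (has_nul_or_chr (src + 16, c))
			return src + 16;
		src += 32;
		if (has_nul_or_chr (src, c))
			return src;
	}
}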
diff --git a/string/aarch64/strrchr-mte.S b/string/aarch64/strrchr-mte.S
index e05579f..c451d72 100644
--- a/string/aarch64/strrchr-mte.S
+++ b/string/aarch64/strrchr-mte.S
@@ -19,7 +19,6 @@
#define src x2
#define tmp x3
-#define wtmp w3
#define synd x3
#define shift x4
#define src_match x4
@@ -31,7 +30,6 @@
#define vhas_nul v2
#define vhas_chr v3
#define vrepmask v4
-#define vrepmask2 v5
#define vend v5
#define dend d5
@@ -47,55 +45,67 @@ ENTRY (__strrchr_aarch64_mte)
PTR_ARG (0)
bic src, srcin, 15
dup vrepchr.16b, chrin
- mov wtmp, 0x3003
- dup vrepmask.8h, wtmp
- tst srcin, 15
- beq L(loop1)
-
- ld1 {vdata.16b}, [src], 16
+ movi vrepmask.16b, 0x33
+ ld1 {vdata.16b}, [src]
cmeq vhas_nul.16b, vdata.16b, 0
cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
- mov wtmp, 0xf00f
- dup vrepmask2.8h, wtmp
bit vhas_nul.16b, vhas_chr.16b, vrepmask.16b
- and vhas_nul.16b, vhas_nul.16b, vrepmask2.16b
- addp vend.16b, vhas_nul.16b, vhas_nul.16b
+ shrn vend.8b, vhas_nul.8h, 4
lsl shift, srcin, 2
fmov synd, dend
lsr synd, synd, shift
lsl synd, synd, shift
ands nul_match, synd, 0xcccccccccccccccc
bne L(tail)
- cbnz synd, L(loop2)
+ cbnz synd, L(loop2_start)
- .p2align 5
+ .p2align 4
L(loop1):
- ld1 {vdata.16b}, [src], 16
+ ldr q1, [src, 16]
+ cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
+ cmhs vhas_nul.16b, vhas_chr.16b, vdata.16b
+ umaxp vend.16b, vhas_nul.16b, vhas_nul.16b
+ fmov synd, dend
+ cbnz synd, L(loop1_end)
+ ldr q1, [src, 32]!
cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
cmhs vhas_nul.16b, vhas_chr.16b, vdata.16b
umaxp vend.16b, vhas_nul.16b, vhas_nul.16b
fmov synd, dend
cbz synd, L(loop1)
-
+ sub src, src, 16
+L(loop1_end):
+ add src, src, 16
cmeq vhas_nul.16b, vdata.16b, 0
+#ifdef __AARCH64EB__
+ bif vhas_nul.16b, vhas_chr.16b, vrepmask.16b
+ shrn vend.8b, vhas_nul.8h, 4
+ fmov synd, dend
+ rbit synd, synd
+#else
bit vhas_nul.16b, vhas_chr.16b, vrepmask.16b
- bic vhas_nul.8h, 0x0f, lsl 8
- addp vend.16b, vhas_nul.16b, vhas_nul.16b
+ shrn vend.8b, vhas_nul.8h, 4
fmov synd, dend
+#endif
ands nul_match, synd, 0xcccccccccccccccc
- beq L(loop2)
-
+ beq L(loop2_start)
L(tail):
sub nul_match, nul_match, 1
and chr_match, synd, 0x3333333333333333
ands chr_match, chr_match, nul_match
- sub result, src, 1
+ add result, src, 15
clz tmp, chr_match
sub result, result, tmp, lsr 2
csel result, result, xzr, ne
ret
.p2align 4
+ nop
+ nop
+L(loop2_start):
+ add src, src, 16
+ bic vrepmask.8h, 0xf0
+
L(loop2):
cmp synd, 0
csel src_match, src, src_match, ne
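Two more scalar details under the same 4-bits-per-byte encoding: the entry path masks off bytes before a misaligned srcin (lsl shift, srcin, 2 gives (srcin & 15) * 4, since register shifts use only the low 6 bits of the count), and L(tail) turns the syndrome into the address of the last match before the terminator. A hedged C sketch of both steps (the function name tail and the NULL returns are illustrative, not the routine itself):

#include <stddef.h>
#include <stdint.h>

/* Model of the entry masking plus the L(tail) decode.  'synd' is the
   4-bits-per-byte syndrome of the 16-byte block at 'src' (the block
   containing srcin on the entry path).  */
static const char *
tail (uint64_t synd, uintptr_t srcin, const char *src)
{
	/* Drop nibbles of bytes before the string start: register
	   shifts use the low 6 bits, so this is (srcin & 15) * 4.  */
	unsigned shift = (srcin * 4) & 63;
	synd = (synd >> shift) << shift;

	uint64_t nul_match = synd & 0xccccccccccccccccULL;
	if (nul_match == 0)
		return NULL;	/* no terminator here: keep scanning.  */

	/* nul_match - 1 sets all bits below the first NUL nibble, so the
	   AND keeps only char matches at or before the terminator.  */
	uint64_t chr_match = synd & 0x3333333333333333ULL & (nul_match - 1);
	if (chr_match == 0)
		return NULL;	/* char absent before the NUL.  */

	/* Highest set char bit -> byte index of the last match:
	   result = src + 15 - clz/4, exactly as in the assembly.  */
	return src + 15 - (__builtin_clzll (chr_match) >> 2);
}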