diff options
author | Wilco Dijkstra <wilco.dijkstra@arm.com> | 2022-08-22 13:22:05 +0100 |
---|---|---|
committer | Szabolcs Nagy <szabolcs.nagy@arm.com> | 2022-08-23 10:09:05 +0100 |
commit | 43c24ad1c17de4b9d084f61ab8361d3736f2e527 (patch) | |
tree | 49acae416976c57951502bb0d154397f651d4b3b /string/aarch64 | |
parent | a1547d148400deffeaab1cd484638ec03a519682 (diff) | |
download | arm-optimized-routines-43c24ad1c17de4b9d084f61ab8361d3736f2e527.tar.gz |
string: Optimize memchr-mte
Optimize the main loop - large strings are 40% faster.
Diffstat (limited to 'string/aarch64')
-rw-r--r-- | string/aarch64/memchr-mte.S | 28 |
1 file changed, 15 insertions, 13 deletions
diff --git a/string/aarch64/memchr-mte.S b/string/aarch64/memchr-mte.S index 0f434cf..d4673b3 100644 --- a/string/aarch64/memchr-mte.S +++ b/string/aarch64/memchr-mte.S @@ -48,49 +48,51 @@ ENTRY (__memchr_aarch64_mte) dup vrepchr.16b, chrin cmeq vhas_chr.16b, vdata.16b, vrepchr.16b lsl shift, srcin, 2 - shrn vend.8b, vhas_chr.8h, 4 /* 128->64 */ + shrn vend.8b, vhas_chr.8h, 4 /* 128->64 */ fmov synd, dend lsr synd, synd, shift cbz synd, L(start_loop) rbit synd, synd clz synd, synd - add result, srcin, synd, lsr 2 cmp cntin, synd, lsr 2 + add result, srcin, synd, lsr 2 csel result, result, xzr, hi ret + .p2align 3 L(start_loop): sub tmp, src, srcin - add tmp, tmp, 16 + add tmp, tmp, 17 subs cntrem, cntin, tmp - b.ls L(nomatch) + b.lo L(nomatch) /* Make sure that it won't overread by a 16-byte chunk */ - add tmp, cntrem, 15 - tbnz tmp, 4, L(loop32_2) - + tbz cntrem, 4, L(loop32_2) + sub src, src, 16 .p2align 4 L(loop32): - ldr qdata, [src, 16]! + ldr qdata, [src, 32]! cmeq vhas_chr.16b, vdata.16b, vrepchr.16b umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */ fmov synd, dend cbnz synd, L(end) L(loop32_2): - ldr qdata, [src, 16]! - subs cntrem, cntrem, 32 + ldr qdata, [src, 16] cmeq vhas_chr.16b, vdata.16b, vrepchr.16b - b.ls L(end) + subs cntrem, cntrem, 32 + b.lo L(end_2) umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */ fmov synd, dend cbz synd, L(loop32) +L(end_2): + add src, src, 16 L(end): shrn vend.8b, vhas_chr.8h, 4 /* 128->64 */ + sub cntrem, src, srcin fmov synd, dend - add tmp, srcin, cntin - sub cntrem, tmp, src + sub cntrem, cntin, cntrem #ifndef __AARCH64EB__ rbit synd, synd #endif |