author    Joe Ramsay <joe.ramsay@arm.com>  2022-08-15 10:48:58 +0100
committer Joe Ramsay <joe.ramsay@arm.com>  2022-08-15 10:48:58 +0100
commit    5d326aad40b2f804e672321d3412e67a14814190 (patch)
tree      3960519f7e7039007af7dfd01ced3b4b9ae9f439
parent    250db3398da6774290a75e099a7beaa4c940f079 (diff)
pl/math: Add vector/Neon log2f
The new routine is based on the scalar algorithm in the main math directory, but with all arithmetic done in single precision. invc is represented with a high and a low part. The routine is accurate to 2.6 ULPs.
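The hi/lo split is what keeps the reduction accurate without resorting to double precision: the reduced argument r = z/c - 1 is accumulated with two fused multiply-adds, one per limb. A minimal scalar sketch of that step (helper name hypothetical, mirroring the two fmas in v_log2f_2u6.c below):

#include <math.h>

/* Sketch: invc is stored as invc_hi + invc_lo, where invc_hi is the
   double-precision value rounded to float and invc_lo is the residual.
   Accumulating r = z/c - 1 with two fmas means the rounding error of
   invc_hi is largely cancelled by the low limb.  */
static inline float
reduce (float z, float invc_hi, float invc_lo)
{
  float r = fmaf (z, invc_hi, -1.0f); /* z * invc_hi - 1, single rounding.  */
  return fmaf (z, invc_lo, r);        /* fold in the low-order correction.  */
}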
-rw-r--r--  pl/math/include/mathlib.h                   5
-rw-r--r--  pl/math/math_config.h                      12
-rw-r--r--  pl/math/s_log2f_2u6.c                       6
-rw-r--r--  pl/math/test/mathbench_funcs.h              6
-rwxr-xr-x  pl/math/test/runulp.sh                     14
-rw-r--r--  pl/math/test/testcases/directed/log2f.tst  27
-rw-r--r--  pl/math/test/ulp_funcs.h                    4
-rw-r--r--  pl/math/test/ulp_wrappers.h                 3
-rw-r--r--  pl/math/v_log2f_2u6.c                     108
-rw-r--r--  pl/math/v_log2f_data.c                     37
-rw-r--r--  pl/math/vn_log2f_2u6.c                     12
11 files changed, 234 insertions, 0 deletions
diff --git a/pl/math/include/mathlib.h b/pl/math/include/mathlib.h
index 6f3b537..2ef8198 100644
--- a/pl/math/include/mathlib.h
+++ b/pl/math/include/mathlib.h
@@ -15,6 +15,7 @@ float erfcf (float);
float erff (float);
float log10f (float);
float log1pf (float);
+float log2f (float);
double asinh (double);
double atan2 (double, double);
@@ -28,6 +29,7 @@ float __s_erfcf (float);
float __s_erff (float);
float __s_log10f (float);
float __s_log1pf (float);
+float __s_log2f (float);
double __s_atan (double);
double __s_atan2 (double, double);
@@ -59,6 +61,7 @@ __f64x2_t __v_erfc (__f64x2_t);
__f32x4_t __v_log10f (__f32x4_t);
__f64x2_t __v_log10 (__f64x2_t);
__f32x4_t __v_log1pf (__f32x4_t);
+__f32x4_t __v_log2f (__f32x4_t);
#if __GNUC__ >= 9 || __clang_major__ >= 8
#define __vpcs __attribute__((__aarch64_vector_pcs__))
@@ -76,6 +79,7 @@ __vpcs __f64x2_t __vn_erfc (__f64x2_t);
__vpcs __f32x4_t __vn_log10f (__f32x4_t);
__vpcs __f64x2_t __vn_log10 (__f64x2_t);
__vpcs __f32x4_t __vn_log1pf (__f32x4_t);
+__vpcs __f32x4_t __vn_log2f (__f32x4_t);
/* Vector functions following the vector PCS using ABI names. */
__vpcs __f32x4_t _ZGVnN4v_asinhf (__f32x4_t);
@@ -90,6 +94,7 @@ __vpcs __f64x2_t _ZGVnN2v_erfc (__f64x2_t);
__vpcs __f32x4_t _ZGVnN4v_log10f (__f32x4_t);
__vpcs __f64x2_t _ZGVnN2v_log10 (__f64x2_t);
__vpcs __f32x4_t _ZGVnN4v_log1pf (__f32x4_t);
+__vpcs __f32x4_t _ZGVnN4v_log2f (__f32x4_t);
#endif
diff --git a/pl/math/math_config.h b/pl/math/math_config.h
index f058753..a2719e0 100644
--- a/pl/math/math_config.h
+++ b/pl/math/math_config.h
@@ -466,4 +466,16 @@ extern const struct log1pf_data
{
float coeffs[LOG1PF_NCOEFFS];
} __log1pf_data HIDDEN;
+
+#define V_LOG2F_TABLE_BITS 4
+#define V_LOG2F_POLY_ORDER 4
+extern const struct v_log2f_data
+{
+ struct
+ {
+ /* Pad with dummy for quad-aligned memory access. */
+ float invc_hi, invc_lo, logc, dummy;
+ } tab[1 << V_LOG2F_TABLE_BITS];
+ float poly[V_LOG2F_POLY_ORDER];
+} __v_log2f_data HIDDEN;
#endif
diff --git a/pl/math/s_log2f_2u6.c b/pl/math/s_log2f_2u6.c
new file mode 100644
index 0000000..8e5569d
--- /dev/null
+++ b/pl/math/s_log2f_2u6.c
@@ -0,0 +1,6 @@
+/*
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+#define SCALAR 1
+#include "v_log2f_2u6.c"
diff --git a/pl/math/test/mathbench_funcs.h b/pl/math/test/mathbench_funcs.h
index 85ec906..338f050 100644
--- a/pl/math/test/mathbench_funcs.h
+++ b/pl/math/test/mathbench_funcs.h
@@ -13,6 +13,7 @@ F (erfcf, -4.0, 10.0)
F (erff, -4.0, 4.0)
F (log10f, 0.01, 11.1)
F (log1pf, -0.9, 10.0)
+F (log2f, 0.01, 11.1)
D (asinh, -10.0, 10.0)
D (atan, -10.0, 10.0)
@@ -36,6 +37,7 @@ D (__s_erfc, -6.0, 28.0)
F (__s_log10f, 0.01, 11.1)
D (__s_log10, 0.01, 11.1)
F (__s_log1pf, -0.9, 10.0)
+F (__s_log2f, 0.01, 11.1)
#if __aarch64__
VF (__v_asinhf, -10.0, 10.0)
VF (__v_atanf, -10.0, 10.0)
@@ -49,6 +51,7 @@ VD (__v_erfc, -6.0, 28.0)
VD (__v_log10, 0.01, 11.1)
VF (__v_log10f, 0.01, 11.1)
VF (__v_log1pf, -0.9, 10.0)
+VF (__v_log2f, 0.01, 11.1)
#ifdef __vpcs
VNF (__vn_asinhf, -10.0, 10.0)
VNF (_ZGVnN4v_asinhf, -10.0, 10.0)
@@ -85,6 +88,9 @@ VND (_ZGVnN2v_log10, 0.01, 11.1)
VNF (__vn_log1pf, -0.9, 10.0)
VNF (_ZGVnN4v_log1pf, -0.9, 10.0)
+
+VNF (__vn_log2f, 0.01, 11.1)
+VNF (_ZGVnN4v_log2f, 0.01, 11.1)
#endif
#endif
#if WANT_SVE_MATH
diff --git a/pl/math/test/runulp.sh b/pl/math/test/runulp.sh
index d5ae82b..2ef5d5b 100755
--- a/pl/math/test/runulp.sh
+++ b/pl/math/test/runulp.sh
@@ -242,6 +242,15 @@ range_asinhf='
-0x1p11 -inf 20000
'
+range_log2f='
+ -0.0 -0x1p126 100
+ 0x1p-149 0x1p-126 4000
+ 0x1p-126 0x1p-23 50000
+ 0x1p-23 1.0 50000
+ 1.0 100 50000
+ 100 inf 50000
+'
+
range_sve_cosf='
0 0xffff0000 10000
0x1p-4 0x1p4 500000
@@ -265,6 +274,7 @@ L_atan2f=3.0
L_atanf=3.0
L_log1pf=2.0
L_asinhf=2.2
+L_log2f=2.6
L_sve_cosf=1.6
L_sve_cos=2.0
@@ -335,6 +345,10 @@ asinhf __s_asinhf $runs
asinhf __v_asinhf $runv
asinhf __vn_asinhf $runvn
asinhf _ZGVnN4v_asinhf $runvn
+log2f __s_log2f $runs
+log2f __v_log2f $runv
+log2f __vn_log2f $runvn
+log2f _ZGVnN4v_log2f $runvn
if [ $WANT_SVE_MATH -eq 1 ]; then
sve_cosf __sv_cosf $runsv
diff --git a/pl/math/test/testcases/directed/log2f.tst b/pl/math/test/testcases/directed/log2f.tst
new file mode 100644
index 0000000..9e99c53
--- /dev/null
+++ b/pl/math/test/testcases/directed/log2f.tst
@@ -0,0 +1,27 @@
+; log2f.tst - Directed test cases for log2f
+;
+; Copyright (c) 2017-2022, Arm Limited.
+; SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+
+func=log2f op1=7fc00001 result=7fc00001 errno=0
+func=log2f op1=ffc00001 result=7fc00001 errno=0
+func=log2f op1=7f800001 result=7fc00001 errno=0 status=i
+func=log2f op1=ff800001 result=7fc00001 errno=0 status=i
+func=log2f op1=ff810000 result=7fc00001 errno=0 status=i
+func=log2f op1=7f800000 result=7f800000 errno=0
+func=log2f op1=ff800000 result=7fc00001 errno=EDOM status=i
+func=log2f op1=3f800000 result=00000000 errno=0
+func=log2f op1=00000000 result=ff800000 errno=ERANGE status=z
+func=log2f op1=80000000 result=ff800000 errno=ERANGE status=z
+func=log2f op1=80000001 result=7fc00001 errno=EDOM status=i
+
+func=log2f op1=3f7d70a4 result=bc6d8f8b.7d4 error=0
+func=log2f op1=3f604189 result=be4394c8.395 error=0
+func=log2f op1=3f278034 result=bf1caa73.88e error=0
+func=log2f op1=3edd3c36 result=bf9af3b9.619 error=0
+func=log2f op1=3e61259a result=c00bdb95.650 error=0
+func=log2f op1=3f8147ae result=3c6b3267.d6a error=0
+func=log2f op1=3f8fbe77 result=3e2b5fe2.a1c error=0
+func=log2f op1=3fac3eea result=3edb4d5e.1fc error=0
+func=log2f op1=3fd6e632 result=3f3f5d3a.827 error=0
+func=log2f op1=40070838 result=3f89e055.a0a error=0
diff --git a/pl/math/test/ulp_funcs.h b/pl/math/test/ulp_funcs.h
index d317352..f9f0233 100644
--- a/pl/math/test/ulp_funcs.h
+++ b/pl/math/test/ulp_funcs.h
@@ -28,6 +28,7 @@ F (__s_erfc, __s_erfc, erfcl, mpfr_erfc, 1, 0, d1, 0)
F (__s_log10f, __s_log10f, log10, mpfr_log10, 1, 1, f1, 0)
F (__s_log10, __s_log10, log10l, mpfr_log10, 1, 0, d1, 0)
F (__s_log1pf, __s_log1pf, log1p, mpfr_log1p, 1, 1, f1, 0)
+F (__s_log2f, __s_log2f, log2, mpfr_log2, 1, 1, f1, 0)
#if __aarch64__
F (__v_asinhf, v_asinhf, asinh, mpfr_asinh, 1, 1, f1, 1)
F (__v_atanf, v_atanf, atan, mpfr_atan, 1, 1, f1, 1)
@@ -41,6 +42,7 @@ F (__v_erfc, v_erfc, erfcl, mpfr_erfc, 1, 0, d1, 1)
F (__v_log10f, v_log10f, log10, mpfr_log10, 1, 1, f1, 1)
F (__v_log10, v_log10, log10l, mpfr_log10, 1, 0, d1, 1)
F (__v_log1pf, v_log1pf, log1p, mpfr_log1p, 1, 1, f1, 1)
+F (__v_log2f, v_log2f, log2, mpfr_log2, 1, 1, f1, 1)
#ifdef __vpcs
F (__vn_asinhf, vn_asinhf, asinh, mpfr_asinh, 1, 1, f1, 1)
F (__vn_atanf, vn_atanf, atan, mpfr_atan, 1, 1, f1, 1)
@@ -54,6 +56,7 @@ F (__vn_erfc, vn_erfc, erfcl, mpfr_erfc, 1, 0, d1, 1)
F (__vn_log10f, vn_log10f, log10, mpfr_log10, 1, 1, f1, 1)
F (__vn_log10, vn_log10, log10l, mpfr_log10, 1, 0, d1, 1)
F (__vn_log1pf, vn_log1pf, log1p, mpfr_log1p, 1, 1, f1, 1)
+F (__vn_log2f, vn_log2f, log2, mpfr_log2, 1, 1, f1, 1)
F (_ZGVnN4v_asinhf, Z_asinhf, asinh, mpfr_asinh, 1, 1, f1, 1)
F (_ZGVnN4v_atanf, Z_atanf, atan, mpfr_atan, 1, 1, f1, 1)
F (_ZGVnN2v_atan, Z_atan, atanl, mpfr_atan, 1, 0, d1, 1)
@@ -66,6 +69,7 @@ F (_ZGVnN2v_erfc, Z_erfc, erfcl, mpfr_erfc, 1, 0, d1, 1)
F (_ZGVnN4v_log10f, Z_log10f, log10, mpfr_log10, 1, 1, f1, 1)
F (_ZGVnN2v_log10, Z_log10, log10l, mpfr_log10, 1, 0, d1, 1)
F (_ZGVnN4v_log1pf, Z_log1pf, log1p, mpfr_log1p, 1, 1, f1, 1)
+F (_ZGVnN4v_log2f, Z_log2f, log2, mpfr_log2, 1, 1, f1, 1)
#endif
#endif
#if WANT_SVE_MATH
diff --git a/pl/math/test/ulp_wrappers.h b/pl/math/test/ulp_wrappers.h
index 14a32b7..274e119 100644
--- a/pl/math/test/ulp_wrappers.h
+++ b/pl/math/test/ulp_wrappers.h
@@ -26,6 +26,7 @@ static float v_erff(float x) { return __v_erff(argf(x))[0]; }
static float v_erfcf(float x) { return __v_erfcf(argf(x))[0]; }
static float v_log10f(float x) { return __v_log10f(argf(x))[0]; }
static float v_log1pf(float x) { return __v_log1pf(argf(x))[0]; }
+static float v_log2f(float x) { return __v_log2f(argf(x))[0]; }
static double v_atan(double x) { return __v_atan(argd(x))[0]; }
static double v_atan2(double x, double y) { return __v_atan2(argd(x), argd(y))[0]; }
static double v_erf(double x) { return __v_erf(argd(x))[0]; }
@@ -39,6 +40,7 @@ static float vn_erff(float x) { return __vn_erff(argf(x))[0]; }
static float vn_erfcf(float x) { return __vn_erfcf(argf(x))[0]; }
static float vn_log10f(float x) { return __vn_log10f(argf(x))[0]; }
static float vn_log1pf(float x) { return __vn_log1pf(argf(x))[0]; }
+static float vn_log2f(float x) { return __vn_log2f(argf(x))[0]; }
static double vn_atan(double x) { return __vn_atan(argd(x))[0]; }
static double vn_atan2(double x, double y) { return __vn_atan2(argd(x), argd(y))[0]; }
static double vn_erf(double x) { return __vn_erf(argd(x))[0]; }
@@ -52,6 +54,7 @@ static float Z_erff(float x) { return _ZGVnN4v_erff(argf(x))[0]; }
static float Z_erfcf(float x) { return _ZGVnN4v_erfcf(argf(x))[0]; }
static float Z_log10f(float x) { return _ZGVnN4v_log10f(argf(x))[0]; }
static float Z_log1pf(float x) { return _ZGVnN4v_log1pf(argf(x))[0]; }
+static float Z_log2f(float x) { return _ZGVnN4v_log2f(argf(x))[0]; }
static double Z_atan(double x) { return _ZGVnN2v_atan(argd(x))[0]; }
static double Z_atan2(double x, double y) { return _ZGVnN2vv_atan2(argd(x), argd(y))[0]; }
static double Z_erf(double x) { return _ZGVnN2v_erf(argd(x))[0]; }
diff --git a/pl/math/v_log2f_2u6.c b/pl/math/v_log2f_2u6.c
new file mode 100644
index 0000000..ce46206
--- /dev/null
+++ b/pl/math/v_log2f_2u6.c
@@ -0,0 +1,108 @@
+/*
+ * Single-precision vector log2 function.
+ *
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+
+#include "math_config.h"
+#include "v_math.h"
+#if V_SUPPORTED
+
+#define N (1 << V_LOG2F_TABLE_BITS)
+#define T __v_log2f_data.tab
+#define A __v_log2f_data.poly
+#define OFF 0x3f330000
+
+static float
+handle_special (float x)
+{
+ if (x < 0)
+ /* log2f(-anything) = NaN. */
+ return NAN;
+ if (x == 0)
+ /* log2f(0) = Inf. */
+ return __math_divzerof (1);
+ /* log2f(Inf) = Inf
+ log2f(NaN) = NaN
+ log2f(-NaN) = -NaN. */
+ return x;
+}
+
+static float
+normalise (float x)
+{
+ return asfloat (asuint (x * 0x1p23f) - (23 << 23));
+}
+
+#ifdef SCALAR
+
+#define DEFINE_LOOKUP_FUNC(p) \
+ static inline float lookup_##p (uint32_t i) { return T[i].p; }
+
+#else
+
+#define DEFINE_LOOKUP_FUNC(p) \
+ static inline v_f32_t lookup_##p (v_u32_t i) \
+ { \
+ return (v_f32_t){T[i[0]].p, T[i[1]].p, T[i[2]].p, T[i[3]].p}; \
+ }
+
+#endif
+
+DEFINE_LOOKUP_FUNC (invc_lo)
+DEFINE_LOOKUP_FUNC (invc_hi)
+DEFINE_LOOKUP_FUNC (logc)
+
+/* Single-precision vector log2 routine. Implements the same algorithm as
+ scalar log2f, but using only single-precision arithmetic, with invc
+ represented as a two-limb float. Accurate to 2.6 ulp. The maximum error is
+ near sqrt(2):
+ __v_log2f(0x1.6a0484p+0) got 0x1.ffea02p-2
+ want 0x1.ffea08p-2. */
+VPCS_ATTR v_f32_t V_NAME (log2f) (v_f32_t x)
+{
+ v_u32_t ix = v_as_u32_f32 (x);
+
+ /* x is +-Inf, +-NaN, 0 or -ve. */
+ v_u32_t special = v_cond_u32 (ix >= 0x7f800000) | v_cond_u32 (ix == 0);
+ /* |x| < 2^-126 (i.e. x is subnormal). */
+ v_u32_t subnorm = v_cond_u32 (v_calt_f32 (x, v_f32 (0x1p-126f)));
+
+ if (unlikely (v_any_u32 (subnorm)))
+ /* Normalize any subnormals. */
+ ix = v_as_u32_f32 (v_call_f32 (normalise, x, x, subnorm));
+
+ /* x = 2^k z; where z is in range [OFF,2*OFF] and exact.
+ The range is split into N subintervals.
+ The ith subinterval contains z and c is near its center. */
+ v_u32_t tmp = ix - OFF;
+ v_u32_t i = (tmp >> (23 - V_LOG2F_TABLE_BITS)) % N;
+ v_u32_t top = tmp & 0xff800000;
+ v_u32_t iz = ix - top;
+ v_f32_t k = v_to_f32_s32 (v_as_s32_u32 (tmp) >> 23); /* Arithmetic shift. */
+ v_f32_t z = v_as_f32_u32 (iz);
+
+ v_f32_t invc_lo = lookup_invc_lo (i);
+ v_f32_t invc_hi = lookup_invc_hi (i);
+ v_f32_t logc = lookup_logc (i);
+
+ /* log2(x) = log1p(z/c-1)/ln2 + log2(c) + k. */
+ v_f32_t r = v_fma_f32 (z, invc_hi, v_f32 (-1));
+ r = v_fma_f32 (z, invc_lo, r);
+ v_f32_t y0 = logc + k;
+
+ /* Pipelined polynomial evaluation to approximate log1p(r)/ln2. */
+ v_f32_t r2 = r * r;
+ v_f32_t y = v_fma_f32 (v_f32 (A[1]), r, v_f32 (A[2]));
+ y = v_fma_f32 (v_f32 (A[0]), r2, y);
+ v_f32_t p = v_fma_f32 (v_f32 (A[3]), r, y0);
+ y = v_fma_f32 (y, r2, p);
+
+ if (unlikely (v_any_u32 (special)))
+ return v_call_f32 (handle_special, x, y, special);
+
+ return y;
+}
+VPCS_ALIAS
+#endif
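For reference, the OFF-based decomposition used in the routine above can be written as a standalone scalar helper. This is only an illustrative sketch (helper name and layout hypothetical) of how the exponent k, table index i and significand z are recovered from the bit pattern of a positive, normal x:

#include <stdint.h>
#include <string.h>

#define OFF 0x3f330000
#define TABLE_BITS 4

/* Split x = 2^k * z, with z roughly in [0.7, 1.4) so that z/c is close
   to 1; i selects one of 2^TABLE_BITS subintervals of that range.  */
static void
decompose (float x, int *k, unsigned *i, float *z)
{
  uint32_t ix, iz;
  memcpy (&ix, &x, sizeof ix);
  uint32_t tmp = ix - OFF;
  *i = (tmp >> (23 - TABLE_BITS)) % (1 << TABLE_BITS);
  *k = (int32_t) tmp >> 23;          /* Arithmetic shift recovers the exponent.  */
  iz = ix - (tmp & 0xff800000u);
  memcpy (z, &iz, sizeof iz);
}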
diff --git a/pl/math/v_log2f_data.c b/pl/math/v_log2f_data.c
new file mode 100644
index 0000000..e6c1f71
--- /dev/null
+++ b/pl/math/v_log2f_data.c
@@ -0,0 +1,37 @@
+/*
+ * Coefficients and table entries for vector log2f
+ *
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+
+#include "math_config.h"
+
+const struct v_log2f_data __v_log2f_data = {
+/* All values here are derived from the values in math/log2f_data.c.
+ For all i:
+ tab[i].invc_hi = (float) log2f_data.invc
+ tab[i].invc_lo = log2f_data.invc - (double) tab[i].invc_hi
+ tab[i].logc = (float) log2f_data.logc
+ poly[i] = (float) log2f_data.poly[i]. */
+ .tab = {
+ { 0x1.661ec8p+0, -0x1.81c31p-26, -0x1.efec66p-2, 0},
+ { 0x1.571ed4p+0, 0x1.55f108p-25, -0x1.b0b684p-2, 0},
+ { 0x1.4953ap+0, -0x1.e1fdeap-25, -0x1.7418bp-2, 0},
+ { 0x1.3c995cp+0, -0x1.e8ff9p-25, -0x1.39de92p-2, 0},
+ { 0x1.30d19p+0, 0x1.910c94p-25, -0x1.01d9cp-2, 0},
+ { 0x1.25e228p+0, -0x1.3d1c58p-26, -0x1.97c1d2p-3, 0},
+ { 0x1.1bb4a4p+0, 0x1.434688p-25, -0x1.2f9e3ap-3, 0},
+ { 0x1.12359p+0, -0x1.eea348p-25, -0x1.960cbcp-4, 0},
+ { 0x1.0953f4p+0, 0x1.9900a8p-28, -0x1.a6f9dcp-5, 0},
+ { 0x1p+0, 0x0p+0, 0x0p+0, 0},
+ { 0x1.e608dp-1, -0x1.32dc2ap-28, 0x1.338caap-4, 0},
+ { 0x1.ca4b32p-1, -0x1.fb2acp-30, 0x1.476a96p-3, 0},
+ { 0x1.b20366p-1, -0x1.12a064p-26, 0x1.e840b4p-3, 0},
+ { 0x1.9c2d16p-1, 0x1.d0d516p-28, 0x1.40646p-2, 0},
+ { 0x1.886e6p-1, 0x1.bc20f6p-28, 0x1.88e9c2p-2, 0},
+ { 0x1.767ddp-1, -0x1.5596f4p-26, 0x1.ce0a44p-2, 0},
+ },
+ .poly = { -0x1.712b70p-2, 0x1.ecabf4p-2,
+ -0x1.71547ap-1, 0x1.715476p+0 }
+};
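The comment above gives the derivation of each entry from the double-precision table in math/log2f_data.c. A hypothetical helper performing the same narrowing might look like this (the example input is illustrative only):

#include <stdio.h>

/* Narrow one double-precision invc/logc pair into the hi/lo/logc layout
   used by __v_log2f_data (sketch only, not part of the patch).  */
static void
narrow_entry (double invc, double logc)
{
  float invc_hi = (float) invc;
  float invc_lo = (float) (invc - (double) invc_hi);
  float logc_f = (float) logc;
  printf ("  { %a, %a, %a, 0 },\n",
          (double) invc_hi, (double) invc_lo, (double) logc_f);
}

int
main (void)
{
  /* Illustrative input, approximating the first table entry.  */
  narrow_entry (0x1.661ec79f8f3bep+0, -0x1.efec65b963019p-2);
  return 0;
}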
diff --git a/pl/math/vn_log2f_2u6.c b/pl/math/vn_log2f_2u6.c
new file mode 100644
index 0000000..dc5ab03
--- /dev/null
+++ b/pl/math/vn_log2f_2u6.c
@@ -0,0 +1,12 @@
+/*
+ * AdvSIMD vector PCS variant of __v_log2f.
+ *
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+#include "include/mathlib.h"
+#ifdef __vpcs
+#define VPCS 1
+#define VPCS_ALIAS strong_alias (__vn_log2f, _ZGVnN4v_log2f)
+#include "v_log2f_2u6.c"
+#endif
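Usage note (not part of the patch): once the library is built, the routine can be called through the vector-PCS ABI name declared in mathlib.h. A minimal AArch64 caller sketch, assuming a compiler that supports the aarch64_vector_pcs attribute (GCC >= 9 or Clang >= 8, as the header checks) and linking against the pl/math build:

#include <arm_neon.h>
#include <stdio.h>

/* Prototype matching the declaration added to pl/math/include/mathlib.h.  */
__attribute__ ((__aarch64_vector_pcs__)) float32x4_t _ZGVnN4v_log2f (float32x4_t);

int
main (void)
{
  float32x4_t x = {0.5f, 1.0f, 2.0f, 8.0f};
  float32x4_t y = _ZGVnN4v_log2f (x);
  for (int i = 0; i < 4; i++)
    printf ("log2f(%g) = %g\n", x[i], y[i]); /* expect -1, 0, 1, 3.  */
  return 0;
}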