author     Joe Ramsay <Joe.Ramsay@arm.com>    2022-12-22 17:50:40 +0000
committer  Joe Ramsay <joe.ramsay@arm.com>    2022-12-22 17:50:40 +0000
commit     0d000be25df5ecebba2cf95f219a53c218fcb761 (patch)
tree       8ab547267830546c2e5a22ab694e26ca99041648
parent     2015eee4012b8fa766855328438f48aaf424a835 (diff)
download   arm-optimized-routines-0d000be25df5ecebba2cf95f219a53c218fcb761.tar.gz
pl/math: Add scalar & vector/Neon atanh
New routines are both based on existing log1p routines. Scalar is accurate to 3 ULP, Neon to 3.5 ULP. Both set fp exceptions correctly regardless of build config.
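
Both implementations use the identity atanh(x) = 0.5 * log1p(2x / (1 - x)) with a specialised inline log1p, as seen in the diffs below. A minimal standalone sketch of that identity, using the standard-library log1p in place of the patch's inline helper:

#include <math.h>
#include <stdio.h>

/* Reference form of the identity used by the new routines:
   atanh(x) = 0.5 * log1p(2x / (1 - x)) for |x| < 1.  The library code
   folds the sign of x into the 0.5 factor in the same way.  */
static double
atanh_via_log1p (double x)
{
  double ax = fabs (x);
  double halfsign = copysign (0.5, x);
  return halfsign * log1p ((2 * ax) / (1 - ax));
}

int
main (void)
{
  for (double x = -0.9; x < 1.0; x += 0.3)
    printf ("x=% .1f  atanh=% .17g  via log1p=% .17g\n", x, atanh (x),
            atanh_via_log1p (x));
  return 0;
}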
-rw-r--r--  pl/math/atanh_3u.c                          85
-rw-r--r--  pl/math/include/mathlib.h                    5
-rw-r--r--  pl/math/pairwise_horner_wrap.h              22
-rw-r--r--  pl/math/s_atanh_3u5.c                        6
-rw-r--r--  pl/math/test/testcases/directed/atanh.tst   22
-rw-r--r--  pl/math/v_atanh_3u5.c                      104
-rw-r--r--  pl/math/vn_atanh_3u5.c                      12
7 files changed, 251 insertions, 5 deletions
diff --git a/pl/math/atanh_3u.c b/pl/math/atanh_3u.c
new file mode 100644
index 0000000..b72326c
--- /dev/null
+++ b/pl/math/atanh_3u.c
@@ -0,0 +1,85 @@
+/*
+ * Double-precision atanh(x) function.
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+
+#include "math_config.h"
+#include "estrin.h"
+#include "pl_sig.h"
+#include "pl_test.h"
+
+#define AbsMask 0x7fffffffffffffff
+#define Half 0x3fe0000000000000
+#define One 0x3ff0000000000000
+#define Ln2Hi 0x1.62e42fefa3800p-1
+#define Ln2Lo 0x1.ef35793c76730p-45
+#define OneMHfRt2Top \
+ 0x00095f62 /* top32(asuint64(1)) - top32(asuint64(sqrt(2)/2)). */
+#define OneTop12 0x3ff
+#define HfRt2Top 0x3fe6a09e /* top32(asuint64(sqrt(2)/2)). */
+#define BottomMask 0xffffffff
+#define C(i) __log1p_data.coeffs[i]
+
+static inline double
+log1p_inline (double x)
+{
+ /* Helper for calculating log(1 + x) using order-18 polynomial on a reduced
+ interval. Copied from log1p_2u.c, with no special-case handling. See that
+ file for details of the algorithm. */
+ double m = x + 1;
+ uint64_t mi = asuint64 (m);
+
+ /* Decompose x + 1 into (f + 1) * 2^k, with k chosen such that f is in
+ [sqrt(2)/2, sqrt(2)]. */
+ uint32_t u = (mi >> 32) + OneMHfRt2Top;
+ int32_t k = (int32_t) (u >> 20) - OneTop12;
+ uint32_t utop = (u & 0x000fffff) + HfRt2Top;
+ uint64_t u_red = ((uint64_t) utop << 32) | (mi & BottomMask);
+ double f = asdouble (u_red) - 1;
+
+ /* Correction term for round-off in f. */
+ double cm = (x - (m - 1)) / m;
+
+ /* Approximate log1p(f) with polynomial. */
+ double f2 = f * f;
+ double f4 = f2 * f2;
+ double f8 = f4 * f4;
+ double p = fma (f, ESTRIN_18 (f, f2, f4, f8, f8 * f8, C) * f, f);
+
+ /* Recombine log1p(x) = k*log2 + log1p(f) + c/m. */
+ double kd = k;
+ double y = fma (Ln2Lo, kd, cm);
+ return y + fma (Ln2Hi, kd, p);
+}
+
+/* Approximation for double-precision inverse tanh(x), using a simplified
+ version of log1p. Greatest observed error is 3.00 ULP:
+ atanh(0x1.e58f3c108d714p-4) got 0x1.e7da77672a647p-4
+ want 0x1.e7da77672a64ap-4. */
+double
+atanh (double x)
+{
+ uint64_t ix = asuint64 (x);
+ uint64_t sign = ix & ~AbsMask;
+ uint64_t ia = ix & AbsMask;
+
+ if (unlikely (ia == One))
+ return __math_divzero (sign >> 32);
+
+ if (unlikely (ia > One))
+ return __math_invalid (x);
+
+ double halfsign = asdouble (Half | sign);
+ double ax = asdouble (ia);
+ return halfsign * log1p_inline ((2 * ax) / (1 - ax));
+}
+
+PL_SIG (S, D, 1, atanh, -1.0, 1.0)
+PL_TEST_ULP (atanh, 3.00)
+PL_TEST_INTERVAL (atanh, 0, 0x1p-23, 10000)
+PL_TEST_INTERVAL (atanh, -0, -0x1p-23, 10000)
+PL_TEST_INTERVAL (atanh, 0x1p-23, 1, 90000)
+PL_TEST_INTERVAL (atanh, -0x1p-23, -1, 90000)
+PL_TEST_INTERVAL (atanh, 1, inf, 100)
+PL_TEST_INTERVAL (atanh, -1, -inf, 100)
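
The scalar special cases above follow C99 atanh: |x| == 1 goes through __math_divzero (±inf, divide-by-zero raised), |x| > 1 through __math_invalid (NaN, invalid raised). A quick probe of the expected behaviour, sketched against the host libm's atanh (errno details depend on the host):

#include <errno.h>
#include <fenv.h>
#include <math.h>
#include <stdio.h>

/* Report the result, errno and fp-exception flags produced by one call.  */
static void
probe (double x)
{
  errno = 0;
  feclearexcept (FE_ALL_EXCEPT);
  double y = atanh (x);
  int ex = fetestexcept (FE_DIVBYZERO | FE_INVALID);
  printf ("atanh(% g) = % g  errno=%d  divbyzero=%d  invalid=%d\n", x, y,
          errno, !!(ex & FE_DIVBYZERO), !!(ex & FE_INVALID));
}

int
main (void)
{
  probe (1.0);  /* expect +inf, divide-by-zero (ERANGE on glibc).  */
  probe (-1.0); /* expect -inf, divide-by-zero.  */
  probe (1.5);  /* expect NaN, invalid (EDOM on glibc).  */
  probe (0.5);  /* ordinary input, no exception expected.  */
  return 0;
}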
diff --git a/pl/math/include/mathlib.h b/pl/math/include/mathlib.h
index 05fa306..041b407 100644
--- a/pl/math/include/mathlib.h
+++ b/pl/math/include/mathlib.h
@@ -29,6 +29,7 @@ double acosh (double);
double asinh (double);
double atan (double);
double atan2 (double, double);
+double atanh (double);
double cbrt (double);
double cosh (double);
double erfc (double);
@@ -56,6 +57,7 @@ float __s_tanhf (float);
double __s_asinh (double);
double __s_atan (double);
double __s_atan2 (double, double);
+double __s_atanh (double);
double __s_cbrt (double);
double __s_cosh (double);
double __s_erf (double);
@@ -85,6 +87,7 @@ __f64x2_t __v_atan (__f64x2_t);
__f32x4_t __v_atan2f (__f32x4_t, __f32x4_t);
__f64x2_t __v_atan2 (__f64x2_t, __f64x2_t);
__f32x4_t __v_atanhf (__f32x4_t);
+__f64x2_t __v_atanh (__f64x2_t);
__f32x4_t __v_cbrtf (__f32x4_t);
__f64x2_t __v_cbrt (__f64x2_t);
__f32x4_t __v_coshf (__f32x4_t);
@@ -117,6 +120,7 @@ __vpcs __f64x2_t __vn_atan (__f64x2_t);
__vpcs __f32x4_t __vn_atan2f (__f32x4_t, __f32x4_t);
__vpcs __f64x2_t __vn_atan2 (__f64x2_t, __f64x2_t);
__vpcs __f32x4_t __vn_atanhf (__f32x4_t);
+__vpcs __f64x2_t __vn_atanh (__f64x2_t);
__vpcs __f32x4_t __vn_cbrtf (__f32x4_t);
__vpcs __f64x2_t __vn_cbrt (__f64x2_t);
__vpcs __f32x4_t __vn_coshf (__f32x4_t);
@@ -146,6 +150,7 @@ __vpcs __f64x2_t _ZGVnN2v_atan (__f64x2_t);
__vpcs __f32x4_t _ZGVnN4vv_atan2f (__f32x4_t, __f32x4_t);
__vpcs __f64x2_t _ZGVnN2vv_atan2 (__f64x2_t, __f64x2_t);
__vpcs __f32x4_t _ZGVnN4v_atanhf (__f32x4_t);
+__vpcs __f64x2_t _ZGVnN2v_atanh (__f64x2_t);
__vpcs __f32x4_t _ZGVnN4v_cbrtf (__f32x4_t);
__vpcs __f64x2_t _ZGVnN2v_cbrt (__f64x2_t);
__vpcs __f32x4_t _ZGVnN4v_coshf (__f32x4_t);
diff --git a/pl/math/pairwise_horner_wrap.h b/pl/math/pairwise_horner_wrap.h
index 5bc287b..e75a491 100644
--- a/pl/math/pairwise_horner_wrap.h
+++ b/pl/math/pairwise_horner_wrap.h
@@ -7,11 +7,14 @@
// clang-format off
#define PW_HORNER_1_(x, c, i) FMA(x, C(i + 1), C(i))
-#define PW_HORNER_3_(x, x2, c, i) FMA(x2, PW_HORNER_1_(x, c, i + 2), PW_HORNER_1_(x, c, i))
-#define PW_HORNER_5_(x, x2, c, i) FMA(x2, PW_HORNER_3_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
-#define PW_HORNER_7_(x, x2, c, i) FMA(x2, PW_HORNER_5_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
-#define PW_HORNER_9_(x, x2, c, i) FMA(x2, PW_HORNER_7_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
-#define PW_HORNER_11_(x, x2, c, i) FMA(x2, PW_HORNER_9_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_3_(x, x2, c, i) FMA(x2, PW_HORNER_1_ (x, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_5_(x, x2, c, i) FMA(x2, PW_HORNER_3_ (x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_7_(x, x2, c, i) FMA(x2, PW_HORNER_5_ (x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_9_(x, x2, c, i) FMA(x2, PW_HORNER_7_ (x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_11_(x, x2, c, i) FMA(x2, PW_HORNER_9_ (x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_13_(x, x2, c, i) FMA(x2, PW_HORNER_11_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_15_(x, x2, c, i) FMA(x2, PW_HORNER_13_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_17_(x, x2, c, i) FMA(x2, PW_HORNER_15_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
#define PAIRWISE_HORNER_1(x, c) PW_HORNER_1_ (x, c, 0)
#define PAIRWISE_HORNER_3(x, x2, c) PW_HORNER_3_ (x, x2, c, 0)
@@ -19,6 +22,9 @@
#define PAIRWISE_HORNER_7(x, x2, c) PW_HORNER_7_ (x, x2, c, 0)
#define PAIRWISE_HORNER_9(x, x2, c) PW_HORNER_9_ (x, x2, c, 0)
#define PAIRWISE_HORNER_11(x, x2, c) PW_HORNER_11_(x, x2, c, 0)
+#define PAIRWISE_HORNER_13(x, x2, c) PW_HORNER_13_(x, x2, c, 0)
+#define PAIRWISE_HORNER_15(x, x2, c) PW_HORNER_15_(x, x2, c, 0)
+#define PAIRWISE_HORNER_17(x, x2, c) PW_HORNER_17_(x, x2, c, 0)
#define PW_HORNER_2_(x, x2, c, i) FMA(x2, c(i + 2), PW_HORNER_1_(x, c, i))
#define PW_HORNER_4_(x, x2, c, i) FMA(x2, PW_HORNER_2_ (x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
@@ -26,6 +32,9 @@
#define PW_HORNER_8_(x, x2, c, i) FMA(x2, PW_HORNER_6_ (x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
#define PW_HORNER_10_(x, x2, c, i) FMA(x2, PW_HORNER_8_ (x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
#define PW_HORNER_12_(x, x2, c, i) FMA(x2, PW_HORNER_10_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_14_(x, x2, c, i) FMA(x2, PW_HORNER_12_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_16_(x, x2, c, i) FMA(x2, PW_HORNER_14_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
+#define PW_HORNER_18_(x, x2, c, i) FMA(x2, PW_HORNER_16_(x, x2, c, i + 2), PW_HORNER_1_(x, c, i))
#define PAIRWISE_HORNER_2(x, x2, c) PW_HORNER_2_ (x, x2, c, 0)
#define PAIRWISE_HORNER_4(x, x2, c) PW_HORNER_4_ (x, x2, c, 0)
@@ -33,4 +42,7 @@
#define PAIRWISE_HORNER_8(x, x2, c) PW_HORNER_8_(x, x2, c, 0)
#define PAIRWISE_HORNER_10(x, x2, c) PW_HORNER_10_(x, x2, c, 0)
#define PAIRWISE_HORNER_12(x, x2, c) PW_HORNER_12_(x, x2, c, 0)
+#define PAIRWISE_HORNER_14(x, x2, c) PW_HORNER_14_(x, x2, c, 0)
+#define PAIRWISE_HORNER_16(x, x2, c) PW_HORNER_16_(x, x2, c, 0)
+#define PAIRWISE_HORNER_18(x, x2, c) PW_HORNER_18_(x, x2, c, 0)
// clang-format on
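
The wrappers above extend pairwise Horner up to degree 18, which the vector atanh needs for the order-18 log1p polynomial. Each coefficient pair is combined with one FMA, and the pairs are then folded together with x^2; for degree 3, PAIRWISE_HORNER_3 expands to FMA(x2, FMA(x, C(3), C(2)), FMA(x, C(1), C(0))). A scalar sketch of that evaluation order with arbitrary demo coefficients:

#include <math.h>
#include <stdio.h>

/* Degree-3 pairwise Horner: pairs (c0, c1) and (c2, c3) are each combined
   with one fma, then joined with x^2 -- the same shape PAIRWISE_HORNER_3
   expands to, written out by hand.  */
static double
pairwise_horner_3 (double x, const double c[4])
{
  double x2 = x * x;
  double lo = fma (x, c[1], c[0]); /* c0 + c1*x */
  double hi = fma (x, c[3], c[2]); /* c2 + c3*x */
  return fma (x2, hi, lo);         /* lo + hi*x^2 */
}

int
main (void)
{
  const double c[4] = { 1.0, -0.5, 1.0 / 3, -0.25 }; /* demo coefficients.  */
  double x = 0.1;
  double horner = c[0] + x * (c[1] + x * (c[2] + x * c[3]));
  printf ("pairwise=%.17g  plain horner=%.17g\n",
          pairwise_horner_3 (x, c), horner);
  return 0;
}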
diff --git a/pl/math/s_atanh_3u5.c b/pl/math/s_atanh_3u5.c
new file mode 100644
index 0000000..11877c6
--- /dev/null
+++ b/pl/math/s_atanh_3u5.c
@@ -0,0 +1,6 @@
+/*
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+#define SCALAR 1
+#include "v_atanh_3u5.c"
diff --git a/pl/math/test/testcases/directed/atanh.tst b/pl/math/test/testcases/directed/atanh.tst
new file mode 100644
index 0000000..530df8b
--- /dev/null
+++ b/pl/math/test/testcases/directed/atanh.tst
@@ -0,0 +1,22 @@
+; atanh.tst
+;
+; Copyright 2009-2022, Arm Limited.
+; SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+
+func=atanh op1=7ff80000.00000001 result=7ff80000.00000001 errno=0
+func=atanh op1=fff80000.00000001 result=7ff80000.00000001 errno=0
+func=atanh op1=7ff00000.00000001 result=7ff80000.00000001 errno=0 status=i
+func=atanh op1=fff00000.00000001 result=7ff80000.00000001 errno=0 status=i
+func=atanh op1=7ff00000.00000000 result=7ff80000.00000001 errno=EDOM status=i
+func=atanh op1=fff00000.00000000 result=7ff80000.00000001 errno=EDOM status=i
+func=atanh op1=3ff00000.00000001 result=7ff80000.00000001 errno=EDOM status=i
+func=atanh op1=bff00000.00000001 result=7ff80000.00000001 errno=EDOM status=i
+func=atanh op1=3ff00000.00000000 result=7ff00000.00000000 errno=ERANGE status=z
+func=atanh op1=bff00000.00000000 result=fff00000.00000000 errno=ERANGE status=z
+func=atanh op1=00000000.00000000 result=00000000.00000000 errno=0
+func=atanh op1=80000000.00000000 result=80000000.00000000 errno=0
+; No underflow exception is raised with certain versions of glibc:
+; functions approximated by x near zero may not perform any
+; floating-point operations and thus may not raise exceptions.
+func=atanh op1=00000000.00000001 result=00000000.00000001 errno=0 maybestatus=ux
+func=atanh op1=80000000.00000001 result=80000000.00000001 errno=0 maybestatus=ux
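
Each .tst field encodes a double as two 32-bit hex words (high.low of the IEEE-754 bit pattern), so 3ff00000.00000000 is 1.0 and 7ff00000.00000000 is +inf. A small decoder sketch for those pairs, with memcpy playing the role of the library's asdouble:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Turn a "hihihihi.lolololo" test-vector field into the double it encodes.  */
static double
decode (const char *field)
{
  uint32_t hi, lo;
  sscanf (field, "%" SCNx32 ".%" SCNx32, &hi, &lo);
  uint64_t bits = ((uint64_t) hi << 32) | lo;
  double d;
  memcpy (&d, &bits, sizeof d); /* bit-cast the pattern to a double.  */
  return d;
}

int
main (void)
{
  printf ("3ff00000.00000000 -> %g\n", decode ("3ff00000.00000000")); /* 1    */
  printf ("bff00000.00000000 -> %g\n", decode ("bff00000.00000000")); /* -1   */
  printf ("7ff00000.00000000 -> %g\n", decode ("7ff00000.00000000")); /* inf  */
  printf ("7ff80000.00000001 -> %g\n", decode ("7ff80000.00000001")); /* nan  */
  return 0;
}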
diff --git a/pl/math/v_atanh_3u5.c b/pl/math/v_atanh_3u5.c
new file mode 100644
index 0000000..ca68020
--- /dev/null
+++ b/pl/math/v_atanh_3u5.c
@@ -0,0 +1,104 @@
+/*
+ * Double-precision vector atanh(x) function.
+ *
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+
+#include "v_math.h"
+#include "pairwise_horner.h"
+#include "pl_sig.h"
+#include "pl_test.h"
+
+#if V_SUPPORTED
+
+#define Ln2Hi v_f64 (0x1.62e42fefa3800p-1)
+#define Ln2Lo v_f64 (0x1.ef35793c76730p-45)
+#define HfRt2Top 0x3fe6a09e00000000 /* top32(asuint64(sqrt(2)/2)) << 32. */
+#define OneMHfRt2Top \
+ 0x00095f6200000000 /* (top32(asuint64(1)) - top32(asuint64(sqrt(2)/2))) \
+ << 32. */
+#define OneTop12 0x3ff
+#define BottomMask 0xffffffff
+
+#define AbsMask 0x7fffffffffffffff
+#define Half 0x3fe0000000000000
+#define One 0x3ff0000000000000
+
+#define C(i) v_f64 (__log1p_data.coeffs[i])
+
+static inline v_f64_t
+log1p_inline (v_f64_t x)
+{
+ /* Helper for calculating log(1 + x) using order-18 polynomial on a reduced
+ interval. Copied from v_log1p_2u5.c, with the following modifications:
+ - No special-case handling.
+ - Pairwise Horner instead of Estrin for improved accuracy.
+ - Slightly different recombination to reuse f2.
+ See original source for details of the algorithm. */
+ v_f64_t m = x + 1;
+ v_u64_t mi = v_as_u64_f64 (m);
+
+ /* Decompose x + 1 into (f + 1) * 2^k, with k chosen such that f is in
+ [sqrt(2)/2, sqrt(2)]. */
+ v_u64_t u = mi + OneMHfRt2Top;
+ v_s64_t ki = v_as_s64_u64 (u >> 52) - OneTop12;
+ v_f64_t k = v_to_f64_s64 (ki);
+ v_u64_t utop = (u & 0x000fffff00000000) + HfRt2Top;
+ v_u64_t u_red = utop | (mi & BottomMask);
+ v_f64_t f = v_as_f64_u64 (u_red) - 1;
+
+ /* Correction term for round-off in f. */
+ v_f64_t cm = (x - (m - 1)) / m;
+
+ /* Approximate log1p(f) with polynomial. */
+ v_f64_t f2 = f * f;
+ v_f64_t p = PAIRWISE_HORNER_18 (f, f2, C);
+
+ /* Recombine log1p(x) = k*log2 + log1p(f) + c/m. */
+ v_f64_t ylo = v_fma_f64 (k, Ln2Lo, cm);
+ v_f64_t yhi = v_fma_f64 (k, Ln2Hi, f);
+ v_f64_t y = v_fma_f64 (f2, p, ylo + yhi);
+ return y;
+}
+
+VPCS_ATTR
+NOINLINE static v_f64_t
+specialcase (v_f64_t x, v_f64_t y, v_u64_t special)
+{
+ return v_call_f64 (atanh, x, y, special);
+}
+
+/* Approximation for vector double-precision atanh(x) using modified log1p.
+ The greatest observed error is 3.31 ULP:
+ __v_atanh(0x1.ffae6288b601p-6) got 0x1.ffd8ff31b5019p-6
+ want 0x1.ffd8ff31b501cp-6. */
+VPCS_ATTR
+v_f64_t V_NAME (atanh) (v_f64_t x)
+{
+ v_u64_t ix = v_as_u64_f64 (x);
+ v_u64_t sign = ix & ~AbsMask;
+ v_u64_t ia = ix & AbsMask;
+ v_u64_t special = v_cond_u64 (ia >= One);
+ v_f64_t halfsign = v_as_f64_u64 (sign | Half);
+
+ /* Mask special lanes with 0 to prevent spurious underflow. */
+ v_f64_t ax = v_sel_f64 (special, v_f64 (0), v_as_f64_u64 (ia));
+ v_f64_t y = halfsign * log1p_inline ((2 * ax) / (1 - ax));
+
+ if (unlikely (v_any_u64 (special)))
+ return specialcase (x, y, special);
+ return y;
+}
+VPCS_ALIAS
+
+PL_SIG (V, D, 1, atanh, -1.0, 1.0)
+PL_TEST_EXPECT_FENV_ALWAYS (V_NAME (atanh))
+PL_TEST_ULP (V_NAME (atanh), 3.32)
+PL_TEST_INTERVAL_C (V_NAME (atanh), 0, 0x1p-23, 10000, 0)
+PL_TEST_INTERVAL_C (V_NAME (atanh), -0, -0x1p-23, 10000, 0)
+PL_TEST_INTERVAL_C (V_NAME (atanh), 0x1p-23, 1, 90000, 0)
+PL_TEST_INTERVAL_C (V_NAME (atanh), -0x1p-23, -1, 90000, 0)
+PL_TEST_INTERVAL_C (V_NAME (atanh), 1, inf, 100, 0)
+PL_TEST_INTERVAL_C (V_NAME (atanh), -1, -inf, 100, 0)
+#endif
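
The vector routine handles out-of-domain lanes with the usual select-then-fixup pattern: special lanes are masked to a harmless input before the unconditional main path, then recomputed by the scalar fallback only if any lane was flagged. A plain-C sketch of that pattern over two lanes, with ordinary atanh standing in for the library's fallback:

#include <math.h>
#include <stdbool.h>
#include <stdio.h>

#define LANES 2

/* Select-then-fixup, as in the vector routine: special lanes are fed a safe
   dummy input (0) through the main path to avoid spurious exceptions, then
   overwritten by a per-lane scalar call.  */
static void
atanh_lanes (const double x[LANES], double y[LANES])
{
  bool special[LANES];
  bool any_special = false;
  double ax[LANES];

  for (int i = 0; i < LANES; i++)
    {
      special[i] = !(fabs (x[i]) < 1.0);      /* catches |x| >= 1 and NaN.  */
      any_special |= special[i];
      ax[i] = special[i] ? 0.0 : fabs (x[i]); /* mask special lanes to 0.  */
    }

  for (int i = 0; i < LANES; i++) /* main path, all lanes unconditionally.  */
    y[i] = copysign (0.5, x[i]) * log1p ((2 * ax[i]) / (1 - ax[i]));

  if (any_special)
    for (int i = 0; i < LANES; i++)
      if (special[i])
        y[i] = atanh (x[i]); /* scalar fallback for the flagged lanes only.  */
}

int
main (void)
{
  double x[LANES] = { 0.5, 2.0 }, y[LANES];
  atanh_lanes (x, y);
  printf ("%g %g\n", y[0], y[1]); /* ~0.549306 nan */
  return 0;
}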
diff --git a/pl/math/vn_atanh_3u5.c b/pl/math/vn_atanh_3u5.c
new file mode 100644
index 0000000..27a5af5
--- /dev/null
+++ b/pl/math/vn_atanh_3u5.c
@@ -0,0 +1,12 @@
+/*
+ * AdvSIMD vector PCS variant of __v_atanh.
+ *
+ * Copyright (c) 2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+#include "include/mathlib.h"
+#ifdef __vpcs
+#define VPCS 1
+#define VPCS_ALIAS PL_ALIAS (__vn_atanh, _ZGVnN2v_atanh)
+#include "v_atanh_3u5.c"
+#endif