aboutsummaryrefslogtreecommitdiff
path: root/math/pow.c
diff options
context:
space:
mode:
authorSzabolcs Nagy <szabolcs.nagy@arm.com>2018-06-06 18:17:16 +0100
committerSzabolcs Nagy <szabolcs.nagy@arm.com>2018-06-11 16:01:55 +0100
commited0ecfff891f9b69d5dfd86fa877b90b4566634c (patch)
tree4bbe6eacb0892b4e870356726480adb5110ace8d /math/pow.c
parentf79ee89ad4abe65c84d95c359c577f346cb2aabf (diff)
downloadarm-optimized-routines-ed0ecfff891f9b69d5dfd86fa877b90b4566634c.tar.gz
Add new pow implementation
The algorithm is exp(y * log(x)), where log(x) is computed with about 1.8*2^-66 relative error, returning the result in two doubles, and the exp part uses the same algorithm (and lookup tables) as exp, but takes the input as two doubles and a sign (to handle negative bases with odd integer exponents). There is a separate code path when fma is not available, but the worst-case error is about 0.67 ULP in both cases. The lookup table and consts for log are 4224 bytes, the code is 1196 bytes. The non-nearest rounding error is less than 1 ULP. Improvements on Cortex-A72 compared to current glibc master: latency: 1.8x throughput: 2.5x
Diffstat (limited to 'math/pow.c')
-rw-r--r--math/pow.c371
1 files changed, 371 insertions, 0 deletions
diff --git a/math/pow.c b/math/pow.c
new file mode 100644
index 0000000..3d92784
--- /dev/null
+++ b/math/pow.c
@@ -0,0 +1,371 @@
+/*
+ * Double-precision x^y function.
+ *
+ * Copyright (c) 2018, Arm Limited.
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+#include <stdint.h>
+#include "math_config.h"
+
+/*
+Worst-case error: 0.67 ULP (~= ulperr_exp + 1024*Ln2*relerr_log*2^53)
+relerr_log: 1.8 * 2^-66 (Relative error of log)
+ulperr_exp: 0.509 ULP (ULP error of exp)
+*/
+
+#define T __pow_log_data.tab
+#define B __pow_log_data.poly1
+#define A __pow_log_data.poly
+#define Ln2hi __pow_log_data.ln2hi
+#define Ln2lo __pow_log_data.ln2lo
+#define N (1 << POW_LOG_TABLE_BITS)
+#define OFF 0x3fe6000000000000
+
/* Return the top 16 bits of the representation of x: the sign bit, the
   11 exponent bits and the 4 highest mantissa bits.  */
static inline uint32_t
top16 (double x)
{
  uint64_t u = asuint64 (x);
  return (uint32_t) (u >> 48);
}
+
/* Compute y + *tail = log(x), where the rounded result is y and *tail has
   roughly 15 extra bits of precision.  ix is the bit representation of x,
   expected to be a positive, finite, normal value (pow normalizes
   subnormal x before calling).  */
static inline double_t
log_inline (uint64_t ix, double_t *tail)
{
  /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */
  double_t w, z, zhi, zlo, r, r2, r3, y, invc, logc, kd, hi, lo, khi, rhi, rlo, p, q;
  uint64_t iz, tmp;
  int k, i;

#if POW_LOG_POLY1_ORDER == 9
# define LO asuint64 (1 - 0x1.1p-7)
# define HI asuint64 (1 + 0x1.98p-7)
#endif
  /* x close to 1.0: evaluate log(1+r) directly as a polynomial in
     r = x - 1, avoiding the cancellation the table-based path below would
     suffer when log(x) is near 0.  */
  if (unlikely (ix - LO < HI - LO))
    {
      r = asdouble (ix) - 1.0;
      /* Split r into top and bottom half. */
      w = r * 0x1p27;
      rhi = r + w - w;
      rlo = r - rhi;
      /* Compute r - r*r/2 precisely into hi+lo. */
      w = rhi*rhi*B[0]; /* B[0] == -0.5. */
      hi = r + w;
      lo = r - hi + w;
      lo += B[0]*rlo*(rhi + r);
      r2 = r*r;
      r3 = r*r2;
#if POW_LOG_POLY1_ORDER == 9
      p = B[1] + r*(B[2] + r*B[3] + r2*B[4] + r3*(B[5] + r*B[6] + r2*B[7]));
#endif
      /* Fold the order-3 and higher polynomial terms into the compensated
	 sum hi + lo and return the result as y + *tail.  */
      q = lo + r3*p;
      y = hi + q;
      *tail = (hi - y) + q;
      return y;
    }

  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center. */
  tmp = ix - OFF;
  i = (tmp >> (52 - POW_LOG_TABLE_BITS)) % N;
  k = (int64_t) tmp >> 52; /* arithmetic shift */
  iz = ix - (tmp & 0xfffULL << 52);
  invc = T[i].invc;
  logc = T[i].logc;
  z = asdouble (iz);
  /* Split z into a high part (top 32 bits, with rounding) and a low part. */
  zhi = asdouble ((iz + (1ULL<<31)) & (-1ULL << 32));
  zlo = z - zhi;

  /* log(x) = log1p(z/c-1) + log(c) + k*Ln2 */
  rhi = zhi * invc - 1.0;
  rlo = zlo * invc;
  kd = (double_t) k;

  /* hi + lo = r + log(c) + k*Ln2. */
  khi = kd * Ln2hi;
  w = khi + logc;
  lo = khi - w + logc; /* Compensation term of the khi + logc sum. */
  hi = w + rhi;
  lo = w - hi + rhi + (lo + kd*Ln2lo) + rlo;

  /* log(x) = lo + (log1p(r) - r) + hi. */
  /* Evaluation is optimized assuming superscalar pipelined execution. */
#if HAVE_FAST_FMA
  r = fma (z, invc, -1.0);
#else
  r = rhi + rlo;
#endif
  r2 = r * r;

#if POW_LOG_POLY_ORDER == 7
  p = lo + r*r2*(A[1] + r*A[2] + r2*(A[3] + r*A[4] + r2*A[5]));
#endif
  q = A[0]*r2; /* A[0] == -0.5. */
  w = q + hi;
  p += hi - w + q; /* Compensation term of the q + hi sum. */
  y = p + w;
  *tail = w - y + p;
  return y;
}
+
+#undef N
+#undef T
+#define N (1 << EXP_TABLE_BITS)
+#define InvLn2N __exp_data.invln2N
+#define NegLn2hiN __exp_data.negln2hiN
+#define NegLn2loN __exp_data.negln2loN
+#define Shift __exp_data.shift
+#define T __exp_data.tab
+#define C2 __exp_data.poly[5 - EXP_POLY_ORDER]
+#define C3 __exp_data.poly[6 - EXP_POLY_ORDER]
+#define C4 __exp_data.poly[7 - EXP_POLY_ORDER]
+#define C5 __exp_data.poly[8 - EXP_POLY_ORDER]
+#define C6 __exp_data.poly[9 - EXP_POLY_ORDER]
+
/* Handle results whose 2^(k/N) scale factor over- or underflows the double
   exponent range: the result is scale*(1+tmp).  sbits is the bit pattern
   of scale with a possibly wrapped exponent, and bit 31 of ki tells the
   sign of k (clear means k > 0).  */
static inline double
specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
{
  double_t scale, y;

  if ((ki & 0x80000000) == 0)
    {
      /* k > 0, the exponent of scale might have overflowed by <= 85. */
      sbits -= 97ull << 52;
      scale = asdouble (sbits);
      /* Pull the exponent back into range, then undo the 2^-97 scaling;
	 overflow, if any, is signaled by this final multiply.  */
      y = 0x1p97 * (scale + scale * tmp);
      return check_oflow (y);
    }
  /* k < 0, need special care in the subnormal range. */
  sbits += 1022ull << 52;
  /* Note: sbits is signed scale. */
  scale = asdouble (sbits);
  y = scale + scale * tmp;
  if (fabs (y) < 1.0)
    {
      /* Round y to the right precision before scaling it into the subnormal
	 range to avoid double rounding that can cause 0.5+E/2 ulp error where
	 E is the worst-case ulp error outside the subnormal range.  So this
	 is only useful if the goal is better than 1 ulp worst-case error. */
      double_t hi, lo, one = 1.0;
      if (y < 0.0)
	one = -1.0;
      /* lo = error of the scale + scale*tmp sum above.  */
      lo = scale - y + scale * tmp;
      hi = one + y;
      lo = one - hi + y + lo;
      y = eval_as_double (hi + lo) - one;
      /* Avoid -0.0 with downward rounding. */
      if (WANT_ROUNDING && y == 0.0)
	y = asdouble (sbits & 0x8000000000000000);
      /* The underflow exception needs to be signaled explicitly. */
      force_eval_double (0x1p-1022 * 0x1p-1022);
    }
  y = 0x1p-1022 * y;
  return check_uflow (y);
}
+
+#define SIGN_BIAS (0x800 << EXP_TABLE_BITS)
+
/* Compute sign*exp(x+xtail), where xtail is a small correction added to
   the reduced argument.  sign_bias is 0 or SIGN_BIAS and selects a +1 or
   -1 sign for the result (it flips the sign bit of the scale below).  */
static inline double
exp_inline (double x, double xtail, uint32_t sign_bias)
{
  uint32_t abstop;
  uint64_t ki, idx, top, sbits;
  /* double_t for better performance on targets with FLT_EVAL_METHOD==2. */
  double_t kd, z, r, r2, scale, tail, tmp;

  abstop = top16 (x) & 0x7fff;
  /* Filter out |x| < 2^-54 and |x| >= 704 (unsigned compare trick).  */
  if (unlikely (abstop - top16 (0x1p-54) >= top16 (704.0) - top16 (0x1p-54)))
    {
      if (abstop - top16 (0x1p-54) >= 0x80000000)
	{
	  /* Avoid spurious underflow for tiny x. */
	  /* Note: 0 is common input. */
	  double_t one = WANT_ROUNDING ? 1.0 + x : 1.0;
	  return sign_bias ? -one : one;
	}
      if (abstop >= top16 (768.0))
	{
	  /* Note: inf and nan are already handled. */
	  if (asuint64 (x) >> 63)
	    return __math_uflow (sign_bias);
	  else
	    return __math_oflow (sign_bias);
	}
      /* Large x is special cased below. */
      abstop = 0;
    }

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)]. */
  /* x = ln2/N*k + r, with int k and r in [-ln2/2N, ln2/2N]. */
  z = InvLn2N * x;
#if TOINT_INTRINSICS
  kd = roundtoint (z);
  ki = converttoint (z);
#elif EXP_USE_TOINT_NARROW
  /* z - kd is in [-0.5-2^-16, 0.5] in all rounding modes. */
  kd = eval_as_double (z + Shift);
  ki = asuint64 (kd) >> 16;
  kd = (double_t) (int32_t) ki;
#else
  /* z - kd is in [-1, 1] in non-nearest rounding modes. */
  kd = eval_as_double (z + Shift);
  ki = asuint64 (kd);
  kd -= Shift;
#endif
  r = x + kd*NegLn2hiN + kd*NegLn2loN;
  r += xtail;
  /* 2^(k/N) ~= scale * (1 + tail). */
  idx = 2*(ki % N);
  top = (ki + sign_bias) << (52 - EXP_TABLE_BITS);
  tail = asdouble (T[idx]);
  /* This is only a valid scale when -1023*N < k < 1024*N. */
  sbits = T[idx + 1] + top;
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (tail + exp(r) - 1). */
  /* Evaluation is optimized assuming superscalar pipelined execution. */
  r2 = r*r;
  /* Without fma the worst case error is 0.25/N ulp larger. */
  /* Worst case error is less than 0.5+1.11/N+(abs poly error * 2^53) ulp. */
#if EXP_POLY_ORDER == 4
  tmp = tail + r + r2*C2 + r*r2*(C3 + r*C4);
#elif EXP_POLY_ORDER == 5
  tmp = tail + r + r2*(C2 + r*C3) + r2*r2*(C4 + r*C5);
#elif EXP_POLY_ORDER == 6
  tmp = tail + r + r2*(0.5 + r*C3) + r2*r2*(C4 + r*C5 + r2*C6);
#endif
  /* abstop == 0 marks the over-/underflow paths detected above.  */
  if (unlikely (abstop == 0))
    return specialcase (tmp, sbits, ki);
  scale = asdouble (sbits);
  return scale + scale * tmp;
}
+
+/* Returns 0 if not int, 1 if odd int, 2 if even int. */
/* Classify y from its bit pattern iy.
   Returns 0 if y is not an integer, 1 if it is an odd integer and 2 if it
   is an even integer.  (Zero has no set unit bit and reports 0; callers
   handle it separately.)  */
static inline int
checkint (uint64_t iy)
{
  int e = iy >> 52 & 0x7ff; /* Biased exponent.  */
  if (e < 0x3ff)
    /* |y| < 1: cannot be a non-zero integer.  */
    return 0;
  if (e > 0x3ff + 52)
    /* |y| >= 2^53: all representable values this large are even.  */
    return 2;
  /* The bit with weight 2^0 sits 0x3ff + 52 - e positions up; for e ==
     0x3ff it lands on the lowest exponent bit, which is then set.  */
  uint64_t unit_bit = 1ULL << (0x3ff + 52 - e);
  if (iy & (unit_bit - 1))
    /* Non-zero fraction below the units place.  */
    return 0;
  return (iy & unit_bit) ? 1 : 2;
}
+
/* Return nonzero if i is the bit representation of +-0, +-inf or a NaN.  */
static inline int
zeroinfnan (uint64_t i)
{
  /* 2*i drops the sign bit; subtracting 1 wraps zero to the top of the
     unsigned range, joining the inf/nan encodings above 2*inf - 1.  */
  uint64_t cutoff = 2 * asuint64 (INFINITY) - 1;
  return 2 * i - 1 >= cutoff;
}
+
/* Double-precision x^y, computed as exp(y * log(x)).  log_inline returns
   log(x) in extra precision as hi + lo; the product y*(hi+lo) is then
   passed to exp_inline as two doubles together with a sign bias that
   handles negative x raised to odd integer powers.  */
double
pow (double x, double y)
{
  uint64_t sign_bias = 0;
  uint64_t ix, iy;
  uint32_t topx, topy;

  ix = asuint64 (x);
  iy = asuint64 (y);
  topx = ix >> 52; /* Sign and exponent of x.  */
  topy = iy >> 52;
  /* Filter out x that is subnormal, zero, negative, inf or nan, and y
     whose magnitude is tiny (< 2^-65), huge (>= 2^63), inf or nan.  */
  if (unlikely (topx - 0x001 >= 0x7ff - 0x001 || (topy & 0x7ff) - 0x3be >= 0x43e - 0x3be))
    {
      /* Note: if |y| > 1075 * ln2 * 2^53 ~= 0x1.749p62 then pow(x,y) = inf/0
	 and if |y| < 2^-54 / 1075 ~= 0x1.e7b6p-65 then pow(x,y) = +-1. */
      /* Special cases: (x < 0x1p-126 or inf or nan) or
	 (|y| < 0x1p-65 or |y| >= 0x1p63 or nan). */
      if (unlikely (zeroinfnan (iy)))
	{
	  if (2 * iy == 0)
	    return issignaling_inline (x) ? x + y : 1.0; /* x^+-0 == 1.  */
	  if (ix == asuint64 (1.0))
	    return issignaling_inline (y) ? x + y : 1.0; /* 1^y == 1.  */
	  if (2 * ix > 2 * asuint64 (INFINITY) || 2 * iy > 2 * asuint64 (INFINITY))
	    return x + y; /* Propagate NaN.  */
	  if (2 * ix == 2 * asuint64 (1.0))
	    return 1.0; /* |x| == 1 with y == +-inf.  */
	  if ((2 * ix < 2 * asuint64 (1.0)) == !(iy >> 63))
	    return 0.0; /* |x|<1 && y==inf or |x|>1 && y==-inf. */
	  return y * y; /* Remaining y == +-inf cases give +inf.  */
	}
      if (unlikely (zeroinfnan (ix)))
	{
	  double_t x2 = x * x;
	  if (ix >> 63 && checkint (iy) == 1)
	    {
	      /* x is -0 or -inf and y an odd integer: result keeps the
		 sign of x.  */
	      x2 = -x2;
	      sign_bias = 1;
	    }
	  if (WANT_ERRNO && 2 * ix == 0 && iy >> 63)
	    return __math_divzero (sign_bias); /* +-0 to a negative power.  */
	  return iy >> 63 ? 1 / x2 : x2;
	}
      /* Here x and y are non-zero finite. */
      if (ix >> 63)
	{
	  /* Finite x < 0. */
	  int yint = checkint (iy);
	  if (yint == 0)
	    return __math_invalid (x); /* Non-integer power of negative x. */
	  if (yint == 1)
	    sign_bias = SIGN_BIAS; /* Odd integer power: negative result.  */
	  /* Continue the computation with |x|.  */
	  ix &= 0x7fffffffffffffff;
	  topx &= 0x7ff;
	}
      if ((topy & 0x7ff) - 0x3be >= 0x43e - 0x3be)
	{
	  /* Note: sign_bias == 0 here because y is not odd. */
	  if (ix == asuint64 (1.0))
	    return 1.0;
	  if ((topy & 0x7ff) < 0x3be)
	    {
	      /* |y| < 2^-65, x^y ~= 1 + y*log(x). */
	      if (WANT_ROUNDING)
		return ix > asuint64 (1.0) ? 1.0 + y : 1.0 - y;
	      else
		return 1.0;
	    }
	  /* |y| >= 2^63: the result certainly over- or underflows.  */
	  return (ix > asuint64 (1.0)) == (topy < 0x800)
		 ? __math_oflow (0) : __math_uflow (0);
	}
      if (topx == 0)
	{
	  /* Normalize subnormal x so exponent becomes negative. */
	  ix = asuint64 (x * 0x1p52);
	  ix &= 0x7fffffffffffffff;
	  ix -= 52ULL << 52;
	}
    }

  double_t lo;
  double_t hi = log_inline (ix, &lo);
  double_t ehi, elo;
#if HAVE_FAST_FMA
  ehi = y*hi;
  elo = y*lo + fma (y, hi, -ehi);
#else
  /* No fast fma: split y and hi at 26 bits and multiply in parts so
     ehi + elo closely approximates y*(hi + lo).  */
  double_t yhi = asdouble (iy & -1ULL << 27);
  double_t ylo = y - yhi;
  double_t lhi = asdouble (asuint64 (hi) & -1ULL << 27);
  double_t llo = hi - lhi + lo;
  ehi = yhi*lhi;
  elo = ylo*lhi + y*llo; /* |elo| < |ehi| * 2^-25. */
#endif
  return exp_inline (ehi, elo, sign_bias);
}