author     Szabolcs Nagy <szabolcs.nagy@arm.com>  2018-12-07 14:58:51 +0000
committer  Szabolcs Nagy <szabolcs.nagy@arm.com>  2018-12-07 14:58:51 +0000
commit     04884bd04eac4b251da4026900010ea7d8850edc (patch)
tree       33c966ee96c845a40dc64ecf3bac969e76c69bc1 /math/pow.c
parent     75b8d8c6c12b37a38211defcc5b941adb7de121f (diff)
download   arm-optimized-routines-04884bd04eac4b251da4026900010ea7d8850edc.tar.gz
More consistent excess precision handling
The current code aims to support FLT_EVAL_METHOD != 0 targets (such as
i386 with the x87 FPU, or m68k), assuming appropriate narrowing eval
functions are defined for them. But the narrowing eval functions were
not used consistently: a return statement may not guarantee narrowing
(that was the C99 behaviour, which was changed in C11 Annex F), so the
narrowing eval_as_ functions should be used at return statements too.
Results are correct provided narrowing happens only at eval_as_ calls.

On most targets this change has no effect, because eval_as_ is a no-op
there. Most math implementations that care about excess precision
already compile in a mode that narrows at returns, so for them this
change is not strictly necessary; it just documents the assumptions
better.
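For illustration, here is a minimal sketch of the narrowing-helper idea
described above (the repository's real helpers live in math_config.h and
are plain no-ops by default; the FLT_EVAL_METHOD != 0 branch below is a
hypothetical customization for an x87-style target, not the code shipped
in the repo):

#include <float.h>

/* Narrow a possibly excess-precision result to double.  On targets
   where FLT_EVAL_METHOD != 0 (e.g. i386 with the x87 FPU), storing
   through a volatile forces the 80-bit intermediate to be rounded to
   a 64-bit double; elsewhere this is a no-op.  */
#if FLT_EVAL_METHOD != 0
static inline double
eval_as_double (double x)
{
  volatile double y = x; /* the store drops the excess precision */
  return y;
}
#else
static inline double
eval_as_double (double x)
{
  return x; /* no excess precision to drop */
}
#endif

With such a helper, writing return check_oflow (eval_as_double (y));
guarantees that y is rounded to double precision before it is returned,
independent of whether the compiler narrows at return statements.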
Diffstat (limited to 'math/pow.c')
-rw-r--r--  math/pow.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/math/pow.c b/math/pow.c
index a040b3d..e55f159 100644
--- a/math/pow.c
+++ b/math/pow.c
@@ -134,7 +134,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
       sbits -= 1009ull << 52;
       scale = asdouble (sbits);
       y = 0x1p1009 * (scale + scale * tmp);
-      return check_oflow (y);
+      return check_oflow (eval_as_double (y));
     }
   /* k < 0, need special care in the subnormal range.  */
   sbits += 1022ull << 52;
@@ -161,7 +161,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
       force_eval_double (opt_barrier_double (0x1p-1022) * 0x1p-1022);
     }
   y = 0x1p-1022 * y;
-  return check_uflow (y);
+  return check_uflow (eval_as_double (y));
 }
 
 #define SIGN_BIAS (0x800 << EXP_TABLE_BITS)
@@ -169,7 +169,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
 /* Computes sign*exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|.
    The sign_bias argument is SIGN_BIAS or 0 and sets the sign to -1 or 1.  */
 static inline double
-exp_inline (double x, double xtail, uint32_t sign_bias)
+exp_inline (double_t x, double_t xtail, uint32_t sign_bias)
 {
   uint32_t abstop;
   uint64_t ki, idx, top, sbits;
@@ -241,7 +241,7 @@ exp_inline (double x, double xtail, uint32_t sign_bias)
   scale = asdouble (sbits);
   /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
      is no spurious underflow here even without fma.  */
-  return scale + scale * tmp;
+  return eval_as_double (scale + scale * tmp);
 }
 
 /* Returns 0 if not int, 1 if odd int, 2 if even int.  The argument is