aboutsummaryrefslogtreecommitdiff
path: root/math/exp.c
diff options
context:
space:
mode:
authorSzabolcs Nagy <szabolcs.nagy@arm.com>2018-12-07 14:58:51 +0000
committerSzabolcs Nagy <szabolcs.nagy@arm.com>2018-12-07 14:58:51 +0000
commit04884bd04eac4b251da4026900010ea7d8850edc (patch)
tree33c966ee96c845a40dc64ecf3bac969e76c69bc1 /math/exp.c
parent75b8d8c6c12b37a38211defcc5b941adb7de121f (diff)
downloadarm-optimized-routines-04884bd04eac4b251da4026900010ea7d8850edc.tar.gz
More consistent excess precision handling
The current code aims to support FLT_EVAL_METHOD!=0 targets (such as i386 with x87 fpu or m68k), assuming appropriate narrowing eval functions are defined for them. But the narrowing eval functions were not used consistently: a return statement may not guarantee narrowing (that was the C99 behaviour, which was changed in C11 Annex F), so we should use the narrowing eval_as_ functions at return statements too. Results are correct provided narrowing only happens at eval_as_ calls. On most targets this change has no effect because eval_as_ is a noop. Most math implementations that care about excess precision already compile in a mode that narrows at returns, so this change is not strictly necessary for them; it just better documents the assumptions.
Diffstat (limited to 'math/exp.c')
-rw-r--r--math/exp.c6
1 file changed, 3 insertions, 3 deletions
diff --git a/math/exp.c b/math/exp.c
index fa913b4..ffd3111 100644
--- a/math/exp.c
+++ b/math/exp.c
@@ -39,7 +39,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
sbits -= 1009ull << 52;
scale = asdouble (sbits);
y = 0x1p1009 * (scale + scale * tmp);
- return check_oflow (y);
+ return check_oflow (eval_as_double (y));
}
/* k < 0, need special care in the subnormal range. */
sbits += 1022ull << 52;
@@ -63,7 +63,7 @@ specialcase (double_t tmp, uint64_t sbits, uint64_t ki)
force_eval_double (opt_barrier_double (0x1p-1022) * 0x1p-1022);
}
y = 0x1p-1022 * y;
- return check_uflow (y);
+ return check_uflow (eval_as_double (y));
}
/* Top 12 bits of a double (sign and exponent bits). */
@@ -149,7 +149,7 @@ exp_inline (double x, double xtail, int hastail)
scale = asdouble (sbits);
/* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
is no spurious underflow here even without fma. */
- return scale + scale * tmp;
+ return eval_as_double (scale + scale * tmp);
}
double