Diffstat (limited to 'src/crypto/fipsmodule/bn/montgomery_inv.c')
-rw-r--r--  src/crypto/fipsmodule/bn/montgomery_inv.c | 190
1 file changed, 95 insertions(+), 95 deletions(-)
diff --git a/src/crypto/fipsmodule/bn/montgomery_inv.c b/src/crypto/fipsmodule/bn/montgomery_inv.c
index aa2574b0..c3c788ab 100644
--- a/src/crypto/fipsmodule/bn/montgomery_inv.c
+++ b/src/crypto/fipsmodule/bn/montgomery_inv.c
@@ -28,47 +28,47 @@ OPENSSL_COMPILE_ASSERT(sizeof(uint64_t) ==
BN_MONT_CTX_N0_LIMBS * sizeof(BN_ULONG),
BN_MONT_CTX_N0_LIMBS_DOES_NOT_MATCH_UINT64_T);
-/* LG_LITTLE_R is log_2(r). */
+// LG_LITTLE_R is log_2(r).
#define LG_LITTLE_R (BN_MONT_CTX_N0_LIMBS * BN_BITS2)
uint64_t bn_mont_n0(const BIGNUM *n) {
- /* These conditions are checked by the caller, |BN_MONT_CTX_set|. */
+ // These conditions are checked by the caller, |BN_MONT_CTX_set|.
assert(!BN_is_zero(n));
assert(!BN_is_negative(n));
assert(BN_is_odd(n));
- /* r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
- * ensures that we can do integer division by |r| by simply ignoring
- * |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
- * |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
- * what makes Montgomery multiplication efficient.
- *
- * As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
- * with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
- * multi-limb Montgomery multiplication of |a * b (mod n)|, given the
- * unreduced product |t == a * b|, we repeatedly calculate:
- *
- * t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph).
- * t2 := t1*n0*n
- * t3 := t + t2
- * t := t3 / r copy all limbs of |t3| except the lowest to |t|.
- *
- * In the last step, it would only make sense to ignore the lowest limb of
- * |t3| if it were zero. The middle steps ensure that this is the case:
- *
- * t3 == 0 (mod r)
- * t + t2 == 0 (mod r)
- * t + t1*n0*n == 0 (mod r)
- * t1*n0*n == -t (mod r)
- * t*n0*n == -t (mod r)
- * n0*n == -1 (mod r)
- * n0 == -1/n (mod r)
- *
- * Thus, in each iteration of the loop, we multiply by the constant factor
- * |n0|, the negative inverse of n (mod r). */
-
- /* n_mod_r = n % r. As explained above, this is done by taking the lowest
- * |BN_MONT_CTX_N0_LIMBS| limbs of |n|. */
+ // r == 2**(BN_MONT_CTX_N0_LIMBS * BN_BITS2) and LG_LITTLE_R == lg(r). This
+ // ensures that we can do integer division by |r| by simply ignoring
+ // |BN_MONT_CTX_N0_LIMBS| limbs. Similarly, we can calculate values modulo
+ // |r| by just looking at the lowest |BN_MONT_CTX_N0_LIMBS| limbs. This is
+ // what makes Montgomery multiplication efficient.
+ //
+ // As shown in Algorithm 1 of "Fast Prime Field Elliptic Curve Cryptography
+ // with 256 Bit Primes" by Shay Gueron and Vlad Krasnov, in the loop of a
+ // multi-limb Montgomery multiplication of |a * b (mod n)|, given the
+ // unreduced product |t == a * b|, we repeatedly calculate:
+ //
+ // t1 := t % r |t1| is |t|'s lowest limb (see previous paragraph).
+ // t2 := t1*n0*n
+ // t3 := t + t2
+ // t := t3 / r copy all limbs of |t3| except the lowest to |t|.
+ //
+ // In the last step, it would only make sense to ignore the lowest limb of
+ // |t3| if it were zero. The middle steps ensure that this is the case:
+ //
+ // t3 == 0 (mod r)
+ // t + t2 == 0 (mod r)
+ // t + t1*n0*n == 0 (mod r)
+ // t1*n0*n == -t (mod r)
+ // t*n0*n == -t (mod r)
+ // n0*n == -1 (mod r)
+ // n0 == -1/n (mod r)
+ //
+ // Thus, in each iteration of the loop, we multiply by the constant factor
+ // |n0|, the negative inverse of n (mod r).
+
+ // n_mod_r = n % r. As explained above, this is done by taking the lowest
+ // |BN_MONT_CTX_N0_LIMBS| limbs of |n|.
uint64_t n_mod_r = n->d[0];
#if BN_MONT_CTX_N0_LIMBS == 2
if (n->top > 1) {
@@ -79,32 +79,32 @@ uint64_t bn_mont_n0(const BIGNUM *n) {
return bn_neg_inv_mod_r_u64(n_mod_r);
}
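
For intuition, here is a toy single-limb model of the reduction step derived in the comment above, with r = 2**32 and 32-bit limbs. This is a hypothetical sketch, not the library's code: toy_redc and its parameter names are invented, and it assumes n is odd and n < 2**31 so that the 64-bit sum below cannot overflow.

#include <assert.h>
#include <stdint.h>

// Computes t/r (mod n) for t < n * 2**32, given n0 == -1/n (mod 2**32).
// Hypothetical sketch; not the BoringSSL implementation.
static uint32_t toy_redc(uint64_t t, uint32_t n, uint32_t n0) {
  uint32_t t1 = (uint32_t)t;                   // t1 := t % r (lowest limb)
  uint32_t m = (uint32_t)((uint64_t)t1 * n0);  // m := t1*n0 (mod r)
  uint64_t t2 = (uint64_t)m * n;               // t2 := m*n
  uint64_t t3 = t + t2;                        // t3 := t + t2
  assert((uint32_t)t3 == 0);                   // t3 == 0 (mod r), as derived
  uint64_t next = t3 >> 32;                    // t := t3 / r (drop low limb)
  return (uint32_t)(next >= n ? next - n : next);  // next < 2n, reduce once
}

Given a, b < n, toy_redc((uint64_t)a * b, n, n0) returns a*b/r (mod n), which is the per-iteration step the comment describes.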
-/* bn_neg_inv_r_mod_n_u64 calculates the -1/n mod r; i.e. it calculates |v|
- * such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n|
- * must be odd.
- *
- * This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
- * Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
- * It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
- * Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
- * (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
- *
- * This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
- * (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
- * constant-time with respect to |n|. We assume uint64_t additions,
- * subtractions, shifts, and bitwise operations are all constant time, which
- * may be a large leap of faith on 32-bit targets. We avoid division and
- * multiplication, which tend to be the most problematic in terms of timing
- * leaks.
- *
- * Most GCD implementations return values such that |u*r + v*n == 1|, so the
- * caller would have to negate the resultant |v| for the purpose of Montgomery
- * multiplication. This implementation does the negation implicitly by doing
- * the computations as a difference instead of a sum. */
+// bn_neg_inv_mod_r_u64 calculates -1/n mod r; i.e. it calculates |v|
+// such that u*r - v*n == 1. |r| is the constant defined in |bn_mont_n0|. |n|
+// must be odd.
+//
+// This is derived from |xbinGCD| in Henry S. Warren, Jr.'s "Montgomery
+// Multiplication" (http://www.hackersdelight.org/MontgomeryMultiplication.pdf).
+// It is very similar to the MODULAR-INVERSE function in Stephen R. Dussé's and
+// Burton S. Kaliski Jr.'s "A Cryptographic Library for the Motorola DSP56000"
+// (http://link.springer.com/chapter/10.1007%2F3-540-46877-3_21).
+//
+// This is inspired by Joppe W. Bos's "Constant Time Modular Inversion"
+// (http://www.joppebos.com/files/CTInversion.pdf) so that the inversion is
+// constant-time with respect to |n|. We assume uint64_t additions,
+// subtractions, shifts, and bitwise operations are all constant time, which
+// may be a large leap of faith on 32-bit targets. We avoid division and
+// multiplication, which tend to be the most problematic in terms of timing
+// leaks.
+//
+// Most GCD implementations return values such that |u*r + v*n == 1|, so the
+// caller would have to negate the resultant |v| for the purpose of Montgomery
+// multiplication. This implementation does the negation implicitly by doing
+// the computations as a difference instead of a sum.
static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
assert(n % 2 == 1);
- /* alpha == 2**(lg r - 1) == r / 2. */
+ // alpha == 2**(lg r - 1) == r / 2.
static const uint64_t alpha = UINT64_C(1) << (LG_LITTLE_R - 1);
const uint64_t beta = n;
@@ -112,46 +112,46 @@ static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
uint64_t u = 1;
uint64_t v = 0;
- /* The invariant maintained from here on is:
- * 2**(lg r - i) == u*2*alpha - v*beta. */
+ // The invariant maintained from here on is:
+ // 2**(lg r - i) == u*2*alpha - v*beta.
for (size_t i = 0; i < LG_LITTLE_R; ++i) {
#if BN_BITS2 == 64 && defined(BN_ULLONG)
assert((BN_ULLONG)(1) << (LG_LITTLE_R - i) ==
((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif
- /* Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
- * |u = (u + beta) / 2| and |v = (v / 2) + alpha|. */
-
- uint64_t u_is_odd = UINT64_C(0) - (u & 1); /* Either 0xff..ff or 0. */
-
- /* The addition can overflow, so use Dietz's method for it.
- *
- * Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
- * (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
- * (embedded in 64 bits to so that overflow can be ignored):
- *
- * (declare-fun x () (_ BitVec 64))
- * (declare-fun y () (_ BitVec 64))
- * (assert (let (
- * (one (_ bv1 64))
- * (thirtyTwo (_ bv32 64)))
- * (and
- * (bvult x (bvshl one thirtyTwo))
- * (bvult y (bvshl one thirtyTwo))
- * (not (=
- * (bvadd (bvlshr (bvxor x y) one) (bvand x y))
- * (bvlshr (bvadd x y) one)))
- * )))
- * (check-sat) */
- uint64_t beta_if_u_is_odd = beta & u_is_odd; /* Either |beta| or 0. */
+ // Delete a common factor of 2 in u and v if |u| is even. Otherwise, set
+ // |u = (u + beta) / 2| and |v = (v / 2) + alpha|.
+
+ uint64_t u_is_odd = UINT64_C(0) - (u & 1); // Either 0xff..ff or 0.
+
+ // The addition can overflow, so use Dietz's method for it.
+ //
+ // Dietz calculates (x+y)/2 by (x⊕y)>>1 + x&y. This is valid for all
+ // (unsigned) x and y, even when x+y overflows. Evidence for 32-bit values
+// (embedded in 64 bits so that overflow can be ignored):
+ //
+ // (declare-fun x () (_ BitVec 64))
+ // (declare-fun y () (_ BitVec 64))
+ // (assert (let (
+ // (one (_ bv1 64))
+ // (thirtyTwo (_ bv32 64)))
+ // (and
+ // (bvult x (bvshl one thirtyTwo))
+ // (bvult y (bvshl one thirtyTwo))
+ // (not (=
+ // (bvadd (bvlshr (bvxor x y) one) (bvand x y))
+ // (bvlshr (bvadd x y) one)))
+ // )))
+ // (check-sat)
+ uint64_t beta_if_u_is_odd = beta & u_is_odd; // Either |beta| or 0.
u = ((u ^ beta_if_u_is_odd) >> 1) + (u & beta_if_u_is_odd);
- uint64_t alpha_if_u_is_odd = alpha & u_is_odd; /* Either |alpha| or 0. */
+ uint64_t alpha_if_u_is_odd = alpha & u_is_odd; // Either |alpha| or 0.
v = (v >> 1) + alpha_if_u_is_odd;
}
- /* The invariant now shows that u*r - v*n == 1 since r == 2 * alpha. */
+ // The invariant now shows that u*r - v*n == 1 since r == 2 * alpha.
#if BN_BITS2 == 64 && defined(BN_ULLONG)
assert(1 == ((BN_ULLONG)u * 2 * alpha) - ((BN_ULLONG)v * beta));
#endif
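
As a sanity check of Dietz's trick beyond the SMT evidence quoted above, here is a standalone, hypothetical sketch comparing ((x^y)>>1) + (x&y) against an overflow-free computation of floor((x+y)/2) at 64-bit boundary values:

#include <assert.h>
#include <stdint.h>

static uint64_t avg_dietz(uint64_t x, uint64_t y) {
  return ((x ^ y) >> 1) + (x & y);  // (x+y)/2 without ever forming x+y
}

int main(void) {
  uint64_t vals[] = {0, 1, 2, (UINT64_C(1) << 63), UINT64_MAX - 1, UINT64_MAX};
  for (int i = 0; i < 6; i++) {
    for (int j = 0; j < 6; j++) {
      uint64_t x = vals[i], y = vals[j];
      // floor((x+y)/2) without overflow: halve each addend, then add back
      // the carry produced by the two low bits.
      uint64_t want = (x >> 1) + (y >> 1) + (x & y & 1);
      assert(avg_dietz(x, y) == want);
    }
  }
  return 0;
}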
@@ -159,9 +159,9 @@ static uint64_t bn_neg_inv_mod_r_u64(uint64_t n) {
return v;
}
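
The defining property u*r - v*n == 1 implies that the returned value satisfies n*v == -1 (mod 2**64), which is easy to test. The hypothetical sketch below computes the same constant by Hensel (Newton) lifting instead; note that it relies on multiplication, which the constant-time code above deliberately avoids, so it is only a cross-check, not a substitute.

#include <assert.h>
#include <stdint.h>

// Hypothetical: -1/n mod 2**64 for odd n via Hensel lifting. inv == n is
// correct to 3 bits (n*n == 1 (mod 8) for odd n); each step doubles the
// number of correct low bits: 3 -> 6 -> 12 -> 24 -> 48 -> 96 >= 64.
static uint64_t neg_inv_hensel(uint64_t n) {
  uint64_t inv = n;
  for (int i = 0; i < 5; i++) {
    inv *= 2 - n * inv;
  }
  return 0 - inv;  // negate 1/n to get -1/n (mod 2**64)
}

int main(void) {
  uint64_t n = UINT64_C(0x1234567890abcdef);    // any odd value
  assert(n * neg_inv_hensel(n) == UINT64_MAX);  // n*n0 == -1 (mod 2**64)
  return 0;
}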
-/* bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger
- * than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and
- * odd. */
+// bn_mod_exp_base_2_vartime calculates r = 2**p (mod n). |p| must be larger
+// than log_2(n); i.e. 2**p must be larger than |n|. |n| must be positive and
+// odd.
int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
assert(!BN_is_zero(n));
assert(!BN_is_negative(n));
@@ -175,13 +175,13 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
return 1;
}
- /* Set |r| to the smallest power of two larger than |n|. */
+ // Set |r| to the smallest power of two larger than |n|.
assert(p > n_bits);
if (!BN_set_bit(r, n_bits)) {
return 0;
}
- /* Unconditionally reduce |r|. */
+ // Unconditionally reduce |r|.
assert(BN_cmp(r, n) > 0);
if (!BN_usub(r, r, n)) {
return 0;
@@ -189,10 +189,10 @@ int bn_mod_exp_base_2_vartime(BIGNUM *r, unsigned p, const BIGNUM *n) {
assert(BN_cmp(r, n) < 0);
for (unsigned i = n_bits; i < p; ++i) {
- /* This is like |BN_mod_lshift1_quick| except using |BN_usub|.
- *
- * TODO: Replace this with the use of a constant-time variant of
- * |BN_mod_lshift1_quick|. */
+ // This is like |BN_mod_lshift1_quick| except using |BN_usub|.
+ //
+ // TODO: Replace this with the use of a constant-time variant of
+ // |BN_mod_lshift1_quick|.
if (!BN_lshift1(r, r)) {
return 0;
}
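
To make the structure concrete, here is a toy model of the same shift-and-conditionally-subtract computation of 2**p (mod n) on machine integers. toy_mod_exp_base_2 is a hypothetical name, and the sketch assumes n is odd, n > 1, and n < 2**31 so the left shifts cannot overflow.

#include <assert.h>
#include <stdint.h>

static uint32_t toy_mod_exp_base_2(unsigned p, uint32_t n) {
  unsigned n_bits = 0;  // bit length of n
  for (uint32_t t = n; t != 0; t >>= 1) {
    n_bits++;
  }
  assert(p > n_bits && (n & 1) == 1 && n > 1 && n < (UINT32_C(1) << 31));
  // Smallest power of two larger than n, then one unconditional reduction.
  uint32_t r = (UINT32_C(1) << n_bits) - n;
  for (unsigned i = n_bits; i < p; ++i) {
    r <<= 1;        // like BN_lshift1 above
    if (r >= n) {
      r -= n;       // like the conditional BN_usub reduction
    }
  }
  return r;
}

For example, toy_mod_exp_base_2(10, 11) returns 1, matching 2**10 mod 11 == 1024 mod 11 == 1.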