Diffstat (limited to 'src/crypto/fipsmodule/ec/p256-64.c')
-rw-r--r--  src/crypto/fipsmodule/ec/p256-64.c  831
1 file changed, 414 insertions, 417 deletions
diff --git a/src/crypto/fipsmodule/ec/p256-64.c b/src/crypto/fipsmodule/ec/p256-64.c
index 8952aa2e..f7d1ff11 100644
--- a/src/crypto/fipsmodule/ec/p256-64.c
+++ b/src/crypto/fipsmodule/ec/p256-64.c
@@ -12,12 +12,12 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
-/* A 64-bit implementation of the NIST P-256 elliptic curve point
- * multiplication
- *
- * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
- * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
- * work which got its smarts from Daniel J. Bernstein's work on the same. */
+// A 64-bit implementation of the NIST P-256 elliptic curve point
+// multiplication
+//
+// OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
+// Otherwise based on Emilia's P224 work, which was inspired by my curve25519
+// work which got its smarts from Daniel J. Bernstein's work on the same.
#include <openssl/base.h>
@@ -35,29 +35,29 @@
#include "internal.h"
-/* The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We
- * can serialise an element of this field into 32 bytes. We call this an
- * felem_bytearray. */
+// The underlying field. P256 operates over GF(2^256-2^224+2^192+2^96-1). We
+// can serialise an element of this field into 32 bytes. We call this an
+// felem_bytearray.
typedef uint8_t felem_bytearray[32];
-/* The representation of field elements.
- * ------------------------------------
- *
- * We represent field elements with either four 128-bit values, eight 128-bit
- * values, or four 64-bit values. The field element represented is:
- * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p)
- * or:
- * v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p)
- *
- * 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits
- * apart, but are 128-bits wide, the most significant bits of each limb overlap
- * with the least significant bits of the next.
- *
- * A field element with four limbs is an 'felem'. One with eight limbs is a
- * 'longfelem'
- *
- * A field element with four, 64-bit values is called a 'smallfelem'. Small
- * values are used as intermediate values before multiplication. */
+// The representation of field elements.
+// ------------------------------------
+//
+// We represent field elements with either four 128-bit values, eight 128-bit
+// values, or four 64-bit values. The field element represented is:
+// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + v[3]*2^192 (mod p)
+// or:
+// v[0]*2^0 + v[1]*2^64 + v[2]*2^128 + ... + v[8]*2^512 (mod p)
+//
+// 128-bit values are called 'limbs'. Since the limbs are spaced only 64 bits
+// apart, but are 128 bits wide, the most significant bits of each limb overlap
+// with the least significant bits of the next.
+//
+// A field element with four limbs is an 'felem'. One with eight limbs is a
+// 'longfelem'.
+//
+// A field element with four 64-bit values is called a 'smallfelem'. Small
+// values are used as intermediate values before multiplication.
#define NLIMBS 4
@@ -66,7 +66,7 @@ typedef limb felem[NLIMBS];
typedef limb longfelem[NLIMBS * 2];
typedef uint64_t smallfelem[NLIMBS];
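// Illustrative sketch (not part of this change): a smallfelem is a plain
// little-endian sequence of 64-bit words, i.e. it holds the value
// v[0] + v[1]*2^64 + v[2]*2^128 + v[3]*2^192, so word 3 is the most
// significant. (An felem uses the same positional weights, but its 128-bit
// limbs make the representation of a value non-unique.) A hypothetical,
// non-constant-time helper comparing two smallfelems as integers therefore
// scans from word 3 down to word 0:
static int smallfelem_cmp_example(const smallfelem a, const smallfelem b) {
  for (int i = NLIMBS - 1; i >= 0; i--) {
    if (a[i] != b[i]) {
      return a[i] < b[i] ? -1 : 1;
    }
  }
  return 0;
}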
-/* This is the value of the prime as four 64-bit words, little-endian. */
+// This is the value of the prime as four 64-bit words, little-endian.
static const uint64_t kPrime[4] = {0xfffffffffffffffful, 0xffffffff, 0,
0xffffffff00000001ul};
static const uint64_t bottom63bits = 0x7ffffffffffffffful;
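// Illustrative sketch (not part of this change, and assuming the file's
// uint128_t typedef is in scope here): a consistency check that kPrime's
// little-endian words encode p = 2^256 - 2^224 + 2^192 + 2^96 - 1. The prime
// splits into two pieces whose non-zero words never overlap:
//   2^96 - 1              = {0xffffffffffffffff, 0x00000000ffffffff, 0, 0}
//   2^256 - 2^224 + 2^192 = {0, 0, 0, 0xffffffff00000001}
// so a word-wise add-with-carry of the two must reproduce kPrime exactly.
static int kPrime_layout_example(void) {
  static const uint64_t low[4] = {0xfffffffffffffffful, 0xfffffffful, 0, 0};
  static const uint64_t high[4] = {0, 0, 0, 0xffffffff00000001ul};
  uint128_t carry = 0;
  for (int i = 0; i < 4; i++) {
    uint128_t sum = (uint128_t)low[i] + high[i] + carry;
    if ((uint64_t)sum != kPrime[i]) {
      return 0;
    }
    carry = sum >> 64;
  }
  return carry == 0;
}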
@@ -81,8 +81,8 @@ static void store_u64(uint8_t out[8], uint64_t in) {
OPENSSL_memcpy(out, &in, sizeof(in));
}
-/* bin32_to_felem takes a little-endian byte array and converts it into felem
- * form. This assumes that the CPU is little-endian. */
+// bin32_to_felem takes a little-endian byte array and converts it into felem
+// form. This assumes that the CPU is little-endian.
static void bin32_to_felem(felem out, const uint8_t in[32]) {
out[0] = load_u64(&in[0]);
out[1] = load_u64(&in[8]);
@@ -90,8 +90,8 @@ static void bin32_to_felem(felem out, const uint8_t in[32]) {
out[3] = load_u64(&in[24]);
}
-/* smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
- * 32 byte array. This assumes that the CPU is little-endian. */
+// smallfelem_to_bin32 takes a smallfelem and serialises into a little endian,
+// 32 byte array. This assumes that the CPU is little-endian.
static void smallfelem_to_bin32(uint8_t out[32], const smallfelem in) {
store_u64(&out[0], in[0]);
store_u64(&out[8], in[1]);
@@ -99,14 +99,14 @@ static void smallfelem_to_bin32(uint8_t out[32], const smallfelem in) {
store_u64(&out[24], in[3]);
}
-/* To preserve endianness when using BN_bn2bin and BN_bin2bn. */
+// To preserve endianness when using BN_bn2bin and BN_bin2bn.
static void flip_endian(uint8_t *out, const uint8_t *in, size_t len) {
for (size_t i = 0; i < len; ++i) {
out[i] = in[len - 1 - i];
}
}
-/* BN_to_felem converts an OpenSSL BIGNUM into an felem. */
+// BN_to_felem converts an OpenSSL BIGNUM into an felem.
static int BN_to_felem(felem out, const BIGNUM *bn) {
if (BN_is_negative(bn)) {
OPENSSL_PUT_ERROR(EC, EC_R_BIGNUM_OUT_OF_RANGE);
@@ -114,7 +114,7 @@ static int BN_to_felem(felem out, const BIGNUM *bn) {
}
felem_bytearray b_out;
- /* BN_bn2bin eats leading zeroes */
+ // BN_bn2bin eats leading zeroes
OPENSSL_memset(b_out, 0, sizeof(b_out));
size_t num_bytes = BN_num_bytes(bn);
if (num_bytes > sizeof(b_out)) {
@@ -129,7 +129,7 @@ static int BN_to_felem(felem out, const BIGNUM *bn) {
return 1;
}
-/* felem_to_BN converts an felem into an OpenSSL BIGNUM. */
+// smallfelem_to_BN converts a smallfelem into an OpenSSL BIGNUM.
static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in) {
felem_bytearray b_in, b_out;
smallfelem_to_bin32(b_in, in);
@@ -137,7 +137,7 @@ static BIGNUM *smallfelem_to_BN(BIGNUM *out, const smallfelem in) {
return BN_bin2bn(b_out, sizeof(b_out), out);
}
-/* Field operations. */
+// Field operations.
static void felem_assign(felem out, const felem in) {
out[0] = in[0];
@@ -146,7 +146,7 @@ static void felem_assign(felem out, const felem in) {
out[3] = in[3];
}
-/* felem_sum sets out = out + in. */
+// felem_sum sets out = out + in.
static void felem_sum(felem out, const felem in) {
out[0] += in[0];
out[1] += in[1];
@@ -154,7 +154,7 @@ static void felem_sum(felem out, const felem in) {
out[3] += in[3];
}
-/* felem_small_sum sets out = out + in. */
+// felem_small_sum sets out = out + in.
static void felem_small_sum(felem out, const smallfelem in) {
out[0] += in[0];
out[1] += in[1];
@@ -162,7 +162,7 @@ static void felem_small_sum(felem out, const smallfelem in) {
out[3] += in[3];
}
-/* felem_scalar sets out = out * scalar */
+// felem_scalar sets out = out * scalar
static void felem_scalar(felem out, const uint64_t scalar) {
out[0] *= scalar;
out[1] *= scalar;
@@ -170,7 +170,7 @@ static void felem_scalar(felem out, const uint64_t scalar) {
out[3] *= scalar;
}
-/* longfelem_scalar sets out = out * scalar */
+// longfelem_scalar sets out = out * scalar
static void longfelem_scalar(longfelem out, const uint64_t scalar) {
out[0] *= scalar;
out[1] *= scalar;
@@ -186,27 +186,27 @@ static void longfelem_scalar(longfelem out, const uint64_t scalar) {
#define two105 (((limb)1) << 105)
#define two105m41p9 ((((limb)1) << 105) - (((limb)1) << 41) + (((limb)1) << 9))
-/* zero105 is 0 mod p */
+// zero105 is 0 mod p
static const felem zero105 = {two105m41m9, two105, two105m41p9, two105m41p9};
-/* smallfelem_neg sets |out| to |-small|
- * On exit:
- * out[i] < out[i] + 2^105 */
+// smallfelem_neg sets |out| to |-small|
+// On exit:
+// out[i] < out[i] + 2^105
static void smallfelem_neg(felem out, const smallfelem small) {
- /* In order to prevent underflow, we subtract from 0 mod p. */
+ // In order to prevent underflow, we subtract from 0 mod p.
out[0] = zero105[0] - small[0];
out[1] = zero105[1] - small[1];
out[2] = zero105[2] - small[2];
out[3] = zero105[3] - small[3];
}
-/* felem_diff subtracts |in| from |out|
- * On entry:
- * in[i] < 2^104
- * On exit:
- * out[i] < out[i] + 2^105. */
+// felem_diff subtracts |in| from |out|
+// On entry:
+// in[i] < 2^104
+// On exit:
+// out[i] < out[i] + 2^105.
static void felem_diff(felem out, const felem in) {
- /* In order to prevent underflow, we add 0 mod p before subtracting. */
+ // In order to prevent underflow, we add 0 mod p before subtracting.
out[0] += zero105[0];
out[1] += zero105[1];
out[2] += zero105[2];
@@ -224,17 +224,17 @@ static void felem_diff(felem out, const felem in) {
#define two107m43p11 \
((((limb)1) << 107) - (((limb)1) << 43) + (((limb)1) << 11))
-/* zero107 is 0 mod p */
+// zero107 is 0 mod p
static const felem zero107 = {two107m43m11, two107, two107m43p11, two107m43p11};
-/* An alternative felem_diff for larger inputs |in|
- * felem_diff_zero107 subtracts |in| from |out|
- * On entry:
- * in[i] < 2^106
- * On exit:
- * out[i] < out[i] + 2^107. */
+// An alternative felem_diff for larger inputs |in|
+// felem_diff_zero107 subtracts |in| from |out|
+// On entry:
+// in[i] < 2^106
+// On exit:
+// out[i] < out[i] + 2^107.
static void felem_diff_zero107(felem out, const felem in) {
- /* In order to prevent underflow, we add 0 mod p before subtracting. */
+ // In order to prevent underflow, we add 0 mod p before subtracting.
out[0] += zero107[0];
out[1] += zero107[1];
out[2] += zero107[2];
@@ -246,11 +246,11 @@ static void felem_diff_zero107(felem out, const felem in) {
out[3] -= in[3];
}
-/* longfelem_diff subtracts |in| from |out|
- * On entry:
- * in[i] < 7*2^67
- * On exit:
- * out[i] < out[i] + 2^70 + 2^40. */
+// longfelem_diff subtracts |in| from |out|
+// On entry:
+// in[i] < 7*2^67
+// On exit:
+// out[i] < out[i] + 2^70 + 2^40.
static void longfelem_diff(longfelem out, const longfelem in) {
static const limb two70m8p6 =
(((limb)1) << 70) - (((limb)1) << 8) + (((limb)1) << 6);
@@ -260,7 +260,7 @@ static void longfelem_diff(longfelem out, const longfelem in) {
(((limb)1) << 38) + (((limb)1) << 6);
static const limb two70m6 = (((limb)1) << 70) - (((limb)1) << 6);
- /* add 0 mod p to avoid underflow */
+ // add 0 mod p to avoid underflow
out[0] += two70m8p6;
out[1] += two70p40;
out[2] += two70;
@@ -270,7 +270,7 @@ static void longfelem_diff(longfelem out, const longfelem in) {
out[6] += two70m6;
out[7] += two70m6;
- /* in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6 */
+ // in[i] < 7*2^67 < 2^70 - 2^40 - 2^38 + 2^6
out[0] -= in[0];
out[1] -= in[1];
out[2] -= in[2];
@@ -286,80 +286,80 @@ static void longfelem_diff(longfelem out, const longfelem in) {
#define two64m46 ((((limb)1) << 64) - (((limb)1) << 46))
#define two64m32 ((((limb)1) << 64) - (((limb)1) << 32))
-/* zero110 is 0 mod p. */
+// zero110 is 0 mod p.
static const felem zero110 = {two64m0, two110p32m0, two64m46, two64m32};
-/* felem_shrink converts an felem into a smallfelem. The result isn't quite
- * minimal as the value may be greater than p.
- *
- * On entry:
- * in[i] < 2^109
- * On exit:
- * out[i] < 2^64. */
+// felem_shrink converts an felem into a smallfelem. The result isn't quite
+// minimal as the value may be greater than p.
+//
+// On entry:
+// in[i] < 2^109
+// On exit:
+// out[i] < 2^64.
static void felem_shrink(smallfelem out, const felem in) {
felem tmp;
uint64_t a, b, mask;
int64_t high, low;
static const uint64_t kPrime3Test =
- 0x7fffffff00000001ul; /* 2^63 - 2^32 + 1 */
+ 0x7fffffff00000001ul; // 2^63 - 2^32 + 1
- /* Carry 2->3 */
+ // Carry 2->3
tmp[3] = zero110[3] + in[3] + ((uint64_t)(in[2] >> 64));
- /* tmp[3] < 2^110 */
+ // tmp[3] < 2^110
tmp[2] = zero110[2] + (uint64_t)in[2];
tmp[0] = zero110[0] + in[0];
tmp[1] = zero110[1] + in[1];
- /* tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65 */
+ // tmp[0] < 2**110, tmp[1] < 2^111, tmp[2] < 2**65
- /* We perform two partial reductions where we eliminate the high-word of
- * tmp[3]. We don't update the other words till the end. */
- a = tmp[3] >> 64; /* a < 2^46 */
+ // We perform two partial reductions where we eliminate the high-word of
+ // tmp[3]. We don't update the other words till the end.
+ a = tmp[3] >> 64; // a < 2^46
tmp[3] = (uint64_t)tmp[3];
tmp[3] -= a;
tmp[3] += ((limb)a) << 32;
- /* tmp[3] < 2^79 */
+ // tmp[3] < 2^79
b = a;
- a = tmp[3] >> 64; /* a < 2^15 */
- b += a; /* b < 2^46 + 2^15 < 2^47 */
+ a = tmp[3] >> 64; // a < 2^15
+ b += a; // b < 2^46 + 2^15 < 2^47
tmp[3] = (uint64_t)tmp[3];
tmp[3] -= a;
tmp[3] += ((limb)a) << 32;
- /* tmp[3] < 2^64 + 2^47 */
+ // tmp[3] < 2^64 + 2^47
- /* This adjusts the other two words to complete the two partial
- * reductions. */
+ // This adjusts the other two words to complete the two partial
+ // reductions.
tmp[0] += b;
tmp[1] -= (((limb)b) << 32);
- /* In order to make space in tmp[3] for the carry from 2 -> 3, we
- * conditionally subtract kPrime if tmp[3] is large enough. */
+ // In order to make space in tmp[3] for the carry from 2 -> 3, we
+ // conditionally subtract kPrime if tmp[3] is large enough.
high = tmp[3] >> 64;
- /* As tmp[3] < 2^65, high is either 1 or 0 */
+ // As tmp[3] < 2^65, high is either 1 or 0
high = ~(high - 1);
- /* high is:
- * all ones if the high word of tmp[3] is 1
- * all zeros if the high word of tmp[3] if 0 */
+ // high is:
+ // all ones if the high word of tmp[3] is 1
+  //   all zeros if the high word of tmp[3] is 0
low = tmp[3];
mask = low >> 63;
- /* mask is:
- * all ones if the MSB of low is 1
- * all zeros if the MSB of low if 0 */
+ // mask is:
+ // all ones if the MSB of low is 1
+  //   all zeros if the MSB of low is 0
low &= bottom63bits;
low -= kPrime3Test;
- /* if low was greater than kPrime3Test then the MSB is zero */
+ // if low was greater than kPrime3Test then the MSB is zero
low = ~low;
low >>= 63;
- /* low is:
- * all ones if low was > kPrime3Test
- * all zeros if low was <= kPrime3Test */
+ // low is:
+ // all ones if low was > kPrime3Test
+ // all zeros if low was <= kPrime3Test
mask = (mask & low) | high;
tmp[0] -= mask & kPrime[0];
tmp[1] -= mask & kPrime[1];
- /* kPrime[2] is zero, so omitted */
+ // kPrime[2] is zero, so omitted
tmp[3] -= mask & kPrime[3];
- /* tmp[3] < 2**64 - 2**32 + 1 */
+ // tmp[3] < 2**64 - 2**32 + 1
tmp[1] += ((uint64_t)(tmp[0] >> 64));
tmp[0] = (uint64_t)tmp[0];
@@ -367,7 +367,7 @@ static void felem_shrink(smallfelem out, const felem in) {
tmp[1] = (uint64_t)tmp[1];
tmp[3] += ((uint64_t)(tmp[2] >> 64));
tmp[2] = (uint64_t)tmp[2];
- /* tmp[i] < 2^64 */
+ // tmp[i] < 2^64
out[0] = tmp[0];
out[1] = tmp[1];
@@ -375,7 +375,7 @@ static void felem_shrink(smallfelem out, const felem in) {
out[3] = tmp[3];
}
-/* smallfelem_expand converts a smallfelem to an felem */
+// smallfelem_expand converts a smallfelem to an felem
static void smallfelem_expand(felem out, const smallfelem in) {
out[0] = in[0];
out[1] = in[1];
@@ -383,11 +383,11 @@ static void smallfelem_expand(felem out, const smallfelem in) {
out[3] = in[3];
}
-/* smallfelem_square sets |out| = |small|^2
- * On entry:
- * small[i] < 2^64
- * On exit:
- * out[i] < 7 * 2^64 < 2^67 */
+// smallfelem_square sets |out| = |small|^2
+// On entry:
+// small[i] < 2^64
+// On exit:
+// out[i] < 7 * 2^64 < 2^67
static void smallfelem_square(longfelem out, const smallfelem small) {
limb a;
uint64_t high, low;
@@ -459,23 +459,23 @@ static void smallfelem_square(longfelem out, const smallfelem small) {
out[7] = high;
}
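// Illustrative sketch (not part of this change): the elided body above builds
// its result from 64x64 -> 128-bit partial products, each split into 64-bit
// low/high halves before being accumulated into the output limbs. A minimal,
// hypothetical illustration of that primitive (again assuming the file's
// uint128_t typedef):
static void mul_64x64_128_example(uint64_t x, uint64_t y, uint64_t *lo,
                                  uint64_t *hi) {
  uint128_t product = (uint128_t)x * y;  // full 128-bit product
  *lo = (uint64_t)product;
  *hi = (uint64_t)(product >> 64);
}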
-/*felem_square sets |out| = |in|^2
- * On entry:
- * in[i] < 2^109
- * On exit:
- * out[i] < 7 * 2^64 < 2^67. */
+// felem_square sets |out| = |in|^2
+// On entry:
+// in[i] < 2^109
+// On exit:
+// out[i] < 7 * 2^64 < 2^67.
static void felem_square(longfelem out, const felem in) {
uint64_t small[4];
felem_shrink(small, in);
smallfelem_square(out, small);
}
-/* smallfelem_mul sets |out| = |small1| * |small2|
- * On entry:
- * small1[i] < 2^64
- * small2[i] < 2^64
- * On exit:
- * out[i] < 7 * 2^64 < 2^67. */
+// smallfelem_mul sets |out| = |small1| * |small2|
+// On entry:
+// small1[i] < 2^64
+// small2[i] < 2^64
+// On exit:
+// out[i] < 7 * 2^64 < 2^67.
static void smallfelem_mul(longfelem out, const smallfelem small1,
const smallfelem small2) {
limb a;
@@ -578,12 +578,12 @@ static void smallfelem_mul(longfelem out, const smallfelem small1,
out[7] = high;
}
-/* felem_mul sets |out| = |in1| * |in2|
- * On entry:
- * in1[i] < 2^109
- * in2[i] < 2^109
- * On exit:
- * out[i] < 7 * 2^64 < 2^67 */
+// felem_mul sets |out| = |in1| * |in2|
+// On entry:
+// in1[i] < 2^109
+// in2[i] < 2^109
+// On exit:
+// out[i] < 7 * 2^64 < 2^67
static void felem_mul(longfelem out, const felem in1, const felem in2) {
smallfelem small1, small2;
felem_shrink(small1, in1);
@@ -591,12 +591,12 @@ static void felem_mul(longfelem out, const felem in1, const felem in2) {
smallfelem_mul(out, small1, small2);
}
-/* felem_small_mul sets |out| = |small1| * |in2|
- * On entry:
- * small1[i] < 2^64
- * in2[i] < 2^109
- * On exit:
- * out[i] < 7 * 2^64 < 2^67 */
+// felem_small_mul sets |out| = |small1| * |in2|
+// On entry:
+// small1[i] < 2^64
+// in2[i] < 2^109
+// On exit:
+// out[i] < 7 * 2^64 < 2^67
static void felem_small_mul(longfelem out, const smallfelem small1,
const felem in2) {
smallfelem small2;
@@ -608,24 +608,24 @@ static void felem_small_mul(longfelem out, const smallfelem small1,
#define two100 (((limb)1) << 100)
#define two100m36p4 ((((limb)1) << 100) - (((limb)1) << 36) + (((limb)1) << 4))
-/* zero100 is 0 mod p */
+// zero100 is 0 mod p
static const felem zero100 = {two100m36m4, two100, two100m36p4, two100m36p4};
-/* Internal function for the different flavours of felem_reduce.
- * felem_reduce_ reduces the higher coefficients in[4]-in[7].
- * On entry:
- * out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7]
- * out[1] >= in[7] + 2^32*in[4]
- * out[2] >= in[5] + 2^32*in[5]
- * out[3] >= in[4] + 2^32*in[5] + 2^32*in[6]
- * On exit:
- * out[0] <= out[0] + in[4] + 2^32*in[5]
- * out[1] <= out[1] + in[5] + 2^33*in[6]
- * out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
- * out[3] <= out[3] + 2^32*in[4] + 3*in[7] */
+// Internal function for the different flavours of felem_reduce.
+// felem_reduce_ reduces the higher coefficients in[4]-in[7].
+// On entry:
+// out[0] >= in[6] + 2^32*in[6] + in[7] + 2^32*in[7]
+// out[1] >= in[7] + 2^32*in[4]
+// out[2] >= in[5] + 2^32*in[5]
+// out[3] >= in[4] + 2^32*in[5] + 2^32*in[6]
+// On exit:
+// out[0] <= out[0] + in[4] + 2^32*in[5]
+// out[1] <= out[1] + in[5] + 2^33*in[6]
+// out[2] <= out[2] + in[7] + 2*in[6] + 2^33*in[7]
+// out[3] <= out[3] + 2^32*in[4] + 3*in[7]
static void felem_reduce_(felem out, const longfelem in) {
int128_t c;
- /* combine common terms from below */
+ // combine common terms from below
c = in[4] + (in[5] << 32);
out[0] += c;
out[3] -= c;
@@ -634,35 +634,35 @@ static void felem_reduce_(felem out, const longfelem in) {
out[1] += c;
out[2] -= c;
- /* the remaining terms */
- /* 256: [(0,1),(96,-1),(192,-1),(224,1)] */
+ // the remaining terms
+ // 256: [(0,1),(96,-1),(192,-1),(224,1)]
out[1] -= (in[4] << 32);
out[3] += (in[4] << 32);
- /* 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)] */
+ // 320: [(32,1),(64,1),(128,-1),(160,-1),(224,-1)]
out[2] -= (in[5] << 32);
- /* 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)] */
+ // 384: [(0,-1),(32,-1),(96,2),(128,2),(224,-1)]
out[0] -= in[6];
out[0] -= (in[6] << 32);
out[1] += (in[6] << 33);
out[2] += (in[6] * 2);
out[3] -= (in[6] << 32);
- /* 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)] */
+ // 448: [(0,-1),(32,-1),(64,-1),(128,1),(160,2),(192,3)]
out[0] -= in[7];
out[0] -= (in[7] << 32);
out[2] += (in[7] << 33);
out[3] += (in[7] * 3);
}
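// Editorial note (not part of this change): the bracketed lists above encode
// congruences derived from p = 2^256 - 2^224 + 2^192 + 2^96 - 1. For example,
// 2^256 = 2^224 - 2^192 - 2^96 + 1 (mod p), which is written above as
// 256: [(0,1),(96,-1),(192,-1),(224,1)], i.e. a list of (bit offset,
// coefficient) pairs describing how the in[4]..in[7] terms are folded back
// into out[0]..out[3].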
-/* felem_reduce converts a longfelem into an felem.
- * To be called directly after felem_square or felem_mul.
- * On entry:
- * in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64
- * in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2*64
- * On exit:
- * out[i] < 2^101 */
+// felem_reduce converts a longfelem into an felem.
+// To be called directly after felem_square or felem_mul.
+// On entry:
+// in[0] < 2^64, in[1] < 3*2^64, in[2] < 5*2^64, in[3] < 7*2^64
+//   in[4] < 7*2^64, in[5] < 5*2^64, in[6] < 3*2^64, in[7] < 2^64
+// On exit:
+// out[i] < 2^101
static void felem_reduce(felem out, const longfelem in) {
out[0] = zero100[0] + in[0];
out[1] = zero100[1] + in[1];
@@ -671,22 +671,22 @@ static void felem_reduce(felem out, const longfelem in) {
felem_reduce_(out, in);
- /* out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
- * out[1] > 2^100 - 2^64 - 7*2^96 > 0
- * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
- * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
- *
- * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
- * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
- * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
- * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101 */
+ // out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
+ // out[1] > 2^100 - 2^64 - 7*2^96 > 0
+ // out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
+ // out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
+ //
+ // out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
+ // out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
+ // out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
+ // out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
}
-/* felem_reduce_zero105 converts a larger longfelem into an felem.
- * On entry:
- * in[0] < 2^71
- * On exit:
- * out[i] < 2^106 */
+// felem_reduce_zero105 converts a larger longfelem into an felem.
+// On entry:
+//   in[i] < 2^71
+// On exit:
+// out[i] < 2^106
static void felem_reduce_zero105(felem out, const longfelem in) {
out[0] = zero105[0] + in[0];
out[1] = zero105[1] + in[1];
@@ -695,19 +695,19 @@ static void felem_reduce_zero105(felem out, const longfelem in) {
felem_reduce_(out, in);
- /* out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
- * out[1] > 2^105 - 2^71 - 2^103 > 0
- * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
- * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
- *
- * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
- * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106 */
+ // out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
+ // out[1] > 2^105 - 2^71 - 2^103 > 0
+ // out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
+ // out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
+ //
+ // out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ // out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ // out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
+ // out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
}
-/* subtract_u64 sets *result = *result - v and *carry to one if the
- * subtraction underflowed. */
+// subtract_u64 sets *result = *result - v and *carry to one if the
+// subtraction underflowed.
static void subtract_u64(uint64_t *result, uint64_t *carry, uint64_t v) {
uint128_t r = *result;
r -= v;
@@ -715,28 +715,28 @@ static void subtract_u64(uint64_t *result, uint64_t *carry, uint64_t v) {
*result = (uint64_t)r;
}
-/* felem_contract converts |in| to its unique, minimal representation. On
- * entry: in[i] < 2^109. */
+// felem_contract converts |in| to its unique, minimal representation. On
+// entry: in[i] < 2^109.
static void felem_contract(smallfelem out, const felem in) {
uint64_t all_equal_so_far = 0, result = 0;
felem_shrink(out, in);
- /* small is minimal except that the value might be > p */
+  // out is minimal except that the value might be > p
all_equal_so_far--;
- /* We are doing a constant time test if out >= kPrime. We need to compare
- * each uint64_t, from most-significant to least significant. For each one, if
- * all words so far have been equal (m is all ones) then a non-equal
- * result is the answer. Otherwise we continue. */
+  // We are doing a constant time test if out >= kPrime. We need to compare
+  // each uint64_t, from most-significant to least-significant. For each one,
+  // if all words so far have been equal (all_equal_so_far is all ones) then
+  // a non-equal result is the answer. Otherwise we continue.
for (size_t i = 3; i < 4; i--) {
uint64_t equal;
uint128_t a = ((uint128_t)kPrime[i]) - out[i];
- /* if out[i] > kPrime[i] then a will underflow and the high 64-bits
- * will all be set. */
+ // if out[i] > kPrime[i] then a will underflow and the high 64-bits
+ // will all be set.
result |= all_equal_so_far & ((uint64_t)(a >> 64));
- /* if kPrime[i] == out[i] then |equal| will be all zeros and the
- * decrement will make it all ones. */
+ // if kPrime[i] == out[i] then |equal| will be all zeros and the
+ // decrement will make it all ones.
equal = kPrime[i] ^ out[i];
equal--;
equal &= equal << 32;
@@ -750,11 +750,11 @@ static void felem_contract(smallfelem out, const felem in) {
all_equal_so_far &= equal;
}
- /* if all_equal_so_far is still all ones then the two values are equal
- * and so out >= kPrime is true. */
+ // if all_equal_so_far is still all ones then the two values are equal
+ // and so out >= kPrime is true.
result |= all_equal_so_far;
- /* if out >= kPrime then we subtract kPrime. */
+ // if out >= kPrime then we subtract kPrime.
uint64_t carry;
subtract_u64(&out[0], &carry, result & kPrime[0]);
subtract_u64(&out[1], &carry, carry);
@@ -771,10 +771,10 @@ static void felem_contract(smallfelem out, const felem in) {
subtract_u64(&out[3], &carry, result & kPrime[3]);
}
-/* felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
- * otherwise.
- * On entry:
- * small[i] < 2^64 */
+// smallfelem_is_zero returns a limb with all bits set if |small| == 0 (mod p)
+// and 0 otherwise.
+// On entry:
+//   small[i] < 2^64
static limb smallfelem_is_zero(const smallfelem small) {
limb result;
uint64_t is_p;
@@ -807,118 +807,118 @@ static limb smallfelem_is_zero(const smallfelem small) {
return result;
}
-/* felem_inv calculates |out| = |in|^{-1}
- *
- * Based on Fermat's Little Theorem:
- * a^p = a (mod p)
- * a^{p-1} = 1 (mod p)
- * a^{p-2} = a^{-1} (mod p) */
+// felem_inv calculates |out| = |in|^{-1}
+//
+// Based on Fermat's Little Theorem:
+// a^p = a (mod p)
+// a^{p-1} = 1 (mod p)
+// a^{p-2} = a^{-1} (mod p)
static void felem_inv(felem out, const felem in) {
felem ftmp, ftmp2;
- /* each e_I will hold |in|^{2^I - 1} */
+ // each e_I will hold |in|^{2^I - 1}
felem e2, e4, e8, e16, e32, e64;
longfelem tmp;
felem_square(tmp, in);
- felem_reduce(ftmp, tmp); /* 2^1 */
+ felem_reduce(ftmp, tmp); // 2^1
felem_mul(tmp, in, ftmp);
- felem_reduce(ftmp, tmp); /* 2^2 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^2 - 2^0
felem_assign(e2, ftmp);
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^3 - 2^1 */
+ felem_reduce(ftmp, tmp); // 2^3 - 2^1
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^4 - 2^2 */
+ felem_reduce(ftmp, tmp); // 2^4 - 2^2
felem_mul(tmp, ftmp, e2);
- felem_reduce(ftmp, tmp); /* 2^4 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^4 - 2^0
felem_assign(e4, ftmp);
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^5 - 2^1 */
+ felem_reduce(ftmp, tmp); // 2^5 - 2^1
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^6 - 2^2 */
+ felem_reduce(ftmp, tmp); // 2^6 - 2^2
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^7 - 2^3 */
+ felem_reduce(ftmp, tmp); // 2^7 - 2^3
felem_square(tmp, ftmp);
- felem_reduce(ftmp, tmp); /* 2^8 - 2^4 */
+ felem_reduce(ftmp, tmp); // 2^8 - 2^4
felem_mul(tmp, ftmp, e4);
- felem_reduce(ftmp, tmp); /* 2^8 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^8 - 2^0
felem_assign(e8, ftmp);
for (size_t i = 0; i < 8; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^16 - 2^8 */
+ } // 2^16 - 2^8
felem_mul(tmp, ftmp, e8);
- felem_reduce(ftmp, tmp); /* 2^16 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^16 - 2^0
felem_assign(e16, ftmp);
for (size_t i = 0; i < 16; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^32 - 2^16 */
+ } // 2^32 - 2^16
felem_mul(tmp, ftmp, e16);
- felem_reduce(ftmp, tmp); /* 2^32 - 2^0 */
+ felem_reduce(ftmp, tmp); // 2^32 - 2^0
felem_assign(e32, ftmp);
for (size_t i = 0; i < 32; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^64 - 2^32 */
+ } // 2^64 - 2^32
felem_assign(e64, ftmp);
felem_mul(tmp, ftmp, in);
- felem_reduce(ftmp, tmp); /* 2^64 - 2^32 + 2^0 */
+ felem_reduce(ftmp, tmp); // 2^64 - 2^32 + 2^0
for (size_t i = 0; i < 192; i++) {
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- } /* 2^256 - 2^224 + 2^192 */
+ } // 2^256 - 2^224 + 2^192
felem_mul(tmp, e64, e32);
- felem_reduce(ftmp2, tmp); /* 2^64 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^64 - 2^0
for (size_t i = 0; i < 16; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
- } /* 2^80 - 2^16 */
+ } // 2^80 - 2^16
felem_mul(tmp, ftmp2, e16);
- felem_reduce(ftmp2, tmp); /* 2^80 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^80 - 2^0
for (size_t i = 0; i < 8; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
- } /* 2^88 - 2^8 */
+ } // 2^88 - 2^8
felem_mul(tmp, ftmp2, e8);
- felem_reduce(ftmp2, tmp); /* 2^88 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^88 - 2^0
for (size_t i = 0; i < 4; i++) {
felem_square(tmp, ftmp2);
felem_reduce(ftmp2, tmp);
- } /* 2^92 - 2^4 */
+ } // 2^92 - 2^4
felem_mul(tmp, ftmp2, e4);
- felem_reduce(ftmp2, tmp); /* 2^92 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^92 - 2^0
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^93 - 2^1 */
+ felem_reduce(ftmp2, tmp); // 2^93 - 2^1
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^94 - 2^2 */
+ felem_reduce(ftmp2, tmp); // 2^94 - 2^2
felem_mul(tmp, ftmp2, e2);
- felem_reduce(ftmp2, tmp); /* 2^94 - 2^0 */
+ felem_reduce(ftmp2, tmp); // 2^94 - 2^0
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^95 - 2^1 */
+ felem_reduce(ftmp2, tmp); // 2^95 - 2^1
felem_square(tmp, ftmp2);
- felem_reduce(ftmp2, tmp); /* 2^96 - 2^2 */
+ felem_reduce(ftmp2, tmp); // 2^96 - 2^2
felem_mul(tmp, ftmp2, in);
- felem_reduce(ftmp2, tmp); /* 2^96 - 3 */
+ felem_reduce(ftmp2, tmp); // 2^96 - 3
felem_mul(tmp, ftmp2, ftmp);
- felem_reduce(out, tmp); /* 2^256 - 2^224 + 2^192 + 2^96 - 3 */
+ felem_reduce(out, tmp); // 2^256 - 2^224 + 2^192 + 2^96 - 3
}
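// Illustrative sketch (not part of this change): a hypothetical self-test
// that cross-checks felem_inv against generic modular exponentiation using
// the same identity, a^{-1} = a^{p-2} (mod p). It assumes |a| is a nonzero,
// reduced field element (a[i] < 2^109) and |ctx| is a valid BN_CTX; error
// handling is omitted for brevity.
static int felem_inv_cross_check_example(const felem a, BN_CTX *ctx) {
  felem inv;
  smallfelem a_small, inv_small;
  felem_inv(inv, a);
  felem_contract(inv_small, inv);
  felem_contract(a_small, a);

  BIGNUM *p = BN_new(), *e = BN_new(), *a_bn = BN_new(), *want = BN_new(),
         *got = BN_new();
  smallfelem_to_BN(p, kPrime);
  smallfelem_to_BN(a_bn, a_small);
  smallfelem_to_BN(got, inv_small);
  BN_copy(e, p);
  BN_sub_word(e, 2);                  // e = p - 2
  BN_mod_exp(want, a_bn, e, p, ctx);  // want = a^(p-2) mod p
  int ok = BN_cmp(want, got) == 0;
  BN_free(p);
  BN_free(e);
  BN_free(a_bn);
  BN_free(want);
  BN_free(got);
  return ok;
}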
-/* Group operations
- * ----------------
- *
- * Building on top of the field operations we have the operations on the
- * elliptic curve group itself. Points on the curve are represented in Jacobian
- * coordinates. */
-
-/* point_double calculates 2*(x_in, y_in, z_in)
- *
- * The method is taken from:
- * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
- *
- * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
- * while x_out == y_in is not (maybe this works, but it's not tested). */
+// Group operations
+// ----------------
+//
+// Building on top of the field operations we have the operations on the
+// elliptic curve group itself. Points on the curve are represented in Jacobian
+// coordinates.
+
+// point_double calculates 2*(x_in, y_in, z_in)
+//
+// The method is taken from:
+// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
+//
+// Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
+// while x_out == y_in is not (maybe this works, but it's not tested).
static void point_double(felem x_out, felem y_out, felem z_out,
const felem x_in, const felem y_in, const felem z_in) {
longfelem tmp, tmp2;
@@ -926,77 +926,77 @@ static void point_double(felem x_out, felem y_out, felem z_out,
smallfelem small1, small2;
felem_assign(ftmp, x_in);
- /* ftmp[i] < 2^106 */
+ // ftmp[i] < 2^106
felem_assign(ftmp2, x_in);
- /* ftmp2[i] < 2^106 */
+ // ftmp2[i] < 2^106
- /* delta = z^2 */
+ // delta = z^2
felem_square(tmp, z_in);
felem_reduce(delta, tmp);
- /* delta[i] < 2^101 */
+ // delta[i] < 2^101
- /* gamma = y^2 */
+ // gamma = y^2
felem_square(tmp, y_in);
felem_reduce(gamma, tmp);
- /* gamma[i] < 2^101 */
+ // gamma[i] < 2^101
felem_shrink(small1, gamma);
- /* beta = x*gamma */
+ // beta = x*gamma
felem_small_mul(tmp, small1, x_in);
felem_reduce(beta, tmp);
- /* beta[i] < 2^101 */
+ // beta[i] < 2^101
- /* alpha = 3*(x-delta)*(x+delta) */
+ // alpha = 3*(x-delta)*(x+delta)
felem_diff(ftmp, delta);
- /* ftmp[i] < 2^105 + 2^106 < 2^107 */
+ // ftmp[i] < 2^105 + 2^106 < 2^107
felem_sum(ftmp2, delta);
- /* ftmp2[i] < 2^105 + 2^106 < 2^107 */
+ // ftmp2[i] < 2^105 + 2^106 < 2^107
felem_scalar(ftmp2, 3);
- /* ftmp2[i] < 3 * 2^107 < 2^109 */
+ // ftmp2[i] < 3 * 2^107 < 2^109
felem_mul(tmp, ftmp, ftmp2);
felem_reduce(alpha, tmp);
- /* alpha[i] < 2^101 */
+ // alpha[i] < 2^101
felem_shrink(small2, alpha);
- /* x' = alpha^2 - 8*beta */
+ // x' = alpha^2 - 8*beta
smallfelem_square(tmp, small2);
felem_reduce(x_out, tmp);
felem_assign(ftmp, beta);
felem_scalar(ftmp, 8);
- /* ftmp[i] < 8 * 2^101 = 2^104 */
+ // ftmp[i] < 8 * 2^101 = 2^104
felem_diff(x_out, ftmp);
- /* x_out[i] < 2^105 + 2^101 < 2^106 */
+ // x_out[i] < 2^105 + 2^101 < 2^106
- /* z' = (y + z)^2 - gamma - delta */
+ // z' = (y + z)^2 - gamma - delta
felem_sum(delta, gamma);
- /* delta[i] < 2^101 + 2^101 = 2^102 */
+ // delta[i] < 2^101 + 2^101 = 2^102
felem_assign(ftmp, y_in);
felem_sum(ftmp, z_in);
- /* ftmp[i] < 2^106 + 2^106 = 2^107 */
+ // ftmp[i] < 2^106 + 2^106 = 2^107
felem_square(tmp, ftmp);
felem_reduce(z_out, tmp);
felem_diff(z_out, delta);
- /* z_out[i] < 2^105 + 2^101 < 2^106 */
+ // z_out[i] < 2^105 + 2^101 < 2^106
- /* y' = alpha*(4*beta - x') - 8*gamma^2 */
+ // y' = alpha*(4*beta - x') - 8*gamma^2
felem_scalar(beta, 4);
- /* beta[i] < 4 * 2^101 = 2^103 */
+ // beta[i] < 4 * 2^101 = 2^103
felem_diff_zero107(beta, x_out);
- /* beta[i] < 2^107 + 2^103 < 2^108 */
+ // beta[i] < 2^107 + 2^103 < 2^108
felem_small_mul(tmp, small2, beta);
- /* tmp[i] < 7 * 2^64 < 2^67 */
+ // tmp[i] < 7 * 2^64 < 2^67
smallfelem_square(tmp2, small1);
- /* tmp2[i] < 7 * 2^64 */
+ // tmp2[i] < 7 * 2^64
longfelem_scalar(tmp2, 8);
- /* tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67 */
+ // tmp2[i] < 8 * 7 * 2^64 = 7 * 2^67
longfelem_diff(tmp, tmp2);
- /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
+ // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71
felem_reduce_zero105(y_out, tmp);
- /* y_out[i] < 2^106 */
+ // y_out[i] < 2^106
}
-/* point_double_small is the same as point_double, except that it operates on
- * smallfelems. */
+// point_double_small is the same as point_double, except that it operates on
+// smallfelems.
static void point_double_small(smallfelem x_out, smallfelem y_out,
smallfelem z_out, const smallfelem x_in,
const smallfelem y_in, const smallfelem z_in) {
@@ -1013,7 +1013,7 @@ static void point_double_small(smallfelem x_out, smallfelem y_out,
felem_shrink(z_out, felem_z_out);
}
-/* p256_copy_conditional copies in to out iff mask is all ones. */
+// p256_copy_conditional copies in to out iff mask is all ones.
static void p256_copy_conditional(felem out, const felem in, limb mask) {
for (size_t i = 0; i < NLIMBS; ++i) {
const limb tmp = mask & (in[i] ^ out[i]);
@@ -1021,7 +1021,7 @@ static void p256_copy_conditional(felem out, const felem in, limb mask) {
}
}
-/* copy_small_conditional copies in to out iff mask is all ones. */
+// copy_small_conditional copies in to out iff mask is all ones.
static void copy_small_conditional(felem out, const smallfelem in, limb mask) {
const uint64_t mask64 = mask;
for (size_t i = 0; i < NLIMBS; ++i) {
@@ -1029,16 +1029,16 @@ static void copy_small_conditional(felem out, const smallfelem in, limb mask) {
}
}
-/* point_add calcuates (x1, y1, z1) + (x2, y2, z2)
- *
- * The method is taken from:
- * http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
- * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
- *
- * This function includes a branch for checking whether the two input points
- * are equal, (while not equal to the point at infinity). This case never
- * happens during single point multiplication, so there is no timing leak for
- * ECDH or ECDSA signing. */
+// point_add calculates (x1, y1, z1) + (x2, y2, z2)
+//
+// The method is taken from:
+// http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
+// adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
+//
+// This function includes a branch for checking whether the two input points
+// are equal (while not equal to the point at infinity). This case never
+// happens during single point multiplication, so there is no timing leak for
+// ECDH or ECDSA signing.
static void point_add(felem x3, felem y3, felem z3, const felem x1,
const felem y1, const felem z1, const int mixed,
const smallfelem x2, const smallfelem y2,
@@ -1053,94 +1053,94 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1,
z1_is_zero = smallfelem_is_zero(small3);
z2_is_zero = smallfelem_is_zero(z2);
- /* ftmp = z1z1 = z1**2 */
+ // ftmp = z1z1 = z1**2
smallfelem_square(tmp, small3);
felem_reduce(ftmp, tmp);
- /* ftmp[i] < 2^101 */
+ // ftmp[i] < 2^101
felem_shrink(small1, ftmp);
if (!mixed) {
- /* ftmp2 = z2z2 = z2**2 */
+ // ftmp2 = z2z2 = z2**2
smallfelem_square(tmp, z2);
felem_reduce(ftmp2, tmp);
- /* ftmp2[i] < 2^101 */
+ // ftmp2[i] < 2^101
felem_shrink(small2, ftmp2);
felem_shrink(small5, x1);
- /* u1 = ftmp3 = x1*z2z2 */
+ // u1 = ftmp3 = x1*z2z2
smallfelem_mul(tmp, small5, small2);
felem_reduce(ftmp3, tmp);
- /* ftmp3[i] < 2^101 */
+ // ftmp3[i] < 2^101
- /* ftmp5 = z1 + z2 */
+ // ftmp5 = z1 + z2
felem_assign(ftmp5, z1);
felem_small_sum(ftmp5, z2);
- /* ftmp5[i] < 2^107 */
+ // ftmp5[i] < 2^107
- /* ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2 */
+ // ftmp5 = (z1 + z2)**2 - (z1z1 + z2z2) = 2z1z2
felem_square(tmp, ftmp5);
felem_reduce(ftmp5, tmp);
- /* ftmp2 = z2z2 + z1z1 */
+ // ftmp2 = z2z2 + z1z1
felem_sum(ftmp2, ftmp);
- /* ftmp2[i] < 2^101 + 2^101 = 2^102 */
+ // ftmp2[i] < 2^101 + 2^101 = 2^102
felem_diff(ftmp5, ftmp2);
- /* ftmp5[i] < 2^105 + 2^101 < 2^106 */
+ // ftmp5[i] < 2^105 + 2^101 < 2^106
- /* ftmp2 = z2 * z2z2 */
+ // ftmp2 = z2 * z2z2
smallfelem_mul(tmp, small2, z2);
felem_reduce(ftmp2, tmp);
- /* s1 = ftmp2 = y1 * z2**3 */
+  // s1 = ftmp6 = y1 * z2**3
felem_mul(tmp, y1, ftmp2);
felem_reduce(ftmp6, tmp);
- /* ftmp6[i] < 2^101 */
+ // ftmp6[i] < 2^101
} else {
- /* We'll assume z2 = 1 (special case z2 = 0 is handled later). */
+ // We'll assume z2 = 1 (special case z2 = 0 is handled later).
- /* u1 = ftmp3 = x1*z2z2 */
+ // u1 = ftmp3 = x1*z2z2
felem_assign(ftmp3, x1);
- /* ftmp3[i] < 2^106 */
+ // ftmp3[i] < 2^106
- /* ftmp5 = 2z1z2 */
+ // ftmp5 = 2z1z2
felem_assign(ftmp5, z1);
felem_scalar(ftmp5, 2);
- /* ftmp5[i] < 2*2^106 = 2^107 */
+ // ftmp5[i] < 2*2^106 = 2^107
- /* s1 = ftmp2 = y1 * z2**3 */
+    // s1 = ftmp6 = y1 * z2**3
felem_assign(ftmp6, y1);
- /* ftmp6[i] < 2^106 */
+ // ftmp6[i] < 2^106
}
- /* u2 = x2*z1z1 */
+ // u2 = x2*z1z1
smallfelem_mul(tmp, x2, small1);
felem_reduce(ftmp4, tmp);
- /* h = ftmp4 = u2 - u1 */
+ // h = ftmp4 = u2 - u1
felem_diff_zero107(ftmp4, ftmp3);
- /* ftmp4[i] < 2^107 + 2^101 < 2^108 */
+ // ftmp4[i] < 2^107 + 2^101 < 2^108
felem_shrink(small4, ftmp4);
x_equal = smallfelem_is_zero(small4);
- /* z_out = ftmp5 * h */
+ // z_out = ftmp5 * h
felem_small_mul(tmp, small4, ftmp5);
felem_reduce(z_out, tmp);
- /* z_out[i] < 2^101 */
+ // z_out[i] < 2^101
- /* ftmp = z1 * z1z1 */
+ // ftmp = z1 * z1z1
smallfelem_mul(tmp, small1, small3);
felem_reduce(ftmp, tmp);
- /* s2 = tmp = y2 * z1**3 */
+ // s2 = tmp = y2 * z1**3
felem_small_mul(tmp, y2, ftmp);
felem_reduce(ftmp5, tmp);
- /* r = ftmp5 = (s2 - s1)*2 */
+ // r = ftmp5 = (s2 - s1)*2
felem_diff_zero107(ftmp5, ftmp6);
- /* ftmp5[i] < 2^107 + 2^107 = 2^108 */
+ // ftmp5[i] < 2^107 + 2^107 = 2^108
felem_scalar(ftmp5, 2);
- /* ftmp5[i] < 2^109 */
+ // ftmp5[i] < 2^109
felem_shrink(small1, ftmp5);
y_equal = smallfelem_is_zero(small1);
@@ -1149,42 +1149,42 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1,
return;
}
- /* I = ftmp = (2h)**2 */
+ // I = ftmp = (2h)**2
felem_assign(ftmp, ftmp4);
felem_scalar(ftmp, 2);
- /* ftmp[i] < 2*2^108 = 2^109 */
+ // ftmp[i] < 2*2^108 = 2^109
felem_square(tmp, ftmp);
felem_reduce(ftmp, tmp);
- /* J = ftmp2 = h * I */
+ // J = ftmp2 = h * I
felem_mul(tmp, ftmp4, ftmp);
felem_reduce(ftmp2, tmp);
- /* V = ftmp4 = U1 * I */
+ // V = ftmp4 = U1 * I
felem_mul(tmp, ftmp3, ftmp);
felem_reduce(ftmp4, tmp);
- /* x_out = r**2 - J - 2V */
+ // x_out = r**2 - J - 2V
smallfelem_square(tmp, small1);
felem_reduce(x_out, tmp);
felem_assign(ftmp3, ftmp4);
felem_scalar(ftmp4, 2);
felem_sum(ftmp4, ftmp2);
- /* ftmp4[i] < 2*2^101 + 2^101 < 2^103 */
+ // ftmp4[i] < 2*2^101 + 2^101 < 2^103
felem_diff(x_out, ftmp4);
- /* x_out[i] < 2^105 + 2^101 */
+ // x_out[i] < 2^105 + 2^101
- /* y_out = r(V-x_out) - 2 * s1 * J */
+ // y_out = r(V-x_out) - 2 * s1 * J
felem_diff_zero107(ftmp3, x_out);
- /* ftmp3[i] < 2^107 + 2^101 < 2^108 */
+ // ftmp3[i] < 2^107 + 2^101 < 2^108
felem_small_mul(tmp, small1, ftmp3);
felem_mul(tmp2, ftmp6, ftmp2);
longfelem_scalar(tmp2, 2);
- /* tmp2[i] < 2*2^67 = 2^68 */
+ // tmp2[i] < 2*2^67 = 2^68
longfelem_diff(tmp, tmp2);
- /* tmp[i] < 2^67 + 2^70 + 2^40 < 2^71 */
+ // tmp[i] < 2^67 + 2^70 + 2^40 < 2^71
felem_reduce_zero105(y_out, tmp);
- /* y_out[i] < 2^106 */
+ // y_out[i] < 2^106
copy_small_conditional(x_out, x2, z1_is_zero);
p256_copy_conditional(x_out, x1, z2_is_zero);
@@ -1197,8 +1197,8 @@ static void point_add(felem x3, felem y3, felem z3, const felem x1,
felem_assign(z3, z_out);
}
-/* point_add_small is the same as point_add, except that it operates on
- * smallfelems. */
+// point_add_small is the same as point_add, except that it operates on
+// smallfelems.
static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
smallfelem x1, smallfelem y1, smallfelem z1,
smallfelem x2, smallfelem y2, smallfelem z2) {
@@ -1214,42 +1214,42 @@ static void point_add_small(smallfelem x3, smallfelem y3, smallfelem z3,
felem_shrink(z3, felem_z3);
}
-/* Base point pre computation
- * --------------------------
- *
- * Two different sorts of precomputed tables are used in the following code.
- * Each contain various points on the curve, where each point is three field
- * elements (x, y, z).
- *
- * For the base point table, z is usually 1 (0 for the point at infinity).
- * This table has 2 * 16 elements, starting with the following:
- * index | bits | point
- * ------+---------+------------------------------
- * 0 | 0 0 0 0 | 0G
- * 1 | 0 0 0 1 | 1G
- * 2 | 0 0 1 0 | 2^64G
- * 3 | 0 0 1 1 | (2^64 + 1)G
- * 4 | 0 1 0 0 | 2^128G
- * 5 | 0 1 0 1 | (2^128 + 1)G
- * 6 | 0 1 1 0 | (2^128 + 2^64)G
- * 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
- * 8 | 1 0 0 0 | 2^192G
- * 9 | 1 0 0 1 | (2^192 + 1)G
- * 10 | 1 0 1 0 | (2^192 + 2^64)G
- * 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
- * 12 | 1 1 0 0 | (2^192 + 2^128)G
- * 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
- * 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
- * 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
- * followed by a copy of this with each element multiplied by 2^32.
- *
- * The reason for this is so that we can clock bits into four different
- * locations when doing simple scalar multiplies against the base point,
- * and then another four locations using the second 16 elements.
- *
- * Tables for other points have table[i] = iG for i in 0 .. 16. */
-
-/* g_pre_comp is the table of precomputed base points */
+// Base point precomputation
+// -------------------------
+//
+// Two different sorts of precomputed tables are used in the following code.
+// Each contains various points on the curve, where each point is three field
+// elements (x, y, z).
+//
+// For the base point table, z is usually 1 (0 for the point at infinity).
+// This table has 2 * 16 elements, starting with the following:
+// index | bits | point
+// ------+---------+------------------------------
+// 0 | 0 0 0 0 | 0G
+// 1 | 0 0 0 1 | 1G
+// 2 | 0 0 1 0 | 2^64G
+// 3 | 0 0 1 1 | (2^64 + 1)G
+// 4 | 0 1 0 0 | 2^128G
+// 5 | 0 1 0 1 | (2^128 + 1)G
+// 6 | 0 1 1 0 | (2^128 + 2^64)G
+// 7 | 0 1 1 1 | (2^128 + 2^64 + 1)G
+// 8 | 1 0 0 0 | 2^192G
+// 9 | 1 0 0 1 | (2^192 + 1)G
+// 10 | 1 0 1 0 | (2^192 + 2^64)G
+// 11 | 1 0 1 1 | (2^192 + 2^64 + 1)G
+// 12 | 1 1 0 0 | (2^192 + 2^128)G
+// 13 | 1 1 0 1 | (2^192 + 2^128 + 1)G
+// 14 | 1 1 1 0 | (2^192 + 2^128 + 2^64)G
+// 15 | 1 1 1 1 | (2^192 + 2^128 + 2^64 + 1)G
+// followed by a copy of this with each element multiplied by 2^32.
+//
+// The reason for this is so that we can clock bits into four different
+// locations when doing simple scalar multiplies against the base point,
+// and then another four locations using the second 16 elements.
+//
+// Tables for other points have table[i] = iG for i in 0 .. 16.
+
+// g_pre_comp is the table of precomputed base points
static const smallfelem g_pre_comp[2][16][3] = {
{{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}},
{{0xf4a13945d898c296, 0x77037d812deb33a0, 0xf8bce6e563a440f2,
@@ -1404,8 +1404,8 @@ static const smallfelem g_pre_comp[2][16][3] = {
0x4ab5b6b2b8753f81},
{1, 0, 0, 0}}}};
-/* select_point selects the |idx|th point from a precomputation table and
- * copies it to out. */
+// select_point selects the |idx|th point from a precomputation table and
+// copies it to out.
static void select_point(const uint64_t idx, size_t size,
const smallfelem pre_comp[/*size*/][3],
smallfelem out[3]) {
@@ -1426,7 +1426,7 @@ static void select_point(const uint64_t idx, size_t size,
}
}
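// Illustrative sketch (not part of this change): select_point's elided body
// reads every table entry and masks in only the one whose index matches, so
// its memory access pattern does not depend on |idx|. A minimal, hypothetical
// version of the branchless "all-ones if equal" mask such code relies on:
static uint64_t eq_mask_example(uint64_t i, uint64_t idx) {
  uint64_t diff = i ^ idx;
  // For nonzero diff, (diff | 0-diff) has its top bit set, so the shift
  // yields 1 and the subtraction yields 0; for diff == 0 it yields all ones.
  return ((diff | (0 - diff)) >> 63) - 1;
}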
-/* get_bit returns the |i|th bit in |in| */
+// get_bit returns the |i|th bit in |in|
static char get_bit(const felem_bytearray in, int i) {
if (i < 0 || i >= 256) {
return 0;
@@ -1434,11 +1434,11 @@ static char get_bit(const felem_bytearray in, int i) {
return (in[i >> 3] >> (i & 7)) & 1;
}
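// Editorial note (not part of this change): in batch_mul below, the 4-bit
// index into g_pre_comp[0] is assembled from scalar bits i+192, i+128, i+64
// and i. If those bits are b3, b2, b1 and b0, entry b3b2b1b0 holds
// (b3*2^192 + b2*2^128 + b1*2^64 + b0)*G, matching the table layout described
// above, while the outer doubling loop supplies the remaining factor of 2^i;
// g_pre_comp[1] provides the same multiples scaled by 2^32 for bits i+224,
// i+160, i+96 and i+32.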
-/* Interleaved point multiplication using precomputed point multiples: The
- * small point multiples 0*P, 1*P, ..., 17*P are in p_pre_comp, the scalar
- * in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple
- * of the generator, using certain (large) precomputed multiples in g_pre_comp.
- * Output point (X, Y, Z) is stored in x_out, y_out, z_out. */
+// Interleaved point multiplication using precomputed point multiples: The
+// small point multiples 0*P, 1*P, ..., 16*P are in p_pre_comp, the scalar
+// in p_scalar, if non-NULL. If g_scalar is non-NULL, we also add this multiple
+// of the generator, using certain (large) precomputed multiples in g_pre_comp.
+// Output point (X, Y, Z) is stored in x_out, y_out, z_out.
static void batch_mul(felem x_out, felem y_out, felem z_out,
const uint8_t *p_scalar, const uint8_t *g_scalar,
const smallfelem p_pre_comp[17][3]) {
@@ -1447,29 +1447,29 @@ static void batch_mul(felem x_out, felem y_out, felem z_out,
uint64_t bits;
uint8_t sign, digit;
- /* set nq to the point at infinity */
+ // set nq to the point at infinity
OPENSSL_memset(nq, 0, 3 * sizeof(felem));
- /* Loop over both scalars msb-to-lsb, interleaving additions of multiples
- * of the generator (two in each of the last 32 rounds) and additions of p
- * (every 5th round). */
+ // Loop over both scalars msb-to-lsb, interleaving additions of multiples
+ // of the generator (two in each of the last 32 rounds) and additions of p
+ // (every 5th round).
- int skip = 1; /* save two point operations in the first round */
+ int skip = 1; // save two point operations in the first round
size_t i = p_scalar != NULL ? 255 : 31;
for (;;) {
- /* double */
+ // double
if (!skip) {
point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
}
- /* add multiples of the generator */
+ // add multiples of the generator
if (g_scalar != NULL && i <= 31) {
- /* first, look 32 bits upwards */
+ // first, look 32 bits upwards
bits = get_bit(g_scalar, i + 224) << 3;
bits |= get_bit(g_scalar, i + 160) << 2;
bits |= get_bit(g_scalar, i + 96) << 1;
bits |= get_bit(g_scalar, i + 32);
- /* select the point to add, in constant time */
+ // select the point to add, in constant time
select_point(bits, 16, g_pre_comp[1], tmp);
if (!skip) {
@@ -1482,18 +1482,18 @@ static void batch_mul(felem x_out, felem y_out, felem z_out,
skip = 0;
}
- /* second, look at the current position */
+ // second, look at the current position
bits = get_bit(g_scalar, i + 192) << 3;
bits |= get_bit(g_scalar, i + 128) << 2;
bits |= get_bit(g_scalar, i + 64) << 1;
bits |= get_bit(g_scalar, i);
- /* select the point to add, in constant time */
+ // select the point to add, in constant time
select_point(bits, 16, g_pre_comp[0], tmp);
point_add(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2], 1 /* mixed */, tmp[0],
tmp[1], tmp[2]);
}
- /* do other additions every 5 doublings */
+ // do other additions every 5 doublings
if (p_scalar != NULL && i % 5 == 0) {
bits = get_bit(p_scalar, i + 4) << 5;
bits |= get_bit(p_scalar, i + 3) << 4;
@@ -1503,10 +1503,10 @@ static void batch_mul(felem x_out, felem y_out, felem z_out,
bits |= get_bit(p_scalar, i - 1);
ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
- /* select the point to add or subtract, in constant time. */
+ // select the point to add or subtract, in constant time.
select_point(digit, 17, p_pre_comp, tmp);
- smallfelem_neg(ftmp, tmp[1]); /* (X, -Y, Z) is the negative
- * point */
+      smallfelem_neg(ftmp, tmp[1]);  // (X, -Y, Z) is the negative point
copy_small_conditional(ftmp, tmp[1], (((limb)sign) - 1));
felem_contract(tmp[1], ftmp);
@@ -1531,13 +1531,10 @@ static void batch_mul(felem x_out, felem y_out, felem z_out,
felem_assign(z_out, nq[2]);
}
-/******************************************************************************/
-/*
- * OPENSSL EC_METHOD FUNCTIONS
- */
+// OPENSSL EC_METHOD FUNCTIONS
-/* Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
- * (X/Z^2, Y/Z^3). */
+// Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
+// (X/Z^2, Y/Z^3).
static int ec_GFp_nistp256_point_get_affine_coordinates(const EC_GROUP *group,
const EC_POINT *point,
BIGNUM *x, BIGNUM *y,
@@ -1612,14 +1609,14 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
}
if (p != NULL && p_scalar != NULL) {
- /* We treat NULL scalars as 0, and NULL points as points at infinity, i.e.,
- * they contribute nothing to the linear combination. */
+ // We treat NULL scalars as 0, and NULL points as points at infinity, i.e.,
+ // they contribute nothing to the linear combination.
OPENSSL_memset(&p_secret, 0, sizeof(p_secret));
OPENSSL_memset(&p_pre_comp, 0, sizeof(p_pre_comp));
size_t num_bytes;
- /* Reduce g_scalar to 0 <= g_scalar < 2^256. */
+    // Reduce p_scalar to 0 <= p_scalar < 2^256.
if (BN_num_bits(p_scalar) > 256 || BN_is_negative(p_scalar)) {
- /* This is an unusual input, and we don't guarantee constant-timeness. */
+ // This is an unusual input, and we don't guarantee constant-timeness.
if (!BN_nnmod(tmp_scalar, p_scalar, &group->order, ctx)) {
OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
goto err;
@@ -1629,7 +1626,7 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
num_bytes = BN_bn2bin(p_scalar, tmp);
}
flip_endian(p_secret, tmp, num_bytes);
- /* Precompute multiples. */
+ // Precompute multiples.
if (!BN_to_felem(x_out, &p->X) ||
!BN_to_felem(y_out, &p->Y) ||
!BN_to_felem(z_out, &p->Z)) {
@@ -1657,10 +1654,10 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
size_t num_bytes;
OPENSSL_memset(g_secret, 0, sizeof(g_secret));
- /* reduce g_scalar to 0 <= g_scalar < 2^256 */
+ // reduce g_scalar to 0 <= g_scalar < 2^256
if (BN_num_bits(g_scalar) > 256 || BN_is_negative(g_scalar)) {
- /* this is an unusual input, and we don't guarantee
- * constant-timeness. */
+ // this is an unusual input, and we don't guarantee
+ // constant-timeness.
if (!BN_nnmod(tmp_scalar, g_scalar, &group->order, ctx)) {
OPENSSL_PUT_ERROR(EC, ERR_R_BN_LIB);
goto err;
@@ -1676,7 +1673,7 @@ static int ec_GFp_nistp256_points_mul(const EC_GROUP *group, EC_POINT *r,
g_scalar != NULL ? g_secret : NULL,
(const smallfelem(*)[3]) &p_pre_comp);
- /* reduce the output to its unique minimal representation */
+ // reduce the output to its unique minimal representation
felem_contract(x_in, x_out);
felem_contract(y_in, y_out);
felem_contract(z_in, z_out);
@@ -1708,4 +1705,4 @@ DEFINE_METHOD_FUNCTION(EC_METHOD, EC_GFp_nistp256_method) {
out->field_decode = NULL;
};
-#endif /* 64_BIT && !WINDOWS */
+#endif // 64_BIT && !WINDOWS