diff --git a/src/scalar.h b/src/scalar.h
index df0b40ea138..2f5ba0d447a 100644
--- a/src/scalar.h
+++ b/src/scalar.h
@@ -42,8 +42,8 @@ static void secp256k1_scalar_set_int(secp256k1_scalar_t *r, unsigned int v);
 /** Convert a scalar to a byte array. */
 static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a);
 
-/** Add two scalars together (modulo the group order). */
-static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
+/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
+static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
 
 /** Add a power of two to a scalar. The result is not allowed to overflow. */
 static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit);
diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h
index 5706e89bdb0..d1447752202 100644
--- a/src/scalar_4x64_impl.h
+++ b/src/scalar_4x64_impl.h
@@ -81,7 +81,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsig
     return overflow;
 }
 
-static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
     uint128_t t = (uint128_t)a->d[0] + b->d[0];
     r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)a->d[1] + b->d[1];
@@ -90,7 +90,10 @@ static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t
     r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
     t += (uint128_t)a->d[3] + b->d[3];
     r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
-    secp256k1_scalar_reduce(r, t + secp256k1_scalar_check_overflow(r));
+    int overflow = t + secp256k1_scalar_check_overflow(r);
+    VERIFY_CHECK(overflow == 0 || overflow == 1);
+    secp256k1_scalar_reduce(r, overflow);
+    return overflow;
 }
 
 static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h
index 8136480502f..915cbcddbe8 100644
--- a/src/scalar_8x32_impl.h
+++ b/src/scalar_8x32_impl.h
@@ -111,7 +111,7 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar_t *r, uint3
     return overflow;
 }
 
-static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
+static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
     uint64_t t = (uint64_t)a->d[0] + b->d[0];
     r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)a->d[1] + b->d[1];
@@ -128,7 +128,10 @@ static void secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t
     r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
     t += (uint64_t)a->d[7] + b->d[7];
     r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
-    secp256k1_scalar_reduce(r, t + secp256k1_scalar_check_overflow(r));
+    int overflow = t + secp256k1_scalar_check_overflow(r);
+    VERIFY_CHECK(overflow == 0 || overflow == 1);
+    secp256k1_scalar_reduce(r, overflow);
+    return overflow;
 }
 
 static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit) {
diff --git a/src/tests.c b/src/tests.c
index 0836e1cfe04..f41046c4958 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -346,8 +346,7 @@ void scalar_test(void) {
             secp256k1_scalar_add(&b, &b, &b);
         }
         secp256k1_scalar_t r1 = s1, r2 = s1;
-        secp256k1_scalar_add(&r1, &r1, &b);
-        if (!(secp256k1_scalar_get_bits(&s1, 255, 1) == 1 && secp256k1_scalar_get_bits(&r1, 255, 1) == 0)) {
+        if (!secp256k1_scalar_add(&r1, &r1, &b)) {
             /* No overflow happened. */
             secp256k1_scalar_add_bit(&r2, bit);
             CHECK(secp256k1_scalar_eq(&r1, &r2));
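
For context, a minimal sketch of how a caller can consume the new return value, as the updated tests.c hunk does. The scalar_add_report helper below is hypothetical, not part of the patch, and assumes the internal scalar API from scalar.h is in scope (as it is in any _impl.h file or in tests.c):

/* Hypothetical usage sketch, not part of the patch. With this change the
 * overflow bit comes straight out of the addition, instead of being
 * re-derived afterwards from the operands' top bits. */
static void scalar_add_report(const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
    secp256k1_scalar_t r;
    if (secp256k1_scalar_add(&r, a, b)) {
        /* a + b reached the group order n; r holds (a + b) mod n. */
    } else {
        /* a + b stayed below n; r is the plain sum. */
    }
}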