Merge pull request #317

cfe0ed9 Fix miscellaneous style nits that irritate overactive static analysis. (Gregory Maxwell)
pull/11871/head
Pieter Wuille 9 years ago
commit fe0d463494
No known key found for this signature in database
GPG Key ID: 57896D2FF8F0B657

@@ -55,7 +55,7 @@
* Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335
*/
static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
int global_sign = 1;
int global_sign;
int skew = 0;
int word = 0;
/* 1 2 3 */
@@ -63,6 +63,10 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
int u;
#ifdef USE_ENDOMORPHISM
int flip;
int bit;
secp256k1_scalar neg_s;
int not_neg_one;
/* If we are using the endomorphism, we cannot handle even numbers by negating
* them, since we are working with 128-bit numbers whose negations would be 256
* bits, eliminating the performance advantage. Instead we use a technique from
@@ -70,12 +74,10 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w) {
* or 2 (for odd) to the number we are encoding, then compensating after the
* multiplication. */
/* Negative 128-bit numbers will be negated, since otherwise they are 256-bit */
int flip = secp256k1_scalar_is_high(&s);
flip = secp256k1_scalar_is_high(&s);
/* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
int bit = flip ^ (s.d[0] & 1);
bit = flip ^ (s.d[0] & 1);
/* We check for negative one, since adding 2 to it will cause an overflow */
secp256k1_scalar neg_s;
int not_neg_one;
secp256k1_scalar_negate(&neg_s, &s);
not_neg_one = !secp256k1_scalar_is_one(&neg_s);
secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);

@@ -105,10 +105,10 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a);
static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k1_fe *a);
/** Convert a field element to the storage type. */
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe*);
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a);
/** Convert a field element back from the storage type. */
static void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage*);
static void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
static void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag);

@@ -21,14 +21,14 @@ typedef struct {
/* Unpacks a constant into a overlapping multi-limbed FE element. */
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
(d0) & 0x3FFFFFFUL, \
(((uint32_t)d0) >> 26) | ((uint32_t)(d1) & 0xFFFFFUL) << 6, \
(((uint32_t)d1) >> 20) | ((uint32_t)(d2) & 0x3FFFUL) << 12, \
(((uint32_t)d2) >> 14) | ((uint32_t)(d3) & 0xFFUL) << 18, \
(((uint32_t)d3) >> 8) | ((uint32_t)(d4) & 0x3UL) << 24, \
(((uint32_t)d0) >> 26) | (((uint32_t)(d1) & 0xFFFFFUL) << 6), \
(((uint32_t)d1) >> 20) | (((uint32_t)(d2) & 0x3FFFUL) << 12), \
(((uint32_t)d2) >> 14) | (((uint32_t)(d3) & 0xFFUL) << 18), \
(((uint32_t)d3) >> 8) | (((uint32_t)(d4) & 0x3UL) << 24), \
(((uint32_t)d4) >> 2) & 0x3FFFFFFUL, \
(((uint32_t)d4) >> 28) | ((uint32_t)(d5) & 0x3FFFFFUL) << 4, \
(((uint32_t)d5) >> 22) | ((uint32_t)(d6) & 0xFFFFUL) << 10, \
(((uint32_t)d6) >> 16) | ((uint32_t)(d7) & 0x3FFUL) << 16, \
(((uint32_t)d4) >> 28) | (((uint32_t)(d5) & 0x3FFFFFUL) << 4), \
(((uint32_t)d5) >> 22) | (((uint32_t)(d6) & 0xFFFFUL) << 10), \
(((uint32_t)d6) >> 16) | (((uint32_t)(d7) & 0x3FFUL) << 16), \
(((uint32_t)d7) >> 10) \
}

@@ -252,7 +252,7 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
t9 &= 0x03FFFFFUL;
t1 += (x << 6);
t1 += (t0 >> 26); t0 = z0;
t1 += (t0 >> 26);
t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;

@@ -20,11 +20,11 @@ typedef struct {
/* Unpacks a constant into a overlapping multi-limbed FE element. */
#define SECP256K1_FE_CONST_INNER(d7, d6, d5, d4, d3, d2, d1, d0) { \
(d0) | ((uint64_t)(d1) & 0xFFFFFUL) << 32, \
((uint64_t)(d1) >> 20) | ((uint64_t)(d2)) << 12 | ((uint64_t)(d3) & 0xFFUL) << 44, \
((uint64_t)(d3) >> 8) | ((uint64_t)(d4) & 0xFFFFFFFUL) << 24, \
((uint64_t)(d4) >> 28) | ((uint64_t)(d5)) << 4 | ((uint64_t)(d6) & 0xFFFFUL) << 36, \
((uint64_t)(d6) >> 16) | ((uint64_t)(d7)) << 16 \
(d0) | (((uint64_t)(d1) & 0xFFFFFUL) << 32), \
((uint64_t)(d1) >> 20) | (((uint64_t)(d2)) << 12) | (((uint64_t)(d3) & 0xFFUL) << 44), \
((uint64_t)(d3) >> 8) | (((uint64_t)(d4) & 0xFFFFFFFUL) << 24), \
((uint64_t)(d4) >> 28) | (((uint64_t)(d5)) << 4) | (((uint64_t)(d6) & 0xFFFFUL) << 36), \
((uint64_t)(d6) >> 16) | (((uint64_t)(d7)) << 16) \
}
#ifdef VERIFY
@@ -38,10 +38,10 @@ typedef struct {
} secp256k1_fe_storage;
#define SECP256K1_FE_STORAGE_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{ \
(d0) | ((uint64_t)(d1)) << 32, \
(d2) | ((uint64_t)(d3)) << 32, \
(d4) | ((uint64_t)(d5)) << 32, \
(d6) | ((uint64_t)(d7)) << 32 \
(d0) | (((uint64_t)(d1)) << 32), \
(d2) | (((uint64_t)(d3)) << 32), \
(d4) | (((uint64_t)(d5)) << 32), \
(d6) | (((uint64_t)(d7)) << 32) \
}}
#endif

@@ -213,8 +213,8 @@ static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *a) {
#elif defined(USE_FIELD_INV_NUM)
secp256k1_num n, m;
static const secp256k1_fe negone = SECP256K1_FE_CONST(
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFC2E
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL
);
/* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
static const unsigned char prime[32] = {
@@ -260,7 +260,7 @@ static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe *r, const secp256k
secp256k1_fe_inv_var(&u, &r[--i]);
while (i > 0) {
int j = i--;
size_t j = i--;
secp256k1_fe_mul(&r[j], &r[i], &u);
secp256k1_fe_mul(&u, &u, &a[j]);
}

@@ -127,10 +127,10 @@ static void secp256k1_gej_clear(secp256k1_gej *r);
static void secp256k1_ge_clear(secp256k1_ge *r);
/** Convert a group element to the storage type. */
static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge*);
static void secp256k1_ge_to_storage(secp256k1_ge_storage *r, const secp256k1_ge *a);
/** Convert a group element back from the storage type. */
static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage*);
static void secp256k1_ge_from_storage(secp256k1_ge *r, const secp256k1_ge_storage *a);
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. */
static void secp256k1_ge_storage_cmov(secp256k1_ge_storage *r, const secp256k1_ge_storage *a, int flag);

@@ -89,7 +89,6 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd
int recid;
int ret = 0;
int overflow = 0;
unsigned int count = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(msg32 != NULL);
@@ -102,6 +101,7 @@ int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecd
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
/* Fail if the secret key is invalid. */
if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
unsigned int count = 0;
secp256k1_scalar_set_b32(&msg, msg32, NULL);
while (1) {
unsigned char nonce32[32];

@@ -17,7 +17,7 @@ static void secp256k1_schnorr_msghash_sha256(unsigned char *h32, const unsigned
secp256k1_sha256_finalize(&sha, h32);
}
static const unsigned char secp256k1_schnorr_algo16[16] = "Schnorr+SHA256 ";
static const unsigned char secp256k1_schnorr_algo16[17] = "Schnorr+SHA256 ";
int secp256k1_schnorr_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
secp256k1_scalar sec, non;

@@ -232,12 +232,12 @@ static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const se
}
static void secp256k1_num_shift(secp256k1_num *r, int bits) {
int i;
if (bits % GMP_NUMB_BITS) {
/* Shift within limbs. */
mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
}
if (bits >= GMP_NUMB_BITS) {
int i;
/* Shift full limbs. */
for (i = 0; i < r->limbs; i++) {
int index = i + (bits / GMP_NUMB_BITS);

@@ -738,7 +738,7 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
extract(l[5]);
muladd_fast(a->d[3], b->d[3]);
extract_fast(l[6]);
VERIFY_CHECK(c1 <= 0);
VERIFY_CHECK(c1 == 0);
l[7] = c0;
#endif
}

@@ -270,7 +270,6 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature
secp256k1_scalar sec, non, msg;
int ret = 0;
int overflow = 0;
unsigned int count = 0;
VERIFY_CHECK(ctx != NULL);
ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
ARG_CHECK(msg32 != NULL);
@@ -283,6 +282,7 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
/* Fail if the secret key is invalid. */
if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
unsigned int count = 0;
secp256k1_scalar_set_b32(&msg, msg32, NULL);
while (1) {
unsigned char nonce32[32];
@@ -292,7 +292,7 @@ int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature
}
secp256k1_scalar_set_b32(&non, nonce32, &overflow);
memset(nonce32, 0, 32);
if (!secp256k1_scalar_is_zero(&non) && !overflow) {
if (!overflow && !secp256k1_scalar_is_zero(&non)) {
if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) {
break;
}
@@ -320,7 +320,7 @@ int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char
(void)ctx;
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
ret = !secp256k1_scalar_is_zero(&sec) && !overflow;
ret = !overflow && !secp256k1_scalar_is_zero(&sec);
secp256k1_scalar_clear(&sec);
return ret;
}
@@ -337,7 +337,7 @@ int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *p
ARG_CHECK(seckey != NULL);
secp256k1_scalar_set_b32(&sec, seckey, &overflow);
ret = !overflow & !secp256k1_scalar_is_zero(&sec);
ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec));
secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec);
secp256k1_ge_set_gej(&p, &pj);
secp256k1_pubkey_save(pubkey, &p);
@@ -361,7 +361,7 @@ int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *
secp256k1_scalar_set_b32(&term, tweak, &overflow);
secp256k1_scalar_set_b32(&sec, seckey, NULL);
ret = secp256k1_eckey_privkey_tweak_add(&sec, &term) && !overflow;
ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term);
if (ret) {
secp256k1_scalar_get_b32(seckey, &sec);
}
@@ -406,7 +406,7 @@ int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *
secp256k1_scalar_set_b32(&factor, tweak, &overflow);
secp256k1_scalar_set_b32(&sec, seckey, NULL);
ret = secp256k1_eckey_privkey_tweak_mul(&sec, &factor) && !overflow;
ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor);
if (ret) {
secp256k1_scalar_get_b32(seckey, &sec);
}

@@ -501,9 +501,9 @@ void scalar_test(void) {
/* test secp256k1_scalar_shr_int */
secp256k1_scalar r;
int i;
int low;
random_scalar_order_test(&r);
for (i = 0; i < 100; ++i) {
int low;
int shift = 1 + (secp256k1_rand32() % 15);
int expected = r.d[0] % (1 << shift);
low = secp256k1_scalar_shr_int(&r, shift);
@@ -1329,7 +1329,7 @@ void run_ecmult_chain(void) {
secp256k1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
secp256k1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
/* actual points */
secp256k1_gej x = a;
secp256k1_gej x;
secp256k1_gej x2;
int i;
@@ -1960,7 +1960,7 @@ void test_random_pubkeys(void) {
}
r>>=8;
if (len == 65) {
in[0] = (r & 2) ? 4 : (r & 1? 6 : 7);
in[0] = (r & 2) ? 4 : ((r & 1)? 6 : 7);
} else {
in[0] = (r & 1) ? 2 : 3;
}
@@ -2281,7 +2281,7 @@ int main(int argc, char **argv) {
if (secp256k1_rand32() & 1) {
secp256k1_rand256(run32);
CHECK(secp256k1_context_randomize(ctx, secp256k1_rand32() & 1 ? run32 : NULL));
CHECK(secp256k1_context_randomize(ctx, (secp256k1_rand32() & 1) ? run32 : NULL));
}
run_sha256_tests();

Loading…
Cancel
Save