Merge pull request #230

2632019 Brace all the if/for/while. (Gregory Maxwell)
Pieter Wuille 10 years ago
commit 41603aa727
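The commit is essentially mechanical: single-statement if, for, and while bodies gain braces throughout. The pattern, taken from the first check touched below:

    /* Before */
    if (sig[0] != 0x30) return 0;

    /* After */
    if (sig[0] != 0x30) {
        return 0;
    }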

@ -53,35 +53,59 @@ static int secp256k1_ecdsa_sig_parse(secp256k1_ecdsa_sig_t *r, const unsigned ch
int lenr;
int lens;
int overflow;
if (sig[0] != 0x30) return 0;
if (sig[0] != 0x30) {
return 0;
}
lenr = sig[3];
if (5+lenr >= size) return 0;
if (5+lenr >= size) {
return 0;
}
lens = sig[lenr+5];
if (sig[1] != lenr+lens+4) return 0;
if (lenr+lens+6 > size) return 0;
if (sig[2] != 0x02) return 0;
if (lenr == 0) return 0;
if (sig[lenr+4] != 0x02) return 0;
if (lens == 0) return 0;
if (sig[1] != lenr+lens+4) {
return 0;
}
if (lenr+lens+6 > size) {
return 0;
}
if (sig[2] != 0x02) {
return 0;
}
if (lenr == 0) {
return 0;
}
if (sig[lenr+4] != 0x02) {
return 0;
}
if (lens == 0) {
return 0;
}
sp = sig + 6 + lenr;
while (lens > 0 && sp[0] == 0) {
lens--;
sp++;
}
if (lens > 32) return 0;
if (lens > 32) {
return 0;
}
rp = sig + 4;
while (lenr > 0 && rp[0] == 0) {
lenr--;
rp++;
}
if (lenr > 32) return 0;
if (lenr > 32) {
return 0;
}
memcpy(ra + 32 - lenr, rp, lenr);
memcpy(sa + 32 - lens, sp, lens);
overflow = 0;
secp256k1_scalar_set_b32(&r->r, ra, &overflow);
if (overflow) return 0;
if (overflow) {
return 0;
}
secp256k1_scalar_set_b32(&r->s, sa, &overflow);
if (overflow) return 0;
if (overflow) {
return 0;
}
return 1;
}
@ -93,8 +117,9 @@ static int secp256k1_ecdsa_sig_serialize(unsigned char *sig, int *size, const se
secp256k1_scalar_get_b32(&s[1], &a->s);
while (lenR > 1 && rp[0] == 0 && rp[1] < 0x80) { lenR--; rp++; }
while (lenS > 1 && sp[0] == 0 && sp[1] < 0x80) { lenS--; sp++; }
if (*size < 6+lenS+lenR)
if (*size < 6+lenS+lenR) {
return 0;
}
*size = 6 + lenS + lenR;
sig[0] = 0x30;
sig[1] = 4 + lenS + lenR;
@ -114,8 +139,9 @@ static int secp256k1_ecdsa_sig_verify(const secp256k1_ecdsa_sig_t *sig, const se
secp256k1_gej_t pubkeyj;
secp256k1_gej_t pr;
if (secp256k1_scalar_is_zero(&sig->r) || secp256k1_scalar_is_zero(&sig->s))
if (secp256k1_scalar_is_zero(&sig->r) || secp256k1_scalar_is_zero(&sig->s)) {
return 0;
}
secp256k1_scalar_inverse_var(&sn, &sig->s);
secp256k1_scalar_mul(&u1, &sn, message);
@ -168,18 +194,21 @@ static int secp256k1_ecdsa_sig_recover(const secp256k1_ecdsa_sig_t *sig, secp256
secp256k1_scalar_t rn, u1, u2;
secp256k1_gej_t qj;
if (secp256k1_scalar_is_zero(&sig->r) || secp256k1_scalar_is_zero(&sig->s))
if (secp256k1_scalar_is_zero(&sig->r) || secp256k1_scalar_is_zero(&sig->s)) {
return 0;
}
secp256k1_scalar_get_b32(brx, &sig->r);
VERIFY_CHECK(secp256k1_fe_set_b32(&fx, brx)); /* brx comes from a scalar, so is less than the order; certainly less than p */
if (recid & 2) {
if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0)
if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
return 0;
}
secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe);
}
if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1))
if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) {
return 0;
}
secp256k1_gej_set_ge(&xj, &x);
secp256k1_scalar_inverse_var(&rn, &sig->r);
secp256k1_scalar_mul(&u1, &rn, message);
@ -209,8 +238,9 @@ static int secp256k1_ecdsa_sig_sign(secp256k1_ecdsa_sig_t *sig, const secp256k1_
secp256k1_ge_clear(&r);
return 0;
}
if (recid)
if (recid) {
*recid = (overflow ? 2 : 0) | (secp256k1_fe_is_odd(&r.y) ? 1 : 0);
}
secp256k1_scalar_mul(&n, &sig->r, seckey);
secp256k1_scalar_add(&n, &n, message);
secp256k1_scalar_inverse(&sig->s, nonce);
@ -218,12 +248,14 @@ static int secp256k1_ecdsa_sig_sign(secp256k1_ecdsa_sig_t *sig, const secp256k1_
secp256k1_scalar_clear(&n);
secp256k1_gej_clear(&rp);
secp256k1_ge_clear(&r);
if (secp256k1_scalar_is_zero(&sig->s))
if (secp256k1_scalar_is_zero(&sig->s)) {
return 0;
}
if (secp256k1_scalar_is_high(&sig->s)) {
secp256k1_scalar_negate(&sig->s, &sig->s);
if (recid)
if (recid) {
*recid ^= 1;
}
}
return 1;
}
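Why brace even one-line bodies? A hypothetical illustration, not code from this repository, of the two classic hazards that blanket bracing removes: a dangling else binding to the nearest if rather than the one the indentation suggests, and a later edit that appends a second statement which silently falls outside the conditional.

    #include <stdio.h>

    /* Hypothetical helper, used only for this illustration. */
    static int looks_valid(int x) { return x >= 0; }

    int main(void) {
        int strict = 0;
        int x = -1;

        /* Hazard 1: dangling else.  The indentation suggests "print 'rejected'
         * whenever strict mode is off", but the else binds to the nearest if,
         * so "rejected" is printed only when strict is on AND x is invalid;
         * with strict == 0 nothing is printed at all. */
        if (strict)
            if (looks_valid(x))
                printf("accepted\n");
        else
            printf("rejected\n");

        /* Hazard 2: an appended statement.  Only the first printf is guarded;
         * "done" is printed unconditionally despite the indentation. */
        if (looks_valid(x))
            printf("processing\n");
            printf("done\n");

        return 0;
    }

With braces on every body, neither mistake can be written by accident, which is presumably the motivation for bracing everything.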

@ -24,8 +24,9 @@ static int secp256k1_eckey_pubkey_parse(secp256k1_ge_t *elem, const unsigned cha
return 0;
}
secp256k1_ge_set_xy(elem, &x, &y);
if ((pub[0] == 0x06 || pub[0] == 0x07) && secp256k1_fe_is_odd(&y) != (pub[0] == 0x07))
if ((pub[0] == 0x06 || pub[0] == 0x07) && secp256k1_fe_is_odd(&y) != (pub[0] == 0x07)) {
return 0;
}
return secp256k1_ge_is_valid_var(elem);
} else {
return 0;
@ -57,29 +58,36 @@ static int secp256k1_eckey_privkey_parse(secp256k1_scalar_t *key, const unsigned
int len = 0;
int overflow = 0;
/* sequence header */
if (end < privkey+1 || *privkey != 0x30)
if (end < privkey+1 || *privkey != 0x30) {
return 0;
}
privkey++;
/* sequence length constructor */
if (end < privkey+1 || !(*privkey & 0x80))
if (end < privkey+1 || !(*privkey & 0x80)) {
return 0;
}
lenb = *privkey & ~0x80; privkey++;
if (lenb < 1 || lenb > 2)
if (lenb < 1 || lenb > 2) {
return 0;
if (end < privkey+lenb)
}
if (end < privkey+lenb) {
return 0;
}
/* sequence length */
len = privkey[lenb-1] | (lenb > 1 ? privkey[lenb-2] << 8 : 0);
privkey += lenb;
if (end < privkey+len)
if (end < privkey+len) {
return 0;
}
/* sequence element 0: version number (=1) */
if (end < privkey+3 || privkey[0] != 0x02 || privkey[1] != 0x01 || privkey[2] != 0x01)
if (end < privkey+3 || privkey[0] != 0x02 || privkey[1] != 0x01 || privkey[2] != 0x01) {
return 0;
}
privkey += 3;
/* sequence element 1: octet string, up to 32 bytes */
if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1])
if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) {
return 0;
}
memcpy(c + 32 - privkey[1], privkey + 2, privkey[1]);
secp256k1_scalar_set_b32(key, c, &overflow);
memset(c, 0, 32);
@ -148,8 +156,9 @@ static int secp256k1_eckey_privkey_serialize(unsigned char *privkey, int *privke
static int secp256k1_eckey_privkey_tweak_add(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak) {
secp256k1_scalar_add(key, key, tweak);
if (secp256k1_scalar_is_zero(key))
if (secp256k1_scalar_is_zero(key)) {
return 0;
}
return 1;
}
@ -160,15 +169,17 @@ static int secp256k1_eckey_pubkey_tweak_add(secp256k1_ge_t *key, const secp256k1
secp256k1_scalar_set_int(&one, 1);
secp256k1_ecmult(&pt, &pt, &one, tweak);
if (secp256k1_gej_is_infinity(&pt))
if (secp256k1_gej_is_infinity(&pt)) {
return 0;
}
secp256k1_ge_set_gej(key, &pt);
return 1;
}
static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar_t *key, const secp256k1_scalar_t *tweak) {
if (secp256k1_scalar_is_zero(tweak))
if (secp256k1_scalar_is_zero(tweak)) {
return 0;
}
secp256k1_scalar_mul(key, key, tweak);
return 1;
@ -177,8 +188,9 @@ static int secp256k1_eckey_privkey_tweak_mul(secp256k1_scalar_t *key, const secp
static int secp256k1_eckey_pubkey_tweak_mul(secp256k1_ge_t *key, const secp256k1_scalar_t *tweak) {
secp256k1_scalar_t zero;
secp256k1_gej_t pt;
if (secp256k1_scalar_is_zero(tweak))
if (secp256k1_scalar_is_zero(tweak)) {
return 0;
}
secp256k1_scalar_set_int(&zero, 0);
secp256k1_gej_set_ge(&pt, key);

@ -35,8 +35,9 @@ static void secp256k1_ecmult_gen_start(void) {
secp256k1_gej_t nums_gej;
secp256k1_ecmult_gen_consts_t *ret;
int i, j;
if (secp256k1_ecmult_gen_consts != NULL)
if (secp256k1_ecmult_gen_consts != NULL) {
return;
}
/* Allocate the precomputation table. */
ret = (secp256k1_ecmult_gen_consts_t*)checked_malloc(sizeof(secp256k1_ecmult_gen_consts_t));
@ -95,8 +96,9 @@ static void secp256k1_ecmult_gen_start(void) {
static void secp256k1_ecmult_gen_stop(void) {
secp256k1_ecmult_gen_consts_t *c;
if (secp256k1_ecmult_gen_consts == NULL)
if (secp256k1_ecmult_gen_consts == NULL) {
return;
}
c = (secp256k1_ecmult_gen_consts_t*)secp256k1_ecmult_gen_consts;
secp256k1_ecmult_gen_consts = NULL;

@ -41,8 +41,9 @@ static void secp256k1_ecmult_table_precomp_gej_var(secp256k1_gej_t *pre, const s
int i;
pre[0] = *a;
secp256k1_gej_double_var(&d, &pre[0]);
for (i = 1; i < (1 << (w-2)); i++)
for (i = 1; i < (1 << (w-2)); i++) {
secp256k1_gej_add_var(&pre[i], &d, &pre[i-1]);
}
}
static void secp256k1_ecmult_table_precomp_ge_storage_var(secp256k1_ge_storage_t *pre, const secp256k1_gej_t *a, int w) {
@ -73,18 +74,19 @@ static void secp256k1_ecmult_table_precomp_ge_storage_var(secp256k1_ge_storage_t
VERIFY_CHECK(((n) & 1) == 1); \
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
if ((n) > 0) \
if ((n) > 0) { \
*(r) = (pre)[((n)-1)/2]; \
else \
} else { \
secp256k1_gej_neg((r), &(pre)[(-(n)-1)/2]); \
} \
} while(0)
#define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \
VERIFY_CHECK(((n) & 1) == 1); \
VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
if ((n) > 0) \
if ((n) > 0) { \
secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \
else {\
} else { \
secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \
secp256k1_ge_neg((r), (r)); \
} \
@ -103,8 +105,9 @@ static const secp256k1_ecmult_consts_t *secp256k1_ecmult_consts = NULL;
static void secp256k1_ecmult_start(void) {
secp256k1_gej_t gj;
secp256k1_ecmult_consts_t *ret;
if (secp256k1_ecmult_consts != NULL)
if (secp256k1_ecmult_consts != NULL) {
return;
}
/* Allocate the precomputation table. */
ret = (secp256k1_ecmult_consts_t*)checked_malloc(sizeof(secp256k1_ecmult_consts_t));
@ -122,8 +125,9 @@ static void secp256k1_ecmult_start(void) {
int i;
/* calculate 2^128*generator */
g_128j = gj;
for (i = 0; i < 128; i++)
for (i = 0; i < 128; i++) {
secp256k1_gej_double_var(&g_128j, &g_128j);
}
secp256k1_ecmult_table_precomp_ge_storage_var(ret->pre_g_128, &g_128j, WINDOW_G);
}
#endif
@ -134,8 +138,9 @@ static void secp256k1_ecmult_start(void) {
static void secp256k1_ecmult_stop(void) {
secp256k1_ecmult_consts_t *c;
if (secp256k1_ecmult_consts == NULL)
if (secp256k1_ecmult_consts == NULL) {
return;
}
c = (secp256k1_ecmult_consts_t*)secp256k1_ecmult_consts;
secp256k1_ecmult_consts = NULL;
@ -223,7 +228,9 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const
VERIFY_CHECK(bits_na_1 <= 130);
VERIFY_CHECK(bits_na_lam <= 130);
bits = bits_na_1;
if (bits_na_lam > bits) bits = bits_na_lam;
if (bits_na_lam > bits) {
bits = bits_na_lam;
}
#else
/* build wnaf representation for na. */
bits_na = secp256k1_ecmult_wnaf(wnaf_na, na, WINDOW_A);
@ -234,8 +241,9 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const
secp256k1_ecmult_table_precomp_gej_var(pre_a, a, WINDOW_A);
#ifdef USE_ENDOMORPHISM
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++)
for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
secp256k1_gej_mul_lambda(&pre_a_lam[i], &pre_a[i]);
}
/* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */
secp256k1_scalar_split_128(&ng_1, &ng_128, ng);
@ -243,11 +251,17 @@ static void secp256k1_ecmult(secp256k1_gej_t *r, const secp256k1_gej_t *a, const
/* Build wnaf representation for ng_1 and ng_128 */
bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, &ng_1, WINDOW_G);
bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, &ng_128, WINDOW_G);
if (bits_ng_1 > bits) bits = bits_ng_1;
if (bits_ng_128 > bits) bits = bits_ng_128;
if (bits_ng_1 > bits) {
bits = bits_ng_1;
}
if (bits_ng_128 > bits) {
bits = bits_ng_128;
}
#else
bits_ng = secp256k1_ecmult_wnaf(wnaf_ng, ng, WINDOW_G);
if (bits_ng > bits) bits = bits_ng;
if (bits_ng > bits) {
bits = bits_ng;
}
#endif
secp256k1_gej_set_infinity(r);
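The same bracing is applied inside the backslash-continued table-lookup macros above. A minimal sketch of the resulting shape, using a hypothetical macro on plain ints (the real ECMULT_TABLE_GET_* macros also negate the looked-up group element for negative n, which is omitted here):

    #include <stdio.h>

    #define TABLE_GET(r, pre, n) do {          \
        if ((n) > 0) {                         \
            *(r) = (pre)[((n) - 1) / 2];       \
        } else {                               \
            *(r) = (pre)[(-(n) - 1) / 2];      \
        }                                      \
    } while(0)

    int main(void) {
        int table[4] = {10, 11, 12, 13};
        int out;
        TABLE_GET(&out, table, 5);   /* odd n > 0: table[(5-1)/2] */
        printf("%d\n", out);         /* prints 12 */
        TABLE_GET(&out, table, -3);  /* odd n < 0: table[(3-1)/2] */
        printf("%d\n", out);         /* prints 11 */
        return 0;
    }

The do { } while(0) wrapper already makes the macro safe to use as a single statement; the inner braces are the same readability rule applied consistently.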

@ -236,8 +236,9 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe_t *r) {
z1 = z0 ^ 0x3D0UL;
/* Fast return path should catch the majority of cases */
if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL))
if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) {
return 0;
}
t1 = r->n[1];
t2 = r->n[2];
@ -315,8 +316,12 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b
secp256k1_fe_verify(b);
#endif
for (i = 9; i >= 0; i--) {
if (a->n[i] > b->n[i]) return 1;
if (a->n[i] < b->n[i]) return -1;
if (a->n[i] > b->n[i]) {
return 1;
}
if (a->n[i] < b->n[i]) {
return -1;
}
}
return 0;
}

@ -209,8 +209,9 @@ static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe_t *r) {
z1 = z0 ^ 0x1000003D0ULL;
/* Fast return path should catch the majority of cases */
if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL))
if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
return 0;
}
t1 = r->n[1];
t2 = r->n[2];
@ -277,8 +278,12 @@ static int secp256k1_fe_cmp_var(const secp256k1_fe_t *a, const secp256k1_fe_t *b
secp256k1_fe_verify(b);
#endif
for (i = 4; i >= 0; i--) {
if (a->n[i] > b->n[i]) return 1;
if (a->n[i] < b->n[i]) return -1;
if (a->n[i] > b->n[i]) {
return 1;
}
if (a->n[i] < b->n[i]) {
return -1;
}
}
return 0;
}

@ -44,47 +44,69 @@ static int secp256k1_fe_sqrt_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
secp256k1_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) secp256k1_fe_sqr(&x6, &x6);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x6, &x6);
}
secp256k1_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) secp256k1_fe_sqr(&x9, &x9);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x9, &x9);
}
secp256k1_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) secp256k1_fe_sqr(&x11, &x11);
for (j=0; j<2; j++) {
secp256k1_fe_sqr(&x11, &x11);
}
secp256k1_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) secp256k1_fe_sqr(&x22, &x22);
for (j=0; j<11; j++) {
secp256k1_fe_sqr(&x22, &x22);
}
secp256k1_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) secp256k1_fe_sqr(&x44, &x44);
for (j=0; j<22; j++) {
secp256k1_fe_sqr(&x44, &x44);
}
secp256k1_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) secp256k1_fe_sqr(&x88, &x88);
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x88, &x88);
}
secp256k1_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) secp256k1_fe_sqr(&x176, &x176);
for (j=0; j<88; j++) {
secp256k1_fe_sqr(&x176, &x176);
}
secp256k1_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) secp256k1_fe_sqr(&x220, &x220);
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x220, &x220);
}
secp256k1_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) secp256k1_fe_sqr(&x223, &x223);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x223, &x223);
}
secp256k1_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) secp256k1_fe_sqr(&t1, &t1);
for (j=0; j<23; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x22);
for (j=0; j<6; j++) secp256k1_fe_sqr(&t1, &t1);
for (j=0; j<6; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x2);
secp256k1_fe_sqr(&t1, &t1);
secp256k1_fe_sqr(r, &t1);
@ -111,51 +133,77 @@ static void secp256k1_fe_inv(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
secp256k1_fe_mul(&x3, &x3, a);
x6 = x3;
for (j=0; j<3; j++) secp256k1_fe_sqr(&x6, &x6);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x6, &x6);
}
secp256k1_fe_mul(&x6, &x6, &x3);
x9 = x6;
for (j=0; j<3; j++) secp256k1_fe_sqr(&x9, &x9);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x9, &x9);
}
secp256k1_fe_mul(&x9, &x9, &x3);
x11 = x9;
for (j=0; j<2; j++) secp256k1_fe_sqr(&x11, &x11);
for (j=0; j<2; j++) {
secp256k1_fe_sqr(&x11, &x11);
}
secp256k1_fe_mul(&x11, &x11, &x2);
x22 = x11;
for (j=0; j<11; j++) secp256k1_fe_sqr(&x22, &x22);
for (j=0; j<11; j++) {
secp256k1_fe_sqr(&x22, &x22);
}
secp256k1_fe_mul(&x22, &x22, &x11);
x44 = x22;
for (j=0; j<22; j++) secp256k1_fe_sqr(&x44, &x44);
for (j=0; j<22; j++) {
secp256k1_fe_sqr(&x44, &x44);
}
secp256k1_fe_mul(&x44, &x44, &x22);
x88 = x44;
for (j=0; j<44; j++) secp256k1_fe_sqr(&x88, &x88);
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x88, &x88);
}
secp256k1_fe_mul(&x88, &x88, &x44);
x176 = x88;
for (j=0; j<88; j++) secp256k1_fe_sqr(&x176, &x176);
for (j=0; j<88; j++) {
secp256k1_fe_sqr(&x176, &x176);
}
secp256k1_fe_mul(&x176, &x176, &x88);
x220 = x176;
for (j=0; j<44; j++) secp256k1_fe_sqr(&x220, &x220);
for (j=0; j<44; j++) {
secp256k1_fe_sqr(&x220, &x220);
}
secp256k1_fe_mul(&x220, &x220, &x44);
x223 = x220;
for (j=0; j<3; j++) secp256k1_fe_sqr(&x223, &x223);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&x223, &x223);
}
secp256k1_fe_mul(&x223, &x223, &x3);
/* The final result is then assembled using a sliding window over the blocks. */
t1 = x223;
for (j=0; j<23; j++) secp256k1_fe_sqr(&t1, &t1);
for (j=0; j<23; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x22);
for (j=0; j<5; j++) secp256k1_fe_sqr(&t1, &t1);
for (j=0; j<5; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, a);
for (j=0; j<3; j++) secp256k1_fe_sqr(&t1, &t1);
for (j=0; j<3; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(&t1, &t1, &x2);
for (j=0; j<2; j++) secp256k1_fe_sqr(&t1, &t1);
for (j=0; j<2; j++) {
secp256k1_fe_sqr(&t1, &t1);
}
secp256k1_fe_mul(r, a, &t1);
}
@ -188,8 +236,9 @@ static void secp256k1_fe_inv_var(secp256k1_fe_t *r, const secp256k1_fe_t *a) {
static void secp256k1_fe_inv_all_var(size_t len, secp256k1_fe_t *r, const secp256k1_fe_t *a) {
secp256k1_fe_t u;
size_t i;
if (len < 1)
if (len < 1) {
return;
}
VERIFY_CHECK((r + len <= a) || (a + len <= r));

@ -138,11 +138,13 @@ static int secp256k1_ge_set_xo_var(secp256k1_ge_t *r, const secp256k1_fe_t *x, i
r->infinity = 0;
secp256k1_fe_set_int(&c, 7);
secp256k1_fe_add(&c, &x3);
if (!secp256k1_fe_sqrt_var(&r->y, &c))
if (!secp256k1_fe_sqrt_var(&r->y, &c)) {
return 0;
}
secp256k1_fe_normalize_var(&r->y);
if (secp256k1_fe_is_odd(&r->y) != odd)
if (secp256k1_fe_is_odd(&r->y) != odd) {
secp256k1_fe_negate(&r->y, &r->y, 1);
}
return 1;
}
@ -176,8 +178,9 @@ static int secp256k1_gej_is_infinity(const secp256k1_gej_t *a) {
static int secp256k1_gej_is_valid_var(const secp256k1_gej_t *a) {
secp256k1_fe_t y2, x3, z2, z6;
if (a->infinity)
if (a->infinity) {
return 0;
}
/** y^2 = x^3 + 7
* (Y/Z^3)^2 = (X/Z^2)^3 + 7
* Y^2 / Z^6 = X^3 / Z^6 + 7
@ -195,8 +198,9 @@ static int secp256k1_gej_is_valid_var(const secp256k1_gej_t *a) {
static int secp256k1_ge_is_valid_var(const secp256k1_ge_t *a) {
secp256k1_fe_t y2, x3, c;
if (a->infinity)
if (a->infinity) {
return 0;
}
/* y^2 = x^3 + 7 */
secp256k1_fe_sqr(&y2, &a->y);
secp256k1_fe_sqr(&x3, &a->x); secp256k1_fe_mul(&x3, &x3, &a->x);
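For context on the check braced in secp256k1_gej_is_valid_var above: its comment clears denominators so the curve equation can be tested without a field inversion. Continuing the chain it starts, Y^2 / Z^6 = X^3 / Z^6 + 7 is multiplied through by Z^6 to give Y^2 = X^3 + 7*Z^6, which needs only squarings and multiplications of the Jacobian coordinates.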

@ -176,13 +176,15 @@ static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, cons
}
secp256k1_sha256_initialize(&hash->outer);
for (n = 0; n < 64; n++)
for (n = 0; n < 64; n++) {
rkey[n] ^= 0x5c;
}
secp256k1_sha256_write(&hash->outer, rkey, 64);
secp256k1_sha256_initialize(&hash->inner);
for (n = 0; n < 64; n++)
for (n = 0; n < 64; n++) {
rkey[n] ^= 0x5c ^ 0x36;
}
secp256k1_sha256_write(&hash->inner, rkey, 64);
memset(rkey, 0, 64);
}

@ -54,7 +54,9 @@ static void secp256k1_num_set_bin(secp256k1_num_t *r, const unsigned char *a, un
VERIFY_CHECK(len <= NUM_LIMBS*2);
r->limbs = len;
r->neg = 0;
while (r->limbs > 1 && r->data[r->limbs-1]==0) r->limbs--;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
static void secp256k1_num_add_abs(secp256k1_num_t *r, const secp256k1_num_t *a, const secp256k1_num_t *b) {
@ -70,7 +72,9 @@ static void secp256k1_num_sub_abs(secp256k1_num_t *r, const secp256k1_num_t *a,
mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs);
VERIFY_CHECK(c == 0);
r->limbs = a->limbs;
while (r->limbs > 1 && r->data[r->limbs-1]==0) r->limbs--;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m) {
@ -82,7 +86,9 @@ static void secp256k1_num_mod(secp256k1_num_t *r, const secp256k1_num_t *m) {
mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs);
memset(t, 0, sizeof(t));
r->limbs = m->limbs;
while (r->limbs > 1 && r->data[r->limbs-1]==0) r->limbs--;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
if (r->neg && (r->limbs > 1 || r->data[0] != 0)) {
@ -125,7 +131,9 @@ static void secp256k1_num_mod_inverse(secp256k1_num_t *r, const secp256k1_num_t
if (sn < 0) {
mpn_sub(r->data, m->data, m->limbs, r->data, -sn);
r->limbs = m->limbs;
while (r->limbs > 1 && r->data[r->limbs-1]==0) r->limbs--;
while (r->limbs > 1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
} else {
r->limbs = sn;
}
@ -143,15 +151,25 @@ static int secp256k1_num_is_neg(const secp256k1_num_t *a) {
}
static int secp256k1_num_cmp(const secp256k1_num_t *a, const secp256k1_num_t *b) {
if (a->limbs > b->limbs) return 1;
if (a->limbs < b->limbs) return -1;
if (a->limbs > b->limbs) {
return 1;
}
if (a->limbs < b->limbs) {
return -1;
}
return mpn_cmp(a->data, b->data, a->limbs);
}
static int secp256k1_num_eq(const secp256k1_num_t *a, const secp256k1_num_t *b) {
if (a->limbs > b->limbs) return 0;
if (a->limbs < b->limbs) return 0;
if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) return 0;
if (a->limbs > b->limbs) {
return 0;
}
if (a->limbs < b->limbs) {
return 0;
}
if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) {
return 0;
}
return mpn_cmp(a->data, b->data, a->limbs) == 0;
}
@ -198,12 +216,15 @@ static void secp256k1_num_mul(secp256k1_num_t *r, const secp256k1_num_t *a, cons
r->data[0] = 0;
return;
}
if (a->limbs >= b->limbs)
if (a->limbs >= b->limbs) {
mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs);
else
} else {
mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs);
}
r->limbs = a->limbs + b->limbs;
if (r->limbs > 1 && tmp[r->limbs - 1]==0) r->limbs--;
if (r->limbs > 1 && tmp[r->limbs - 1]==0) {
r->limbs--;
}
VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS);
mpn_copyi(r->data, tmp, r->limbs);
r->neg = a->neg ^ b->neg;
@ -227,7 +248,9 @@ static void secp256k1_num_shift(secp256k1_num_t *r, int bits) {
}
}
}
while (r->limbs>1 && r->data[r->limbs-1]==0) r->limbs--;
while (r->limbs>1 && r->data[r->limbs-1]==0) {
r->limbs--;
}
}
static void secp256k1_num_negate(secp256k1_num_t *r) {

@ -69,130 +69,168 @@ static void secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scal
secp256k1_scalar_mul(&x8, &x8, x);
secp256k1_scalar_sqr(&x15, &x8);
for (i = 0; i < 6; i++)
for (i = 0; i < 6; i++) {
secp256k1_scalar_sqr(&x15, &x15);
}
secp256k1_scalar_mul(&x15, &x15, &x7);
secp256k1_scalar_sqr(&x30, &x15);
for (i = 0; i < 14; i++)
for (i = 0; i < 14; i++) {
secp256k1_scalar_sqr(&x30, &x30);
}
secp256k1_scalar_mul(&x30, &x30, &x15);
secp256k1_scalar_sqr(&x60, &x30);
for (i = 0; i < 29; i++)
for (i = 0; i < 29; i++) {
secp256k1_scalar_sqr(&x60, &x60);
}
secp256k1_scalar_mul(&x60, &x60, &x30);
secp256k1_scalar_sqr(&x120, &x60);
for (i = 0; i < 59; i++)
for (i = 0; i < 59; i++) {
secp256k1_scalar_sqr(&x120, &x120);
}
secp256k1_scalar_mul(&x120, &x120, &x60);
secp256k1_scalar_sqr(&x127, &x120);
for (i = 0; i < 6; i++)
for (i = 0; i < 6; i++) {
secp256k1_scalar_sqr(&x127, &x127);
}
secp256k1_scalar_mul(&x127, &x127, &x7);
/* Then accumulate the final result (t starts at x127). */
t = &x127;
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 4; i++) /* 0 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 4; i++) /* 0 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 3; i++) /* 0 */
for (i = 0; i < 3; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 4; i++) /* 0 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 5; i++) /* 00 */
for (i = 0; i < 5; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) /* 00 */
for (i = 0; i < 4; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 5; i++) /* 0 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x4); /* 1111 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 3; i++) /* 00 */
for (i = 0; i < 3; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 4; i++) /* 000 */
for (i = 0; i < 4; i++) { /* 000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 10; i++) /* 0000000 */
for (i = 0; i < 10; i++) { /* 0000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 4; i++) /* 0 */
for (i = 0; i < 4; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x3); /* 111 */
for (i = 0; i < 9; i++) /* 0 */
for (i = 0; i < 9; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 3; i++) /* 00 */
for (i = 0; i < 3; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 3; i++) /* 00 */
for (i = 0; i < 3; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 5; i++) /* 0 */
for (i = 0; i < 5; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x4); /* 1111 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 5; i++) /* 000 */
for (i = 0; i < 5; i++) { /* 000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 4; i++) /* 00 */
for (i = 0; i < 4; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 2; i++) /* 0 */
for (i = 0; i < 2; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 8; i++) /* 000000 */
for (i = 0; i < 8; i++) { /* 000000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 3; i++) /* 0 */
for (i = 0; i < 3; i++) { /* 0 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, &x2); /* 11 */
for (i = 0; i < 3; i++) /* 00 */
for (i = 0; i < 3; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 6; i++) /* 00000 */
for (i = 0; i < 6; i++) { /* 00000 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(t, t, x); /* 1 */
for (i = 0; i < 8; i++) /* 00 */
for (i = 0; i < 8; i++) { /* 00 */
secp256k1_scalar_sqr(t, t);
}
secp256k1_scalar_mul(r, t, &x6); /* 111111 */
}
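The long run of braced squaring loops above is an addition-chain exponentiation: secp256k1_scalar_inverse raises x to the group order minus two (Fermat's little theorem), and each "square k times, then multiply by a precomputed power" step appends k bits to the exponent, as the /* ... */ bit-pattern comments record. A toy sketch of that pattern on plain 64-bit integers (hypothetical values, illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy modular multiply; values stay far below 2^32 so a*b cannot overflow. */
    static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t m) { return a * b % m; }

    int main(void) {
        const uint64_t m = 1000003;                  /* toy modulus */
        uint64_t x = 12345;
        uint64_t x3 = mulmod(mulmod(x, x, m), x, m); /* x^0b11, a precomputed block */
        uint64_t t = x3;                             /* exponent so far: 0b11 */
        uint64_t check = 1;
        int i;

        for (i = 0; i < 4; i++) {                    /* four squarings: shift exponent left by 4 */
            t = mulmod(t, t, m);
        }
        t = mulmod(t, x3, m);                        /* append 0011: exponent is now 0b110011 = 51 */

        for (i = 0; i < 51; i++) {                   /* direct check: x^51 mod m */
            check = mulmod(check, x, m);
        }
        printf("%d\n", t == check);                  /* prints 1 */
        return 0;
    }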

@ -55,8 +55,9 @@ void random_group_element_test(secp256k1_ge_t *ge) {
secp256k1_fe_t fe;
do {
random_field_element_test(&fe);
if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand32() & 1))
if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand32() & 1)) {
break;
}
} while(1);
}
@ -81,8 +82,9 @@ void random_scalar_order_test(secp256k1_scalar_t *num) {
int overflow = 0;
secp256k1_rand256_test(b32);
secp256k1_scalar_set_b32(num, b32, &overflow);
if (overflow || secp256k1_scalar_is_zero(num))
if (overflow || secp256k1_scalar_is_zero(num)) {
continue;
}
break;
} while(1);
}
@ -93,8 +95,9 @@ void random_scalar_order(secp256k1_scalar_t *num) {
int overflow = 0;
secp256k1_rand256(b32);
secp256k1_scalar_set_b32(num, b32, &overflow);
if (overflow || secp256k1_scalar_is_zero(num))
if (overflow || secp256k1_scalar_is_zero(num)) {
continue;
}
break;
} while(1);
}
@ -229,8 +232,9 @@ void run_rfc6979_hmac_sha256_tests(void) {
#ifndef USE_NUM_NONE
void random_num_negate(secp256k1_num_t *num) {
if (secp256k1_rand32() & 1)
if (secp256k1_rand32() & 1) {
secp256k1_num_negate(num);
}
}
void random_num_order_test(secp256k1_num_t *num) {
@ -624,8 +628,9 @@ void random_fe_non_zero(secp256k1_fe_t *nz) {
while (--tries >= 0) {
random_fe(nz);
secp256k1_fe_normalize(nz);
if (!secp256k1_fe_is_zero(nz))
if (!secp256k1_fe_is_zero(nz)) {
break;
}
}
/* Infinitesimal probability of spurious failure here */
CHECK(tries >= 0);
@ -765,14 +770,17 @@ void run_field_inv_all_var(void) {
for (i = 0; i < count; i++) {
size_t j;
size_t len = (secp256k1_rand32() & 15) + 1;
for (j = 0; j < len; j++)
for (j = 0; j < len; j++) {
random_fe_non_zero(&x[j]);
}
secp256k1_fe_inv_all_var(len, xi, x);
for (j = 0; j < len; j++)
for (j = 0; j < len; j++) {
CHECK(check_fe_inverse(&x[j], &xi[j]));
}
secp256k1_fe_inv_all_var(len, xii, xi);
for (j = 0; j < len; j++)
for (j = 0; j < len; j++) {
CHECK(check_fe_equal(&x[j], &xii[j]));
}
}
}
@ -844,8 +852,9 @@ void run_sqrt(void) {
void ge_equals_ge(const secp256k1_ge_t *a, const secp256k1_ge_t *b) {
CHECK(a->infinity == b->infinity);
if (a->infinity)
if (a->infinity) {
return;
}
CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
}
@ -854,8 +863,9 @@ void ge_equals_gej(const secp256k1_ge_t *a, const secp256k1_gej_t *b) {
secp256k1_fe_t z2s;
secp256k1_fe_t u1, u2, s1, s2;
CHECK(a->infinity == b->infinity);
if (a->infinity)
if (a->infinity) {
return;
}
/* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
secp256k1_fe_sqr(&z2s, &b->z);
secp256k1_fe_mul(&u1, &a->x, &z2s);
@ -1141,8 +1151,6 @@ void run_wnaf(void) {
secp256k1_scalar_t n;
for (i = 0; i < count; i++) {
random_scalar_order(&n);
if (i % 1)
secp256k1_scalar_negate(&n, &n);
test_wnaf(&n, 4+(i%10));
}
}
@ -1168,7 +1176,9 @@ void test_ecdsa_sign_verify(void) {
secp256k1_ge_set_gej(&pub, &pubj);
getrec = secp256k1_rand32()&1;
random_sign(&sig, &key, &msg, getrec?&recid:NULL);
if (getrec) CHECK(recid >= 0 && recid < 4);
if (getrec) {
CHECK(recid >= 0 && recid < 4);
}
CHECK(secp256k1_ecdsa_sig_verify(&sig, &pub, &msg));
secp256k1_scalar_set_int(&one, 1);
secp256k1_scalar_add(&msg, &msg, &one);
@ -1192,7 +1202,9 @@ static int precomputed_nonce_function(unsigned char *nonce32, const unsigned cha
static int nonce_function_test_fail(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, unsigned int counter, const void *data) {
/* Dummy nonce generator that has a fatal error on the first counter value. */
if (counter == 0) return 0;
if (counter == 0) {
return 0;
}
return nonce_function_rfc6979(nonce32, msg32, key32, counter - 1, data);
}
@ -1200,7 +1212,9 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char
/* Dummy nonce generator that produces unacceptable nonces for the first several counter values. */
if (counter < 3) {
memset(nonce32, counter==0 ? 0 : 255, 32);
if (counter == 2) nonce32[31]--;
if (counter == 2) {
nonce32[31]--;
}
return 1;
}
if (counter < 5) {
@ -1211,12 +1225,16 @@ static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char
0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
};
memcpy(nonce32, order, 32);
if (counter == 4) nonce32[31]++;
if (counter == 4) {
nonce32[31]++;
}
return 1;
}
/* Retry rate of 6979 is negligible esp. as we only call this in deterministic tests. */
/* If someone does find a case where it retries for secp256k1, we'd like to know. */
if (counter > 5) return 0;
if (counter > 5) {
return 0;
}
return nonce_function_rfc6979(nonce32, msg32, key32, counter - 5, data);
}
@ -1280,7 +1298,9 @@ void test_ecdsa_end_to_end(void) {
ret1 = secp256k1_ec_privkey_tweak_add(privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_add(pubkey, pubkeylen, rnd);
CHECK(ret1 == ret2);
if (ret1 == 0) return;
if (ret1 == 0) {
return;
}
CHECK(secp256k1_ec_pubkey_create(pubkey2, &pubkeylen2, privkey, pubkeylen == 33) == 1);
CHECK(memcmp(pubkey, pubkey2, pubkeylen) == 0);
}
@ -1296,7 +1316,9 @@ void test_ecdsa_end_to_end(void) {
ret1 = secp256k1_ec_privkey_tweak_mul(privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_mul(pubkey, pubkeylen, rnd);
CHECK(ret1 == ret2);
if (ret1 == 0) return;
if (ret1 == 0) {
return;
}
CHECK(secp256k1_ec_pubkey_create(pubkey2, &pubkeylen2, privkey, pubkeylen == 33) == 1);
CHECK(memcmp(pubkey, pubkey2, pubkeylen) == 0);
}
@ -1351,7 +1373,9 @@ void test_random_pubkeys(void) {
uint32_t r = secp256k1_rand32();
int len = (r & 3) == 0 ? 65 : 33;
r>>=2;
if ((r & 3) == 0) len = (r & 252) >> 3;
if ((r & 3) == 0) {
len = (r & 252) >> 3;
}
r>>=8;
if (len == 65) {
in[0] = (r & 2) ? 4 : (r & 1? 6 : 7);
@ -1359,10 +1383,16 @@ void test_random_pubkeys(void) {
in[0] = (r & 1) ? 2 : 3;
}
r>>=2;
if ((r & 7) == 0) in[0] = (r & 2040) >> 3;
if ((r & 7) == 0) {
in[0] = (r & 2040) >> 3;
}
r>>=11;
if (len > 1) secp256k1_rand256(&in[1]);
if (len > 33) secp256k1_rand256(&in[33]);
if (len > 1) {
secp256k1_rand256(&in[1]);
}
if (len > 33) {
secp256k1_rand256(&in[33]);
}
if (secp256k1_eckey_pubkey_parse(&elem, in, len)) {
unsigned char out[65];
unsigned char firstb;
@ -1374,7 +1404,9 @@ void test_random_pubkeys(void) {
CHECK(size == len);
CHECK(memcmp(&in[1], &out[1], len-1) == 0);
/* ... except for the type of hybrid inputs. */
if ((in[0] != 6) && (in[0] != 7)) CHECK(in[0] == out[0]);
if ((in[0] != 6) && (in[0] != 7)) {
CHECK(in[0] == out[0]);
}
size = 65;
CHECK(secp256k1_eckey_pubkey_serialize(&elem, in, &size, 0));
CHECK(size == 65);
@ -1384,8 +1416,11 @@ void test_random_pubkeys(void) {
in[0] = (r & 1) ? 6 : 7;
res = secp256k1_eckey_pubkey_parse(&elem2, in, size);
if (firstb == 2 || firstb == 3) {
if (in[0] == firstb + 4) CHECK(res);
else CHECK(!res);
if (in[0] == firstb + 4) {
CHECK(res);
} else {
CHECK(!res);
}
}
if (res) {
ge_equals_ge(&elem,&elem2);
@ -1528,7 +1563,9 @@ void test_ecdsa_edge_cases(void) {
unsigned char orig = sigbder[i];
/*Try every single-byte change.*/
for (c = 0; c < 256; c++) {
if (c == orig ) continue;
if (c == orig ) {
continue;
}
sigbder[i] = c;
CHECK(secp256k1_ecdsa_verify(msg32, sigbder, sizeof(sigbder), pubkeyb, pubkeyblen) ==
(i==4 || i==7) ? 0 : -2 );
