Rewrite 5x52 normalize method to be faster

pull/11871/head
Peter Dettman 11 years ago
parent 5355746867
commit 9037707720

@@ -34,41 +34,39 @@ void static secp256k1_fe_inner_start(void) {}
 void static secp256k1_fe_inner_stop(void) {}
 
 void static secp256k1_fe_normalize(secp256k1_fe_t *r) {
-    uint64_t c;
-    c = r->n[0];
-    uint64_t t0 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + r->n[1];
-    uint64_t t1 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + r->n[2];
-    uint64_t t2 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + r->n[3];
-    uint64_t t3 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + r->n[4];
-    uint64_t t4 = c & 0x0FFFFFFFFFFFFULL;
-    c >>= 48;
-
-    // The following code will not modify the t's if c is initially 0.
-    c = c * 0x1000003D1ULL + t0;
-    t0 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + t1;
-    t1 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + t2;
-    t2 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + t3;
-    t3 = c & 0xFFFFFFFFFFFFFULL;
-    c = (c >> 52) + t4;
-    t4 = c & 0x0FFFFFFFFFFFFULL;
-    assert((c >> 48) == 0);
-
-    // Subtract p if result >= p
-    uint64_t mask = -(int64_t)((t4 < 0xFFFFFFFFFFFFULL) | (t3 < 0xFFFFFFFFFFFFFULL) | (t2 < 0xFFFFFFFFFFFFFULL) | (t1 < 0xFFFFFFFFFFFFFULL) | (t0 < 0xFFFFEFFFFFC2FULL));
-    t4 &= mask;
-    t3 &= mask;
-    t2 &= mask;
-    t1 &= mask;
-    t0 -= (~mask & 0xFFFFEFFFFFC2FULL);
-
-    // push internal variables back
+    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+    // Reduce t4 at the start so there will be at most a single carry from the first pass
+    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+    // The first pass ensures the magnitude is 1, ...
+    t0 += x * 0x1000003D1ULL;
+    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+    // ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element)
+    assert(t4 >> 49 == 0);
+
+    // At most a single final reduction is needed; check if the value is >= the field characteristic
+    x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL)
+        & ((t3 & t2 & t1) == 0xFFFFFFFFFFFFFULL)
+        & (t0 >= 0xFFFFEFFFFFC2FULL));
+
+    // Apply the final reduction (for constant-time behaviour, we do it always)
+    t0 += x * 0x1000003D1ULL;
+    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+    // If t4 didn't carry to bit 48 already, then it should have after any final reduction
+    assert(t4 >> 48 == x);
+
+    // Mask off the possible multiple of 2^256 from the final reduction
+    t4 &= 0x0FFFFFFFFFFFFULL;
+
     r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
 #ifdef VERIFY

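For reference, below is a minimal standalone sketch (not part of the commit) of why the two-pass normalization works at its boundary cases. It reuses the normalization sequence from the new code above, but the struct name fe5x52 and the test harness are hypothetical stand-ins, not the library's API. The idea it exercises: since p = 2^256 - 0x1000003D1, any bits at or above bit 256 can be folded back in by multiplying them by 0x1000003D1, and a single conditional subtraction of p then suffices. Normalizing p itself must give 0, and p + 1 must give 1.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for secp256k1_fe_t: five 52-bit limbs, top limb 48 bits. */
typedef struct { uint64_t n[5]; } fe5x52;

/* Same two-pass normalization as the new secp256k1_fe_normalize above. */
static void normalize(fe5x52 *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* Fold bits >= 2^256 back in: 2^256 == 0x1000003D1 (mod p), since p = 2^256 - 0x1000003D1. */
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;

    /* One conditional subtraction of p is enough; detect value >= p by limb comparison. */
    x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL)
        & ((t3 & t2 & t1) == 0xFFFFFFFFFFFFFULL)
        & (t0 >= 0xFFFFEFFFFFC2FULL));

    /* Always apply the reduction so the code is constant-time. */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
    t4 &= 0x0FFFFFFFFFFFFULL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
}

int main(void) {
    /* p in 5x52 limbs: low limb is 2^52 - 0x1000003D1, middle limbs all ones, top limb 48 ones. */
    fe5x52 p = {{0xFFFFEFFFFFC2FULL, 0xFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFULL,
                 0xFFFFFFFFFFFFFULL, 0x0FFFFFFFFFFFFULL}};
    normalize(&p);
    for (int i = 0; i < 5; i++) assert(p.n[i] == 0);   /* p mod p == 0 */

    /* p + 1 should normalize to 1. */
    fe5x52 q = {{0xFFFFEFFFFFC30ULL, 0xFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFULL,
                 0xFFFFFFFFFFFFFULL, 0x0FFFFFFFFFFFFULL}};
    normalize(&q);
    assert(q.n[0] == 1 && q.n[1] == 0 && q.n[2] == 0 && q.n[3] == 0 && q.n[4] == 0);

    printf("normalize boundary checks passed\n");
    return 0;
}

In both test values the first pass is a no-op (no bits above bit 255 are set), so it is the limb-comparison check and the final reduction that do the work, matching the "at most a single final reduction is needed" comment in the new code.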