@@ -42,8 +42,8 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_
 /** Add two scalars together (modulo the group order). Returns whether it overflowed. */
 static int secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
 
-/** Add a power of two to a scalar. The result is not allowed to overflow. */
-static void secp256k1_scalar_add_bit(secp256k1_scalar_t *r, unsigned int bit);
+/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar_t *r, unsigned int bit, int flag);
 
 /** Multiply two scalars (modulo the group order). */
 static void secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
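
The new declaration's contract is r += flag ? 2^bit : 0, with the caller guaranteeing the sum stays below 2^256. Below is a minimal sketch of those semantics on a toy 4x64-limb scalar; the toy_scalar_t type, the limb layout, and the branch-free mask style are illustrative assumptions, not this file's implementation:

#include <stdint.h>

/* Toy 256-bit scalar: four 64-bit limbs, least significant first. */
typedef struct { uint64_t d[4]; } toy_scalar_t;

/* r += flag ? 2^bit : 0, for bit < 256. The flag becomes a mask so there
 * is no branch on it; the caller must keep the sum below 2^256, matching
 * the "result is not allowed to overflow" contract above. */
static void toy_scalar_cadd_bit(toy_scalar_t *r, unsigned int bit, int flag) {
    uint64_t mask = (uint64_t)0 - (uint64_t)(flag != 0); /* all ones iff flag */
    uint64_t carry = 0;
    int i;
    for (i = 0; i < 4; i++) {
        /* 2^(bit mod 64) in limb bit/64, zero in the other limbs. */
        uint64_t add = (((uint64_t)((bit >> 6) == (unsigned)i)) << (bit & 0x3F)) & mask;
        uint64_t sum = r->d[i] + carry;
        carry = (uint64_t)(sum < carry);
        sum += add;
        carry += (uint64_t)(sum < add);
        r->d[i] = sum;
    }
}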
@@ -95,7 +95,7 @@ static int secp256k1_scalar_eq(const secp256k1_scalar_t *a, const secp256k1_scal
 /** Find r1 and r2 such that r1+r2*2^128 = a. */
 static void secp256k1_scalar_split_128(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
 /** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
-static void secp256k1_scalar_split_lambda_var(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
+static void secp256k1_scalar_split_lambda(secp256k1_scalar_t *r1, secp256k1_scalar_t *r2, const secp256k1_scalar_t *a);
 #endif
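
The lambda split is the scalar half of the GLV endomorphism trick: lambda*P is computable from P with a single field multiplication (which is what the secp256k1_gej_mul_lambda referenced above provides), so a*P = r1*P + r2*(lambda*P) turns one 256-bit multiplication into two half-width ones. A hypothetical caller sketch; the combining helper ecmult_two_half_width is an invented placeholder, and the secp256k1_gej_mul_lambda signature is assumed:

/* Hypothetical: res = a*P via the endomorphism split. */
static void point_mul_via_lambda(secp256k1_gej_t *res, const secp256k1_gej_t *p,
                                 const secp256k1_scalar_t *a) {
    secp256k1_scalar_t r1, r2;
    secp256k1_gej_t p_lam;
    secp256k1_scalar_split_lambda(&r1, &r2, a); /* a = r1 + r2*lambda, both ~128 bits */
    secp256k1_gej_mul_lambda(&p_lam, p);        /* lambda*P: one field multiplication */
    /* Invented placeholder: a double-scalar multiply that now only sees
     * half-width scalars, computing r1*P + r2*(lambda*P). */
    ecmult_two_half_width(res, p, &r1, &p_lam, &r2);
}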
 
 /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
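
Round-to-nearest after a power-of-two division is the usual add-half-then-truncate identity: round(x / 2^shift) = floor((x + 2^(shift-1)) / 2^shift). A toy model on ordinary integers; the real routine works on the full 512-bit product of two 256-bit scalars (hence shift >= 256), while this sketch relies on the GCC/Clang unsigned __int128 extension and only supports 64 <= shift < 128:

#include <stdint.h>

/* Toy model: a*b / 2^shift, rounded to the nearest integer (ties round up).
 * Requires 64 <= shift < 128 so the shift is defined and the result fits. */
static uint64_t toy_mul_shift(uint64_t a, uint64_t b, unsigned int shift) {
    unsigned __int128 prod = (unsigned __int128)a * b;
    return (uint64_t)((prod + ((unsigned __int128)1 << (shift - 1))) >> shift);
}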