Add invariant checking for scalars #1373

Merged · 3 commits · Aug 18, 2023
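
The change applies one convention throughout: every scalar function asserts the invariants of its scalar inputs on entry and of its scalar outputs before returning. A hypothetical sketch of the pattern (the function name and body are illustrative, not taken from the PR):

```c
/* Illustrative only: the shape of the checks this PR adds to each
 * scalar function. With VERIFY undefined, the calls compile to nothing. */
static void secp256k1_scalar_example(secp256k1_scalar *r,
                                     const secp256k1_scalar *a,
                                     const secp256k1_scalar *b) {
    secp256k1_scalar_verify(a);   /* inputs must satisfy the invariant */
    secp256k1_scalar_verify(b);

    /* ... compute r from a and b ... */

    secp256k1_scalar_verify(r);   /* and so must the result */
}
```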
10 changes: 5 additions & 5 deletions src/modules/schnorrsig/tests_exhaustive_impl.h
@@ -110,15 +110,15 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons
if (!e_done[e]) {
/* Iterate over the possible valid last 32 bytes in the signature.
0..order=that s value; order+1=random bytes */
int count_valid = 0, s;
int count_valid = 0;
unsigned int s;
for (s = 0; s <= EXHAUSTIVE_TEST_ORDER + 1; ++s) {
int expect_valid, valid;
if (s <= EXHAUSTIVE_TEST_ORDER) {
secp256k1_scalar s_s;
secp256k1_scalar_set_int(&s_s, s);
secp256k1_scalar_get_b32(sig64 + 32, &s_s);
memset(sig64 + 32, 0, 32);
secp256k1_write_be32(sig64 + 60, s);
expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER &&
(s_s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
Contributor:
Unrelated note: looking at that replaced line, I was confused for a moment that comparing a secp256k1_scalar directly with an integer is possible, until I realized that it's simply a uint32_t typedef in the scalar_low implementation (I would have expected it to still be wrapped in a struct), which is of course the only backend used in exhaustive tests.

Contributor:
Yep, indeed. I guess that's okay for test code; introducing another function here would be overkill.
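
(For reference, the definition in question, which is not part of this diff and is sketched here as an assumption:)

```c
/* Assumed sketch of src/scalar_low.h, used in exhaustive-test builds:
 * the scalar is a bare integer rather than a struct, so comparing it
 * directly with an int is an ordinary integer comparison. */
typedef uint32_t secp256k1_scalar;
```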

(s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
} else {
secp256k1_testrand256(sig64 + 32);
expect_valid = 0;
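A note on the test change above: the loop deliberately includes s == EXHAUSTIVE_TEST_ORDER, which is not a valid (reduced) scalar, so building it via secp256k1_scalar_set_int would presumably trip the verify call that this PR adds to set_int. Writing the candidate value directly into the signature bytes sidesteps that. A sketch of the equivalence (buffer name hypothetical):

```c
/* What the new lines do by hand: a 32-byte big-endian encoding of the
 * small candidate value s, without materializing a secp256k1_scalar.
 * For reduced values this matches secp256k1_scalar_get_b32, but it
 * also handles s == EXHAUSTIVE_TEST_ORDER, which no scalar may hold. */
unsigned char s_bytes[32] = {0};
secp256k1_write_be32(s_bytes + 28, s);   /* bytes 28..31 carry s */
```
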
3 changes: 3 additions & 0 deletions src/scalar.h
@@ -99,4 +99,7 @@ static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_
/** If flag is true, set *r equal to *a; otherwise leave it. Constant-time. Both *r and *a must be initialized.*/
static void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag);

/** Check invariants on a scalar (no-op unless VERIFY is enabled). */
static void secp256k1_scalar_verify(const secp256k1_scalar *r);

#endif /* SECP256K1_SCALAR_H */
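
The declaration above is backend-agnostic; the definition is not part of this excerpt. For the 4x64 backend below, a plausible minimal implementation (an assumption, not quoted from the PR) is just the reduction check:

```c
/* Sketch: the invariant for a 4x64 scalar is that it is fully reduced
 * modulo the group order. VERIFY_CHECK expands to nothing unless
 * VERIFY is defined, so release builds pay no cost for these calls. */
SECP256K1_INLINE static void secp256k1_scalar_verify(const secp256k1_scalar *r) {
    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
}
```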
79 changes: 72 additions & 7 deletions src/scalar_4x64_impl.h
@@ -41,16 +41,22 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
r->d[1] = 0;
r->d[2] = 0;
r->d[3] = 0;

secp256k1_scalar_verify(r);
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);

return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
secp256k1_scalar_verify(a);
VERIFY_CHECK(count < 32);
VERIFY_CHECK(offset + count <= 256);

if ((offset + count - 1) >> 6 == offset >> 6) {
return secp256k1_scalar_get_bits(a, offset, count);
} else {
@@ -74,6 +80,7 @@ SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scal
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
secp256k1_uint128 t;
VERIFY_CHECK(overflow <= 1);

secp256k1_u128_from_u64(&t, r->d[0]);
secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
@@ -85,12 +92,17 @@
r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
secp256k1_u128_accum_u64(&t, r->d[3]);
r->d[3] = secp256k1_u128_to_u64(&t);

secp256k1_scalar_verify(r);
return overflow;
}

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
int overflow;
secp256k1_uint128 t;
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);

secp256k1_u128_from_u64(&t, a->d[0]);
secp256k1_u128_accum_u64(&t, b->d[0]);
r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
@@ -106,13 +118,17 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
VERIFY_CHECK(overflow == 0 || overflow == 1);
secp256k1_scalar_reduce(r, overflow);

secp256k1_scalar_verify(r);
return overflow;
}

static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
secp256k1_uint128 t;
volatile int vflag = flag;
secp256k1_scalar_verify(r);
VERIFY_CHECK(bit < 256);

bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
secp256k1_u128_from_u64(&t, r->d[0]);
secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
@@ -126,6 +142,8 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
secp256k1_u128_accum_u64(&t, r->d[3]);
secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
r->d[3] = secp256k1_u128_to_u64(&t);

secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
#endif
@@ -141,22 +159,30 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
if (overflow) {
*overflow = over;
}

secp256k1_scalar_verify(r);
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
secp256k1_scalar_verify(a);

secp256k1_write_be64(&bin[0], a->d[3]);
secp256k1_write_be64(&bin[8], a->d[2]);
secp256k1_write_be64(&bin[16], a->d[1]);
secp256k1_write_be64(&bin[24], a->d[0]);
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);

return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
secp256k1_uint128 t;
secp256k1_scalar_verify(a);

secp256k1_u128_from_u64(&t, ~a->d[0]);
secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
@@ -169,15 +195,21 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
secp256k1_u128_accum_u64(&t, ~a->d[3]);
secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;

secp256k1_scalar_verify(r);
}

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);

return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
int yes = 0;
int no = 0;
secp256k1_scalar_verify(a);

no |= (a->d[3] < SECP256K1_N_H_3);
yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
@@ -194,6 +226,8 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
uint64_t mask = -vflag;
uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
secp256k1_uint128 t;
secp256k1_scalar_verify(r);

secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
@@ -206,6 +240,8 @@
secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;

secp256k1_scalar_verify(r);
return 2 * (mask == 0) - 1;
}

@@ -764,23 +800,34 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
uint64_t l[8];
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);

secp256k1_scalar_mul_512(l, a, b);
secp256k1_scalar_reduce_512(r, l);

secp256k1_scalar_verify(r);
}

static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
int ret;
secp256k1_scalar_verify(r);
VERIFY_CHECK(n > 0);
VERIFY_CHECK(n < 16);

ret = r->d[0] & ((1 << n) - 1);
r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
r->d[3] = (r->d[3] >> n);

secp256k1_scalar_verify(r);
return ret;
}

static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
secp256k1_scalar_verify(k);

r1->d[0] = k->d[0];
r1->d[1] = k->d[1];
r1->d[2] = 0;
@@ -789,9 +836,15 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
r2->d[1] = k->d[3];
r2->d[2] = 0;
r2->d[3] = 0;

secp256k1_scalar_verify(r1);
secp256k1_scalar_verify(r2);
}

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);

return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

@@ -800,7 +853,10 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
unsigned int shiftlimbs;
unsigned int shiftlow;
unsigned int shifthigh;
secp256k1_scalar_verify(a);
secp256k1_scalar_verify(b);
VERIFY_CHECK(shift >= 256);

secp256k1_scalar_mul_512(l, a, b);
shiftlimbs = shift >> 6;
shiftlow = shift & 0x3F;
@@ -810,18 +866,24 @@
r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);

secp256k1_scalar_verify(r);
}

static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
uint64_t mask0, mask1;
volatile int vflag = flag;
secp256k1_scalar_verify(a);
SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));

mask0 = vflag + ~((uint64_t)0);
mask1 = ~mask0;
r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);

secp256k1_scalar_verify(r);
}

static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@@ -841,18 +903,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
r->d[2] = a2 >> 4 | a3 << 58;
r->d[3] = a3 >> 6 | a4 << 56;

#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
secp256k1_scalar_verify(r);
}

static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
const uint64_t M62 = UINT64_MAX >> 2;
const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];

#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
#endif
secp256k1_scalar_verify(a);

r->v[0] = a0 & M62;
r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@@ -871,10 +928,13 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
secp256k1_scalar_verify(x);

secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);

secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
@@ -885,16 +945,21 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
#ifdef VERIFY
int zero_in = secp256k1_scalar_is_zero(x);
#endif
secp256k1_scalar_verify(x);

secp256k1_scalar_to_signed62(&s, x);
secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
secp256k1_scalar_from_signed62(r, &s);

secp256k1_scalar_verify(r);
#ifdef VERIFY
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
secp256k1_scalar_verify(a);

return !(a->d[0] & 1);
}
