From f65f1a43e8389cdcce2cdfc5e7df9ffeb872b134 Mon Sep 17 00:00:00 2001 From: Tim Ruffing Date: Wed, 17 Jan 2024 19:03:06 +0100 Subject: [PATCH] Be stricter with side effects in VERIFY Adds a rule to CONTRIBUTING.md and makes the code adhere to it. --- CONTRIBUTING.md | 4 ++++ src/ecmult_const_impl.h | 11 +++++++---- src/ecmult_impl.h | 13 ++++++------- src/field_impl.h | 11 +++++++---- src/group_impl.h | 30 +++++++++++++++++++++--------- src/modinv32_impl.h | 20 +++++++++++--------- src/modinv64_impl.h | 20 +++++++++++--------- src/modules/ellswift/main_impl.h | 24 ++++++++++++------------ src/scalar_4x64_impl.h | 25 +++++++++++-------------- src/scalar_8x32_impl.h | 10 ++-------- 10 files changed, 92 insertions(+), 76 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5fbf7332c9..a314fdd0a6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -49,6 +49,10 @@ In addition, libsecp256k1 tries to maintain the following coding conventions: * Operations involving secret data should be tested for being constant time with respect to the secrets (see [src/ctime_tests.c](src/ctime_tests.c)). * Local variables containing secret data should be cleared explicitly to try to delete secrets from memory. * Use `secp256k1_memcmp_var` instead of `memcmp` (see [#823](https://github.com/bitcoin-core/secp256k1/issues/823)). +* Use `VERIFY_CHECK(cond)` for test-only assertions. + * These are active only when the `VERIFY` macro is defined. The build system defines this macro when building test targets such as the `tests` and `exhaustive_tests` executables, but not when building the actual library. + * If you need additional code lines to prepare the call to `VERIFY_CHECK`, then wrap them and the call into `#ifdef VERIFY ... #endif`. Start a new block (see style conventions below) inside the `#ifdef VERIFY` if appropriate, and suffix `VERIFY`-only variables with `_ver`. + * In any `VERIFY` code, avoid side effects to variables used in non-`VERIFY` code. 
Note that `VERIFY_CHECK(cond)` is a no-op if `VERIFY` is not defined, so also avoid side effects in `cond`. #### Style conventions diff --git a/src/ecmult_const_impl.h b/src/ecmult_const_impl.h index 7dc4aac25d..1c324e801f 100644 --- a/src/ecmult_const_impl.h +++ b/src/ecmult_const_impl.h @@ -212,10 +212,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons secp256k1_scalar_add(&v2, &v2, &S_OFFSET); #ifdef VERIFY - /* Verify that v1 and v2 are in range [0, 2^129-1]. */ - for (i = 129; i < 256; ++i) { - VERIFY_CHECK(secp256k1_scalar_get_bits(&v1, i, 1) == 0); - VERIFY_CHECK(secp256k1_scalar_get_bits(&v2, i, 1) == 0); + { + int i_ver; + /* Verify that v1 and v2 are in range [0, 2^129-1]. */ + for (i_ver = 129; i_ver < 256; ++i_ver) { + VERIFY_CHECK(secp256k1_scalar_get_bits(&v1, i_ver, 1) == 0); + VERIFY_CHECK(secp256k1_scalar_get_bits(&v2, i_ver, 1) == 0); + } } #endif diff --git a/src/ecmult_impl.h b/src/ecmult_impl.h index 6d14c7ac4b..859d825a52 100644 --- a/src/ecmult_impl.h +++ b/src/ecmult_impl.h @@ -202,15 +202,14 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, bit += now; } + VERIFY_CHECK(carry == 0); + #ifdef VERIFY { - int verify_bit = bit; - - VERIFY_CHECK(carry == 0); - - while (verify_bit < 256) { - VERIFY_CHECK(secp256k1_scalar_get_bits(&s, verify_bit, 1) == 0); - verify_bit++; + int bit_ver = bit; + while (bit_ver < 256) { + VERIFY_CHECK(secp256k1_scalar_get_bits(&s, bit_ver, 1) == 0); + bit_ver++; } } #endif diff --git a/src/field_impl.h b/src/field_impl.h index 989e9cdb2f..bb846fa12b 100644 --- a/src/field_impl.h +++ b/src/field_impl.h @@ -132,10 +132,13 @@ static int secp256k1_fe_sqrt(secp256k1_fe * SECP256K1_RESTRICT r, const secp256k ret = secp256k1_fe_equal(&t1, a); #ifdef VERIFY - if (!ret) { - secp256k1_fe_negate(&t1, &t1, 1); - secp256k1_fe_normalize_var(&t1); - VERIFY_CHECK(secp256k1_fe_equal(&t1, a)); + { + secp256k1_fe t1_ver; + if (!ret) { + secp256k1_fe_negate(&t1_ver, 
&t1, 1); + secp256k1_fe_normalize_var(&t1_ver); + VERIFY_CHECK(secp256k1_fe_equal(&t1_ver, a)); + } } #endif return ret; diff --git a/src/group_impl.h b/src/group_impl.h index 537be32ff6..633e5a9112 100644 --- a/src/group_impl.h +++ b/src/group_impl.h @@ -198,8 +198,11 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a size_t i; size_t last_i = SIZE_MAX; #ifdef VERIFY - for (i = 0; i < len; i++) { - SECP256K1_GEJ_VERIFY(&a[i]); + { + size_t i_ver; + for (i_ver = 0; i_ver < len; i_ver++) { + SECP256K1_GEJ_VERIFY(&a[i_ver]); + } } #endif @@ -240,8 +243,11 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a } #ifdef VERIFY - for (i = 0; i < len; i++) { - SECP256K1_GE_VERIFY(&r[i]); + { + size_t i_ver; + for (i_ver = 0; i_ver < len; i_ver++) { + SECP256K1_GE_VERIFY(&r[i_ver]); + } } #endif } @@ -250,9 +256,12 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se size_t i; secp256k1_fe zs; #ifdef VERIFY - for (i = 0; i < len; i++) { - SECP256K1_GE_VERIFY(&a[i]); - SECP256K1_FE_VERIFY(&zr[i]); + { + size_t i_ver; + for (i_ver = 0; i_ver < len; i_ver++) { + SECP256K1_GE_VERIFY(&a[i_ver]); + SECP256K1_FE_VERIFY(&zr[i_ver]); + } } #endif @@ -273,8 +282,11 @@ static void secp256k1_ge_table_set_globalz(size_t len, secp256k1_ge *a, const se } #ifdef VERIFY - for (i = 0; i < len; i++) { - SECP256K1_GE_VERIFY(&a[i]); + { + size_t i_ver; + for (i_ver = 0; i_ver < len; i_ver++) { + SECP256K1_GE_VERIFY(&a[i_ver]); + } } #endif } diff --git a/src/modinv32_impl.h b/src/modinv32_impl.h index 75eb354ff0..b16e3e3b02 100644 --- a/src/modinv32_impl.h +++ b/src/modinv32_impl.h @@ -67,14 +67,16 @@ static void secp256k1_modinv32_normalize_30(secp256k1_modinv32_signed30 *r, int3 volatile int32_t cond_add, cond_negate; #ifdef VERIFY - /* Verify that all limbs are in range (-2^30,2^30). 
*/ - int i; - for (i = 0; i < 9; ++i) { - VERIFY_CHECK(r->v[i] >= -M30); - VERIFY_CHECK(r->v[i] <= M30); + { + /* Verify that all limbs are in range (-2^30,2^30). */ + int i_ver; + for (i_ver = 0; i_ver < 9; ++i_ver) { + VERIFY_CHECK(r->v[i_ver] >= -M30); + VERIFY_CHECK(r->v[i_ver] <= M30); + } + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ } - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. @@ -586,7 +588,7 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256 secp256k1_modinv32_signed30 f = modinfo->modulus; secp256k1_modinv32_signed30 g = *x; #ifdef VERIFY - int i = 0; + int i_ver = 0; #endif int j, len = 9; int32_t eta = -1; /* eta = -delta; delta is initially 1 (faster for the variable-time code) */ @@ -631,7 +633,7 @@ static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256 --len; } - VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */ + VERIFY_CHECK(++i_ver < 25); /* We should never need more than 25*30 = 750 divsteps */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ diff --git a/src/modinv64_impl.h b/src/modinv64_impl.h index 0dc1e80696..4ea2d5aeff 100644 --- a/src/modinv64_impl.h +++ b/src/modinv64_impl.h @@ -91,14 +91,16 @@ static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int6 volatile int64_t cond_add, 
cond_negate; #ifdef VERIFY - /* Verify that all limbs are in range (-2^62,2^62). */ - int i; - for (i = 0; i < 5; ++i) { - VERIFY_CHECK(r->v[i] >= -M62); - VERIFY_CHECK(r->v[i] <= M62); + { + /* Verify that all limbs are in range (-2^62,2^62). */ + int i_ver; + for (i_ver = 0; i_ver < 5; ++i_ver) { + VERIFY_CHECK(r->v[i_ver] >= -M62); + VERIFY_CHECK(r->v[i_ver] <= M62); + } + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ } - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ - VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ #endif /* In a first step, add the modulus if the input is negative, and then negate if requested. @@ -642,7 +644,7 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256 secp256k1_modinv64_signed62 f = modinfo->modulus; secp256k1_modinv64_signed62 g = *x; #ifdef VERIFY - int i = 0; + int i_ver = 0; #endif int j, len = 5; int64_t eta = -1; /* eta = -delta; delta is initially 1 */ @@ -686,7 +688,7 @@ static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256 --len; } - VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */ + VERIFY_CHECK(++i_ver < 12); /* We should never need more than 12*62 = 744 divsteps */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ diff --git a/src/modules/ellswift/main_impl.h b/src/modules/ellswift/main_impl.h index b54ec08a22..01c63f7da2 100644 --- a/src/modules/ellswift/main_impl.h +++ b/src/modules/ellswift/main_impl.h @@ -187,7 +187,7 @@ static int 
secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_ * - If (c & 5) = 5: return -w*(c4*u + v). */ secp256k1_fe x = *x_in, u = *u_in, g, v, s, m, r, q; - int ret; + int ret_ver; secp256k1_fe_normalize_weak(&x); secp256k1_fe_normalize_weak(&u); @@ -266,11 +266,11 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_ secp256k1_fe_mul(&q, &q, &s); /* q = s*(4*(u^3+7)+3*u^2*s) */ secp256k1_fe_negate(&q, &q, 1); /* q = -s*(4*(u^3+7)+3*u^2*s) */ if (!secp256k1_fe_is_square_var(&q)) return 0; - ret = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */ + ret_ver = secp256k1_fe_sqrt(&r, &q); /* r = sqrt(-s*(4*(u^3+7)+3*u^2*s)) */ #ifdef VERIFY - VERIFY_CHECK(ret); + VERIFY_CHECK(ret_ver); #else - (void)ret; + (void)ret_ver; #endif /* If (c & 1) = 1 and r = 0, fail. */ @@ -287,8 +287,8 @@ static int secp256k1_ellswift_xswiftec_inv_var(secp256k1_fe *t, const secp256k1_ } /* Let w = sqrt(s). */ - ret = secp256k1_fe_sqrt(&m, &s); /* m = sqrt(s) = w */ - VERIFY_CHECK(ret); + ret_ver = secp256k1_fe_sqrt(&m, &s); /* m = sqrt(s) = w */ + VERIFY_CHECK(ret_ver); /* Return logic. */ if ((c & 5) == 0 || (c & 5) == 5) { @@ -311,7 +311,7 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256 secp256k1_sha256 hash = *hasher; unsigned char buf4[4]; #ifdef VERIFY - size_t blocks = hash.bytes >> 6; + size_t blocks_ver = hash.bytes >> 6; #endif buf4[0] = cnt; buf4[1] = cnt >> 8; @@ -321,7 +321,7 @@ static void secp256k1_ellswift_prng(unsigned char* out32, const secp256k1_sha256 secp256k1_sha256_finalize(&hash, out32); /* Writing and finalizing together should trigger exactly one SHA256 compression. */ - VERIFY_CHECK(((hash.bytes) >> 6) == (blocks + 1)); + VERIFY_CHECK(((hash.bytes) >> 6) == (blocks_ver + 1)); } /** Find an ElligatorSwift encoding (u, t) for X coordinate x, and random Y coordinate. 
@@ -407,17 +407,17 @@ int secp256k1_ellswift_encode(const secp256k1_context *ctx, unsigned char *ell64 secp256k1_fe t; unsigned char p64[64] = {0}; size_t ser_size; - int ser_ret; + int ser_ret_ver; secp256k1_sha256 hash; /* Set up hasher state; the used RNG is H(pubkey || "\x00"*31 || rnd32 || cnt++), using * BIP340 tagged hash with tag "secp256k1_ellswift_encode". */ secp256k1_ellswift_sha256_init_encode(&hash); - ser_ret = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1); + ser_ret_ver = secp256k1_eckey_pubkey_serialize(&p, p64, &ser_size, 1); #ifdef VERIFY - VERIFY_CHECK(ser_ret && ser_size == 33); + VERIFY_CHECK(ser_ret_ver && ser_size == 33); #else - (void)ser_ret; + (void)ser_ret_ver; #endif secp256k1_sha256_write(&hash, p64, sizeof(p64)); secp256k1_sha256_write(&hash, rnd32, 32); diff --git a/src/scalar_4x64_impl.h b/src/scalar_4x64_impl.h index 82cd957f16..a2337cdc5f 100644 --- a/src/scalar_4x64_impl.h +++ b/src/scalar_4x64_impl.h @@ -227,12 +227,15 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64); r->d[3] = secp256k1_u128_to_u64(&t) + (a->d[3] >> 1) + (SECP256K1_N_H_3 & mask); #ifdef VERIFY - /* The line above only computed the bottom 64 bits of r->d[3]; redo the computation - * in full 128 bits to make sure the top 64 bits are indeed zero. */ - secp256k1_u128_accum_u64(&t, a->d[3] >> 1); - secp256k1_u128_accum_u64(&t, SECP256K1_N_H_3 & mask); - secp256k1_u128_rshift(&t, 64); - VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0); + { + /* The line above only computed the bottom 64 bits of r->d[3]; redo the computation + * in full 128 bits to make sure the top 64 bits are indeed zero. 
*/ + secp256k1_uint128 t_ver = t; + secp256k1_u128_accum_u64(&t_ver, a->d[3] >> 1); + secp256k1_u128_accum_u64(&t_ver, SECP256K1_N_H_3 & mask); + secp256k1_u128_rshift(&t_ver, 64); + VERIFY_CHECK(secp256k1_u128_to_u64(&t_ver) == 0); + } SECP256K1_SCALAR_VERIFY(r); #endif @@ -969,32 +972,26 @@ static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = { static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { secp256k1_modinv64_signed62 s; -#ifdef VERIFY - int zero_in = secp256k1_scalar_is_zero(x); -#endif SECP256K1_SCALAR_VERIFY(x); secp256k1_scalar_to_signed62(&s, x); secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar); secp256k1_scalar_from_signed62(r, &s); + VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x)); SECP256K1_SCALAR_VERIFY(r); - VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); } static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { secp256k1_modinv64_signed62 s; -#ifdef VERIFY - int zero_in = secp256k1_scalar_is_zero(x); -#endif SECP256K1_SCALAR_VERIFY(x); secp256k1_scalar_to_signed62(&s, x); secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar); secp256k1_scalar_from_signed62(r, &s); + VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x)); SECP256K1_SCALAR_VERIFY(r); - VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); } SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) { diff --git a/src/scalar_8x32_impl.h b/src/scalar_8x32_impl.h index 58ae51bc02..4d18b69bad 100644 --- a/src/scalar_8x32_impl.h +++ b/src/scalar_8x32_impl.h @@ -789,32 +789,26 @@ static const secp256k1_modinv32_modinfo secp256k1_const_modinfo_scalar = { static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) { secp256k1_modinv32_signed30 s; -#ifdef VERIFY - int zero_in = secp256k1_scalar_is_zero(x); -#endif SECP256K1_SCALAR_VERIFY(x); secp256k1_scalar_to_signed30(&s, x); 
secp256k1_modinv32(&s, &secp256k1_const_modinfo_scalar); secp256k1_scalar_from_signed30(r, &s); + VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x)); SECP256K1_SCALAR_VERIFY(r); - VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); } static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) { secp256k1_modinv32_signed30 s; -#ifdef VERIFY - int zero_in = secp256k1_scalar_is_zero(x); -#endif SECP256K1_SCALAR_VERIFY(x); secp256k1_scalar_to_signed30(&s, x); secp256k1_modinv32_var(&s, &secp256k1_const_modinfo_scalar); secp256k1_scalar_from_signed30(r, &s); + VERIFY_CHECK(secp256k1_scalar_is_zero(r) == secp256k1_scalar_is_zero(x)); SECP256K1_SCALAR_VERIFY(r); - VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in); } SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {