Skip to content

Commit a0fb68a

Browse files
committed
introduce and use SECP256K1_SCALAR_VERIFY macro
By providing an uppercase variant of these verification functions, it becomes more clearly visible that this is test-only (VERIFY) code.
1 parent cf25c86 commit a0fb68a

5 files changed

+123
-123
lines changed

src/scalar.h

+1
Original file line numberDiff line numberDiff line change
@@ -100,5 +100,6 @@ static void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a
100100

101101
/** Check invariants on a scalar (no-op unless VERIFY is enabled). */
102102
static void secp256k1_scalar_verify(const secp256k1_scalar *r);
103+
#define SECP256K1_SCALAR_VERIFY(r) secp256k1_scalar_verify(r)
103104

104105
#endif /* SECP256K1_SCALAR_H */

src/scalar_4x64_impl.h

+40-40
Original file line numberDiff line numberDiff line change
@@ -42,18 +42,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
4242
r->d[2] = 0;
4343
r->d[3] = 0;
4444

45-
secp256k1_scalar_verify(r);
45+
SECP256K1_SCALAR_VERIFY(r);
4646
}
4747

4848
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
49-
secp256k1_scalar_verify(a);
49+
SECP256K1_SCALAR_VERIFY(a);
5050
VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
5151

5252
return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
5353
}
5454

5555
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
56-
secp256k1_scalar_verify(a);
56+
SECP256K1_SCALAR_VERIFY(a);
5757
VERIFY_CHECK(count < 32);
5858
VERIFY_CHECK(offset + count <= 256);
5959

@@ -93,15 +93,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
9393
secp256k1_u128_accum_u64(&t, r->d[3]);
9494
r->d[3] = secp256k1_u128_to_u64(&t);
9595

96-
secp256k1_scalar_verify(r);
96+
SECP256K1_SCALAR_VERIFY(r);
9797
return overflow;
9898
}
9999

100100
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
101101
int overflow;
102102
secp256k1_uint128 t;
103-
secp256k1_scalar_verify(a);
104-
secp256k1_scalar_verify(b);
103+
SECP256K1_SCALAR_VERIFY(a);
104+
SECP256K1_SCALAR_VERIFY(b);
105105

106106
secp256k1_u128_from_u64(&t, a->d[0]);
107107
secp256k1_u128_accum_u64(&t, b->d[0]);
@@ -119,14 +119,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
119119
VERIFY_CHECK(overflow == 0 || overflow == 1);
120120
secp256k1_scalar_reduce(r, overflow);
121121

122-
secp256k1_scalar_verify(r);
122+
SECP256K1_SCALAR_VERIFY(r);
123123
return overflow;
124124
}
125125

126126
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
127127
secp256k1_uint128 t;
128128
volatile int vflag = flag;
129-
secp256k1_scalar_verify(r);
129+
SECP256K1_SCALAR_VERIFY(r);
130130
VERIFY_CHECK(bit < 256);
131131

132132
bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
@@ -143,7 +143,7 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
143143
secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
144144
r->d[3] = secp256k1_u128_to_u64(&t);
145145

146-
secp256k1_scalar_verify(r);
146+
SECP256K1_SCALAR_VERIFY(r);
147147
VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
148148
}
149149

@@ -158,11 +158,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
158158
*overflow = over;
159159
}
160160

161-
secp256k1_scalar_verify(r);
161+
SECP256K1_SCALAR_VERIFY(r);
162162
}
163163

164164
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
165-
secp256k1_scalar_verify(a);
165+
SECP256K1_SCALAR_VERIFY(a);
166166

167167
secp256k1_write_be64(&bin[0], a->d[3]);
168168
secp256k1_write_be64(&bin[8], a->d[2]);
@@ -171,15 +171,15 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
171171
}
172172

173173
SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
174-
secp256k1_scalar_verify(a);
174+
SECP256K1_SCALAR_VERIFY(a);
175175

176176
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
177177
}
178178

179179
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
180180
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
181181
secp256k1_uint128 t;
182-
secp256k1_scalar_verify(a);
182+
SECP256K1_SCALAR_VERIFY(a);
183183

184184
secp256k1_u128_from_u64(&t, ~a->d[0]);
185185
secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
@@ -194,7 +194,7 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
194194
secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
195195
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
196196

197-
secp256k1_scalar_verify(r);
197+
SECP256K1_SCALAR_VERIFY(r);
198198
}
199199

200200
static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a) {
@@ -214,7 +214,7 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
214214
*/
215215
uint64_t mask = -(uint64_t)(a->d[0] & 1U);
216216
secp256k1_uint128 t;
217-
secp256k1_scalar_verify(a);
217+
SECP256K1_SCALAR_VERIFY(a);
218218

219219
secp256k1_u128_from_u64(&t, (a->d[0] >> 1) | (a->d[1] << 63));
220220
secp256k1_u128_accum_u64(&t, (SECP256K1_N_H_0 + 1U) & mask);
@@ -234,20 +234,20 @@ static void secp256k1_scalar_half(secp256k1_scalar *r, const secp256k1_scalar *a
234234
secp256k1_u128_rshift(&t, 64);
235235
VERIFY_CHECK(secp256k1_u128_to_u64(&t) == 0);
236236

237-
secp256k1_scalar_verify(r);
237+
SECP256K1_SCALAR_VERIFY(r);
238238
#endif
239239
}
240240

241241
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
242-
secp256k1_scalar_verify(a);
242+
SECP256K1_SCALAR_VERIFY(a);
243243

244244
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
245245
}
246246

247247
static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
248248
int yes = 0;
249249
int no = 0;
250-
secp256k1_scalar_verify(a);
250+
SECP256K1_SCALAR_VERIFY(a);
251251

252252
no |= (a->d[3] < SECP256K1_N_H_3);
253253
yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
@@ -265,7 +265,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
265265
uint64_t mask = -vflag;
266266
uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
267267
secp256k1_uint128 t;
268-
secp256k1_scalar_verify(r);
268+
SECP256K1_SCALAR_VERIFY(r);
269269

270270
secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
271271
secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
@@ -280,7 +280,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
280280
secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
281281
r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
282282

283-
secp256k1_scalar_verify(r);
283+
SECP256K1_SCALAR_VERIFY(r);
284284
return 2 * (mask == 0) - 1;
285285
}
286286

@@ -839,17 +839,17 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
839839

840840
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
841841
uint64_t l[8];
842-
secp256k1_scalar_verify(a);
843-
secp256k1_scalar_verify(b);
842+
SECP256K1_SCALAR_VERIFY(a);
843+
SECP256K1_SCALAR_VERIFY(b);
844844

845845
secp256k1_scalar_mul_512(l, a, b);
846846
secp256k1_scalar_reduce_512(r, l);
847847

848-
secp256k1_scalar_verify(r);
848+
SECP256K1_SCALAR_VERIFY(r);
849849
}
850850

851851
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
852-
secp256k1_scalar_verify(k);
852+
SECP256K1_SCALAR_VERIFY(k);
853853

854854
r1->d[0] = k->d[0];
855855
r1->d[1] = k->d[1];
@@ -860,13 +860,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
860860
r2->d[2] = 0;
861861
r2->d[3] = 0;
862862

863-
secp256k1_scalar_verify(r1);
864-
secp256k1_scalar_verify(r2);
863+
SECP256K1_SCALAR_VERIFY(r1);
864+
SECP256K1_SCALAR_VERIFY(r2);
865865
}
866866

867867
SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
868-
secp256k1_scalar_verify(a);
869-
secp256k1_scalar_verify(b);
868+
SECP256K1_SCALAR_VERIFY(a);
869+
SECP256K1_SCALAR_VERIFY(b);
870870

871871
return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
872872
}
@@ -876,8 +876,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
876876
unsigned int shiftlimbs;
877877
unsigned int shiftlow;
878878
unsigned int shifthigh;
879-
secp256k1_scalar_verify(a);
880-
secp256k1_scalar_verify(b);
879+
SECP256K1_SCALAR_VERIFY(a);
880+
SECP256K1_SCALAR_VERIFY(b);
881881
VERIFY_CHECK(shift >= 256);
882882

883883
secp256k1_scalar_mul_512(l, a, b);
@@ -890,13 +890,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
890890
r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
891891
secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
892892

893-
secp256k1_scalar_verify(r);
893+
SECP256K1_SCALAR_VERIFY(r);
894894
}
895895

896896
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
897897
uint64_t mask0, mask1;
898898
volatile int vflag = flag;
899-
secp256k1_scalar_verify(a);
899+
SECP256K1_SCALAR_VERIFY(a);
900900
SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
901901

902902
mask0 = vflag + ~((uint64_t)0);
@@ -906,7 +906,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
906906
r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
907907
r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
908908

909-
secp256k1_scalar_verify(r);
909+
SECP256K1_SCALAR_VERIFY(r);
910910
}
911911

912912
static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
@@ -926,13 +926,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
926926
r->d[2] = a2 >> 4 | a3 << 58;
927927
r->d[3] = a3 >> 6 | a4 << 56;
928928

929-
secp256k1_scalar_verify(r);
929+
SECP256K1_SCALAR_VERIFY(r);
930930
}
931931

932932
static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
933933
const uint64_t M62 = UINT64_MAX >> 2;
934934
const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];
935-
secp256k1_scalar_verify(a);
935+
SECP256K1_SCALAR_VERIFY(a);
936936

937937
r->v[0] = a0 & M62;
938938
r->v[1] = (a0 >> 62 | a1 << 2) & M62;
@@ -951,13 +951,13 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
951951
#ifdef VERIFY
952952
int zero_in = secp256k1_scalar_is_zero(x);
953953
#endif
954-
secp256k1_scalar_verify(x);
954+
SECP256K1_SCALAR_VERIFY(x);
955955

956956
secp256k1_scalar_to_signed62(&s, x);
957957
secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
958958
secp256k1_scalar_from_signed62(r, &s);
959959

960-
secp256k1_scalar_verify(r);
960+
SECP256K1_SCALAR_VERIFY(r);
961961
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
962962
}
963963

@@ -966,18 +966,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
966966
#ifdef VERIFY
967967
int zero_in = secp256k1_scalar_is_zero(x);
968968
#endif
969-
secp256k1_scalar_verify(x);
969+
SECP256K1_SCALAR_VERIFY(x);
970970

971971
secp256k1_scalar_to_signed62(&s, x);
972972
secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
973973
secp256k1_scalar_from_signed62(r, &s);
974974

975-
secp256k1_scalar_verify(r);
975+
SECP256K1_SCALAR_VERIFY(r);
976976
VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
977977
}
978978

979979
SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
980-
secp256k1_scalar_verify(a);
980+
SECP256K1_SCALAR_VERIFY(a);
981981

982982
return !(a->d[0] & 1);
983983
}

0 commit comments

Comments
 (0)