@@ -42,18 +42,18 @@ SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsig
42
42
r -> d [2 ] = 0 ;
43
43
r -> d [3 ] = 0 ;
44
44
45
- secp256k1_scalar_verify (r );
45
+ SECP256K1_SCALAR_VERIFY (r );
46
46
}
47
47
48
48
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits (const secp256k1_scalar * a , unsigned int offset , unsigned int count ) {
49
- secp256k1_scalar_verify (a );
49
+ SECP256K1_SCALAR_VERIFY (a );
50
50
VERIFY_CHECK ((offset + count - 1 ) >> 6 == offset >> 6 );
51
51
52
52
return (a -> d [offset >> 6 ] >> (offset & 0x3F )) & ((((uint64_t )1 ) << count ) - 1 );
53
53
}
54
54
55
55
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var (const secp256k1_scalar * a , unsigned int offset , unsigned int count ) {
56
- secp256k1_scalar_verify (a );
56
+ SECP256K1_SCALAR_VERIFY (a );
57
57
VERIFY_CHECK (count < 32 );
58
58
VERIFY_CHECK (offset + count <= 256 );
59
59
@@ -93,15 +93,15 @@ SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigne
93
93
secp256k1_u128_accum_u64 (& t , r -> d [3 ]);
94
94
r -> d [3 ] = secp256k1_u128_to_u64 (& t );
95
95
96
- secp256k1_scalar_verify (r );
96
+ SECP256K1_SCALAR_VERIFY (r );
97
97
return overflow ;
98
98
}
99
99
100
100
static int secp256k1_scalar_add (secp256k1_scalar * r , const secp256k1_scalar * a , const secp256k1_scalar * b ) {
101
101
int overflow ;
102
102
secp256k1_uint128 t ;
103
- secp256k1_scalar_verify (a );
104
- secp256k1_scalar_verify (b );
103
+ SECP256K1_SCALAR_VERIFY (a );
104
+ SECP256K1_SCALAR_VERIFY (b );
105
105
106
106
secp256k1_u128_from_u64 (& t , a -> d [0 ]);
107
107
secp256k1_u128_accum_u64 (& t , b -> d [0 ]);
@@ -119,14 +119,14 @@ static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a,
119
119
VERIFY_CHECK (overflow == 0 || overflow == 1 );
120
120
secp256k1_scalar_reduce (r , overflow );
121
121
122
- secp256k1_scalar_verify (r );
122
+ SECP256K1_SCALAR_VERIFY (r );
123
123
return overflow ;
124
124
}
125
125
126
126
static void secp256k1_scalar_cadd_bit (secp256k1_scalar * r , unsigned int bit , int flag ) {
127
127
secp256k1_uint128 t ;
128
128
volatile int vflag = flag ;
129
- secp256k1_scalar_verify (r );
129
+ SECP256K1_SCALAR_VERIFY (r );
130
130
VERIFY_CHECK (bit < 256 );
131
131
132
132
bit += ((uint32_t ) vflag - 1 ) & 0x100 ; /* forcing (bit >> 6) > 3 makes this a noop */
@@ -143,7 +143,7 @@ static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int
143
143
secp256k1_u128_accum_u64 (& t , ((uint64_t )((bit >> 6 ) == 3 )) << (bit & 0x3F ));
144
144
r -> d [3 ] = secp256k1_u128_to_u64 (& t );
145
145
146
- secp256k1_scalar_verify (r );
146
+ SECP256K1_SCALAR_VERIFY (r );
147
147
VERIFY_CHECK (secp256k1_u128_hi_u64 (& t ) == 0 );
148
148
}
149
149
@@ -158,11 +158,11 @@ static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b
158
158
* overflow = over ;
159
159
}
160
160
161
- secp256k1_scalar_verify (r );
161
+ SECP256K1_SCALAR_VERIFY (r );
162
162
}
163
163
164
164
static void secp256k1_scalar_get_b32 (unsigned char * bin , const secp256k1_scalar * a ) {
165
- secp256k1_scalar_verify (a );
165
+ SECP256K1_SCALAR_VERIFY (a );
166
166
167
167
secp256k1_write_be64 (& bin [0 ], a -> d [3 ]);
168
168
secp256k1_write_be64 (& bin [8 ], a -> d [2 ]);
@@ -171,15 +171,15 @@ static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar*
171
171
}
172
172
173
173
SECP256K1_INLINE static int secp256k1_scalar_is_zero (const secp256k1_scalar * a ) {
174
- secp256k1_scalar_verify (a );
174
+ SECP256K1_SCALAR_VERIFY (a );
175
175
176
176
return (a -> d [0 ] | a -> d [1 ] | a -> d [2 ] | a -> d [3 ]) == 0 ;
177
177
}
178
178
179
179
static void secp256k1_scalar_negate (secp256k1_scalar * r , const secp256k1_scalar * a ) {
180
180
uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero (a ) == 0 );
181
181
secp256k1_uint128 t ;
182
- secp256k1_scalar_verify (a );
182
+ SECP256K1_SCALAR_VERIFY (a );
183
183
184
184
secp256k1_u128_from_u64 (& t , ~a -> d [0 ]);
185
185
secp256k1_u128_accum_u64 (& t , SECP256K1_N_0 + 1 );
@@ -194,19 +194,19 @@ static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar
194
194
secp256k1_u128_accum_u64 (& t , SECP256K1_N_3 );
195
195
r -> d [3 ] = secp256k1_u128_to_u64 (& t ) & nonzero ;
196
196
197
- secp256k1_scalar_verify (r );
197
+ SECP256K1_SCALAR_VERIFY (r );
198
198
}
199
199
200
200
SECP256K1_INLINE static int secp256k1_scalar_is_one (const secp256k1_scalar * a ) {
201
- secp256k1_scalar_verify (a );
201
+ SECP256K1_SCALAR_VERIFY (a );
202
202
203
203
return ((a -> d [0 ] ^ 1 ) | a -> d [1 ] | a -> d [2 ] | a -> d [3 ]) == 0 ;
204
204
}
205
205
206
206
static int secp256k1_scalar_is_high (const secp256k1_scalar * a ) {
207
207
int yes = 0 ;
208
208
int no = 0 ;
209
- secp256k1_scalar_verify (a );
209
+ SECP256K1_SCALAR_VERIFY (a );
210
210
211
211
no |= (a -> d [3 ] < SECP256K1_N_H_3 );
212
212
yes |= (a -> d [3 ] > SECP256K1_N_H_3 ) & ~no ;
@@ -224,7 +224,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
224
224
uint64_t mask = - vflag ;
225
225
uint64_t nonzero = (secp256k1_scalar_is_zero (r ) != 0 ) - 1 ;
226
226
secp256k1_uint128 t ;
227
- secp256k1_scalar_verify (r );
227
+ SECP256K1_SCALAR_VERIFY (r );
228
228
229
229
secp256k1_u128_from_u64 (& t , r -> d [0 ] ^ mask );
230
230
secp256k1_u128_accum_u64 (& t , (SECP256K1_N_0 + 1 ) & mask );
@@ -239,7 +239,7 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
239
239
secp256k1_u128_accum_u64 (& t , SECP256K1_N_3 & mask );
240
240
r -> d [3 ] = secp256k1_u128_to_u64 (& t ) & nonzero ;
241
241
242
- secp256k1_scalar_verify (r );
242
+ SECP256K1_SCALAR_VERIFY (r );
243
243
return 2 * (mask == 0 ) - 1 ;
244
244
}
245
245
@@ -798,18 +798,18 @@ static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, c
798
798
799
799
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    /* r = a * b: form the full 512-bit product, then reduce it back to a
     * 256-bit scalar. */
    uint64_t product[8];
    SECP256K1_SCALAR_VERIFY(a);
    SECP256K1_SCALAR_VERIFY(b);

    secp256k1_scalar_mul_512(product, a, b);
    secp256k1_scalar_reduce_512(r, product);

    SECP256K1_SCALAR_VERIFY(r);
}
809
809
810
810
static int secp256k1_scalar_shr_int (secp256k1_scalar * r , int n ) {
811
811
int ret ;
812
- secp256k1_scalar_verify (r );
812
+ SECP256K1_SCALAR_VERIFY (r );
813
813
VERIFY_CHECK (n > 0 );
814
814
VERIFY_CHECK (n < 16 );
815
815
@@ -819,12 +819,12 @@ static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
819
819
r -> d [2 ] = (r -> d [2 ] >> n ) + (r -> d [3 ] << (64 - n ));
820
820
r -> d [3 ] = (r -> d [3 ] >> n );
821
821
822
- secp256k1_scalar_verify (r );
822
+ SECP256K1_SCALAR_VERIFY (r );
823
823
return ret ;
824
824
}
825
825
826
826
static void secp256k1_scalar_split_128 (secp256k1_scalar * r1 , secp256k1_scalar * r2 , const secp256k1_scalar * k ) {
827
- secp256k1_scalar_verify (k );
827
+ SECP256K1_SCALAR_VERIFY (k );
828
828
829
829
r1 -> d [0 ] = k -> d [0 ];
830
830
r1 -> d [1 ] = k -> d [1 ];
@@ -835,13 +835,13 @@ static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r
835
835
r2 -> d [2 ] = 0 ;
836
836
r2 -> d [3 ] = 0 ;
837
837
838
- secp256k1_scalar_verify (r1 );
839
- secp256k1_scalar_verify (r2 );
838
+ SECP256K1_SCALAR_VERIFY (r1 );
839
+ SECP256K1_SCALAR_VERIFY (r2 );
840
840
}
841
841
842
842
SECP256K1_INLINE static int secp256k1_scalar_eq (const secp256k1_scalar * a , const secp256k1_scalar * b ) {
843
- secp256k1_scalar_verify (a );
844
- secp256k1_scalar_verify (b );
843
+ SECP256K1_SCALAR_VERIFY (a );
844
+ SECP256K1_SCALAR_VERIFY (b );
845
845
846
846
return ((a -> d [0 ] ^ b -> d [0 ]) | (a -> d [1 ] ^ b -> d [1 ]) | (a -> d [2 ] ^ b -> d [2 ]) | (a -> d [3 ] ^ b -> d [3 ])) == 0 ;
847
847
}
@@ -851,8 +851,8 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
851
851
unsigned int shiftlimbs ;
852
852
unsigned int shiftlow ;
853
853
unsigned int shifthigh ;
854
- secp256k1_scalar_verify (a );
855
- secp256k1_scalar_verify (b );
854
+ SECP256K1_SCALAR_VERIFY (a );
855
+ SECP256K1_SCALAR_VERIFY (b );
856
856
VERIFY_CHECK (shift >= 256 );
857
857
858
858
secp256k1_scalar_mul_512 (l , a , b );
@@ -865,13 +865,13 @@ SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r,
865
865
r -> d [3 ] = shift < 320 ? (l [3 + shiftlimbs ] >> shiftlow ) : 0 ;
866
866
secp256k1_scalar_cadd_bit (r , 0 , (l [(shift - 1 ) >> 6 ] >> ((shift - 1 ) & 0x3f )) & 1 );
867
867
868
- secp256k1_scalar_verify (r );
868
+ SECP256K1_SCALAR_VERIFY (r );
869
869
}
870
870
871
871
static SECP256K1_INLINE void secp256k1_scalar_cmov (secp256k1_scalar * r , const secp256k1_scalar * a , int flag ) {
872
872
uint64_t mask0 , mask1 ;
873
873
volatile int vflag = flag ;
874
- secp256k1_scalar_verify (a );
874
+ SECP256K1_SCALAR_VERIFY (a );
875
875
SECP256K1_CHECKMEM_CHECK_VERIFY (r -> d , sizeof (r -> d ));
876
876
877
877
mask0 = vflag + ~((uint64_t )0 );
@@ -881,7 +881,7 @@ static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const se
881
881
r -> d [2 ] = (r -> d [2 ] & mask0 ) | (a -> d [2 ] & mask1 );
882
882
r -> d [3 ] = (r -> d [3 ] & mask0 ) | (a -> d [3 ] & mask1 );
883
883
884
- secp256k1_scalar_verify (r );
884
+ SECP256K1_SCALAR_VERIFY (r );
885
885
}
886
886
887
887
static void secp256k1_scalar_from_signed62 (secp256k1_scalar * r , const secp256k1_modinv64_signed62 * a ) {
@@ -901,13 +901,13 @@ static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_
901
901
r -> d [2 ] = a2 >> 4 | a3 << 58 ;
902
902
r -> d [3 ] = a3 >> 6 | a4 << 56 ;
903
903
904
- secp256k1_scalar_verify (r );
904
+ SECP256K1_SCALAR_VERIFY (r );
905
905
}
906
906
907
907
static void secp256k1_scalar_to_signed62 (secp256k1_modinv64_signed62 * r , const secp256k1_scalar * a ) {
908
908
const uint64_t M62 = UINT64_MAX >> 2 ;
909
909
const uint64_t a0 = a -> d [0 ], a1 = a -> d [1 ], a2 = a -> d [2 ], a3 = a -> d [3 ];
910
- secp256k1_scalar_verify (a );
910
+ SECP256K1_SCALAR_VERIFY (a );
911
911
912
912
r -> v [0 ] = a0 & M62 ;
913
913
r -> v [1 ] = (a0 >> 62 | a1 << 2 ) & M62 ;
@@ -926,13 +926,13 @@ static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar
926
926
#ifdef VERIFY
927
927
int zero_in = secp256k1_scalar_is_zero (x );
928
928
#endif
929
- secp256k1_scalar_verify (x );
929
+ SECP256K1_SCALAR_VERIFY (x );
930
930
931
931
secp256k1_scalar_to_signed62 (& s , x );
932
932
secp256k1_modinv64 (& s , & secp256k1_const_modinfo_scalar );
933
933
secp256k1_scalar_from_signed62 (r , & s );
934
934
935
- secp256k1_scalar_verify (r );
935
+ SECP256K1_SCALAR_VERIFY (r );
936
936
VERIFY_CHECK (secp256k1_scalar_is_zero (r ) == zero_in );
937
937
}
938
938
@@ -941,18 +941,18 @@ static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_sc
941
941
#ifdef VERIFY
942
942
int zero_in = secp256k1_scalar_is_zero (x );
943
943
#endif
944
- secp256k1_scalar_verify (x );
944
+ SECP256K1_SCALAR_VERIFY (x );
945
945
946
946
secp256k1_scalar_to_signed62 (& s , x );
947
947
secp256k1_modinv64_var (& s , & secp256k1_const_modinfo_scalar );
948
948
secp256k1_scalar_from_signed62 (r , & s );
949
949
950
- secp256k1_scalar_verify (r );
950
+ SECP256K1_SCALAR_VERIFY (r );
951
951
VERIFY_CHECK (secp256k1_scalar_is_zero (r ) == zero_in );
952
952
}
953
953
954
954
SECP256K1_INLINE static int secp256k1_scalar_is_even (const secp256k1_scalar * a ) {
955
- secp256k1_scalar_verify (a );
955
+ SECP256K1_SCALAR_VERIFY (a );
956
956
957
957
return !(a -> d [0 ] & 1 );
958
958
}
0 commit comments