@@ -185,70 +185,55 @@ static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
-    uint64_t tl, th; \
-    { \
-        uint128_t t = (uint128_t)a * b; \
-        th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
-        tl = t; \
-    } \
-    c0 += tl; /* overflow is handled on the next line */ \
-    th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
-    c1 += th; /* overflow is handled on the next line */ \
-    c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \
-    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
+    uint128_t t = (uint128_t)a * b + c0; \
+    c0 = t; \
+    t >>= 64; \
+    t += c1; \
+    c1 = t; \
+    c2 += (t >> 64); \
+    VERIFY_CHECK(c2 >= (t >> 64)); /* c2 must never overflow. */ \
}
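For readability, here is the new muladd flow written out as a standalone helper rather than a macro. This is a minimal sketch, not part of the patch; it assumes uint128_t is a typedef for unsigned __int128 (the 64-bit configuration) and that the accumulator (c0,c1,c2) is held in three uint64_t limbs passed by pointer.

    #include <stdint.h>

    typedef unsigned __int128 uint128_t; /* assumption: matches the 64-bit build's typedef */

    /* Hypothetical helper mirroring the new muladd: add a*b to (c0,c1,c2). */
    static void muladd_sketch(uint64_t a, uint64_t b, uint64_t *c0, uint64_t *c1, uint64_t *c2) {
        uint128_t t = (uint128_t)a * b + *c0; /* a*b + c0 <= 2^128 - 2^64, so no 128-bit overflow */
        *c0 = (uint64_t)t;                    /* new low limb */
        t >>= 64;                             /* carry into the middle limb, at most 0xFFFFFFFFFFFFFFFF */
        t += *c1;                             /* < 2^65, so the next shift yields 0 or 1 */
        *c1 = (uint64_t)t;
        *c2 += (uint64_t)(t >> 64);           /* top limb absorbs the final carry */
    }

The VERIFY_CHECK in the macro asserts that this last addition did not wrap, which is exactly the documented contract that c2 never overflows.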
/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
-    uint64_t tl, th; \
-    { \
-        uint128_t t = (uint128_t)a * b; \
-        th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
-        tl = t; \
-    } \
-    c0 += tl; /* overflow is handled on the next line */ \
-    th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
-    c1 += th; /* never overflows by contract (verified in the next line) */ \
-    VERIFY_CHECK(c1 >= th); \
+    uint128_t t = (uint128_t)a * b + c0; \
+    c0 = t; \
+    c1 += (t >> 64); \
+    VERIFY_CHECK(c1 >= (t >> 64)); /* c1 must never overflow. */ \
}
/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd2(a,b) { \
-    uint64_t tl, th, th2, tl2; \
-    { \
-        uint128_t t = (uint128_t)a * b; \
-        th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
-        tl = t; \
-    } \
-    th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
-    c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \
-    VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
-    tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
-    th2 += (tl2 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
-    c0 += tl2; /* overflow is handled on the next line */ \
-    th2 += (c0 < tl2); /* second overflow is handled on the next line */ \
-    c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
-    VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
-    c1 += th2; /* overflow is handled on the next line */ \
-    c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \
-    VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
+    uint128_t t1 = (uint128_t)a * b; \
+    uint64_t t1l = t1; \
+    uint64_t t1h = t1 >> 64; \
+    uint128_t t = (uint128_t)t1l + t1l + c0; \
+    c0 = t; \
+    t >>= 64; \
+    t += (uint128_t)t1h + t1h + c1; /* 128-bit sum: c1 + 2*t1h does not fit in 64 bits in general */ \
+    c1 = t; \
+    c2 += (t >> 64); \
+    VERIFY_CHECK(c2 >= (t >> 64)); /* c2 must never overflow. */ \
}
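The doubling in muladd2 is the delicate part: 2*t1l + c0 always fits in 128 bits, but the middle-limb sum c1 + 2*t1h can exceed 64 bits, so it has to be formed in 128-bit arithmetic as well. A minimal sketch under the same assumptions as above, with hypothetical names:

    /* Hypothetical helper mirroring muladd2: add 2*a*b to (c0,c1,c2). */
    static void muladd2_sketch(uint64_t a, uint64_t b, uint64_t *c0, uint64_t *c1, uint64_t *c2) {
        uint128_t p = (uint128_t)a * b;
        uint64_t pl = (uint64_t)p;
        uint64_t ph = (uint64_t)(p >> 64);        /* at most 0xFFFFFFFFFFFFFFFE */
        uint128_t t = (uint128_t)pl + pl + *c0;   /* < 3*2^64, fits easily in 128 bits */
        *c0 = (uint64_t)t;
        t >>= 64;                                 /* carry of 0, 1 or 2 */
        t += (uint128_t)ph + ph + *c1;            /* 128-bit sum: ph + ph + c1 can exceed 2^64 */
        *c1 = (uint64_t)t;
        *c2 += (uint64_t)(t >> 64);               /* at most 2 added to the top limb */
    }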
/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
-    unsigned int over; \
-    c0 += (a); /* overflow is handled on the next line */ \
-    over = (c0 < (a)); \
-    c1 += over; /* overflow is handled on the next line */ \
-    c2 += (c1 < over); /* never overflows by contract */ \
+    uint128_t t = (uint128_t)a + c0; \
+    c0 = t; \
+    t >>= 64; \
+    t += c1; \
+    c1 = t; \
+    c2 += (t >> 64); \
+    VERIFY_CHECK(c2 >= (t >> 64)); /* c2 must never overflow. */ \
}
/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
-    c0 += (a); /* overflow is handled on the next line */ \
-    c1 += (c0 < (a)); /* never overflows by contract (verified the next line) */ \
-    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
-    VERIFY_CHECK(c2 == 0); \
+    uint128_t t = (uint128_t)a + c0; \
+    c0 = t; \
+    c1 += (t >> 64); \
+    VERIFY_CHECK(c1 >= (t >> 64)); /* c1 must never overflow. */ \
+    VERIFY_CHECK(c2 == 0); /* c2 must be zero. */ \
}
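To see how the accumulator is consumed, here is an illustrative fragment of the schoolbook multiply that these macros serve, with hypothetical operand names a0..a2, b0..b2; extract(n) is assumed to behave as its comment below describes (store the low 64 bits of (c0,c1,c2) into n, then shift the number down one limb).

    uint64_t c0 = 0, c1 = 0, c2 = 0;
    uint64_t l0, l1, l2; /* low result limbs */
    muladd_fast(a0, b0);   /* column 0: a single product, so c2 stays zero */
    extract(l0);
    muladd(a0, b1);        /* column 1: two products, carries may reach c2 */
    muladd(a1, b0);
    extract(l1);
    muladd(a0, b2);        /* column 2, and so on for the remaining columns */
    muladd(a1, b1);
    muladd(a2, b0);
    extract(l2);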
/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */