/**********************************************************************
* Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell *
* Distributed under the MIT software license, see the accompanying *
* file COPYING or http://www.opensource.org/licenses/mit-license.php.*
**********************************************************************/
#ifndef SECP256K1_ECMULT_GEN_IMPL_H
#define SECP256K1_ECMULT_GEN_IMPL_H
#include "scalar.h"
#include "group.h"
#include "ecmult_gen.h"
#include "hash_impl.h"
#ifdef USE_ECMULT_STATIC_PRECOMPUTATION
#include "ecmult_static_context.h"
#endif
static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) {
ctx->prec = NULL;
}
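/* Build the precomputed table used by secp256k1_ecmult_gen: either computed at runtime
 * here, or taken from the statically generated table when
 * USE_ECMULT_STATIC_PRECOMPUTATION is defined. */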
static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
#if USE_COMB
secp256k1_ge prec[COMB_POINTS_TOTAL + COMB_OFFSET];
secp256k1_gej u, sum;
int block, index, spacing, stride, tooth;
#else
secp256k1_ge prec[1024];
secp256k1_gej gj;
secp256k1_gej nums_gej;
int i, j;
#endif
#endif
if (ctx->prec != NULL) {
return;
}
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
#if USE_COMB
ctx->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])checked_malloc(cb, sizeof(*ctx->prec));
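/* Table layout: (*ctx->prec)[block][index] holds one affine point per block of teeth.
 * Each entry is a signed sum of the block's tooth base points; `index` enumerates the
 * sign patterns of the lower COMB_TEETH-1 teeth, while the top tooth is kept negative
 * in the table and its sign is applied at lookup time by conditional negation. */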
/* get the generator */
secp256k1_gej_set_ge(&u, &secp256k1_ge_const_g);
/* compute prec. */
{
secp256k1_gej ds[COMB_TEETH];
secp256k1_gej vs[COMB_POINTS_TOTAL + COMB_OFFSET];
int vs_pos = 0;
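/* Build the entries block by block: `sum` accumulates the block's tooth base points
 * (spaced 2^COMB_SPACING apart), and ds[tooth] stores twice each tooth base, so adding
 * it to an entry flips that tooth's sign from negative to positive. Starting from -sum
 * (all teeth negative), every sign pattern of the lower teeth is generated. */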
for (block = 0; block < COMB_BLOCKS; ++block) {
secp256k1_gej_set_infinity(&sum);
for (tooth = 0; tooth < COMB_TEETH; ++tooth) {
secp256k1_gej_add_var(&sum, &sum, &u, NULL);
secp256k1_gej_double(&u, &u);
ds[tooth] = u;
for (spacing = 1; spacing < COMB_SPACING; ++spacing) {
secp256k1_gej_double(&u, &u);
}
}
secp256k1_gej_neg(&vs[vs_pos++], &sum);
for (tooth = 0; tooth < (COMB_TEETH - 1); ++tooth) {
stride = 1 << tooth;
for (index = 0; index < stride; ++index, ++vs_pos) {
secp256k1_gej_add_var(&vs[vs_pos], &vs[vs_pos - stride], &ds[tooth], NULL);
}
}
}
VERIFY_CHECK(vs_pos == COMB_POINTS_TOTAL);
#if COMB_OFFSET
vs[COMB_POINTS_TOTAL] = ds[COMB_TEETH - 1];
#endif
secp256k1_ge_set_all_gej_var(prec, vs, COMB_POINTS_TOTAL + COMB_OFFSET, cb);
}
for (block = 0; block < COMB_BLOCKS; ++block) {
for (index = 0; index < COMB_POINTS; ++index) {
secp256k1_ge_to_storage(&(*ctx->prec)[block][index], &prec[block * COMB_POINTS + index]);
}
}
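/* When COMB_OFFSET is set, one extra correction point is kept alongside the table; it
 * is not part of the per-block entries but is folded into the blinded initial point by
 * secp256k1_ecmult_gen_blind. */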
#if COMB_OFFSET
ctx->offset = prec[COMB_POINTS_TOTAL];
#endif
#else
ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec));
/* get the generator */
secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g);
/* Construct a group element with no known corresponding scalar (nothing up my sleeve). */
{
static const unsigned char nums_b32[33] = "The scalar for this x is unknown";
secp256k1_fe nums_x;
secp256k1_ge nums_ge;
int r;
r = secp256k1_fe_set_b32(&nums_x, nums_b32);
(void)r;
VERIFY_CHECK(r);
r = secp256k1_ge_set_xo_var(&nums_ge, &nums_x, 0);
(void)r;
VERIFY_CHECK(r);
secp256k1_gej_set_ge(&nums_gej, &nums_ge);
/* Add G to make the bits in x uniformly distributed. */
secp256k1_gej_add_ge_var(&nums_gej, &nums_gej, &secp256k1_ge_const_g, NULL);
}
/* compute prec. */
{
secp256k1_gej precj[1024]; /* Jacobian versions of prec. */
secp256k1_gej gbase;
secp256k1_gej numsbase;
gbase = gj; /* 16^j * G */
numsbase = nums_gej; /* 2^j * nums. */
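/* Invariant: precj[j*16 + i] = i * 16^j * G + numsbase_j, where numsbase_j = 2^j * nums
 * for j < 63 and (1 - 2^63) * nums for j = 63 (set up by the j == 62 adjustment below).
 * The numsbase_j terms sum to zero over all 64 rows, so selecting one entry per row and
 * adding them up yields exactly n*G for a 256-bit scalar n. */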
for (j = 0; j < 64; j++) {
/* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). */
precj[j*16] = numsbase;
for (i = 1; i < 16; i++) {
secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL);
}
/* Multiply gbase by 16. */
for (i = 0; i < 4; i++) {
secp256k1_gej_double_var(&gbase, &gbase, NULL);
}
/* Multiply numsbase by 2. */
secp256k1_gej_double_var(&numsbase, &numsbase, NULL);
if (j == 62) {
/* In the last iteration, numsbase is (1 - 2^j) * nums instead. */
secp256k1_gej_neg(&numsbase, &numsbase);
secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL);
}
}
secp256k1_ge_set_all_gej_var(prec, precj, 1024, cb);
}
for (j = 0; j < 64; j++) {
for (i = 0; i < 16; i++) {
secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]);
}
}
#endif
#else
(void)cb;
#if USE_COMB
ctx->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])secp256k1_ecmult_gen_ctx_prec;
#if COMB_OFFSET
secp256k1_ge_from_storage(&ctx->offset, &secp256k1_ecmult_gen_ctx_offset);
#endif
#else
ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_gen_ctx_prec;
#endif
#endif
secp256k1_ecmult_gen_blind(ctx, NULL);
}
static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx) {
return ctx->prec != NULL;
}
static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst,
const secp256k1_ecmult_gen_context *src, const secp256k1_callback* cb) {
if (src->prec == NULL) {
dst->prec = NULL;
} else {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
#if USE_COMB
dst->prec = (secp256k1_ge_storage (*)[COMB_BLOCKS][COMB_POINTS])checked_malloc(cb, sizeof(*dst->prec));
#else
dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec));
#endif
memcpy(dst->prec, src->prec, sizeof(*dst->prec));
#else
(void)cb;
dst->prec = src->prec;
#endif
#if USE_COMB
#if COMB_OFFSET
dst->offset = src->offset;
#endif
#endif
dst->initial = src->initial;
dst->blind = src->blind;
}
}
static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) {
#ifndef USE_ECMULT_STATIC_PRECOMPUTATION
free(ctx->prec);
#endif
#if USE_COMB
#if COMB_OFFSET
secp256k1_ge_clear(&ctx->offset);
#endif
#endif
secp256k1_scalar_clear(&ctx->blind);
secp256k1_gej_clear(&ctx->initial);
ctx->prec = NULL;
}
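/* Compute r = gn*G in constant time, using the blinded initial point and the
 * precomputed table in ctx. */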
static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp256k1_gej *r, const secp256k1_scalar *gn) {
secp256k1_ge add;
secp256k1_ge_storage adds;
secp256k1_scalar gnb;
int bits;
#if USE_COMB
int abs, bit_pos, block, comb_off, index, sign;
#if !COMB_GROUPED
int bit, tooth;
#endif
uint32_t recoded[9];
secp256k1_fe neg;
memset(&adds, 0, sizeof(adds));
*r = ctx->initial;
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
secp256k1_scalar_add(&gnb, gn, &ctx->blind);
secp256k1_scalar_signed_recoding(recoded, &gnb, COMB_BITS + COMB_OFFSET);
comb_off = COMB_SPACING - 1;
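/* Evaluate the comb left to right: comb_off selects one bit column within each tooth's
 * COMB_SPACING span, starting from the most significant column, and the accumulator is
 * doubled once between columns. Within a column, every block contributes one
 * constant-time table lookup followed by an optional negation. */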
for (;;) {
bit_pos = comb_off;
for (block = 0; block < COMB_BLOCKS; ++block) {
#if COMB_GROUPED
bits = recoded[bit_pos >> 5] >> (bit_pos & 0x1F);
bit_pos += COMB_TEETH;
#else
bits = 0;
for (tooth = 0; tooth < COMB_TEETH; ++tooth) {
bit = recoded[bit_pos >> 5] >> (bit_pos & 0x1F);
bits &= ~(1 << tooth);
bits ^= bit << tooth;
bit_pos += COMB_SPACING;
}
#endif
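/* The top recoded bit of the group gives the overall sign. When it is set, the lower
 * bits are inverted to form abs, so that negating the selected table entry produces the
 * intended signed combination; the negation is applied to the y coordinate after the
 * constant-time scan below. */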
sign = (bits >> (COMB_TEETH - 1)) & 1;
abs = (bits ^ -sign) & COMB_MASK;
VERIFY_CHECK(sign == 0 || sign == 1);
VERIFY_CHECK(0 <= abs && abs < COMB_POINTS);
for (index = 0; index < COMB_POINTS; ++index) {
secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[block][index], index == abs);
}
secp256k1_ge_from_storage(&add, &adds);
secp256k1_fe_negate(&neg, &add.y, 1);
secp256k1_fe_cmov(&add.y, &neg, sign);
secp256k1_gej_add_ge(r, r, &add);
}
if (--comb_off < 0) {
break;
}
secp256k1_gej_double(r, r);
}
secp256k1_fe_clear(&neg);
memset(recoded, 0, sizeof(recoded));
abs = 0;
sign = 0;
#else
int i, j;
memset(&adds, 0, sizeof(adds));
*r = ctx->initial;
/* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. */
secp256k1_scalar_add(&gnb, gn, &ctx->blind);
add.infinity = 0;
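/* Each iteration selects prec[j][bits] = bits * 16^j * G + U_j, where the U_j
 * ("nothing up my sleeve") components were constructed to sum to zero, so accumulating
 * one entry per row adds (gn + blind)*G on top of the blinded initial point. */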
for (j = 0; j < 64; j++) {
bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4);
for (i = 0; i < 16; i++) {
/** This uses a conditional move to avoid any secret data in array indexes.
* _Any_ use of secret indexes has been demonstrated to result in timing
* sidechannels, even when the cache-line access patterns are uniform.
* See also:
* "A word of warning", CHES 2013 Rump Session, by Daniel J. Bernstein and Peter Schwabe
* (https://cryptojedi.org/peter/data/chesrump-20130822.pdf) and
* "Cache Attacks and Countermeasures: the Case of AES", RSA 2006,
* by Dag Arne Osvik, Adi Shamir, and Eran Tromer
* (http://www.tau.ac.il/~tromer/papers/cache.pdf)
*/
secp256k1_ge_storage_cmov(&adds, &(*ctx->prec)[j][i], i == bits);
}
secp256k1_ge_from_storage(&add, &adds);
secp256k1_gej_add_ge(r, r, &add);
}
#endif
bits = 0;
secp256k1_ge_clear(&add);
memset(&adds, 0, sizeof(adds));
secp256k1_scalar_clear(&gnb);
}
/* Setup blinding values for secp256k1_ecmult_gen. */
static void secp256k1_ecmult_gen_blind(secp256k1_ecmult_gen_context *ctx, const unsigned char *seed32) {
#if USE_COMB
int spacing;
#endif
secp256k1_scalar b;
secp256k1_gej gb;
secp256k1_fe s;
unsigned char nonce32[32];
secp256k1_rfc6979_hmac_sha256 rng;
int retry;
unsigned char keydata[64] = {0};
if (seed32 == NULL) {
/* When seed is NULL, reset the initial point and blinding value. */
secp256k1_gej_set_ge(&ctx->initial, &secp256k1_ge_const_g);
secp256k1_gej_neg(&ctx->initial, &ctx->initial);
secp256k1_scalar_set_int(&ctx->blind, 1);
#if USE_COMB
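/* secp256k1_ecmult_gen adds the initial point before performing COMB_SPACING-1
 * doublings, so the initial point contributes 2^(COMB_SPACING-1) times; scale the
 * blinding scalar by the same factor so the two still cancel. */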
for (spacing = 1; spacing < COMB_SPACING; ++spacing) {
secp256k1_scalar_add(&ctx->blind, &ctx->blind, &ctx->blind);
}
#if COMB_OFFSET
secp256k1_gej_add_ge(&ctx->initial, &ctx->initial, &ctx->offset);
#endif
#endif
}
/* The prior blinding value (if not reset) is chained forward by including it in the hash. */
secp256k1_scalar_get_b32(nonce32, &ctx->blind);
/** Using a CSPRNG allows a failure free interface, avoids needing large amounts of random data,
* and guards against weak or adversarial seeds. This is a simpler and safer interface than
* asking the caller for blinding values directly and expecting them to retry on failure.
*/
memcpy(keydata, nonce32, 32);
if (seed32 != NULL) {
memcpy(keydata + 32, seed32, 32);
}
secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, seed32 ? 64 : 32);
memset(keydata, 0, sizeof(keydata));
/* Retry for out of range results to achieve uniformity. */
do {
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
retry = !secp256k1_fe_set_b32(&s, nonce32);
retry |= secp256k1_fe_is_zero(&s);
} while (retry); /* Looping here is cryptographically unreachable: it would require the HMAC-SHA256 output to be >= the field prime p or zero. */
/* Randomize the projection to defend against multiplier sidechannels. */
secp256k1_gej_rescale(&ctx->initial, &s);
secp256k1_fe_clear(&s);
do {
secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
secp256k1_scalar_set_b32(&b, nonce32, &retry);
/* A blinding value of 0 works, but would undermine the projection hardening. */
retry |= secp256k1_scalar_is_zero(&b);
} while (retry); /* Looping here is cryptographically unreachable: it would require the HMAC-SHA256 output to be >= the group order or zero. */
secp256k1_rfc6979_hmac_sha256_finalize(&rng);
memset(nonce32, 0, 32);
secp256k1_ecmult_gen(ctx, &gb, &b);
secp256k1_scalar_negate(&b, &b);
ctx->blind = b;
ctx->initial = gb;
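/* As in the reset path above: scale the blinding scalar by 2^(COMB_SPACING-1) and fold
 * the COMB_OFFSET correction point into the initial point for the comb evaluation. */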
#if USE_COMB
for (spacing = 1; spacing < COMB_SPACING; ++spacing) {
secp256k1_scalar_add(&ctx->blind, &ctx->blind, &ctx->blind);
}
#if COMB_OFFSET
secp256k1_gej_add_ge(&ctx->initial, &ctx->initial, &ctx->offset);
#endif
#endif
secp256k1_scalar_clear(&b);
secp256k1_gej_clear(&gb);
}
#endif /* SECP256K1_ECMULT_GEN_IMPL_H */