@@ -43,7 +43,7 @@ static int secp256k1_scalar_inner_product(
 /* Computes the q-weighted inner product of two vectors of scalars
  * for elements starting from offset a and offset b respectively with the
  * given step.
- * Returns: Sum_{i=0..len-1}(a[offset_a + step*i] * b[offset_b + step*i]*q^(i+1)) */
+ * Returns: Sum_{i=0..len-1}(a[offset_a + step*i] * b[offset_b + step*i]*mu^(i+1)) */
 static int secp256k1_weighted_scalar_inner_product(
     secp256k1_scalar *res,
     const secp256k1_scalar *a_vec,
@@ -52,29 +52,29 @@ static int secp256k1_weighted_scalar_inner_product(
     const size_t b_offset,
     const size_t step,
     const size_t len,
-    const secp256k1_scalar *q
+    const secp256k1_scalar *mu
 ) {
-    secp256k1_scalar q_pow;
+    secp256k1_scalar mu_pow;
     size_t i;
     secp256k1_scalar_set_int(res, 0);
-    q_pow = *q;
+    mu_pow = *mu;
     for (i = 0; i < len; i++) {
         secp256k1_scalar term;
         secp256k1_scalar_mul(&term, &a_vec[a_offset + step * i], &b_vec[b_offset + step * i]);
-        secp256k1_scalar_mul(&term, &term, &q_pow);
-        secp256k1_scalar_mul(&q_pow, &q_pow, q);
+        secp256k1_scalar_mul(&term, &term, &mu_pow);
+        secp256k1_scalar_mul(&mu_pow, &mu_pow, mu);
         secp256k1_scalar_add(res, res, &term);
     }
     return 1;
 }

-/* Compute the powers of r as r, r^2, r^4 ... r^(2^(n-1)) */
-static void secp256k1_bppp_powers_of_r(secp256k1_scalar *powers, const secp256k1_scalar *r, size_t n) {
+/* Compute the powers of rho as rho, rho^2, rho^4 ... rho^(2^(n-1)) */
+static void secp256k1_bppp_powers_of_rho(secp256k1_scalar *powers, const secp256k1_scalar *rho, size_t n) {
     size_t i;
     if (n == 0) {
         return;
     }
-    powers[0] = *r;
+    powers[0] = *rho;
     for (i = 1; i < n; i++) {
         secp256k1_scalar_sqr(&powers[i], &powers[i - 1]);
     }
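For intuition, here is a standalone toy version of the two helpers touched by this hunk, using `uint64_t` arithmetic modulo a small made-up prime in place of secp256k1 scalars (a sketch for illustration only; all names and values are invented):

```c
/* Toy model of secp256k1_weighted_scalar_inner_product and
 * secp256k1_bppp_powers_of_rho over Z_P for a tiny made-up prime P.
 * The real code works on secp256k1 scalars mod the group order. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define P 101

/* Sum_{i=0..len-1} a[i]*b[i]*mu^(i+1) (mod P); offsets 0, step 1. */
static uint64_t weighted_inner_product(const uint64_t *a, const uint64_t *b,
                                       size_t len, uint64_t mu) {
    uint64_t res = 0, mu_pow = mu; /* running mu^(i+1), like mu_pow above */
    size_t i;
    for (i = 0; i < len; i++) {
        res = (res + a[i] * b[i] % P * mu_pow) % P;
        mu_pow = (mu_pow * mu) % P;
    }
    return res;
}

/* powers[j] = rho^(2^j): repeated squaring, like the loop above. */
static void powers_of_rho(uint64_t *powers, uint64_t rho, size_t n) {
    size_t j;
    if (n == 0) return;
    powers[0] = rho;
    for (j = 1; j < n; j++) powers[j] = (powers[j - 1] * powers[j - 1]) % P;
}

int main(void) {
    const uint64_t a[2] = {3, 5}, b[2] = {7, 2};
    uint64_t pows[3];
    /* 3*7*4 + 5*2*4^2 = 84 + 160 = 244 = 42 (mod 101) */
    printf("%llu\n", (unsigned long long)weighted_inner_product(a, b, 2, 4));
    powers_of_rho(pows, 2, 3); /* prints 2, 4, 16 */
    printf("%llu %llu %llu\n", (unsigned long long)pows[0],
           (unsigned long long)pows[1], (unsigned long long)pows[2]);
    return 0;
}
```

The running `mu_pow` is the same trick as in the diff: each term costs two multiplications instead of recomputing mu^(i+1) from scratch.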
@@ -99,7 +99,7 @@ static int ecmult_bp_commit_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t id
 }

 /* Create a commitment `commit` = vG + n_vec*G_vec + l_vec*H_vec where
-   v = |n_vec*n_vec|_q + <l_vec, c_vec>. |w|_q denotes q-weighted norm of w and
+   v = |n_vec*n_vec|_mu + <l_vec, c_vec>. |w|_mu denotes mu-weighted norm of w and
    <l, r> denotes inner product of l and r.
 */
 static int secp256k1_bppp_commit(
@@ -113,7 +113,7 @@ static int secp256k1_bppp_commit(
     size_t l_vec_len,
     const secp256k1_scalar *c_vec,
     size_t c_vec_len,
-    const secp256k1_scalar *q
+    const secp256k1_scalar *mu
 ) {
     secp256k1_scalar v, l_c;
     /* First n_vec_len generators are Gs, rest are Hs*/
@@ -125,8 +125,8 @@ static int secp256k1_bppp_commit(
     VERIFY_CHECK(secp256k1_is_power_of_two(n_vec_len));
     VERIFY_CHECK(secp256k1_is_power_of_two(c_vec_len));

-    /* Compute v = n_vec*n_vec*q + l_vec*c_vec */
-    secp256k1_weighted_scalar_inner_product(&v, n_vec, 0 /*a offset */, n_vec, 0 /*b offset*/, 1 /*step*/, n_vec_len, q);
+    /* Compute v = n_vec*n_vec*mu + l_vec*c_vec */
+    secp256k1_weighted_scalar_inner_product(&v, n_vec, 0 /*a offset */, n_vec, 0 /*b offset*/, 1 /*step*/, n_vec_len, mu);
     secp256k1_scalar_inner_product(&l_c, l_vec, 0 /*a offset */, c_vec, 0 /*b offset*/, 1 /*step*/, l_vec_len);
     secp256k1_scalar_add(&v, &v, &l_c);

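Written out, the value committed by `secp256k1_bppp_commit` above is (weights start at mu^1, matching `secp256k1_weighted_scalar_inner_product`):

```latex
v = |n\_vec|_{\mu}^{2} + \langle l\_vec, c\_vec \rangle
  = \sum_{i=1}^{n} \mu^{i} n_i^{2} + \sum_{j} l_j c_j, \qquad
\text{commit} = v\,G + \langle n\_vec, G\_vec \rangle + \langle l\_vec, H\_vec \rangle
```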
@@ -150,8 +150,8 @@ typedef struct ecmult_x_cb_data {
     const secp256k1_scalar *n;
     const secp256k1_ge *g;
     const secp256k1_scalar *l;
-    const secp256k1_scalar *r;
-    const secp256k1_scalar *r_inv;
+    const secp256k1_scalar *rho;
+    const secp256k1_scalar *rho_inv;
     size_t G_GENS_LEN; /* Figure out initialization syntax so that this can also be const */
     size_t n_len;
 } ecmult_x_cb_data;
@@ -160,10 +160,10 @@ static int ecmult_x_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void
     ecmult_x_cb_data *data = (ecmult_x_cb_data*) cbdata;
     if (idx < data->n_len) {
         if (idx % 2 == 0) {
-            secp256k1_scalar_mul(sc, &data->n[idx + 1], data->r);
+            secp256k1_scalar_mul(sc, &data->n[idx + 1], data->rho);
             *pt = data->g[idx];
         } else {
-            secp256k1_scalar_mul(sc, &data->n[idx - 1], data->r_inv);
+            secp256k1_scalar_mul(sc, &data->n[idx - 1], data->rho_inv);
             *pt = data->g[idx];
         }
     } else {
@@ -201,11 +201,11 @@ static int ecmult_r_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void
 }

 /* Recursively compute the norm argument proof satisfying the relation
- * <n_vec, n_vec>_q + <c_vec, l_vec> = v for some commitment
- * C = v*G + <n_vec, G_vec> + <l_vec, H_vec>. <x, x>_q is the weighted inner
- * product of x with itself, where the weights are the first n powers of q.
- * <x, x>_q = q*x_1^2 + q^2*x_2^2 + q^3*x_3^2 + ... + q^n*x_n^2.
- * The API computes q as square of the r challenge (`r^2`).
+ * <n_vec, n_vec>_mu + <c_vec, l_vec> = v for some commitment
+ * C = v*G + <n_vec, G_vec> + <l_vec, H_vec>. <x, x>_mu is the weighted inner
+ * product of x with itself, where the weights are the first n powers of mu.
+ * <x, x>_mu = mu*x_1^2 + mu^2*x_2^2 + mu^3*x_3^2 + ... + mu^n*x_n^2.
+ * The API computes mu as square of the rho challenge (`rho^2`).
 *
 * The norm argument is not zero knowledge and does not operate on any secret data.
 * Thus the following code uses variable time operations while computing the proof.
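In display form, the relation restated by this comment is:

```latex
\langle n\_vec, n\_vec \rangle_{\mu} + \langle c\_vec, l\_vec \rangle = v, \qquad
\langle x, x \rangle_{\mu} = \sum_{i=1}^{n} \mu^{i} x_i^{2}, \qquad
\mu = \rho^{2}, \qquad
C = v\,G + \langle n\_vec, G\_vec \rangle + \langle l\_vec, H\_vec \rangle
```

Each round of the loop below folds the vectors to half length with a fresh challenge gamma and advances the weights (`rho_f = mu_f; mu_f = mu_sq;` at the end of the loop body), so after log2(n) rounds only the scalars n and l remain.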
@@ -222,7 +222,7 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
     unsigned char *proof,
     size_t *proof_len,
     secp256k1_sha256 *transcript, /* Transcript hash of the parent protocol */
-    const secp256k1_scalar *r,
+    const secp256k1_scalar *rho,
     secp256k1_ge *g_vec,
     size_t g_vec_len,
     secp256k1_scalar *n_vec,
@@ -232,7 +232,7 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
     secp256k1_scalar *c_vec,
     size_t c_vec_len
 ) {
-    secp256k1_scalar q_f, r_f = *r;
+    secp256k1_scalar mu_f, rho_f = *rho;
     size_t proof_idx = 0;
     ecmult_x_cb_data x_cb_data;
     ecmult_r_cb_data r_cb_data;
@@ -259,38 +259,38 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
     r_cb_data.g1 = g_vec;
     r_cb_data.l1 = l_vec;
     r_cb_data.G_GENS_LEN = G_GENS_LEN;
-    secp256k1_scalar_sqr(&q_f, &r_f);
+    secp256k1_scalar_sqr(&mu_f, &rho_f);


     while (g_len > 1 || h_len > 1) {
         size_t i, num_points;
-        secp256k1_scalar q_sq, r_inv, c0_l1, c1_l0, x_v, c1_l1, r_v;
+        secp256k1_scalar mu_sq, rho_inv, c0_l1, c1_l0, x_v, c1_l1, r_v;
         secp256k1_gej rj, xj;
         secp256k1_ge r_ge, x_ge;
-        secp256k1_scalar e;
+        secp256k1_scalar gamma;

-        secp256k1_scalar_inverse_var(&r_inv, &r_f);
-        secp256k1_scalar_sqr(&q_sq, &q_f);
+        secp256k1_scalar_inverse_var(&rho_inv, &rho_f);
+        secp256k1_scalar_sqr(&mu_sq, &mu_f);

-        /* Compute the X commitment X = WIP(r_inv*n0,n1)_q2 * g + r<n1,G> + <r_inv*x0, G1> */
+        /* Compute the X commitment X = WIP(rho_inv*n0,n1)_mu2 * g + r<n1,G> + <rho_inv*x0, G1> */
         secp256k1_scalar_inner_product(&c0_l1, c_vec, 0, l_vec, 1, 2, h_len/2);
         secp256k1_scalar_inner_product(&c1_l0, c_vec, 1, l_vec, 0, 2, h_len/2);
-        secp256k1_weighted_scalar_inner_product(&x_v, n_vec, 0, n_vec, 1, 2, g_len/2, &q_sq);
-        secp256k1_scalar_mul(&x_v, &x_v, &r_inv);
+        secp256k1_weighted_scalar_inner_product(&x_v, n_vec, 0, n_vec, 1, 2, g_len/2, &mu_sq);
+        secp256k1_scalar_mul(&x_v, &x_v, &rho_inv);
         secp256k1_scalar_add(&x_v, &x_v, &x_v);
         secp256k1_scalar_add(&x_v, &x_v, &c0_l1);
         secp256k1_scalar_add(&x_v, &x_v, &c1_l0);

-        x_cb_data.r = &r_f;
-        x_cb_data.r_inv = &r_inv;
+        x_cb_data.rho = &rho_f;
+        x_cb_data.rho_inv = &rho_inv;
         x_cb_data.n_len = g_len >= 2 ? g_len : 0;
         num_points = x_cb_data.n_len + (h_len >= 2 ? h_len : 0);

         if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &xj, &x_v, ecmult_x_cb, (void*)&x_cb_data, num_points)) {
             return 0;
         }

-        secp256k1_weighted_scalar_inner_product(&r_v, n_vec, 1, n_vec, 1, 2, g_len/2, &q_sq);
+        secp256k1_weighted_scalar_inner_product(&r_v, n_vec, 1, n_vec, 1, 2, g_len/2, &mu_sq);
         secp256k1_scalar_inner_product(&c1_l1, c_vec, 1, l_vec, 1, 2, h_len/2);
         secp256k1_scalar_add(&r_v, &r_v, &c1_l1);

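For reference, the scalar parts of the two round commitments computed in this hunk, writing n_0, c_0, l_0 for the even-indexed halves and n_1, c_1, l_1 for the odd-indexed halves selected by the offset/step arguments:

```latex
x_v = 2 \rho^{-1} \langle n_0, n_1 \rangle_{\mu^2}
      + \langle c_0, l_1 \rangle + \langle c_1, l_0 \rangle, \qquad
r_v = \langle n_1, n_1 \rangle_{\mu^2} + \langle c_1, l_1 \rangle
```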
@@ -314,22 +314,22 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
         secp256k1_bppp_serialize_points(&proof[proof_idx], &x_ge, &r_ge);
         proof_idx += 65;

-        /* Obtain challenge e for the next round */
+        /* Obtain challenge gamma for the next round */
         secp256k1_sha256_write(transcript, &proof[proof_idx - 65], 65);
-        secp256k1_bppp_challenge_scalar(&e, transcript, 0);
+        secp256k1_bppp_challenge_scalar(&gamma, transcript, 0);

         if (g_len > 1) {
             for (i = 0; i < g_len; i = i + 2) {
                 secp256k1_scalar nl, nr;
                 secp256k1_gej gl, gr;
-                secp256k1_scalar_mul(&nl, &n_vec[i], &r_inv);
-                secp256k1_scalar_mul(&nr, &n_vec[i + 1], &e);
+                secp256k1_scalar_mul(&nl, &n_vec[i], &rho_inv);
+                secp256k1_scalar_mul(&nr, &n_vec[i + 1], &gamma);
                 secp256k1_scalar_add(&n_vec[i/2], &nl, &nr);

                 secp256k1_gej_set_ge(&gl, &g_vec[i]);
-                secp256k1_ecmult(&gl, &gl, &r_f, NULL);
+                secp256k1_ecmult(&gl, &gl, &rho_f, NULL);
                 secp256k1_gej_set_ge(&gr, &g_vec[i + 1]);
-                secp256k1_ecmult(&gr, &gr, &e, NULL);
+                secp256k1_ecmult(&gr, &gr, &gamma, NULL);
                 secp256k1_gej_add_var(&gl, &gl, &gr, NULL);
                 secp256k1_ge_set_gej_var(&g_vec[i/2], &gl);
             }
@@ -339,22 +339,22 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
             for (i = 0; i < h_len; i = i + 2) {
                 secp256k1_scalar temp1;
                 secp256k1_gej grj;
-                secp256k1_scalar_mul(&temp1, &c_vec[i + 1], &e);
+                secp256k1_scalar_mul(&temp1, &c_vec[i + 1], &gamma);
                 secp256k1_scalar_add(&c_vec[i/2], &c_vec[i], &temp1);

-                secp256k1_scalar_mul(&temp1, &l_vec[i + 1], &e);
+                secp256k1_scalar_mul(&temp1, &l_vec[i + 1], &gamma);
                 secp256k1_scalar_add(&l_vec[i/2], &l_vec[i], &temp1);

                 secp256k1_gej_set_ge(&grj, &g_vec[G_GENS_LEN + i + 1]);
-                secp256k1_ecmult(&grj, &grj, &e, NULL);
+                secp256k1_ecmult(&grj, &grj, &gamma, NULL);
                 secp256k1_gej_add_ge_var(&grj, &grj, &g_vec[G_GENS_LEN + i], NULL);
                 secp256k1_ge_set_gej_var(&g_vec[G_GENS_LEN + i/2], &grj);
             }
         }
         g_len = g_len / 2;
         h_len = h_len / 2;
-        r_f = q_f;
-        q_f = q_sq;
+        rho_f = mu_f;
+        mu_f = mu_sq;
     }

     secp256k1_scalar_get_b32(&proof[proof_idx], &n_vec[0]);
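A toy version of the prover folding in the two hunks above, again over `uint64_t` mod a small made-up prime. It only folds one vector and keeps rho fixed across rounds; the real code also folds `l_vec`, `c_vec` and the generators, and advances rho to mu each round:

```c
/* Toy folding round: n[i/2] = rho^{-1}*n[i] + gamma*n[i+1] (mod P),
 * halving the vector length; log2(len) rounds reduce it to one scalar. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define P 101

static uint64_t inv_mod_p(uint64_t x) { /* x^(P-2) mod P, by Fermat */
    uint64_t r = 1, b = x % P, e = P - 2;
    while (e) {
        if (e & 1) r = r * b % P;
        b = b * b % P;
        e >>= 1;
    }
    return r;
}

static size_t fold(uint64_t *n, size_t len, uint64_t rho, uint64_t gamma) {
    uint64_t rho_inv = inv_mod_p(rho);
    size_t i;
    for (i = 0; i < len; i += 2) {
        n[i / 2] = (rho_inv * n[i] + gamma * n[i + 1]) % P;
    }
    return len / 2;
}

int main(void) {
    uint64_t n[4] = {3, 1, 4, 1};
    size_t len = 4;
    while (len > 1) {
        len = fold(n, len, 2, 7); /* fixed rho = 2, gamma = 7 for the toy */
    }
    printf("%llu\n", (unsigned long long)n[0]); /* folded to one scalar */
    return 0;
}
```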
@@ -367,7 +367,7 @@ static int secp256k1_bppp_rangeproof_norm_product_prove(
 typedef struct ec_mult_verify_cb_data1 {
     const unsigned char *proof;
     const secp256k1_ge *commit;
-    const secp256k1_scalar *challenges;
+    const secp256k1_scalar *gammas;
 } ec_mult_verify_cb_data1;

 static int ec_mult_verify_cb1(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
@@ -381,7 +381,7 @@ static int ec_mult_verify_cb1(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx
     if (idx % 2 == 0) {
         unsigned char pk_buf[33];
         idx /= 2;
-        *sc = data->challenges[idx];
+        *sc = data->gammas[idx];
         pk_buf[0] = 2 | (data->proof[65 * idx] >> 1);
         memcpy(&pk_buf[1], &data->proof[65 * idx + 1], 32);
         if (!secp256k1_eckey_pubkey_parse(pt, pk_buf, sizeof(pk_buf))) {
@@ -393,7 +393,7 @@ static int ec_mult_verify_cb1(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx
         idx /= 2;
         secp256k1_scalar_set_int(&neg_one, 1);
         secp256k1_scalar_negate(&neg_one, &neg_one);
-        *sc = data->challenges[idx];
+        *sc = data->gammas[idx];
         secp256k1_scalar_sqr(sc, sc);
         secp256k1_scalar_add(sc, sc, &neg_one);
         pk_buf[0] = 2 | data->proof[65 * idx];
@@ -432,15 +432,15 @@ static int secp256k1_bppp_rangeproof_norm_product_verify(
     const unsigned char *proof,
     size_t proof_len,
     secp256k1_sha256 *transcript,
-    const secp256k1_scalar *r,
+    const secp256k1_scalar *rho,
     const secp256k1_bppp_generators *g_vec,
     size_t g_len,
     const secp256k1_scalar *c_vec,
     size_t c_vec_len,
     const secp256k1_ge *commit
 ) {
-    secp256k1_scalar r_f, q_f, v, n, l, r_inv, h_c;
-    secp256k1_scalar *es, *s_g, *s_h, *r_inv_pows;
+    secp256k1_scalar rho_f, mu_f, v, n, l, rho_inv, h_c;
+    secp256k1_scalar *gammas, *s_g, *s_h, *rho_inv_pows;
     secp256k1_gej res1, res2;
     size_t i = 0, scratch_checkpoint;
     int overflow;
@@ -467,69 +467,69 @@ static int secp256k1_bppp_rangeproof_norm_product_verify(
     if (overflow) return 0;
     secp256k1_scalar_set_b32(&l, &proof[n_rounds * 65 + 32], &overflow); /* l */
     if (overflow) return 0;
-    if (secp256k1_scalar_is_zero(r)) return 0;
+    if (secp256k1_scalar_is_zero(rho)) return 0;

-    /* Collect the challenges in a new vector */
+    /* Collect the gammas in a new vector */
     scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch);
-    es = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_rounds * sizeof(secp256k1_scalar));
+    gammas = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, n_rounds * sizeof(secp256k1_scalar));
     s_g = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, g_len * sizeof(secp256k1_scalar));
     s_h = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, h_len * sizeof(secp256k1_scalar));
-    r_inv_pows = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, log_g_len * sizeof(secp256k1_scalar));
-    if (es == NULL || s_g == NULL || s_h == NULL || r_inv_pows == NULL) {
+    rho_inv_pows = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, log_g_len * sizeof(secp256k1_scalar));
+    if (gammas == NULL || s_g == NULL || s_h == NULL || rho_inv_pows == NULL) {
         secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
         return 0;
     }

-    /* Compute powers of r_inv. Later used in g_factor computations */
-    secp256k1_scalar_inverse_var(&r_inv, r);
-    secp256k1_bppp_powers_of_r(r_inv_pows, &r_inv, log_g_len);
+    /* Compute powers of rho_inv. Later used in g_factor computations */
+    secp256k1_scalar_inverse_var(&rho_inv, rho);
+    secp256k1_bppp_powers_of_rho(rho_inv_pows, &rho_inv, log_g_len);

-    /* Compute r_f = r^(2^log_g_len) */
-    r_f = *r;
+    /* Compute rho_f = rho^(2^log_g_len) */
+    rho_f = *rho;
     for (i = 0; i < log_g_len; i++) {
-        secp256k1_scalar_sqr(&r_f, &r_f);
+        secp256k1_scalar_sqr(&rho_f, &rho_f);
     }

     for (i = 0; i < n_rounds; i++) {
-        secp256k1_scalar e;
+        secp256k1_scalar gamma;
         secp256k1_sha256_write(transcript, &proof[i * 65], 65);
-        secp256k1_bppp_challenge_scalar(&e, transcript, 0);
-        es[i] = e;
+        secp256k1_bppp_challenge_scalar(&gamma, transcript, 0);
+        gammas[i] = gamma;
     }
-    /* s_g[0] = n * \prod_{j=0}^{log_g_len - 1} r^(2^j)
-     *        = n * r^(2^log_g_len - 1)
-     *        = n * r_f * r_inv */
-    secp256k1_scalar_mul(&s_g[0], &n, &r_f);
-    secp256k1_scalar_mul(&s_g[0], &s_g[0], &r_inv);
+    /* s_g[0] = n * \prod_{j=0}^{log_g_len - 1} rho^(2^j)
+     *        = n * rho^(2^log_g_len - 1)
+     *        = n * rho_f * rho_inv */
+    secp256k1_scalar_mul(&s_g[0], &n, &rho_f);
+    secp256k1_scalar_mul(&s_g[0], &s_g[0], &rho_inv);
     for (i = 1; i < g_len; i++) {
         size_t log_i = secp256k1_bppp_log2(i);
         size_t nearest_pow_of_two = (size_t)1 << log_i;
-        /* This combines the two multiplications of challenges and r_invs in a
+        /* This combines the two multiplications of gammas and rho_invs in a
          * single loop.
          * s_g[i] = s_g[i - nearest_pow_of_two]
-         *          * e[log_i] * r_inv^(2^log_i) */
-        secp256k1_scalar_mul(&s_g[i], &s_g[i - nearest_pow_of_two], &es[log_i]);
-        secp256k1_scalar_mul(&s_g[i], &s_g[i], &r_inv_pows[log_i]);
+         *          * gamma[log_i] * rho_inv^(2^log_i) */
+        secp256k1_scalar_mul(&s_g[i], &s_g[i - nearest_pow_of_two], &gammas[log_i]);
+        secp256k1_scalar_mul(&s_g[i], &s_g[i], &rho_inv_pows[log_i]);
     }
     s_h[0] = l;
     secp256k1_scalar_set_int(&h_c, 0);
     for (i = 1; i < h_len; i++) {
         size_t log_i = secp256k1_bppp_log2(i);
         size_t nearest_pow_of_two = (size_t)1 << log_i;
-        secp256k1_scalar_mul(&s_h[i], &s_h[i - nearest_pow_of_two], &es[log_i]);
+        secp256k1_scalar_mul(&s_h[i], &s_h[i - nearest_pow_of_two], &gammas[log_i]);
     }
     secp256k1_scalar_inner_product(&h_c, c_vec, 0 /* a_offset */, s_h, 0 /* b_offset */, 1 /* step */, h_len);
-    /* Compute v = n*n*q_f + l*h_c where q_f = r_f^2 */
-    secp256k1_scalar_sqr(&q_f, &r_f);
+    /* Compute v = n*n*mu_f + l*h_c where mu_f = rho_f^2 */
+    secp256k1_scalar_sqr(&mu_f, &rho_f);
     secp256k1_scalar_mul(&v, &n, &n);
-    secp256k1_scalar_mul(&v, &v, &q_f);
+    secp256k1_scalar_mul(&v, &v, &mu_f);
     secp256k1_scalar_add(&v, &v, &h_c);

     {
         ec_mult_verify_cb_data1 data;
         data.proof = proof;
         data.commit = commit;
-        data.challenges = es;
+        data.gammas = gammas;

         if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &res1, NULL, ec_mult_verify_cb1, &data, 2 * n_rounds + 1)) {
             secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
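And a toy version of the verifier's `s_g` recurrence from this hunk: each `s_g[i]` is derived from the entry at `i` minus its top set bit, so all `g_len` scalars come out of the `n_rounds` challenges with two multiplications apiece (all constants below are made up):

```c
/* Toy s_g recurrence: s_g[i] = s_g[i - 2^log_i] * gamma[log_i]
 *                              * rho_inv^(2^log_i)  (mod P),
 * where log_i = floor(log2(i)). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define P 101

static size_t log2_floor(size_t i) {
    size_t l = 0;
    while (i >>= 1) l++;
    return l;
}

int main(void) {
    /* invented round challenges and precomputed rho_inv^(2^j) for rho = 2:
     * 51 = 2^-1 (mod 101), then 51^2 = 76, 76^2 = 19 */
    const uint64_t gammas[3] = {7, 11, 13};
    const uint64_t rho_inv_pows[3] = {51, 76, 19};
    uint64_t s_g[8];
    size_t i;
    s_g[0] = 5; /* stand-in for n * rho_f * rho_inv */
    for (i = 1; i < 8; i++) {
        size_t log_i = log2_floor(i);
        size_t nearest_pow_of_two = (size_t)1 << log_i;
        s_g[i] = s_g[i - nearest_pow_of_two] * gammas[log_i] % P
               * rho_inv_pows[log_i] % P;
    }
    for (i = 0; i < 8; i++) printf("%llu ", (unsigned long long)s_g[i]);
    printf("\n");
    return 0;
}
```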