@@ -140,4 +140,251 @@ static int secp256k1_bulletproofs_commit(
     }
     return 1;
 }
+
+typedef struct ec_mult_x_cb_data {
+    const secp256k1_scalar *n;
+    const secp256k1_ge *g;
+    const secp256k1_scalar *l;
+    const secp256k1_scalar *r;
+    const secp256k1_scalar *r_inv;
+    size_t G_GENS_LEN; /* Figure out initialization syntax so that this can also be const */
+    size_t n_len;
+} ec_mult_x_cb_data;
+
+/* Callback that enumerates the scalar-point pairs for the X commitment
+ * multi-multiplication: crossed n-values (scaled by r or r_inv) against the
+ * G generators, then crossed l-values against the H generators. */
+static int ec_mult_x_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+    ec_mult_x_cb_data *data = (ec_mult_x_cb_data*) cbdata;
+    if (idx < data->n_len) {
+        if (idx % 2 == 0) {
+            secp256k1_scalar_mul(sc, &data->n[idx + 1], data->r);
+            *pt = data->g[idx];
+        } else {
+            secp256k1_scalar_mul(sc, &data->n[idx - 1], data->r_inv);
+            *pt = data->g[idx];
+        }
+    } else {
+        idx -= data->n_len;
+        if (idx % 2 == 0) {
+            *sc = data->l[idx + 1];
+            *pt = data->g[data->G_GENS_LEN + idx];
+        } else {
+            *sc = data->l[idx - 1];
+            *pt = data->g[data->G_GENS_LEN + idx];
+        }
+    }
+    return 1;
+}
+
+typedef struct ec_mult_r_cb_data {
+    const secp256k1_scalar *n1;
+    const secp256k1_ge *g1;
+    const secp256k1_scalar *l1;
+    size_t G_GENS_LEN;
+    size_t n_len;
+} ec_mult_r_cb_data;
+
+/* Callback that enumerates the scalar-point pairs for the R commitment
+ * multi-multiplication: the odd-indexed n- and l-values against the
+ * odd-indexed G and H generators. */
+static int ec_mult_r_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
+    ec_mult_r_cb_data *data = (ec_mult_r_cb_data*) cbdata;
+    if (idx < data->n_len) {
+        *sc = data->n1[2 * idx + 1];
+        *pt = data->g1[2 * idx + 1];
+    } else {
+        idx -= data->n_len;
+        *sc = data->l1[2 * idx + 1];
+        *pt = data->g1[data->G_GENS_LEN + 2 * idx + 1];
+    }
+    return 1;
+}
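+
+/* Both callbacks implement the (scalar, point) enumeration interface expected
+ * by secp256k1_ecmult_multi_var below: each call returns the idx-th term of
+ * the multi-multiplication and signals success by returning 1. */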
+
+/* Recursively computes the norm argument proof satisfying the relation
+ * <n_vec, n_vec>_q + <c_vec, l_vec> = v for some commitment
+ * C = v*G + <n_vec, G_vec> + <l_vec, H_vec>. <x, x>_q is the weighted inner
+ * product of x with itself, where the weights are the first n powers of q:
+ * <x, x>_q = q*x_1^2 + q^2*x_2^2 + q^3*x_3^2 + ... + q^n*x_n^2.
+ *
+ * The norm argument is not zero knowledge and does not operate on any secret
+ * data, so the code below uses variable-time operations throughout.
+ */
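+/* Example (illustrative only): for n_vec = (n1, n2), l_vec = (l1),
+ * c_vec = (c1) and weight q, the relation above reads
+ *     q*n1^2 + q^2*n2^2 + c1*l1 = v,
+ * with C = v*G + n1*G_vec[0] + n2*G_vec[1] + l1*H_vec[0]. */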
+int secp256k1_bulletproofs_pp_rangeproof_norm_product_prove(
+    const secp256k1_context *ctx,
+    secp256k1_scratch_space *scratch,
+    unsigned char *proof,
+    size_t *proof_len,
+    unsigned char *transcript_hash32, /* Transcript hash of the parent protocol */
+    const secp256k1_scalar *r_ch,
+    const secp256k1_bulletproofs_generators *g_vec,
+    const secp256k1_scalar *n_vec,
+    size_t n_vec_len,
+    const secp256k1_scalar *l_vec,
+    size_t l_vec_len,
+    const secp256k1_scalar *c_vec,
+    size_t c_vec_len,
+    const secp256k1_ge *commit
+) {
+    secp256k1_scalar q, r = *r_ch;
+    secp256k1_sha256 sha256;
+    unsigned char ser_commit[33];
+    size_t start_idx = 0;
+    ec_mult_x_cb_data x_cb_data;
+    ec_mult_r_cb_data r_cb_data;
+    size_t g_len = n_vec_len, h_len = l_vec_len;
+    const size_t G_GENS_LEN = g_len;
+    /* The pointers handed to the callbacks remain the same; the loop below
+     * mutates the underlying values and lengths. */
+    secp256k1_scalar *ns, *ls, *cs;
+    secp256k1_ge *gs, comm = *commit;
+    size_t scratch_checkpoint;
+    size_t log_n = secp256k1_bulletproofs_pp_log2(g_len), log_m = secp256k1_bulletproofs_pp_log2(h_len);
+    size_t n_rounds = log_n > log_m ? log_n : log_m;
+
+    /* Check that the proof buffer is large enough: 65 bytes per round plus 64
+     * bytes for the final scalars. */
+    if (*proof_len < 65 * n_rounds + 64) {
+        return 0;
+    }
+
+    if (g_vec->n != (n_vec_len + l_vec_len) || l_vec_len != c_vec_len) {
+        return 0;
+    }
+
+    if (!secp256k1_check_power_of_two(n_vec_len) || !secp256k1_check_power_of_two(c_vec_len)) {
+        return 0;
+    }
+    /* We could get away with allocating only half-sized arrays and unrolling
+     * the first iteration of the loop. That would increase code complexity and
+     * can be done as an optimization in a future PR. */
+    scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch);
+    ns = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, g_len * sizeof(secp256k1_scalar));
+    ls = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, h_len * sizeof(secp256k1_scalar));
+    cs = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, h_len * sizeof(secp256k1_scalar));
+    gs = (secp256k1_ge*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, (g_len + h_len) * sizeof(secp256k1_ge));
+    if (ns == NULL || ls == NULL || cs == NULL || gs == NULL) {
+        secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+        return 0;
+    }
+    memcpy(ns, n_vec, g_len * sizeof(secp256k1_scalar));
+    memcpy(ls, l_vec, h_len * sizeof(secp256k1_scalar));
+    memcpy(cs, c_vec, h_len * sizeof(secp256k1_scalar));
+    memcpy(gs, g_vec->gens, (g_len + h_len) * sizeof(secp256k1_ge));
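+    /* The rounds below fold these working copies in place; the caller's
+     * n_vec, l_vec, c_vec and generators are left untouched. */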
+
+    x_cb_data.n = ns;
+    x_cb_data.g = gs;
+    x_cb_data.l = ls;
+    x_cb_data.G_GENS_LEN = G_GENS_LEN;
+
+    r_cb_data.n1 = ns;
+    r_cb_data.g1 = gs;
+    r_cb_data.l1 = ls;
+    r_cb_data.G_GENS_LEN = G_GENS_LEN;
+    secp256k1_scalar_sqr(&q, &r);
+
+    /* Seed the transcript with the commitment being proven against. */
+    secp256k1_sha256_initialize(&sha256);
+    secp256k1_sha256_write(&sha256, transcript_hash32, 32);
+    secp256k1_fe_normalize_var(&comm.x);
+    secp256k1_fe_normalize_var(&comm.y);
+    ser_commit[0] = 0x02 | secp256k1_fe_is_odd(&comm.y);
+    secp256k1_fe_get_b32(&ser_commit[1], &comm.x);
+    secp256k1_sha256_write(&sha256, ser_commit, 33);
+    secp256k1_sha256_finalize(&sha256, transcript_hash32);
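+
+    /* Each round halves g_len and h_len and appends one 65-byte (X, R) pair to
+     * the proof; after n_rounds = max(log2(g_len), log2(h_len)) rounds both
+     * vectors have length 1. */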
+    while (g_len > 1 || h_len > 1) {
+        size_t i, num_points;
+        secp256k1_scalar q_sq, r_inv, c0_l1, c1_l0, x_v, c1_l1, r_v;
+        secp256k1_gej rj, xj;
+        secp256k1_ge ge;
+        int overflow;
+        secp256k1_scalar e;
+
+        secp256k1_scalar_inverse_var(&r_inv, &r);
+        secp256k1_scalar_sqr(&q_sq, &q);
+
+        /* Compute the X commitment X = WIP(r_inv*n0,n1)_q2 * g + r<n1,G> + <r_inv*x0, G1> */
+        secp256k1_scalar_inner_product(&c0_l1, cs, 0, ls, 1, 2, h_len/2);
+        secp256k1_scalar_inner_product(&c1_l0, cs, 1, ls, 0, 2, h_len/2);
+        secp256k1_weighted_scalar_inner_product(&x_v, ns, 0, ns, 1, 2, g_len/2, &q_sq);
+        secp256k1_scalar_mul(&x_v, &x_v, &r_inv);
+        secp256k1_scalar_add(&x_v, &x_v, &x_v);
+        secp256k1_scalar_add(&x_v, &x_v, &c0_l1);
+        secp256k1_scalar_add(&x_v, &x_v, &c1_l0);
+
+        x_cb_data.r = &r;
+        x_cb_data.r_inv = &r_inv;
+        x_cb_data.n_len = g_len >= 2 ? g_len : 0;
+        num_points = x_cb_data.n_len + (h_len >= 2 ? h_len : 0);
+
+        if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &xj, &x_v, ec_mult_x_cb, (void*)&x_cb_data, num_points)) {
+            secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+            return 0;
+        }
+
+        secp256k1_weighted_scalar_inner_product(&r_v, ns, 1, ns, 1, 2, g_len/2, &q_sq);
+        secp256k1_scalar_inner_product(&c1_l1, cs, 1, ls, 1, 2, h_len/2);
+        secp256k1_scalar_add(&r_v, &r_v, &c1_l1);
+
+        r_cb_data.n_len = g_len/2;
+        num_points = r_cb_data.n_len + h_len/2;
+        if (!secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &rj, &r_v, ec_mult_r_cb, (void*)&r_cb_data, num_points)) {
+            secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+            return 0;
+        }
+
+        /* Serialize the x-coordinates of X and R; their y-parities share the
+         * leading byte. */
+        secp256k1_ge_set_gej_var(&ge, &xj);
+        secp256k1_fe_normalize_var(&ge.x);
+        secp256k1_fe_normalize_var(&ge.y);
+        proof[start_idx] = secp256k1_fe_is_odd(&ge.y) << 1;
+        secp256k1_fe_get_b32(&proof[start_idx + 1], &ge.x);
+        secp256k1_ge_set_gej_var(&ge, &rj);
+        secp256k1_fe_normalize_var(&ge.x);
+        secp256k1_fe_normalize_var(&ge.y);
+        proof[start_idx] |= secp256k1_fe_is_odd(&ge.y);
+        secp256k1_fe_get_b32(&proof[start_idx + 33], &ge.x);
+
+        /* The challenge e additionally covers the next 65 bytes */
+        secp256k1_sha256_initialize(&sha256);
+        secp256k1_sha256_write(&sha256, transcript_hash32, 32);
+        secp256k1_sha256_write(&sha256, &proof[start_idx], 65);
+        secp256k1_sha256_finalize(&sha256, transcript_hash32);
+        secp256k1_scalar_set_b32(&e, transcript_hash32, &overflow); /* Ignore overflow */
+
+        if (g_len > 1) {
+            for (i = 0; i < g_len; i = i + 2) {
+                secp256k1_scalar nl, nr;
+                secp256k1_gej gl, gr;
+                secp256k1_scalar_mul(&nl, &ns[i], &r_inv);
+                secp256k1_scalar_mul(&nr, &ns[i + 1], &e);
+                secp256k1_scalar_add(&ns[i/2], &nl, &nr);
+
+                secp256k1_gej_set_ge(&gl, &gs[i]);
+                secp256k1_ecmult(&gl, &gl, &r, NULL);
+                secp256k1_gej_set_ge(&gr, &gs[i + 1]);
+                secp256k1_ecmult(&gr, &gr, &e, NULL);
+                secp256k1_gej_add_var(&gl, &gl, &gr, NULL);
+                secp256k1_ge_set_gej_var(&gs[i/2], &gl);
+            }
+        }
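+        /* The n side is now folded: ns[j] = r_inv*ns[2j] + e*ns[2j+1] and
+         * gs[j] = r*gs[2j] + e*gs[2j+1] for j < g_len/2. */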
+
+        if (h_len > 1) {
+            for (i = 0; i < h_len; i = i + 2) {
+                secp256k1_scalar temp1;
+                secp256k1_gej grj;
+                secp256k1_scalar_mul(&temp1, &cs[i + 1], &e);
+                secp256k1_scalar_add(&cs[i/2], &cs[i], &temp1);
+
+                secp256k1_scalar_mul(&temp1, &ls[i + 1], &e);
+                secp256k1_scalar_add(&ls[i/2], &ls[i], &temp1);
+
+                secp256k1_gej_set_ge(&grj, &gs[G_GENS_LEN + i + 1]);
+                secp256k1_ecmult(&grj, &grj, &e, NULL);
+                secp256k1_gej_add_ge_var(&grj, &grj, &gs[G_GENS_LEN + i], NULL);
+                secp256k1_ge_set_gej_var(&gs[G_GENS_LEN + i/2], &grj);
+            }
+        }
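+        /* The l side is now folded: cs[j] = cs[2j] + e*cs[2j+1],
+         * ls[j] = ls[2j] + e*ls[2j+1] and the H generators fold as
+         * H[2j] + e*H[2j+1] for j < h_len/2. */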
+        g_len = g_len / 2;
+        h_len = h_len / 2;
+        /* Set up r and q for the next round: r <- q, q <- q^2. */
+        r = q;
+        q = q_sq;
+        start_idx += 65;
+    }
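+
+    /* Both vectors now have length 1; their single entries form the final 64
+     * bytes of the proof. */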
+    secp256k1_scalar_get_b32(&proof[start_idx], &ns[0]);
+    secp256k1_scalar_get_b32(&proof[start_idx + 32], &ls[0]);
+    *proof_len = start_idx + 64;
+    secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+    return 1;
+}
 #endif