@@ -152,10 +152,11 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
#if COMB_BITS > 256
} else if (EXPECT(bit_pos >= 256, 0)) {
/* Some bit(s) of (mask(block) << comb_off) are outside of [0,256). This means
- * we are also done constructing bits, but know its top bit is zero, and no
+ * we are also done constructing bits, but know its top bit should be zero, and no
* flipping/negating is needed. The table lookup can also be done over a
* smaller number of entries. */
- VERIFY_CHECK(bits < (1U << tooth));
+ /* Mask out junk in bits variable. */
+ bits &= ((1U << tooth) - 1);
VERIFY_CHECK(bits < COMB_POINTS);
for (index = 0; (index >> tooth) == 0; ++index) {
secp256k1_ge_storage_cmov(&adds, &secp256k1_ecmult_gen_prec_table[block][index], index == bits);
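
The masking line above replaces the old `VERIFY_CHECK(bits < (1U << tooth))`: with the new bit-gathering code (second hunk below), the bits above position `tooth` now hold junk, so they must be cleared before the lookup rather than asserted zero. The lookup then touches every one of the `1 << tooth` candidate table entries and keeps the matching one with a constant-time conditional move, so the memory access pattern is independent of the secret index. A minimal standalone sketch of that mask-then-scan pattern, using a hypothetical byte-array entry type and cmov helper rather than the library's `secp256k1_ge_storage` internals:

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical table entry; stands in for secp256k1_ge_storage. */
typedef struct { uint8_t data[64]; } entry;

/* Constant-time conditional move: copy src into dst iff flag == 1. The mask
 * is all-ones or all-zeros, so no secret-dependent branch is taken. */
static void entry_cmov(entry *dst, const entry *src, int flag) {
    uint8_t mask = (uint8_t)-(uint8_t)flag;
    size_t i;
    for (i = 0; i < sizeof(dst->data); i++) {
        dst->data[i] ^= mask & (dst->data[i] ^ src->data[i]);
    }
}

/* Keep only the `tooth` meaningful low bits of the secret index (the rest is
 * junk), then scan the whole table, cmov-ing in the matching entry. Every
 * entry is read, so the access pattern reveals nothing about the index. */
static void table_lookup(entry *out, const entry *table, unsigned tooth, uint32_t bits) {
    uint32_t index;
    bits &= (1U << tooth) - 1;
    for (index = 0; (index >> tooth) == 0; ++index) {
        entry_cmov(out, &table[index], index == bits);
    }
}
```

The XOR-with-mask form in `entry_cmov` is the usual branch-free way to select data without a secret-dependent branch.
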
@@ -164,10 +165,16 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25
break;
#endif
} else {
- /* Gather another bit. */
- uint32_t bit = secp256k1_scalar_get_bits(&recoded, bit_pos, 1);
+ /* Gather another bit. To reduce side-channels from single-bit reads, don't
+ * actually fetch a single bit, but read higher bits too, which are XOR'ed
+ * into the upper bits of bits. On every iteration, one additional bit is
+ * made correct, starting at the bottom. The bits above that contain junk.
+ * See https://www.usenix.org/system/files/conference/usenixsecurity18/sec18-alam.pdf
+ */
+ uint32_t bitdata = secp256k1_scalar_get_bits(&recoded, bit_pos & ~0x1f, 32) >> (bit_pos & 0x1f);
VERIFY_CHECK(bit_pos < COMB_BITS && bit_pos < 256);
- bits |= bit << tooth;
+ bits &= ~(1 << tooth);
+ bits ^= bitdata << tooth;
bit_pos += COMB_SPACING;
++tooth;
}
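
The second hunk is the side-channel hardening motivated by the linked Alam et al. paper: rather than issuing a 1-bit read at a secret-dependent offset, the code fetches the entire aligned 32-bit word containing the target bit and shifts it down, so the wanted bit lands at position 0 with higher scalar bits riding along as junk. The `bits &= ~(1 << tooth); bits ^= bitdata << tooth;` pair then clears slot `tooth` and XORs the fetched word in, leaving bits 0..tooth correct and everything above them junk until the mask in the first hunk removes it. A hedged sketch of the aligned-word fetch, assuming a plain little-endian array of 32-bit limbs rather than the library's actual `secp256k1_scalar` representation:

```c
#include <stdint.h>

/* Fetch the bit at position bit_pos (0..255) of a 256-bit scalar stored as
 * eight little-endian 32-bit limbs. Instead of extracting one bit, load the
 * whole aligned word containing it, mirroring the diff's
 * secp256k1_scalar_get_bits(&recoded, bit_pos & ~0x1f, 32) >> (bit_pos & 0x1f).
 * Bit 0 of the result is the wanted bit; bits 1..31 carry higher scalar bits
 * ("junk") that the caller later masks off or overwrites. */
static uint32_t get_bit_wordwise(const uint32_t limbs[8], unsigned bit_pos) {
    /* bit_pos & ~0x1f rounds down to a 32-bit boundary, so the load is always
     * a full, word-aligned access rather than a narrow secret-offset read. */
    uint32_t word = limbs[(bit_pos & ~0x1fu) >> 5];
    return word >> (bit_pos & 0x1f);
}
```

Usage note: `get_bit_wordwise(limbs, bit_pos) & 1` would recover the single bit, but the caller here deliberately keeps the junk and defers masking, which is exactly what makes the `bits &= ((1U << tooth) - 1)` in the first hunk necessary.
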