Skip to content

Commit 985fd63

Browse files
committed Nov 5, 2014
Merge pull request #77
1d52a8b Implementations for scalar without data-dependent branches. (Pieter Wuille)
2 parents 0ce80ef + 1d52a8b commit 985fd63

10 files changed

+1194
-57
lines changed
 

‎.travis.yml

+4-2
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,10 @@ install:
66
- if [ "$FIELD" = "64bit_asm" ]; then sudo apt-get install -qq yasm; fi
77
env:
88
global:
9-
- FIELD=auto BIGNUM=auto ENDOMORPHISM=no BUILD=check
9+
- FIELD=auto BIGNUM=auto SCALAR=auto ENDOMORPHISM=no BUILD=check
1010
matrix:
11+
- SCALAR=32bit
12+
- SCALAR=64bit
1113
- FIELD=gmp
1214
- FIELD=gmp ENDOMORPHISM=yes
1315
- FIELD=64bit_asm
@@ -18,5 +20,5 @@ env:
1820
- FIELD=32bit ENDOMORPHISM=yes
1921
- BUILD=distcheck
2022
before_script: ./autogen.sh
21-
script: ./configure --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM && make -j2 $BUILD
23+
script: ./configure --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR && make -j2 $BUILD
2224
os: linux

‎Makefile.am

+4
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,11 @@ noinst_LTLIBRARIES = $(COMMON_LIB)
1010
include_HEADERS = include/secp256k1.h
1111
noinst_HEADERS =
1212
noinst_HEADERS += src/scalar.h
13+
noinst_HEADERS += src/scalar_4x64.h
14+
noinst_HEADERS += src/scalar_8x32.h
1315
noinst_HEADERS += src/scalar_impl.h
16+
noinst_HEADERS += src/scalar_4x64_impl.h
17+
noinst_HEADERS += src/scalar_8x32_impl.h
1418
noinst_HEADERS += src/group.h
1519
noinst_HEADERS += src/group_impl.h
1620
noinst_HEADERS += src/num_gmp.h

‎configure.ac

+44
Original file line numberDiff line numberDiff line change
@@ -64,13 +64,19 @@ AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=gmp|64bit|64bit_asm|32bit|aut
6464
AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|auto],
6565
[Specify Bignum Implementation. Default is auto])],[req_bignum=$withval], [req_bignum=auto])
6666

67+
AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto],
68+
[Specify scalar implementation. Default is auto])],[req_scalar=$withval], [req_scalar=auto])
69+
6770
AC_CHECK_TYPES([__int128])
6871

6972
AC_DEFUN([SECP_INT128_CHECK],[
7073
has_int128=$ac_cv_type___int128
7174
if test x"$has_int128" != x"yes" && test x"$set_field" = x"64bit"; then
7275
AC_MSG_ERROR([$set_field field support explicitly requested but is not compatible with this host])
7376
fi
77+
if test x"$has_int128" != x"yes" && test x"$set_scalar" = x"64bit"; then
78+
AC_MSG_ERROR([$set_scalar scalar support explicitly requested but is not compatible with this host])
79+
fi
7480
])
7581

7682
AC_DEFUN([SECP_64BIT_ASM_CHECK],[
@@ -194,6 +200,30 @@ else
194200
esac
195201
fi
196202

203+
if test x"$req_scalar" = x"auto"; then
204+
if test x"$set_scalar" = x; then
205+
SECP_INT128_CHECK
206+
if test x"$has_int128" = x"yes"; then
207+
set_scalar=64bit
208+
fi
209+
fi
210+
if test x"$set_scalar" = x; then
211+
set_scalar=32bit
212+
fi
213+
else
214+
set_scalar=$req_scalar
215+
case $set_scalar in
216+
64bit)
217+
SECP_INT128_CHECK
218+
;;
219+
32bit)
220+
;;
221+
*)
222+
AC_MSG_ERROR([invalid scalar implementation selected])
223+
;;
224+
esac
225+
fi
226+
197227
if test x"$req_bignum" = x"auto"; then
198228
SECP_GMP_CHECK
199229
if test x"$has_gmp" = x"yes"; then
@@ -252,6 +282,19 @@ gmp)
252282
;;
253283
esac
254284

285+
#select scalar implementation
286+
case $set_scalar in
287+
64bit)
288+
AC_DEFINE(USE_SCALAR_4X64, 1, [Define this symbol to use the 4x64 scalar implementation])
289+
;;
290+
32bit)
291+
AC_DEFINE(USE_SCALAR_8X32, 1, [Define this symbol to use the 8x32 scalar implementation])
292+
;;
293+
*)
294+
AC_MSG_ERROR([invalid scalar implementation])
295+
;;
296+
esac
297+
255298
if test x"$use_tests" = x"yes"; then
256299
SECP_OPENSSL_CHECK
257300
if test x"$has_openssl_ec" == x"yes"; then
@@ -278,6 +321,7 @@ fi
278321

279322
AC_MSG_NOTICE([Using field implementation: $set_field])
280323
AC_MSG_NOTICE([Using bignum implementation: $set_bignum])
324+
AC_MSG_NOTICE([Using scalar implementation: $set_scalar])
281325

282326
AC_CONFIG_HEADERS([src/libsecp256k1-config.h])
283327
AC_CONFIG_FILES([Makefile libsecp256k1.pc])

‎src/scalar.h

+14-4
Original file line numberDiff line numberDiff line change
@@ -7,10 +7,17 @@
77

88
#include "num.h"
99

10-
/** A scalar modulo the group order of the secp256k1 curve. */
11-
typedef struct {
12-
secp256k1_num_t n;
13-
} secp256k1_scalar_t;
10+
#if defined HAVE_CONFIG_H
11+
#include "libsecp256k1-config.h"
12+
#endif
13+
14+
#if defined(USE_SCALAR_4X64)
15+
#include "scalar_4x64.h"
16+
#elif defined(USE_SCALAR_8X32)
17+
#include "scalar_8x32.h"
18+
#else
19+
#error "Please select scalar implementation"
20+
#endif
1421

1522
/** Clear a scalar to prevent the leak of sensitive data. */
1623
void static secp256k1_scalar_clear(secp256k1_scalar_t *r);
@@ -30,6 +37,9 @@ void static secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t
3037
/** Multiply two scalars (modulo the group order). */
3138
void static secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b);
3239

40+
/** Compute the square of a scalar (modulo the group order). */
41+
void static secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a);
42+
3343
/** Compute the inverse of a scalar (modulo the group order). */
3444
void static secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *a);
3545

‎src/scalar_4x64.h

+15
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
// Copyright (c) 2014 Pieter Wuille
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef _SECP256K1_SCALAR_REPR_
#define _SECP256K1_SCALAR_REPR_

#include <stdint.h>

/** A scalar modulo the group order of the secp256k1 curve,
 *  represented as 4 x 64-bit limbs, least significant limb first
 *  (d[0] holds bits 0..63). */
typedef struct {
    uint64_t d[4];
} secp256k1_scalar_t;

#endif

‎src/scalar_4x64_impl.h

+357
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,357 @@
1+
// Copyright (c) 2014 Pieter Wuille
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
#define _SECP256K1_SCALAR_REPR_IMPL_H_

// 128-bit accumulator type (compiler __int128 extension; availability is
// probed by the build system before this implementation is selected).
typedef unsigned __int128 uint128_t;

// Limbs of the secp256k1 group order n, least significant limb first.
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

// Limbs of 2^256 minus the secp256k1 order (the two's complement of n,
// i.e. 2^256 == SECP256K1_N_C modulo n). Limb 3 of this value is zero.
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)

// Limbs of half the secp256k1 order, least significant limb first.
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
26+
27+
/** Overwrite a scalar with zeroes, to avoid leaking sensitive data. */
void static inline secp256k1_scalar_clear(secp256k1_scalar_t *r) {
    for (int i = 0; i < 4; i++) {
        r->d[i] = 0;
    }
}
33+
34+
int static inline secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, int offset, int count) {
35+
VERIFY_CHECK((offset + count - 1) / 64 == offset / 64);
36+
return (a->d[offset / 64] >> (offset % 64)) & ((((uint64_t)1) << count) - 1);
37+
}
38+
39+
/** Check whether a scalar, read as a 256-bit integer, is >= the group order.
 *  Branch-free: limb comparisons are combined with bitwise logic instead of
 *  early-exit conditionals, so the operation sequence is data-independent. */
int static inline secp256k1_scalar_check_overflow(const secp256k1_scalar_t *a) {
    int over = 0;  // becomes 1 once a is known to be >= the order
    int under = 0; // becomes 1 once a is known to be < the order
    under |= (a->d[3] < SECP256K1_N_3); // No need for a > check: N_3 is all ones.
    under |= (a->d[2] < SECP256K1_N_2);
    over  |= (a->d[2] > SECP256K1_N_2) & ~under;
    under |= (a->d[1] < SECP256K1_N_1);
    over  |= (a->d[1] > SECP256K1_N_1) & ~under;
    over  |= (a->d[0] >= SECP256K1_N_0) & ~under;
    return over;
}
50+
51+
/** If overflow is 1, subtract the group order from r (implemented as adding
 *  2^256 - n modulo 2^256); if overflow is 0, leave r unchanged.
 *  Returns the overflow flag that was passed in. */
int static inline secp256k1_scalar_reduce(secp256k1_scalar_t *r, unsigned int overflow) {
    VERIFY_CHECK(overflow <= 1);
    uint128_t acc = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
    r->d[0] = acc & 0xFFFFFFFFFFFFFFFFULL; acc >>= 64;
    acc += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
    r->d[1] = acc & 0xFFFFFFFFFFFFFFFFULL; acc >>= 64;
    acc += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
    r->d[2] = acc & 0xFFFFFFFFFFFFFFFFULL; acc >>= 64;
    acc += r->d[3]; // limb 3 of 2^256 - n is zero, only the carry propagates
    r->d[3] = acc & 0xFFFFFFFFFFFFFFFFULL;
    return overflow;
}
63+
64+
/** Add two scalars modulo the group order: r = (a + b) mod n. */
void static secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
    uint128_t acc = (uint128_t)a->d[0] + b->d[0];
    r->d[0] = acc & 0xFFFFFFFFFFFFFFFFULL; acc >>= 64;
    acc += (uint128_t)a->d[1] + b->d[1];
    r->d[1] = acc & 0xFFFFFFFFFFFFFFFFULL; acc >>= 64;
    acc += (uint128_t)a->d[2] + b->d[2];
    r->d[2] = acc & 0xFFFFFFFFFFFFFFFFULL; acc >>= 64;
    acc += (uint128_t)a->d[3] + b->d[3];
    r->d[3] = acc & 0xFFFFFFFFFFFFFFFFULL; acc >>= 64;
    // acc now holds the carry out of bit 256; fold it in together with a
    // possible overflow of the 256-bit result past the order.
    secp256k1_scalar_reduce(r, acc + secp256k1_scalar_check_overflow(r));
}
75+
76+
/** Parse a 32-byte big-endian buffer into a scalar, reducing it modulo the
 *  group order. If overflow is non-NULL, *overflow is set to 1 when the raw
 *  256-bit value was >= the order, 0 otherwise. */
void static secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *b32, int *overflow) {
    // b32 is big-endian; limb 0 is least significant, so it comes from the
    // trailing 8 bytes (b32[24..31]).
    for (int limb = 0; limb < 4; limb++) {
        uint64_t v = 0;
        for (int byte = 0; byte < 8; byte++) {
            v = (v << 8) | b32[(3 - limb) * 8 + byte];
        }
        r->d[limb] = v;
    }
    int over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}
86+
87+
/** Serialize a scalar to 32 bytes, big-endian (most significant byte first). */
void static secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a) {
    for (int limb = 0; limb < 4; limb++) {
        uint64_t v = a->d[3 - limb]; // most significant limb first
        for (int byte = 0; byte < 8; byte++) {
            bin[limb * 8 + byte] = v >> (56 - 8 * byte);
        }
    }
}
93+
94+
int static inline secp256k1_scalar_is_zero(const secp256k1_scalar_t *a) {
95+
return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
96+
}
97+
98+
/** Compute the additive inverse modulo the group order: r = (n - a) mod n,
 *  with r = 0 when a = 0. Branch-free. */
void static secp256k1_scalar_negate(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
    // All-ones mask when a != 0, all-zeroes when a == 0 (so -0 stays 0).
    uint64_t mask = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    // n - a is computed limb by limb as (~a) + n + 1 with carry propagation.
    uint128_t acc = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
    r->d[0] = acc & mask; acc >>= 64;
    acc += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
    r->d[1] = acc & mask; acc >>= 64;
    acc += (uint128_t)(~a->d[2]) + SECP256K1_N_2;
    r->d[2] = acc & mask; acc >>= 64;
    acc += (uint128_t)(~a->d[3]) + SECP256K1_N_3;
    r->d[3] = acc & mask;
}
109+
110+
int static inline secp256k1_scalar_is_one(const secp256k1_scalar_t *a) {
111+
return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
112+
}
113+
114+
/** Check whether a scalar is strictly greater than half the group order.
 *  Branch-free, same comparison-combining technique as check_overflow. */
int static secp256k1_scalar_is_high(const secp256k1_scalar_t *a) {
    int over = 0;  // becomes 1 once a is known to be > n/2
    int under = 0; // becomes 1 once a is known to be <= n/2
    under |= (a->d[3] < SECP256K1_N_H_3);
    over  |= (a->d[3] > SECP256K1_N_H_3) & ~under;
    under |= (a->d[2] < SECP256K1_N_H_2) & ~over; // No need for a > check: N_H_2 is all ones.
    under |= (a->d[1] < SECP256K1_N_H_1) & ~over;
    over  |= (a->d[1] > SECP256K1_N_H_1) & ~under;
    over  |= (a->d[0] > SECP256K1_N_H_0) & ~under;
    return over;
}
125+
126+
// Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c.
//
// These macros operate on a multi-limb accumulator held in the free
// variables (c0,c1,c2), which the caller must declare. All carry handling
// uses comparisons on unsigned values rather than conditional branches.

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t lo, hi; \
    { \
        uint128_t prod = (uint128_t)a * b; \
        hi = prod >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
        lo = prod; \
    } \
    c0 += lo; /* overflow is handled on the next line */ \
    hi += (c0 < lo) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += hi; /* overflow is handled on the next line */ \
    c2 += (c1 < hi) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= hi) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t lo, hi; \
    { \
        uint128_t prod = (uint128_t)a * b; \
        hi = prod >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
        lo = prod; \
    } \
    c0 += lo; /* overflow is handled on the next line */ \
    hi += (c0 < lo) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += hi; /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= hi); \
}

/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd2(a,b) { \
    uint64_t lo, hi; \
    { \
        uint128_t prod = (uint128_t)a * b; \
        hi = prod >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
        lo = prod; \
    } \
    uint64_t hi2 = hi + hi; /* at most 0xFFFFFFFFFFFFFFFE (in case hi was 0x7FFFFFFFFFFFFFFF) */ \
    c2 += (hi2 < hi) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((hi2 >= hi) || (c2 != 0)); \
    uint64_t lo2 = lo + lo; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of lo were 0x7FFFFFFFFFFFFFFF) */ \
    hi2 += (lo2 < lo) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
    c0 += lo2; /* overflow is handled on the next line */ \
    hi2 += (c0 < lo2) ? 1 : 0; /* second overflow is handled on the next line */ \
    c2 += (c0 < lo2) & (hi2 == 0); /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c0 >= lo2) || (hi2 != 0) || (c2 != 0)); \
    c1 += hi2; /* overflow is handled on the next line */ \
    c2 += (c1 < hi2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c1 >= hi2) || (c2 != 0)); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    c0 += (a); /* overflow is handled on the next line */ \
    int carry = (c0 < (a)) ? 1 : 0; \
    c1 += carry; /* overflow is handled on the next line */ \
    c2 += (c1 < carry) ? 1 : 0; /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a); /* overflow is handled on the next line */ \
    c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}
210+
211+
/** Reduce a 512-bit number (8 little-endian 64-bit limbs in l) modulo the
 *  group order, writing the 256-bit result into r. Uses the identity
 *  2^256 == SECP256K1_N_C (mod n) to fold the high limbs down in stages. */
void static secp256k1_scalar_reduce_512(secp256k1_scalar_t *r, const uint64_t *l) {
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];

    // 160 bit accumulator.
    uint64_t c0, c1;
    uint32_t c2;

    // Reduce 512 bits into 385.
    // m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C.
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    sumadd_fast(l[1]);
    muladd(n1, SECP256K1_N_C_0);
    muladd(n0, SECP256K1_N_C_1);
    extract(m1);
    sumadd(l[2]);
    muladd(n2, SECP256K1_N_C_0);
    muladd(n1, SECP256K1_N_C_1);
    sumadd(n0);
    extract(m2);
    sumadd(l[3]);
    muladd(n3, SECP256K1_N_C_0);
    muladd(n2, SECP256K1_N_C_1);
    sumadd(n1);
    extract(m3);
    muladd(n3, SECP256K1_N_C_1);
    sumadd(n2);
    extract(m4);
    sumadd_fast(n3);
    extract_fast(m5);
    VERIFY_CHECK(c0 <= 1);
    m6 = c0;

    // Reduce 385 bits into 258.
    // p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C.
    uint64_t p0, p1, p2, p3;
    uint32_t p4;
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m4, SECP256K1_N_C_0);
    extract_fast(p0);
    sumadd_fast(m1);
    muladd(m5, SECP256K1_N_C_0);
    muladd(m4, SECP256K1_N_C_1);
    extract(p1);
    sumadd(m2);
    muladd(m6, SECP256K1_N_C_0);
    muladd(m5, SECP256K1_N_C_1);
    sumadd(m4);
    extract(p2);
    sumadd_fast(m3);
    muladd_fast(m6, SECP256K1_N_C_1);
    sumadd_fast(m5);
    extract_fast(p3);
    p4 = c0 + m6;
    VERIFY_CHECK(p4 <= 2);

    // Reduce 258 bits into 256.
    // r[0..3] = p[0..3] + p[4] * SECP256K1_N_C.
    uint128_t c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
    r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
    r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p2 + (uint128_t)p4;
    r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p3;
    r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;

    // Final reduction of r: at most one conditional subtraction of the order.
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
280+
281+
/** Multiply two scalars modulo the group order: r = (a * b) mod n. */
void static secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
    // 160 bit accumulator.
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    // 512-bit product, 8 limbs, least significant first.
    uint64_t l[8];

    // l[0..7] = a[0..3] * b[0..3], schoolbook multiplication column by column.
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l[0]);
    muladd(a->d[0], b->d[1]);
    muladd(a->d[1], b->d[0]);
    extract(l[1]);
    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l[2]);
    muladd(a->d[0], b->d[3]);
    muladd(a->d[1], b->d[2]);
    muladd(a->d[2], b->d[1]);
    muladd(a->d[3], b->d[0]);
    extract(l[3]);
    muladd(a->d[1], b->d[3]);
    muladd(a->d[2], b->d[2]);
    muladd(a->d[3], b->d[1]);
    extract(l[4]);
    muladd(a->d[2], b->d[3]);
    muladd(a->d[3], b->d[2]);
    extract(l[5]);
    muladd_fast(a->d[3], b->d[3]);
    extract_fast(l[6]);
    // c1 is unsigned, so `c1 <= 0` (as previously written) is just a confusing,
    // warning-prone spelling of equality; use `== 0` as secp256k1_scalar_sqr does.
    VERIFY_CHECK(c1 == 0);
    l[7] = c0;

    secp256k1_scalar_reduce_512(r, l);
}
317+
318+
/** Compute the square of a scalar modulo the group order: r = (a * a) mod n.
 *  Uses muladd2 to fold symmetric cross terms, halving the multiplications
 *  compared to secp256k1_scalar_mul(r, a, a). */
void static secp256k1_scalar_sqr(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
    // 160 bit accumulator.
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    // 512-bit square, 8 limbs, least significant first.
    uint64_t l[8];

    // l[0..7] = a[0..3] * a[0..3]. (Fixed comment: the original said
    // "a[0..3] * b[0..3]", copy-pasted from mul — there is no b here.)
    muladd_fast(a->d[0], a->d[0]);
    extract_fast(l[0]);
    muladd2(a->d[0], a->d[1]);
    extract(l[1]);
    muladd2(a->d[0], a->d[2]);
    muladd(a->d[1], a->d[1]);
    extract(l[2]);
    muladd2(a->d[0], a->d[3]);
    muladd2(a->d[1], a->d[2]);
    extract(l[3]);
    muladd2(a->d[1], a->d[3]);
    muladd(a->d[2], a->d[2]);
    extract(l[4]);
    muladd2(a->d[2], a->d[3]);
    extract(l[5]);
    muladd_fast(a->d[3], a->d[3]);
    extract_fast(l[6]);
    VERIFY_CHECK(c1 == 0);
    l[7] = c0;

    secp256k1_scalar_reduce_512(r, l);
}

#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef muladd2
#undef extract
#undef extract_fast

#endif

‎src/scalar_8x32.h

+15
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
// Copyright (c) 2014 Pieter Wuille
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef _SECP256K1_SCALAR_REPR_
#define _SECP256K1_SCALAR_REPR_

#include <stdint.h>

/** A scalar modulo the group order of the secp256k1 curve,
 *  represented as 8 x 32-bit limbs (for targets without a
 *  64x64->128 bit multiplier). */
typedef struct {
    uint32_t d[8];
} secp256k1_scalar_t;

#endif

‎src/scalar_8x32_impl.h

+570
Large diffs are not rendered by default.

‎src/scalar_impl.h

+163-51
Original file line numberDiff line numberDiff line change
@@ -9,62 +9,174 @@
99

1010
#include "scalar.h"
1111

12-
#include "group.h"
13-
14-
void static secp256k1_scalar_clear(secp256k1_scalar_t *r) {
15-
secp256k1_num_clear(&r->n);
16-
}
17-
18-
int static secp256k1_scalar_get_bits(const secp256k1_scalar_t *a, int offset, int count) {
19-
return secp256k1_num_get_bits(&a->n, offset, count);
20-
}
21-
22-
void static secp256k1_scalar_set_b32(secp256k1_scalar_t *r, const unsigned char *bin, int *overflow) {
23-
secp256k1_num_set_bin(&r->n, bin, 32);
24-
if (overflow) {
25-
*overflow = secp256k1_num_cmp(&r->n, &secp256k1_ge_consts->order) >= 0;
26-
}
27-
secp256k1_num_mod(&r->n, &secp256k1_ge_consts->order);
28-
}
29-
30-
void static secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar_t* a) {
31-
secp256k1_num_get_bin(bin, 32, &a->n);
32-
}
33-
34-
void static secp256k1_scalar_add(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
35-
secp256k1_num_add(&r->n, &a->n, &b->n);
36-
secp256k1_num_mod(&r->n, &secp256k1_ge_consts->order);
37-
}
38-
39-
void static secp256k1_scalar_mul(secp256k1_scalar_t *r, const secp256k1_scalar_t *a, const secp256k1_scalar_t *b) {
40-
secp256k1_num_mod_mul(&r->n, &a->n, &b->n, &secp256k1_ge_consts->order);
41-
}
42-
43-
void static secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
44-
secp256k1_num_mod_inverse(&r->n, &a->n, &secp256k1_ge_consts->order);
45-
}
46-
47-
void static secp256k1_scalar_negate(secp256k1_scalar_t *r, const secp256k1_scalar_t *a) {
48-
secp256k1_num_sub(&r->n, &secp256k1_ge_consts->order, &a->n);
49-
secp256k1_num_mod(&r->n, &secp256k1_ge_consts->order);
50-
}
51-
52-
int static secp256k1_scalar_is_zero(const secp256k1_scalar_t *a) {
53-
return secp256k1_num_is_zero(&a->n);
54-
}
55-
56-
int static secp256k1_scalar_is_one(const secp256k1_scalar_t *a) {
57-
return secp256k1_num_bits(&a->n) == 1;
58-
}
12+
#if defined HAVE_CONFIG_H
13+
#include "libsecp256k1-config.h"
14+
#endif
5915

60-
int static secp256k1_scalar_is_high(const secp256k1_scalar_t *a) {
61-
return secp256k1_num_cmp(&a->n, &secp256k1_ge_consts->half_order) > 0;
62-
}
16+
#if defined(USE_SCALAR_4X64)
17+
#include "scalar_4x64_impl.h"
18+
#elif defined(USE_SCALAR_8X32)
19+
#include "scalar_8x32_impl.h"
20+
#else
21+
#error "Please select scalar implementation"
22+
#endif
6323

6424
void static secp256k1_scalar_get_num(secp256k1_num_t *r, const secp256k1_scalar_t *a) {
6525
unsigned char c[32];
66-
secp256k1_num_get_bin(c, 32, &a->n);
26+
secp256k1_scalar_get_b32(c, a);
6727
secp256k1_num_set_bin(r, c, 32);
6828
}
6929

30+
31+
void static secp256k1_scalar_inverse(secp256k1_scalar_t *r, const secp256k1_scalar_t *x) {
    // Compute the modular inverse of x using a fixed square-and-multiply
    // addition chain, so the sequence of operations does not depend on the
    // value of x (no data-dependent branches).

    // First build x ^ (2^N - 1) for several values of N.
    secp256k1_scalar_t x2, x3, x4, x6, x7, x8, x15, x30, x60, x120, x127;

    secp256k1_scalar_sqr(&x2, x);
    secp256k1_scalar_mul(&x2, &x2, x);

    secp256k1_scalar_sqr(&x3, &x2);
    secp256k1_scalar_mul(&x3, &x3, x);

    secp256k1_scalar_sqr(&x4, &x3);
    secp256k1_scalar_mul(&x4, &x4, x);

    secp256k1_scalar_sqr(&x6, &x4);
    secp256k1_scalar_sqr(&x6, &x6);
    secp256k1_scalar_mul(&x6, &x6, &x2);

    secp256k1_scalar_sqr(&x7, &x6);
    secp256k1_scalar_mul(&x7, &x7, x);

    secp256k1_scalar_sqr(&x8, &x7);
    secp256k1_scalar_mul(&x8, &x8, x);

    secp256k1_scalar_sqr(&x15, &x8);
    for (int i = 0; i < 6; i++) {
        secp256k1_scalar_sqr(&x15, &x15);
    }
    secp256k1_scalar_mul(&x15, &x15, &x7);

    secp256k1_scalar_sqr(&x30, &x15);
    for (int i = 0; i < 14; i++) {
        secp256k1_scalar_sqr(&x30, &x30);
    }
    secp256k1_scalar_mul(&x30, &x30, &x15);

    secp256k1_scalar_sqr(&x60, &x30);
    for (int i = 0; i < 29; i++) {
        secp256k1_scalar_sqr(&x60, &x60);
    }
    secp256k1_scalar_mul(&x60, &x60, &x30);

    secp256k1_scalar_sqr(&x120, &x60);
    for (int i = 0; i < 59; i++) {
        secp256k1_scalar_sqr(&x120, &x120);
    }
    secp256k1_scalar_mul(&x120, &x120, &x60);

    secp256k1_scalar_sqr(&x127, &x120);
    for (int i = 0; i < 6; i++) {
        secp256k1_scalar_sqr(&x127, &x127);
    }
    secp256k1_scalar_mul(&x127, &x127, &x7);

    // Then accumulate the final result; t starts at x127. Each step squares
    // the accumulator a fixed number of times and multiplies in one of the
    // powers built above; the comment shows the exponent bits appended.
    secp256k1_scalar_t *t = &x127;
#define SQR_N_MUL(n, f) do { \
        for (int i = 0; i < (n); i++) { \
            secp256k1_scalar_sqr(t, t); \
        } \
        secp256k1_scalar_mul(t, t, (f)); \
    } while (0)

    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(4, &x3);   // 0111
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(4, &x3);   // 0111
    SQR_N_MUL(3, &x2);   // 011
    SQR_N_MUL(4, &x3);   // 0111
    SQR_N_MUL(5, &x3);   // 00111
    SQR_N_MUL(4, &x2);   // 0011
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(5, &x4);   // 01111
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(3, x);     // 001
    SQR_N_MUL(4, x);     // 0001
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(10, &x3);  // 0000000111
    SQR_N_MUL(4, &x3);   // 0111
    SQR_N_MUL(9, &x8);   // 011111111
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(3, x);     // 001
    SQR_N_MUL(3, x);     // 001
    SQR_N_MUL(5, &x4);   // 01111
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(5, &x2);   // 00011
    SQR_N_MUL(4, &x2);   // 0011
    SQR_N_MUL(2, x);     // 01
    SQR_N_MUL(8, &x2);   // 00000011
    SQR_N_MUL(3, &x2);   // 011
    SQR_N_MUL(3, x);     // 001
    SQR_N_MUL(6, x);     // 000001
#undef SQR_N_MUL

    // Final step writes directly into r.
    for (int i = 0; i < 8; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(r, t, &x6); // 00111111
}
181+
70182
#endif

‎src/tests.c

+8
Original file line numberDiff line numberDiff line change
@@ -373,6 +373,14 @@ void scalar_test(void) {
373373
secp256k1_scalar_add(&r2, &r2, &t);
374374
CHECK(secp256k1_scalar_eq(&r1, &r2));
375375
}
376+
377+
{
378+
// Test square.
379+
secp256k1_scalar_t r1, r2;
380+
secp256k1_scalar_sqr(&r1, &s1);
381+
secp256k1_scalar_mul(&r2, &s1, &s1);
382+
CHECK(secp256k1_scalar_eq(&r1, &r2));
383+
}
376384
}
377385

378386
void run_scalar_tests(void) {

0 commit comments

Comments
 (0)
Please sign in to comment.