#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL)
#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL)
#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL)
#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (~SECP256K1_N_2)
#define SECP256K1_N_C_3 (~SECP256K1_N_3)
#define SECP256K1_N_C_4 (1)

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL)
#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL)
#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL)
#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL)
#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
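
/* A standalone sanity-check sketch (not part of the library) for the constants
 * above: the SECP256K1_N_C_* limbs are the low limbs of 2^256 - n, so adding
 * them to n wraps to exactly 2^256, and the SECP256K1_N_H_* limbs satisfy
 * 2*N_H + 1 == n, i.e. N_H == (n - 1) / 2. Uses only the macros defined above
 * plus <assert.h>; the function name is illustrative. */
#include <assert.h>

static void check_order_constants(void) {
    const uint32_t n[8]  = {SECP256K1_N_0, SECP256K1_N_1, SECP256K1_N_2, SECP256K1_N_3,
                            SECP256K1_N_4, SECP256K1_N_5, SECP256K1_N_6, SECP256K1_N_7};
    const uint32_t nc[5] = {SECP256K1_N_C_0, SECP256K1_N_C_1, SECP256K1_N_C_2,
                            SECP256K1_N_C_3, SECP256K1_N_C_4};
    const uint32_t nh[8] = {SECP256K1_N_H_0, SECP256K1_N_H_1, SECP256K1_N_H_2, SECP256K1_N_H_3,
                            SECP256K1_N_H_4, SECP256K1_N_H_5, SECP256K1_N_H_6, SECP256K1_N_H_7};
    uint64_t c;
    int i;

    /* n + (2^256 - n) == 2^256: every result limb is zero and the final carry is 1.
     * Limbs 5..7 of the complement are zero, so only nc[0..4] are needed. */
    c = 0;
    for (i = 0; i < 8; i++) {
        c += (uint64_t)n[i] + (i < 5 ? nc[i] : 0);
        assert((uint32_t)c == 0);
        c >>= 32;
    }
    assert(c == 1);

    /* 2*N_H + 1 == n (n is odd), so N_H is half the order rounded down. */
    c = 1;
    for (i = 0; i < 8; i++) {
        c += 2 * (uint64_t)nh[i];
        assert((uint32_t)c == n[i]);
        c >>= 32;
    }
    assert(c == 0);
}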

/* secp256k1_scalar_reduce: write-back steps. Each accumulation into the 64-bit
 * variable t is followed by one of these lines: the low 32 bits become the next
 * result limb and the carry stays in t. */
    r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
    r->d[1] = t & 0xFFFFFFFFUL; t >>= 32;
    r->d[2] = t & 0xFFFFFFFFUL; t >>= 32;
    r->d[3] = t & 0xFFFFFFFFUL; t >>= 32;
    r->d[4] = t & 0xFFFFFFFFUL; t >>= 32;
    r->d[5] = t & 0xFFFFFFFFUL; t >>= 32;
    r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
    r->d[7] = t & 0xFFFFFFFFUL;
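
/* A minimal standalone sketch (not the library's code) of the write-back
 * pattern above: conditionally reduce an 8x32 value modulo the order n by
 * adding overflow * (2^256 - n), which modulo 2^256 is the same as subtracting
 * overflow * n. The scalar_8x32 struct and reduce_once name are illustrative
 * stand-ins; the SECP256K1_N_C_* limbs are the ones defined at the top of this
 * file (their upper three limbs are zero). */
typedef struct { uint32_t d[8]; } scalar_8x32;

static void reduce_once(scalar_8x32 *r, uint32_t overflow) {
    static const uint32_t NC[5] = {
        SECP256K1_N_C_0, SECP256K1_N_C_1, SECP256K1_N_C_2, SECP256K1_N_C_3, SECP256K1_N_C_4
    };
    uint64_t t = 0;
    int i;
    /* overflow must be 0 or 1. */
    for (i = 0; i < 8; i++) {
        t += (uint64_t)r->d[i] + (i < 5 ? (uint64_t)overflow * NC[i] : 0);
        r->d[i] = (uint32_t)t;   /* same effect as t & 0xFFFFFFFFUL */
        t >>= 32;                /* keep the carry for the next limb */
    }
    /* The final carry is discarded: the result is taken modulo 2^256, which is
     * exactly what makes "add 2^256 - n" equivalent to "subtract n". */
}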

/* secp256k1_scalar_add: limb-by-limb addition of a and b through the 64-bit
 * carry variable t; the low 32 bits become the result limb and the remaining
 * carry participates in the overflow check that follows. */
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
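
/* A hedged sketch (not the library's exact code) of the addition pattern shown
 * above: add two scalars limb by limb through a 64-bit carry, decide whether
 * the sum reached the order n, and fold it back with one conditional
 * reduction. ge_order and add_mod_n are illustrative names; ge_order uses an
 * early-exit comparison and is therefore not constant time, unlike the real
 * secp256k1_scalar_check_overflow. Reuses scalar_8x32 and reduce_once from the
 * sketch above. */
static int ge_order(const scalar_8x32 *r, uint64_t carry) {
    static const uint32_t N[8] = {
        SECP256K1_N_0, SECP256K1_N_1, SECP256K1_N_2, SECP256K1_N_3,
        SECP256K1_N_4, SECP256K1_N_5, SECP256K1_N_6, SECP256K1_N_7
    };
    int i;
    if (carry) return 1;                  /* sum >= 2^256 > n */
    for (i = 7; i >= 0; i--) {            /* compare from the most significant limb */
        if (r->d[i] > N[i]) return 1;
        if (r->d[i] < N[i]) return 0;
    }
    return 1;                             /* sum == n */
}

static int add_mod_n(scalar_8x32 *r, const scalar_8x32 *a, const scalar_8x32 *b) {
    uint64_t t = 0;
    int i, overflow;
    for (i = 0; i < 8; i++) {
        t += (uint64_t)a->d[i] + b->d[i]; /* limb sum plus incoming carry */
        r->d[i] = (uint32_t)t;
        t >>= 32;
    }
    overflow = ge_order(r, t);            /* 1 iff a + b >= n */
    reduce_once(r, (uint32_t)overflow);   /* subtract n once if needed */
    return overflow;
}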

/* secp256k1_scalar_cadd_bit: after 2^bit is (conditionally) added into the
 * limb that contains it, the carry is propagated upward through t. */
    r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
    r->d[7] = t & 0xFFFFFFFFULL;
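
/* A hedged sketch (not the library's exact code) of what the carry chain above
 * implements: conditionally add 2^bit (bit < 256) to a scalar. The real
 * secp256k1_scalar_cadd_bit also handles flag in a constant-time way and
 * assumes the result cannot overflow the order; this sketch keeps only the
 * carry-propagation idea. Reuses the illustrative scalar_8x32 struct. */
static void cadd_bit_sketch(scalar_8x32 *r, unsigned int bit, int flag) {
    uint64_t t = 0;
    unsigned int i;
    for (i = 0; i < 8; i++) {
        /* Inject 2^bit into the limb that contains it, but only if flag is set. */
        uint32_t inc = (flag != 0 && (bit >> 5) == i) ? ((uint32_t)1 << (bit & 0x1F)) : 0;
        t += (uint64_t)r->d[i] + inc;
        r->d[i] = (uint32_t)t;
        t >>= 32;
    }
}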

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar *a) {
    bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
    bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
    bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
    bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4];
    bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3];
    bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2];
    bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1];
    bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
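
/* A hedged round-trip sketch (not the library's exact code) of the byte order
 * used above: d[7] is the most significant limb, so byte 0 of the output is
 * its top byte and byte 31 is the low byte of d[0]. write_b32 mirrors the
 * unrolled code above; parse_b32 is the illustrative inverse (the real parser,
 * secp256k1_scalar_set_b32, additionally reduces modulo n and reports
 * overflow). Reuses the illustrative scalar_8x32 struct. */
static void write_b32(unsigned char *bin, const scalar_8x32 *a) {
    int i;
    for (i = 0; i < 8; i++) {
        uint32_t limb = a->d[7 - i];               /* most significant limb first */
        bin[4 * i + 0] = (unsigned char)(limb >> 24);
        bin[4 * i + 1] = (unsigned char)(limb >> 16);
        bin[4 * i + 2] = (unsigned char)(limb >> 8);
        bin[4 * i + 3] = (unsigned char)limb;
    }
}

static void parse_b32(scalar_8x32 *r, const unsigned char *bin) {
    int i;
    for (i = 0; i < 8; i++) {
        r->d[7 - i] = (uint32_t)bin[4 * i] << 24 | (uint32_t)bin[4 * i + 1] << 16
                    | (uint32_t)bin[4 * i + 2] << 8 | (uint32_t)bin[4 * i + 3];
    }
}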

static SECP256K1_INLINE int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}

static SECP256K1_INLINE int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
}
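
/* The two tests above OR-fold every limb instead of looping with an early
 * exit, so the work done does not depend on the value being tested. A hedged
 * loop-based equivalent with illustrative names, reusing the scalar_8x32
 * struct from the earlier sketches: */
static int is_zero_8x32(const scalar_8x32 *a) {
    uint32_t acc = 0;
    int i;
    for (i = 0; i < 8; i++) acc |= a->d[i];  /* nonzero iff any limb is nonzero */
    return acc == 0;
}

static int is_one_8x32(const scalar_8x32 *a) {
    uint32_t acc = a->d[0] ^ 1;              /* low limb must be exactly 1 */
    int i;
    for (i = 1; i < 8; i++) acc |= a->d[i];  /* all other limbs must be 0 */
    return acc == 0;
}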

/* secp256k1_scalar_cond_negate: report what happened to the caller. mask is 0
 * when flag was 0 (scalar left unchanged) and all-ones when flag was 1 (scalar
 * negated), so this returns +1 or -1 respectively. */
    return 2 * (mask == 0) - 1;
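
/* A hedged sketch (not the library's exact code) of the conditional negation
 * that ends with the return statement above: with mask = 0 the scalar passes
 * through unchanged, with mask = 0xFFFFFFFF every limb is complemented and
 * n + 1 is added, so the result is (n - r) mod 2^256. The real function also
 * special-cases r == 0 and is written to run in constant time. Illustrative
 * names; reuses the scalar_8x32 struct from the earlier sketches. */
static int cond_negate_sketch(scalar_8x32 *r, int flag) {
    static const uint32_t N[8] = {
        SECP256K1_N_0, SECP256K1_N_1, SECP256K1_N_2, SECP256K1_N_3,
        SECP256K1_N_4, SECP256K1_N_5, SECP256K1_N_6, SECP256K1_N_7
    };
    uint32_t mask = (uint32_t)-flag;   /* 0x00000000 or 0xFFFFFFFF */
    uint64_t t = mask & 1;             /* the "+1" that completes ~r + 1 == -r */
    int i;
    for (i = 0; i < 8; i++) {
        t += (uint64_t)(r->d[i] ^ mask) + (N[i] & mask);
        r->d[i] = (uint32_t)t;
        t >>= 32;
    }
    return 2 * (mask == 0) - 1;        /* +1 if unchanged, -1 if negated */
}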

#define muladd(a,b) { \
    uint64_t t = (uint64_t)a * b; \
    /* ... split t into its 32-bit halves and propagate the carries into c0, c1, c2 ... */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}
#define muladd_fast(a,b) { \
    uint64_t t = (uint64_t)a * b; \
    /* ... split t into its 32-bit halves and propagate the carries into c0, c1 ... */ \
    VERIFY_CHECK(c1 >= th); \
}
#define sumadd_fast(a) { \
    /* ... add a into c0 and carry into c1 ... */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}
#define extract(n) { \
    /* ... move c0 into n, then shift (c0,c1,c2) down one word ... */ \
}
#define extract_fast(n) { \
    /* ... move c0 into n, then shift (c0,c1) down one word ... */ \
    VERIFY_CHECK(c2 == 0); \
}
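
/* A hedged sketch of how the accumulator macros above are used: products
 * a[i]*b[j] with i + j == k are summed into a 96-bit accumulator held in the
 * three 32-bit words c0, c1, c2, then the low word is extracted as limb k of
 * the result and the accumulator shifts down. This multiplies two 2-limb
 * (64-bit) values into a 4-limb product; the library applies the same column
 * scheme to 8-limb scalars in secp256k1_scalar_mul_512. The sketch assumes the
 * full macro bodies elided above, and mul_2x2 is an illustrative name. */
static void mul_2x2(uint32_t l[4], const uint32_t a[2], const uint32_t b[2]) {
    uint32_t c0 = 0, c1 = 0, c2 = 0;   /* the (c0,c1,c2) accumulator */

    muladd_fast(a[0], b[0]);           /* column 0: a0*b0 */
    extract_fast(l[0]);
    muladd(a[0], b[1]);                /* column 1: a0*b1 + a1*b0 */
    muladd(a[1], b[0]);
    extract(l[1]);
    muladd_fast(a[1], b[1]);           /* column 2: a1*b1 */
    extract_fast(l[2]);
    l[3] = c0;                         /* column 3: remaining high bits */
}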

    uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12;

/* secp256k1_scalar_reduce_512: write-back of the folded result. The running
 * 64-bit carry is kept in c; the carry left over after d[7] feeds the final
 * conditional reduction. */
    r->d[0] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[1] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[2] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[3] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[4] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[5] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[6] = c & 0xFFFFFFFFUL; c >>= 32;
    r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;
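
/* A hedged sketch (not the library's exact code) of the folding step whose
 * write-back appears above. Since 2^256 mod n equals 2^256 - n, a value of at
 * most 258 bits held in nine limbs p[0..8] can be folded to eight limbs as
 * p[0..7] + p[8] * (2^256 - n), using the SECP256K1_N_C_* limbs from the top
 * of this file; the returned carry (0 or 1) then drives one last conditional
 * reduction such as the reduce_once sketch earlier. fold_258 is an
 * illustrative name; it reuses the scalar_8x32 struct. */
static uint32_t fold_258(scalar_8x32 *r, const uint32_t p[9]) {
    static const uint32_t NC[5] = {
        SECP256K1_N_C_0, SECP256K1_N_C_1, SECP256K1_N_C_2, SECP256K1_N_C_3, SECP256K1_N_C_4
    };
    uint64_t c = 0;
    int i;
    for (i = 0; i < 8; i++) {
        c += (uint64_t)p[i] + (i < 5 ? (uint64_t)NC[i] * p[8] : 0);
        r->d[i] = (uint32_t)c;  /* low 32 bits become result limb i */
        c >>= 32;               /* carry into the next limb */
    }
    return (uint32_t)c;         /* at most 1 when p fits in 258 bits */
}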

/* secp256k1_scalar_shr_int: return the n low bits in ret (0 < n < 16) and
 * shift the scalar right by n, pulling bits down from the next-higher limb. */
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
    r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
    r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
    r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
    r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
    r->d[7] = (r->d[7] >> n);
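
/* A usage sketch for the shift above: secp256k1_scalar_shr_int returns the n
 * low bits it shifts out (here n = 4), so calling it 64 times walks through a
 * scalar's hexadecimal digits from least to most significant, destroying the
 * scalar in the process. nibbles_example is an illustrative helper, not a
 * library function. */
static void nibbles_example(secp256k1_scalar *s, unsigned char out[64]) {
    int i;
    for (i = 0; i < 64; i++) {
        out[i] = (unsigned char)secp256k1_scalar_shr_int(s, 4);  /* low 4 bits, then s >>= 4 */
    }
}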

static SECP256K1_INLINE int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
}

/* secp256k1_scalar_from_signed30: pack the nine 30-bit limbs a0..a8 of a
 * signed30 value (non-negative here) into the eight 32-bit limbs of the
 * scalar. */
    r->d[0] = a0       | a1 << 30;
    r->d[1] = a1 >>  2 | a2 << 28;
    r->d[2] = a2 >>  4 | a3 << 26;
    r->d[3] = a3 >>  6 | a4 << 24;
    r->d[4] = a4 >>  8 | a5 << 22;
    r->d[5] = a5 >> 10 | a6 << 20;
    r->d[6] = a6 >> 12 | a7 << 18;
    r->d[7] = a7 >> 14 | a8 << 16;

/* secp256k1_scalar_to_signed30: split the eight 32-bit limbs a0..a7 of the
 * scalar into 30-bit limbs v[1]..v[7] (v[0] and v[8] are handled on the
 * surrounding lines); M30 masks to the low 30 bits. */
    r->v[1] = (a0 >> 30 | a1 <<  2) & M30;
    r->v[2] = (a1 >> 28 | a2 <<  4) & M30;
    r->v[3] = (a2 >> 26 | a3 <<  6) & M30;
    r->v[4] = (a3 >> 24 | a4 <<  8) & M30;
    r->v[5] = (a4 >> 22 | a5 << 10) & M30;
    r->v[6] = (a5 >> 20 | a6 << 12) & M30;
    r->v[7] = (a6 >> 18 | a7 << 14) & M30;
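
/* A hedged sketch (not library code) of the limb layout the two conversions
 * above translate between: limb v[k] of the signed30 form carries bits
 * 30*k .. 30*k+29 of the 256-bit value, while d[k] of the scalar carries bits
 * 32*k .. 32*k+31. The helpers below are illustrative and assume the signed30
 * limbs are non-negative, as produced by secp256k1_scalar_to_signed30. */
static int bit_of_d(const uint32_t d[8], int i) { return (d[i >> 5] >> (i & 31)) & 1; }
static int bit_of_v(const int32_t v[9], int i)  { return (v[i / 30] >> (i % 30)) & 1; }

static int same_value(const uint32_t d[8], const int32_t v[9]) {
    int i;
    for (i = 0; i < 256; i++) {
        if (bit_of_d(d, i) != bit_of_v(v, i)) return 0;
    }
    return 1;   /* both encodings describe the same 256-bit value */
}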

/* secp256k1_const_modinfo_scalar: the group order n encoded in signed30 form,
 * nine base-2^30 digits, least significant first (digits may be negative). */
    {{0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, 0, 0, 0, 65536}},
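
/* A standalone sanity-check sketch (not library code) that the signed30 digits
 * above really encode the group order n: converting the SECP256K1_N_* limbs to
 * base 2^30 with the same shifts as secp256k1_scalar_to_signed30 and
 * subtracting digit by digit must leave no remainder. Because the digits above
 * may be negative, the difference is propagated with a signed carry. Assumes
 * <assert.h>; all names are illustrative. */
static void check_modinfo_modulus(void) {
    const int64_t v[9] = {0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL,
                          -0x146L, 0, 0, 0, 65536};
    const uint32_t d[8] = {SECP256K1_N_0, SECP256K1_N_1, SECP256K1_N_2, SECP256K1_N_3,
                           SECP256K1_N_4, SECP256K1_N_5, SECP256K1_N_6, SECP256K1_N_7};
    const uint32_t M30 = UINT32_MAX >> 2;           /* mask for 30-bit digits */
    const int64_t n30[9] = {                        /* n in base 2^30 */
         d[0]                     & M30,
        (d[0] >> 30 | d[1] <<  2) & M30,
        (d[1] >> 28 | d[2] <<  4) & M30,
        (d[2] >> 26 | d[3] <<  6) & M30,
        (d[3] >> 24 | d[4] <<  8) & M30,
        (d[4] >> 22 | d[5] << 10) & M30,
        (d[5] >> 20 | d[6] << 12) & M30,
        (d[6] >> 18 | d[7] << 14) & M30,
         d[7] >> 16
    };
    int64_t c = 0;
    int i;
    for (i = 0; i < 9; i++) {
        c += v[i] - n30[i];
        assert(((uint32_t)c & M30) == 0);           /* difference vanishes digit by digit */
        c /= (int64_t)1 << 30;                      /* exact: c is a multiple of 2^30 */
    }
    assert(c == 0);                                 /* the two encodings are equal */
}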

static SECP256K1_INLINE int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}

/* Declarations of the functions and macros referenced above: */
static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo)
static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo)
static SECP256K1_INLINE int secp256k1_scalar_is_even(const secp256k1_scalar *a)
static SECP256K1_INLINE int secp256k1_scalar_check_overflow(const secp256k1_scalar *a)
static SECP256K1_INLINE void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift)
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k)
static SECP256K1_INLINE unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
static SECP256K1_INLINE void secp256k1_scalar_clear(secp256k1_scalar *r)
#define extract(n)
Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits.
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow)
static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b)
static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x)
#define sumadd_fast(a)
Add a to the number defined by (c0,c1).
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar *a)
static SECP256K1_INLINE void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v)
static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x)
static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag)
#define extract_fast(n)
Extract the lowest 32 bits of (c0,c1,c2) into n, and left shift the number 32 bits.
#define muladd(a, b)
Add a*b to the number defined by (c0,c1,c2).
static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l)
static SECP256K1_INLINE int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b)
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
#define sumadd(a)
Add a to the number defined by (c0,c1,c2).
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag)
static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b)
static const secp256k1_modinv32_modinfo secp256k1_const_modinfo_scalar
static SECP256K1_INLINE int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow)
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a)
static SECP256K1_INLINE int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
static int secp256k1_scalar_is_high(const secp256k1_scalar *a)
static SECP256K1_INLINE unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
static void secp256k1_scalar_from_signed30(secp256k1_scalar *r, const secp256k1_modinv32_signed30 *a)
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag)
#define muladd_fast(a, b)
Add a*b to the number defined by (c0,c1).
static SECP256K1_INLINE int secp256k1_scalar_is_one(const secp256k1_scalar *a)
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n)
static void secp256k1_scalar_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_scalar *a)
#define VG_CHECK_VERIFY(x, y)
#define VERIFY_CHECK(cond)
A scalar modulo the group order of the secp256k1 curve.