#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H
/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)
/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
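/* Illustrative sketch (not part of the library): the constants above are
 * related by N_C = 2^256 - n and N_H = n >> 1, where n is the group order.
 * A hypothetical standalone check of the first identity, assuming a compiler
 * with unsigned __int128 support: */
#if 0
static void check_order_complement(void) {
    unsigned __int128 t;
    /* n + (2^256 - n) must equal exactly 2^256: add limbwise with carry. */
    t = (unsigned __int128)SECP256K1_N_0 + SECP256K1_N_C_0;
    VERIFY_CHECK((uint64_t)t == 0); t >>= 64;
    t += (unsigned __int128)SECP256K1_N_1 + SECP256K1_N_C_1;
    VERIFY_CHECK((uint64_t)t == 0); t >>= 64;
    t += (unsigned __int128)SECP256K1_N_2 + SECP256K1_N_C_2;
    VERIFY_CHECK((uint64_t)t == 0); t >>= 64;
    t += (unsigned __int128)SECP256K1_N_3; /* the would-be N_C_3 is 0 */
    VERIFY_CHECK((uint64_t)t == 0 && (uint64_t)(t >> 64) == 1); /* carry out = 2^256 */
}
#endif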
/* Conditionally add 2^256 - n to r (i.e. subtract n) when overflow is set. */
static SECP256K1_INLINE int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    uint128_t t;
    VERIFY_CHECK(overflow <= 1);
    t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint64_t)r->d[3];
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
    return overflow;
}
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    uint128_t t = (uint128_t)a->d[0] + b->d[0];
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[1] + b->d[1];
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[2] + b->d[2];
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[3] + b->d[3];
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    overflow = t + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    uint128_t t;
    VERIFY_CHECK(bit < 256);
    bit += ((uint32_t) flag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
    t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
}
/* Serialize the scalar as 32 bytes, big-endian. */
static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
    bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
    bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
    bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
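/* Illustrative usage (not part of the library): serialization round-trips
 * through secp256k1_scalar_set_b32 for any scalar below the group order
 * (both helpers used here are defined elsewhere in this file). */
#if 0
static void b32_roundtrip_sketch(const secp256k1_scalar *x) {
    unsigned char buf[32];
    secp256k1_scalar t;
    int overflow;
    secp256k1_scalar_get_b32(buf, x);
    secp256k1_scalar_set_b32(&t, buf, &overflow);
    VERIFY_CHECK(overflow == 0);
    VERIFY_CHECK(secp256k1_scalar_eq(&t, x));
}
#endif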
static SECP256K1_INLINE int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}
static SECP256K1_INLINE int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    uint64_t mask = !flag - 1;
    /* ... */
    return 2 * (mask == 0) - 1; /* 1 if r was left alone, -1 if it was negated */
}
/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        uint128_t t = (uint128_t)a * b; \
        th = t >> 64;  /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = t; \
    } \
    c0 += tl; th += (c0 < tl); c1 += th; c2 += (c1 < th); \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        uint128_t t = (uint128_t)a * b; \
        th = t >> 64; \
        tl = t; \
    } \
    c0 += tl; th += (c0 < tl); c1 += th; \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow. */
#define sumadd_fast(a) { \
    c0 += (a); c1 += (c0 < (a)); \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { (n) = c0; c0 = c1; c1 = c2; c2 = 0; }

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is assumed to be 0. */
#define extract_fast(n) { (n) = c0; c0 = c1; c1 = 0; VERIFY_CHECK(c2 == 0); }
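/* Illustrative sketch (not part of the library): how the (c0,c1,c2)
 * accumulator drives a schoolbook multiply. For output limb k, every partial
 * product a[i]*b[j] with i+j == k is summed into the accumulator, the low 64
 * bits are extracted, and the accumulator shifts down one limb. A hypothetical
 * 2x2-limb version of the pattern used by secp256k1_scalar_mul_512 below: */
#if 0
static void mul_2x2_sketch(uint64_t l[4], const uint64_t a[2], const uint64_t b[2]) {
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;
    muladd_fast(a[0], b[0]);   /* limb 0: a single partial product */
    extract_fast(l[0]);
    muladd(a[0], b[1]);        /* limb 1: two partial products */
    muladd(a[1], b[0]);
    extract(l[1]);
    muladd_fast(a[1], b[1]);   /* limb 2: the full product fits, so no c2 is needed */
    extract_fast(l[2]);
    l[3] = c0;                 /* remaining high limb */
}
#endif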
259 "movq 32(%%rsi), %%r11\n"
260 "movq 40(%%rsi), %%r12\n"
261 "movq 48(%%rsi), %%r13\n"
262 "movq 56(%%rsi), %%r14\n"
264 "movq 0(%%rsi), %%r8\n"
266 "xorq %%r10, %%r10\n"
276 "addq 8(%%rsi), %%r9\n"
282 "adcq %%rdx, %%r10\n"
288 "adcq %%rdx, %%r10\n"
294 "addq 16(%%rsi), %%r10\n"
300 "addq %%rax, %%r10\n"
306 "addq %%rax, %%r10\n"
310 "addq %%r11, %%r10\n"
315 "xorq %%r10, %%r10\n"
317 "addq 24(%%rsi), %%r8\n"
343 "adcq %%rdx, %%r10\n"
352 "addq %%r14, %%r10\n"
358 :
"=g"(
m0),
"=g"(
m1),
"=g"(
m2),
"=g"(
m3),
"=g"(
m4),
"=g"(
m5),
"=g"(
m6)
360 :
"rax",
"rdx",
"r8",
"r9",
"r10",
"r11",
"r12",
"r13",
"r14",
"cc");
371 "xorq %%r10, %%r10\n"
387 "adcq %%rdx, %%r10\n"
393 "adcq %%rdx, %%r10\n"
405 "addq %%rax, %%r10\n"
411 "addq %%rax, %%r10\n"
415 "addq %%r11, %%r10\n"
437 :
"=&g"(
p0),
"=&g"(
p1),
"=&g"(
p2),
"=g"(
p3),
"=g"(
p4)
439 :
"rax",
"rdx",
"r8",
"r9",
"r10",
"r11",
"r12",
"r13",
"cc");
452 "movq %%rax, 0(%q6)\n"
465 "movq %%r8, 8(%q6)\n"
474 "movq %%r9, 16(%q6)\n"
480 "movq %%r8, 24(%q6)\n"
485 :
"rax",
"rdx",
"r8",
"r9",
"r10",
"cc",
"memory");
#else
    uint128_t c;
    uint64_t p0, p1, p2, p3, p4;
    /* ... (fold 512 bits to 385 and then to 258 with the muladd/sumadd/extract
     * macros, leaving the 258-bit intermediate in p0..p4) ... */
    /* Reduce 258 bits into 256. */
    /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
    c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
    r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
    r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p2 + (uint128_t)p4;
    r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p3;
    r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
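/* Why the folding above works: N_C = 2^256 - n is exactly 2^256 mod n, so a
 * 512-bit value l = hi*2^256 + lo is congruent to hi*N_C + lo (mod n). Since
 * N_C < 2^129, each fold shrinks the operand (512 -> 385 -> 258 -> 256 bits),
 * and the final secp256k1_scalar_reduce call performs at most one conditional
 * subtraction of n. */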
564 "movq 0(%%rdi), %%r15\n"
565 "movq 8(%%rdi), %%rbx\n"
566 "movq 16(%%rdi), %%rcx\n"
567 "movq 0(%%rdx), %%r11\n"
568 "movq 8(%%rdx), %%r12\n"
569 "movq 16(%%rdx), %%r13\n"
570 "movq 24(%%rdx), %%r14\n"
572 "movq %%r15, %%rax\n"
575 "movq %%rax, 0(%%rsi)\n"
579 "xorq %%r10, %%r10\n"
581 "movq %%r15, %%rax\n"
587 "movq %%rbx, %%rax\n"
593 "movq %%r8, 8(%%rsi)\n"
596 "movq %%r15, %%rax\n"
599 "adcq %%rdx, %%r10\n"
602 "movq %%rbx, %%rax\n"
605 "adcq %%rdx, %%r10\n"
608 "movq %%rcx, %%rax\n"
611 "adcq %%rdx, %%r10\n"
614 "movq %%r9, 16(%%rsi)\n"
617 "movq %%r15, %%rax\n"
619 "addq %%rax, %%r10\n"
623 "movq 24(%%rdi), %%r15\n"
625 "movq %%rbx, %%rax\n"
627 "addq %%rax, %%r10\n"
631 "movq %%rcx, %%rax\n"
633 "addq %%rax, %%r10\n"
637 "movq %%r15, %%rax\n"
639 "addq %%rax, %%r10\n"
643 "movq %%r10, 24(%%rsi)\n"
644 "xorq %%r10, %%r10\n"
646 "movq %%rbx, %%rax\n"
652 "movq %%rcx, %%rax\n"
658 "movq %%r15, %%rax\n"
664 "movq %%r8, 32(%%rsi)\n"
667 "movq %%rcx, %%rax\n"
670 "adcq %%rdx, %%r10\n"
673 "movq %%r15, %%rax\n"
676 "adcq %%rdx, %%r10\n"
679 "movq %%r9, 40(%%rsi)\n"
681 "movq %%r15, %%rax\n"
683 "addq %%rax, %%r10\n"
686 "movq %%r10, 48(%%rsi)\n"
688 "movq %%r8, 56(%%rsi)\n"
691 :
"rax",
"rbx",
"rcx",
"r8",
"r9",
"r10",
"r11",
"r12",
"r13",
"r14",
"r15",
"cc",
"memory");
/* Shift the scalar right by n bits (1 <= n <= 15) and return the bits shifted out. */
static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
    r->d[3] = (r->d[3] >> n);
    return ret;
}
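/* Illustrative usage (hypothetical): shifting out the low bits both divides
 * the scalar by 2^n and returns those bits, e.g. extracting the 16 lowest
 * bits as four base-16 digits: */
#if 0
static void low_digits_sketch(int digits[4], secp256k1_scalar *s) {
    int i;
    for (i = 0; i < 4; i++) {
        digits[i] = secp256k1_scalar_shr_int(s, 4); /* next base-16 digit */
    }
}
#endif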
static SECP256K1_INLINE int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    /* Branch-free comparison: OR together the XORs of all limb pairs. */
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}
static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
    const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];

    /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus),
     * with limbs in [0,2^62), and a top limb below 2^(256-62*4) = 2^8. */
    VERIFY_CHECK(a0 >> 62 == 0);
    VERIFY_CHECK(a1 >> 62 == 0);
    VERIFY_CHECK(a2 >> 62 == 0);
    VERIFY_CHECK(a3 >> 62 == 0);
    VERIFY_CHECK(a4 >> 8 == 0);

    r->d[0] = a0      | a1 << 62;
    r->d[1] = a1 >> 2 | a2 << 60;
    r->d[2] = a2 >> 4 | a3 << 58;
    r->d[3] = a3 >> 6 | a4 << 56;
}
static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];

    r->v[0] =  a0                  & M62;
    r->v[1] = (a0 >> 62 | a1 << 2) & M62;
    r->v[2] = (a1 >> 60 | a2 << 4) & M62;
    r->v[3] = (a2 >> 58 | a3 << 6) & M62;
    r->v[4] =  a3 >> 56;
}
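/* Illustrative sketch (not part of the library): the 62-bit limb boundaries
 * fall at bits 62, 124, 186 and 248, so the two conversions above are exact
 * inverses for any fully reduced scalar. A hypothetical round-trip check: */
#if 0
static void signed62_roundtrip_sketch(const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
    secp256k1_scalar y;
    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_scalar_from_signed62(&y, &s);
    VERIFY_CHECK(secp256k1_scalar_eq(&y, x));
}
#endif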
static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
    {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}}, /* the order n, in signed62 form */
    0x34F20099AA774EC1LL /* n^-1 mod 2^62 */
};
static SECP256K1_INLINE int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */