int128_struct_impl.h
#ifndef SECP256K1_INT128_STRUCT_IMPL_H
#define SECP256K1_INT128_STRUCT_IMPL_H

#include "int128.h"
#include "util.h"

#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) /* MSVC */
# include <intrin.h>
# if defined(_M_ARM64) || defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
/* On ARM64 MSVC, use __(u)mulh for the upper half of 64x64 multiplications.
   (Define SECP256K1_MSVC_MULH_TEST_OVERRIDE to test this code path on X64,
   which supports both __(u)mulh and _umul128.) */
# if defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
# pragma message(__FILE__ ": SECP256K1_MSVC_MULH_TEST_OVERRIDE is defined, forcing use of __(u)mulh.")
# endif
static SECP256K1_INLINE uint64_t secp256k1_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
    *hi = __umulh(a, b);
    return a * b;
}

static SECP256K1_INLINE int64_t secp256k1_mul128(int64_t a, int64_t b, int64_t* hi) {
    *hi = __mulh(a, b);
    return (uint64_t)a * (uint64_t)b;
}
# else
/* On x86_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */
# define secp256k1_umul128 _umul128
# define secp256k1_mul128 _mul128
# endif
#else
/* On other systems, emulate 64x64->128 multiplications using 32x32->64 multiplications. */
static SECP256K1_INLINE uint64_t secp256k1_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
    uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b;
    uint64_t lh = (uint32_t)a * (b >> 32);
    uint64_t hl = (a >> 32) * (uint32_t)b;
    uint64_t hh = (a >> 32) * (b >> 32);
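    /* (ll >> 32), (uint32_t)lh and (uint32_t)hl are each below 2^32, so their sum
     * is below 3*2^32 and fits in 34 bits (hence "mid34"); the additions below
     * cannot wrap the 64-bit accumulator. */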
    uint64_t mid34 = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    *hi = hh + (lh >> 32) + (hl >> 32) + (mid34 >> 32);
    return (mid34 << 32) + (uint32_t)ll;
}

static SECP256K1_INLINE int64_t secp256k1_mul128(int64_t a, int64_t b, int64_t* hi) {
    uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b;
    int64_t lh = (uint32_t)a * (b >> 32);
    int64_t hl = (a >> 32) * (uint32_t)b;
    int64_t hh = (a >> 32) * (b >> 32);
    uint64_t mid34 = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    *hi = hh + (lh >> 32) + (hl >> 32) + (mid34 >> 32);
    return (mid34 << 32) + (uint32_t)ll;
}
#endif

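/* Store the unsigned 128-bit value hi * 2^64 + lo in r. */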
static SECP256K1_INLINE void secp256k1_u128_load(secp256k1_uint128 *r, uint64_t hi, uint64_t lo) {
    r->hi = hi;
    r->lo = lo;
}

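/* Set r to the full 128-bit product of the unsigned 64-bit values a and b. */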
static SECP256K1_INLINE void secp256k1_u128_mul(secp256k1_uint128 *r, uint64_t a, uint64_t b) {
    r->lo = secp256k1_umul128(a, b, &r->hi);
}

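/* Add the 128-bit product a*b to r, wrapping modulo 2^128; (r->lo < lo) detects
 * the carry out of the low limb. */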
static SECP256K1_INLINE void secp256k1_u128_accum_mul(secp256k1_uint128 *r, uint64_t a, uint64_t b) {
    uint64_t lo, hi;
    lo = secp256k1_umul128(a, b, &hi);
    r->lo += lo;
    r->hi += hi + (r->lo < lo);
}

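/* Add the unsigned 64-bit value a to r, wrapping modulo 2^128. */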
static SECP256K1_INLINE void secp256k1_u128_accum_u64(secp256k1_uint128 *r, uint64_t a) {
    r->lo += a;
    r->hi += r->lo < a;
}

/* Unsigned (logical) right shift.
 * Non-constant time in n.
 */
static SECP256K1_INLINE void secp256k1_u128_rshift(secp256k1_uint128 *r, unsigned int n) {
    VERIFY_CHECK(n < 128);
    if (n >= 64) {
        r->lo = r->hi >> (n-64);
        r->hi = 0;
    } else if (n > 0) {
#if defined(_MSC_VER) && defined(_M_X64)
        VERIFY_CHECK(n < 64);
        r->lo = __shiftright128(r->lo, r->hi, n);
#else
        r->lo = ((1U * r->hi) << (64-n)) | r->lo >> n;
#endif
        r->hi >>= n;
    }
}

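/* Return the low 64 bits of a. */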
static SECP256K1_INLINE uint64_t secp256k1_u128_to_u64(const secp256k1_uint128 *a) {
    return a->lo;
}

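/* Return the high 64 bits of a. */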
static SECP256K1_INLINE uint64_t secp256k1_u128_hi_u64(const secp256k1_uint128 *a) {
    return a->hi;
}

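/* Set r to the unsigned 64-bit value a, zero-extended to 128 bits. */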
static SECP256K1_INLINE void secp256k1_u128_from_u64(secp256k1_uint128 *r, uint64_t a) {
    r->hi = 0;
    r->lo = a;
}

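/* Check whether r fits in n bits, i.e. whether r < 2^n. */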
static SECP256K1_INLINE int secp256k1_u128_check_bits(const secp256k1_uint128 *r, unsigned int n) {
    VERIFY_CHECK(n < 128);
    return n >= 64 ? r->hi >> (n - 64) == 0
                   : r->hi == 0 && r->lo >> n == 0;
}

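/* Store the signed 128-bit value hi * 2^64 + lo in r. */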
static SECP256K1_INLINE void secp256k1_i128_load(secp256k1_int128 *r, int64_t hi, uint64_t lo) {
    r->hi = hi;
    r->lo = lo;
}

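/* Set r to the full signed 128-bit product of the 64-bit values a and b. */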
static SECP256K1_INLINE void secp256k1_i128_mul(secp256k1_int128 *r, int64_t a, int64_t b) {
    int64_t hi;
    r->lo = (uint64_t)secp256k1_mul128(a, b, &hi);
    r->hi = (uint64_t)hi;
}

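/* Add the signed 128-bit product a*b to r; the result must stay within the
 * signed 128-bit range, which the VERIFY_CHECKs below assert. */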
static SECP256K1_INLINE void secp256k1_i128_accum_mul(secp256k1_int128 *r, int64_t a, int64_t b) {
    int64_t hi;
    uint64_t lo = (uint64_t)secp256k1_mul128(a, b, &hi);
    r->lo += lo;
    hi += r->lo < lo;
    /* Verify no overflow.
     * If r represents a positive value (the sign bit is not set) and the value we are adding is a positive value (the sign bit is not set),
     * then we require that the resulting value also be positive (the sign bit is not set).
     * Note that (X <= Y) means (X implies Y) when X and Y are boolean values (i.e. 0 or 1).
     */
    VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi <= 0x7fffffffffffffffu));
    /* Verify no underflow.
     * If r represents a negative value (the sign bit is set) and the value we are adding is a negative value (the sign bit is set),
     * then we require that the resulting value also be negative (the sign bit is set).
     */
    VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi > 0x7fffffffffffffffu));
    r->hi += hi;
}

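/* Subtract ("dissipate") the signed 128-bit product a*b from r; the result must
 * stay within the signed 128-bit range, which the VERIFY_CHECKs below assert. */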
static SECP256K1_INLINE void secp256k1_i128_dissip_mul(secp256k1_int128 *r, int64_t a, int64_t b) {
    int64_t hi;
    uint64_t lo = (uint64_t)secp256k1_mul128(a, b, &hi);
    hi += r->lo < lo;
    /* Verify no overflow.
     * If r represents a positive value (the sign bit is not set) and the value we are subtracting is a negative value (the sign bit is set),
     * then we require that the resulting value also be positive (the sign bit is not set).
     */
    VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi <= 0x7fffffffffffffffu));
    /* Verify no underflow.
     * If r represents a negative value (the sign bit is set) and the value we are subtracting is a positive value (the sign bit is not set),
     * then we require that the resulting value also be negative (the sign bit is set).
     */
    VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi > 0x7fffffffffffffffu));
    r->hi -= hi;
    r->lo -= lo;
}

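/* Set r to the determinant a*d - b*c of the 2x2 matrix [[a, b], [c, d]]. */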
static SECP256K1_INLINE void secp256k1_i128_det(secp256k1_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) {
    secp256k1_i128_mul(r, a, d);
    secp256k1_i128_dissip_mul(r, b, c);
}

/* Signed (arithmetic) right shift.
 * Non-constant time in n.
 */
static SECP256K1_INLINE void secp256k1_i128_rshift(secp256k1_int128 *r, unsigned int n) {
    VERIFY_CHECK(n < 128);
    if (n >= 64) {
        r->lo = (uint64_t)((int64_t)(r->hi) >> (n-64));
        r->hi = (uint64_t)((int64_t)(r->hi) >> 63);
    } else if (n > 0) {
        r->lo = ((1U * r->hi) << (64-n)) | r->lo >> n;
        r->hi = (uint64_t)((int64_t)(r->hi) >> n);
    }
}

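/* Return the low 64 bits of a. */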
static SECP256K1_INLINE uint64_t secp256k1_i128_to_u64(const secp256k1_int128 *a) {
    return a->lo;
}

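/* Return a as a signed 64-bit value; a must fit in 64 bits. */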
static SECP256K1_INLINE int64_t secp256k1_i128_to_i64(const secp256k1_int128 *a) {
    /* Verify that a represents a 64 bit signed value by checking that the high bits are a sign extension of the low bits. */
    VERIFY_CHECK(a->hi == -(a->lo >> 63));
    return (int64_t)secp256k1_i128_to_u64(a);
}

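/* Set r to the signed 64-bit value a, sign-extended to 128 bits. */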
static SECP256K1_INLINE void secp256k1_i128_from_i64(secp256k1_int128 *r, int64_t a) {
    r->hi = (uint64_t)(a >> 63);
    r->lo = (uint64_t)a;
}

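/* Compare two 128-bit values for equality; not constant time. */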
static SECP256K1_INLINE int secp256k1_i128_eq_var(const secp256k1_int128 *a, const secp256k1_int128 *b) {
    return a->hi == b->hi && a->lo == b->lo;
}

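/* Check whether r equals sign * 2^n, where sign is 1 or -1 and n < 127. */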
static SECP256K1_INLINE int secp256k1_i128_check_pow2(const secp256k1_int128 *r, unsigned int n, int sign) {
    VERIFY_CHECK(n < 127);
    VERIFY_CHECK(sign == 1 || sign == -1);
    return n >= 64 ? r->hi == (uint64_t)sign << (n - 64) && r->lo == 0
                   : r->hi == (uint64_t)(sign >> 1) && r->lo == (uint64_t)sign << n;
}

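/* A minimal usage sketch (illustrative only; not part of the upstream file):
 * accumulate an unsigned 64x64 product and read back both halves.
 *
 *     secp256k1_uint128 acc;
 *     secp256k1_u128_mul(&acc, 0xFFFFFFFFFFFFFFFFu, 2);  // acc = 2^65 - 2
 *     secp256k1_u128_accum_u64(&acc, 2);                 // acc = 2^65
 *     // secp256k1_u128_hi_u64(&acc) == 2 and secp256k1_u128_to_u64(&acc) == 0
 */
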
#endif