/***********************************************************************
 * Copyright (c) 2020 Peter Dettman                                     *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_MODINV64_IMPL_H
#define SECP256K1_MODINV64_IMPL_H

#include "int128.h"
#include "modinv64.h"

/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and
 * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang.
 *
 * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an
 * implementation for N=62, using 62-bit signed limbs represented as int64_t.
 */
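
/* Illustrative sketch, not part of the original file: the signed62 form used
 * throughout stores a number x as five signed limbs v[0..4], with
 *     x = v[0] + v[1]*2^62 + v[2]*2^124 + v[3]*2^186 + v[4]*2^248.
 * The guard macro and helper name below are hypothetical, for exposition only;
 * only secp256k1_modinv64_signed62 (from modinv64.h) is real. */
#ifdef SECP256K1_MODINV64_EXAMPLES /* hypothetical guard; examples only */
static void modinv64_example_from_u64(secp256k1_modinv64_signed62 *r, uint64_t x) {
    r->v[0] = (int64_t)(x & (UINT64_MAX >> 2)); /* bottom 62 bits */
    r->v[1] = (int64_t)(x >> 62);               /* remaining top 2 bits */
    r->v[2] = r->v[3] = r->v[4] = 0;
}
#endif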

/* Data type for transition matrices (see section 3 of explanation).
 *
 * t = [ u  v ]
 *     [ q  r ]
 */
typedef struct {
    int64_t u, v, q, r;
} secp256k1_modinv64_trans2x2;
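
/* Illustrative sketch, not part of the original file: a single divstep as
 * defined in the Bernstein-Yang paper, on plain integers (f odd). The
 * divsteps_59/divsteps_62_var functions below perform many of these at once,
 * recording their aggregate effect on (f,g) as one trans2x2 matrix. The guard
 * macro and function name are hypothetical, for exposition only. */
#ifdef SECP256K1_MODINV64_EXAMPLES /* hypothetical guard; examples only */
static void modinv64_example_divstep(int64_t *delta, int64_t *f, int64_t *g) {
    if (*delta > 0 && (*g & 1)) {
        /* delta > 0 and g odd: swap f and g (negating the old f), halve g. */
        int64_t tmp = *g;
        *g = (*g - *f) >> 1;
        *f = tmp;
        *delta = 1 - *delta;
    } else if (*g & 1) {
        /* g odd: add f to make g even, then halve it. */
        *g = (*g + *f) >> 1;
        *delta = 1 + *delta;
    } else {
        /* g even: just halve it. */
        *g >>= 1;
        *delta = 1 + *delta;
    }
}
#endif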

#ifdef VERIFY
/* Helper function to compute the absolute value of an int64_t.
 * (we don't use abs/labs/llabs as they depend on the int sizes). */
static int64_t secp256k1_modinv64_abs(int64_t v) {
    VERIFY_CHECK(v > INT64_MIN);
    if (v < 0) return -v;
    return v;
}

static const secp256k1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}};

/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */
static void secp256k1_modinv64_mul_62(secp256k1_modinv64_signed62 *r, const secp256k1_modinv64_signed62 *a, int alen, int64_t factor) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    secp256k1_int128 c, d;
    int i;
    secp256k1_i128_from_i64(&c, 0);
    for (i = 0; i < 4; ++i) {
        if (i < alen) secp256k1_i128_accum_mul(&c, a->v[i], factor);
        r->v[i] = secp256k1_i128_to_i64(&c) & M62; secp256k1_i128_rshift(&c, 62);
    }
    if (4 < alen) secp256k1_i128_accum_mul(&c, a->v[4], factor);
    secp256k1_i128_from_i64(&d, secp256k1_i128_to_i64(&c));
    VERIFY_CHECK(secp256k1_i128_eq_var(&c, &d)); /* c fits in an int64_t */
    r->v[4] = secp256k1_i128_to_i64(&c);
}
55 
56 /* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */
57 static int secp256k1_modinv64_mul_cmp_62(const secp256k1_modinv64_signed62 *a, int alen, const secp256k1_modinv64_signed62 *b, int64_t factor) {
58  int i;
60  secp256k1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */
61  secp256k1_modinv64_mul_62(&bm, b, 5, factor);
62  for (i = 0; i < 4; ++i) {
63  /* Verify that all but the top limb of a and b are normalized. */
64  VERIFY_CHECK(am.v[i] >> 62 == 0);
65  VERIFY_CHECK(bm.v[i] >> 62 == 0);
66  }
67  for (i = 4; i >= 0; --i) {
68  if (am.v[i] < bm.v[i]) return -1;
69  if (am.v[i] > bm.v[i]) return 1;
70  }
71  return 0;
72 }
73 
74 /* Check if the determinant of t is equal to 1 << n. */
75 static int secp256k1_modinv64_det_check_pow2(const secp256k1_modinv64_trans2x2 *t, unsigned int n) {
77  secp256k1_i128_det(&a, t->u, t->v, t->q, t->r);
78  return secp256k1_i128_check_pow2(&a, n);
79 }
80 #endif

/* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus
 * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the
 * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range
 * [0,2^62). */
static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int64_t sign, const secp256k1_modinv64_modinfo *modinfo) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4];
    int64_t cond_add, cond_negate;

#ifdef VERIFY
    /* Verify that all limbs are in range (-2^62,2^62). */
    int i;
    for (i = 0; i < 5; ++i) {
        VERIFY_CHECK(r->v[i] >= -M62);
        VERIFY_CHECK(r->v[i] <= M62);
    }
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0);  /* r < modulus */
#endif

    /* In a first step, add the modulus if the input is negative, and then negate if requested.
     * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input
     * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right
     * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is
     * indeed the behavior of the right shift operator). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    cond_negate = sign >> 63;
    r0 = (r0 ^ cond_negate) - cond_negate;
    r1 = (r1 ^ cond_negate) - cond_negate;
    r2 = (r2 ^ cond_negate) - cond_negate;
    r3 = (r3 ^ cond_negate) - cond_negate;
    r4 = (r4 ^ cond_negate) - cond_negate;
    /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    /* In a second step add the modulus again if the result is still negative, bringing
     * r to range [0,modulus). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    /* And propagate again. */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    r->v[0] = r0;
    r->v[1] = r1;
    r->v[2] = r2;
    r->v[3] = r3;
    r->v[4] = r4;

#ifdef VERIFY
    VERIFY_CHECK(r0 >> 62 == 0);
    VERIFY_CHECK(r1 >> 62 == 0);
    VERIFY_CHECK(r2 >> 62 == 0);
    VERIFY_CHECK(r3 >> 62 == 0);
    VERIFY_CHECK(r4 >> 62 == 0);
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0);  /* r < modulus */
#endif
}
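
/* Illustrative sketch, not part of the original file: the branch-free
 * conditional pattern used above. With a mask that is all-ones (-1) or
 * all-zeros (0), (x ^ mask) - mask yields -x or x, and (m & mask) yields m
 * or 0, with no data-dependent branch. The guard macro and function name are
 * hypothetical, for exposition only. */
#ifdef SECP256K1_MODINV64_EXAMPLES /* hypothetical guard; examples only */
static int64_t modinv64_example_cond_negate(int64_t x, int64_t sign) {
    int64_t mask = sign >> 63;  /* -1 if sign < 0, else 0 (sign-extending shift) */
    return (x ^ mask) - mask;   /* -x if sign < 0, else x */
}
#endif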

/* Compute the transition matrix and zeta for 59 divsteps (where zeta=-(delta+1/2)).
 * Note that the transformation matrix is scaled by 2^62 and not 2^59.
 *
 * Input:  zeta: initial zeta
 *         f0:   bottom limb of initial f
 *         g0:   bottom limb of initial g
 * Output: t: transition matrix
 * Return: final zeta
 *
 * Implements the divsteps_n_matrix function from the explanation.
 */
static int64_t secp256k1_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) {
    /* u,v,q,r are the elements of the transformation matrix being built up,
     * starting with the identity matrix times 8 (because the caller expects
     * a result scaled by 2^62). Semantically they are signed integers
     * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This
     * permits left shifting (which is UB for negative numbers). The range
     * being inside [-2^63,2^63) means that casting to signed works correctly.
     */
    uint64_t u = 8, v = 0, q = 0, r = 8;
    uint64_t c1, c2, f = f0, g = g0, x, y, z;
    int i;

    for (i = 3; i < 62; ++i) {
        VERIFY_CHECK((f & 1) == 1); /* f must always be odd */
        VERIFY_CHECK((u * f0 + v * g0) == f << i);
        VERIFY_CHECK((q * f0 + r * g0) == g << i);
        /* Compute conditional masks for (zeta < 0) and for (g & 1). */
        c1 = zeta >> 63;
        c2 = -(g & 1);
        /* Compute x,y,z, conditionally negated versions of f,u,v. */
        x = (f ^ c1) - c1;
        y = (u ^ c1) - c1;
        z = (v ^ c1) - c1;
        /* Conditionally add x,y,z to g,q,r. */
        g += x & c2;
        q += y & c2;
        r += z & c2;
        /* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */
        c1 &= c2;
        /* Conditionally change zeta into -zeta-2 or zeta-1. */
        zeta = (zeta ^ c1) - 1;
        /* Conditionally add g,q,r to f,u,v. */
        f += g & c1;
        u += q & c1;
        v += r & c1;
        /* Shifts */
        g >>= 1;
        u <<= 1;
        v <<= 1;
        /* Bounds on zeta that follow from the bounds on iteration count (max 10*59 divsteps). */
        VERIFY_CHECK(zeta >= -591 && zeta <= 591);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;
#ifdef VERIFY
    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial
     * 8*identity (which has determinant 2^6) means the overall output has determinant
     * 2^65. */
    VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 65));
#endif
    return zeta;
}

/* Compute the transition matrix and eta for 62 divsteps (variable time, eta=-delta).
 *
 * Input:  eta:  initial eta
 *         f0:   bottom limb of initial f
 *         g0:   bottom limb of initial g
 * Output: t: transition matrix
 * Return: final eta
 *
 * Implements the divsteps_n_matrix_var function from the explanation.
 */
static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) {
    /* Transformation matrix; see comments in secp256k1_modinv64_divsteps_59. */
    uint64_t u = 1, v = 0, q = 0, r = 1;
    uint64_t f = f0, g = g0, m;
    uint32_t w;
    int i = 62, limit, zeros;

    for (;;) {
        /* Use a sentinel bit to count zeros only up to i. */
        zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i));
        /* Perform zeros divsteps at once; they all just divide g by two. */
        g >>= zeros;
        u <<= zeros;
        v <<= zeros;
        eta -= zeros;
        i -= zeros;
        /* We're done once we've done 62 divsteps. */
        if (i == 0) break;
        VERIFY_CHECK((f & 1) == 1);
        VERIFY_CHECK((g & 1) == 1);
        VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i));
        VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i));
        /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */
        VERIFY_CHECK(eta >= -745 && eta <= 745);
        /* If eta is negative, negate it and replace f,g with g,-f. */
        if (eta < 0) {
            uint64_t tmp;
            eta = -eta;
            tmp = f; f = g; g = -tmp;
            tmp = u; u = q; q = -tmp;
            tmp = v; v = r; r = -tmp;
            /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled
             * out (as we'd be done before that point), and no more than eta+1 can be done as its
             * sign will flip again once that happens. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 6) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 63U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6)
             * bits. */
            w = (f * g * (f * f - 2)) & m;
        } else {
            /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as
             * eta tends to be smaller here. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 4) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 15U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4)
             * bits. */
            w = f + (((f + 1) & 4) << 1);
            w = (-w * g) & m;
        }
        g += f * w;
        q += u * w;
        r += v * w;
        VERIFY_CHECK((g & m) == 0);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;
#ifdef VERIFY
    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 62 of them will have determinant 2^62. */
    VERIFY_CHECK(secp256k1_modinv64_det_check_pow2(t, 62));
#endif
    return eta;
}
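
/* Illustrative sketch, not part of the original file: the two magic formulas
 * above compute (negated) inverses of an odd f modulo small powers of two, so
 * that adding w*f to g clears g's bottom bits. This self-check (guard macro
 * and function name hypothetical, exposition only) verifies both identities
 * over all odd residues:
 *   f*(f*f-2)          == -f^-1 (mod 2^6)
 *   f + (((f+1)&4)<<1) ==  f^-1 (mod 2^4) */
#ifdef SECP256K1_MODINV64_EXAMPLES /* hypothetical guard; examples only */
static int modinv64_example_check_formulas(void) {
    uint64_t f;
    for (f = 1; f < 64; f += 2) {
        /* Multiplying the first formula by f must give -1 mod 64. */
        if (((f * (f * f - 2) * f) & 63) != 63) return 0;
        /* Multiplying the second formula by f must give 1 mod 16. */
        if ((((f + (((f + 1) & 4) << 1)) * f) & 15) != 1) return 0;
    }
    return 1;
}
#endif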

/* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix scaled by 2^62.
 *
 * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
 * (-2^62,2^62).
 *
 * This implements the update_de function from the explanation.
 */
static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp256k1_modinv64_signed62 *e, const secp256k1_modinv64_trans2x2 *t, const secp256k1_modinv64_modinfo *modinfo) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
    const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t md, me, sd, se;
    secp256k1_int128 cd, ce;
#ifdef VERIFY
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d < modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e < modulus */
    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
#endif
    /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
    sd = d4 >> 63;
    se = e4 >> 63;
    md = (u & sd) + (v & se);
    me = (q & sd) + (r & se);
    /* Begin computing t*[d,e]. */
    secp256k1_i128_mul(&cd, u, d0);
    secp256k1_i128_accum_mul(&cd, v, e0);
    secp256k1_i128_mul(&ce, q, d0);
    secp256k1_i128_accum_mul(&ce, r, e0);
    /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */
    md -= (modinfo->modulus_inv62 * (uint64_t)secp256k1_i128_to_i64(&cd) + md) & M62;
    me -= (modinfo->modulus_inv62 * (uint64_t)secp256k1_i128_to_i64(&ce) + me) & M62;
    /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[0], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[0], me);
    /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_i64(&cd) & M62) == 0); secp256k1_i128_rshift(&cd, 62);
    VERIFY_CHECK((secp256k1_i128_to_i64(&ce) & M62) == 0); secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
    secp256k1_i128_accum_mul(&cd, u, d1);
    secp256k1_i128_accum_mul(&cd, v, e1);
    secp256k1_i128_accum_mul(&ce, q, d1);
    secp256k1_i128_accum_mul(&ce, r, e1);
    if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */
        secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[1], md);
        secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[1], me);
    }
    d->v[0] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[0] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
    secp256k1_i128_accum_mul(&cd, u, d2);
    secp256k1_i128_accum_mul(&cd, v, e2);
    secp256k1_i128_accum_mul(&ce, q, d2);
    secp256k1_i128_accum_mul(&ce, r, e2);
    if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */
        secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[2], md);
        secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[2], me);
    }
    d->v[1] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[1] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
    secp256k1_i128_accum_mul(&cd, u, d3);
    secp256k1_i128_accum_mul(&cd, v, e3);
    secp256k1_i128_accum_mul(&ce, q, d3);
    secp256k1_i128_accum_mul(&ce, r, e3);
    if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */
        secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[3], md);
        secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[3], me);
    }
    d->v[2] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[2] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
    secp256k1_i128_accum_mul(&cd, u, d4);
    secp256k1_i128_accum_mul(&cd, v, e4);
    secp256k1_i128_accum_mul(&ce, q, d4);
    secp256k1_i128_accum_mul(&ce, r, e4);
    secp256k1_i128_accum_mul(&cd, modinfo->modulus.v[4], md);
    secp256k1_i128_accum_mul(&ce, modinfo->modulus.v[4], me);
    d->v[3] = secp256k1_i128_to_i64(&cd) & M62; secp256k1_i128_rshift(&cd, 62);
    e->v[3] = secp256k1_i128_to_i64(&ce) & M62; secp256k1_i128_rshift(&ce, 62);
    /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
    d->v[4] = secp256k1_i128_to_i64(&cd);
    e->v[4] = secp256k1_i128_to_i64(&ce);
#ifdef VERIFY
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d < modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e < modulus */
#endif
}
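
/* Illustrative sketch, not part of the original file: modinfo->modulus_inv62,
 * used above, is the inverse of the modulus modulo 2^62. For exposition, one
 * way to derive it for an odd m is Newton iteration on 64-bit words: each
 * step x *= 2 - m*x doubles the number of correct bottom bits. The guard
 * macro and function name are hypothetical. */
#ifdef SECP256K1_MODINV64_EXAMPLES /* hypothetical guard; examples only */
static uint64_t modinv64_example_inv62(uint64_t m) {
    uint64_t x = m;               /* correct to 3 bits: m*m == 1 mod 8 for odd m */
    x *= 2 - m * x;               /* 6 bits */
    x *= 2 - m * x;               /* 12 bits */
    x *= 2 - m * x;               /* 24 bits */
    x *= 2 - m * x;               /* 48 bits */
    x *= 2 - m * x;               /* 96 bits >= 62 */
    return x & (UINT64_MAX >> 2); /* reduce mod 2^62 */
}
#endif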

/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
 *
 * This implements the update_fg function from the explanation.
 */
static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
    const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    secp256k1_int128 cf, cg;
    /* Start computing t*[f,g]. */
    secp256k1_i128_mul(&cf, u, f0);
    secp256k1_i128_accum_mul(&cf, v, g0);
    secp256k1_i128_mul(&cg, q, f0);
    secp256k1_i128_accum_mul(&cg, r, g0);
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_i64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
    VERIFY_CHECK((secp256k1_i128_to_i64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
    secp256k1_i128_accum_mul(&cf, u, f1);
    secp256k1_i128_accum_mul(&cf, v, g1);
    secp256k1_i128_accum_mul(&cg, q, f1);
    secp256k1_i128_accum_mul(&cg, r, g1);
    f->v[0] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[0] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 2 of t*[f,g], and store it as output limb 1. */
    secp256k1_i128_accum_mul(&cf, u, f2);
    secp256k1_i128_accum_mul(&cf, v, g2);
    secp256k1_i128_accum_mul(&cg, q, f2);
    secp256k1_i128_accum_mul(&cg, r, g2);
    f->v[1] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[1] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 3 of t*[f,g], and store it as output limb 2. */
    secp256k1_i128_accum_mul(&cf, u, f3);
    secp256k1_i128_accum_mul(&cf, v, g3);
    secp256k1_i128_accum_mul(&cg, q, f3);
    secp256k1_i128_accum_mul(&cg, r, g3);
    f->v[2] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[2] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* Compute limb 4 of t*[f,g], and store it as output limb 3. */
    secp256k1_i128_accum_mul(&cf, u, f4);
    secp256k1_i128_accum_mul(&cf, v, g4);
    secp256k1_i128_accum_mul(&cg, q, f4);
    secp256k1_i128_accum_mul(&cg, r, g4);
    f->v[3] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
    g->v[3] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
    f->v[4] = secp256k1_i128_to_i64(&cf);
    g->v[4] = secp256k1_i128_to_i64(&cg);
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
 *
 * Version that operates on a variable number of limbs in f and g.
 *
 * This implements the update_fg function from the explanation.
 */
static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t fi, gi;
    secp256k1_int128 cf, cg;
    int i;
    VERIFY_CHECK(len > 0);
    /* Start computing t*[f,g]. */
    fi = f->v[0];
    gi = g->v[0];
    secp256k1_i128_mul(&cf, u, fi);
    secp256k1_i128_accum_mul(&cf, v, gi);
    secp256k1_i128_mul(&cg, q, fi);
    secp256k1_i128_accum_mul(&cg, r, gi);
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK((secp256k1_i128_to_i64(&cf) & M62) == 0); secp256k1_i128_rshift(&cf, 62);
    VERIFY_CHECK((secp256k1_i128_to_i64(&cg) & M62) == 0); secp256k1_i128_rshift(&cg, 62);
    /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
     * down by 62 bits). */
    for (i = 1; i < len; ++i) {
        fi = f->v[i];
        gi = g->v[i];
        secp256k1_i128_accum_mul(&cf, u, fi);
        secp256k1_i128_accum_mul(&cf, v, gi);
        secp256k1_i128_accum_mul(&cg, q, fi);
        secp256k1_i128_accum_mul(&cg, r, gi);
        f->v[i - 1] = secp256k1_i128_to_i64(&cf) & M62; secp256k1_i128_rshift(&cf, 62);
        g->v[i - 1] = secp256k1_i128_to_i64(&cg) & M62; secp256k1_i128_rshift(&cg, 62);
    }
    /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
    f->v[len - 1] = secp256k1_i128_to_i64(&cf);
    g->v[len - 1] = secp256k1_i128_to_i64(&cg);
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */
    secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 f = modinfo->modulus;
    secp256k1_modinv64_signed62 g = *x;
    int i;
    int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */

    /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */
    for (i = 0; i < 10; ++i) {
        /* Compute transition matrix and new zeta after 59 divsteps. */
        secp256k1_modinv64_trans2x2 t;
        zeta = secp256k1_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
        secp256k1_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
    }

    /* At this point sufficient iterations have been performed that g must have reached 0
     * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
     * values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
    *x = d;
}
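
/* Illustrative sketch, not part of the original file: a minimal use of the
 * constant-time inverse on a toy modulus. The initializer assumes the field
 * order in secp256k1_modinv64_modinfo is {modulus, modulus_inv62} (see
 * modinv64.h); 922337203685477581 is 5^-1 mod 2^62. The guard macro and
 * function name are hypothetical, for exposition only. */
#ifdef SECP256K1_MODINV64_EXAMPLES /* hypothetical guard; examples only */
static void modinv64_example_usage(void) {
    static const secp256k1_modinv64_modinfo modinfo_5 = {{{5, 0, 0, 0, 0}}, 922337203685477581};
    secp256k1_modinv64_signed62 x = {{3, 0, 0, 0, 0}};
    secp256k1_modinv64(&x, &modinfo_5);
    /* Now x.v[0] == 2, since 3*2 == 6 == 1 mod 5. */
}
#endif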

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
    secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 f = modinfo->modulus;
    secp256k1_modinv64_signed62 g = *x;
#ifdef VERIFY
    int i = 0;
#endif
    int j, len = 5;
    int64_t eta = -1; /* eta = -delta; delta is initially 1 */
    int64_t cond, fn, gn;

    /* Do iterations of 62 divsteps each until g=0. */
    while (1) {
        /* Compute transition matrix and new eta after 62 divsteps. */
        secp256k1_modinv64_trans2x2 t;
        eta = secp256k1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
        secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
        /* If the bottom limb of g is zero, there is a chance that g=0. */
        if (g.v[0] == 0) {
            cond = 0;
            /* Check if the other limbs are also 0. */
            for (j = 1; j < len; ++j) {
                cond |= g.v[j];
            }
            /* If so, we're done. */
            if (cond == 0) break;
        }

        /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */
        fn = f.v[len - 1];
        gn = g.v[len - 1];
        cond = ((int64_t)len - 2) >> 63;
        cond |= fn ^ (fn >> 63);
        cond |= gn ^ (gn >> 63);
        /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */
        if (cond == 0) {
            f.v[len - 2] |= (uint64_t)fn << 62;
            g.v[len - 2] |= (uint64_t)gn << 62;
            --len;
        }
#ifdef VERIFY
        VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
    }

    /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
     * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
    *x = d;
}

#endif /* SECP256K1_MODINV64_IMPL_H */