modinv64_impl.h
/***********************************************************************
 * Copyright (c) 2020 Peter Dettman                                    *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef SECP256K1_MODINV64_IMPL_H
#define SECP256K1_MODINV64_IMPL_H

#include "modinv64.h"

#include "util.h"

/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and
 * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang.
 *
 * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an
 * implementation for N=62, using 62-bit signed limbs represented as int64_t.
 */
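/* A secp256k1_modinv64_signed62 value {v[0],...,v[4]} represents the integer
 * v[0] + v[1]*2^62 + v[2]*2^124 + v[3]*2^186 + v[4]*2^248 (see modinv64.h); limbs
 * are allowed to be negative, which the signed-digit arithmetic below relies on. */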

#ifdef VERIFY
/* Helper function to compute the absolute value of an int64_t.
 * (we don't use abs/labs/llabs as it depends on the int sizes). */
static int64_t secp256k1_modinv64_abs(int64_t v) {
    VERIFY_CHECK(v > INT64_MIN);
    if (v < 0) return -v;
    return v;
}

static const secp256k1_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1, 0, 0, 0, 0}};

/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). */
static void secp256k1_modinv64_mul_62(secp256k1_modinv64_signed62 *r, const secp256k1_modinv64_signed62 *a, int alen, int64_t factor) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    int128_t c = 0;
    int i;
    for (i = 0; i < 4; ++i) {
        if (i < alen) c += (int128_t)a->v[i] * factor;
        r->v[i] = (int64_t)c & M62; c >>= 62;
    }
    if (4 < alen) c += (int128_t)a->v[4] * factor;
    VERIFY_CHECK(c == (int64_t)c);
    r->v[4] = (int64_t)c;
}

/* Return -1 for a<b*factor, 0 for a==b*factor, 1 for a>b*factor. A has alen limbs; b has 5. */
static int secp256k1_modinv64_mul_cmp_62(const secp256k1_modinv64_signed62 *a, int alen, const secp256k1_modinv64_signed62 *b, int64_t factor) {
    int i;
    secp256k1_modinv64_signed62 am, bm;
    secp256k1_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */
    secp256k1_modinv64_mul_62(&bm, b, 5, factor);
    for (i = 0; i < 4; ++i) {
        /* Verify that all but the top limb of a and b are normalized. */
        VERIFY_CHECK(am.v[i] >> 62 == 0);
        VERIFY_CHECK(bm.v[i] >> 62 == 0);
    }
    for (i = 4; i >= 0; --i) {
        if (am.v[i] < bm.v[i]) return -1;
        if (am.v[i] > bm.v[i]) return 1;
    }
    return 0;
}
#endif

/* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus
 * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the
 * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range
 * [0,2^62). */
static void secp256k1_modinv64_normalize_62(secp256k1_modinv64_signed62 *r, int64_t sign, const secp256k1_modinv64_modinfo *modinfo) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4];
    int64_t cond_add, cond_negate;

#ifdef VERIFY
    /* Verify that all limbs are in range (-2^62,2^62). */
    int i;
    for (i = 0; i < 5; ++i) {
        VERIFY_CHECK(r->v[i] >= -M62);
        VERIFY_CHECK(r->v[i] <= M62);
    }
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0);  /* r < modulus */
#endif

    /* In a first step, add the modulus if the input is negative, and then negate if requested.
     * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input
     * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right
     * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is
     * indeed the behavior of the right shift operator). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
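    /* If sign < 0, cond_negate is all ones and (x ^ cond_negate) - cond_negate
     * negates each limb (two's complement identity: -x == ~x + 1); if sign >= 0
     * it is all zeros and the limbs pass through unchanged. */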
    cond_negate = sign >> 63;
    r0 = (r0 ^ cond_negate) - cond_negate;
    r1 = (r1 ^ cond_negate) - cond_negate;
    r2 = (r2 ^ cond_negate) - cond_negate;
    r3 = (r3 ^ cond_negate) - cond_negate;
    r4 = (r4 ^ cond_negate) - cond_negate;
    /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    /* In a second step add the modulus again if the result is still negative, bringing
     * r to range [0,modulus). */
    cond_add = r4 >> 63;
    r0 += modinfo->modulus.v[0] & cond_add;
    r1 += modinfo->modulus.v[1] & cond_add;
    r2 += modinfo->modulus.v[2] & cond_add;
    r3 += modinfo->modulus.v[3] & cond_add;
    r4 += modinfo->modulus.v[4] & cond_add;
    /* And propagate again. */
    r1 += r0 >> 62; r0 &= M62;
    r2 += r1 >> 62; r1 &= M62;
    r3 += r2 >> 62; r2 &= M62;
    r4 += r3 >> 62; r3 &= M62;

    r->v[0] = r0;
    r->v[1] = r1;
    r->v[2] = r2;
    r->v[3] = r3;
    r->v[4] = r4;

#ifdef VERIFY
    VERIFY_CHECK(r0 >> 62 == 0);
    VERIFY_CHECK(r1 >> 62 == 0);
    VERIFY_CHECK(r2 >> 62 == 0);
    VERIFY_CHECK(r3 >> 62 == 0);
    VERIFY_CHECK(r4 >> 62 == 0);
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0);  /* r < modulus */
#endif
}

/* Data type for transition matrices (see section 3 of explanation).
 *
 * t = [ u v ]
 *     [ q r ]
 */
typedef struct {
    int64_t u, v, q, r;
} secp256k1_modinv64_trans2x2;

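/* Multiplying [f, g] as a column vector by t yields [u*f + v*g, q*f + r*g]. The
 * divsteps functions below construct t so that both entries are divisible by 2^62,
 * and the update functions perform the multiplication and shift that factor out. */
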
/* Compute the transition matrix and eta for 62 divsteps.
 *
 * Input:  eta: initial eta
 *         f0:  bottom limb of initial f
 *         g0:  bottom limb of initial g
 * Output: t: transition matrix
 * Return: final eta
 *
 * Implements the divsteps_n_matrix function from the explanation.
 */
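/* As can be read off the branch-free code below, a single divstep maps
 * (eta, f, g) as follows:
 *   g even:          (eta, f, g) -> (eta - 1, f, g/2)
 *   g odd, eta >= 0: (eta, f, g) -> (eta - 1, f, (g + f)/2)
 *   g odd, eta < 0:  (eta, f, g) -> (-eta - 1, g, (g - f)/2)
 * 62 of these are applied to the bottom limbs only, while the corresponding
 * transition matrix is accumulated in u,v,q,r. */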
static int64_t secp256k1_modinv64_divsteps_62(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) {
    /* u,v,q,r are the elements of the transformation matrix being built up,
     * starting with the identity matrix. Semantically they are signed integers
     * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This
     * permits left shifting (which is UB for negative numbers). The range
     * being inside [-2^63,2^63) means that casting to signed works correctly.
     */
    uint64_t u = 1, v = 0, q = 0, r = 1;
    uint64_t c1, c2, f = f0, g = g0, x, y, z;
    int i;

    for (i = 0; i < 62; ++i) {
        VERIFY_CHECK((f & 1) == 1); /* f must always be odd */
        VERIFY_CHECK((u * f0 + v * g0) == f << i);
        VERIFY_CHECK((q * f0 + r * g0) == g << i);
        /* Compute conditional masks for (eta < 0) and for (g & 1). */
        c1 = eta >> 63;
        c2 = -(g & 1);
        /* Compute x,y,z, conditionally negated versions of f,u,v. */
        x = (f ^ c1) - c1;
        y = (u ^ c1) - c1;
        z = (v ^ c1) - c1;
        /* Conditionally add x,y,z to g,q,r. */
        g += x & c2;
        q += y & c2;
        r += z & c2;
        /* In what follows, c1 is a condition mask for (eta < 0) and (g & 1). */
        c1 &= c2;
        /* Conditionally negate eta, and unconditionally subtract 1. */
        eta = (eta ^ c1) - (c1 + 1);
        /* Conditionally add g,q,r to f,u,v. */
        f += g & c1;
        u += q & c1;
        v += r & c1;
        /* Shifts */
        g >>= 1;
        u <<= 1;
        v <<= 1;
        /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */
        VERIFY_CHECK(eta >= -745 && eta <= 745);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;
    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 62 of them will have determinant 2^62. */
    VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62);
    return eta;
}

/* Compute the transition matrix and eta for 62 divsteps (variable time).
 *
 * Input:  eta: initial eta
 *         f0:  bottom limb of initial f
 *         g0:  bottom limb of initial g
 * Output: t: transition matrix
 * Return: final eta
 *
 * Implements the divsteps_n_matrix_var function from the explanation.
 */
static int64_t secp256k1_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, secp256k1_modinv64_trans2x2 *t) {
    /* Transformation matrix; see comments in secp256k1_modinv64_divsteps_62. */
    uint64_t u = 1, v = 0, q = 0, r = 1;
    uint64_t f = f0, g = g0, m;
    uint32_t w;
    int i = 62, limit, zeros;

    for (;;) {
        /* Use a sentinel bit to count zeros only up to i. */
        zeros = secp256k1_ctz64_var(g | (UINT64_MAX << i));
        /* Perform zeros divsteps at once; they all just divide g by two. */
        g >>= zeros;
        u <<= zeros;
        v <<= zeros;
        eta -= zeros;
        i -= zeros;
        /* We're done once we've done 62 divsteps. */
        if (i == 0) break;
        VERIFY_CHECK((f & 1) == 1);
        VERIFY_CHECK((g & 1) == 1);
        VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i));
        VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i));
        /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */
        VERIFY_CHECK(eta >= -745 && eta <= 745);
        /* If eta is negative, negate it and replace f,g with g,-f. */
        if (eta < 0) {
            uint64_t tmp;
            eta = -eta;
            tmp = f; f = g; g = -tmp;
            tmp = u; u = q; q = -tmp;
            tmp = v; v = r; r = -tmp;
            /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled
             * out (as we'd be done before that point), and no more than eta+1 can be done as eta's
             * sign will flip again once that happens. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 6) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 63U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6)
             * bits. */
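            /* This works because, for odd f, f*(f*f-2) == -(f^-1) mod 2^6
             * (e.g. f=3: 3*(9-2) = 21, and 3*21 == 63 == -1 mod 64), so
             * w == -g/f on the masked bits, making g + f*w clear them. */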
            w = (f * g * (f * f - 2)) & m;
        } else {
            /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as
             * eta tends to be smaller here. */
            limit = ((int)eta + 1) > i ? i : ((int)eta + 1);
            VERIFY_CHECK(limit > 0 && limit <= 62);
            /* m is a mask for the bottom min(limit, 4) bits. */
            m = (UINT64_MAX >> (64 - limit)) & 15U;
            /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4)
             * bits. */
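            /* The first assignment below computes f^-1 mod 2^4: adding 8 exactly
             * when f == 3 or 5 mod 8 fixes up the inverse (e.g. f=3 gives w=11,
             * and 3*11 == 33 == 1 mod 16). The second then sets w == -g/f on the
             * masked bits. */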
            w = f + (((f + 1) & 4) << 1);
            w = (-w * g) & m;
        }
        g += f * w;
        q += u * w;
        r += v * w;
        VERIFY_CHECK((g & m) == 0);
    }
    /* Return data in t and return value. */
    t->u = (int64_t)u;
    t->v = (int64_t)v;
    t->q = (int64_t)q;
    t->r = (int64_t)r;
    /* The determinant of t must be a power of two. This guarantees that multiplication with t
     * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which
     * will be divided out again). As each divstep's individual matrix has determinant 2, the
     * aggregate of 62 of them will have determinant 2^62. */
    VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62);
    return eta;
}

/* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix for 62 divsteps.
 *
 * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range
 * (-2^62,2^62).
 *
 * This implements the update_de function from the explanation.
 */
static void secp256k1_modinv64_update_de_62(secp256k1_modinv64_signed62 *d, secp256k1_modinv64_signed62 *e, const secp256k1_modinv64_trans2x2 *t, const secp256k1_modinv64_modinfo *modinfo) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
    const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t md, me, sd, se;
    int128_t cd, ce;
#ifdef VERIFY
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d < modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e < modulus */
    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
    VERIFY_CHECK((secp256k1_modinv64_abs(u) + secp256k1_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
    VERIFY_CHECK((secp256k1_modinv64_abs(q) + secp256k1_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
#endif
    /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
    sd = d4 >> 63;
    se = e4 >> 63;
    md = (u & sd) + (v & se);
    me = (q & sd) + (r & se);
    /* Begin computing t*[d,e]. */
    cd = (int128_t)u * d0 + (int128_t)v * e0;
    ce = (int128_t)q * d0 + (int128_t)r * e0;
    /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits. */
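    /* modinfo->modulus_inv62 is the inverse of the modulus modulo 2^62 (see modinv64.h):
     * subtracting (modulus_inv62*cd + md) mod 2^62 from md makes cd + modulus*md
     * divisible by 2^62; similarly for me and ce. */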
    md -= (modinfo->modulus_inv62 * (uint64_t)cd + md) & M62;
    me -= (modinfo->modulus_inv62 * (uint64_t)ce + me) & M62;
    /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
    cd += (int128_t)modinfo->modulus.v[0] * md;
    ce += (int128_t)modinfo->modulus.v[0] * me;
    /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62;
    VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62;
    /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
    cd += (int128_t)u * d1 + (int128_t)v * e1;
    ce += (int128_t)q * d1 + (int128_t)r * e1;
    if (modinfo->modulus.v[1]) { /* Optimize for the case where this limb of the modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[1] * md;
        ce += (int128_t)modinfo->modulus.v[1] * me;
    }
    d->v[0] = (int64_t)cd & M62; cd >>= 62;
    e->v[0] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
    cd += (int128_t)u * d2 + (int128_t)v * e2;
    ce += (int128_t)q * d2 + (int128_t)r * e2;
    if (modinfo->modulus.v[2]) { /* Optimize for the case where this limb of the modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[2] * md;
        ce += (int128_t)modinfo->modulus.v[2] * me;
    }
    d->v[1] = (int64_t)cd & M62; cd >>= 62;
    e->v[1] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
    cd += (int128_t)u * d3 + (int128_t)v * e3;
    ce += (int128_t)q * d3 + (int128_t)r * e3;
    if (modinfo->modulus.v[3]) { /* Optimize for the case where this limb of the modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[3] * md;
        ce += (int128_t)modinfo->modulus.v[3] * me;
    }
    d->v[2] = (int64_t)cd & M62; cd >>= 62;
    e->v[2] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
    cd += (int128_t)u * d4 + (int128_t)v * e4;
    ce += (int128_t)q * d4 + (int128_t)r * e4;
    cd += (int128_t)modinfo->modulus.v[4] * md;
    ce += (int128_t)modinfo->modulus.v[4] * me;
    d->v[3] = (int64_t)cd & M62; cd >>= 62;
    e->v[3] = (int64_t)ce & M62; ce >>= 62;
    /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
    d->v[4] = (int64_t)cd;
    e->v[4] = (int64_t)ce;
#ifdef VERIFY
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d < modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e < modulus */
#endif
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
 *
 * This implements the update_fg function from the explanation.
 */
static void secp256k1_modinv64_update_fg_62(secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
    const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int128_t cf, cg;
    /* Start computing t*[f,g]. */
    cf = (int128_t)u * f0 + (int128_t)v * g0;
    cg = (int128_t)q * f0 + (int128_t)r * g0;
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
    VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
    /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
    cf += (int128_t)u * f1 + (int128_t)v * g1;
    cg += (int128_t)q * f1 + (int128_t)r * g1;
    f->v[0] = (int64_t)cf & M62; cf >>= 62;
    g->v[0] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 2 of t*[f,g], and store it as output limb 1. */
    cf += (int128_t)u * f2 + (int128_t)v * g2;
    cg += (int128_t)q * f2 + (int128_t)r * g2;
    f->v[1] = (int64_t)cf & M62; cf >>= 62;
    g->v[1] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 3 of t*[f,g], and store it as output limb 2. */
    cf += (int128_t)u * f3 + (int128_t)v * g3;
    cg += (int128_t)q * f3 + (int128_t)r * g3;
    f->v[2] = (int64_t)cf & M62; cf >>= 62;
    g->v[2] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 4 of t*[f,g], and store it as output limb 3. */
    cf += (int128_t)u * f4 + (int128_t)v * g4;
    cg += (int128_t)q * f4 + (int128_t)r * g4;
    f->v[3] = (int64_t)cf & M62; cf >>= 62;
    g->v[3] = (int64_t)cg & M62; cg >>= 62;
    /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
    f->v[4] = (int64_t)cf;
    g->v[4] = (int64_t)cg;
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
 *
 * Version that operates on a variable number of limbs in f and g.
 *
 * This implements the update_fg function from the explanation.
 */
static void secp256k1_modinv64_update_fg_62_var(int len, secp256k1_modinv64_signed62 *f, secp256k1_modinv64_signed62 *g, const secp256k1_modinv64_trans2x2 *t) {
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t fi, gi;
    int128_t cf, cg;
    int i;
    VERIFY_CHECK(len > 0);
    /* Start computing t*[f,g]. */
    fi = f->v[0];
    gi = g->v[0];
    cf = (int128_t)u * fi + (int128_t)v * gi;
    cg = (int128_t)q * fi + (int128_t)r * gi;
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
    VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
    /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
     * down by 62 bits). */
    for (i = 1; i < len; ++i) {
        fi = f->v[i];
        gi = g->v[i];
        cf += (int128_t)u * fi + (int128_t)v * gi;
        cg += (int128_t)q * fi + (int128_t)r * gi;
        f->v[i - 1] = (int64_t)cf & M62; cf >>= 62;
        g->v[i - 1] = (int64_t)cg & M62; cg >>= 62;
    }
    /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
    f->v[len - 1] = (int64_t)cf;
    g->v[len - 1] = (int64_t)cg;
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
    secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 f = modinfo->modulus;
    secp256k1_modinv64_signed62 g = *x;
    int i;
    int64_t eta = -1;

    /* Do 12 iterations of 62 divsteps each = 744 divsteps. 724 suffices for 256-bit inputs. */
    for (i = 0; i < 12; ++i) {
        /* Compute transition matrix and new eta after 62 divsteps. */
        secp256k1_modinv64_trans2x2 t;
        eta = secp256k1_modinv64_divsteps_62(eta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
        secp256k1_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
    }

    /* At this point sufficient iterations have been performed that g must have reached 0
     * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
     * values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    secp256k1_modinv64_normalize_62(&d, f.v[4], modinfo);
    *x = d;
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
    secp256k1_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    secp256k1_modinv64_signed62 f = modinfo->modulus;
    secp256k1_modinv64_signed62 g = *x;
#ifdef VERIFY
    int i = 0;
#endif
    int j, len = 5;
    int64_t eta = -1;
    int64_t cond, fn, gn;

    /* Do iterations of 62 divsteps each until g=0. */
    while (1) {
        /* Compute transition matrix and new eta after 62 divsteps. */
        secp256k1_modinv64_trans2x2 t;
        eta = secp256k1_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        secp256k1_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
        secp256k1_modinv64_update_fg_62_var(len, &f, &g, &t);
        /* If the bottom limb of g is zero, there is a chance that g=0. */
        if (g.v[0] == 0) {
            cond = 0;
            /* Check if the other limbs are also 0. */
            for (j = 1; j < len; ++j) {
                cond |= g.v[j];
            }
            /* If so, we're done. */
            if (cond == 0) break;
        }

        /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */
        fn = f.v[len - 1];
        gn = g.v[len - 1];
        cond = ((int64_t)len - 2) >> 63;
        cond |= fn ^ (fn >> 63);
        cond |= gn ^ (gn >> 63);
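        /* Note: fn ^ (fn >> 63) is zero exactly when fn is 0 or -1, and
         * ((int64_t)len - 2) >> 63 is all ones exactly when len < 2, so cond == 0
         * iff len > 1 and both top limbs are pure sign extension. */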
        /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */
        if (cond == 0) {
            f.v[len - 2] |= (uint64_t)fn << 62;
            g.v[len - 2] |= (uint64_t)gn << 62;
            --len;
        }
#ifdef VERIFY
        VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g < modulus */
#endif
    }

    /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
     * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 secp256k1_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (secp256k1_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  secp256k1_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
                   secp256k1_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    secp256k1_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
    *x = d;
}

#endif /* SECP256K1_MODINV64_IMPL_H */