Bitcoin Core  24.99.0
P2P Digital Currency
field_5x52_impl.h
Go to the documentation of this file.
1 /***********************************************************************
2  * Copyright (c) 2013, 2014 Pieter Wuille *
3  * Distributed under the MIT software license, see the accompanying *
4  * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
5  ***********************************************************************/
6 
7 #ifndef SECP256K1_FIELD_REPR_IMPL_H
8 #define SECP256K1_FIELD_REPR_IMPL_H
9 
10 #if defined HAVE_CONFIG_H
11 #include "libsecp256k1-config.h"
12 #endif
13 
14 #include "util.h"
15 #include "field.h"
16 #include "modinv64_impl.h"
17 
18 #if defined(USE_ASM_X86_64)
19 #include "field_5x52_asm_impl.h"
20 #else
21 #include "field_5x52_int128_impl.h"
22 #endif
23 
#ifdef VERIFY
/* Check the debug-build invariants of a field element: each limb is within the
 * bound implied by the element's magnitude (doubled for non-normalized values),
 * the magnitude is in [0, 2048], and a normalized value is strictly below p. */
static void secp256k1_fe_verify(const secp256k1_fe *a) {
    const uint64_t *d = a->n;
    int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
   /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m);
    r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m);
    r &= (a->magnitude >= 0);
    r &= (a->magnitude <= 2048);
    if (a->normalized) {
        r &= (a->magnitude <= 1);
        /* If the top four limbs are saturated, the low limb must be below p's low bits. */
        if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
            r &= (d[0] < 0xFFFFEFFFFFC2FULL);
        }
    }
    VERIFY_CHECK(r == 1);
}
#endif
60 
61 static void secp256k1_fe_get_bounds(secp256k1_fe *r, int m) {
62  VERIFY_CHECK(m >= 0);
63  VERIFY_CHECK(m <= 2048);
64  r->n[0] = 0xFFFFFFFFFFFFFULL * 2 * m;
65  r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * m;
66  r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * m;
67  r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * m;
68  r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * m;
69 #ifdef VERIFY
70  r->magnitude = m;
71  r->normalized = (m == 0);
72  secp256k1_fe_verify(r);
73 #endif
74 }
75 
77  uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
78 
79  /* Reduce t4 at the start so there will be at most a single carry from the first pass */
80  uint64_t m;
81  uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
82 
83  /* The first pass ensures the magnitude is 1, ... */
84  t0 += x * 0x1000003D1ULL;
85  t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
86  t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
87  t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
88  t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
89 
90  /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
91  VERIFY_CHECK(t4 >> 49 == 0);
92 
93  /* At most a single final reduction is needed; check if the value is >= the field characteristic */
94  x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
95  & (t0 >= 0xFFFFEFFFFFC2FULL));
96 
97  /* Apply the final reduction (for constant-time behaviour, we do it always) */
98  t0 += x * 0x1000003D1ULL;
99  t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
100  t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
101  t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
102  t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
103 
104  /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
105  VERIFY_CHECK(t4 >> 48 == x);
106 
107  /* Mask off the possible multiple of 2^256 from the final reduction */
108  t4 &= 0x0FFFFFFFFFFFFULL;
109 
110  r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
111 
112 #ifdef VERIFY
113  r->magnitude = 1;
114  r->normalized = 1;
115  secp256k1_fe_verify(r);
116 #endif
117 }
118 
120  uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
121 
122  /* Reduce t4 at the start so there will be at most a single carry from the first pass */
123  uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
124 
125  /* The first pass ensures the magnitude is 1, ... */
126  t0 += x * 0x1000003D1ULL;
127  t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
128  t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
129  t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
130  t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
131 
132  /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
133  VERIFY_CHECK(t4 >> 49 == 0);
134 
135  r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
136 
137 #ifdef VERIFY
138  r->magnitude = 1;
139  secp256k1_fe_verify(r);
140 #endif
141 }
142 
144  uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
145 
146  /* Reduce t4 at the start so there will be at most a single carry from the first pass */
147  uint64_t m;
148  uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
149 
150  /* The first pass ensures the magnitude is 1, ... */
151  t0 += x * 0x1000003D1ULL;
152  t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
153  t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
154  t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
155  t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
156 
157  /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
158  VERIFY_CHECK(t4 >> 49 == 0);
159 
160  /* At most a single final reduction is needed; check if the value is >= the field characteristic */
161  x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
162  & (t0 >= 0xFFFFEFFFFFC2FULL));
163 
164  if (x) {
165  t0 += 0x1000003D1ULL;
166  t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
167  t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
168  t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
169  t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
170 
171  /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
172  VERIFY_CHECK(t4 >> 48 == x);
173 
174  /* Mask off the possible multiple of 2^256 from the final reduction */
175  t4 &= 0x0FFFFFFFFFFFFULL;
176  }
177 
178  r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
179 
180 #ifdef VERIFY
181  r->magnitude = 1;
182  r->normalized = 1;
183  secp256k1_fe_verify(r);
184 #endif
185 }
186 
188  uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
189 
190  /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
191  uint64_t z0, z1;
192 
193  /* Reduce t4 at the start so there will be at most a single carry from the first pass */
194  uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
195 
196  /* The first pass ensures the magnitude is 1, ... */
197  t0 += x * 0x1000003D1ULL;
198  t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0 = t0; z1 = t0 ^ 0x1000003D0ULL;
199  t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
200  t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
201  t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
202  z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
203 
204  /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
205  VERIFY_CHECK(t4 >> 49 == 0);
206 
207  return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
208 }
209 
211  uint64_t t0, t1, t2, t3, t4;
212  uint64_t z0, z1;
213  uint64_t x;
214 
215  t0 = r->n[0];
216  t4 = r->n[4];
217 
218  /* Reduce t4 at the start so there will be at most a single carry from the first pass */
219  x = t4 >> 48;
220 
221  /* The first pass ensures the magnitude is 1, ... */
222  t0 += x * 0x1000003D1ULL;
223 
224  /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
225  z0 = t0 & 0xFFFFFFFFFFFFFULL;
226  z1 = z0 ^ 0x1000003D0ULL;
227 
228  /* Fast return path should catch the majority of cases */
229  if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
230  return 0;
231  }
232 
233  t1 = r->n[1];
234  t2 = r->n[2];
235  t3 = r->n[3];
236 
237  t4 &= 0x0FFFFFFFFFFFFULL;
238 
239  t1 += (t0 >> 52);
240  t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
241  t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
242  t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
243  z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
244 
245  /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
246  VERIFY_CHECK(t4 >> 49 == 0);
247 
248  return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
249 }
250 
252  VERIFY_CHECK(0 <= a && a <= 0x7FFF);
253  r->n[0] = a;
254  r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
255 #ifdef VERIFY
256  r->magnitude = (a != 0);
257  r->normalized = 1;
258  secp256k1_fe_verify(r);
259 #endif
260 }
261 
263  const uint64_t *t = a->n;
264 #ifdef VERIFY
265  VERIFY_CHECK(a->normalized);
266  secp256k1_fe_verify(a);
267 #endif
268  return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
269 }
270 
272 #ifdef VERIFY
273  VERIFY_CHECK(a->normalized);
274  secp256k1_fe_verify(a);
275 #endif
276  return a->n[0] & 1;
277 }
278 
280  int i;
281 #ifdef VERIFY
282  a->magnitude = 0;
283  a->normalized = 1;
284 #endif
285  for (i=0; i<5; i++) {
286  a->n[i] = 0;
287  }
288 }
289 
290 static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
291  int i;
292 #ifdef VERIFY
293  VERIFY_CHECK(a->normalized);
294  VERIFY_CHECK(b->normalized);
295  secp256k1_fe_verify(a);
296  secp256k1_fe_verify(b);
297 #endif
298  for (i = 4; i >= 0; i--) {
299  if (a->n[i] > b->n[i]) {
300  return 1;
301  }
302  if (a->n[i] < b->n[i]) {
303  return -1;
304  }
305  }
306  return 0;
307 }
308 
309 static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
310  int ret;
311  r->n[0] = (uint64_t)a[31]
312  | ((uint64_t)a[30] << 8)
313  | ((uint64_t)a[29] << 16)
314  | ((uint64_t)a[28] << 24)
315  | ((uint64_t)a[27] << 32)
316  | ((uint64_t)a[26] << 40)
317  | ((uint64_t)(a[25] & 0xF) << 48);
318  r->n[1] = (uint64_t)((a[25] >> 4) & 0xF)
319  | ((uint64_t)a[24] << 4)
320  | ((uint64_t)a[23] << 12)
321  | ((uint64_t)a[22] << 20)
322  | ((uint64_t)a[21] << 28)
323  | ((uint64_t)a[20] << 36)
324  | ((uint64_t)a[19] << 44);
325  r->n[2] = (uint64_t)a[18]
326  | ((uint64_t)a[17] << 8)
327  | ((uint64_t)a[16] << 16)
328  | ((uint64_t)a[15] << 24)
329  | ((uint64_t)a[14] << 32)
330  | ((uint64_t)a[13] << 40)
331  | ((uint64_t)(a[12] & 0xF) << 48);
332  r->n[3] = (uint64_t)((a[12] >> 4) & 0xF)
333  | ((uint64_t)a[11] << 4)
334  | ((uint64_t)a[10] << 12)
335  | ((uint64_t)a[9] << 20)
336  | ((uint64_t)a[8] << 28)
337  | ((uint64_t)a[7] << 36)
338  | ((uint64_t)a[6] << 44);
339  r->n[4] = (uint64_t)a[5]
340  | ((uint64_t)a[4] << 8)
341  | ((uint64_t)a[3] << 16)
342  | ((uint64_t)a[2] << 24)
343  | ((uint64_t)a[1] << 32)
344  | ((uint64_t)a[0] << 40);
345  ret = !((r->n[4] == 0x0FFFFFFFFFFFFULL) & ((r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL) & (r->n[0] >= 0xFFFFEFFFFFC2FULL));
346 #ifdef VERIFY
347  r->magnitude = 1;
348  if (ret) {
349  r->normalized = 1;
350  secp256k1_fe_verify(r);
351  } else {
352  r->normalized = 0;
353  }
354 #endif
355  return ret;
356 }
357 
359 static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
360 #ifdef VERIFY
361  VERIFY_CHECK(a->normalized);
362  secp256k1_fe_verify(a);
363 #endif
364  r[0] = (a->n[4] >> 40) & 0xFF;
365  r[1] = (a->n[4] >> 32) & 0xFF;
366  r[2] = (a->n[4] >> 24) & 0xFF;
367  r[3] = (a->n[4] >> 16) & 0xFF;
368  r[4] = (a->n[4] >> 8) & 0xFF;
369  r[5] = a->n[4] & 0xFF;
370  r[6] = (a->n[3] >> 44) & 0xFF;
371  r[7] = (a->n[3] >> 36) & 0xFF;
372  r[8] = (a->n[3] >> 28) & 0xFF;
373  r[9] = (a->n[3] >> 20) & 0xFF;
374  r[10] = (a->n[3] >> 12) & 0xFF;
375  r[11] = (a->n[3] >> 4) & 0xFF;
376  r[12] = ((a->n[2] >> 48) & 0xF) | ((a->n[3] & 0xF) << 4);
377  r[13] = (a->n[2] >> 40) & 0xFF;
378  r[14] = (a->n[2] >> 32) & 0xFF;
379  r[15] = (a->n[2] >> 24) & 0xFF;
380  r[16] = (a->n[2] >> 16) & 0xFF;
381  r[17] = (a->n[2] >> 8) & 0xFF;
382  r[18] = a->n[2] & 0xFF;
383  r[19] = (a->n[1] >> 44) & 0xFF;
384  r[20] = (a->n[1] >> 36) & 0xFF;
385  r[21] = (a->n[1] >> 28) & 0xFF;
386  r[22] = (a->n[1] >> 20) & 0xFF;
387  r[23] = (a->n[1] >> 12) & 0xFF;
388  r[24] = (a->n[1] >> 4) & 0xFF;
389  r[25] = ((a->n[0] >> 48) & 0xF) | ((a->n[1] & 0xF) << 4);
390  r[26] = (a->n[0] >> 40) & 0xFF;
391  r[27] = (a->n[0] >> 32) & 0xFF;
392  r[28] = (a->n[0] >> 24) & 0xFF;
393  r[29] = (a->n[0] >> 16) & 0xFF;
394  r[30] = (a->n[0] >> 8) & 0xFF;
395  r[31] = a->n[0] & 0xFF;
396 }
397 
399 #ifdef VERIFY
400  VERIFY_CHECK(a->magnitude <= m);
401  secp256k1_fe_verify(a);
402  VERIFY_CHECK(0xFFFFEFFFFFC2FULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
403  VERIFY_CHECK(0xFFFFFFFFFFFFFULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
404  VERIFY_CHECK(0x0FFFFFFFFFFFFULL * 2 * (m + 1) >= 0x0FFFFFFFFFFFFULL * 2 * m);
405 #endif
406  r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
407  r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
408  r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
409  r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
410  r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
411 #ifdef VERIFY
412  r->magnitude = m + 1;
413  r->normalized = 0;
414  secp256k1_fe_verify(r);
415 #endif
416 }
417 
419  r->n[0] *= a;
420  r->n[1] *= a;
421  r->n[2] *= a;
422  r->n[3] *= a;
423  r->n[4] *= a;
424 #ifdef VERIFY
425  r->magnitude *= a;
426  r->normalized = 0;
427  secp256k1_fe_verify(r);
428 #endif
429 }
430 
432 #ifdef VERIFY
433  secp256k1_fe_verify(a);
434 #endif
435  r->n[0] += a->n[0];
436  r->n[1] += a->n[1];
437  r->n[2] += a->n[2];
438  r->n[3] += a->n[3];
439  r->n[4] += a->n[4];
440 #ifdef VERIFY
441  r->magnitude += a->magnitude;
442  r->normalized = 0;
443  secp256k1_fe_verify(r);
444 #endif
445 }
446 
448 #ifdef VERIFY
449  VERIFY_CHECK(a->magnitude <= 8);
450  VERIFY_CHECK(b->magnitude <= 8);
451  secp256k1_fe_verify(a);
452  secp256k1_fe_verify(b);
453  VERIFY_CHECK(r != b);
454  VERIFY_CHECK(a != b);
455 #endif
456  secp256k1_fe_mul_inner(r->n, a->n, b->n);
457 #ifdef VERIFY
458  r->magnitude = 1;
459  r->normalized = 0;
460  secp256k1_fe_verify(r);
461 #endif
462 }
463 
464 static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
465 #ifdef VERIFY
466  VERIFY_CHECK(a->magnitude <= 8);
467  secp256k1_fe_verify(a);
468 #endif
469  secp256k1_fe_sqr_inner(r->n, a->n);
470 #ifdef VERIFY
471  r->magnitude = 1;
472  r->normalized = 0;
473  secp256k1_fe_verify(r);
474 #endif
475 }
476 
477 static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
478  uint64_t mask0, mask1;
479  VG_CHECK_VERIFY(r->n, sizeof(r->n));
480  mask0 = flag + ~((uint64_t)0);
481  mask1 = ~mask0;
482  r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
483  r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
484  r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
485  r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
486  r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
487 #ifdef VERIFY
488  if (flag) {
489  r->magnitude = a->magnitude;
490  r->normalized = a->normalized;
491  }
492 #endif
493 }
494 
496  uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
497  uint64_t one = (uint64_t)1;
498  uint64_t mask = -(t0 & one) >> 12;
499 
500 #ifdef VERIFY
501  secp256k1_fe_verify(r);
502  VERIFY_CHECK(r->magnitude < 32);
503 #endif
504 
505  /* Bounds analysis (over the rationals).
506  *
507  * Let m = r->magnitude
508  * C = 0xFFFFFFFFFFFFFULL * 2
509  * D = 0x0FFFFFFFFFFFFULL * 2
510  *
511  * Initial bounds: t0..t3 <= C * m
512  * t4 <= D * m
513  */
514 
515  t0 += 0xFFFFEFFFFFC2FULL & mask;
516  t1 += mask;
517  t2 += mask;
518  t3 += mask;
519  t4 += mask >> 4;
520 
521  VERIFY_CHECK((t0 & one) == 0);
522 
523  /* t0..t3: added <= C/2
524  * t4: added <= D/2
525  *
526  * Current bounds: t0..t3 <= C * (m + 1/2)
527  * t4 <= D * (m + 1/2)
528  */
529 
530  r->n[0] = (t0 >> 1) + ((t1 & one) << 51);
531  r->n[1] = (t1 >> 1) + ((t2 & one) << 51);
532  r->n[2] = (t2 >> 1) + ((t3 & one) << 51);
533  r->n[3] = (t3 >> 1) + ((t4 & one) << 51);
534  r->n[4] = (t4 >> 1);
535 
536  /* t0..t3: shifted right and added <= C/4 + 1/2
537  * t4: shifted right
538  *
539  * Current bounds: t0..t3 <= C * (m/2 + 1/2)
540  * t4 <= D * (m/2 + 1/4)
541  */
542 
543 #ifdef VERIFY
544  /* Therefore the output magnitude (M) has to be set such that:
545  * t0..t3: C * M >= C * (m/2 + 1/2)
546  * t4: D * M >= D * (m/2 + 1/4)
547  *
548  * It suffices for all limbs that, for any input magnitude m:
549  * M >= m/2 + 1/2
550  *
551  * and since we want the smallest such integer value for M:
552  * M == floor(m/2) + 1
553  */
554  r->magnitude = (r->magnitude >> 1) + 1;
555  r->normalized = 0;
556  secp256k1_fe_verify(r);
557 #endif
558 }
559 
561  uint64_t mask0, mask1;
562  VG_CHECK_VERIFY(r->n, sizeof(r->n));
563  mask0 = flag + ~((uint64_t)0);
564  mask1 = ~mask0;
565  r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
566  r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
567  r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
568  r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
569 }
570 
572 #ifdef VERIFY
573  VERIFY_CHECK(a->normalized);
574 #endif
575  r->n[0] = a->n[0] | a->n[1] << 52;
576  r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
577  r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
578  r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
579 }
580 
582  r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
583  r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
584  r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
585  r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
586  r->n[4] = a->n[3] >> 16;
587 #ifdef VERIFY
588  r->magnitude = 1;
589  r->normalized = 1;
590  secp256k1_fe_verify(r);
591 #endif
592 }
593 
595  const uint64_t M52 = UINT64_MAX >> 12;
596  const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
597 
598  /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
599  * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
600  */
601  VERIFY_CHECK(a0 >> 62 == 0);
602  VERIFY_CHECK(a1 >> 62 == 0);
603  VERIFY_CHECK(a2 >> 62 == 0);
604  VERIFY_CHECK(a3 >> 62 == 0);
605  VERIFY_CHECK(a4 >> 8 == 0);
606 
607  r->n[0] = a0 & M52;
608  r->n[1] = (a0 >> 52 | a1 << 10) & M52;
609  r->n[2] = (a1 >> 42 | a2 << 20) & M52;
610  r->n[3] = (a2 >> 32 | a3 << 30) & M52;
611  r->n[4] = (a3 >> 22 | a4 << 40);
612 
613 #ifdef VERIFY
614  r->magnitude = 1;
615  r->normalized = 1;
616  secp256k1_fe_verify(r);
617 #endif
618 }
619 
621  const uint64_t M62 = UINT64_MAX >> 2;
622  const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4];
623 
624 #ifdef VERIFY
625  VERIFY_CHECK(a->normalized);
626 #endif
627 
628  r->v[0] = (a0 | a1 << 52) & M62;
629  r->v[1] = (a1 >> 10 | a2 << 42) & M62;
630  r->v[2] = (a2 >> 20 | a3 << 32) & M62;
631  r->v[3] = (a3 >> 30 | a4 << 22) & M62;
632  r->v[4] = a4 >> 40;
633 }
634 
636  {{-0x1000003D1LL, 0, 0, 0, 256}},
637  0x27C7F6E22DDACACFLL
638 };
639 
640 static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *x) {
641  secp256k1_fe tmp;
643 
644  tmp = *x;
646  secp256k1_fe_to_signed62(&s, &tmp);
649 
650 #ifdef VERIFY
652 #endif
653 }
654 
655 static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x) {
656  secp256k1_fe tmp;
658 
659  tmp = *x;
661  secp256k1_fe_to_signed62(&s, &tmp);
664 
665 #ifdef VERIFY
667 #endif
668 }
669 
670 #endif /* SECP256K1_FIELD_REPR_IMPL_H */
int ret
static SECP256K1_INLINE void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a)
static SECP256K1_INLINE void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t *SECP256K1_RESTRICT b)
static int secp256k1_fe_normalizes_to_zero_var(const secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_set_int(secp256k1_fe *r, int a)
static void secp256k1_fe_normalize_weak(secp256k1_fe *r)
static SECP256K1_INLINE int secp256k1_fe_is_zero(const secp256k1_fe *a)
static void secp256k1_fe_normalize_var(secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_mul_int(secp256k1_fe *r, int a)
static void secp256k1_fe_get_bounds(secp256k1_fe *r, int m)
Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F,...
static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe *SECP256K1_RESTRICT b)
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a)
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag)
static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a)
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag)
static int secp256k1_fe_normalizes_to_zero(const secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m)
static void secp256k1_fe_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_fe *a)
static void secp256k1_fe_normalize(secp256k1_fe *r)
static SECP256K1_INLINE int secp256k1_fe_is_odd(const secp256k1_fe *a)
static SECP256K1_INLINE void secp256k1_fe_clear(secp256k1_fe *a)
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a)
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a)
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a)
Convert a field element to a 32-byte big endian value.
static SECP256K1_INLINE void secp256k1_fe_half(secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a)
static void secp256k1_fe_from_signed62(secp256k1_fe *r, const secp256k1_modinv64_signed62 *a)
static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_fe
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b)
static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *x)
static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x)
static void secp256k1_modinv64(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo)
static void secp256k1_modinv64_var(secp256k1_modinv64_signed62 *x, const secp256k1_modinv64_modinfo *modinfo)
#define VG_CHECK_VERIFY(x, y)
Definition: util.h:120
#define VERIFY_CHECK(cond)
Definition: util.h:100
#define SECP256K1_RESTRICT
Definition: util.h:160
#define SECP256K1_INLINE
Definition: secp256k1.h:131
uint32_t n[10]
Definition: field_10x26.h:16