Bitcoin Core 22.99.0 — ecmult_impl.h (source listing)
1 /******************************************************************************
2  * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, Jonas Nick *
3  * Distributed under the MIT software license, see the accompanying *
4  * file COPYING or https://www.opensource.org/licenses/mit-license.php. *
5  ******************************************************************************/
6 
7 #ifndef SECP256K1_ECMULT_IMPL_H
8 #define SECP256K1_ECMULT_IMPL_H
9 
10 #include <string.h>
11 #include <stdint.h>
12 
13 #include "util.h"
14 #include "group.h"
15 #include "scalar.h"
16 #include "ecmult.h"
17 #include "ecmult_static_pre_g.h"
18 
19 #if defined(EXHAUSTIVE_TEST_ORDER)
20 /* We need to lower these values for exhaustive tests because
21  * the tables cannot have infinities in them (this breaks the
22  * affine-isomorphism stuff which tracks z-ratios) */
23 # if EXHAUSTIVE_TEST_ORDER > 128
24 # define WINDOW_A 5
25 # elif EXHAUSTIVE_TEST_ORDER > 8
26 # define WINDOW_A 4
27 # else
28 # define WINDOW_A 2
29 # endif
30 #else
31 /* optimal for 128-bit and 256-bit exponents. */
32 # define WINDOW_A 5
33 
42 #endif
43 
44 #define WNAF_BITS 128
45 #define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
46 #define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)
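/* Editor's note (illustrative addition, not in the original file): the size
 * macro above is a ceiling division, i.e. WNAF_SIZE(w) = ceil(WNAF_BITS / w).
 * For example, the fixed wNAF used by the Pippenger code below with
 * bucket_window = 6 uses w = 7 and therefore (128 + 7 - 1) / 7 = 19 digits
 * per 128-bit scalar half. */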
47 
48 /* The number of objects allocated on the scratch space for ecmult_multi algorithms */
49 #define PIPPENGER_SCRATCH_OBJECTS 6
50 #define STRAUSS_SCRATCH_OBJECTS 7
51 
52 #define PIPPENGER_MAX_BUCKET_WINDOW 12
53 
54 /* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
55 #define ECMULT_PIPPENGER_THRESHOLD 88
56 
57 #define ECMULT_MAX_POINTS_PER_BATCH 5000000
58 
64 static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a) {
65  secp256k1_gej d;
66  secp256k1_ge a_ge, d_ge;
67  int i;
68 
70 
71  secp256k1_gej_double_var(&d, a, NULL);
72 
73  /*
74  * Perform the additions on an isomorphism where 'd' is affine: drop the z coordinate
75  * of 'd', and scale the 1P starting value's x/y coordinates without changing its z.
76  */
77  d_ge.x = d.x;
78  d_ge.y = d.y;
79  d_ge.infinity = 0;
80 
81  secp256k1_ge_set_gej_zinv(&a_ge, a, &d.z);
82  prej[0].x = a_ge.x;
83  prej[0].y = a_ge.y;
84  prej[0].z = a->z;
85  prej[0].infinity = 0;
86 
87  zr[0] = d.z;
88  for (i = 1; i < n; i++) {
89  secp256k1_gej_add_ge_var(&prej[i], &prej[i-1], &d_ge, &zr[i]);
90  }
91 
92  /*
93  * Each point in 'prej' has a z coordinate too small by a factor of 'd.z'. Only
94  * the final point's z coordinate is actually used though, so just update that.
95  */
96  secp256k1_fe_mul(&prej[n-1].z, &prej[n-1].z, &d.z);
97 }
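/* Editor's illustrative sketch (addition, not part of the original file): the
 * recurrence used above, transplanted to plain integers. Starting from a and
 * repeatedly adding d = 2*a yields the odd multiples 1*a, 3*a, ..., (2*n-1)*a
 * that the 'prej' table holds; the real code does the same with group elements
 * on an isomorphism where d is affine. Kept under #if 0 so it is never
 * compiled. */
#if 0
static void example_odd_multiples(int64_t *tbl, int n, int64_t a) {
    int64_t d = 2 * a;  /* analogue of the doubled point */
    int i;
    tbl[0] = a;
    for (i = 1; i < n; i++) {
        tbl[i] = tbl[i - 1] + d;  /* tbl[i] == (2*i + 1) * a */
    }
}
#endif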
98 
108 static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a) {
109  secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
110  secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
111 
112  /* Compute the odd multiples in Jacobian form. */
113  secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), prej, zr, a);
114  /* Bring them to the same Z denominator. */
115  secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr);
116 }
117 
118 /** The following two macros retrieve a particular odd multiple from a table
119  *  of precomputed multiples. */
120 #define ECMULT_TABLE_GET_GE(r,pre,n,w) do { \
121  VERIFY_CHECK(((n) & 1) == 1); \
122  VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
123  VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
124  if ((n) > 0) { \
125  *(r) = (pre)[((n)-1)/2]; \
126  } else { \
127  *(r) = (pre)[(-(n)-1)/2]; \
128  secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
129  } \
130 } while(0)
131 
132 #define ECMULT_TABLE_GET_GE_STORAGE(r,pre,n,w) do { \
133  VERIFY_CHECK(((n) & 1) == 1); \
134  VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1)); \
135  VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1)); \
136  if ((n) > 0) { \
137  secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \
138  } else { \
139  secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \
140  secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \
141  } \
142 } while(0)
143 
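/* Editor's illustrative sketch (addition, not part of the original file): the
 * index arithmetic behind the two macros above, on plain integers. A table of
 * the odd multiples 1*A, 3*A, ..., (2^(w-1)-1)*A stores multiple m at slot
 * (m-1)/2, so a positive wNAF digit n reads slot (n-1)/2 and a negative digit
 * reads slot (-n-1)/2 and negates the y coordinate. Kept under #if 0 so it is
 * never compiled. */
#if 0
static int example_table_index(int n) {
    /* n is an odd, non-zero wNAF digit; the caller negates y when n < 0. */
    return n > 0 ? (n - 1) / 2 : (-n - 1) / 2;
}
#endif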
151 static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
152  secp256k1_scalar s;
153  int last_set_bit = -1;
154  int bit = 0;
155  int sign = 1;
156  int carry = 0;
157 
158  VERIFY_CHECK(wnaf != NULL);
159  VERIFY_CHECK(0 <= len && len <= 256);
160  VERIFY_CHECK(a != NULL);
161  VERIFY_CHECK(2 <= w && w <= 31);
162 
163  memset(wnaf, 0, len * sizeof(wnaf[0]));
164 
165  s = *a;
166  if (secp256k1_scalar_get_bits(&s, 255, 1)) {
167  secp256k1_scalar_negate(&s, &s);
168  sign = -1;
169  }
170 
171  while (bit < len) {
172  int now;
173  int word;
174  if (secp256k1_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
175  bit++;
176  continue;
177  }
178 
179  now = w;
180  if (now > len - bit) {
181  now = len - bit;
182  }
183 
184  word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;
185 
186  carry = (word >> (w-1)) & 1;
187  word -= carry << w;
188 
189  wnaf[bit] = sign * word;
190  last_set_bit = bit;
191 
192  bit += now;
193  }
194 #ifdef VERIFY
195  CHECK(carry == 0);
196  while (bit < 256) {
197  CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0);
198  }
199 #endif
200  return last_set_bit + 1;
201 }
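/* Editor's illustrative sketch (addition, not part of the original file):
 * recombining the digits produced above. The digits are odd, bounded by
 * 2^(w-1) in absolute value, and non-zero digits are separated by at least
 * w-1 zeros; summing wnaf[i]*2^i recovers the (possibly negated) scalar. For
 * example, with w = 5 the value 181 recodes to wnaf[0] = -11, wnaf[6] = 3,
 * since 3*64 - 11 = 181. Kept under #if 0 so it is never compiled. */
#if 0
static int64_t example_wnaf_value(const int *wnaf, int len) {
    int64_t acc = 0;
    int i;
    for (i = len - 1; i >= 0; i--) {
        acc = 2 * acc + wnaf[i];  /* Horner evaluation of sum(wnaf[i] * 2^i) */
    }
    return acc;
}
#endif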
202 
203 struct secp256k1_strauss_point_state {
204  secp256k1_scalar na_1, na_lam;
205  int wnaf_na_1[129];
206  int wnaf_na_lam[129];
207  int bits_na_1;
208  int bits_na_lam;
209  size_t input_pos;
210 };
211 
212 struct secp256k1_strauss_state {
213  secp256k1_gej* prej;
214  secp256k1_fe* zr;
215  secp256k1_ge* pre_a;
216  secp256k1_ge* pre_a_lam;
217  struct secp256k1_strauss_point_state* ps;
218 };
219 
220 static void secp256k1_ecmult_strauss_wnaf(const struct secp256k1_strauss_state *state, secp256k1_gej *r, size_t num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
221  secp256k1_ge tmpa;
222  secp256k1_fe Z;
223  /* Split G factors. */
224  secp256k1_scalar ng_1, ng_128;
225  int wnaf_ng_1[129];
226  int bits_ng_1 = 0;
227  int wnaf_ng_128[129];
228  int bits_ng_128 = 0;
229  int i;
230  int bits = 0;
231  size_t np;
232  size_t no = 0;
233 
234  for (np = 0; np < num; ++np) {
235  if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {
236  continue;
237  }
238  state->ps[no].input_pos = np;
239  /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
240  secp256k1_scalar_split_lambda(&state->ps[no].na_1, &state->ps[no].na_lam, &na[np]);
241 
242  /* build wnaf representation for na_1 and na_lam. */
243  state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &state->ps[no].na_1, WINDOW_A);
244  state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &state->ps[no].na_lam, WINDOW_A);
245  VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
246  VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
247  if (state->ps[no].bits_na_1 > bits) {
248  bits = state->ps[no].bits_na_1;
249  }
250  if (state->ps[no].bits_na_lam > bits) {
251  bits = state->ps[no].bits_na_lam;
252  }
253  ++no;
254  }
255 
256  /* Calculate odd multiples of a.
257  * All multiples are brought to the same Z 'denominator', which is stored
258  * in Z. Due to secp256k1's isomorphism we can do all operations pretending
259  * that the Z coordinate was 1, use affine addition formulae, and correct
260  * the Z coordinate of the result once at the end.
261  * The exception is the precomputed G table points, which are actually
262  * affine. Compared to the base used for other points, they have a Z ratio
263  * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same
264  * isomorphism to efficiently add with a known Z inverse.
265  */
266  if (no > 0) {
267  /* Compute the odd multiples in Jacobian form. */
268  secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej, state->zr, &a[state->ps[0].input_pos]);
269  for (np = 1; np < no; ++np) {
270  secp256k1_gej tmp = a[state->ps[np].input_pos];
271 #ifdef VERIFY
272  secp256k1_fe_normalize_var(&(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z));
273 #endif
274  secp256k1_gej_rescale(&tmp, &(state->prej[(np - 1) * ECMULT_TABLE_SIZE(WINDOW_A) + ECMULT_TABLE_SIZE(WINDOW_A) - 1].z));
275  secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->prej + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &tmp);
276  secp256k1_fe_mul(state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), state->zr + np * ECMULT_TABLE_SIZE(WINDOW_A), &(a[state->ps[np].input_pos].z));
277  }
278  /* Bring them to the same Z denominator. */
279  secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, &Z, state->prej, state->zr);
280  } else {
281  secp256k1_fe_set_int(&Z, 1);
282  }
283 
284  for (np = 0; np < no; ++np) {
285  for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
286  secp256k1_ge_mul_lambda(&state->pre_a_lam[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i]);
287  }
288  }
289 
290  if (ng) {
291  /* split ng into ng_1 and ng_128 (where gn = gn_1 + gn_128*2^128, and gn_1 and gn_128 are ~128 bit) */
292  secp256k1_scalar_split_128(&ng_1, &ng_128, ng);
293 
294  /* Build wnaf representation for ng_1 and ng_128 */
295  bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
296  bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
297  if (bits_ng_1 > bits) {
298  bits = bits_ng_1;
299  }
300  if (bits_ng_128 > bits) {
301  bits = bits_ng_128;
302  }
303  }
304 
305  secp256k1_gej_set_infinity(r);
306 
307  for (i = bits - 1; i >= 0; i--) {
308  int n;
309  secp256k1_gej_double_var(r, r, NULL);
310  for (np = 0; np < no; ++np) {
311  if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
312  ECMULT_TABLE_GET_GE(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
313  secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
314  }
315  if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
316  ECMULT_TABLE_GET_GE(&tmpa, state->pre_a_lam + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
317  secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
318  }
319  }
320  if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
321  ECMULT_TABLE_GET_GE_STORAGE(&tmpa, secp256k1_pre_g, n, WINDOW_G);
322  secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
323  }
324  if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
325  ECMULT_TABLE_GET_GE_STORAGE(&tmpa, secp256k1_pre_g_128, n, WINDOW_G);
326  secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
327  }
328  }
329 
330  if (!r->infinity) {
331  secp256k1_fe_mul(&r->z, &r->z, &Z);
332  }
333 }
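/* Editor's illustrative sketch (addition, not part of the original file): the
 * interleaving ("Shamir's trick") used above, on plain integers and with a
 * window of 1 for brevity. All scalars share a single doubling chain; each
 * contributes an addition only where its digit is non-zero. The real code
 * walks wNAF digits and adds precomputed table entries instead of x and y.
 * Kept under #if 0 so it is never compiled. */
#if 0
static int64_t example_strauss(int64_t k1, int64_t x, int64_t k2, int64_t y) {
    int64_t r = 0;
    int i;
    for (i = 62; i >= 0; i--) {
        r += r;                      /* one shared doubling per digit position */
        if ((k1 >> i) & 1) r += x;   /* digit of k1 */
        if ((k2 >> i) & 1) r += y;   /* digit of k2 */
    }
    return r;                        /* == k1*x + k2*y for non-negative k1, k2 */
}
#endif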
334 
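/* Editor's note (addition): the single-point entry point below computes
 * r = na*a + ng*G by running the Strauss machinery above with num == 1;
 * ng may be NULL, in which case only the na*a term is computed. */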
335 static void secp256k1_ecmult(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
336  secp256k1_gej prej[ECMULT_TABLE_SIZE(WINDOW_A)];
337  secp256k1_fe zr[ECMULT_TABLE_SIZE(WINDOW_A)];
338  secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
339  struct secp256k1_strauss_point_state ps[1];
340  secp256k1_ge pre_a_lam[ECMULT_TABLE_SIZE(WINDOW_A)];
341  struct secp256k1_strauss_state state;
342 
343  state.prej = prej;
344  state.zr = zr;
345  state.pre_a = pre_a;
346  state.pre_a_lam = pre_a_lam;
347  state.ps = ps;
348  secp256k1_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
349 }
350 
351 static size_t secp256k1_strauss_scratch_size(size_t n_points) {
352  static const size_t point_size = (2 * sizeof(secp256k1_ge) + sizeof(secp256k1_gej) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
353  return n_points*point_size;
354 }
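/* Editor's note (addition): with WINDOW_A = 5, ECMULT_TABLE_SIZE(WINDOW_A) is
 * 1 << 3 = 8, so per point the batch below allocates an 8-entry prej, zr,
 * pre_a and pre_a_lam table plus one point_state, one gej and one scalar,
 * which is exactly the point_size term above. */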
355 
356 static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
357  secp256k1_gej* points;
358  secp256k1_scalar* scalars;
359  struct secp256k1_strauss_state state;
360  size_t i;
361  const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
362 
363  secp256k1_gej_set_infinity(r);
364  if (inp_g_sc == NULL && n_points == 0) {
365  return 1;
366  }
367 
368  /* We allocate STRAUSS_SCRATCH_OBJECTS objects on the scratch space. If these
369  * allocations change, make sure to update the STRAUSS_SCRATCH_OBJECTS
370  * constant and strauss_scratch_size accordingly. */
371  points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
372  scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
373  state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej));
374  state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
375  state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
376  state.pre_a_lam = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
377  state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));
378 
379  if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL || state.pre_a_lam == NULL || state.ps == NULL) {
380  secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
381  return 0;
382  }
383 
384  for (i = 0; i < n_points; i++) {
385  secp256k1_ge point;
386  if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
387  secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
388  return 0;
389  }
390  secp256k1_gej_set_ge(&points[i], &point);
391  }
392  secp256k1_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
393  secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
394  return 1;
395 }
396 
397 /* Wrapper for secp256k1_ecmult_multi_func interface */
398 static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
399  return secp256k1_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
400 }
401 
402 static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
403  return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
404 }
405 
413 static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
414  int skew = 0;
415  int pos;
416  int max_pos;
417  int last_w;
418  const secp256k1_scalar *work = s;
419 
420  if (secp256k1_scalar_is_zero(s)) {
421  for (pos = 0; pos < WNAF_SIZE(w); pos++) {
422  wnaf[pos] = 0;
423  }
424  return 0;
425  }
426 
427  if (secp256k1_scalar_is_even(s)) {
428  skew = 1;
429  }
430 
431  wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;
432  /* Compute last window size. Relevant when window size doesn't divide the
433  * number of bits in the scalar */
434  last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;
435 
436  /* Store the position of the first nonzero word in max_pos to allow
437  * skipping leading zeros when calculating the wnaf. */
438  for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
439  int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
440  if(val != 0) {
441  break;
442  }
443  wnaf[pos] = 0;
444  }
445  max_pos = pos;
446  pos = 1;
447 
448  while (pos <= max_pos) {
449  int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
450  if ((val & 1) == 0) {
451  wnaf[pos - 1] -= (1 << w);
452  wnaf[pos] = (val + 1);
453  } else {
454  wnaf[pos] = val;
455  }
456  /* Set a coefficient to zero if it is 1 or -1 and the preceding digit
457  * is strictly negative or strictly positive respectively. Only change
458  * coefficients at previous positions because above code assumes that
459  * wnaf[pos - 1] is odd.
460  */
461  if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
462  if (wnaf[pos - 1] == 1) {
463  wnaf[pos - 2] += 1 << w;
464  } else {
465  wnaf[pos - 2] -= 1 << w;
466  }
467  wnaf[pos - 1] = 0;
468  }
469  ++pos;
470  }
471 
472  return skew;
473 }
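/* Editor's illustrative sketch (addition, not part of the original file): the
 * invariant of the fixed-window recoding above is
 *     value == sum(wnaf[i] * 2^(w*i)) - skew,   skew in {0, 1},
 * with every digit either zero or odd. For example, with w = 4 the even value
 * 40 recodes to wnaf[0] = -7, wnaf[1] = 3 with skew = 1, since
 * 3*16 - 7 - 1 = 40. Kept under #if 0 so it is never compiled. */
#if 0
static int64_t example_wnaf_fixed_value(const int *wnaf, int n, int w, int skew) {
    int64_t acc = 0;
    int i;
    for (i = n - 1; i >= 0; i--) {
        acc = (acc << w) + wnaf[i];  /* Horner evaluation in base 2^w */
    }
    return acc - skew;               /* skew corrects for forcing an odd wnaf[0] */
}
#endif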
474 
475 struct secp256k1_pippenger_point_state {
476  int skew_na;
477  size_t input_pos;
478 };
479 
480 struct secp256k1_pippenger_state {
481  int *wnaf_na;
482  struct secp256k1_pippenger_point_state* ps;
483 };
484 
485 /*
486  * pippenger_wnaf computes the result of a multi-point multiplication as
487  * follows: The scalars are brought into wnaf with n_wnaf elements each. Then
488  * for every i < n_wnaf, first each point is added to a "bucket" corresponding
489  * to the point's wnaf[i]. Second, the buckets are added together such that
490  * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
491  */
492 static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) {
493  size_t n_wnaf = WNAF_SIZE(bucket_window+1);
494  size_t np;
495  size_t no = 0;
496  int i;
497  int j;
498 
499  for (np = 0; np < num; ++np) {
500  if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {
501  continue;
502  }
503  state->ps[no].input_pos = np;
504  state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
505  no++;
506  }
507  secp256k1_gej_set_infinity(r);
508 
509  if (no == 0) {
510  return 1;
511  }
512 
513  for (i = n_wnaf - 1; i >= 0; i--) {
514  secp256k1_gej running_sum;
515 
516  for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) {
517  secp256k1_gej_set_infinity(&buckets[j]);
518  }
519 
520  for (np = 0; np < no; ++np) {
521  int n = state->wnaf_na[np*n_wnaf + i];
522  struct secp256k1_pippenger_point_state point_state = state->ps[np];
523  secp256k1_ge tmp;
524  int idx;
525 
526  if (i == 0) {
527  /* correct for wnaf skew */
528  int skew = point_state.skew_na;
529  if (skew) {
530  secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
531  secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
532  }
533  }
534  if (n > 0) {
535  idx = (n - 1)/2;
536  secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
537  } else if (n < 0) {
538  idx = -(n + 1)/2;
539  secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
540  secp256k1_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
541  }
542  }
543 
544  for(j = 0; j < bucket_window; j++) {
545  secp256k1_gej_double_var(r, r, NULL);
546  }
547 
548  secp256k1_gej_set_infinity(&running_sum);
549  /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
550  * = bucket[0] + bucket[1] + bucket[2] + bucket[3] + ...
551  * + 2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
552  * using an intermediate running sum:
553  * running_sum = bucket[0] + bucket[1] + bucket[2] + ...
554  *
555  * The doubling is done implicitly by deferring the final window doubling (of 'r').
556  */
557  for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
558  secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
559  secp256k1_gej_add_var(r, r, &running_sum, NULL);
560  }
561 
562  secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
563  secp256k1_gej_double_var(r, r, NULL);
564  secp256k1_gej_add_var(r, r, &running_sum, NULL);
565  }
566  return 1;
567 }
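/* Editor's illustrative sketch (addition, not part of the original file): the
 * bucket trick above on plain integers, for a single window and positive odd
 * digits only. Each (digit, value) pair lands in bucket[(digit-1)/2]; the
 * weighted sum 1*b[0] + 3*b[1] + 5*b[2] + ... is then recovered with the same
 * running-sum/doubling pattern as the code above. Assumes nbuckets <= 64.
 * Kept under #if 0 so it is never compiled. */
#if 0
static int64_t example_buckets(const int *digit, const int64_t *val, int n, int nbuckets) {
    int64_t bucket[64] = {0}, running = 0, r = 0;
    int i, j;
    for (i = 0; i < n; i++) {
        bucket[(digit[i] - 1) / 2] += val[i];
    }
    for (j = nbuckets - 1; j > 0; j--) {
        running += bucket[j];
        r += running;        /* after the loop: r == sum_{j>0} j*bucket[j] */
    }
    running += bucket[0];    /* running == sum_j bucket[j] */
    r += r;                  /* 2 * sum_{j>0} j*bucket[j] */
    r += running;            /* total == sum_j (2*j + 1) * bucket[j] */
    return r;
}
#endif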
568 
573 static int secp256k1_pippenger_bucket_window(size_t n) {
574  if (n <= 1) {
575  return 1;
576  } else if (n <= 4) {
577  return 2;
578  } else if (n <= 20) {
579  return 3;
580  } else if (n <= 57) {
581  return 4;
582  } else if (n <= 136) {
583  return 5;
584  } else if (n <= 235) {
585  return 6;
586  } else if (n <= 1260) {
587  return 7;
588  } else if (n <= 4420) {
589  return 9;
590  } else if (n <= 7880) {
591  return 10;
592  } else if (n <= 16050) {
593  return 11;
594  } else {
595  return PIPPENGER_MAX_BUCKET_WINDOW;
596  }
597 }
598 
602 static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
603  switch(bucket_window) {
604  case 1: return 1;
605  case 2: return 4;
606  case 3: return 20;
607  case 4: return 57;
608  case 5: return 136;
609  case 6: return 235;
610  case 7: return 1260;
611  case 8: return 1260;
612  case 9: return 4420;
613  case 10: return 7880;
614  case 11: return 16050;
615  case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
616  }
617  return 0;
618 }
619 
620 
621 static SECP256K1_INLINE void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
622  secp256k1_scalar tmp = *s1;
623  secp256k1_scalar_split_lambda(s1, s2, &tmp);
624  secp256k1_ge_mul_lambda(p2, p1);
625 
626  if (secp256k1_scalar_is_high(s1)) {
627  secp256k1_scalar_negate(s1, s1);
628  secp256k1_ge_neg(p1, p1);
629  }
630  if (secp256k1_scalar_is_high(s2)) {
631  secp256k1_scalar_negate(s2, s2);
632  secp256k1_ge_neg(p2, p2);
633  }
634 }
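/* Editor's note (addition): after the split above, the original product
 * satisfies s1_old * p1_old == s1*p1 + s2*p2 with p2 = lambda*p1, and both
 * output scalars fit in roughly 128 bits (negating a scalar together with its
 * point when the scalar is "high" preserves the product), which is what lets
 * the Pippenger code below use the 128-bit fixed wNAF (WNAF_BITS) per half. */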
635 
640 static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
641  size_t entries = 2*n_points + 2;
642  size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
643  return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
644 }
645 
646 static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
647  const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
648  /* With the endomorphism every input point is split in two, so use 2*(n+1)
649  * when calculating batch sizes. The +1 is because the G scalar is added to
650  * the list of other scalars. */
651  size_t entries = 2*n_points + 2;
652  secp256k1_ge *points;
653  secp256k1_scalar *scalars;
654  secp256k1_gej *buckets;
655  struct secp256k1_pippenger_state *state_space;
656  size_t idx = 0;
657  size_t point_idx = 0;
658  int i, j;
659  int bucket_window;
660 
661  secp256k1_gej_set_infinity(r);
662  if (inp_g_sc == NULL && n_points == 0) {
663  return 1;
664  }
665  bucket_window = secp256k1_pippenger_bucket_window(n_points);
666 
667  /* We allocate PIPPENGER_SCRATCH_OBJECTS objects on the scratch space. If
668  * these allocations change, make sure to update the
669  * PIPPENGER_SCRATCH_OBJECTS constant and pippenger_scratch_size
670  * accordingly. */
671  points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
672  scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
673  state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
674  if (points == NULL || scalars == NULL || state_space == NULL) {
675  secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
676  return 0;
677  }
678  state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
679  state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
680  buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets));
681  if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
682  secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
683  return 0;
684  }
685 
686  if (inp_g_sc != NULL) {
687  scalars[0] = *inp_g_sc;
688  points[0] = secp256k1_ge_const_g;
689  idx++;
690  secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
691  idx++;
692  }
693 
694  while (point_idx < n_points) {
695  if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
696  secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
697  return 0;
698  }
699  idx++;
700  secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
701  idx++;
702  point_idx++;
703  }
704 
705  secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);
706 
707  /* Clear data */
708  for(i = 0; (size_t)i < idx; i++) {
709  secp256k1_scalar_clear(&scalars[i]);
710  state_space->ps[i].skew_na = 0;
711  for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
712  state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0;
713  }
714  }
715  for(i = 0; i < 1<<bucket_window; i++) {
716  secp256k1_gej_clear(&buckets[i]);
717  }
718  secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
719  return 1;
720 }
721 
722 /* Wrapper for secp256k1_ecmult_multi_func interface */
723 static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
724  return secp256k1_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
725 }
726 
732 static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
733  size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
734  int bucket_window;
735  size_t res = 0;
736 
737  for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
738  size_t n_points;
739  size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
740  size_t space_for_points;
741  size_t space_overhead;
742  size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
743 
744  entry_size = 2*entry_size;
745  space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
746  if (space_overhead > max_alloc) {
747  break;
748  }
749  space_for_points = max_alloc - space_overhead;
750 
751  n_points = space_for_points/entry_size;
752  n_points = n_points > max_points ? max_points : n_points;
753  if (n_points > res) {
754  res = n_points;
755  }
756  if (n_points < max_points) {
757  /* A larger bucket_window may support even more points. But if we chose
758  * that, the caller couldn't safely use any number smaller than what this
759  * function returns */
760  break;
761  }
762  }
763  return res;
764 }
765 
766 /* Computes ecmult_multi by simply multiplying and adding each point. Does not
767  * require a scratch space */
768 static int secp256k1_ecmult_multi_simple_var(secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
769  size_t point_idx;
770  secp256k1_scalar szero;
771  secp256k1_gej tmpj;
772 
773  secp256k1_scalar_set_int(&szero, 0);
774  secp256k1_gej_set_infinity(r);
775  secp256k1_gej_set_infinity(&tmpj);
776  /* r = inp_g_sc*G */
777  secp256k1_ecmult(r, &tmpj, &szero, inp_g_sc);
778  for (point_idx = 0; point_idx < n_points; point_idx++) {
779  secp256k1_ge point;
780  secp256k1_gej pointj;
781  secp256k1_scalar scalar;
782  if (!cb(&scalar, &point, point_idx, cbdata)) {
783  return 0;
784  }
785  /* r += scalar*point */
786  secp256k1_gej_set_ge(&pointj, &point);
787  secp256k1_ecmult(&tmpj, &pointj, &scalar, NULL);
788  secp256k1_gej_add_var(r, r, &tmpj, NULL);
789  }
790  return 1;
791 }
792 
793 /* Compute the number of batches and the batch size given the maximum batch size and the
794  * total number of points */
795 static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
796  if (max_n_batch_points == 0) {
797  return 0;
798  }
799  if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
800  max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
801  }
802  if (n == 0) {
803  *n_batches = 0;
804  *n_batch_points = 0;
805  return 1;
806  }
807  /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */
808  *n_batches = 1 + (n - 1) / max_n_batch_points;
809  *n_batch_points = 1 + (n - 1) / *n_batches;
810  return 1;
811 }
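/* Editor's worked example (addition): with n = 19 points and
 * max_n_batch_points = 8 this gives n_batches = 1 + 18/8 = 3 and
 * n_batch_points = 1 + 18/3 = 7, so the caller below runs batches of
 * 7, 7 and 5 points. */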
812 
813 typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
814 static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
815  size_t i;
816 
817  int (*f)(const secp256k1_callback* error_callback, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
818  size_t n_batches;
819  size_t n_batch_points;
820 
821  secp256k1_gej_set_infinity(r);
822  if (inp_g_sc == NULL && n == 0) {
823  return 1;
824  } else if (n == 0) {
825  secp256k1_scalar szero;
826  secp256k1_scalar_set_int(&szero, 0);
827  secp256k1_ecmult(r, r, &szero, inp_g_sc);
828  return 1;
829  }
830  if (scratch == NULL) {
831  return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
832  }
833 
834  /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If the
835  * batch size is greater than a threshold, use Pippenger's algorithm; otherwise use
836  * Strauss' algorithm. As a first step check if there's enough space for Pippenger's
837  * algo (which requires less space than Strauss' algo) and if not, use the simple algorithm. */
838  if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
839  return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
840  }
841  if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
842  f = secp256k1_ecmult_pippenger_batch;
843  } else {
844  if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
845  return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
846  }
847  f = secp256k1_ecmult_strauss_batch;
848  }
849  for(i = 0; i < n_batches; i++) {
850  size_t nbp = n < n_batch_points ? n : n_batch_points;
851  size_t offset = n_batch_points*i;
852  secp256k1_gej tmp;
853  if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
854  return 0;
855  }
856  secp256k1_gej_add_var(r, r, &tmp, NULL);
857  n -= nbp;
858  }
859  return 1;
860 }
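/* Editor's illustrative sketch (addition, not part of the original file): the
 * shape of a callback a caller of secp256k1_ecmult_multi_var supplies. The
 * names example_cb and example_points are hypothetical; only the callback
 * signature comes from ecmult.h. The callback returns non-zero on success.
 * Kept under #if 0 so it is never compiled. */
#if 0
struct example_points {
    const secp256k1_scalar *sc;
    const secp256k1_ge *pt;
};

static int example_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
    const struct example_points *d = (const struct example_points *)data;
    *sc = d->sc[idx];  /* scalar for point idx */
    *pt = d->pt[idx];  /* the point itself, in affine coordinates */
    return 1;
}
#endif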
861 
862 #endif /* SECP256K1_ECMULT_IMPL_H */
secp256k1_gej::infinity
int infinity
Definition: group.h:27
secp256k1_gej_set_infinity
static void secp256k1_gej_set_infinity(secp256k1_gej *r)
Set a group element (jacobian) equal to the point at infinity.
secp256k1_strauss_state::prej
secp256k1_gej * prej
Definition: ecmult_impl.h:213
secp256k1_scalar_negate
static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a)
Compute the complement of a scalar (modulo the group order).
secp256k1_strauss_state::zr
secp256k1_fe * zr
Definition: ecmult_impl.h:214
VERIFY_CHECK
#define VERIFY_CHECK(cond)
Definition: util.h:95
secp256k1_pippenger_max_points
static size_t secp256k1_pippenger_max_points(const secp256k1_callback *error_callback, secp256k1_scratch *scratch)
Returns the maximum number of points in addition to G that can be used with a given scratch space.
Definition: ecmult_impl.h:732
secp256k1_ge::y
secp256k1_fe y
Definition: group.h:19
secp256k1_strauss_state::ps
struct secp256k1_strauss_point_state * ps
Definition: ecmult_impl.h:217
secp256k1_scalar_split_lambda
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k)
Find r1 and r2 such that r1+r2*lambda = k, where r1 and r2 or their negations are maximum 128 bits lo...
string.h
secp256k1_fe_normalize_var
static void secp256k1_fe_normalize_var(secp256k1_fe *r)
Normalize a field element, without constant-time guarantee.
secp256k1_scalar_is_high
static int secp256k1_scalar_is_high(const secp256k1_scalar *a)
Check whether a scalar is higher than the group order divided by 2.
secp256k1_gej::x
secp256k1_fe x
Definition: group.h:24
secp256k1_scratch_alloc
static void * secp256k1_scratch_alloc(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, size_t n)
Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available ...
group.h
secp256k1_ecmult_multi_batch_size_helper
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n)
Definition: ecmult_impl.h:795
secp256k1_ge_mul_lambda
static void secp256k1_ge_mul_lambda(secp256k1_ge *r, const secp256k1_ge *a)
Set r to be equal to lambda times a, where lambda is chosen in a way such that this is very fast.
secp256k1_scratch_space_struct
Definition: scratch.h:12
ecmult.h
util.h
secp256k1_strauss_point_state::wnaf_na_1
int wnaf_na_1[129]
Definition: ecmult_impl.h:205
secp256k1_strauss_state::pre_a
secp256k1_ge * pre_a
Definition: ecmult_impl.h:215
secp256k1_pippenger_bucket_window
static int secp256k1_pippenger_bucket_window(size_t n)
Returns optimal bucket_window (number of bits of a scalar represented by a set of buckets) for a give...
Definition: ecmult_impl.h:573
secp256k1_gej::z
secp256k1_fe z
Definition: group.h:26
secp256k1_wnaf_fixed
static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w)
Convert a number to WNAF notation.
Definition: ecmult_impl.h:413
secp256k1_gej_rescale
static void secp256k1_gej_rescale(secp256k1_gej *r, const secp256k1_fe *b)
Rescale a jacobian point by b which must be non-zero.
WNAF_BITS
#define WNAF_BITS
Larger values for ECMULT_WINDOW_SIZE result in possibly better performance at the cost of an exponent...
Definition: ecmult_impl.h:44
secp256k1_pippenger_state
Definition: ecmult_impl.h:480
secp256k1_scalar
A scalar modulo the group order of the secp256k1 curve.
Definition: scalar_4x64.h:13
WINDOW_A
#define WINDOW_A
Definition: ecmult_impl.h:32
STRAUSS_SCRATCH_OBJECTS
#define STRAUSS_SCRATCH_OBJECTS
Definition: ecmult_impl.h:50
secp256k1_ge_const_g
static const secp256k1_ge secp256k1_ge_const_g
Definition: group_impl.h:62
secp256k1_ecmult
static void secp256k1_ecmult(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng)
Definition: ecmult_impl.h:335
WNAF_SIZE
#define WNAF_SIZE(w)
Definition: ecmult_impl.h:46
secp256k1_strauss_point_state
Definition: ecmult_impl.h:203
secp256k1_scratch_checkpoint
static size_t secp256k1_scratch_checkpoint(const secp256k1_callback *error_callback, const secp256k1_scratch *scratch)
Returns an opaque object used to "checkpoint" a scratch space.
secp256k1_ecmult_pippenger_batch_single
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n)
Definition: ecmult_impl.h:723
secp256k1_gej
A group element of the secp256k1 curve, in jacobian coordinates.
Definition: group.h:23
secp256k1_ecmult_multi_func
int(* secp256k1_ecmult_multi_func)(const secp256k1_callback *error_callback, secp256k1_scratch *, secp256k1_gej *, const secp256k1_scalar *, secp256k1_ecmult_multi_callback cb, void *, size_t)
Definition: ecmult_impl.h:813
secp256k1_fe_mul
static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe *SECP256K1_RESTRICT b)
Sets a field element to be the product of two others.
secp256k1_ge_globalz_set_table_gej
static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp256k1_fe *globalz, const secp256k1_gej *a, const secp256k1_fe *zr)
Bring a batch inputs given in jacobian coordinates (with known z-ratios) to the same global z "denomi...
ecmult_static_pre_g.h
secp256k1_pippenger_state::wnaf_na
int * wnaf_na
Definition: ecmult_impl.h:481
secp256k1_ecmult_odd_multiples_table
static void secp256k1_ecmult_odd_multiples_table(int n, secp256k1_gej *prej, secp256k1_fe *zr, const secp256k1_gej *a)
Fill a table 'prej' with precomputed odd multiples of a.
Definition: ecmult_impl.h:64
ECMULT_TABLE_GET_GE_STORAGE
#define ECMULT_TABLE_GET_GE_STORAGE(r, pre, n, w)
Definition: ecmult_impl.h:132
secp256k1_fe
Definition: field_10x26.h:12
secp256k1_scalar_is_even
static int secp256k1_scalar_is_even(const secp256k1_scalar *a)
Check whether a scalar, considered as a nonnegative integer, is even.
secp256k1_gej::y
secp256k1_fe y
Definition: group.h:25
secp256k1_ge_neg
static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a)
Set r equal to the inverse of a (i.e., mirrored around the X axis)
secp256k1_strauss_point_state::wnaf_na_lam
int wnaf_na_lam[129]
Definition: ecmult_impl.h:206
secp256k1_strauss_point_state::na_1
secp256k1_scalar na_1
Definition: ecmult_impl.h:204
secp256k1_scratch_max_allocation
static size_t secp256k1_scratch_max_allocation(const secp256k1_callback *error_callback, const secp256k1_scratch *scratch, size_t n_objects)
Returns the maximum allocation the scratch space will allow.
secp256k1_pre_g_128
static const secp256k1_ge_storage secp256k1_pre_g_128[ECMULT_TABLE_SIZE(WINDOW_G)]
Definition: ecmult_static_pre_g.h:8389
secp256k1_scalar_split_128
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k)
Find r1 and r2 such that r1+r2*2^128 = k.
CHECK
#define CHECK(cond)
Unconditional failure on condition failure.
Definition: util.h:35
secp256k1_ecmult_wnaf
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w)
Convert a number to WNAF notation.
Definition: ecmult_impl.h:151
secp256k1_scalar_get_bits_var
static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
Access bits from a scalar.
secp256k1_strauss_state::pre_a_lam
secp256k1_ge * pre_a_lam
Definition: ecmult_impl.h:216
secp256k1_fe_set_int
static void secp256k1_fe_set_int(secp256k1_fe *r, int a)
Set a field element equal to a small (not greater than 0x7FFF), non-negative integer.
secp256k1_gej_add_var
static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_gej *b, secp256k1_fe *rzr)
Set r equal to the sum of a and b.
secp256k1_strauss_max_points
static size_t secp256k1_strauss_max_points(const secp256k1_callback *error_callback, secp256k1_scratch *scratch)
Definition: ecmult_impl.h:402
scalar.h
secp256k1_ge::infinity
int infinity
Definition: group.h:20
secp256k1_ecmult_strauss_batch
static int secp256k1_ecmult_strauss_batch(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset)
Definition: ecmult_impl.h:356
secp256k1_pippenger_state::ps
struct secp256k1_pippenger_point_state * ps
Definition: ecmult_impl.h:482
secp256k1_pippenger_scratch_size
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window)
Returns the scratch size required for a given number of points (excluding base point G) without consi...
Definition: ecmult_impl.h:640
secp256k1_pippenger_point_state::input_pos
size_t input_pos
Definition: ecmult_impl.h:477
secp256k1_strauss_point_state::bits_na_lam
int bits_na_lam
Definition: ecmult_impl.h:208
secp256k1_ge_set_gej_zinv
static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi)
Definition: group_impl.h:67
secp256k1_scratch_apply_checkpoint
static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, size_t checkpoint)
Applies a check point received from secp256k1_scratch_checkpoint, undoing all allocations since that ...
PIPPENGER_SCRATCH_OBJECTS
#define PIPPENGER_SCRATCH_OBJECTS
Definition: ecmult_impl.h:49
secp256k1_strauss_state
Definition: ecmult_impl.h:212
secp256k1_callback
Definition: util.h:19
secp256k1_scalar_clear
static void secp256k1_scalar_clear(secp256k1_scalar *r)
Clear a scalar to prevent the leak of sensitive data.
secp256k1_gej_add_ge_var
static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr)
Set r equal to the sum of a and b (with b given in affine coordinates).
secp256k1_pippenger_bucket_window_inv
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window)
Returns the maximum optimal number of points for a bucket_window.
Definition: ecmult_impl.h:602
secp256k1_pippenger_point_state
Definition: ecmult_impl.h:475
secp256k1_scalar_is_zero
static int secp256k1_scalar_is_zero(const secp256k1_scalar *a)
Check whether a scalar equals zero.
secp256k1_gej_double_var
static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, secp256k1_fe *rzr)
Set r equal to the double of a.
secp256k1_ecmult_multi_callback
int() secp256k1_ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data)
Definition: ecmult.h:35
ECMULT_TABLE_SIZE
#define ECMULT_TABLE_SIZE(w)
The number of entries a table with precomputed multiples needs to have.
Definition: ecmult.h:30
secp256k1_ecmult_multi_var
static int secp256k1_ecmult_multi_var(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n)
Definition: ecmult_impl.h:814
secp256k1_ecmult_odd_multiples_table_globalz_windowa
static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *pre, secp256k1_fe *globalz, const secp256k1_gej *a)
Fill a table 'pre' with precomputed odd multiples of a.
Definition: ecmult_impl.h:108
secp256k1_pre_g
static const secp256k1_ge_storage secp256k1_pre_g[ECMULT_TABLE_SIZE(WINDOW_G)]
Definition: ecmult_static_pre_g.h:169
secp256k1_ecmult_endo_split
static SECP256K1_INLINE void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2)
Definition: ecmult_impl.h:621
secp256k1_gej_clear
static void secp256k1_gej_clear(secp256k1_gej *r)
Clear a secp256k1_gej to prevent leaking sensitive information.
SECP256K1_INLINE
#define SECP256K1_INLINE
Definition: secp256k1.h:127
ECMULT_PIPPENGER_THRESHOLD
#define ECMULT_PIPPENGER_THRESHOLD
Definition: ecmult_impl.h:55
secp256k1_ecmult_strauss_batch_single
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n)
Definition: ecmult_impl.h:398
secp256k1_gej_is_infinity
static int secp256k1_gej_is_infinity(const secp256k1_gej *a)
Check whether a group element is the point at infinity.
secp256k1_ecmult_strauss_wnaf
static void secp256k1_ecmult_strauss_wnaf(const struct secp256k1_strauss_state *state, secp256k1_gej *r, size_t num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng)
Definition: ecmult_impl.h:220
secp256k1_gej_add_zinv_var
static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv)
Set r equal to the sum of a and b (with the inverse of b's Z coordinate passed as bzinv).
PIPPENGER_MAX_BUCKET_WINDOW
#define PIPPENGER_MAX_BUCKET_WINDOW
Definition: ecmult_impl.h:52
WINDOW_G
#define WINDOW_G
Definition: ecmult_static_pre_g.h:168
secp256k1_pippenger_point_state::skew_na
int skew_na
Definition: ecmult_impl.h:476
secp256k1_strauss_scratch_size
static size_t secp256k1_strauss_scratch_size(size_t n_points)
Definition: ecmult_impl.h:351
secp256k1_ecmult_pippenger_wnaf
static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num)
Definition: ecmult_impl.h:492
secp256k1_ge::x
secp256k1_fe x
Definition: group.h:18
secp256k1_strauss_point_state::bits_na_1
int bits_na_1
Definition: ecmult_impl.h:207
secp256k1_ge_is_infinity
static int secp256k1_ge_is_infinity(const secp256k1_ge *a)
Check whether a group element is the point at infinity.
ECMULT_MAX_POINTS_PER_BATCH
#define ECMULT_MAX_POINTS_PER_BATCH
Definition: ecmult_impl.h:57
secp256k1_scalar_get_bits
static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count)
Access bits from a scalar.
secp256k1_ecmult_multi_simple_var
static int secp256k1_ecmult_multi_simple_var(secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points)
Definition: ecmult_impl.h:768
secp256k1_ge
A group element of the secp256k1 curve, in affine coordinates.
Definition: group.h:13
secp256k1_strauss_point_state::input_pos
size_t input_pos
Definition: ecmult_impl.h:209
secp256k1_ecmult_pippenger_batch
static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback *error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset)
Definition: ecmult_impl.h:646
ECMULT_TABLE_GET_GE
#define ECMULT_TABLE_GET_GE(r, pre, n, w)
The following two macros retrieve a particular odd multiple from a table of precomputed multiples.
Definition: ecmult_impl.h:120
secp256k1_scalar_set_int
static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v)
Set a scalar to an unsigned integer.
secp256k1_strauss_point_state::na_lam
secp256k1_scalar na_lam
Definition: ecmult_impl.h:204
secp256k1_gej_set_ge
static void secp256k1_gej_set_ge(secp256k1_gej *r, const secp256k1_ge *a)
Set a group element (jacobian) equal to another which is given in affine coordinates.