cairo-wideint.c
1// NOLINTBEGIN
2// clang-format off
3
4/* cairo - a vector graphics library with display and print output
5 *
6 * Copyright © 2004 Keith Packard
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation;
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * The original code as contributed to the cairo library under
22 * the dual license MPL+LGPL. We used the LGPL relicensing clause to
23 * get a GPL version of this code which now lives here. This header is
24 * unmodified other than the licensing clause.
25 *
26 * The Original Code is the cairo graphics library.
27 *
28 * The Initial Developer of the Original Code is Keith Packard
29 *
30 * Contributor(s):
31 * Keith R. Packard <keithp@keithp.com>
32 *
33 * Code changes for ns-3 from upstream are marked with `//PDB'
34 */
35
36#include "cairo-wideint-private.h"
37
38#include <climits>
39
40/**
41 * \file
42 * \ingroup highprec
43 * Implementation of the cairo_x functions which implement high precision arithmetic.
44 */
45
46#if HAVE_UINT64_T
47
48//PDB original string literal causes access violation on Windows when accessed
49// by binaries that link to core, e.g. tests
50// Substitute function call
51const char *
52cairo_impl64 (void)
53{
54 return "uint64_t";
55}
56
57#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))
58
59cairo_uquorem64_t
60_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
61{
62 cairo_uquorem64_t qr;
63
64 qr.quo = num / den;
65 qr.rem = num % den;
66 return qr;
67}
68
69#else
70
71//PDB original string literal causes access violation on Windows when accessed
72// by binaries that link to core, e.g. tests
73// Substitute function call
74const char *
75cairo_impl64 (void)
76{
77 return "uint32_t";
78}
79
80cairo_uint64_t
81_cairo_uint32_to_uint64 (uint32_t i)
82{
83 cairo_uint64_t q;
84
85 q.lo = i;
86 q.hi = 0;
87 return q;
88}
89
90cairo_int64_t
91_cairo_int32_to_int64 (int32_t i)
92{
93 cairo_uint64_t q;
94
95 q.lo = i;
96 q.hi = i < 0 ? -1 : 0;
97 return q;
98}
99
100static cairo_uint64_t
101_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
102{
103 cairo_uint64_t q;
104
105 q.lo = l;
106 q.hi = h;
107 return q;
108}
109
110cairo_uint64_t
111_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
112{
113 cairo_uint64_t s;
114
115 s.hi = a.hi + b.hi;
116 s.lo = a.lo + b.lo;
117 if (s.lo < a.lo)
118 s.hi++;
119 return s;
120}
121
122cairo_uint64_t
123_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
124{
125 cairo_uint64_t s;
126
127 s.hi = a.hi - b.hi;
128 s.lo = a.lo - b.lo;
129 if (s.lo > a.lo)
130 s.hi--;
131 return s;
132}
133
134#define uint32_lo(i) ((i) & 0xffff)
135#define uint32_hi(i) ((i) >> 16)
136#define uint32_carry16 ((1) << 16)
137
138cairo_uint64_t
139_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
140{
141 cairo_uint64_t s;
142
143 uint16_t ah, al, bh, bl;
144 uint32_t r0, r1, r2, r3;
145
146 al = uint32_lo (a);
147 ah = uint32_hi (a);
148 bl = uint32_lo (b);
149 bh = uint32_hi (b);
150
151 r0 = (uint32_t) al * bl;
152 r1 = (uint32_t) al * bh;
153 r2 = (uint32_t) ah * bl;
154 r3 = (uint32_t) ah * bh;
155
156 r1 += uint32_hi(r0); /* no carry possible */
157 r1 += r2; /* but this can carry */
158 if (r1 < r2) /* check */
159 r3 += uint32_carry16;
160
161 s.hi = r3 + uint32_hi(r1);
162 s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
163 return s;
164}
165
166cairo_int64_t
167_cairo_int32x32_64_mul (int32_t a, int32_t b)
168{
169 cairo_int64_t s;
170 s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
171 if (a < 0)
172 s.hi -= b;
173 if (b < 0)
174 s.hi -= a;
175 return s;
176}
177
178cairo_uint64_t
179_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
180{
181 cairo_uint64_t s;
182
183 s = _cairo_uint32x32_64_mul (a.lo, b.lo);
184 s.hi += a.lo * b.hi + a.hi * b.lo;
185 return s;
186}
187
188cairo_uint64_t
189_cairo_uint64_lsl (cairo_uint64_t a, int shift)
190{
191 if (shift >= 32)
192 {
193 a.hi = a.lo;
194 a.lo = 0;
195 shift -= 32;
196 }
197 if (shift)
198 {
199 a.hi = a.hi << shift | a.lo >> (32 - shift);
200 a.lo = a.lo << shift;
201 }
202 return a;
203}
204
205cairo_uint64_t
206_cairo_uint64_rsl (cairo_uint64_t a, int shift)
207{
208 if (shift >= 32)
209 {
210 a.lo = a.hi;
211 a.hi = 0;
212 shift -= 32;
213 }
214 if (shift)
215 {
216 a.lo = a.lo >> shift | a.hi << (32 - shift);
217 a.hi = a.hi >> shift;
218 }
219 return a;
220}
221
222#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))
223
224cairo_int64_t
225_cairo_uint64_rsa (cairo_int64_t a, int shift)
226{
227 if (shift >= 32)
228 {
229 a.lo = a.hi;
230 a.hi = _cairo_uint32_rsa (a.hi, 31);
231 shift -= 32;
232 }
233 if (shift)
234 {
235 a.lo = a.lo >> shift | a.hi << (32 - shift);
236 a.hi = _cairo_uint32_rsa (a.hi, shift);
237 }
238 return a;
239}
240
241int
242_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
243{
244 return (a.hi < b.hi ||
245 (a.hi == b.hi && a.lo < b.lo));
246}
247
248int
249_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
250{
251 return a.hi == b.hi && a.lo == b.lo;
252}
253
254int
255_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
256{
257 if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
258 return 1;
259 if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
260 return 0;
261 return _cairo_uint64_lt (a, b);
262}
263
264cairo_uint64_t
265_cairo_uint64_not (cairo_uint64_t a)
266{
267 a.lo = ~a.lo;
268 a.hi = ~a.hi;
269 return a;
270}
271
272cairo_uint64_t
273_cairo_uint64_negate (cairo_uint64_t a)
274{
275 a.lo = ~a.lo;
276 a.hi = ~a.hi;
277 if (++a.lo == 0)
278 ++a.hi;
279 return a;
280}
281
282/*
283 * Simple bit-at-a-time divide.
284 */
285cairo_uquorem64_t
286_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
287{
288 cairo_uquorem64_t qr;
289 cairo_uint64_t bit;
290 cairo_uint64_t quo;
291
292 bit = _cairo_uint32_to_uint64 (1);
293
294 /* normalize to make den >= num, but not overflow */
295 while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
296 {
297 bit = _cairo_uint64_lsl (bit, 1);
298 den = _cairo_uint64_lsl (den, 1);
299 }
300 quo = _cairo_uint32_to_uint64 (0);
301
302 /* generate quotient, one bit at a time */
303 while (bit.hi | bit.lo)
304 {
305 if (_cairo_uint64_le (den, num))
306 {
307 num = _cairo_uint64_sub (num, den);
308 quo = _cairo_uint64_add (quo, bit);
309 }
310 bit = _cairo_uint64_rsl (bit, 1);
311 den = _cairo_uint64_rsl (den, 1);
312 }
313 qr.quo = quo;
314 qr.rem = num;
315 return qr;
316}
317
318#endif /* !HAVE_UINT64_T */
319
320cairo_quorem64_t
321_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
322{
323 int num_neg = _cairo_int64_negative (num);
324 int den_neg = _cairo_int64_negative (den);
325 cairo_uquorem64_t uqr;
326 cairo_quorem64_t qr;
327
328 if (num_neg)
329 num = _cairo_int64_negate (num);
330 if (den_neg)
331 den = _cairo_int64_negate (den);
332 uqr = _cairo_uint64_divrem (num, den);
333 if (num_neg)
334 qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
335 else
336 qr.rem = uqr.rem;
337 if (num_neg != den_neg)
338 qr.quo = (cairo_int64_t) _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
339 else
340 qr.quo = (cairo_int64_t) uqr.quo;
341 return qr;
342}
343
344#if HAVE_UINT128_T
345
346//PDB original string literal causes access violation on Windows when accessed
347// by binaries that link to core, e.g. tests
348// Substitute function call
349const char *
350cairo_impl128 (void)
351{
352 return "uint128_t";
353}
354
355cairo_uquorem128_t
356_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
357{
358 cairo_uquorem128_t qr;
359
360 qr.quo = num / den;
361 qr.rem = num % den;
362 return qr;
363}
364
365#else
366
367//PDB original string literal causes access violation on Windows when accessed
368// by binaries that link to core, e.g. tests
369// Substitute function call
370const char *
371cairo_impl128 (void)
372{
373 return "cairo_uint64_t";
374}
375
376cairo_uint128_t
377_cairo_uint32_to_uint128 (uint32_t i)
378{
379 cairo_uint128_t q;
380
381 q.lo = _cairo_uint32_to_uint64 (i);
382 q.hi = _cairo_uint32_to_uint64 (0);
383 return q;
384}
385
386cairo_int128_t
387_cairo_int32_to_int128 (int32_t i)
388{
389 cairo_int128_t q;
390
391 q.lo = _cairo_int32_to_int64 (i);
392 q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
393 return q;
394}
395
396cairo_uint128_t
397_cairo_uint64_to_uint128 (cairo_uint64_t i)
398{
399 cairo_uint128_t q;
400
401 q.lo = i;
402 q.hi = _cairo_uint32_to_uint64 (0);
403 return q;
404}
405
406cairo_int128_t
407_cairo_int64_to_int128 (cairo_int64_t i)
408{
409 cairo_int128_t q;
410
411 q.lo = i;
412 q.hi = _cairo_int32_to_int64 (_cairo_int64_negative (i) ? -1 : 0);
413 return q;
414}
415
416cairo_uint128_t
417_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
418{
419 cairo_uint128_t s;
420
421 s.hi = _cairo_uint64_add (a.hi, b.hi);
422 s.lo = _cairo_uint64_add (a.lo, b.lo);
423 if (_cairo_uint64_lt (s.lo, a.lo))
424 s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
425 return s;
426}
427
428cairo_uint128_t
429_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
430{
431 cairo_uint128_t s;
432
433 s.hi = _cairo_uint64_sub (a.hi, b.hi);
434 s.lo = _cairo_uint64_sub (a.lo, b.lo);
435 if (_cairo_uint64_gt (s.lo, a.lo))
436 s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64 (1));
437 return s;
438}
439
440#if HAVE_UINT64_T
441
442#define uint64_lo32(i) ((i) & 0xffffffff)
443#define uint64_hi32(i) ((i) >> 32)
444#define uint64_lo(i) ((i) & 0xffffffff)
445#define uint64_hi(i) ((i) >> 32)
446#define uint64_shift32(i) ((i) << 32)
447#define uint64_carry32 (((uint64_t) 1) << 32)
448
449#else
450
451#define uint64_lo32(i) ((i).lo)
452#define uint64_hi32(i) ((i).hi)
453
454static cairo_uint64_t
455uint64_lo (cairo_uint64_t i)
456{
457 cairo_uint64_t s;
458
459 s.lo = i.lo;
460 s.hi = 0;
461 return s;
462}
463
464static cairo_uint64_t
465uint64_hi (cairo_uint64_t i)
466{
467 cairo_uint64_t s;
468
469 s.lo = i.hi;
470 s.hi = 0;
471 return s;
472}
473
474static cairo_uint64_t
475uint64_shift32 (cairo_uint64_t i)
476{
477 cairo_uint64_t s;
478
479 s.lo = 0;
480 s.hi = i.lo;
481 return s;
482}
483
484static const cairo_uint64_t uint64_carry32 = { 0, 1 };
485
486#endif
487
488cairo_uint128_t
489_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
490{
491 cairo_uint128_t s;
492 uint32_t ah, al, bh, bl;
493 cairo_uint64_t r0, r1, r2, r3;
494
495 al = uint64_lo32 (a);
496 ah = uint64_hi32 (a);
497 bl = uint64_lo32 (b);
498 bh = uint64_hi32 (b);
499
500 r0 = _cairo_uint32x32_64_mul (al, bl);
501 r1 = _cairo_uint32x32_64_mul (al, bh);
502 r2 = _cairo_uint32x32_64_mul (ah, bl);
503 r3 = _cairo_uint32x32_64_mul (ah, bh);
504
505 r1 = _cairo_uint64_add (r1, uint64_hi (r0)); /* no carry possible */
506 r1 = _cairo_uint64_add (r1, r2); /* but this can carry */
507 if (_cairo_uint64_lt (r1, r2)) /* check */
508 r3 = _cairo_uint64_add (r3, uint64_carry32);
509
510 s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
511 s.lo = _cairo_uint64_add (uint64_shift32 (r1),
512 uint64_lo (r0));
513 return s;
514}
515
516cairo_int128_t
517_cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
518{
519 cairo_int128_t s;
520 s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64 (a),
521 _cairo_int64_to_uint64 (b));
522 if (_cairo_int64_negative (a))
523 s.hi = _cairo_uint64_sub (s.hi,
524 _cairo_int64_to_uint64 (b));
525 if (_cairo_int64_negative (b))
526 s.hi = _cairo_uint64_sub (s.hi,
527 _cairo_int64_to_uint64 (a));
528 return s;
529}
530
531cairo_uint128_t
532_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
533{
534 cairo_uint128_t s;
535
536 s = _cairo_uint64x64_128_mul (a.lo, b.lo);
537 s.hi = _cairo_uint64_add (s.hi,
538 _cairo_uint64_mul (a.lo, b.hi));
539 s.hi = _cairo_uint64_add (s.hi,
540 _cairo_uint64_mul (a.hi, b.lo));
541 return s;
542}
543
544cairo_uint128_t
545_cairo_uint128_lsl (cairo_uint128_t a, int shift)
546{
547 if (shift >= 64)
548 {
549 a.hi = a.lo;
550 a.lo = _cairo_uint32_to_uint64 (0);
551 shift -= 64;
552 }
553 if (shift)
554 {
555 a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
556 _cairo_uint64_rsl (a.lo, (64 - shift)));
557 a.lo = _cairo_uint64_lsl (a.lo, shift);
558 }
559 return a;
560}
561
562cairo_uint128_t
563_cairo_uint128_rsl (cairo_uint128_t a, int shift)
564{
565 if (shift >= 64)
566 {
567 a.lo = a.hi;
568 a.hi = _cairo_uint32_to_uint64 (0);
569 shift -= 64;
570 }
571 if (shift)
572 {
573 a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
574 _cairo_uint64_lsl (a.hi, (64 - shift)));
575 a.hi = _cairo_uint64_rsl (a.hi, shift);
576 }
577 return a;
578}
579
580cairo_uint128_t
581_cairo_uint128_rsa (cairo_uint128_t a, int shift)
582{
583 if (shift >= 64)
584 {
585 a.lo = a.hi;
586 a.hi = _cairo_uint64_rsa (a.hi, 64-1);
587 shift -= 64;
588 }
589 if (shift)
590 {
591 a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
592 _cairo_uint64_lsl (a.hi, (64 - shift)));
593 a.hi = _cairo_uint64_rsa (a.hi, shift);
594 }
595 return a;
596}
597
598int
599_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
600{
601 return (_cairo_uint64_lt (a.hi, b.hi) ||
602 (_cairo_uint64_eq (a.hi, b.hi) &&
603 _cairo_uint64_lt (a.lo, b.lo)));
604}
605
606int
607_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
608{
609 if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
610 return 1;
611 if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
612 return 0;
613 return _cairo_uint128_lt (a, b);
614}
615
616int
617_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
618{
619 return (_cairo_uint64_eq (a.hi, b.hi) &&
620 _cairo_uint64_eq (a.lo, b.lo));
621}
622
623#if HAVE_UINT64_T
624#define _cairo_msbset64(q) (q & ((uint64_t) 1 << 63))
625#else
626#define _cairo_msbset64(q) (q.hi & ((uint32_t) 1 << 31))
627#endif
628
629cairo_uquorem128_t
630_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
631{
632 cairo_uquorem128_t qr;
633 cairo_uint128_t bit;
634 cairo_uint128_t quo;
635
636 bit = _cairo_uint32_to_uint128 (1);
637
638 /* normalize to make den >= num, but not overflow */
639 while (_cairo_uint128_lt (den, num) && !_cairo_msbset64(den.hi))
640 {
641 bit = _cairo_uint128_lsl (bit, 1);
642 den = _cairo_uint128_lsl (den, 1);
643 }
644 quo = _cairo_uint32_to_uint128 (0);
645
646 /* generate quotient, one bit at a time */
647 while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128 (0)))
648 {
649 if (_cairo_uint128_le (den, num))
650 {
651 num = _cairo_uint128_sub (num, den);
652 quo = _cairo_uint128_add (quo, bit);
653 }
654 bit = _cairo_uint128_rsl (bit, 1);
655 den = _cairo_uint128_rsl (den, 1);
656 }
657 qr.quo = quo;
658 qr.rem = num;
659 return qr;
660}
661
662cairo_uint128_t
663_cairo_uint128_negate (cairo_uint128_t a)
664{
665 a.lo = _cairo_uint64_not (a.lo);
666 a.hi = _cairo_uint64_not (a.hi);
667 return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
668}
669
670cairo_uint128_t
671_cairo_uint128_not (cairo_uint128_t a)
672{
673 a.lo = _cairo_uint64_not (a.lo);
674 a.hi = _cairo_uint64_not (a.hi);
675 return a;
676}
677
678#endif /* !HAVE_UINT128_T */
679
680cairo_quorem128_t
681_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
682{
683 int num_neg = _cairo_int128_negative (num);
684 int den_neg = _cairo_int128_negative (den);
685 cairo_uquorem128_t uqr;
686 cairo_quorem128_t qr;
687
688 if (num_neg)
689 num = _cairo_int128_negate (num);
690 if (den_neg)
691 den = _cairo_int128_negate (den);
692 uqr = _cairo_uint128_divrem (num, den);
693 if (num_neg)
694 qr.rem = _cairo_int128_negate (uqr.rem);
695 else
696 qr.rem = uqr.rem;
697 if (num_neg != den_neg)
698 qr.quo = _cairo_int128_negate (uqr.quo);
699 else
700 qr.quo = uqr.quo;
701 return qr;
702}
703
704/**
705 * _cairo_uint_96by64_32x64_divrem:
706 *
707 * Compute a 32 bit quotient and 64 bit remainder of a 96 bit unsigned
708 * dividend and 64 bit divisor. If the quotient doesn't fit into 32
709 * bits then the returned remainder is equal to the divisor, and the
710 * quotient is the largest representable 64 bit integer. It is an
711 * error to call this function with the high 32 bits of `num' being
712 * non-zero. */
713cairo_uquorem64_t
714_cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
715 cairo_uint64_t den)
716{
717 cairo_uquorem64_t result;
718 cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);
719
720 /* These are the high 64 bits of the *96* bit numerator. We're
721 * going to represent the numerator as xB + y, where x is a 64,
722 * and y is a 32 bit number. */
723 cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl(num, 32));
724
725 /* Initialise the result to indicate overflow. */
726 result.quo = _cairo_uint32s_to_uint64 (UINT_MAX, UINT_MAX); //PDB cast
727 result.rem = den;
728
729 /* Don't bother if the quotient is going to overflow. */
730 if (_cairo_uint64_ge (x, den)) {
731 return /* overflow */ result;
732 }
733
734 if (_cairo_uint64_lt (x, B)) {
735 /* When the final quotient is known to fit in 32 bits, then
736 * num < 2^64 if and only if den < 2^32. */
737 return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
738 }
739 else {
740 /* Denominator is >= 2^32. the numerator is >= 2^64, and the
741 * division won't overflow: need two divrems. Write the
742 * numerator and denominator as
743 *
744 * num = xB + y x : 64 bits, y : 32 bits
745 * den = uB + v u, v : 32 bits
746 */
747 uint32_t y = _cairo_uint128_to_uint32 (num);
748 uint32_t u = uint64_hi32 (den);
749 uint32_t v = _cairo_uint64_to_uint32 (den);
750
751 /* Compute a lower bound approximate quotient of num/den
752 * from x/(u+1). Then we have
753 *
754 * x = q(u+1) + r ; q : 32 bits, r <= u : 32 bits.
755 *
756 * xB + y = q(u+1)B + (rB+y)
757 * = q(uB + B + v - v) + (rB+y)
758 * = q(uB + v) + qB - qv + (rB+y)
759 * = q(uB + v) + q(B-v) + (rB+y)
760 *
761 * The true quotient of num/den then is q plus the
762 * contribution of q(B-v) + (rB+y). The main contribution
763 * comes from the term q(B-v), with the term (rB+y) only
764 * contributing at most one part.
765 *
766 * The term q(B-v) must fit into 64 bits, since q fits into 32
767 * bits on account of being a lower bound to the true
768 * quotient, and as B-v <= 2^32, we may safely use a single
769 * 64/64 bit division to find its contribution. */
770
771 cairo_uquorem64_t quorem;
772 cairo_uint64_t remainder; /* will contain final remainder */
773 uint32_t quotient; /* will contain final quotient. */
774 uint32_t q;
775 uint32_t r;
776
777 /* Approximate quotient by dividing the high 64 bits of num by
778 * u+1. Watch out for overflow of u+1. */
779 if (u+1) {
780 quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
781 q = _cairo_uint64_to_uint32 (quorem.quo);
782 r = _cairo_uint64_to_uint32 (quorem.rem);
783 }
784 else {
785 q = uint64_hi32 (x);
786 r = _cairo_uint64_to_uint32 (x);
787 }
788 quotient = q;
789
790 /* Add the main term's contribution to quotient. Note B-v =
791 * -v as an uint32 (unless v = 0) */
792 if (v)
793 quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -(int32_t)v), den); //PDB cast
794 else
795 quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
796 quotient += _cairo_uint64_to_uint32 (quorem.quo);
797
798 /* Add the contribution of the subterm and start computing the
799 * true remainder. */
800 remainder = _cairo_uint32s_to_uint64 (r, y);
801 if (_cairo_uint64_ge (remainder, den)) {
802 remainder = _cairo_uint64_sub (remainder, den);
803 quotient++;
804 }
805
806 /* Add the contribution of the main term's remainder. The
807 * funky test here checks that remainder + main_rem >= den,
808 * taking into account overflow of the addition. */
809 remainder = _cairo_uint64_add (remainder, quorem.rem);
810 if (_cairo_uint64_ge (remainder, den) ||
811 _cairo_uint64_lt (remainder, quorem.rem))
812 {
813 remainder = _cairo_uint64_sub (remainder, den);
814 quotient++;
815 }
816
817 result.quo = _cairo_uint32_to_uint64 (quotient);
818 result.rem = remainder;
819 }
820 return result;
821}
822
823cairo_quorem64_t
824_cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
825{
826 int num_neg = _cairo_int128_negative (num);
827 int den_neg = _cairo_int64_negative (den);
828 cairo_uint64_t nonneg_den;
829 cairo_uquorem64_t uqr;
830 cairo_quorem64_t qr;
831
832 if (num_neg)
833 num = _cairo_int128_negate (num);
834 if (den_neg)
835 nonneg_den = _cairo_int64_negate (den);
836 else
837 nonneg_den = den;
838
839 uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
840 if (_cairo_uint64_eq (uqr.rem, _cairo_int64_to_uint64 (nonneg_den))) {
841 /* bail on overflow. */
842 qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, UINT_MAX); //PDB cast
843 qr.rem = den;
844 return qr;
845 }
846
847 if (num_neg)
848 qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
849 else
850 qr.rem = uqr.rem;
851 if (num_neg != den_neg)
852 qr.quo = _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
853 else
854 qr.quo = uqr.quo;
855 return qr;
856}
857
858// clang-format on
859// NOLINTEND
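How these helpers are used: callers include cairo-wideint-private.h, which maps the cairo_x names either to native 64-/128-bit arithmetic (when HAVE_UINT64_T / HAVE_UINT128_T are set) or to the struct-based fallback functions listed above. Below is a minimal caller sketch; it is not part of cairo-wideint.c, and the printf casts assume a HAVE_UINT64_T build where cairo_uint64_t is a plain uint64_t rather than the hi/lo struct.

#include <stdint.h>
#include <stdio.h>

#include "cairo-wideint-private.h"

int
main (void)
{
    /* Widen 32-bit values to the 64-bit type (a macro on native builds,
     * the fallback function above otherwise). */
    cairo_uint64_t num = _cairo_uint32_to_uint64 (1000000007u);
    cairo_uint64_t den = _cairo_uint32_to_uint64 (97u);

    /* One call yields both quotient and remainder:
     * 1000000007 = 97 * 10309278 + 41. */
    cairo_uquorem64_t qr = _cairo_uint64_divrem (num, den);

    printf ("quo=%llu rem=%llu\n",
            (unsigned long long) qr.quo,
            (unsigned long long) qr.rem);
    return 0;
}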