cairo-wideint.c
1/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
2/* cairo - a vector graphics library with display and print output
3 *
4 * Copyright © 2004 Keith Packard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation;
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 *
19 * The original code as contributed to the cairo library under
20 * the dual license MPL+LGPL. We used the LGPL relicensing clause to
21 * get a GPL version of this code which now lives here. This header is
22 * unmodified other than the licensing clause.
23 *
24 * The Original Code is the cairo graphics library.
25 *
26 * The Initial Developer of the Original Code is Keith Packard
27 *
28 * Contributor(s):
29 * Keith R. Packard <keithp@keithp.com>
30 *
31 * Code changes for ns-3 from upstream are marked with `//PDB'
32 */
33
34#include <climits>
35#include "cairo-wideint-private.h"
36
43// *NS_CHECK_STYLE_OFF*
44
45#if HAVE_UINT64_T
46
47const char * cairo_impl64 = "uint64_t";
48
49#define _cairo_uint32s_to_uint64(h,l) ((uint64_t) (h) << 32 | (l))
50
51cairo_uquorem64_t
52_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
53{
54 cairo_uquorem64_t qr;
55
56 qr.quo = num / den;
57 qr.rem = num % den;
58 return qr;
59}
60
61#else
62
63const char * cairo_impl64 = "uint32_t";
64
65cairo_uint64_t
66_cairo_uint32_to_uint64 (uint32_t i)
67{
68 cairo_uint64_t q;
69
70 q.lo = i;
71 q.hi = 0;
72 return q;
73}
74
75cairo_int64_t
76_cairo_int32_to_int64 (int32_t i)
77{
78 cairo_uint64_t q;
79
80 q.lo = i;
81 q.hi = i < 0 ? -1 : 0;
82 return q;
83}
84
85static cairo_uint64_t
86_cairo_uint32s_to_uint64 (uint32_t h, uint32_t l)
87{
88 cairo_uint64_t q;
89
90 q.lo = l;
91 q.hi = h;
92 return q;
93}
94
95cairo_uint64_t
96_cairo_uint64_add (cairo_uint64_t a, cairo_uint64_t b)
97{
98 cairo_uint64_t s;
99
100 s.hi = a.hi + b.hi;
101 s.lo = a.lo + b.lo;
102 if (s.lo < a.lo)
103 s.hi++;
104 return s;
105}
106
107cairo_uint64_t
108_cairo_uint64_sub (cairo_uint64_t a, cairo_uint64_t b)
109{
110 cairo_uint64_t s;
111
112 s.hi = a.hi - b.hi;
113 s.lo = a.lo - b.lo;
114 if (s.lo > a.lo)
115 s.hi--;
116 return s;
117}
118
119#define uint32_lo(i) ((i) & 0xffff)
120#define uint32_hi(i) ((i) >> 16)
121#define uint32_carry16 ((1) << 16)
122
123cairo_uint64_t
124_cairo_uint32x32_64_mul (uint32_t a, uint32_t b)
125{
126 cairo_uint64_t s;
127
128 uint16_t ah, al, bh, bl;
129 uint32_t r0, r1, r2, r3;
130
131 al = uint32_lo (a);
132 ah = uint32_hi (a);
133 bl = uint32_lo (b);
134 bh = uint32_hi (b);
135
136 r0 = (uint32_t) al * bl;
137 r1 = (uint32_t) al * bh;
138 r2 = (uint32_t) ah * bl;
139 r3 = (uint32_t) ah * bh;
140
141 r1 += uint32_hi(r0); /* no carry possible */
142 r1 += r2; /* but this can carry */
143 if (r1 < r2) /* check */
144 r3 += uint32_carry16;
145
146 s.hi = r3 + uint32_hi(r1);
147 s.lo = (uint32_lo (r1) << 16) + uint32_lo (r0);
148 return s;
149}
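The emulated 32x32->64 multiply above is the schoolbook method on 16-bit half-words: four partial products r0..r3, with the one possible carry out of the middle sum folded back into the high word. A minimal standalone sketch of the same scheme, checked against a native 64-bit multiply, is shown here for illustration only; it is not part of this file, and mul32x32_example is a name invented for the sketch.

#include <stdint.h>
#include <stdio.h>

static void
mul32x32_example (uint32_t a, uint32_t b)
{
  uint32_t al = a & 0xffff, ah = a >> 16;
  uint32_t bl = b & 0xffff, bh = b >> 16;

  uint32_t r0 = al * bl;            /* bits  0..31 */
  uint32_t r1 = al * bh;            /* bits 16..47 */
  uint32_t r2 = ah * bl;            /* bits 16..47 */
  uint32_t r3 = ah * bh;            /* bits 32..63 */

  r1 += r0 >> 16;                   /* cannot carry: al*bh <= (2^16-1)^2 */
  r1 += r2;                         /* but this sum can carry */
  if (r1 < r2)
    r3 += 1u << 16;                 /* propagate the carry into the high word */

  uint32_t hi = r3 + (r1 >> 16);
  uint32_t lo = (r1 << 16) + (r0 & 0xffff);

  printf ("%08x%08x vs %016llx\n",
          (unsigned) hi, (unsigned) lo,
          (unsigned long long) ((uint64_t) a * b));
}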
150
151cairo_int64_t
152_cairo_int32x32_64_mul (int32_t a, int32_t b)
153{
154 cairo_int64_t s;
155 s = _cairo_uint32x32_64_mul ((uint32_t) a, (uint32_t) b);
156 if (a < 0)
157 s.hi -= b;
158 if (b < 0)
159 s.hi -= a;
160 return s;
161}
162
163cairo_uint64_t
164_cairo_uint64_mul (cairo_uint64_t a, cairo_uint64_t b)
165{
166 cairo_uint64_t s;
167
168 s = _cairo_uint32x32_64_mul (a.lo, b.lo);
169 s.hi += a.lo * b.hi + a.hi * b.lo;
170 return s;
171}
172
173cairo_uint64_t
174_cairo_uint64_lsl (cairo_uint64_t a, int shift)
175{
176 if (shift >= 32)
177 {
178 a.hi = a.lo;
179 a.lo = 0;
180 shift -= 32;
181 }
182 if (shift)
183 {
184 a.hi = a.hi << shift | a.lo >> (32 - shift);
185 a.lo = a.lo << shift;
186 }
187 return a;
188}
189
190cairo_uint64_t
191_cairo_uint64_rsl (cairo_uint64_t a, int shift)
192{
193 if (shift >= 32)
194 {
195 a.lo = a.hi;
196 a.hi = 0;
197 shift -= 32;
198 }
199 if (shift)
200 {
201 a.lo = a.lo >> shift | a.hi << (32 - shift);
202 a.hi = a.hi >> shift;
203 }
204 return a;
205}
206
207#define _cairo_uint32_rsa(a,n) ((uint32_t) (((int32_t) (a)) >> (n)))
208
209cairo_int64_t
210_cairo_uint64_rsa (cairo_int64_t a, int shift)
211{
212 if (shift >= 32)
213 {
214 a.lo = a.hi;
215 a.hi = _cairo_uint32_rsa (a.hi, 31);
216 shift -= 32;
217 }
218 if (shift)
219 {
220 a.lo = a.lo >> shift | a.hi << (32 - shift);
221 a.hi = _cairo_uint32_rsa (a.hi, shift);
222 }
223 return a;
224}
225
226int
227_cairo_uint64_lt (cairo_uint64_t a, cairo_uint64_t b)
228{
229 return (a.hi < b.hi ||
230 (a.hi == b.hi && a.lo < b.lo));
231}
232
233int
234_cairo_uint64_eq (cairo_uint64_t a, cairo_uint64_t b)
235{
236 return a.hi == b.hi && a.lo == b.lo;
237}
238
239int
240_cairo_int64_lt (cairo_int64_t a, cairo_int64_t b)
241{
242 if (_cairo_int64_negative (a) && !_cairo_int64_negative (b))
243 return 1;
244 if (!_cairo_int64_negative (a) && _cairo_int64_negative (b))
245 return 0;
246 return _cairo_uint64_lt (a, b);
247}
248
249cairo_uint64_t
250_cairo_uint64_not (cairo_uint64_t a)
251{
252 a.lo = ~a.lo;
253 a.hi = ~a.hi;
254 return a;
255}
256
257cairo_uint64_t
258_cairo_uint64_negate (cairo_uint64_t a)
259{
260 a.lo = ~a.lo;
261 a.hi = ~a.hi;
262 if (++a.lo == 0)
263 ++a.hi;
264 return a;
265}
266
267/*
268 * Simple bit-at-a-time divide.
269 */
270cairo_uquorem64_t
271_cairo_uint64_divrem (cairo_uint64_t num, cairo_uint64_t den)
272{
273 cairo_uquorem64_t qr;
274 cairo_uint64_t bit;
275 cairo_uint64_t quo;
276
277 bit = _cairo_uint32_to_uint64 (1);
278
279 /* normalize to make den >= num, but not overflow */
280 while (_cairo_uint64_lt (den, num) && (den.hi & 0x80000000) == 0)
281 {
282 bit = _cairo_uint64_lsl (bit, 1);
283 den = _cairo_uint64_lsl (den, 1);
284 }
285 quo = _cairo_uint32_to_uint64 (0);
286
287 /* generate quotient, one bit at a time */
288 while (bit.hi | bit.lo)
289 {
290 if (_cairo_uint64_le (den, num))
291 {
292 num = _cairo_uint64_sub (num, den);
293 quo = _cairo_uint64_add (quo, bit);
294 }
295 bit = _cairo_uint64_rsl (bit, 1);
296 den = _cairo_uint64_rsl (den, 1);
297 }
298 qr.quo = quo;
299 qr.rem = num;
300 return qr;
301}
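The emulated _cairo_uint64_divrem above is plain restoring (shift-and-subtract) division: align the divisor under the dividend, then peel off one quotient bit per iteration. The same loop on native uint32_t, checked against the / and % operators, is sketched below as a standalone illustration; it is not part of this file, and divrem32_example is an invented name.

#include <assert.h>
#include <stdint.h>

static void
divrem32_example (uint32_t num, uint32_t den)   /* den must be non-zero */
{
  const uint32_t n0 = num, d0 = den;
  uint32_t bit = 1, quo = 0;

  /* normalize to make den >= num, but do not shift the top bit out */
  while (den < num && (den & 0x80000000u) == 0)
    {
      bit <<= 1;
      den <<= 1;
    }
  /* generate the quotient, one bit at a time */
  while (bit)
    {
      if (den <= num)
        {
          num -= den;               /* num ends up holding the remainder */
          quo += bit;
        }
      bit >>= 1;
      den >>= 1;
    }
  assert (quo == n0 / d0 && num == n0 % d0);
}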
302
303#endif /* !HAVE_UINT64_T */
304
305cairo_quorem64_t
306_cairo_int64_divrem (cairo_int64_t num, cairo_int64_t den)
307{
308 int num_neg = _cairo_int64_negative (num);
309 int den_neg = _cairo_int64_negative (den);
310 cairo_uquorem64_t uqr;
311 cairo_quorem64_t qr;
312
313 if (num_neg)
314 num = _cairo_int64_negate (num);
315 if (den_neg)
316 den = _cairo_int64_negate (den);
317 uqr = _cairo_uint64_divrem (num, den);
318 if (num_neg)
319 qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
320 else
321 qr.rem = uqr.rem;
322 if (num_neg != den_neg)
323 qr.quo = (cairo_int64_t) _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
324 else
325 qr.quo = (cairo_int64_t) uqr.quo;
326 return qr;
327}
328
329#if HAVE_UINT128_T
330
331const char * cairo_impl128 = "uint128_t";
332
333cairo_uquorem128_t
334_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
335{
336 cairo_uquorem128_t qr;
337
338 qr.quo = num / den;
339 qr.rem = num % den;
340 return qr;
341}
342
343#else
344
345const char * cairo_impl128 = "cairo_uint64_t";
346
347cairo_uint128_t
348_cairo_uint32_to_uint128 (uint32_t i)
349{
350 cairo_uint128_t q;
351
352 q.lo = _cairo_uint32_to_uint64 (i);
353 q.hi = _cairo_uint32_to_uint64 (0);
354 return q;
355}
356
357cairo_int128_t
358_cairo_int32_to_int128 (int32_t i)
359{
360 cairo_int128_t q;
361
362 q.lo = _cairo_int32_to_int64 (i);
363 q.hi = _cairo_int32_to_int64 (i < 0 ? -1 : 0);
364 return q;
365}
366
367cairo_uint128_t
368_cairo_uint64_to_uint128 (cairo_uint64_t i)
369{
370 cairo_uint128_t q;
371
372 q.lo = i;
373 q.hi = _cairo_uint32_to_uint64 (0);
374 return q;
375}
376
377cairo_int128_t
378_cairo_int64_to_int128 (cairo_int64_t i)
379{
380 cairo_int128_t q;
381
382 q.lo = i;
383 q.hi = _cairo_int32_to_int64 (_cairo_int64_negative (i) ? -1 : 0);
384 return q;
385}
386
387cairo_uint128_t
388_cairo_uint128_add (cairo_uint128_t a, cairo_uint128_t b)
389{
390 cairo_uint128_t s;
391
392 s.hi = _cairo_uint64_add (a.hi, b.hi);
393 s.lo = _cairo_uint64_add (a.lo, b.lo);
394 if (_cairo_uint64_lt (s.lo, a.lo))
395 s.hi = _cairo_uint64_add (s.hi, _cairo_uint32_to_uint64 (1));
396 return s;
397}
398
399cairo_uint128_t
400_cairo_uint128_sub (cairo_uint128_t a, cairo_uint128_t b)
401{
402 cairo_uint128_t s;
403
404 s.hi = _cairo_uint64_sub (a.hi, b.hi);
405 s.lo = _cairo_uint64_sub (a.lo, b.lo);
406 if (_cairo_uint64_gt (s.lo, a.lo))
407 s.hi = _cairo_uint64_sub (s.hi, _cairo_uint32_to_uint64 (1));
408 return s;
409}
410
411#if HAVE_UINT64_T
412
413#define uint64_lo32(i) ((i) & 0xffffffff)
414#define uint64_hi32(i) ((i) >> 32)
415#define uint64_lo(i) ((i) & 0xffffffff)
416#define uint64_hi(i) ((i) >> 32)
417#define uint64_shift32(i) ((i) << 32)
418#define uint64_carry32 (((uint64_t) 1) << 32)
419
420#else
421
422#define uint64_lo32(i) ((i).lo)
423#define uint64_hi32(i) ((i).hi)
424
425static cairo_uint64_t
426uint64_lo (cairo_uint64_t i)
427{
428 cairo_uint64_t s;
429
430 s.lo = i.lo;
431 s.hi = 0;
432 return s;
433}
434
435static cairo_uint64_t
436uint64_hi (cairo_uint64_t i)
437{
438 cairo_uint64_t s;
439
440 s.lo = i.hi;
441 s.hi = 0;
442 return s;
443}
444
445static cairo_uint64_t
446uint64_shift32 (cairo_uint64_t i)
447{
448 cairo_uint64_t s;
449
450 s.lo = 0;
451 s.hi = i.lo;
452 return s;
453}
454
455static const cairo_uint64_t uint64_carry32 = { 0, 1 };
456
457#endif
458
459cairo_uint128_t
460_cairo_uint64x64_128_mul (cairo_uint64_t a, cairo_uint64_t b)
461{
462 cairo_uint128_t s;
463 uint32_t ah, al, bh, bl;
464 cairo_uint64_t r0, r1, r2, r3;
465
466 al = uint64_lo32 (a);
467 ah = uint64_hi32 (a);
468 bl = uint64_lo32 (b);
469 bh = uint64_hi32 (b);
470
471 r0 = _cairo_uint32x32_64_mul (al, bl);
472 r1 = _cairo_uint32x32_64_mul (al, bh);
473 r2 = _cairo_uint32x32_64_mul (ah, bl);
474 r3 = _cairo_uint32x32_64_mul (ah, bh);
475
476 r1 = _cairo_uint64_add (r1, uint64_hi (r0)); /* no carry possible */
477 r1 = _cairo_uint64_add (r1, r2); /* but this can carry */
478 if (_cairo_uint64_lt (r1, r2)) /* check */
479 r3 = _cairo_uint64_add (r3, uint64_carry32);
480
481 s.hi = _cairo_uint64_add (r3, uint64_hi(r1));
482 s.lo = _cairo_uint64_add (uint64_shift32 (r1),
483 uint64_lo (r0));
484 return s;
485}
486
487cairo_int128_t
488_cairo_int64x64_128_mul (cairo_int64_t a, cairo_int64_t b)
489{
490 cairo_int128_t s;
491 s = _cairo_uint64x64_128_mul (_cairo_int64_to_uint64 (a),
492 _cairo_int64_to_uint64 (b));
493 if (_cairo_int64_negative (a))
494 s.hi = _cairo_uint64_sub (s.hi,
495 _cairo_int64_to_uint64 (b));
496 if (_cairo_int64_negative (b))
497 s.hi = _cairo_uint64_sub (s.hi,
498 _cairo_int64_to_uint64 (a));
499 return s;
500}
501
502cairo_uint128_t
503_cairo_uint128_mul (cairo_uint128_t a, cairo_uint128_t b)
504{
505 cairo_uint128_t s;
506
507 s = _cairo_uint64x64_128_mul (a.lo, b.lo);
508 s.hi = _cairo_uint64_add (s.hi,
509 _cairo_uint64_mul (a.lo, b.hi));
510 s.hi = _cairo_uint64_add (s.hi,
511 _cairo_uint64_mul (a.hi, b.lo));
512 return s;
513}
514
515cairo_uint128_t
516_cairo_uint128_lsl (cairo_uint128_t a, int shift)
517{
518 if (shift >= 64)
519 {
520 a.hi = a.lo;
521 a.lo = _cairo_uint32_to_uint64 (0);
522 shift -= 64;
523 }
524 if (shift)
525 {
526 a.hi = _cairo_uint64_add (_cairo_uint64_lsl (a.hi, shift),
527 _cairo_uint64_rsl (a.lo, (64 - shift)));
528 a.lo = _cairo_uint64_lsl (a.lo, shift);
529 }
530 return a;
531}
532
533cairo_uint128_t
534_cairo_uint128_rsl (cairo_uint128_t a, int shift)
535{
536 if (shift >= 64)
537 {
538 a.lo = a.hi;
539 a.hi = _cairo_uint32_to_uint64 (0);
540 shift -= 64;
541 }
542 if (shift)
543 {
544 a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
545 _cairo_uint64_lsl (a.hi, (64 - shift)));
546 a.hi = _cairo_uint64_rsl (a.hi, shift);
547 }
548 return a;
549}
550
551cairo_uint128_t
552_cairo_uint128_rsa (cairo_int128_t a, int shift)
553{
554 if (shift >= 64)
555 {
556 a.lo = a.hi;
557 a.hi = _cairo_uint64_rsa (a.hi, 64-1);
558 shift -= 64;
559 }
560 if (shift)
561 {
562 a.lo = _cairo_uint64_add (_cairo_uint64_rsl (a.lo, shift),
563 _cairo_uint64_lsl (a.hi, (64 - shift)));
564 a.hi = _cairo_uint64_rsa (a.hi, shift);
565 }
566 return a;
567}
568
569int
570_cairo_uint128_lt (cairo_uint128_t a, cairo_uint128_t b)
571{
572 return (_cairo_uint64_lt (a.hi, b.hi) ||
573 (_cairo_uint64_eq (a.hi, b.hi) &&
574 _cairo_uint64_lt (a.lo, b.lo)));
575}
576
577int
578_cairo_int128_lt (cairo_int128_t a, cairo_int128_t b)
579{
580 if (_cairo_int128_negative (a) && !_cairo_int128_negative (b))
581 return 1;
582 if (!_cairo_int128_negative (a) && _cairo_int128_negative (b))
583 return 0;
584 return _cairo_uint128_lt (a, b);
585}
586
587int
588_cairo_uint128_eq (cairo_uint128_t a, cairo_uint128_t b)
589{
590 return (_cairo_uint64_eq (a.hi, b.hi) &&
591 _cairo_uint64_eq (a.lo, b.lo));
592}
593
594#if HAVE_UINT64_T
595#define _cairo_msbset64(q) (q & ((uint64_t) 1 << 63))
596#else
597#define _cairo_msbset64(q) (q.hi & ((uint32_t) 1 << 31))
598#endif
599
600cairo_uquorem128_t
601_cairo_uint128_divrem (cairo_uint128_t num, cairo_uint128_t den)
602{
603 cairo_uquorem128_t qr;
604 cairo_uint128_t bit;
605 cairo_uint128_t quo;
606
607 bit = _cairo_uint32_to_uint128 (1);
608
609 /* normalize to make den >= num, but not overflow */
610 while (_cairo_uint128_lt (den, num) && !_cairo_msbset64(den.hi))
611 {
612 bit = _cairo_uint128_lsl (bit, 1);
613 den = _cairo_uint128_lsl (den, 1);
614 }
615 quo = _cairo_uint32_to_uint128 (0);
616
617 /* generate quotient, one bit at a time */
618 while (_cairo_uint128_ne (bit, _cairo_uint32_to_uint128 (0)))
619 {
620 if (_cairo_uint128_le (den, num))
621 {
622 num = _cairo_uint128_sub (num, den);
623 quo = _cairo_uint128_add (quo, bit);
624 }
625 bit = _cairo_uint128_rsl (bit, 1);
626 den = _cairo_uint128_rsl (den, 1);
627 }
628 qr.quo = quo;
629 qr.rem = num;
630 return qr;
631}
632
633cairo_uint128_t
634_cairo_uint128_negate (cairo_uint128_t a)
635{
636 a.lo = _cairo_uint64_not (a.lo);
637 a.hi = _cairo_uint64_not (a.hi);
638 return _cairo_uint128_add (a, _cairo_uint32_to_uint128 (1));
639}
640
641cairo_uint128_t
642_cairo_uint128_not (cairo_uint128_t a)
643{
644 a.lo = _cairo_uint64_not (a.lo);
645 a.hi = _cairo_uint64_not (a.hi);
646 return a;
647}
648
649#endif /* !HAVE_UINT128_T */
650
651cairo_quorem128_t
652_cairo_int128_divrem (cairo_int128_t num, cairo_int128_t den)
653{
654 int num_neg = _cairo_int128_negative (num);
655 int den_neg = _cairo_int128_negative (den);
656 cairo_uquorem128_t uqr;
657 cairo_quorem128_t qr;
658
659 if (num_neg)
660 num = _cairo_int128_negate (num);
661 if (den_neg)
662 den = _cairo_int128_negate (den);
663 uqr = _cairo_uint128_divrem (num, den);
664 if (num_neg)
665 qr.rem = _cairo_int128_negate (uqr.rem);
666 else
667 qr.rem = uqr.rem;
668 if (num_neg != den_neg)
669 qr.quo = _cairo_int128_negate (uqr.quo);
670 else
671 qr.quo = uqr.quo;
672 return qr;
673}
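Both signed wrappers (_cairo_int64_divrem above and _cairo_int128_divrem here) divide the magnitudes and then patch the signs: the quotient is negated exactly when the operands' signs differ, and the remainder takes the sign of the numerator. That is C's truncated division, which a native-type check makes concrete. The snippet below is a standalone illustration (not part of this file) and assumes C99-or-later division semantics.

#include <assert.h>

static void
signed_divrem_convention (void)
{
  assert (-7 /  2 == -3 && -7 %  2 == -1);  /* quotient toward zero, remainder follows the numerator */
  assert ( 7 / -2 == -3 &&  7 % -2 ==  1);
  assert (-7 / -2 ==  3 && -7 % -2 == -1);
}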
674
684cairo_uquorem64_t
685_cairo_uint_96by64_32x64_divrem (cairo_uint128_t num,
686 cairo_uint64_t den)
687{
688 cairo_uquorem64_t result;
689 cairo_uint64_t B = _cairo_uint32s_to_uint64 (1, 0);
690
691 /* These are the high 64 bits of the *96* bit numerator. We're
692 * going to represent the numerator as xB + y, where x is a 64,
693 * and y is a 32 bit number. */
694 cairo_uint64_t x = _cairo_uint128_to_uint64 (_cairo_uint128_rsl (num, 32));
695
696 /* Initialise the result to indicate overflow. */
697 result.quo = _cairo_uint32s_to_uint64 (UINT_MAX, UINT_MAX); //PDB cast
698 result.rem = den;
699
700 /* Don't bother if the quotient is going to overflow. */
701 if (_cairo_uint64_ge (x, den)) {
702 return /* overflow */ result;
703 }
704
705 if (_cairo_uint64_lt (x, B)) {
706 /* When the final quotient is known to fit in 32 bits, then
707 * num < 2^64 if and only if den < 2^32. */
708 return _cairo_uint64_divrem (_cairo_uint128_to_uint64 (num), den);
709 }
710 else {
711 /* Denominator is >= 2^32. the numerator is >= 2^64, and the
712 * division won't overflow: need two divrems. Write the
713 * numerator and denominator as
714 *
715 * num = xB + y x : 64 bits, y : 32 bits
716 * den = uB + v u, v : 32 bits
717 */
718 cairo_uint64_t y = _cairo_uint128_to_uint64 (num);
719 uint32_t u = uint64_hi32 (den);
720 uint32_t v = _cairo_uint64_to_uint32 (den);
721
722 /* Compute a lower bound approximate quotient of num/den
723 * from x/(u+1). Then we have
724 *
725 * x = q(u+1) + r ; q : 32 bits, r <= u : 32 bits.
726 *
727 * xB + y = q(u+1)B + (rB+y)
728 * = q(uB + B + v - v) + (rB+y)
729 * = q(uB + v) + qB - qv + (rB+y)
730 * = q(uB + v) + q(B-v) + (rB+y)
731 *
732 * The true quotient of num/den then is q plus the
733 * contribution of q(B-v) + (rB+y). The main contribution
734 * comes from the term q(B-v), with the term (rB+y) only
735 * contributing at most one part.
736 *
737 * The term q(B-v) must fit into 64 bits, since q fits into 32
738 * bits on account of being a lower bound to the true
739 * quotient, and as B-v <= 2^32, we may safely use a single
740 * 64/64 bit division to find its contribution. */
741
742 cairo_uquorem64_t quorem;
743 cairo_uint64_t remainder; /* will contain final remainder */
744 uint32_t quotient; /* will contain final quotient. */
745 uint32_t q;
746 uint32_t r;
747
748 /* Approximate quotient by dividing the high 64 bits of num by
749 * u+1. Watch out for overflow of u+1. */
750 if (u+1) {
751 quorem = _cairo_uint64_divrem (x, _cairo_uint32_to_uint64 (u+1));
752 q = _cairo_uint64_to_uint32 (quorem.quo);
753 r = _cairo_uint64_to_uint32 (quorem.rem);
754 }
755 else {
756 q = uint64_hi32 (x);
757 r = _cairo_uint64_to_uint32 (x);
758 }
759 quotient = q;
760
761 /* Add the main term's contribution to quotient. Note B-v =
762 * -v as an uint32 (unless v = 0) */
763 if (v)
764 quorem = _cairo_uint64_divrem (_cairo_uint32x32_64_mul (q, -(int32_t)v), den); //PDB cast
765 else
766 quorem = _cairo_uint64_divrem (_cairo_uint32s_to_uint64 (q, 0), den);
767 quotient += _cairo_uint64_to_uint32 (quorem.quo);
768
769 /* Add the contribution of the subterm and start computing the
770 * true remainder. */
771 remainder = _cairo_uint32s_to_uint64 (r, y);
772 if (_cairo_uint64_ge (remainder, den)) {
773 remainder = _cairo_uint64_sub (remainder, den);
774 quotient++;
775 }
776
777 /* Add the contribution of the main term's remainder. The
778 * funky test here checks that remainder + main_rem >= den,
779 * taking into account overflow of the addition. */
780 remainder = _cairo_uint64_add (remainder, quorem.rem);
781 if (_cairo_uint64_ge (remainder, den) ||
782 _cairo_uint64_lt (remainder, quorem.rem))
783 {
784 remainder = _cairo_uint64_sub (remainder, den);
785 quotient++;
786 }
787
788 result.quo = _cairo_uint32_to_uint64 (quotient);
789 result.rem = remainder;
790 }
791 return result;
792}
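The comment inside the function above derives the key identity xB + y = q(uB + v) + q(B - v) + (rB + y), where q and r come from dividing x by u + 1. A scaled-down check of that identity with B = 2^16, using only native 64-bit arithmetic, is sketched here for illustration; it is not part of this file, and the variable names simply mirror the comment.

#include <assert.h>
#include <stdint.h>

static void
divrem_96by64_identity_example (void)
{
  const uint64_t B = 1u << 16;
  uint64_t x = 0x12345, y = 0x6789;       /* numerator   = xB + y */
  uint64_t u = 0x00ab,  v = 0xcdef;       /* denominator = uB + v */
  uint64_t num = x * B + y, den = u * B + v;

  uint64_t q = x / (u + 1);               /* lower-bound quotient */
  uint64_t r = x % (u + 1);               /* so x = q(u+1) + r    */

  /* the identity the main comment derives */
  assert (num == q * den + q * (B - v) + (r * B + y));

  /* q never exceeds the true quotient, so adding the contribution of
   * q(B - v) + (rB + y) afterwards cannot overshoot */
  assert (q <= num / den);
}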
793
794cairo_quorem64_t
795_cairo_int_96by64_32x64_divrem (cairo_int128_t num, cairo_int64_t den)
796{
797 int num_neg = _cairo_int128_negative (num);
798 int den_neg = _cairo_int64_negative (den);
799 cairo_uint64_t nonneg_den;
800 cairo_uquorem64_t uqr;
801 cairo_quorem64_t qr;
802
803 if (num_neg)
804 num = _cairo_int128_negate (num);
805 if (den_neg)
806 nonneg_den = _cairo_int64_negate (den);
807 else
808 nonneg_den = den;
809
810 uqr = _cairo_uint_96by64_32x64_divrem (num, nonneg_den);
811 if (_cairo_uint64_eq (uqr.rem, _cairo_int64_to_uint64 (nonneg_den))) {
812 /* bail on overflow. */
813 qr.quo = _cairo_uint32s_to_uint64 (0x7FFFFFFF, UINT_MAX); //PDB cast
814 qr.rem = den;
815 return qr;
816 }
817
818 if (num_neg)
819 qr.rem = _cairo_int64_negate ((cairo_int64_t)uqr.rem); //PDB cast
820 else
821 qr.rem = uqr.rem;
822 if (num_neg != den_neg)
823 qr.quo = _cairo_int64_negate ((cairo_int64_t)uqr.quo); //PDB cast
824 else
825 qr.quo = uqr.quo;
826 return qr;
827}
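Taken together, these helpers give ns-3 portable 128-bit intermediate arithmetic even on platforms without native 64- or 128-bit types. A typical use is an overflow-safe multiply-then-divide, sketched below. This is a hypothetical helper, not part of this file; it assumes only the declarations from cairo-wideint-private.h and that the final quotient fits back into 64 bits.

#include "cairo-wideint-private.h"

static cairo_int64_t
mul_div_64 (cairo_int64_t a, cairo_int64_t b, cairo_int64_t c)
{
  /* full 128-bit product, so a * b cannot overflow */
  cairo_int128_t prod = _cairo_int64x64_128_mul (a, b);
  /* signed 128-bit division by c widened to 128 bits */
  cairo_quorem128_t qr = _cairo_int128_divrem (prod, _cairo_int64_to_int128 (c));
  /* truncate the quotient back down to 64 bits */
  return _cairo_uint128_to_uint64 (qr.quo);
}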