1 // Copyright 2017 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
// TestUintSize checks that the UintSize constant equals the bit width of
// type uint as computed via unsafe.Sizeof.
// NOTE(review): excerpt is incomplete — the declaration of x and the
// closing braces are elided; code lines kept byte-identical.
14 func TestUintSize(t *testing.T) {
16 if want := unsafe.Sizeof(x) * 8; UintSize != want {
17 t.Fatalf("UintSize = %d; want %d", UintSize, want)
// TestLeadingZeros exercises LeadingZeros{,8,16,32,64} over every 8-bit
// pattern i shifted left by k, comparing against a precomputed nlz plus a
// width adjustment (width - 8). The branches that set nlz, the comparison
// guards, and the UintSize-dependent checks around the LeadingZeros(uint)
// calls are elided from this excerpt — TODO confirm against full file.
21 func TestLeadingZeros(t *testing.T) {
22 for i := 0; i < 256; i++ {
24 for k := 0; k < 64-8; k++ {
25 x := uint64(i) << uint(k)
27 got := LeadingZeros8(uint8(x))
28 want := nlz - k + (8 - 8)
33 t.Fatalf("LeadingZeros8(%#02x) == %d; want %d", x, got, want)
38 got := LeadingZeros16(uint16(x))
39 want := nlz - k + (16 - 8)
44 t.Fatalf("LeadingZeros16(%#04x) == %d; want %d", x, got, want)
49 got := LeadingZeros32(uint32(x))
50 want := nlz - k + (32 - 8)
55 t.Fatalf("LeadingZeros32(%#08x) == %d; want %d", x, got, want)
58 got = LeadingZeros(uint(x))
60 t.Fatalf("LeadingZeros(%#08x) == %d; want %d", x, got, want)
66 got := LeadingZeros64(uint64(x))
67 want := nlz - k + (64 - 8)
72 t.Fatalf("LeadingZeros64(%#016x) == %d; want %d", x, got, want)
75 got = LeadingZeros(uint(x))
77 t.Fatalf("LeadingZeros(%#016x) == %d; want %d", x, got, want)
85 // Exported (global) variable serving as input for some
86 // of the benchmarks to ensure side-effect free calls
87 // are not optimized away.
88 var Input uint64 = DeBruijn64
90 // Exported (global) variable to store function results
91 // during benchmarking to ensure side-effect free calls
92 // are not optimized away.
// NOTE(review): the declaration this second comment documents (Output,
// used as a sink below, e.g. "Output = int(...)") is elided from this
// excerpt — presumably "var Output int"; verify against the full file.
95 func BenchmarkLeadingZeros(b *testing.B) {
97 for i := 0; i < b.N; i++ {
98 s += LeadingZeros(uint(Input) >> (uint(i) % UintSize))
103 func BenchmarkLeadingZeros8(b *testing.B) {
105 for i := 0; i < b.N; i++ {
106 s += LeadingZeros8(uint8(Input) >> (uint(i) % 8))
111 func BenchmarkLeadingZeros16(b *testing.B) {
113 for i := 0; i < b.N; i++ {
114 s += LeadingZeros16(uint16(Input) >> (uint(i) % 16))
119 func BenchmarkLeadingZeros32(b *testing.B) {
121 for i := 0; i < b.N; i++ {
122 s += LeadingZeros32(uint32(Input) >> (uint(i) % 32))
127 func BenchmarkLeadingZeros64(b *testing.B) {
129 for i := 0; i < b.N; i++ {
130 s += LeadingZeros64(uint64(Input) >> (uint(i) % 64))
// TestTrailingZeros exercises TrailingZeros{,8,16,32,64} over every 8-bit
// pattern shifted left by k. The lines computing want (presumably from a
// precomputed ntz table plus k) and the comparison guards are elided from
// this excerpt — TODO confirm against full file.
135 func TestTrailingZeros(t *testing.T) {
136 for i := 0; i < 256; i++ {
138 for k := 0; k < 64-8; k++ {
139 x := uint64(i) << uint(k)
142 got := TrailingZeros8(uint8(x))
147 t.Fatalf("TrailingZeros8(%#02x) == %d; want %d", x, got, want)
152 got := TrailingZeros16(uint16(x))
157 t.Fatalf("TrailingZeros16(%#04x) == %d; want %d", x, got, want)
162 got := TrailingZeros32(uint32(x))
167 t.Fatalf("TrailingZeros32(%#08x) == %d; want %d", x, got, want)
170 got = TrailingZeros(uint(x))
172 t.Fatalf("TrailingZeros(%#08x) == %d; want %d", x, got, want)
178 got := TrailingZeros64(uint64(x))
183 t.Fatalf("TrailingZeros64(%#016x) == %d; want %d", x, got, want)
186 got = TrailingZeros(uint(x))
188 t.Fatalf("TrailingZeros(%#016x) == %d; want %d", x, got, want)
196 func BenchmarkTrailingZeros(b *testing.B) {
198 for i := 0; i < b.N; i++ {
199 s += TrailingZeros(uint(Input) << (uint(i) % UintSize))
204 func BenchmarkTrailingZeros8(b *testing.B) {
206 for i := 0; i < b.N; i++ {
207 s += TrailingZeros8(uint8(Input) << (uint(i) % 8))
212 func BenchmarkTrailingZeros16(b *testing.B) {
214 for i := 0; i < b.N; i++ {
215 s += TrailingZeros16(uint16(Input) << (uint(i) % 16))
220 func BenchmarkTrailingZeros32(b *testing.B) {
222 for i := 0; i < b.N; i++ {
223 s += TrailingZeros32(uint32(Input) << (uint(i) % 32))
228 func BenchmarkTrailingZeros64(b *testing.B) {
230 for i := 0; i < b.N; i++ {
231 s += TrailingZeros64(uint64(Input) << (uint(i) % 64))
// TestOnesCount drives testOnesCount with three input families: a value x
// built up over 0..64 iterations (construction of x elided here), the same
// walked back down 64..0, and every 8-bit pattern shifted by k with its
// popcount taken from the tab table.
236 func TestOnesCount(t *testing.T) {
238 for i := 0; i <= 64; i++ {
239 testOnesCount(t, x, i)
243 for i := 64; i >= 0; i-- {
244 testOnesCount(t, x, i)
248 for i := 0; i < 256; i++ {
249 for k := 0; k < 64-8; k++ {
250 testOnesCount(t, uint64(i)<<uint(k), tab[i].pop)
// testOnesCount checks OnesCount{8,16,32,64,} against the expected
// popcount want; the width guards (presumably skipping widths too narrow
// to hold x) and comparisons are elided from this excerpt.
// NOTE(review): line 278 formats with %#08x while passing uint32(x) for
// the platform-width OnesCount — on 64-bit this under-prints; confirm
// against upstream whether that is intentional.
255 func testOnesCount(t *testing.T, x uint64, want int) {
257 got := OnesCount8(uint8(x))
259 t.Fatalf("OnesCount8(%#02x) == %d; want %d", uint8(x), got, want)
264 got := OnesCount16(uint16(x))
266 t.Fatalf("OnesCount16(%#04x) == %d; want %d", uint16(x), got, want)
271 got := OnesCount32(uint32(x))
273 t.Fatalf("OnesCount32(%#08x) == %d; want %d", uint32(x), got, want)
276 got = OnesCount(uint(x))
278 t.Fatalf("OnesCount(%#08x) == %d; want %d", uint32(x), got, want)
284 got := OnesCount64(uint64(x))
286 t.Fatalf("OnesCount64(%#016x) == %d; want %d", x, got, want)
289 got = OnesCount(uint(x))
291 t.Fatalf("OnesCount(%#016x) == %d; want %d", x, got, want)
297 func BenchmarkOnesCount(b *testing.B) {
299 for i := 0; i < b.N; i++ {
300 s += OnesCount(uint(Input))
305 func BenchmarkOnesCount8(b *testing.B) {
307 for i := 0; i < b.N; i++ {
308 s += OnesCount8(uint8(Input))
313 func BenchmarkOnesCount16(b *testing.B) {
315 for i := 0; i < b.N; i++ {
316 s += OnesCount16(uint16(Input))
321 func BenchmarkOnesCount32(b *testing.B) {
323 for i := 0; i < b.N; i++ {
324 s += OnesCount32(uint32(Input))
329 func BenchmarkOnesCount64(b *testing.B) {
331 for i := 0; i < b.N; i++ {
332 s += OnesCount64(uint64(Input))
// TestRotateLeft checks RotateLeft{8,16,32,64,} for every rotation count
// k in [0,128): rotating by k must match the masked shift identity
// x<<(k&mask) | x>>(width-k&mask), and rotating the result by -k must
// recover x. The declarations of x8/x16/x32/x64/x from m and the
// comparison guards are elided from this excerpt.
// NOTE(review): the k&0x1f branch (line 375) vs k&0x3f branch (line 398)
// for plain RotateLeft presumably sit under UintSize==32 / UintSize==64
// guards that are elided — confirm. Also line 405 uses %#08x widths in a
// 64-bit context where %#016x would be consistent; flagged only, code
// unchanged.
337 func TestRotateLeft(t *testing.T) {
338 var m uint64 = DeBruijn64
340 for k := uint(0); k < 128; k++ {
342 got8 := RotateLeft8(x8, int(k))
343 want8 := x8<<(k&0x7) | x8>>(8-k&0x7)
345 t.Fatalf("RotateLeft8(%#02x, %d) == %#02x; want %#02x", x8, k, got8, want8)
347 got8 = RotateLeft8(want8, -int(k))
349 t.Fatalf("RotateLeft8(%#02x, -%d) == %#02x; want %#02x", want8, k, got8, x8)
353 got16 := RotateLeft16(x16, int(k))
354 want16 := x16<<(k&0xf) | x16>>(16-k&0xf)
356 t.Fatalf("RotateLeft16(%#04x, %d) == %#04x; want %#04x", x16, k, got16, want16)
358 got16 = RotateLeft16(want16, -int(k))
360 t.Fatalf("RotateLeft16(%#04x, -%d) == %#04x; want %#04x", want16, k, got16, x16)
364 got32 := RotateLeft32(x32, int(k))
365 want32 := x32<<(k&0x1f) | x32>>(32-k&0x1f)
367 t.Fatalf("RotateLeft32(%#08x, %d) == %#08x; want %#08x", x32, k, got32, want32)
369 got32 = RotateLeft32(want32, -int(k))
371 t.Fatalf("RotateLeft32(%#08x, -%d) == %#08x; want %#08x", want32, k, got32, x32)
375 got := RotateLeft(x, int(k))
376 want := x<<(k&0x1f) | x>>(32-k&0x1f)
378 t.Fatalf("RotateLeft(%#08x, %d) == %#08x; want %#08x", x, k, got, want)
380 got = RotateLeft(want, -int(k))
382 t.Fatalf("RotateLeft(%#08x, -%d) == %#08x; want %#08x", want, k, got, x)
387 got64 := RotateLeft64(x64, int(k))
388 want64 := x64<<(k&0x3f) | x64>>(64-k&0x3f)
390 t.Fatalf("RotateLeft64(%#016x, %d) == %#016x; want %#016x", x64, k, got64, want64)
392 got64 = RotateLeft64(want64, -int(k))
394 t.Fatalf("RotateLeft64(%#016x, -%d) == %#016x; want %#016x", want64, k, got64, x64)
398 got := RotateLeft(x, int(k))
399 want := x<<(k&0x3f) | x>>(64-k&0x3f)
401 t.Fatalf("RotateLeft(%#016x, %d) == %#016x; want %#016x", x, k, got, want)
403 got = RotateLeft(want, -int(k))
405 t.Fatalf("RotateLeft(%#08x, -%d) == %#08x; want %#08x", want, k, got, x)
411 func BenchmarkRotateLeft(b *testing.B) {
413 for i := 0; i < b.N; i++ {
414 s += RotateLeft(uint(Input), i)
419 func BenchmarkRotateLeft8(b *testing.B) {
421 for i := 0; i < b.N; i++ {
422 s += RotateLeft8(uint8(Input), i)
427 func BenchmarkRotateLeft16(b *testing.B) {
429 for i := 0; i < b.N; i++ {
430 s += RotateLeft16(uint16(Input), i)
435 func BenchmarkRotateLeft32(b *testing.B) {
437 for i := 0; i < b.N; i++ {
438 s += RotateLeft32(uint32(Input), i)
443 func BenchmarkRotateLeft64(b *testing.B) {
445 for i := 0; i < b.N; i++ {
446 s += RotateLeft64(uint64(Input), i)
// TestReverse drives testReverse in two ways: every single-bit value
// (bit i must reverse to bit 63-i), and a table of fixed patterns checked
// in both directions (reverse of reverse is identity). Most table entries
// are elided from this excerpt.
451 func TestReverse(t *testing.T) {
453 for i := uint(0); i < 64; i++ {
454 testReverse(t, uint64(1)<<i, uint64(1)<<(63-i))
457 // test a few patterns
458 for _, test := range []struct {
477 {0x5686487, 0xe12616a000000000},
478 {0x0123456789abcdef, 0xf7b3d591e6a2c480},
480 testReverse(t, test.x, test.r)
481 testReverse(t, test.r, test.x)
// testReverse checks Reverse{8,16,32,64,} of truncations of x64 against
// the corresponding high bits of want64 (a W-bit reverse equals the top W
// bits of the 64-bit reverse). The x8/x16/x32 declarations, the
// platform-width Reverse calls, and comparison guards are elided here.
// NOTE(review): line 524 mixes %#08x for the input with %#016x for the
// results — flagged as a formatting inconsistency only; code unchanged.
485 func testReverse(t *testing.T, x64, want64 uint64) {
488 want8 := uint8(want64 >> (64 - 8))
490 t.Fatalf("Reverse8(%#02x) == %#02x; want %#02x", x8, got8, want8)
494 got16 := Reverse16(x16)
495 want16 := uint16(want64 >> (64 - 16))
497 t.Fatalf("Reverse16(%#04x) == %#04x; want %#04x", x16, got16, want16)
501 got32 := Reverse32(x32)
502 want32 := uint32(want64 >> (64 - 32))
504 t.Fatalf("Reverse32(%#08x) == %#08x; want %#08x", x32, got32, want32)
511 t.Fatalf("Reverse(%#08x) == %#08x; want %#08x", x, got, want)
515 got64 := Reverse64(x64)
517 t.Fatalf("Reverse64(%#016x) == %#016x; want %#016x", x64, got64, want64)
524 t.Fatalf("Reverse(%#08x) == %#016x; want %#016x", x, got, want)
529 func BenchmarkReverse(b *testing.B) {
531 for i := 0; i < b.N; i++ {
532 s += Reverse(uint(i))
537 func BenchmarkReverse8(b *testing.B) {
539 for i := 0; i < b.N; i++ {
540 s += Reverse8(uint8(i))
545 func BenchmarkReverse16(b *testing.B) {
547 for i := 0; i < b.N; i++ {
548 s += Reverse16(uint16(i))
553 func BenchmarkReverse32(b *testing.B) {
555 for i := 0; i < b.N; i++ {
556 s += Reverse32(uint32(i))
561 func BenchmarkReverse64(b *testing.B) {
563 for i := 0; i < b.N; i++ {
564 s += Reverse64(uint64(i))
// TestReverseBytes checks byte-order reversal against a table of
// patterns, in both directions. Each entry's reversed form is the
// byte-swapped constant shifted into the high-order bytes.
569 func TestReverseBytes(t *testing.T) {
570 for _, test := range []struct {
575 {0x0123, 0x2301 << 48},
576 {0x012345, 0x452301 << 40},
577 {0x01234567, 0x67452301 << 32},
578 {0x0123456789, 0x8967452301 << 24},
579 {0x0123456789ab, 0xab8967452301 << 16},
580 {0x0123456789abcd, 0xcdab8967452301 << 8},
581 {0x0123456789abcdef, 0xefcdab8967452301 << 0},
583 testReverseBytes(t, test.x, test.r)
584 testReverseBytes(t, test.r, test.x)
// testReverseBytes checks ReverseBytes{16,32,64,} of truncations of x64
// against the high bits of want64, mirroring testReverse but at byte
// granularity (there is no ReverseBytes8). The x16/x32 declarations and
// comparison guards are elided from this excerpt.
588 func testReverseBytes(t *testing.T, x64, want64 uint64) {
590 got16 := ReverseBytes16(x16)
591 want16 := uint16(want64 >> (64 - 16))
593 t.Fatalf("ReverseBytes16(%#04x) == %#04x; want %#04x", x16, got16, want16)
597 got32 := ReverseBytes32(x32)
598 want32 := uint32(want64 >> (64 - 32))
600 t.Fatalf("ReverseBytes32(%#08x) == %#08x; want %#08x", x32, got32, want32)
604 got := ReverseBytes(x)
607 t.Fatalf("ReverseBytes(%#08x) == %#08x; want %#08x", x, got, want)
611 got64 := ReverseBytes64(x64)
613 t.Fatalf("ReverseBytes64(%#016x) == %#016x; want %#016x", x64, got64, want64)
617 got := ReverseBytes(x)
620 t.Fatalf("ReverseBytes(%#016x) == %#016x; want %#016x", x, got, want)
625 func BenchmarkReverseBytes(b *testing.B) {
627 for i := 0; i < b.N; i++ {
628 s += ReverseBytes(uint(i))
633 func BenchmarkReverseBytes16(b *testing.B) {
635 for i := 0; i < b.N; i++ {
636 s += ReverseBytes16(uint16(i))
641 func BenchmarkReverseBytes32(b *testing.B) {
643 for i := 0; i < b.N; i++ {
644 s += ReverseBytes32(uint32(i))
649 func BenchmarkReverseBytes64(b *testing.B) {
651 for i := 0; i < b.N; i++ {
652 s += ReverseBytes64(uint64(i))
// TestLen checks Len{8,16,32,64,} for every 8-bit pattern shifted by k;
// the expected length is derived from the tab nlz table as 8 - nlz
// (adjusted per width on elided lines). The want computations and
// comparison guards are elided from this excerpt.
657 func TestLen(t *testing.T) {
658 for i := 0; i < 256; i++ {
659 len := 8 - tab[i].nlz
660 for k := 0; k < 64-8; k++ {
661 x := uint64(i) << uint(k)
667 got := Len8(uint8(x))
669 t.Fatalf("Len8(%#02x) == %d; want %d", x, got, want)
674 got := Len16(uint16(x))
676 t.Fatalf("Len16(%#04x) == %d; want %d", x, got, want)
681 got := Len32(uint32(x))
683 t.Fatalf("Len32(%#08x) == %d; want %d", x, got, want)
688 t.Fatalf("Len(%#08x) == %d; want %d", x, got, want)
694 got := Len64(uint64(x))
696 t.Fatalf("Len64(%#016x) == %d; want %d", x, got, want)
701 t.Fatalf("Len(%#016x) == %d; want %d", x, got, want)
// TestAddSubUint table-tests Add and Sub (platform-width add/sub with
// carry/borrow). For each row it checks Add in both argument orders, then
// Sub as the inverse (z - x = y, z - y = x) with the same carry-out, and
// repeats via closures so intrinsified versions are also exercised.
// Several table rows are elided from this excerpt.
716 func TestAddSubUint(t *testing.T) {
717 test := func(msg string, f func(x, y, c uint) (z, cout uint), x, y, c, z, cout uint) {
718 z1, cout1 := f(x, y, c)
719 if z1 != z || cout1 != cout {
720 t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout)
722 for _, a := range []struct{ x, y, c, z, cout uint }{
727 {12345, 67890, 0, 80235, 0},
728 {12345, 67890, 1, 80236, 0},
732 {_M, _M, 0, _M - 1, 1},
735 test("Add", Add, a.x, a.y, a.c, a.z, a.cout)
736 test("Add symmetric", Add, a.y, a.x, a.c, a.z, a.cout)
737 test("Sub", Sub, a.z, a.x, a.c, a.y, a.cout)
738 test("Sub symmetric", Sub, a.z, a.y, a.c, a.x, a.cout)
739 // The above code can't test intrinsic implementation, because the passed function is not called directly.
740 // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
741 test("Add intrinsic", func(x, y, c uint) (uint, uint) { return Add(x, y, c) }, a.x, a.y, a.c, a.z, a.cout)
742 test("Add intrinsic symmetric", func(x, y, c uint) (uint, uint) { return Add(x, y, c) }, a.y, a.x, a.c, a.z, a.cout)
743 test("Sub intrinsic", func(x, y, c uint) (uint, uint) { return Sub(x, y, c) }, a.z, a.x, a.c, a.y, a.cout)
744 test("Sub intrinsic symmetric", func(x, y, c uint) (uint, uint) { return Sub(x, y, c) }, a.z, a.y, a.c, a.x, a.cout)
// TestAddSubUint32 is the 32-bit analogue of TestAddSubUint, checking
// Add32/Sub32 symmetry and inversion per table row. Unlike the uint and
// uint64 variants, the visible lines include no intrinsic-closure checks
// (presumably Add32/Sub32 are not intrinsified — confirm upstream).
749 func TestAddSubUint32(t *testing.T) {
750 test := func(msg string, f func(x, y, c uint32) (z, cout uint32), x, y, c, z, cout uint32) {
751 z1, cout1 := f(x, y, c)
752 if z1 != z || cout1 != cout {
753 t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout)
756 for _, a := range []struct{ x, y, c, z, cout uint32 }{
761 {12345, 67890, 0, 80235, 0},
762 {12345, 67890, 1, 80236, 0},
766 {_M32, _M32, 0, _M32 - 1, 1},
767 {_M32, _M32, 1, _M32, 1},
769 test("Add32", Add32, a.x, a.y, a.c, a.z, a.cout)
770 test("Add32 symmetric", Add32, a.y, a.x, a.c, a.z, a.cout)
771 test("Sub32", Sub32, a.z, a.x, a.c, a.y, a.cout)
772 test("Sub32 symmetric", Sub32, a.z, a.y, a.c, a.x, a.cout)
// TestAddSubUint64 is the 64-bit analogue of TestAddSubUint: Add64/Sub64
// symmetry and inversion per table row, plus closure-wrapped calls to
// exercise the intrinsified versions. Some table rows are elided.
776 func TestAddSubUint64(t *testing.T) {
777 test := func(msg string, f func(x, y, c uint64) (z, cout uint64), x, y, c, z, cout uint64) {
778 z1, cout1 := f(x, y, c)
779 if z1 != z || cout1 != cout {
780 t.Errorf("%s: got z:cout = %#x:%#x; want %#x:%#x", msg, z1, cout1, z, cout)
783 for _, a := range []struct{ x, y, c, z, cout uint64 }{
788 {12345, 67890, 0, 80235, 0},
789 {12345, 67890, 1, 80236, 0},
793 {_M64, _M64, 0, _M64 - 1, 1},
794 {_M64, _M64, 1, _M64, 1},
796 test("Add64", Add64, a.x, a.y, a.c, a.z, a.cout)
797 test("Add64 symmetric", Add64, a.y, a.x, a.c, a.z, a.cout)
798 test("Sub64", Sub64, a.z, a.x, a.c, a.y, a.cout)
799 test("Sub64 symmetric", Sub64, a.z, a.y, a.c, a.x, a.cout)
800 // The above code can't test intrinsic implementation, because the passed function is not called directly.
801 // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
802 test("Add64 intrinsic", func(x, y, c uint64) (uint64, uint64) { return Add64(x, y, c) }, a.x, a.y, a.c, a.z, a.cout)
803 test("Add64 intrinsic symmetric", func(x, y, c uint64) (uint64, uint64) { return Add64(x, y, c) }, a.y, a.x, a.c, a.z, a.cout)
804 test("Sub64 intrinsic", func(x, y, c uint64) (uint64, uint64) { return Sub64(x, y, c) }, a.z, a.x, a.c, a.y, a.cout)
805 test("Sub64 intrinsic symmetric", func(x, y, c uint64) (uint64, uint64) { return Sub64(x, y, c) }, a.z, a.y, a.c, a.x, a.cout)
// TestMulDiv table-tests platform-width Mul (full double-width product)
// and Div (double-width dividend division): Mul is checked in both
// argument orders, and Div must invert it, with hi:lo+r divided by one
// factor yielding the other factor and remainder r. Closure-wrapped calls
// exercise intrinsified versions. The testMul call line (e.g.
// "hi1, lo1 := f(x, y)") and some table rows are elided here.
809 func TestMulDiv(t *testing.T) {
810 testMul := func(msg string, f func(x, y uint) (hi, lo uint), x, y, hi, lo uint) {
812 if hi1 != hi || lo1 != lo {
813 t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo)
816 testDiv := func(msg string, f func(hi, lo, y uint) (q, r uint), hi, lo, y, q, r uint) {
817 q1, r1 := f(hi, lo, y)
818 if q1 != q || r1 != r {
819 t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r)
822 for _, a := range []struct {
826 {1 << (UintSize - 1), 2, 1, 0, 1},
827 {_M, _M, _M - 1, 1, 42},
829 testMul("Mul", Mul, a.x, a.y, a.hi, a.lo)
830 testMul("Mul symmetric", Mul, a.y, a.x, a.hi, a.lo)
831 testDiv("Div", Div, a.hi, a.lo+a.r, a.y, a.x, a.r)
832 testDiv("Div symmetric", Div, a.hi, a.lo+a.r, a.x, a.y, a.r)
833 // The above code can't test intrinsic implementation, because the passed function is not called directly.
834 // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
835 testMul("Mul intrinsic", func(x, y uint) (uint, uint) { return Mul(x, y) }, a.x, a.y, a.hi, a.lo)
836 testMul("Mul intrinsic symmetric", func(x, y uint) (uint, uint) { return Mul(x, y) }, a.y, a.x, a.hi, a.lo)
837 testDiv("Div intrinsic", func(hi, lo, y uint) (uint, uint) { return Div(hi, lo, y) }, a.hi, a.lo+a.r, a.y, a.x, a.r)
838 testDiv("Div intrinsic symmetric", func(hi, lo, y uint) (uint, uint) { return Div(hi, lo, y) }, a.hi, a.lo+a.r, a.x, a.y, a.r)
// TestMulDiv32 is the 32-bit analogue of TestMulDiv (Mul32/Div32, no
// intrinsic-closure variants visible). The testMul call line and some
// table rows are elided from this excerpt.
842 func TestMulDiv32(t *testing.T) {
843 testMul := func(msg string, f func(x, y uint32) (hi, lo uint32), x, y, hi, lo uint32) {
845 if hi1 != hi || lo1 != lo {
846 t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo)
849 testDiv := func(msg string, f func(hi, lo, y uint32) (q, r uint32), hi, lo, y, q, r uint32) {
850 q1, r1 := f(hi, lo, y)
851 if q1 != q || r1 != r {
852 t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r)
855 for _, a := range []struct {
859 {1 << 31, 2, 1, 0, 1},
860 {0xc47dfa8c, 50911, 0x98a4, 0x998587f4, 13},
861 {_M32, _M32, _M32 - 1, 1, 42},
863 testMul("Mul32", Mul32, a.x, a.y, a.hi, a.lo)
864 testMul("Mul32 symmetric", Mul32, a.y, a.x, a.hi, a.lo)
865 testDiv("Div32", Div32, a.hi, a.lo+a.r, a.y, a.x, a.r)
866 testDiv("Div32 symmetric", Div32, a.hi, a.lo+a.r, a.x, a.y, a.r)
// TestMulDiv64 is the 64-bit analogue of TestMulDiv: Mul64/Div64 checked
// directly and via closures for the intrinsified versions. The testMul
// call line and some table rows are elided from this excerpt.
870 func TestMulDiv64(t *testing.T) {
871 testMul := func(msg string, f func(x, y uint64) (hi, lo uint64), x, y, hi, lo uint64) {
873 if hi1 != hi || lo1 != lo {
874 t.Errorf("%s: got hi:lo = %#x:%#x; want %#x:%#x", msg, hi1, lo1, hi, lo)
877 testDiv := func(msg string, f func(hi, lo, y uint64) (q, r uint64), hi, lo, y, q, r uint64) {
878 q1, r1 := f(hi, lo, y)
879 if q1 != q || r1 != r {
880 t.Errorf("%s: got q:r = %#x:%#x; want %#x:%#x", msg, q1, r1, q, r)
883 for _, a := range []struct {
887 {1 << 63, 2, 1, 0, 1},
888 {0x3626229738a3b9, 0xd8988a9f1cc4a61, 0x2dd0712657fe8, 0x9dd6a3364c358319, 13},
889 {_M64, _M64, _M64 - 1, 1, 42},
891 testMul("Mul64", Mul64, a.x, a.y, a.hi, a.lo)
892 testMul("Mul64 symmetric", Mul64, a.y, a.x, a.hi, a.lo)
893 testDiv("Div64", Div64, a.hi, a.lo+a.r, a.y, a.x, a.r)
894 testDiv("Div64 symmetric", Div64, a.hi, a.lo+a.r, a.x, a.y, a.r)
895 // The above code can't test intrinsic implementation, because the passed function is not called directly.
896 // The following code uses a closure to test the intrinsic version in case the function is intrinsified.
897 testMul("Mul64 intrinsic", func(x, y uint64) (uint64, uint64) { return Mul64(x, y) }, a.x, a.y, a.hi, a.lo)
898 testMul("Mul64 intrinsic symmetric", func(x, y uint64) (uint64, uint64) { return Mul64(x, y) }, a.y, a.x, a.hi, a.lo)
899 testDiv("Div64 intrinsic", func(hi, lo, y uint64) (uint64, uint64) { return Div64(hi, lo, y) }, a.hi, a.lo+a.r, a.y, a.x, a.r)
900 testDiv("Div64 intrinsic symmetric", func(hi, lo, y uint64) (uint64, uint64) { return Div64(hi, lo, y) }, a.hi, a.lo+a.r, a.x, a.y, a.r)
// Expected panic messages from the runtime for the Div panic tests below;
// compared against err.(runtime.Error).Error(). The enclosing const(...)
// lines are elided from this excerpt.
905 divZeroError = "runtime error: integer divide by zero"
906 overflowError = "runtime error: integer overflow"
// TestDivPanicOverflow verifies that Div panics with the runtime overflow
// error when the quotient would not fit (y <= hi), via a deferred
// recover. The defer/func wrapper lines and the Div call are elided.
909 func TestDivPanicOverflow(t *testing.T) {
912 if err := recover(); err == nil {
913 t.Error("Div should have panicked when y<=hi")
914 } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError {
915 t.Errorf("Div expected panic: %q, got: %q ", overflowError, e.Error())
919 t.Errorf("undefined q, r = %v, %v calculated when Div should have panicked", q, r)
// TestDiv32PanicOverflow: Div32(1, 0, 1) has y <= hi and must panic with
// the runtime overflow error; the deferred recover checks the message.
// The defer/func wrapper lines are elided from this excerpt.
922 func TestDiv32PanicOverflow(t *testing.T) {
925 if err := recover(); err == nil {
926 t.Error("Div32 should have panicked when y<=hi")
927 } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError {
928 t.Errorf("Div32 expected panic: %q, got: %q ", overflowError, e.Error())
931 q, r := Div32(1, 0, 1)
932 t.Errorf("undefined q, r = %v, %v calculated when Div32 should have panicked", q, r)
// TestDiv64PanicOverflow: Div64(1, 0, 1) has y <= hi and must panic with
// the runtime overflow error; the deferred recover checks the message.
// The defer/func wrapper lines are elided from this excerpt.
935 func TestDiv64PanicOverflow(t *testing.T) {
938 if err := recover(); err == nil {
939 t.Error("Div64 should have panicked when y<=hi")
940 } else if e, ok := err.(runtime.Error); !ok || e.Error() != overflowError {
941 t.Errorf("Div64 expected panic: %q, got: %q ", overflowError, e.Error())
944 q, r := Div64(1, 0, 1)
945 t.Errorf("undefined q, r = %v, %v calculated when Div64 should have panicked", q, r)
// TestDivPanicZero verifies that Div panics with the runtime
// divide-by-zero error when y == 0. The defer/func wrapper lines and the
// Div call are elided from this excerpt.
948 func TestDivPanicZero(t *testing.T) {
951 if err := recover(); err == nil {
952 t.Error("Div should have panicked when y==0")
953 } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError {
954 t.Errorf("Div expected panic: %q, got: %q ", divZeroError, e.Error())
958 t.Errorf("undefined q, r = %v, %v calculated when Div should have panicked", q, r)
// TestDiv32PanicZero: Div32(1, 1, 0) divides by zero and must panic with
// the runtime divide-by-zero error. The defer/func wrapper lines are
// elided from this excerpt.
961 func TestDiv32PanicZero(t *testing.T) {
964 if err := recover(); err == nil {
965 t.Error("Div32 should have panicked when y==0")
966 } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError {
967 t.Errorf("Div32 expected panic: %q, got: %q ", divZeroError, e.Error())
970 q, r := Div32(1, 1, 0)
971 t.Errorf("undefined q, r = %v, %v calculated when Div32 should have panicked", q, r)
// TestDiv64PanicZero: Div64(1, 1, 0) divides by zero and must panic with
// the runtime divide-by-zero error. The defer/func wrapper lines are
// elided from this excerpt.
974 func TestDiv64PanicZero(t *testing.T) {
977 if err := recover(); err == nil {
978 t.Error("Div64 should have panicked when y==0")
979 } else if e, ok := err.(runtime.Error); !ok || e.Error() != divZeroError {
980 t.Errorf("Div64 expected panic: %q, got: %q ", divZeroError, e.Error())
983 q, r := Div64(1, 1, 0)
984 t.Errorf("undefined q, r = %v, %v calculated when Div64 should have panicked", q, r)
987 func BenchmarkAdd(b *testing.B) {
989 for i := 0; i < b.N; i++ {
990 z, c = Add(uint(Input), uint(i), c)
995 func BenchmarkAdd32(b *testing.B) {
997 for i := 0; i < b.N; i++ {
998 z, c = Add32(uint32(Input), uint32(i), c)
1003 func BenchmarkAdd64(b *testing.B) {
1005 for i := 0; i < b.N; i++ {
1006 z, c = Add64(uint64(Input), uint64(i), c)
// BenchmarkAdd64multiple measures chained Add64 calls where each carry
// feeds the next addition, accumulating into four independent values; the
// sum is written to the exported Output sink so the calls are not
// optimized away. The declaration of c is elided from this excerpt.
// NOTE(review): the last call discards its carry-out (z3, _ =) — the
// chain restarts each iteration from the carry declared outside the
// visible lines; confirm c's scope against the full file.
1011 func BenchmarkAdd64multiple(b *testing.B) {
1012 var z0 = uint64(Input)
1013 var z1 = uint64(Input)
1014 var z2 = uint64(Input)
1015 var z3 = uint64(Input)
1016 for i := 0; i < b.N; i++ {
1018 z0, c = Add64(z0, uint64(i), c)
1019 z1, c = Add64(z1, uint64(i), c)
1020 z2, c = Add64(z2, uint64(i), c)
1021 z3, _ = Add64(z3, uint64(i), c)
1023 Output = int(z0 + z1 + z2 + z3)
1026 func BenchmarkSub(b *testing.B) {
1028 for i := 0; i < b.N; i++ {
1029 z, c = Sub(uint(Input), uint(i), c)
1034 func BenchmarkSub32(b *testing.B) {
1036 for i := 0; i < b.N; i++ {
1037 z, c = Sub32(uint32(Input), uint32(i), c)
1042 func BenchmarkSub64(b *testing.B) {
1044 for i := 0; i < b.N; i++ {
1045 z, c = Sub64(uint64(Input), uint64(i), c)
// BenchmarkSub64multiple mirrors BenchmarkAdd64multiple for Sub64:
// chained borrow-propagating subtractions into four independent values,
// summed into the exported Output sink. The declaration of c is elided
// from this excerpt.
1050 func BenchmarkSub64multiple(b *testing.B) {
1051 var z0 = uint64(Input)
1052 var z1 = uint64(Input)
1053 var z2 = uint64(Input)
1054 var z3 = uint64(Input)
1055 for i := 0; i < b.N; i++ {
1057 z0, c = Sub64(z0, uint64(i), c)
1058 z1, c = Sub64(z1, uint64(i), c)
1059 z2, c = Sub64(z2, uint64(i), c)
1060 z3, _ = Sub64(z3, uint64(i), c)
1062 Output = int(z0 + z1 + z2 + z3)
1065 func BenchmarkMul(b *testing.B) {
1067 for i := 0; i < b.N; i++ {
1068 hi, lo = Mul(uint(Input), uint(i))
1070 Output = int(hi + lo)
1073 func BenchmarkMul32(b *testing.B) {
1075 for i := 0; i < b.N; i++ {
1076 hi, lo = Mul32(uint32(Input), uint32(i))
1078 Output = int(hi + lo)
1081 func BenchmarkMul64(b *testing.B) {
1083 for i := 0; i < b.N; i++ {
1084 hi, lo = Mul64(uint64(Input), uint64(i))
1086 Output = int(hi + lo)
1089 func BenchmarkDiv(b *testing.B) {
1091 for i := 0; i < b.N; i++ {
1092 q, r = Div(1, uint(i), uint(Input))
1097 func BenchmarkDiv32(b *testing.B) {
1099 for i := 0; i < b.N; i++ {
1100 q, r = Div32(1, uint32(i), uint32(Input))
1105 func BenchmarkDiv64(b *testing.B) {
1107 for i := 0; i < b.N; i++ {
1108 q, r = Div64(1, uint64(i), uint64(Input))
1113 // ----------------------------------------------------------------------------
// Support table for the tests above: entry appears to hold per-byte
// results (the nlz, pop fields referenced as tab[i].nlz / tab[i].pop
// earlier); its field list, the tab declaration, and the enclosing init
// function are all elided from this excerpt — do not edit without the
// full file.
1116 type entry = struct {
1120 // tab contains results for all uint8 values
1124 tab[0] = entry{8, 8, 0}
1125 for i := 1; i < len(tab); i++ {