// Source file: src/runtime/gc_test.go
//
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/testenv"
	"internal/weak"
	"math/bits"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis:
	// printing the pointer forces a to be heap allocated.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

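// TestGcMapIndirection stresses maps whose large keys and values are
// stored indirectly in the buckets, while an aggressive GC percent
// forces frequent collections during the inserts.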
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

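// TestGcArraySlice links nodes so that each one holds a slice aliasing
// the previous node's array field, then checks after repeated GCs that
// none of the data was corrupted.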
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

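// TestGcRescan builds a list whose nodes are reachable both through an
// embedded struct (nextx) and through the outer struct (nexty), running
// a GC at every step and then checking the heap for corruption.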
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on some
	// platforms, so only log (don't fail) on suspicious values.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// The test ensures the compiler can generate GC info for very
	// large types. hugeSink is never set, so the body never runs.
	if hugeSink != nil {
		// On 64-bit platforms the second term makes n several
		// terabytes; on 32-bit it is zero and n is 400 MB.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// Run a subprocess that keeps a pointer to an object the GC has
	// freed and make sure the GC's zombie check reports it.
	// GODEBUG=invalidptr=0 disables the runtime's invalid-pointer
	// check, so the program survives long enough for the GC itself
	// to find and report the zombie pointer.
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// copy to look for errors.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack copy, so it should
	// no longer match old, the address captured before the move.
	//
	// Capture new's current value before doing anything else that
	// could move the stack again.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// If the pointer didn't change, make sure it's still a
		// stack pointer; anything else means the test is broken.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// The stack did not move; retry.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure it's not doubling
	// every time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get inlined.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// Allocate a pointer-typed object so it isn't grouped
		// with its neighbors by the tiny allocator.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if got&want != want {
		// This is a serious bug - an object is live (because of
		// the KeepAlive below), but isn't reported as such.
		t.Fatalf("live object not in reachable set; want %b, got %b", want, got)
	}
	if bits.OnesCount64(got&^want) > 1 {
		// Stale pointers left on the stack or in registers may
		// conservatively keep an object alive, but more than one
		// unexpected survivor suggests the dead objects are not
		// being collected at all.
		t.Fatalf("dead object in reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Report the raw address being classified along
			// with both the expected and actual class.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

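// BenchmarkAllocation measures allocation throughput: b.N work units
// are spread across GOMAXPROCS goroutines, and each unit allocates a
// batch of small pointer-containing structs.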
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments, then check that the
	// runtime's running page count agrees with a direct recount
	// of the pages in use.
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func init() {
	// Enable ReadMemStats' double-check mode.
	*runtime.DoubleCheckReadMemStats = true
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

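// logDiff recursively walks two values of the same type and logs every
// leaf field where they differ, prefixing each report with its path.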
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

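// applyGCLoad starts maxProcs-1 background goroutines that continuously
// build large pointer-heavy trees, keeping the garbage collector busy.
// It returns a function that stops the load and waits for the
// goroutines to exit.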
func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 GOMAXPROCS (that's what
	// runtime.GOMAXPROCS(1) would do).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Set up the load.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Start the load: each goroutine repeatedly rebuilds a tree,
	// generating lots of garbage, until done is closed.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Record the latency of each ReadMemStats call.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously and measure how long each call
	// takes while the GC load runs in the background.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit; otherwise we'd just be continuously
		// stopping the world and nothing else would get to run.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}

	// Make sure to stop the timer before we wait! The load created
	// above is heavyweight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics: with the sleep in the loop
	// above, averages like ns/op are meaningless here. Report
	// latency percentiles instead.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort and report percentiles.
	slices.Sort(latencies)
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even when automatic GC
	// is disabled.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

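// writeBarrierBenchmark runs f while a background goroutine keeps the
// garbage collector cycling continuously, so the write barrier stays
// enabled for the whole measurement.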
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it doesn't
			// run out of work while the tree is still being
			// constructed.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops.
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// See BenchmarkWriteBarrier.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate a block of pointers by one position,
			// triggering a bulk write barrier for the copy.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

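// BenchmarkMSpanCountAlloc measures how quickly the runtime can count
// a span's allocated objects from its mark bits, for several bitmap
// sizes.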
func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

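// countpwg recurses until *n reaches zero, adding one stack frame per
// count, then signals readiness and blocks until teardown is closed.
// It parks goroutines with deep, nearly local-free stacks for
// BenchmarkScanStackNoLocals.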
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}

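// TestWeakToStrongMarkTermination stresses weak-to-strong pointer
// conversions while the GC spins in mark termination, checking that
// the conversions cannot create new mark work that would force
// gcMarkDone to restart the mark phase.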
func TestWeakToStrongMarkTermination(t *testing.T) {
	testenv.MustHaveParallelism(t)

	type T struct {
		a *int
		b int
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	w := make([]weak.Pointer[T], 2048)

	// Make sure there's no outstanding GC from a previous test.
	runtime.GC()

	// Create many objects with a weak pointer to each one.
	for i := range w {
		x := new(T)
		x.a = new(int)
		w[i] = weak.Make(x)
	}

	// Reset the flag that tracks gcMarkDone restarts.
	runtime.GCMarkDoneResetRestartFlag()

	// Make mark termination spin instead of completing promptly.
	runtime.SetSpinInGCMarkDone(true)

	// Start a GC and wait a little so it's spinning in gcMarkDone.
	// Simultaneously, arrange for the spinning to stop after a
	// while: the weak.Strong calls below may block, so something
	// must eventually release the GC from its spin loop.
	done := make(chan struct{})
	go func() {
		runtime.GC()
		done <- struct{}{}
	}()
	go func() {
		time.Sleep(100 * time.Millisecond)

		// Let mark termination continue.
		runtime.SetSpinInGCMarkDone(false)
	}()
	time.Sleep(10 * time.Millisecond)

	// Perform many weak->strong conversions in the critical window.
	var wg sync.WaitGroup
	for _, wp := range w {
		wg.Add(1)
		go func() {
			defer wg.Done()
			wp.Strong()
		}()
	}

	// Make sure the GC completes.
	<-done

	// Make sure all the weak->strong conversions finish.
	wg.Wait()

	// The mark phase should not have restarted: if the weak->strong
	// conversions had managed to create new mark work while the GC
	// was trying to terminate the mark phase, gcMarkDone would have
	// detected it and restarted, which is exactly the bug this test
	// guards against.
	if runtime.GCMarkDoneRestarted() {
		t.Errorf("gcMarkDone restarted")
	}
}
867
View as plain text