Source file src/runtime/proc.go
1
2
3
4
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/stringslite"
15 "runtime/internal/sys"
16 "unsafe"
17 )
18
19
20 var modinfo string
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall without an associated P.
//
// Design doc at https://golang.org/s/go11sched.
116 var (
117 m0 m
118 g0 g
119 mcache0 *mcache
120 raceprocctx0 uintptr
121 raceFiniLock mutex
122 )
123
124
125
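// runtime_inittasks holds the initialization tasks needed to start up the runtime itself; it is populated by the linker.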
126 var runtime_inittasks []*initTask
127
128
129
130
131
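// main_init_done is closed once the main package's init functions have completed, so cgo callbacks can tell whether initialization has finished.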
132 var main_init_done chan bool
133
134
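// main_main is the main package's main function, linked in by the linker.
//
//go:linkname main_main main.main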
135 func main_main()
136
137
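// mainStarted indicates that the main M has started.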
138 var mainStarted bool
139
140
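// runtimeInitTime is the nanotime() at which the runtime started.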
141 var runtimeInitTime int64
142
143
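// initSigmask is the signal mask to use for newly created Ms.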
144 var initSigmask sigset
145
146
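// main is the main goroutine: it finishes runtime initialization, runs all package init functions, and then calls the user program's main.main.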
147 func main() {
148 mp := getg().m
149
150
151
152 mp.g0.racectx = 0
153
154
155
156
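// Allow a maximum of 1 GB of stack on 64-bit platforms and 250 MB on 32-bit ones; decimal sizes keep the "out of stack" failure message easy to read.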
157 if goarch.PtrSize == 8 {
158 maxstacksize = 1000000000
159 } else {
160 maxstacksize = 250000000
161 }
162
163
164
165
166 maxstackceiling = 2 * maxstacksize
167
168
169 mainStarted = true
170
171 if haveSysmon {
172 systemstack(func() {
173 newm(sysmon, nil, -1)
174 })
175 }
176
177
178
179
180
181
182
183 lockOSThread()
184
185 if mp != &m0 {
186 throw("runtime.main not on m0")
187 }
188
189
190
191 runtimeInitTime = nanotime()
192 if runtimeInitTime == 0 {
193 throw("nanotime returning zero")
194 }
195
196 if debug.inittrace != 0 {
197 inittrace.id = getg().goid
198 inittrace.active = true
199 }
200
201 doInit(runtime_inittasks)
202
203
204 needUnlock := true
205 defer func() {
206 if needUnlock {
207 unlockOSThread()
208 }
209 }()
210
211 gcenable()
212
213 main_init_done = make(chan bool)
214 if iscgo {
215 if _cgo_pthread_key_created == nil {
216 throw("_cgo_pthread_key_created missing")
217 }
218
219 if _cgo_thread_start == nil {
220 throw("_cgo_thread_start missing")
221 }
222 if GOOS != "windows" {
223 if _cgo_setenv == nil {
224 throw("_cgo_setenv missing")
225 }
226 if _cgo_unsetenv == nil {
227 throw("_cgo_unsetenv missing")
228 }
229 }
230 if _cgo_notify_runtime_init_done == nil {
231 throw("_cgo_notify_runtime_init_done missing")
232 }
233
234
235 if set_crosscall2 == nil {
236 throw("set_crosscall2 missing")
237 }
238 set_crosscall2()
239
240
241
242 startTemplateThread()
243 cgocall(_cgo_notify_runtime_init_done, nil)
244 }
245
246
247
248
249
250
251
252
253 for m := &firstmoduledata; m != nil; m = m.next {
254 doInit(m.inittasks)
255 }
256
257
258
259 inittrace.active = false
260
261 close(main_init_done)
262
263 needUnlock = false
264 unlockOSThread()
265
266 if isarchive || islibrary {
267
268
269 return
270 }
271 fn := main_main
272 fn()
273 if raceenabled {
274 runExitHooks(0)
275 racefini()
276 }
277
278
279
280
281
282 if runningPanicDefers.Load() != 0 {
283
284 for c := 0; c < 1000; c++ {
285 if runningPanicDefers.Load() == 0 {
286 break
287 }
288 Gosched()
289 }
290 }
291 if panicking.Load() != 0 {
292 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
293 }
294 runExitHooks(0)
295
296 exit(0)
297 for {
298 var x *int32
299 *x = 0
300 }
301 }
302
303
304
305
306 func os_beforeExit(exitCode int) {
307 runExitHooks(exitCode)
308 if exitCode == 0 && raceenabled {
309 racefini()
310 }
311 }
312
313 func init() {
314 exithook.Gosched = Gosched
315 exithook.Goid = func() uint64 { return getg().goid }
316 exithook.Throw = throw
317 }
318
319 func runExitHooks(code int) {
320 exithook.Run(code)
321 }
322
323
324 func init() {
325 go forcegchelper()
326 }
327
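// forcegchelper is the goroutine behind forced (periodic) GCs: it parks on forcegc.lock until sysmon notices a GC is overdue and wakes it, then starts a GC cycle.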
328 func forcegchelper() {
329 forcegc.g = getg()
330 lockInit(&forcegc.lock, lockRankForcegc)
331 for {
332 lock(&forcegc.lock)
333 if forcegc.idle.Load() {
334 throw("forcegc: phase error")
335 }
336 forcegc.idle.Store(true)
337 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
338
339 if debug.gctrace > 0 {
340 println("GC forced")
341 }
342
343 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
344 }
345 }
346
347
348
349
350
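// Gosched yields the processor, allowing other goroutines to run. It does not suspend the current goroutine, so execution resumes automatically.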
351 func Gosched() {
352 checkTimeouts()
353 mcall(gosched_m)
354 }
355
356
357
358
359
360 func goschedguarded() {
361 mcall(goschedguarded_m)
362 }
363
364
365
366
367
368
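// goschedIfBusy yields the processor like Gosched, but skips the yield when there are idle Ps and this goroutine has not been asked to preempt.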
369 func goschedIfBusy() {
370 gp := getg()
371
372
373 if !gp.preempt && sched.npidle.Load() > 0 {
374 return
375 }
376 mcall(gosched_m)
377 }
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
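// gopark puts the current goroutine into a waiting state and calls unlockf(gp, lock) from the system stack; if unlockf returns false the goroutine is resumed immediately. The goroutine can be made runnable again with goready. reason explains why it is parked and shows up in stack traces.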
407 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
408 if reason != waitReasonSleep {
409 checkTimeouts()
410 }
411 mp := acquirem()
412 gp := mp.curg
413 status := readgstatus(gp)
414 if status != _Grunning && status != _Gscanrunning {
415 throw("gopark: bad g status")
416 }
417 mp.waitlock = lock
418 mp.waitunlockf = unlockf
419 gp.waitreason = reason
420 mp.waitTraceBlockReason = traceReason
421 mp.waitTraceSkip = traceskip
422 releasem(mp)
423
424 mcall(park_m)
425 }
426
427
428
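// goparkunlock puts the current goroutine into a waiting state and unlocks lock; it can be made runnable again with goready.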
429 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
430 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
431 }
432
433
434
435
436
437
438
439
440
441
442
443 func goready(gp *g, traceskip int) {
444 systemstack(func() {
445 ready(gp, traceskip, true)
446 })
447 }
448
449
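// acquireSudog returns a sudog from the per-P cache, refilling from the central cache (or allocating a new one) when the per-P cache is empty.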
450 func acquireSudog() *sudog {
// Delicate dance: the semaphore implementation calls acquireSudog,
// acquireSudog calls new(sudog), new calls malloc, malloc can call the
// garbage collector, and the garbage collector calls the semaphore
// implementation in stopTheWorld. Break the cycle by doing
// acquirem/releasem around new(sudog): holding the m keeps the garbage
// collector from being invoked here.
459 mp := acquirem()
460 pp := mp.p.ptr()
461 if len(pp.sudogcache) == 0 {
462 lock(&sched.sudoglock)
463
464 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
465 s := sched.sudogcache
466 sched.sudogcache = s.next
467 s.next = nil
468 pp.sudogcache = append(pp.sudogcache, s)
469 }
470 unlock(&sched.sudoglock)
471
472 if len(pp.sudogcache) == 0 {
473 pp.sudogcache = append(pp.sudogcache, new(sudog))
474 }
475 }
476 n := len(pp.sudogcache)
477 s := pp.sudogcache[n-1]
478 pp.sudogcache[n-1] = nil
479 pp.sudogcache = pp.sudogcache[:n-1]
480 if s.elem != nil {
481 throw("acquireSudog: found s.elem != nil in cache")
482 }
483 releasem(mp)
484 return s
485 }
486
487
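// releaseSudog returns s to the per-P cache, first spilling half of a full cache back to the central list.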
488 func releaseSudog(s *sudog) {
489 if s.elem != nil {
490 throw("runtime: sudog with non-nil elem")
491 }
492 if s.isSelect {
493 throw("runtime: sudog with non-false isSelect")
494 }
495 if s.next != nil {
496 throw("runtime: sudog with non-nil next")
497 }
498 if s.prev != nil {
499 throw("runtime: sudog with non-nil prev")
500 }
501 if s.waitlink != nil {
502 throw("runtime: sudog with non-nil waitlink")
503 }
504 if s.c != nil {
505 throw("runtime: sudog with non-nil c")
506 }
507 gp := getg()
508 if gp.param != nil {
509 throw("runtime: releaseSudog with non-nil gp.param")
510 }
511 mp := acquirem()
512 pp := mp.p.ptr()
513 if len(pp.sudogcache) == cap(pp.sudogcache) {
514
515 var first, last *sudog
516 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
517 n := len(pp.sudogcache)
518 p := pp.sudogcache[n-1]
519 pp.sudogcache[n-1] = nil
520 pp.sudogcache = pp.sudogcache[:n-1]
521 if first == nil {
522 first = p
523 } else {
524 last.next = p
525 }
526 last = p
527 }
528 lock(&sched.sudoglock)
529 last.next = sched.sudogcache
530 sched.sudogcache = first
531 unlock(&sched.sudoglock)
532 }
533 pp.sudogcache = append(pp.sudogcache, s)
534 releasem(mp)
535 }
536
537
538 func badmcall(fn func(*g)) {
539 throw("runtime: mcall called on m->g0 stack")
540 }
541
542 func badmcall2(fn func(*g)) {
543 throw("runtime: mcall function returned")
544 }
545
546 func badreflectcall() {
547 panic(plainError("arg size to reflect.call more than 1GB"))
548 }
549
550
551
552 func badmorestackg0() {
553 if !crashStackImplemented {
554 writeErrStr("fatal: morestack on g0\n")
555 return
556 }
557
558 g := getg()
559 switchToCrashStack(func() {
560 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
561 g.m.traceback = 2
562 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
563 print("\n")
564
565 throw("morestack on g0")
566 })
567 }
568
569
570
571 func badmorestackgsignal() {
572 writeErrStr("fatal: morestack on gsignal\n")
573 }
574
575
576 func badctxt() {
577 throw("ctxt != 0")
578 }
579
580
581
582 var gcrash g
583
584 var crashingG atomic.Pointer[g]
585
586
587
588
589
590
591
592
593
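// switchToCrashStack switches to the dedicated crash stack (gcrash) and calls fn; it does not return. Only one goroutine may use the crash stack at a time.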
594 func switchToCrashStack(fn func()) {
595 me := getg()
596 if crashingG.CompareAndSwapNoWB(nil, me) {
597 switchToCrashStack0(fn)
598 abort()
599 }
600 if crashingG.Load() == me {
601
602 writeErrStr("fatal: recursive switchToCrashStack\n")
603 abort()
604 }
605
606 usleep_no_g(100)
607 writeErrStr("fatal: concurrent switchToCrashStack\n")
608 abort()
609 }
610
611
612
613
614 const crashStackImplemented = GOOS != "windows"
615
616
617 func switchToCrashStack0(fn func())
618
619 func lockedOSThread() bool {
620 gp := getg()
621 return gp.lockedm != 0 && gp.m.lockedg != 0
622 }
623
624 var (
625
626
627
628
629
630
631 allglock mutex
632 allgs []*g
633
634
635
636
637
638
639
640
641
642
643
644
645
646 allglen uintptr
647 allgptr **g
648 )
649
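// allgadd appends gp to allgs, updating allgptr and allglen so that lock-free readers (atomicAllG) stay consistent.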
650 func allgadd(gp *g) {
651 if readgstatus(gp) == _Gidle {
652 throw("allgadd: bad status Gidle")
653 }
654
655 lock(&allglock)
656 allgs = append(allgs, gp)
657 if &allgs[0] != allgptr {
658 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
659 }
660 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
661 unlock(&allglock)
662 }
663
664
665
666
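// allGsSnapshot returns a snapshot of the current allgs slice; the world must be stopped or allglock must be held.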
667 func allGsSnapshot() []*g {
668 assertWorldStoppedOrLockHeld(&allglock)
669
670
671
672
673
674
675 return allgs[:len(allgs):len(allgs)]
676 }
677
678
679 func atomicAllG() (**g, uintptr) {
680 length := atomic.Loaduintptr(&allglen)
681 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
682 return ptr, length
683 }
684
685
686 func atomicAllGIndex(ptr **g, i uintptr) *g {
687 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
688 }
689
690
691
692
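// forEachG calls fn on every G, holding allglock for the duration.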
693 func forEachG(fn func(gp *g)) {
694 lock(&allglock)
695 for _, gp := range allgs {
696 fn(gp)
697 }
698 unlock(&allglock)
699 }
700
701
702
703
704
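// forEachGRace calls fn on every G without holding allglock, so Gs added concurrently may be missed.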
705 func forEachGRace(fn func(gp *g)) {
706 ptr, length := atomicAllG()
707 for i := uintptr(0); i < length; i++ {
708 gp := atomicAllGIndex(ptr, i)
709 fn(gp)
710 }
711 return
712 }
713
714 const (
715
716
717 _GoidCacheBatch = 16
718 )
719
720
721
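// cpuinit sets up CPU feature detection (honoring GODEBUG cpu.* options passed in env) and copies the flags the runtime needs into package variables.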
722 func cpuinit(env string) {
723 switch GOOS {
724 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
725 cpu.DebugOptions = true
726 }
727 cpu.Initialize(env)
728
729
730
731 switch GOARCH {
732 case "386", "amd64":
733 x86HasPOPCNT = cpu.X86.HasPOPCNT
734 x86HasSSE41 = cpu.X86.HasSSE41
735 x86HasFMA = cpu.X86.HasFMA
736
737 case "arm":
738 armHasVFPv4 = cpu.ARM.HasVFPv4
739
740 case "arm64":
741 arm64HasATOMICS = cpu.ARM64.HasATOMICS
742 }
743 }
744
745
746
747
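// getGodebugEarly extracts GODEBUG from the environment before the normal environment setup has run, by scanning the raw environ entries that follow argv.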
748 func getGodebugEarly() string {
749 const prefix = "GODEBUG="
750 var env string
751 switch GOOS {
752 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
753
754
755
756 n := int32(0)
757 for argv_index(argv, argc+1+n) != nil {
758 n++
759 }
760
761 for i := int32(0); i < n; i++ {
762 p := argv_index(argv, argc+1+i)
763 s := unsafe.String(p, findnull(p))
764
765 if stringslite.HasPrefix(s, prefix) {
766 env = gostring(p)[len(prefix):]
767 break
768 }
769 }
770 }
771 return env
772 }
773
774
775
776
777
778
779
780
781
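// schedinit performs scheduler and runtime bootstrap: it initializes locks, the allocator, the current M, GC state, and creates GOMAXPROCS Ps. The bootstrap sequence is osinit, schedinit, then queuing a new G that runs runtime.main and calling mstart.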
782 func schedinit() {
783 lockInit(&sched.lock, lockRankSched)
784 lockInit(&sched.sysmonlock, lockRankSysmon)
785 lockInit(&sched.deferlock, lockRankDefer)
786 lockInit(&sched.sudoglock, lockRankSudog)
787 lockInit(&deadlock, lockRankDeadlock)
788 lockInit(&paniclk, lockRankPanic)
789 lockInit(&allglock, lockRankAllg)
790 lockInit(&allpLock, lockRankAllp)
791 lockInit(&reflectOffs.lock, lockRankReflectOffs)
792 lockInit(&finlock, lockRankFin)
793 lockInit(&cpuprof.lock, lockRankCpuprof)
794 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
795 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
796 traceLockInit()
797
798
799
800 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
801
802
803
804 gp := getg()
805 if raceenabled {
806 gp.racectx, raceprocctx0 = raceinit()
807 }
808
809 sched.maxmcount = 10000
810 crashFD.Store(^uintptr(0))
811
812
813 worldStopped()
814
815 ticks.init()
816 moduledataverify()
817 stackinit()
818 mallocinit()
819 godebug := getGodebugEarly()
820 cpuinit(godebug)
821 randinit()
822 alginit()
823 mcommoninit(gp.m, -1)
824 modulesinit()
825 typelinksinit()
826 itabsinit()
827 stkobjinit()
828
829 sigsave(&gp.m.sigmask)
830 initSigmask = gp.m.sigmask
831
832 goargs()
833 goenvs()
834 secure()
835 checkfds()
836 parsedebugvars()
837 gcinit()
838
839
840
841 gcrash.stack = stackalloc(16384)
842 gcrash.stackguard0 = gcrash.stack.lo + 1000
843 gcrash.stackguard1 = gcrash.stack.lo + 1000
844
845
846
847
848
849 if disableMemoryProfiling {
850 MemProfileRate = 0
851 }
852
853
854 mProfStackInit(gp.m)
855
856 lock(&sched.lock)
857 sched.lastpoll.Store(nanotime())
858 procs := ncpu
859 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
860 procs = n
861 }
862 if procresize(procs) != nil {
863 throw("unknown runnable goroutine during bootstrap")
864 }
865 unlock(&sched.lock)
866
867
868 worldStarted()
869
870 if buildVersion == "" {
871
872
873 buildVersion = "unknown"
874 }
875 if len(modinfo) == 1 {
876
877
878 modinfo = ""
879 }
880 }
881
882 func dumpgstatus(gp *g) {
883 thisg := getg()
884 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
885 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
886 }
887
888
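// checkmcount throws if the number of threads the runtime has created exceeds sched.maxmcount; Ms on the extra list are not counted. sched.lock must be held.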
889 func checkmcount() {
890 assertLockHeld(&sched.lock)
891
892
893
894
895
896
897
898
899
900 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
901 if count > sched.maxmcount {
902 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
903 throw("thread exhaustion")
904 }
905 }
906
907
908
909
910
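// mReserveID returns the next ID to use for a new m. sched.lock must be held.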
911 func mReserveID() int64 {
912 assertLockHeld(&sched.lock)
913
914 if sched.mnext+1 < sched.mnext {
915 throw("runtime: thread ID overflow")
916 }
917 id := sched.mnext
918 sched.mnext++
919 checkmcount()
920 return id
921 }
922
923
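// mcommoninit performs initialization common to all Ms: it assigns an ID, seeds the M's random state, pre-initializes signal handling, and links mp into allm.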
924 func mcommoninit(mp *m, id int64) {
925 gp := getg()
926
927
928 if gp != gp.m.g0 {
929 callers(1, mp.createstack[:])
930 }
931
932 lock(&sched.lock)
933
934 if id >= 0 {
935 mp.id = id
936 } else {
937 mp.id = mReserveID()
938 }
939
940 mrandinit(mp)
941
942 mpreinit(mp)
943 if mp.gsignal != nil {
944 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
945 }
946
947
948
949 mp.alllink = allm
950
951
952
953 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
954 unlock(&sched.lock)
955
956
957 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
958 mp.cgoCallers = new(cgoCallers)
959 }
960 mProfStackInit(mp)
961 }
962
963
964
965
966
967 func mProfStackInit(mp *m) {
968 if debug.profstackdepth == 0 {
969
970
971 return
972 }
973 mp.profStack = makeProfStackFP()
974 mp.mLockProfile.stack = makeProfStackFP()
975 }
976
977
978
979
980 func makeProfStackFP() []uintptr {
981
982
983
984
985
986
987 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
988 }
989
990
991
992 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
993
994
995 func pprof_makeProfStack() []uintptr { return makeProfStack() }
996
997 func (mp *m) becomeSpinning() {
998 mp.spinning = true
999 sched.nmspinning.Add(1)
1000 sched.needspinning.Store(0)
1001 }
1002
1003 func (mp *m) hasCgoOnStack() bool {
1004 return mp.ncgo > 0 || mp.isextra
1005 }
1006
1007 const (
1008
1009
1010 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1011
1012
1013
1014 osHasLowResClockInt = goos.IsWindows
1015
1016
1017
1018 osHasLowResClock = osHasLowResClockInt > 0
1019 )
1020
1021
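// ready marks gp, which must be in _Gwaiting, as runnable, puts it on the current P's run queue, and wakes another P if one is needed.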
1022 func ready(gp *g, traceskip int, next bool) {
1023 status := readgstatus(gp)
1024
1025
1026 mp := acquirem()
1027 if status&^_Gscan != _Gwaiting {
1028 dumpgstatus(gp)
1029 throw("bad g->status in ready")
1030 }
1031
1032
1033 trace := traceAcquire()
1034 casgstatus(gp, _Gwaiting, _Grunnable)
1035 if trace.ok() {
1036 trace.GoUnpark(gp, traceskip)
1037 traceRelease(trace)
1038 }
1039 runqput(mp.p.ptr(), gp, next)
1040 wakep()
1041 releasem(mp)
1042 }
1043
1044
1045
1046 const freezeStopWait = 0x7fffffff
1047
1048
1049
1050 var freezing atomic.Bool
1051
1052
1053
1054
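// freezetheworld is a best-effort stop of all goroutines, used while crashing; unlike stopTheWorld there is no way to undo it.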
1055 func freezetheworld() {
1056 freezing.Store(true)
1057 if debug.dontfreezetheworld > 0 {
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082 usleep(1000)
1083 return
1084 }
1085
1086
1087
1088
1089 for i := 0; i < 5; i++ {
1090
1091 sched.stopwait = freezeStopWait
1092 sched.gcwaiting.Store(true)
1093
1094 if !preemptall() {
1095 break
1096 }
1097 usleep(1000)
1098 }
1099
1100 usleep(1000)
1101 preemptall()
1102 usleep(1000)
1103 }
1104
1105
1106
1107
1108
1109 func readgstatus(gp *g) uint32 {
1110 return gp.atomicstatus.Load()
1111 }
1112
1113
1114
1115
1116
1117 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1118 success := false
1119
1120
1121 switch oldval {
1122 default:
1123 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1124 dumpgstatus(gp)
1125 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1126 case _Gscanrunnable,
1127 _Gscanwaiting,
1128 _Gscanrunning,
1129 _Gscansyscall,
1130 _Gscanpreempted:
1131 if newval == oldval&^_Gscan {
1132 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1133 }
1134 }
1135 if !success {
1136 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1137 dumpgstatus(gp)
1138 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1139 }
1140 releaseLockRankAndM(lockRankGscan)
1141 }
1142
1143
1144
1145 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1146 switch oldval {
1147 case _Grunnable,
1148 _Grunning,
1149 _Gwaiting,
1150 _Gsyscall:
1151 if newval == oldval|_Gscan {
1152 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1153 if r {
1154 acquireLockRankAndM(lockRankGscan)
1155 }
1156 return r
1157
1158 }
1159 }
1160 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1161 throw("castogscanstatus")
1162 panic("not reached")
1163 }
1164
1165
1166
1167 var casgstatusAlwaysTrack = false
1168
1169
1170
1171
1172
1173
1174
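// casgstatus transitions gp from oldval to newval. It must not be used with _Gscan states (use castogscanstatus/casfrom_Gscanstatus for those) and loops while a scan temporarily holds the status.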
1175 func casgstatus(gp *g, oldval, newval uint32) {
1176 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1177 systemstack(func() {
1178
1179
1180 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1181 throw("casgstatus: bad incoming values")
1182 })
1183 }
1184
1185 lockWithRankMayAcquire(nil, lockRankGscan)
1186
1187
1188 const yieldDelay = 5 * 1000
1189 var nextYield int64
1190
1191
1192
1193 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1194 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1195 systemstack(func() {
1196
1197
1198 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1199 })
1200 }
1201 if i == 0 {
1202 nextYield = nanotime() + yieldDelay
1203 }
1204 if nanotime() < nextYield {
1205 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1206 procyield(1)
1207 }
1208 } else {
1209 osyield()
1210 nextYield = nanotime() + yieldDelay/2
1211 }
1212 }
1213
1214 if oldval == _Grunning {
1215
1216 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1217 gp.tracking = true
1218 }
1219 gp.trackingSeq++
1220 }
1221 if !gp.tracking {
1222 return
1223 }
1224
1225
1226
1227
1228
1229
1230 switch oldval {
1231 case _Grunnable:
1232
1233
1234
1235 now := nanotime()
1236 gp.runnableTime += now - gp.trackingStamp
1237 gp.trackingStamp = 0
1238 case _Gwaiting:
1239 if !gp.waitreason.isMutexWait() {
1240
1241 break
1242 }
1243
1244
1245
1246
1247
1248 now := nanotime()
1249 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1250 gp.trackingStamp = 0
1251 }
1252 switch newval {
1253 case _Gwaiting:
1254 if !gp.waitreason.isMutexWait() {
1255
1256 break
1257 }
1258
1259 now := nanotime()
1260 gp.trackingStamp = now
1261 case _Grunnable:
1262
1263
1264 now := nanotime()
1265 gp.trackingStamp = now
1266 case _Grunning:
1267
1268
1269
1270 gp.tracking = false
1271 sched.timeToRun.record(gp.runnableTime)
1272 gp.runnableTime = 0
1273 }
1274 }
1275
1276
1277
1278
1279 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1280
1281 gp.waitreason = reason
1282 casgstatus(gp, old, _Gwaiting)
1283 }
1284
1285
1286
1287
1288
1289 func casGToWaitingForGC(gp *g, old uint32, reason waitReason) {
1290 if !reason.isWaitingForGC() {
1291 throw("casGToWaitingForGC with non-isWaitingForGC wait reason")
1292 }
1293 casGToWaiting(gp, old, reason)
1294 }
1295
1296
1297
1298
1299
1300
1301
1302
1303 func casgcopystack(gp *g) uint32 {
1304 for {
1305 oldstatus := readgstatus(gp) &^ _Gscan
1306 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
1307 throw("copystack: bad status, not Gwaiting or Grunnable")
1308 }
1309 if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
1310 return oldstatus
1311 }
1312 }
1313 }
1314
1315
1316
1317
1318
1319 func casGToPreemptScan(gp *g, old, new uint32) {
1320 if old != _Grunning || new != _Gscan|_Gpreempted {
1321 throw("bad g transition")
1322 }
1323 acquireLockRankAndM(lockRankGscan)
1324 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1325 }
1326 }
1327
1328
1329
1330
1331 func casGFromPreempted(gp *g, old, new uint32) bool {
1332 if old != _Gpreempted || new != _Gwaiting {
1333 throw("bad g transition")
1334 }
1335 gp.waitreason = waitReasonPreempted
1336 return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
1337 }
1338
1339
1340 type stwReason uint8
1341
1342
1343
1344
1345 const (
1346 stwUnknown stwReason = iota
1347 stwGCMarkTerm
1348 stwGCSweepTerm
1349 stwWriteHeapDump
1350 stwGoroutineProfile
1351 stwGoroutineProfileCleanup
1352 stwAllGoroutinesStack
1353 stwReadMemStats
1354 stwAllThreadsSyscall
1355 stwGOMAXPROCS
1356 stwStartTrace
1357 stwStopTrace
1358 stwForTestCountPagesInUse
1359 stwForTestReadMetricsSlow
1360 stwForTestReadMemStatsSlow
1361 stwForTestPageCachePagesLeaked
1362 stwForTestResetDebugLog
1363 )
1364
1365 func (r stwReason) String() string {
1366 return stwReasonStrings[r]
1367 }
1368
1369 func (r stwReason) isGC() bool {
1370 return r == stwGCMarkTerm || r == stwGCSweepTerm
1371 }
1372
1373
1374
1375
1376 var stwReasonStrings = [...]string{
1377 stwUnknown: "unknown",
1378 stwGCMarkTerm: "GC mark termination",
1379 stwGCSweepTerm: "GC sweep termination",
1380 stwWriteHeapDump: "write heap dump",
1381 stwGoroutineProfile: "goroutine profile",
1382 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1383 stwAllGoroutinesStack: "all goroutines stack trace",
1384 stwReadMemStats: "read mem stats",
1385 stwAllThreadsSyscall: "AllThreadsSyscall",
1386 stwGOMAXPROCS: "GOMAXPROCS",
1387 stwStartTrace: "start trace",
1388 stwStopTrace: "stop trace",
1389 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1390 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1391 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1392 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1393 stwForTestResetDebugLog: "ResetDebugLog (test)",
1394 }
1395
1396
1397
1398 type worldStop struct {
1399 reason stwReason
1400 startedStopping int64
1401 finishedStopping int64
1402 stoppingCPUTime int64
1403 }
1404
1405
1406
1407
1408 var stopTheWorldContext worldStop
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
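// stopTheWorld stops all Ps from executing goroutines, bringing every goroutine to a safe point, and records reason for the stop. On return only the current goroutine's P is running; the caller must call startTheWorld afterwards.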
1427 func stopTheWorld(reason stwReason) worldStop {
1428 semacquire(&worldsema)
1429 gp := getg()
1430 gp.m.preemptoff = reason.String()
1431 systemstack(func() {
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446 casGToWaitingForGC(gp, _Grunning, waitReasonStoppingTheWorld)
1447 stopTheWorldContext = stopTheWorldWithSema(reason)
1448 casgstatus(gp, _Gwaiting, _Grunning)
1449 })
1450 return stopTheWorldContext
1451 }
1452
1453
1454
1455
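// startTheWorld undoes the effects of stopTheWorld.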
1456 func startTheWorld(w worldStop) {
1457 systemstack(func() { startTheWorldWithSema(0, w) })
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474 mp := acquirem()
1475 mp.preemptoff = ""
1476 semrelease1(&worldsema, true, 0)
1477 releasem(mp)
1478 }
1479
1480
1481
1482
1483 func stopTheWorldGC(reason stwReason) worldStop {
1484 semacquire(&gcsema)
1485 return stopTheWorld(reason)
1486 }
1487
1488
1489
1490
1491 func startTheWorldGC(w worldStop) {
1492 startTheWorld(w)
1493 semrelease(&gcsema)
1494 }
1495
1496
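// Holding worldsema grants an M the right to try to stop the world; gcsema (below) additionally serializes GC-related stop-the-world operations.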
1497 var worldsema uint32 = 1
1498
1499
1500
1501
1502
1503
1504
1505 var gcsema uint32 = 1
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
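// stopTheWorldWithSema is the core stop-the-world implementation. It sets gcwaiting, preempts running goroutines, takes Ps out of syscalls and off the idle list, and waits on stopnote until every P has reached _Pgcstop. The caller must hold worldsema.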
1537 func stopTheWorldWithSema(reason stwReason) worldStop {
1538 trace := traceAcquire()
1539 if trace.ok() {
1540 trace.STWStart(reason)
1541 traceRelease(trace)
1542 }
1543 gp := getg()
1544
1545
1546
1547 if gp.m.locks > 0 {
1548 throw("stopTheWorld: holding locks")
1549 }
1550
1551 lock(&sched.lock)
1552 start := nanotime()
1553 sched.stopwait = gomaxprocs
1554 sched.gcwaiting.Store(true)
1555 preemptall()
1556
1557 gp.m.p.ptr().status = _Pgcstop
1558 gp.m.p.ptr().gcStopTime = start
1559 sched.stopwait--
1560
1561 trace = traceAcquire()
1562 for _, pp := range allp {
1563 s := pp.status
1564 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1565 if trace.ok() {
1566 trace.ProcSteal(pp, false)
1567 }
1568 pp.syscalltick++
1569 pp.gcStopTime = nanotime()
1570 sched.stopwait--
1571 }
1572 }
1573 if trace.ok() {
1574 traceRelease(trace)
1575 }
1576
1577
1578 now := nanotime()
1579 for {
1580 pp, _ := pidleget(now)
1581 if pp == nil {
1582 break
1583 }
1584 pp.status = _Pgcstop
1585 pp.gcStopTime = nanotime()
1586 sched.stopwait--
1587 }
1588 wait := sched.stopwait > 0
1589 unlock(&sched.lock)
1590
1591
1592 if wait {
1593 for {
1594
1595 if notetsleep(&sched.stopnote, 100*1000) {
1596 noteclear(&sched.stopnote)
1597 break
1598 }
1599 preemptall()
1600 }
1601 }
1602
1603 finish := nanotime()
1604 startTime := finish - start
1605 if reason.isGC() {
1606 sched.stwStoppingTimeGC.record(startTime)
1607 } else {
1608 sched.stwStoppingTimeOther.record(startTime)
1609 }
1610
1611
1612
1613
1614
1615 stoppingCPUTime := int64(0)
1616 bad := ""
1617 if sched.stopwait != 0 {
1618 bad = "stopTheWorld: not stopped (stopwait != 0)"
1619 } else {
1620 for _, pp := range allp {
1621 if pp.status != _Pgcstop {
1622 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1623 }
1624 if pp.gcStopTime == 0 && bad == "" {
1625 bad = "stopTheWorld: broken CPU time accounting"
1626 }
1627 stoppingCPUTime += finish - pp.gcStopTime
1628 pp.gcStopTime = 0
1629 }
1630 }
1631 if freezing.Load() {
1632
1633
1634
1635
1636 lock(&deadlock)
1637 lock(&deadlock)
1638 }
1639 if bad != "" {
1640 throw(bad)
1641 }
1642
1643 worldStopped()
1644
1645 return worldStop{
1646 reason: reason,
1647 startedStopping: start,
1648 finishedStopping: finish,
1649 stoppingCPUTime: stoppingCPUTime,
1650 }
1651 }
1652
1653
1654
1655
1656
1657
1658
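// startTheWorldWithSema undoes stopTheWorldWithSema: it reinstalls Ps (honoring any pending GOMAXPROCS change), wakes or creates Ms to run them, records stop-the-world timing, and returns the current time.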
1659 func startTheWorldWithSema(now int64, w worldStop) int64 {
1660 assertWorldStopped()
1661
1662 mp := acquirem()
1663 if netpollinited() {
1664 list, delta := netpoll(0)
1665 injectglist(&list)
1666 netpollAdjustWaiters(delta)
1667 }
1668 lock(&sched.lock)
1669
1670 procs := gomaxprocs
1671 if newprocs != 0 {
1672 procs = newprocs
1673 newprocs = 0
1674 }
1675 p1 := procresize(procs)
1676 sched.gcwaiting.Store(false)
1677 if sched.sysmonwait.Load() {
1678 sched.sysmonwait.Store(false)
1679 notewakeup(&sched.sysmonnote)
1680 }
1681 unlock(&sched.lock)
1682
1683 worldStarted()
1684
1685 for p1 != nil {
1686 p := p1
1687 p1 = p1.link.ptr()
1688 if p.m != 0 {
1689 mp := p.m.ptr()
1690 p.m = 0
1691 if mp.nextp != 0 {
1692 throw("startTheWorld: inconsistent mp->nextp")
1693 }
1694 mp.nextp.set(p)
1695 notewakeup(&mp.park)
1696 } else {
1697
1698 newm(nil, p, -1)
1699 }
1700 }
1701
1702
1703 if now == 0 {
1704 now = nanotime()
1705 }
1706 totalTime := now - w.startedStopping
1707 if w.reason.isGC() {
1708 sched.stwTotalTimeGC.record(totalTime)
1709 } else {
1710 sched.stwTotalTimeOther.record(totalTime)
1711 }
1712 trace := traceAcquire()
1713 if trace.ok() {
1714 trace.STWDone()
1715 traceRelease(trace)
1716 }
1717
1718
1719
1720
1721 wakep()
1722
1723 releasem(mp)
1724
1725 return now
1726 }
1727
1728
1729
1730 func usesLibcall() bool {
1731 switch GOOS {
1732 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1733 return true
1734 case "openbsd":
1735 return GOARCH != "mips64"
1736 }
1737 return false
1738 }
1739
1740
1741
1742 func mStackIsSystemAllocated() bool {
1743 switch GOOS {
1744 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1745 return true
1746 case "openbsd":
1747 return GOARCH != "mips64"
1748 }
1749 return false
1750 }
1751
1752
1753
1754 func mstart()
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
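// mstart0 is the Go entry point for new Ms. It establishes stack bounds for g0 (which may live on an OS-allocated stack), runs mstart1, and finally tears the M down via mexit.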
1765 func mstart0() {
1766 gp := getg()
1767
1768 osStack := gp.stack.lo == 0
1769 if osStack {
1770
1771
1772
1773
1774
1775
1776
1777
1778 size := gp.stack.hi
1779 if size == 0 {
1780 size = 16384 * sys.StackGuardMultiplier
1781 }
1782 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1783 gp.stack.lo = gp.stack.hi - size + 1024
1784 }
1785
1786
1787 gp.stackguard0 = gp.stack.lo + stackGuard
1788
1789
1790 gp.stackguard1 = gp.stackguard0
1791 mstart1()
1792
1793
1794 if mStackIsSystemAllocated() {
1795
1796
1797
1798 osStack = true
1799 }
1800 mexit(osStack)
1801 }
1802
1803
1804
1805
1806
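// mstart1 finishes M setup: it records the caller's pc/sp so the scheduler can return here to exit the thread, runs asminit/minit, runs the M's start function if any, acquires the M's designated P (except for m0), and enters the scheduler.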
1807 func mstart1() {
1808 gp := getg()
1809
1810 if gp != gp.m.g0 {
1811 throw("bad runtime·mstart")
1812 }
1813
1814
1815
1816
1817
1818
1819
1820 gp.sched.g = guintptr(unsafe.Pointer(gp))
1821 gp.sched.pc = getcallerpc()
1822 gp.sched.sp = getcallersp()
1823
1824 asminit()
1825 minit()
1826
1827
1828
1829 if gp.m == &m0 {
1830 mstartm0()
1831 }
1832
1833 if fn := gp.m.mstartfn; fn != nil {
1834 fn()
1835 }
1836
1837 if gp.m != &m0 {
1838 acquirep(gp.m.nextp.ptr())
1839 gp.m.nextp = 0
1840 }
1841 schedule()
1842 }
1843
1844
1845
1846
1847
1848
1849
1850 func mstartm0() {
1851
1852
1853
1854 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1855 cgoHasExtraM = true
1856 newextram()
1857 }
1858 initsig(false)
1859 }
1860
1861
1862
1863
1864 func mPark() {
1865 gp := getg()
1866 notesleep(&gp.m.park)
1867 noteclear(&gp.m.park)
1868 }
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
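// mexit tears down and exits the current thread. The main thread (m0) cannot be torn down, so it hands off its P and parks forever instead. Other Ms are removed from allm and placed on sched.freem so their g0 stacks can be freed later.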
1880 func mexit(osStack bool) {
1881 mp := getg().m
1882
1883 if mp == &m0 {
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895 handoffp(releasep())
1896 lock(&sched.lock)
1897 sched.nmfreed++
1898 checkdead()
1899 unlock(&sched.lock)
1900 mPark()
1901 throw("locked m0 woke up")
1902 }
1903
1904 sigblock(true)
1905 unminit()
1906
1907
1908 if mp.gsignal != nil {
1909 stackfree(mp.gsignal.stack)
1910
1911
1912
1913
1914 mp.gsignal = nil
1915 }
1916
1917
1918 lock(&sched.lock)
1919 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1920 if *pprev == mp {
1921 *pprev = mp.alllink
1922 goto found
1923 }
1924 }
1925 throw("m not found in allm")
1926 found:
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941 mp.freeWait.Store(freeMWait)
1942 mp.freelink = sched.freem
1943 sched.freem = mp
1944 unlock(&sched.lock)
1945
1946 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
1947 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
1948
1949
1950 handoffp(releasep())
1951
1952
1953
1954
1955
1956 lock(&sched.lock)
1957 sched.nmfreed++
1958 checkdead()
1959 unlock(&sched.lock)
1960
1961 if GOOS == "darwin" || GOOS == "ios" {
1962
1963
1964 if mp.signalPending.Load() != 0 {
1965 pendingPreemptSignals.Add(-1)
1966 }
1967 }
1968
1969
1970
1971 mdestroy(mp)
1972
1973 if osStack {
1974
1975 mp.freeWait.Store(freeMRef)
1976
1977
1978
1979 return
1980 }
1981
1982
1983
1984
1985
1986 exitThread(&mp.freeWait)
1987 }
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
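// forEachP calls fn(p) for every P once it reaches a GC safe point; it runs on the system stack with the calling goroutine temporarily in a waiting state.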
1999 func forEachP(reason waitReason, fn func(*p)) {
2000 systemstack(func() {
2001 gp := getg().m.curg
2002
2003
2004
2005
2006
2007
2008
2009
2010 casGToWaitingForGC(gp, _Grunning, reason)
2011 forEachPInternal(fn)
2012 casgstatus(gp, _Gwaiting, _Grunning)
2013 })
2014 }
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025 func forEachPInternal(fn func(*p)) {
2026 mp := acquirem()
2027 pp := getg().m.p.ptr()
2028
2029 lock(&sched.lock)
2030 if sched.safePointWait != 0 {
2031 throw("forEachP: sched.safePointWait != 0")
2032 }
2033 sched.safePointWait = gomaxprocs - 1
2034 sched.safePointFn = fn
2035
2036
2037 for _, p2 := range allp {
2038 if p2 != pp {
2039 atomic.Store(&p2.runSafePointFn, 1)
2040 }
2041 }
2042 preemptall()
2043
2044
2045
2046
2047
2048
2049
2050 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2051 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2052 fn(p)
2053 sched.safePointWait--
2054 }
2055 }
2056
2057 wait := sched.safePointWait > 0
2058 unlock(&sched.lock)
2059
2060
2061 fn(pp)
2062
2063
2064
2065 for _, p2 := range allp {
2066 s := p2.status
2067
2068
2069
2070 trace := traceAcquire()
2071 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2072 if trace.ok() {
2073
2074 trace.ProcSteal(p2, false)
2075 traceRelease(trace)
2076 }
2077 p2.syscalltick++
2078 handoffp(p2)
2079 } else if trace.ok() {
2080 traceRelease(trace)
2081 }
2082 }
2083
2084
2085 if wait {
2086 for {
2087
2088
2089
2090
2091 if notetsleep(&sched.safePointNote, 100*1000) {
2092 noteclear(&sched.safePointNote)
2093 break
2094 }
2095 preemptall()
2096 }
2097 }
2098 if sched.safePointWait != 0 {
2099 throw("forEachP: not done")
2100 }
2101 for _, p2 := range allp {
2102 if p2.runSafePointFn != 0 {
2103 throw("forEachP: P did not run fn")
2104 }
2105 }
2106
2107 lock(&sched.lock)
2108 sched.safePointFn = nil
2109 unlock(&sched.lock)
2110 releasem(mp)
2111 }
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124 func runSafePointFn() {
2125 p := getg().m.p.ptr()
2126
2127
2128
2129 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2130 return
2131 }
2132 sched.safePointFn(p)
2133 lock(&sched.lock)
2134 sched.safePointWait--
2135 if sched.safePointWait == 0 {
2136 notewakeup(&sched.safePointNote)
2137 }
2138 unlock(&sched.lock)
2139 }
2140
2141
2142
2143
2144 var cgoThreadStart unsafe.Pointer
2145
2146 type cgothreadstart struct {
2147 g guintptr
2148 tls *uint64
2149 fn unsafe.Pointer
2150 }
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
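// allocm allocates a new m that is not yet associated with any OS thread. fn is recorded as the new m's startup function and id may be a pre-reserved m ID (or -1 to allocate one). It also frees Ms queued on sched.freem.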
2161 func allocm(pp *p, fn func(), id int64) *m {
2162 allocmLock.rlock()
2163
2164
2165
2166
2167 acquirem()
2168
2169 gp := getg()
2170 if gp.m.p == 0 {
2171 acquirep(pp)
2172 }
2173
2174
2175
2176 if sched.freem != nil {
2177 lock(&sched.lock)
2178 var newList *m
2179 for freem := sched.freem; freem != nil; {
2180
2181 wait := freem.freeWait.Load()
2182 if wait == freeMWait {
2183 next := freem.freelink
2184 freem.freelink = newList
2185 newList = freem
2186 freem = next
2187 continue
2188 }
2189
2190
2191
2192 if traceEnabled() || traceShuttingDown() {
2193 traceThreadDestroy(freem)
2194 }
2195
2196
2197
2198 if wait == freeMStack {
2199
2200
2201
2202 systemstack(func() {
2203 stackfree(freem.g0.stack)
2204 })
2205 }
2206 freem = freem.freelink
2207 }
2208 sched.freem = newList
2209 unlock(&sched.lock)
2210 }
2211
2212 mp := new(m)
2213 mp.mstartfn = fn
2214 mcommoninit(mp, id)
2215
2216
2217
2218 if iscgo || mStackIsSystemAllocated() {
2219 mp.g0 = malg(-1)
2220 } else {
2221 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2222 }
2223 mp.g0.m = mp
2224
2225 if pp == gp.m.p.ptr() {
2226 releasep()
2227 }
2228
2229 releasem(gp.m)
2230 allocmLock.runlock()
2231 return mp
2232 }
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
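// needm is called when a cgo callback arrives on a thread that has no m (a thread not created by Go). It takes an m from the extra list, installs it as the current thread's m, and sets up its g0 stack bounds.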
2273 func needm(signal bool) {
2274 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2275
2276
2277
2278
2279
2280
2281 writeErrStr("fatal error: cgo callback before cgo call\n")
2282 exit(1)
2283 }
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293 var sigmask sigset
2294 sigsave(&sigmask)
2295 sigblock(false)
2296
2297
2298
2299
2300 mp, last := getExtraM()
2301
2302
2303
2304
2305
2306
2307
2308
2309 mp.needextram = last
2310
2311
2312 mp.sigmask = sigmask
2313
2314
2315
2316 osSetupTLS(mp)
2317
2318
2319
2320 setg(mp.g0)
2321 sp := getcallersp()
2322 callbackUpdateSystemStack(mp, sp, signal)
2323
2324
2325
2326
2327 mp.isExtraInC = false
2328
2329
2330 asminit()
2331 minit()
2332
2333
2334
2335
2336
2337
2338 var trace traceLocker
2339 if !signal {
2340 trace = traceAcquire()
2341 }
2342
2343
2344 casgstatus(mp.curg, _Gdead, _Gsyscall)
2345 sched.ngsys.Add(-1)
2346
2347 if !signal {
2348 if trace.ok() {
2349 trace.GoCreateSyscall(mp.curg)
2350 traceRelease(trace)
2351 }
2352 }
2353 mp.isExtraInSig = signal
2354 }
2355
2356
2357
2358
2359 func needAndBindM() {
2360 needm(false)
2361
2362 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2363 cgoBindM()
2364 }
2365 }
2366
2367
2368
2369
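// newextram allocates ms for pending cgo callbacks and puts them on the extra list.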
2370 func newextram() {
2371 c := extraMWaiters.Swap(0)
2372 if c > 0 {
2373 for i := uint32(0); i < c; i++ {
2374 oneNewExtraM()
2375 }
2376 } else if extraMLength.Load() == 0 {
2377
2378 oneNewExtraM()
2379 }
2380 }
2381
2382
2383 func oneNewExtraM() {
2384
2385
2386
2387
2388
2389 mp := allocm(nil, nil, -1)
2390 gp := malg(4096)
2391 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2392 gp.sched.sp = gp.stack.hi
2393 gp.sched.sp -= 4 * goarch.PtrSize
2394 gp.sched.lr = 0
2395 gp.sched.g = guintptr(unsafe.Pointer(gp))
2396 gp.syscallpc = gp.sched.pc
2397 gp.syscallsp = gp.sched.sp
2398 gp.stktopsp = gp.sched.sp
2399
2400
2401
2402
2403 casgstatus(gp, _Gidle, _Gdead)
2404 gp.m = mp
2405 mp.curg = gp
2406 mp.isextra = true
2407
2408 mp.isExtraInC = true
2409 mp.lockedInt++
2410 mp.lockedg.set(gp)
2411 gp.lockedm.set(mp)
2412 gp.goid = sched.goidgen.Add(1)
2413 if raceenabled {
2414 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2415 }
2416
2417 allgadd(gp)
2418
2419
2420
2421
2422
2423 sched.ngsys.Add(1)
2424
2425
2426 addExtraM(mp)
2427 }
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
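// dropm puts the current m back on the extra list once a cgo callback has finished, so the thread can return to C without holding an m.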
2462 func dropm() {
2463
2464
2465
2466 mp := getg().m
2467
2468
2469
2470
2471
2472 var trace traceLocker
2473 if !mp.isExtraInSig {
2474 trace = traceAcquire()
2475 }
2476
2477
2478 casgstatus(mp.curg, _Gsyscall, _Gdead)
2479 mp.curg.preemptStop = false
2480 sched.ngsys.Add(1)
2481
2482 if !mp.isExtraInSig {
2483 if trace.ok() {
2484 trace.GoDestroySyscall()
2485 traceRelease(trace)
2486 }
2487 }
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502 mp.syscalltick--
2503
2504
2505
2506 mp.curg.trace.reset()
2507
2508
2509
2510
2511 if traceEnabled() || traceShuttingDown() {
2512
2513
2514
2515
2516
2517
2518
2519 lock(&sched.lock)
2520 traceThreadDestroy(mp)
2521 unlock(&sched.lock)
2522 }
2523 mp.isExtraInSig = false
2524
2525
2526
2527
2528
2529 sigmask := mp.sigmask
2530 sigblock(false)
2531 unminit()
2532
2533 setg(nil)
2534
2535
2536
2537 g0 := mp.g0
2538 g0.stack.hi = 0
2539 g0.stack.lo = 0
2540 g0.stackguard0 = 0
2541 g0.stackguard1 = 0
2542 mp.g0StackAccurate = false
2543
2544 putExtraM(mp)
2545
2546 msigrestore(sigmask)
2547 }
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
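// cgoBindM stores the current m's g0 in C thread-local storage via _cgo_bindm, so later cgo callbacks on this thread reuse the same m instead of going through the extra list.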
2569 func cgoBindM() {
2570 if GOOS == "windows" || GOOS == "plan9" {
2571 fatal("bindm in unexpected GOOS")
2572 }
2573 g := getg()
2574 if g.m.g0 != g {
2575 fatal("the current g is not g0")
2576 }
2577 if _cgo_bindm != nil {
2578 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2579 }
2580 }
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593 func getm() uintptr {
2594 return uintptr(unsafe.Pointer(getg().m))
2595 }
2596
2597 var (
2598
2599
2600
2601
2602
2603
2604 extraM atomic.Uintptr
2605
2606 extraMLength atomic.Uint32
2607
2608 extraMWaiters atomic.Uint32
2609
2610
2611 extraMInUse atomic.Uint32
2612 )
2613
2614
2615
2616
2617
2618
2619
2620
2621 func lockextra(nilokay bool) *m {
2622 const locked = 1
2623
2624 incr := false
2625 for {
2626 old := extraM.Load()
2627 if old == locked {
2628 osyield_no_g()
2629 continue
2630 }
2631 if old == 0 && !nilokay {
2632 if !incr {
2633
2634
2635
2636 extraMWaiters.Add(1)
2637 incr = true
2638 }
2639 usleep_no_g(1)
2640 continue
2641 }
2642 if extraM.CompareAndSwap(old, locked) {
2643 return (*m)(unsafe.Pointer(old))
2644 }
2645 osyield_no_g()
2646 continue
2647 }
2648 }
2649
2650
2651 func unlockextra(mp *m, delta int32) {
2652 extraMLength.Add(delta)
2653 extraM.Store(uintptr(unsafe.Pointer(mp)))
2654 }
2655
2656
2657
2658
2659
2660
2661
2662
2663 func getExtraM() (mp *m, last bool) {
2664 mp = lockextra(false)
2665 extraMInUse.Add(1)
2666 unlockextra(mp.schedlink.ptr(), -1)
2667 return mp, mp.schedlink.ptr() == nil
2668 }
2669
2670
2671
2672
2673
2674 func putExtraM(mp *m) {
2675 extraMInUse.Add(-1)
2676 addExtraM(mp)
2677 }
2678
2679
2680
2681
2682 func addExtraM(mp *m) {
2683 mnext := lockextra(true)
2684 mp.schedlink.set(mnext)
2685 unlockextra(mp, 1)
2686 }
2687
2688 var (
2689
2690
2691
2692 allocmLock rwmutex
2693
2694
2695
2696
2697 execLock rwmutex
2698 )
2699
2700
2701
2702 const (
2703 failthreadcreate = "runtime: failed to create new OS thread\n"
2704 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2705 )
2706
2707
2708
2709
2710 var newmHandoff struct {
2711 lock mutex
2712
2713
2714
2715 newm muintptr
2716
2717
2718
2719 waiting bool
2720 wake note
2721
2722
2723
2724
2725 haveTemplateThread uint32
2726 }
2727
2728
2729
2730
2731
2732
2733
2734
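// newm creates a new m that will start by running fn (or entering the scheduler if fn is nil) and will take ownership of pp. If the calling thread is locked or otherwise unsuitable for creating OS threads, creation is handed off to the template thread.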
2735 func newm(fn func(), pp *p, id int64) {
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746 acquirem()
2747
2748 mp := allocm(pp, fn, id)
2749 mp.nextp.set(pp)
2750 mp.sigmask = initSigmask
2751 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763 lock(&newmHandoff.lock)
2764 if newmHandoff.haveTemplateThread == 0 {
2765 throw("on a locked thread with no template thread")
2766 }
2767 mp.schedlink = newmHandoff.newm
2768 newmHandoff.newm.set(mp)
2769 if newmHandoff.waiting {
2770 newmHandoff.waiting = false
2771 notewakeup(&newmHandoff.wake)
2772 }
2773 unlock(&newmHandoff.lock)
2774
2775
2776
2777 releasem(getg().m)
2778 return
2779 }
2780 newm1(mp)
2781 releasem(getg().m)
2782 }
2783
2784 func newm1(mp *m) {
2785 if iscgo {
2786 var ts cgothreadstart
2787 if _cgo_thread_start == nil {
2788 throw("_cgo_thread_start missing")
2789 }
2790 ts.g.set(mp.g0)
2791 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2792 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2793 if msanenabled {
2794 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2795 }
2796 if asanenabled {
2797 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2798 }
2799 execLock.rlock()
2800 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2801 execLock.runlock()
2802 return
2803 }
2804 execLock.rlock()
2805 newosproc(mp)
2806 execLock.runlock()
2807 }
2808
2809
2810
2811
2812
2813 func startTemplateThread() {
2814 if GOARCH == "wasm" {
2815 return
2816 }
2817
2818
2819
2820 mp := acquirem()
2821 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2822 releasem(mp)
2823 return
2824 }
2825 newm(templateThread, nil, -1)
2826 releasem(mp)
2827 }
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
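// templateThread is a thread parked in a known-good state whose only job is to create new threads on behalf of Ms whose own threads are unsuitable for that (for example, locked or cgo-created threads).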
2841 func templateThread() {
2842 lock(&sched.lock)
2843 sched.nmsys++
2844 checkdead()
2845 unlock(&sched.lock)
2846
2847 for {
2848 lock(&newmHandoff.lock)
2849 for newmHandoff.newm != 0 {
2850 newm := newmHandoff.newm.ptr()
2851 newmHandoff.newm = 0
2852 unlock(&newmHandoff.lock)
2853 for newm != nil {
2854 next := newm.schedlink.ptr()
2855 newm.schedlink = 0
2856 newm1(newm)
2857 newm = next
2858 }
2859 lock(&newmHandoff.lock)
2860 }
2861 newmHandoff.waiting = true
2862 noteclear(&newmHandoff.wake)
2863 unlock(&newmHandoff.lock)
2864 notesleep(&newmHandoff.wake)
2865 }
2866 }
2867
2868
2869
2870 func stopm() {
2871 gp := getg()
2872
2873 if gp.m.locks != 0 {
2874 throw("stopm holding locks")
2875 }
2876 if gp.m.p != 0 {
2877 throw("stopm holding p")
2878 }
2879 if gp.m.spinning {
2880 throw("stopm spinning")
2881 }
2882
2883 lock(&sched.lock)
2884 mput(gp.m)
2885 unlock(&sched.lock)
2886 mPark()
2887 acquirep(gp.m.nextp.ptr())
2888 gp.m.nextp = 0
2889 }
2890
2891 func mspinning() {
2892
2893 getg().m.spinning = true
2894 }
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
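// startm schedules some M to run pp, creating an M if necessary. If pp is nil it tries to take an idle P and returns if none is available. spinning indicates the new M should look for more work; lockheld indicates sched.lock is already held by the caller.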
2913 func startm(pp *p, spinning, lockheld bool) {
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930 mp := acquirem()
2931 if !lockheld {
2932 lock(&sched.lock)
2933 }
2934 if pp == nil {
2935 if spinning {
2936
2937
2938
2939 throw("startm: P required for spinning=true")
2940 }
2941 pp, _ = pidleget(0)
2942 if pp == nil {
2943 if !lockheld {
2944 unlock(&sched.lock)
2945 }
2946 releasem(mp)
2947 return
2948 }
2949 }
2950 nmp := mget()
2951 if nmp == nil {
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966 id := mReserveID()
2967 unlock(&sched.lock)
2968
2969 var fn func()
2970 if spinning {
2971
2972 fn = mspinning
2973 }
2974 newm(fn, pp, id)
2975
2976 if lockheld {
2977 lock(&sched.lock)
2978 }
2979
2980
2981 releasem(mp)
2982 return
2983 }
2984 if !lockheld {
2985 unlock(&sched.lock)
2986 }
2987 if nmp.spinning {
2988 throw("startm: m is spinning")
2989 }
2990 if nmp.nextp != 0 {
2991 throw("startm: m has p")
2992 }
2993 if spinning && !runqempty(pp) {
2994 throw("startm: p has runnable gs")
2995 }
2996
2997 nmp.spinning = spinning
2998 nmp.nextp.set(pp)
2999 notewakeup(&nmp.park)
3000
3001
3002 releasem(mp)
3003 }
3004
3005
3006
3007
3008
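// handoffp hands off pp from an M leaving a syscall or a locked M: if pp has local or global work, GC work, or must stay awake for tracing, safe-point, or netpoll duties, an M is started for it; otherwise pp is put on the idle list (waking the netpoller if pp had a pending timer).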
3009 func handoffp(pp *p) {
3010
3011
3012
3013
3014 if !runqempty(pp) || sched.runqsize != 0 {
3015 startm(pp, false, false)
3016 return
3017 }
3018
3019 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3020 startm(pp, false, false)
3021 return
3022 }
3023
3024 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3025 startm(pp, false, false)
3026 return
3027 }
3028
3029
3030 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3031 sched.needspinning.Store(0)
3032 startm(pp, true, false)
3033 return
3034 }
3035 lock(&sched.lock)
3036 if sched.gcwaiting.Load() {
3037 pp.status = _Pgcstop
3038 pp.gcStopTime = nanotime()
3039 sched.stopwait--
3040 if sched.stopwait == 0 {
3041 notewakeup(&sched.stopnote)
3042 }
3043 unlock(&sched.lock)
3044 return
3045 }
3046 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3047 sched.safePointFn(pp)
3048 sched.safePointWait--
3049 if sched.safePointWait == 0 {
3050 notewakeup(&sched.safePointNote)
3051 }
3052 }
3053 if sched.runqsize != 0 {
3054 unlock(&sched.lock)
3055 startm(pp, false, false)
3056 return
3057 }
3058
3059
3060 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3061 unlock(&sched.lock)
3062 startm(pp, false, false)
3063 return
3064 }
3065
3066
3067
3068 when := pp.timers.wakeTime()
3069 pidleput(pp, 0)
3070 unlock(&sched.lock)
3071
3072 if when != 0 {
3073 wakeNetPoller(when)
3074 }
3075 }
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
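// wakep tries to add one more spinning M to run pending work; it is called when a goroutine is made runnable.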
3090 func wakep() {
3091
3092
3093 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3094 return
3095 }
3096
3097
3098
3099
3100
3101
3102 mp := acquirem()
3103
3104 var pp *p
3105 lock(&sched.lock)
3106 pp, _ = pidlegetSpinning(0)
3107 if pp == nil {
3108 if sched.nmspinning.Add(-1) < 0 {
3109 throw("wakep: negative nmspinning")
3110 }
3111 unlock(&sched.lock)
3112 releasem(mp)
3113 return
3114 }
3115
3116
3117
3118
3119 unlock(&sched.lock)
3120
3121 startm(pp, true, false)
3122
3123 releasem(mp)
3124 }
3125
3126
3127
3128 func stoplockedm() {
3129 gp := getg()
3130
3131 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3132 throw("stoplockedm: inconsistent locking")
3133 }
3134 if gp.m.p != 0 {
3135
3136 pp := releasep()
3137 handoffp(pp)
3138 }
3139 incidlelocked(1)
3140
3141 mPark()
3142 status := readgstatus(gp.m.lockedg.ptr())
3143 if status&^_Gscan != _Grunnable {
3144 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3145 dumpgstatus(gp.m.lockedg.ptr())
3146 throw("stoplockedm: not runnable")
3147 }
3148 acquirep(gp.m.nextp.ptr())
3149 gp.m.nextp = 0
3150 }
3151
3152
3153
3154
3155
3156 func startlockedm(gp *g) {
3157 mp := gp.lockedm.ptr()
3158 if mp == getg().m {
3159 throw("startlockedm: locked to me")
3160 }
3161 if mp.nextp != 0 {
3162 throw("startlockedm: m has p")
3163 }
3164
3165 incidlelocked(-1)
3166 pp := releasep()
3167 mp.nextp.set(pp)
3168 notewakeup(&mp.park)
3169 stopm()
3170 }
3171
3172
3173
3174 func gcstopm() {
3175 gp := getg()
3176
3177 if !sched.gcwaiting.Load() {
3178 throw("gcstopm: not waiting for gc")
3179 }
3180 if gp.m.spinning {
3181 gp.m.spinning = false
3182
3183
3184 if sched.nmspinning.Add(-1) < 0 {
3185 throw("gcstopm: negative nmspinning")
3186 }
3187 }
3188 pp := releasep()
3189 lock(&sched.lock)
3190 pp.status = _Pgcstop
3191 pp.gcStopTime = nanotime()
3192 sched.stopwait--
3193 if sched.stopwait == 0 {
3194 notewakeup(&sched.stopnote)
3195 }
3196 unlock(&sched.lock)
3197 stopm()
3198 }
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
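// execute runs gp on the current M. gp must be _Grunnable; inheritTime controls whether gp reuses the remainder of the current time slice. execute never returns: it jumps into gp via gogo.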
3209 func execute(gp *g, inheritTime bool) {
3210 mp := getg().m
3211
3212 if goroutineProfile.active {
3213
3214
3215
3216 tryRecordGoroutineProfile(gp, nil, osyield)
3217 }
3218
3219
3220
3221 mp.curg = gp
3222 gp.m = mp
3223 casgstatus(gp, _Grunnable, _Grunning)
3224 gp.waitsince = 0
3225 gp.preempt = false
3226 gp.stackguard0 = gp.stack.lo + stackGuard
3227 if !inheritTime {
3228 mp.p.ptr().schedtick++
3229 }
3230
3231
3232 hz := sched.profilehz
3233 if mp.profilehz != hz {
3234 setThreadCPUProfiler(hz)
3235 }
3236
3237 trace := traceAcquire()
3238 if trace.ok() {
3239 trace.GoStart()
3240 traceRelease(trace)
3241 }
3242
3243 gogo(&gp.sched)
3244 }
3245
3246
3247
3248
3249
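// findRunnable finds a runnable goroutine for the current M to execute, checking the local and global run queues, netpoll, and work stolen from other Ps, as well as trace-reader and GC-worker goroutines; it blocks until work is available.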
3250 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3251 mp := getg().m
3252
3253
3254
3255
3256
3257 top:
3258 pp := mp.p.ptr()
3259 if sched.gcwaiting.Load() {
3260 gcstopm()
3261 goto top
3262 }
3263 if pp.runSafePointFn != 0 {
3264 runSafePointFn()
3265 }
3266
3267
3268
3269
3270
3271 now, pollUntil, _ := pp.timers.check(0)
3272
3273
3274 if traceEnabled() || traceShuttingDown() {
3275 gp := traceReader()
3276 if gp != nil {
3277 trace := traceAcquire()
3278 casgstatus(gp, _Gwaiting, _Grunnable)
3279 if trace.ok() {
3280 trace.GoUnpark(gp, 0)
3281 traceRelease(trace)
3282 }
3283 return gp, false, true
3284 }
3285 }
3286
3287
3288 if gcBlackenEnabled != 0 {
3289 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3290 if gp != nil {
3291 return gp, false, true
3292 }
3293 now = tnow
3294 }
3295
3296
3297
3298
3299 if pp.schedtick%61 == 0 && sched.runqsize > 0 {
3300 lock(&sched.lock)
3301 gp := globrunqget(pp, 1)
3302 unlock(&sched.lock)
3303 if gp != nil {
3304 return gp, false, false
3305 }
3306 }
3307
3308
3309 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3310 if gp := wakefing(); gp != nil {
3311 ready(gp, 0, true)
3312 }
3313 }
3314 if *cgo_yield != nil {
3315 asmcgocall(*cgo_yield, nil)
3316 }
3317
3318
3319 if gp, inheritTime := runqget(pp); gp != nil {
3320 return gp, inheritTime, false
3321 }
3322
3323
3324 if sched.runqsize != 0 {
3325 lock(&sched.lock)
3326 gp := globrunqget(pp, 0)
3327 unlock(&sched.lock)
3328 if gp != nil {
3329 return gp, false, false
3330 }
3331 }
3332
3333
3334
3335
3336
3337
3338
3339
3340 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3341 if list, delta := netpoll(0); !list.empty() {
3342 gp := list.pop()
3343 injectglist(&list)
3344 netpollAdjustWaiters(delta)
3345 trace := traceAcquire()
3346 casgstatus(gp, _Gwaiting, _Grunnable)
3347 if trace.ok() {
3348 trace.GoUnpark(gp, 0)
3349 traceRelease(trace)
3350 }
3351 return gp, false, false
3352 }
3353 }
3354
3355
3356
3357
3358
3359
3360 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3361 if !mp.spinning {
3362 mp.becomeSpinning()
3363 }
3364
3365 gp, inheritTime, tnow, w, newWork := stealWork(now)
3366 if gp != nil {
3367
3368 return gp, inheritTime, false
3369 }
3370 if newWork {
3371
3372
3373 goto top
3374 }
3375
3376 now = tnow
3377 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3378
3379 pollUntil = w
3380 }
3381 }
3382
3383
3384
3385
3386
3387 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3388 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3389 if node != nil {
3390 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3391 gp := node.gp.ptr()
3392
3393 trace := traceAcquire()
3394 casgstatus(gp, _Gwaiting, _Grunnable)
3395 if trace.ok() {
3396 trace.GoUnpark(gp, 0)
3397 traceRelease(trace)
3398 }
3399 return gp, false, false
3400 }
3401 gcController.removeIdleMarkWorker()
3402 }
3403
3404
3405
3406
3407
3408 gp, otherReady := beforeIdle(now, pollUntil)
3409 if gp != nil {
3410 trace := traceAcquire()
3411 casgstatus(gp, _Gwaiting, _Grunnable)
3412 if trace.ok() {
3413 trace.GoUnpark(gp, 0)
3414 traceRelease(trace)
3415 }
3416 return gp, false, false
3417 }
3418 if otherReady {
3419 goto top
3420 }
3421
3422
3423
3424
3425
3426 allpSnapshot := allp
3427
3428
3429 idlepMaskSnapshot := idlepMask
3430 timerpMaskSnapshot := timerpMask
3431
3432
3433 lock(&sched.lock)
3434 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3435 unlock(&sched.lock)
3436 goto top
3437 }
3438 if sched.runqsize != 0 {
3439 gp := globrunqget(pp, 0)
3440 unlock(&sched.lock)
3441 return gp, false, false
3442 }
3443 if !mp.spinning && sched.needspinning.Load() == 1 {
3444
3445 mp.becomeSpinning()
3446 unlock(&sched.lock)
3447 goto top
3448 }
3449 if releasep() != pp {
3450 throw("findrunnable: wrong p")
3451 }
3452 now = pidleput(pp, now)
3453 unlock(&sched.lock)
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491 wasSpinning := mp.spinning
3492 if mp.spinning {
3493 mp.spinning = false
3494 if sched.nmspinning.Add(-1) < 0 {
3495 throw("findrunnable: negative nmspinning")
3496 }
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509 lock(&sched.lock)
3510 if sched.runqsize != 0 {
3511 pp, _ := pidlegetSpinning(0)
3512 if pp != nil {
3513 gp := globrunqget(pp, 0)
3514 if gp == nil {
3515 throw("global runq empty with non-zero runqsize")
3516 }
3517 unlock(&sched.lock)
3518 acquirep(pp)
3519 mp.becomeSpinning()
3520 return gp, false, false
3521 }
3522 }
3523 unlock(&sched.lock)
3524
3525 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3526 if pp != nil {
3527 acquirep(pp)
3528 mp.becomeSpinning()
3529 goto top
3530 }
3531
3532
3533 pp, gp := checkIdleGCNoP()
3534 if pp != nil {
3535 acquirep(pp)
3536 mp.becomeSpinning()
3537
3538
3539 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3540 trace := traceAcquire()
3541 casgstatus(gp, _Gwaiting, _Grunnable)
3542 if trace.ok() {
3543 trace.GoUnpark(gp, 0)
3544 traceRelease(trace)
3545 }
3546 return gp, false, false
3547 }
3548
3549
3550
3551
3552
3553
3554
3555 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3556 }
3557
3558
3559 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3560 sched.pollUntil.Store(pollUntil)
3561 if mp.p != 0 {
3562 throw("findrunnable: netpoll with p")
3563 }
3564 if mp.spinning {
3565 throw("findrunnable: netpoll with spinning")
3566 }
3567 delay := int64(-1)
3568 if pollUntil != 0 {
3569 if now == 0 {
3570 now = nanotime()
3571 }
3572 delay = pollUntil - now
3573 if delay < 0 {
3574 delay = 0
3575 }
3576 }
3577 if faketime != 0 {
3578
3579 delay = 0
3580 }
3581 list, delta := netpoll(delay)
3582
3583 now = nanotime()
3584 sched.pollUntil.Store(0)
3585 sched.lastpoll.Store(now)
3586 if faketime != 0 && list.empty() {
3587
3588
3589 stopm()
3590 goto top
3591 }
3592 lock(&sched.lock)
3593 pp, _ := pidleget(now)
3594 unlock(&sched.lock)
3595 if pp == nil {
3596 injectglist(&list)
3597 netpollAdjustWaiters(delta)
3598 } else {
3599 acquirep(pp)
3600 if !list.empty() {
3601 gp := list.pop()
3602 injectglist(&list)
3603 netpollAdjustWaiters(delta)
3604 trace := traceAcquire()
3605 casgstatus(gp, _Gwaiting, _Grunnable)
3606 if trace.ok() {
3607 trace.GoUnpark(gp, 0)
3608 traceRelease(trace)
3609 }
3610 return gp, false, false
3611 }
3612 if wasSpinning {
3613 mp.becomeSpinning()
3614 }
3615 goto top
3616 }
3617 } else if pollUntil != 0 && netpollinited() {
3618 pollerPollUntil := sched.pollUntil.Load()
3619 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3620 netpollBreak()
3621 }
3622 }
3623 stopm()
3624 goto top
3625 }
3626
3627 // pollWork reports whether there is non-background work this P could
3628 // be doing: a runnable G on the global or local run queue, or ready
3629 // network poll work. It is a lightweight check; it examines only a
3630 // subset of the conditions the scheduler itself checks.
3631 func pollWork() bool {
3632 if sched.runqsize != 0 {
3633 return true
3634 }
3635 p := getg().m.p.ptr()
3636 if !runqempty(p) {
3637 return true
3638 }
3639 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3640 if list, delta := netpoll(0); !list.empty() {
3641 injectglist(&list)
3642 netpollAdjustWaiters(delta)
3643 return true
3644 }
3645 }
3646 return false
3647 }
3648
3649
3650
3651 // stealWork attempts to steal a runnable goroutine or timer from any P.
3652 //
3653 // If newWork is true, new work may have been readied.
3654 // If now is not 0 it is the current time. stealWork returns the passed time or the current time if now was passed as 0.
3655 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3656 pp := getg().m.p.ptr()
3657
3658 ranTimer := false
3659
3660 const stealTries = 4
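// Make stealTries passes over all Ps in random order. Only on the final pass
// do we also steal timers and the victim's runnext slot, since those are the
// most intrusive forms of stealing.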
3661 for i := 0; i < stealTries; i++ {
3662 stealTimersOrRunNextG := i == stealTries-1
3663
3664 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3665 if sched.gcwaiting.Load() {
3666
3667 return nil, false, now, pollUntil, true
3668 }
3669 p2 := allp[enum.position()]
3670 if pp == p2 {
3671 continue
3672 }
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3688 tnow, w, ran := p2.timers.check(now)
3689 now = tnow
3690 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3691 pollUntil = w
3692 }
3693 if ran {
3694
3695
3696
3697
3698
3699
3700
3701
3702 if gp, inheritTime := runqget(pp); gp != nil {
3703 return gp, inheritTime, now, pollUntil, ranTimer
3704 }
3705 ranTimer = true
3706 }
3707 }
3708
3709
3710 if !idlepMask.read(enum.position()) {
3711 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3712 return gp, false, now, pollUntil, ranTimer
3713 }
3714 }
3715 }
3716 }
3717
3718
3719
3720
3721 return nil, false, now, pollUntil, ranTimer
3722 }
3723
3724
3725 // checkRunqsNoP checks all Ps in the snapshot for a non-empty run queue.
3726 // On entry we have no P. If stealable work is found and an idle P is
3727 // available, that P is returned; the caller should acquire it and steal
3728 // the work. Otherwise it returns nil.
3729 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3730 for id, p2 := range allpSnapshot {
3731 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3732 lock(&sched.lock)
3733 pp, _ := pidlegetSpinning(0)
3734 if pp == nil {
3735
3736 unlock(&sched.lock)
3737 return nil
3738 }
3739 unlock(&sched.lock)
3740 return pp
3741 }
3742 }
3743
3744
3745 return nil
3746 }
3747
3748
3749
3750
3751 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3752 for id, p2 := range allpSnapshot {
3753 if timerpMaskSnapshot.read(uint32(id)) {
3754 w := p2.timers.wakeTime()
3755 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3756 pollUntil = w
3757 }
3758 }
3759 }
3760
3761 return pollUntil
3762 }
3763
3764
3765 // checkIdleGCNoP checks for idle-priority GC mark work without a P.
3766 // On entry we have no P. If idle mark work is available and an idle P can
3767 // be acquired, it returns that P and a GC mark worker G to run on it.
3768 func checkIdleGCNoP() (*p, *g) {
3769
3770
3771
3772
3773
3774
3775 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3776 return nil, nil
3777 }
3778 if !gcMarkWorkAvailable(nil) {
3779 return nil, nil
3780 }
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799 lock(&sched.lock)
3800 pp, now := pidlegetSpinning(0)
3801 if pp == nil {
3802 unlock(&sched.lock)
3803 return nil, nil
3804 }
3805
3806
3807 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3808 pidleput(pp, now)
3809 unlock(&sched.lock)
3810 return nil, nil
3811 }
3812
3813 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3814 if node == nil {
3815 pidleput(pp, now)
3816 unlock(&sched.lock)
3817 gcController.removeIdleMarkWorker()
3818 return nil, nil
3819 }
3820
3821 unlock(&sched.lock)
3822
3823 return pp, node.gp.ptr()
3824 }
3825
3826
3827 // wakeNetPoller wakes the thread sleeping in the network poller if it isn't
3828 // going to wake up before when; otherwise it wakes an idle P to service timers and the network poller.
3829 func wakeNetPoller(when int64) {
3830 if sched.lastpoll.Load() == 0 {
3831
3832
3833
3834
3835 pollerPollUntil := sched.pollUntil.Load()
3836 if pollerPollUntil == 0 || pollerPollUntil > when {
3837 netpollBreak()
3838 }
3839 } else {
3840
3841
3842 if GOOS != "plan9" {
3843 wakep()
3844 }
3845 }
3846 }
3847
3848 func resetspinning() {
3849 gp := getg()
3850 if !gp.m.spinning {
3851 throw("resetspinning: not a spinning m")
3852 }
3853 gp.m.spinning = false
3854 nmspinning := sched.nmspinning.Add(-1)
3855 if nmspinning < 0 {
3856 throw("findrunnable: negative nmspinning")
3857 }
3858
3859
3860
3861 wakep()
3862 }
3863
3864
3865 // injectglist adds each runnable G on the list to some run queue and clears
3866 // glist. If there is no current P, the Gs are put on the global queue and up
3867 // to npidle idle Ps are started to run them. Otherwise, one G per idle P is
3868 // sent to the global queue (starting an M for each) and the remainder is put
3869 // on the current P's local run queue.
3870 //
3871 // Can run concurrently with GC.
3872 func injectglist(glist *gList) {
3873 if glist.empty() {
3874 return
3875 }
3876
3877
3878
3879 head := glist.head.ptr()
3880 var tail *g
3881 qsize := 0
3882 trace := traceAcquire()
3883 for gp := head; gp != nil; gp = gp.schedlink.ptr() {
3884 tail = gp
3885 qsize++
3886 casgstatus(gp, _Gwaiting, _Grunnable)
3887 if trace.ok() {
3888 trace.GoUnpark(gp, 0)
3889 }
3890 }
3891 if trace.ok() {
3892 traceRelease(trace)
3893 }
3894
3895
3896 var q gQueue
3897 q.head.set(head)
3898 q.tail.set(tail)
3899 *glist = gList{}
3900
3901 startIdle := func(n int) {
3902 for i := 0; i < n; i++ {
3903 mp := acquirem()
3904 lock(&sched.lock)
3905
3906 pp, _ := pidlegetSpinning(0)
3907 if pp == nil {
3908 unlock(&sched.lock)
3909 releasem(mp)
3910 break
3911 }
3912
3913 startm(pp, false, true)
3914 unlock(&sched.lock)
3915 releasem(mp)
3916 }
3917 }
3918
3919 pp := getg().m.p.ptr()
3920 if pp == nil {
3921 lock(&sched.lock)
3922 globrunqputbatch(&q, int32(qsize))
3923 unlock(&sched.lock)
3924 startIdle(qsize)
3925 return
3926 }
3927
3928 npidle := int(sched.npidle.Load())
3929 var (
3930 globq gQueue
3931 n int
3932 )
3933 for n = 0; n < npidle && !q.empty(); n++ {
3934 g := q.pop()
3935 globq.pushBack(g)
3936 }
3937 if n > 0 {
3938 lock(&sched.lock)
3939 globrunqputbatch(&globq, int32(n))
3940 unlock(&sched.lock)
3941 startIdle(n)
3942 qsize -= n
3943 }
3944
3945 if !q.empty() {
3946 runqputbatch(pp, &q, qsize)
3947 }
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962 wakep()
3963 }
3964
3965 // schedule performs one round of scheduling: it finds a runnable
3966 // goroutine and executes it. It never returns.
3967 func schedule() {
3968 mp := getg().m
3969
3970 if mp.locks != 0 {
3971 throw("schedule: holding locks")
3972 }
3973
3974 if mp.lockedg != 0 {
3975 stoplockedm()
3976 execute(mp.lockedg.ptr(), false)
3977 }
3978
3979
3980
3981 if mp.incgo {
3982 throw("schedule: in cgo")
3983 }
3984
3985 top:
3986 pp := mp.p.ptr()
3987 pp.preempt = false
3988
3989
3990
3991
3992 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
3993 throw("schedule: spinning with local work")
3994 }
3995
3996 gp, inheritTime, tryWakeP := findRunnable()
3997
3998 if debug.dontfreezetheworld > 0 && freezing.Load() {
3999
4000 // The runtime is freezing the world (for example for a fatal panic) and
4001 // GODEBUG=dontfreezetheworld is set, so findRunnable did not stop this M.
4002 // It still must not run new goroutines, so park it permanently by
4003 // deadlocking on a private lock: the second lock call below never returns.
4004
4005
4006 lock(&deadlock)
4007 lock(&deadlock)
4008 }
4009
4010
4011
4012
4013 if mp.spinning {
4014 resetspinning()
4015 }
4016
4017 if sched.disable.user && !schedEnabled(gp) {
4018
4019
4020
4021 lock(&sched.lock)
4022 if schedEnabled(gp) {
4023
4024
4025 unlock(&sched.lock)
4026 } else {
4027 sched.disable.runnable.pushBack(gp)
4028 sched.disable.n++
4029 unlock(&sched.lock)
4030 goto top
4031 }
4032 }
4033
4034
4035
4036 if tryWakeP {
4037 wakep()
4038 }
4039 if gp.lockedm != 0 {
4040
4041
4042 startlockedm(gp)
4043 goto top
4044 }
4045
4046 execute(gp, inheritTime)
4047 }
4048
4049
4050 // dropg removes the association between m and the current goroutine m->curg (gp for short).
4051 // Typically a caller sets gp's status away from Grunning and then
4052 // immediately calls dropg to finish the job. The caller is also responsible
4053 // for arranging that gp will be restarted using ready at an
4054 // appropriate time. After calling dropg and arranging for gp to be
4055 // readied later, the caller can do other work but eventually should call schedule to restart the scheduling of goroutines on this m.
4056 func dropg() {
4057 gp := getg()
4058
4059 setMNoWB(&gp.m.curg.m, nil)
4060 setGNoWB(&gp.m.curg, nil)
4061 }
4062
4063 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4064 unlock((*mutex)(lock))
4065 return true
4066 }
4067
4068
4069 func park_m(gp *g) {
4070 mp := getg().m
4071
4072 trace := traceAcquire()
4073
4074 if trace.ok() {
4075
4076
4077
4078 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4079 }
4080
4081
4082 casgstatus(gp, _Grunning, _Gwaiting)
4083 if trace.ok() {
4084 traceRelease(trace)
4085 }
4086
4087 dropg()
4088
4089 if fn := mp.waitunlockf; fn != nil {
4090 ok := fn(gp, mp.waitlock)
4091 mp.waitunlockf = nil
4092 mp.waitlock = nil
4093 if !ok {
4094 trace := traceAcquire()
4095 casgstatus(gp, _Gwaiting, _Grunnable)
4096 if trace.ok() {
4097 trace.GoUnpark(gp, 2)
4098 traceRelease(trace)
4099 }
4100 execute(gp, true)
4101 }
4102 }
4103 schedule()
4104 }
4105
4106 func goschedImpl(gp *g, preempted bool) {
4107 trace := traceAcquire()
4108 status := readgstatus(gp)
4109 if status&^_Gscan != _Grunning {
4110 dumpgstatus(gp)
4111 throw("bad g status")
4112 }
4113 if trace.ok() {
4114
4115
4116
4117 if preempted {
4118 trace.GoPreempt()
4119 } else {
4120 trace.GoSched()
4121 }
4122 }
4123 casgstatus(gp, _Grunning, _Grunnable)
4124 if trace.ok() {
4125 traceRelease(trace)
4126 }
4127
4128 dropg()
4129 lock(&sched.lock)
4130 globrunqput(gp)
4131 unlock(&sched.lock)
4132
4133 if mainStarted {
4134 wakep()
4135 }
4136
4137 schedule()
4138 }
4139
4140
4141 func gosched_m(gp *g) {
4142 goschedImpl(gp, false)
4143 }
4144
4145
4146 func goschedguarded_m(gp *g) {
4147 if !canPreemptM(gp.m) {
4148 gogo(&gp.sched)
4149 }
4150 goschedImpl(gp, false)
4151 }
4152
4153 func gopreempt_m(gp *g) {
4154 goschedImpl(gp, true)
4155 }
4156
4157
4158
4159 // preemptPark parks gp and puts it in _Gpreempted.
4160 func preemptPark(gp *g) {
4161 status := readgstatus(gp)
4162 if status&^_Gscan != _Grunning {
4163 dumpgstatus(gp)
4164 throw("bad g status")
4165 }
4166
4167 if gp.asyncSafePoint {
4168
4169
4170
4171 f := findfunc(gp.sched.pc)
4172 if !f.valid() {
4173 throw("preempt at unknown pc")
4174 }
4175 if f.flag&abi.FuncFlagSPWrite != 0 {
4176 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4177 throw("preempt SPWRITE")
4178 }
4179 }
4180
4181
4182
4183
4184
4185
4186
4187 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4188 dropg()
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205 trace := traceAcquire()
4206 if trace.ok() {
4207 trace.GoPark(traceBlockPreempted, 0)
4208 }
4209 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4210 if trace.ok() {
4211 traceRelease(trace)
4212 }
4213 schedule()
4214 }
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227 // goyield is like Gosched, but it:
4228 // - emits a GoPreempt trace event instead of a GoSched trace event
4229 // - puts the current G on the runq of the current P instead of the globrunq
4230 func goyield() {
4231 checkTimeouts()
4232 mcall(goyield_m)
4233 }
4234
4235 func goyield_m(gp *g) {
4236 trace := traceAcquire()
4237 pp := gp.m.p.ptr()
4238 if trace.ok() {
4239
4240
4241
4242 trace.GoPreempt()
4243 }
4244 casgstatus(gp, _Grunning, _Grunnable)
4245 if trace.ok() {
4246 traceRelease(trace)
4247 }
4248 dropg()
4249 runqput(pp, gp, false)
4250 schedule()
4251 }
4252
4253
4254 func goexit1() {
4255 if raceenabled {
4256 racegoend()
4257 }
4258 trace := traceAcquire()
4259 if trace.ok() {
4260 trace.GoEnd()
4261 traceRelease(trace)
4262 }
4263 mcall(goexit0)
4264 }
4265
4266
4267 func goexit0(gp *g) {
4268 gdestroy(gp)
4269 schedule()
4270 }
4271
4272 func gdestroy(gp *g) {
4273 mp := getg().m
4274 pp := mp.p.ptr()
4275
4276 casgstatus(gp, _Grunning, _Gdead)
4277 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4278 if isSystemGoroutine(gp, false) {
4279 sched.ngsys.Add(-1)
4280 }
4281 gp.m = nil
4282 locked := gp.lockedm != 0
4283 gp.lockedm = 0
4284 mp.lockedg = 0
4285 gp.preemptStop = false
4286 gp.paniconfault = false
4287 gp._defer = nil
4288 gp._panic = nil
4289 gp.writebuf = nil
4290 gp.waitreason = waitReasonZero
4291 gp.param = nil
4292 gp.labels = nil
4293 gp.timer = nil
4294
4295 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4296
4297
4298
4299 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4300 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4301 gcController.bgScanCredit.Add(scanCredit)
4302 gp.gcAssistBytes = 0
4303 }
4304
4305 dropg()
4306
4307 if GOARCH == "wasm" {
4308 gfput(pp, gp)
4309 return
4310 }
4311
4312 if locked && mp.lockedInt != 0 {
4313 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4314 throw("exited a goroutine internally locked to the OS thread")
4315 }
4316 gfput(pp, gp)
4317 if locked {
4318
4319
4320
4321
4322
4323
4324 if GOOS != "plan9" {
4325 gogo(&mp.g0.sched)
4326 } else {
4327
4328
4329 mp.lockedExt = 0
4330 }
4331 }
4332 }
4333
4334
4335
4336
4337 // save updates getg().sched to refer to pc, sp, and bp so that a
4338 // following gogo will restore pc/sp.
4339 //
4340 // save must not have write barriers because invoking a write barrier can
4341 // clobber getg().sched.
4342 func save(pc, sp, bp uintptr) {
4343 gp := getg()
4344
4345 if gp == gp.m.g0 || gp == gp.m.gsignal {
4346
4347
4348
4349
4350
4351 throw("save on system g not allowed")
4352 }
4353
4354 gp.sched.pc = pc
4355 gp.sched.sp = sp
4356 gp.sched.lr = 0
4357 gp.sched.ret = 0
4358 gp.sched.bp = bp
4359
4360
4361
4362 if gp.sched.ctxt != nil {
4363 badctxt()
4364 }
4365 }
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382 // reentersyscall is the common syscall entry: the goroutine is about to
4383 // enter a system call and stops using the CPU. It saves the caller's pc,
4384 // sp, and bp, moves the G to _Gsyscall, and moves the P to _Psyscall so
4385 // that sysmon or a GC stop can retake it if the syscall runs long.
4386 //
4387 // Nothing in here may grow or split the stack, because g.sched must stay
4388 // in sync with the saved pc/sp until exitsyscall; that is also why save
4389 // is re-run after each systemstack call below, which clobbers
4390 // g.sched.{pc,sp}.
4391 func reentersyscall(pc, sp, bp uintptr) {
4392 trace := traceAcquire()
4393 gp := getg()
4394
4395
4396
4397 gp.m.locks++
4398
4399
4400
4401
4402
4403 gp.stackguard0 = stackPreempt
4404 gp.throwsplit = true
4405
4406
4407 save(pc, sp, bp)
4408 gp.syscallsp = sp
4409 gp.syscallpc = pc
4410 gp.syscallbp = bp
4411 casgstatus(gp, _Grunning, _Gsyscall)
4412 if staticLockRanking {
4413
4414
4415 save(pc, sp, bp)
4416 }
4417 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4418 systemstack(func() {
4419 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4420 throw("entersyscall")
4421 })
4422 }
4423 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4424 systemstack(func() {
4425 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4426 throw("entersyscall")
4427 })
4428 }
4429
4430 if trace.ok() {
4431 systemstack(func() {
4432 trace.GoSysCall()
4433 traceRelease(trace)
4434 })
4435
4436
4437
4438 save(pc, sp, bp)
4439 }
4440
4441 if sched.sysmonwait.Load() {
4442 systemstack(entersyscall_sysmon)
4443 save(pc, sp, bp)
4444 }
4445
4446 if gp.m.p.ptr().runSafePointFn != 0 {
4447
4448 systemstack(runSafePointFn)
4449 save(pc, sp, bp)
4450 }
4451
4452 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4453 pp := gp.m.p.ptr()
4454 pp.m = 0
4455 gp.m.oldp.set(pp)
4456 gp.m.p = 0
4457 atomic.Store(&pp.status, _Psyscall)
4458 if sched.gcwaiting.Load() {
4459 systemstack(entersyscall_gcwait)
4460 save(pc, sp, bp)
4461 }
4462
4463 gp.m.locks--
4464 }
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477 // entersyscall is the standard syscall entry used by the go syscall
4478 // library and normal cgo calls. It captures the caller's pc, sp, and
4479 // frame pointer and forwards them to reentersyscall.
4480 func entersyscall() {
4481
4482
4483
4484
4485 fp := getcallerfp()
4486 reentersyscall(getcallerpc(), getcallersp(), fp)
4487 }
4488
4489 func entersyscall_sysmon() {
4490 lock(&sched.lock)
4491 if sched.sysmonwait.Load() {
4492 sched.sysmonwait.Store(false)
4493 notewakeup(&sched.sysmonnote)
4494 }
4495 unlock(&sched.lock)
4496 }
4497
4498 func entersyscall_gcwait() {
4499 gp := getg()
4500 pp := gp.m.oldp.ptr()
4501
4502 lock(&sched.lock)
4503 trace := traceAcquire()
4504 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4505 if trace.ok() {
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515 trace.ProcSteal(pp, true)
4516 traceRelease(trace)
4517 }
4518 pp.gcStopTime = nanotime()
4519 pp.syscalltick++
4520 if sched.stopwait--; sched.stopwait == 0 {
4521 notewakeup(&sched.stopnote)
4522 }
4523 } else if trace.ok() {
4524 traceRelease(trace)
4525 }
4526 unlock(&sched.lock)
4527 }
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538 // entersyscallblock is the same as entersyscall, but for a call that is
4539 // known to block: the P is handed off immediately rather than being left
4540 // in _Psyscall for sysmon to retake later.
4541 func entersyscallblock() {
4542 gp := getg()
4543
4544 gp.m.locks++
4545 gp.throwsplit = true
4546 gp.stackguard0 = stackPreempt
4547 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4548 gp.m.p.ptr().syscalltick++
4549
4550
4551 pc := getcallerpc()
4552 sp := getcallersp()
4553 bp := getcallerfp()
4554 save(pc, sp, bp)
4555 gp.syscallsp = gp.sched.sp
4556 gp.syscallpc = gp.sched.pc
4557 gp.syscallbp = gp.sched.bp
4558 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4559 sp1 := sp
4560 sp2 := gp.sched.sp
4561 sp3 := gp.syscallsp
4562 systemstack(func() {
4563 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4564 throw("entersyscallblock")
4565 })
4566 }
4567 casgstatus(gp, _Grunning, _Gsyscall)
4568 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4569 systemstack(func() {
4570 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4571 throw("entersyscallblock")
4572 })
4573 }
4574 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4575 systemstack(func() {
4576 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4577 throw("entersyscallblock")
4578 })
4579 }
4580
4581 systemstack(entersyscallblock_handoff)
4582
4583
4584 save(getcallerpc(), getcallersp(), getcallerfp())
4585
4586 gp.m.locks--
4587 }
4588
4589 func entersyscallblock_handoff() {
4590 trace := traceAcquire()
4591 if trace.ok() {
4592 trace.GoSysCall()
4593 traceRelease(trace)
4594 }
4595 handoffp(releasep())
4596 }
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612 // exitsyscall is called when the goroutine g has exited its system call;
4613 // it arranges for g to run on a CPU again. The fast path re-acquires the
4614 // P that was left in _Psyscall (or grabs an idle P); the slow path
4615 // switches to g0 via exitsyscall0 and parks until a P is available.
4616 //
4617 // This is called only from the go syscall library, not from the low-level system calls used by the runtime.
4618 func exitsyscall() {
4619 gp := getg()
4620
4621 gp.m.locks++
4622 if getcallersp() > gp.syscallsp {
4623 throw("exitsyscall: syscall frame is no longer valid")
4624 }
4625
4626 gp.waitsince = 0
4627 oldp := gp.m.oldp.ptr()
4628 gp.m.oldp = 0
4629 if exitsyscallfast(oldp) {
4630
4631
4632 if goroutineProfile.active {
4633
4634
4635
4636 systemstack(func() {
4637 tryRecordGoroutineProfileWB(gp)
4638 })
4639 }
4640 trace := traceAcquire()
4641 if trace.ok() {
4642 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4643 systemstack(func() {
4644
4645
4646
4647
4648 trace.GoSysExit(lostP)
4649 if lostP {
4650
4651
4652
4653
4654 trace.GoStart()
4655 }
4656 })
4657 }
4658
4659 gp.m.p.ptr().syscalltick++
4660
4661 casgstatus(gp, _Gsyscall, _Grunning)
4662 if trace.ok() {
4663 traceRelease(trace)
4664 }
4665
4666
4667
4668 gp.syscallsp = 0
4669 gp.m.locks--
4670 if gp.preempt {
4671
4672 gp.stackguard0 = stackPreempt
4673 } else {
4674
4675 gp.stackguard0 = gp.stack.lo + stackGuard
4676 }
4677 gp.throwsplit = false
4678
4679 if sched.disable.user && !schedEnabled(gp) {
4680
4681 Gosched()
4682 }
4683
4684 return
4685 }
4686
4687 gp.m.locks--
4688
4689
4690 mcall(exitsyscall0)
4691
4692
4693
4694
4695
4696
4697
4698 gp.syscallsp = 0
4699 gp.m.p.ptr().syscalltick++
4700 gp.throwsplit = false
4701 }
4702
4703
4704 func exitsyscallfast(oldp *p) bool {
4705
4706 if sched.stopwait == freezeStopWait {
4707 return false
4708 }
4709
4710 // Try to re-acquire the last P: if it is still in _Psyscall we can CAS it back before sysmon or a GC stop steals it.
4711 trace := traceAcquire()
4712 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4713
4714 wirep(oldp)
4715 exitsyscallfast_reacquired(trace)
4716 if trace.ok() {
4717 traceRelease(trace)
4718 }
4719 return true
4720 }
4721 if trace.ok() {
4722 traceRelease(trace)
4723 }
4724
4725
4726 if sched.pidle != 0 {
4727 var ok bool
4728 systemstack(func() {
4729 ok = exitsyscallfast_pidle()
4730 })
4731 if ok {
4732 return true
4733 }
4734 }
4735 return false
4736 }
4737
4738
4739
4740
4741
4742
4743 func exitsyscallfast_reacquired(trace traceLocker) {
4744 gp := getg()
4745 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4746 if trace.ok() {
4747
4748
4749
4750 systemstack(func() {
4751
4752
4753 trace.ProcSteal(gp.m.p.ptr(), true)
4754 trace.ProcStart()
4755 })
4756 }
4757 gp.m.p.ptr().syscalltick++
4758 }
4759 }
4760
4761 func exitsyscallfast_pidle() bool {
4762 lock(&sched.lock)
4763 pp, _ := pidleget(0)
4764 if pp != nil && sched.sysmonwait.Load() {
4765 sched.sysmonwait.Store(false)
4766 notewakeup(&sched.sysmonnote)
4767 }
4768 unlock(&sched.lock)
4769 if pp != nil {
4770 acquirep(pp)
4771 return true
4772 }
4773 return false
4774 }
4775
4776
4777
4778 // exitsyscall0 is the slow path of exitsyscall, running on g0.
4779 // The goroutine failed to re-acquire a P directly, so it is made
4780 // _Grunnable and either run immediately on a freshly acquired idle P or
4781 // put on the global run queue while this M stops.
4782 func exitsyscall0(gp *g) {
4783 var trace traceLocker
4784 traceExitingSyscall()
4785 trace = traceAcquire()
4786 casgstatus(gp, _Gsyscall, _Grunnable)
4787 traceExitedSyscall()
4788 if trace.ok() {
4789
4790
4791
4792
4793 trace.GoSysExit(true)
4794 traceRelease(trace)
4795 }
4796 dropg()
4797 lock(&sched.lock)
4798 var pp *p
4799 if schedEnabled(gp) {
4800 pp, _ = pidleget(0)
4801 }
4802 var locked bool
4803 if pp == nil {
4804 globrunqput(gp)
4805
4806
4807
4808
4809
4810
4811 locked = gp.lockedm != 0
4812 } else if sched.sysmonwait.Load() {
4813 sched.sysmonwait.Store(false)
4814 notewakeup(&sched.sysmonnote)
4815 }
4816 unlock(&sched.lock)
4817 if pp != nil {
4818 acquirep(pp)
4819 execute(gp, false)
4820 }
4821 if locked {
4822
4823
4824
4825
4826 stoplockedm()
4827 execute(gp, false)
4828 }
4829 stopm()
4830 schedule()
4831 }
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843 // syscall_runtime_BeforeFork is called from the syscall package before fork.
4844 // It disables preemption, blocks signals, and poisons the stack guard so
4845 // that any stack growth between fork and exec is reliably detected.
4846 func syscall_runtime_BeforeFork() {
4847 gp := getg().m.curg
4848
4849
4850
4851
4852 gp.m.locks++
4853 sigsave(&gp.m.sigmask)
4854 sigblock(false)
4855
4856
4857
4858
4859
4860 gp.stackguard0 = stackFork
4861 }
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876 func syscall_runtime_AfterFork() {
4877 gp := getg().m.curg
4878
4879
4880 gp.stackguard0 = gp.stack.lo + stackGuard
4881
4882 msigrestore(gp.m.sigmask)
4883
4884 gp.m.locks--
4885 }
4886
4887
4888
4889 var inForkedChild bool
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911 func syscall_runtime_AfterForkInChild() {
4912
4913
4914
4915
4916 inForkedChild = true
4917
4918 clearSignalHandlers()
4919
4920
4921
4922 msigrestore(getg().m.sigmask)
4923
4924 inForkedChild = false
4925 }
4926
4927
4928
4929
4930 var pendingPreemptSignals atomic.Int32
4931
4932
4933
4934
4935 func syscall_runtime_BeforeExec() {
4936
4937 execLock.lock()
4938
4939
4940
4941 if GOOS == "darwin" || GOOS == "ios" {
4942 for pendingPreemptSignals.Load() > 0 {
4943 osyield()
4944 }
4945 }
4946 }
4947
4948
4949
4950
4951 func syscall_runtime_AfterExec() {
4952 execLock.unlock()
4953 }
4954
4955
4956 func malg(stacksize int32) *g {
4957 newg := new(g)
4958 if stacksize >= 0 {
4959 stacksize = round2(stackSystem + stacksize)
4960 systemstack(func() {
4961 newg.stack = stackalloc(uint32(stacksize))
4962 })
4963 newg.stackguard0 = newg.stack.lo + stackGuard
4964 newg.stackguard1 = ^uintptr(0)
4965
4966
4967 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
4968 }
4969 return newg
4970 }
4971
4972
4973 // newproc creates a new g running fn and puts it on the current P's run
4974 // queue for execution. The compiler turns a go statement into a call to this.
4975 func newproc(fn *funcval) {
4976 gp := getg()
4977 pc := getcallerpc()
4978 systemstack(func() {
4979 newg := newproc1(fn, gp, pc, false, waitReasonZero)
4980
4981 pp := getg().m.p.ptr()
4982 runqput(pp, newg, true)
4983
4984 if mainStarted {
4985 wakep()
4986 }
4987 })
4988 }
4989
4990
4991 // newproc1 creates a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn.
4992 // callerpc is the address of the go statement that created this. The caller is responsible for adding the new g to the scheduler.
4993 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
4994 if fn == nil {
4995 fatal("go of nil func value")
4996 }
4997
4998 mp := acquirem()
4999 pp := mp.p.ptr()
5000 newg := gfget(pp)
5001 if newg == nil {
5002 newg = malg(stackMin)
5003 casgstatus(newg, _Gidle, _Gdead)
5004 allgadd(newg)
5005 }
5006 if newg.stack.hi == 0 {
5007 throw("newproc1: newg missing stack")
5008 }
5009
5010 if readgstatus(newg) != _Gdead {
5011 throw("newproc1: new g is not Gdead")
5012 }
5013
5014 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5015 totalSize = alignUp(totalSize, sys.StackAlign)
5016 sp := newg.stack.hi - totalSize
5017 if usesLR {
5018
5019 *(*uintptr)(unsafe.Pointer(sp)) = 0
5020 prepGoExitFrame(sp)
5021 }
5022 if GOARCH == "arm64" {
5023
5024 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5025 }
5026
5027 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5028 newg.sched.sp = sp
5029 newg.stktopsp = sp
5030 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5031 newg.sched.g = guintptr(unsafe.Pointer(newg))
5032 gostartcallfn(&newg.sched, fn)
5033 newg.parentGoid = callergp.goid
5034 newg.gopc = callerpc
5035 newg.ancestors = saveAncestors(callergp)
5036 newg.startpc = fn.fn
5037 if isSystemGoroutine(newg, false) {
5038 sched.ngsys.Add(1)
5039 } else {
5040
5041 if mp.curg != nil {
5042 newg.labels = mp.curg.labels
5043 }
5044 if goroutineProfile.active {
5045
5046
5047
5048
5049
5050 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5051 }
5052 }
5053
5054 newg.trackingSeq = uint8(cheaprand())
5055 if newg.trackingSeq%gTrackingPeriod == 0 {
5056 newg.tracking = true
5057 }
5058 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5059
5060
5061 trace := traceAcquire()
5062 var status uint32 = _Grunnable
5063 if parked {
5064 status = _Gwaiting
5065 newg.waitreason = waitreason
5066 }
5067 casgstatus(newg, _Gdead, status)
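// Assign a goid from the P-local cache. The cache is refilled in batches of
// _GoidCacheBatch with a single atomic add on sched.goidgen, so goroutine
// creation rarely contends on the global counter.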
5068 if pp.goidcache == pp.goidcacheend {
5069
5070
5071
5072 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5073 pp.goidcache -= _GoidCacheBatch - 1
5074 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5075 }
5076 newg.goid = pp.goidcache
5077 pp.goidcache++
5078 newg.trace.reset()
5079 if trace.ok() {
5080 trace.GoCreate(newg, newg.startpc, parked)
5081 traceRelease(trace)
5082 }
5083
5084
5085 if raceenabled {
5086 newg.racectx = racegostart(callerpc)
5087 newg.raceignore = 0
5088 if newg.labels != nil {
5089
5090
5091 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5092 }
5093 }
5094 releasem(mp)
5095
5096 return newg
5097 }
5098
5099
5100
5101
5102 func saveAncestors(callergp *g) *[]ancestorInfo {
5103
5104 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5105 return nil
5106 }
5107 var callerAncestors []ancestorInfo
5108 if callergp.ancestors != nil {
5109 callerAncestors = *callergp.ancestors
5110 }
5111 n := int32(len(callerAncestors)) + 1
5112 if n > debug.tracebackancestors {
5113 n = debug.tracebackancestors
5114 }
5115 ancestors := make([]ancestorInfo, n)
5116 copy(ancestors[1:], callerAncestors)
5117
5118 var pcs [tracebackInnerFrames]uintptr
5119 npcs := gcallers(callergp, 0, pcs[:])
5120 ipcs := make([]uintptr, npcs)
5121 copy(ipcs, pcs[:])
5122 ancestors[0] = ancestorInfo{
5123 pcs: ipcs,
5124 goid: callergp.goid,
5125 gopc: callergp.gopc,
5126 }
5127
5128 ancestorsp := new([]ancestorInfo)
5129 *ancestorsp = ancestors
5130 return ancestorsp
5131 }
5132
5133 // gfput puts gp on the local gFree list for reuse. If the local list reaches
5134 // 64 entries, a batch is transferred to the global free lists until the local count drops below 32.
5135 func gfput(pp *p, gp *g) {
5136 if readgstatus(gp) != _Gdead {
5137 throw("gfput: bad status (not Gdead)")
5138 }
5139
5140 stksize := gp.stack.hi - gp.stack.lo
5141
5142 if stksize != uintptr(startingStackSize) {
5143
5144 stackfree(gp.stack)
5145 gp.stack.lo = 0
5146 gp.stack.hi = 0
5147 gp.stackguard0 = 0
5148 }
5149
5150 pp.gFree.push(gp)
5151 pp.gFree.n++
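// Keep at most 64 Gs cached locally; past that, move Gs (split by whether
// they still own a stack) to the global free lists until the local count
// drops below 32.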
5152 if pp.gFree.n >= 64 {
5153 var (
5154 inc int32
5155 stackQ gQueue
5156 noStackQ gQueue
5157 )
5158 for pp.gFree.n >= 32 {
5159 gp := pp.gFree.pop()
5160 pp.gFree.n--
5161 if gp.stack.lo == 0 {
5162 noStackQ.push(gp)
5163 } else {
5164 stackQ.push(gp)
5165 }
5166 inc++
5167 }
5168 lock(&sched.gFree.lock)
5169 sched.gFree.noStack.pushAll(noStackQ)
5170 sched.gFree.stack.pushAll(stackQ)
5171 sched.gFree.n += inc
5172 unlock(&sched.gFree.lock)
5173 }
5174 }
5175
5176 // gfget gets a free g from the local gFree list, refilling up to 32 from the
5177 // global list if the local list is empty. It ensures the returned g has a stack of startingStackSize.
5178 func gfget(pp *p) *g {
5179 retry:
5180 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5181 lock(&sched.gFree.lock)
5182
5183 for pp.gFree.n < 32 {
5184
5185 gp := sched.gFree.stack.pop()
5186 if gp == nil {
5187 gp = sched.gFree.noStack.pop()
5188 if gp == nil {
5189 break
5190 }
5191 }
5192 sched.gFree.n--
5193 pp.gFree.push(gp)
5194 pp.gFree.n++
5195 }
5196 unlock(&sched.gFree.lock)
5197 goto retry
5198 }
5199 gp := pp.gFree.pop()
5200 if gp == nil {
5201 return nil
5202 }
5203 pp.gFree.n--
5204 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5205
5206
5207
5208 systemstack(func() {
5209 stackfree(gp.stack)
5210 gp.stack.lo = 0
5211 gp.stack.hi = 0
5212 gp.stackguard0 = 0
5213 })
5214 }
5215 if gp.stack.lo == 0 {
5216
5217 systemstack(func() {
5218 gp.stack = stackalloc(startingStackSize)
5219 })
5220 gp.stackguard0 = gp.stack.lo + stackGuard
5221 } else {
5222 if raceenabled {
5223 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5224 }
5225 if msanenabled {
5226 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5227 }
5228 if asanenabled {
5229 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5230 }
5231 }
5232 return gp
5233 }
5234
5235
5236 func gfpurge(pp *p) {
5237 var (
5238 inc int32
5239 stackQ gQueue
5240 noStackQ gQueue
5241 )
5242 for !pp.gFree.empty() {
5243 gp := pp.gFree.pop()
5244 pp.gFree.n--
5245 if gp.stack.lo == 0 {
5246 noStackQ.push(gp)
5247 } else {
5248 stackQ.push(gp)
5249 }
5250 inc++
5251 }
5252 lock(&sched.gFree.lock)
5253 sched.gFree.noStack.pushAll(noStackQ)
5254 sched.gFree.stack.pushAll(stackQ)
5255 sched.gFree.n += inc
5256 unlock(&sched.gFree.lock)
5257 }
5258
5259
5260 func Breakpoint() {
5261 breakpoint()
5262 }
5263
5264
5265
5266 // dolockOSThread is called by LockOSThread and lockOSThread below after they
5267 // modify m.locked. Preemption must not occur during this call, or else the m
5268 // might be different in this function than in the caller.
5269 func dolockOSThread() {
5270 if GOARCH == "wasm" {
5271 return
5272 }
5273 gp := getg()
5274 gp.m.lockedg.set(gp)
5275 gp.lockedm.set(gp.m)
5276 }
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289 // LockOSThread wires the calling goroutine to its current operating system
5290 // thread. The calling goroutine will always execute in that thread, and no
5291 // other goroutine will execute in it, until the calling goroutine has made as
5292 // many calls to UnlockOSThread as to LockOSThread. If the calling goroutine
5293 // exits without unlocking the thread, the thread will be terminated.
5294 func LockOSThread() {
5295 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5296
5297
5298
5299 startTemplateThread()
5300 }
5301 gp := getg()
5302 gp.m.lockedExt++
5303 if gp.m.lockedExt == 0 {
5304 gp.m.lockedExt--
5305 panic("LockOSThread nesting overflow")
5306 }
5307 dolockOSThread()
5308 }
5309
5310
5311 func lockOSThread() {
5312 getg().m.lockedInt++
5313 dolockOSThread()
5314 }
5315
5316
5317
5318
5319
5320
5321 func dounlockOSThread() {
5322 if GOARCH == "wasm" {
5323 return
5324 }
5325 gp := getg()
5326 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5327 return
5328 }
5329 gp.m.lockedg = 0
5330 gp.lockedm = 0
5331 }
5332
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347 func UnlockOSThread() {
5348 gp := getg()
5349 if gp.m.lockedExt == 0 {
5350 return
5351 }
5352 gp.m.lockedExt--
5353 dounlockOSThread()
5354 }
5355
5356
5357 func unlockOSThread() {
5358 gp := getg()
5359 if gp.m.lockedInt == 0 {
5360 systemstack(badunlockosthread)
5361 }
5362 gp.m.lockedInt--
5363 dounlockOSThread()
5364 }
5365
5366 func badunlockosthread() {
5367 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5368 }
5369
5370 func gcount() int32 {
5371 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
5372 for _, pp := range allp {
5373 n -= pp.gFree.n
5374 }
5375
5376
5377
5378 if n < 1 {
5379 n = 1
5380 }
5381 return n
5382 }
5383
5384 func mcount() int32 {
5385 return int32(sched.mnext - sched.nmfreed)
5386 }
5387
5388 var prof struct {
5389 signalLock atomic.Uint32
5390
5391
5392
5393 hz atomic.Int32
5394 }
5395
5396 func _System() { _System() }
5397 func _ExternalCode() { _ExternalCode() }
5398 func _LostExternalCode() { _LostExternalCode() }
5399 func _GC() { _GC() }
5400 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5401 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5402 func _VDSO() { _VDSO() }
5403
5404
5405
5406 // sigprof is called when we receive a SIGPROF signal (or its platform
5407 // equivalent). It is called by the signal handler and so may run during a stop-the-world.
5408 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5409 if prof.hz.Load() == 0 {
5410 return
5411 }
5412
5413
5414
5415
5416 if mp != nil && mp.profilehz == 0 {
5417 return
5418 }
5419
5420
5421
5422
5423
5424
5425
5426 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5427 if f := findfunc(pc); f.valid() {
5428 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5429 cpuprof.lostAtomic++
5430 return
5431 }
5432 }
5433 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5434
5435
5436
5437 cpuprof.lostAtomic++
5438 return
5439 }
5440 }
5441
5442
5443
5444
5445
5446
5447
5448 getg().m.mallocing++
5449
5450 var u unwinder
5451 var stk [maxCPUProfStack]uintptr
5452 n := 0
5453 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5454 cgoOff := 0
5455
5456
5457
5458
5459
5460 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5461 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5462 cgoOff++
5463 }
5464 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5465 mp.cgoCallers[0] = 0
5466 }
5467
5468
5469 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5470 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5471
5472
5473 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5474 } else if mp != nil && mp.vdsoSP != 0 {
5475
5476
5477 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5478 } else {
5479 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5480 }
5481 n += tracebackPCs(&u, 0, stk[n:])
5482
5483 if n <= 0 {
5484
5485
5486 n = 2
5487 if inVDSOPage(pc) {
5488 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5489 } else if pc > firstmoduledata.etext {
5490
5491 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5492 }
5493 stk[0] = pc
5494 if mp.preemptoff != "" {
5495 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5496 } else {
5497 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5498 }
5499 }
5500
5501 if prof.hz.Load() != 0 {
5502
5503
5504
5505 var tagPtr *unsafe.Pointer
5506 if gp != nil && gp.m != nil && gp.m.curg != nil {
5507 tagPtr = &gp.m.curg.labels
5508 }
5509 cpuprof.add(tagPtr, stk[:n])
5510
5511 gprof := gp
5512 var mp *m
5513 var pp *p
5514 if gp != nil && gp.m != nil {
5515 if gp.m.curg != nil {
5516 gprof = gp.m.curg
5517 }
5518 mp = gp.m
5519 pp = gp.m.p.ptr()
5520 }
5521 traceCPUSample(gprof, mp, pp, stk[:n])
5522 }
5523 getg().m.mallocing--
5524 }
5525
5526
5527
5528 func setcpuprofilerate(hz int32) {
5529
5530 if hz < 0 {
5531 hz = 0
5532 }
5533
5534
5535
5536 gp := getg()
5537 gp.m.locks++
5538
5539
5540
5541
5542 setThreadCPUProfiler(0)
5543
5544 for !prof.signalLock.CompareAndSwap(0, 1) {
5545 osyield()
5546 }
5547 if prof.hz.Load() != hz {
5548 setProcessCPUProfiler(hz)
5549 prof.hz.Store(hz)
5550 }
5551 prof.signalLock.Store(0)
5552
5553 lock(&sched.lock)
5554 sched.profilehz = hz
5555 unlock(&sched.lock)
5556
5557 if hz != 0 {
5558 setThreadCPUProfiler(hz)
5559 }
5560
5561 gp.m.locks--
5562 }
5563
5564
5565
5566 func (pp *p) init(id int32) {
5567 pp.id = id
5568 pp.status = _Pgcstop
5569 pp.sudogcache = pp.sudogbuf[:0]
5570 pp.deferpool = pp.deferpoolbuf[:0]
5571 pp.wbBuf.reset()
5572 if pp.mcache == nil {
5573 if id == 0 {
5574 if mcache0 == nil {
5575 throw("missing mcache?")
5576 }
5577
5578
5579 pp.mcache = mcache0
5580 } else {
5581 pp.mcache = allocmcache()
5582 }
5583 }
5584 if raceenabled && pp.raceprocctx == 0 {
5585 if id == 0 {
5586 pp.raceprocctx = raceprocctx0
5587 raceprocctx0 = 0
5588 } else {
5589 pp.raceprocctx = raceproccreate()
5590 }
5591 }
5592 lockInit(&pp.timers.mu, lockRankTimers)
5593
5594
5595
5596 timerpMask.set(id)
5597
5598
5599 idlepMask.clear(id)
5600 }
5601
5602
5603 // destroy releases all of the resources associated with pp and transitions
5604 // it to status _Pdead.
5605 // sched.lock must be held and the world must be stopped.
5606 func (pp *p) destroy() {
5607 assertLockHeld(&sched.lock)
5608 assertWorldStopped()
5609
5610
5611 for pp.runqhead != pp.runqtail {
5612
5613 pp.runqtail--
5614 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5615
5616 globrunqputhead(gp)
5617 }
5618 if pp.runnext != 0 {
5619 globrunqputhead(pp.runnext.ptr())
5620 pp.runnext = 0
5621 }
5622
5623
5624 getg().m.p.ptr().timers.take(&pp.timers)
5625
5626
5627 if gcphase != _GCoff {
5628 wbBufFlush1(pp)
5629 pp.gcw.dispose()
5630 }
5631 for i := range pp.sudogbuf {
5632 pp.sudogbuf[i] = nil
5633 }
5634 pp.sudogcache = pp.sudogbuf[:0]
5635 pp.pinnerCache = nil
5636 for j := range pp.deferpoolbuf {
5637 pp.deferpoolbuf[j] = nil
5638 }
5639 pp.deferpool = pp.deferpoolbuf[:0]
5640 systemstack(func() {
5641 for i := 0; i < pp.mspancache.len; i++ {
5642
5643 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5644 }
5645 pp.mspancache.len = 0
5646 lock(&mheap_.lock)
5647 pp.pcache.flush(&mheap_.pages)
5648 unlock(&mheap_.lock)
5649 })
5650 freemcache(pp.mcache)
5651 pp.mcache = nil
5652 gfpurge(pp)
5653 if raceenabled {
5654 if pp.timers.raceCtx != 0 {
5655
5656
5657
5658
5659
5660 mp := getg().m
5661 phold := mp.p.ptr()
5662 mp.p.set(pp)
5663
5664 racectxend(pp.timers.raceCtx)
5665 pp.timers.raceCtx = 0
5666
5667 mp.p.set(phold)
5668 }
5669 raceprocdestroy(pp.raceprocctx)
5670 pp.raceprocctx = 0
5671 }
5672 pp.gcAssistTime = 0
5673 pp.status = _Pdead
5674 }
5675
5676
5677 // procresize changes the number of processors to nprocs.
5678 //
5679 // sched.lock must be held, and the world must be stopped.
5680 //
5681 // gcworkbufs must not be being modified by either the GC or the write
5682 // barrier code, so the GC must not be running if the number of Ps actually changes.
5683 // Returns the list of Ps with local work; the caller needs to schedule them.
5684 func procresize(nprocs int32) *p {
5685 assertLockHeld(&sched.lock)
5686 assertWorldStopped()
5687
5688 old := gomaxprocs
5689 if old < 0 || nprocs <= 0 {
5690 throw("procresize: invalid arg")
5691 }
5692 trace := traceAcquire()
5693 if trace.ok() {
5694 trace.Gomaxprocs(nprocs)
5695 traceRelease(trace)
5696 }
5697
5698
5699 now := nanotime()
5700 if sched.procresizetime != 0 {
5701 sched.totaltime += int64(old) * (now - sched.procresizetime)
5702 }
5703 sched.procresizetime = now
5704
5705 maskWords := (nprocs + 31) / 32
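// Each mask word covers 32 Ps, so for example nprocs == 40 needs
// (40+31)/32 == 2 words in idlepMask and timerpMask.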
5706
5707
5708 if nprocs > int32(len(allp)) {
5709
5710
5711 lock(&allpLock)
5712 if nprocs <= int32(cap(allp)) {
5713 allp = allp[:nprocs]
5714 } else {
5715 nallp := make([]*p, nprocs)
5716
5717
5718 copy(nallp, allp[:cap(allp)])
5719 allp = nallp
5720 }
5721
5722 if maskWords <= int32(cap(idlepMask)) {
5723 idlepMask = idlepMask[:maskWords]
5724 timerpMask = timerpMask[:maskWords]
5725 } else {
5726 nidlepMask := make([]uint32, maskWords)
5727
5728 copy(nidlepMask, idlepMask)
5729 idlepMask = nidlepMask
5730
5731 ntimerpMask := make([]uint32, maskWords)
5732 copy(ntimerpMask, timerpMask)
5733 timerpMask = ntimerpMask
5734 }
5735 unlock(&allpLock)
5736 }
5737
5738
5739 for i := old; i < nprocs; i++ {
5740 pp := allp[i]
5741 if pp == nil {
5742 pp = new(p)
5743 }
5744 pp.init(i)
5745 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5746 }
5747
5748 gp := getg()
5749 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5750
5751 gp.m.p.ptr().status = _Prunning
5752 gp.m.p.ptr().mcache.prepareForSweep()
5753 } else {
5754
5755
5756
5757
5758
5759 if gp.m.p != 0 {
5760 trace := traceAcquire()
5761 if trace.ok() {
5762
5763
5764
5765 trace.GoSched()
5766 trace.ProcStop(gp.m.p.ptr())
5767 traceRelease(trace)
5768 }
5769 gp.m.p.ptr().m = 0
5770 }
5771 gp.m.p = 0
5772 pp := allp[0]
5773 pp.m = 0
5774 pp.status = _Pidle
5775 acquirep(pp)
5776 trace := traceAcquire()
5777 if trace.ok() {
5778 trace.GoStart()
5779 traceRelease(trace)
5780 }
5781 }
5782
5783
5784 mcache0 = nil
5785
5786
5787 for i := nprocs; i < old; i++ {
5788 pp := allp[i]
5789 pp.destroy()
5790
5791 }
5792
5793
5794 if int32(len(allp)) != nprocs {
5795 lock(&allpLock)
5796 allp = allp[:nprocs]
5797 idlepMask = idlepMask[:maskWords]
5798 timerpMask = timerpMask[:maskWords]
5799 unlock(&allpLock)
5800 }
5801
5802 var runnablePs *p
5803 for i := nprocs - 1; i >= 0; i-- {
5804 pp := allp[i]
5805 if gp.m.p.ptr() == pp {
5806 continue
5807 }
5808 pp.status = _Pidle
5809 if runqempty(pp) {
5810 pidleput(pp, now)
5811 } else {
5812 pp.m.set(mget())
5813 pp.link.set(runnablePs)
5814 runnablePs = pp
5815 }
5816 }
5817 stealOrder.reset(uint32(nprocs))
5818 var int32p *int32 = &gomaxprocs
5819 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5820 if old != nprocs {
5821
5822 gcCPULimiter.resetCapacity(now, nprocs)
5823 }
5824 return runnablePs
5825 }
5826
5827
5828
5829
5830
5831
5832
5833 func acquirep(pp *p) {
5834
5835 wirep(pp)
5836
5837
5838
5839
5840
5841 pp.mcache.prepareForSweep()
5842
5843 trace := traceAcquire()
5844 if trace.ok() {
5845 trace.ProcStart()
5846 traceRelease(trace)
5847 }
5848 }
5849
5850
5851
5852
5853 // wirep is the first step of acquirep, which actually associates the current
5854 // M to pp. This is broken out so we can disallow write barriers for this
5855 // part, since we don't yet have a P.
5856 func wirep(pp *p) {
5857 gp := getg()
5858
5859 if gp.m.p != 0 {
5860
5861
5862 systemstack(func() {
5863 throw("wirep: already in go")
5864 })
5865 }
5866 if pp.m != 0 || pp.status != _Pidle {
5867
5868
5869 systemstack(func() {
5870 id := int64(0)
5871 if pp.m != 0 {
5872 id = pp.m.ptr().id
5873 }
5874 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
5875 throw("wirep: invalid p state")
5876 })
5877 }
5878 gp.m.p.set(pp)
5879 pp.m.set(gp.m)
5880 pp.status = _Prunning
5881 }
5882
5883
5884 func releasep() *p {
5885 trace := traceAcquire()
5886 if trace.ok() {
5887 trace.ProcStop(getg().m.p.ptr())
5888 traceRelease(trace)
5889 }
5890 return releasepNoTrace()
5891 }
5892
5893
5894 func releasepNoTrace() *p {
5895 gp := getg()
5896
5897 if gp.m.p == 0 {
5898 throw("releasep: invalid arg")
5899 }
5900 pp := gp.m.p.ptr()
5901 if pp.m.ptr() != gp.m || pp.status != _Prunning {
5902 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
5903 throw("releasep: invalid p state")
5904 }
5905 gp.m.p = 0
5906 pp.m = 0
5907 pp.status = _Pidle
5908 return pp
5909 }
5910
5911 func incidlelocked(v int32) {
5912 lock(&sched.lock)
5913 sched.nmidlelocked += v
5914 if v > 0 {
5915 checkdead()
5916 }
5917 unlock(&sched.lock)
5918 }
5919
5920
5921 // checkdead checks for a deadlock: no running or runnable goroutines on any
5922 // M. If so it crashes the program. sched.lock must be held.
5923 func checkdead() {
5924 assertLockHeld(&sched.lock)
5925
5926
5927
5928
5929 if islibrary || isarchive {
5930 return
5931 }
5932
5933
5934
5935
5936
5937 if panicking.Load() > 0 {
5938 return
5939 }
5940
5941
5942
5943
5944
5945 var run0 int32
5946 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
5947 run0 = 1
5948 }
5949
5950 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
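// run is the number of Ms that may be running user code: all Ms minus the
// idle, locked-idle, and system ones. run0 allows for one extra M that exists
// only to service cgo callbacks; anything above run0 means we are not deadlocked.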
5951 if run > run0 {
5952 return
5953 }
5954 if run < 0 {
5955 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
5956 unlock(&sched.lock)
5957 throw("checkdead: inconsistent counts")
5958 }
5959
5960 grunning := 0
5961 forEachG(func(gp *g) {
5962 if isSystemGoroutine(gp, false) {
5963 return
5964 }
5965 s := readgstatus(gp)
5966 switch s &^ _Gscan {
5967 case _Gwaiting,
5968 _Gpreempted:
5969 grunning++
5970 case _Grunnable,
5971 _Grunning,
5972 _Gsyscall:
5973 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
5974 unlock(&sched.lock)
5975 throw("checkdead: runnable g")
5976 }
5977 })
5978 if grunning == 0 {
5979 unlock(&sched.lock)
5980 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
5981 }
5982
5983
5984 if faketime != 0 {
5985 if when := timeSleepUntil(); when < maxWhen {
5986 faketime = when
5987
5988
5989 pp, _ := pidleget(faketime)
5990 if pp == nil {
5991
5992
5993 unlock(&sched.lock)
5994 throw("checkdead: no p for timer")
5995 }
5996 mp := mget()
5997 if mp == nil {
5998
5999
6000 unlock(&sched.lock)
6001 throw("checkdead: no m for timer")
6002 }
6003
6004
6005
6006 sched.nmspinning.Add(1)
6007 mp.spinning = true
6008 mp.nextp.set(pp)
6009 notewakeup(&mp.park)
6010 return
6011 }
6012 }
6013
6014
6015 for _, pp := range allp {
6016 if len(pp.timers.heap) > 0 {
6017 return
6018 }
6019 }
6020
6021 unlock(&sched.lock)
6022 fatal("all goroutines are asleep - deadlock!")
6023 }
6024
6025
6026
6027 // forcegcperiod is the maximum time in nanoseconds between garbage
6028 // collections. If we go this long without a garbage collection, one
6029 // is forced to run.
6030 var forcegcperiod int64 = 2 * 60 * 1e9
6031
6032
6033
6034 var needSysmonWorkaround bool = false
6035
6036
6037
6038
6039 const haveSysmon = GOARCH != "wasm"
6040
6041
6042 // sysmon is the system monitor. It runs on a dedicated M without a P, so
6043 // write barriers are not allowed.
6044 func sysmon() {
6045 lock(&sched.lock)
6046 sched.nmsys++
6047 checkdead()
6048 unlock(&sched.lock)
6049
6050 lasttrace := int64(0)
6051 idle := 0
6052 delay := uint32(0)
6053
6054 for {
6055 if idle == 0 {
6056 delay = 20
6057 } else if idle > 50 {
6058 delay *= 2
6059 }
6060 if delay > 10*1000 {
6061 delay = 10 * 1000
6062 }
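// Sleep between checks: 20us while there has been recent work (idle == 0),
// doubling each cycle once sysmon has been idle for more than 50 cycles,
// capped at 10ms.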
6063 usleep(delay)
6064
6065
6066
6067
6068
6069
6070
6071
6072
6073
6074
6075
6076
6077
6078
6079
6080 now := nanotime()
6081 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6082 lock(&sched.lock)
6083 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6084 syscallWake := false
6085 next := timeSleepUntil()
6086 if next > now {
6087 sched.sysmonwait.Store(true)
6088 unlock(&sched.lock)
6089
6090
6091 sleep := forcegcperiod / 2
6092 if next-now < sleep {
6093 sleep = next - now
6094 }
6095 shouldRelax := sleep >= osRelaxMinNS
6096 if shouldRelax {
6097 osRelax(true)
6098 }
6099 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6100 if shouldRelax {
6101 osRelax(false)
6102 }
6103 lock(&sched.lock)
6104 sched.sysmonwait.Store(false)
6105 noteclear(&sched.sysmonnote)
6106 }
6107 if syscallWake {
6108 idle = 0
6109 delay = 20
6110 }
6111 }
6112 unlock(&sched.lock)
6113 }
6114
6115 lock(&sched.sysmonlock)
6116
6117
6118 now = nanotime()
6119
6120
6121 if *cgo_yield != nil {
6122 asmcgocall(*cgo_yield, nil)
6123 }
6124
6125 lastpoll := sched.lastpoll.Load()
6126 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6127 sched.lastpoll.CompareAndSwap(lastpoll, now)
6128 list, delta := netpoll(0)
6129 if !list.empty() {
6130
6131
6132
6133
6134
6135
6136
6137 incidlelocked(-1)
6138 injectglist(&list)
6139 incidlelocked(1)
6140 netpollAdjustWaiters(delta)
6141 }
6142 }
6143 if GOOS == "netbsd" && needSysmonWorkaround {
6144
6145
6146
6147
6148
6149
6150
6151
6152
6153
6154
6155
6156
6157
6158
6159 if next := timeSleepUntil(); next < now {
6160 startm(nil, false, false)
6161 }
6162 }
6163 if scavenger.sysmonWake.Load() != 0 {
6164
6165 scavenger.wake()
6166 }
6167
6168
6169 if retake(now) != 0 {
6170 idle = 0
6171 } else {
6172 idle++
6173 }
6174
6175 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6176 lock(&forcegc.lock)
6177 forcegc.idle.Store(false)
6178 var list gList
6179 list.push(forcegc.g)
6180 injectglist(&list)
6181 unlock(&forcegc.lock)
6182 }
6183 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6184 lasttrace = now
6185 schedtrace(debug.scheddetail > 0)
6186 }
6187 unlock(&sched.sysmonlock)
6188 }
6189 }
6190
6191 type sysmontick struct {
6192 schedtick uint32
6193 syscalltick uint32
6194 schedwhen int64
6195 syscallwhen int64
6196 }
6197
6198
6199
6200 const forcePreemptNS = 10 * 1000 * 1000
6201
6202 func retake(now int64) uint32 {
6203 n := 0
6204
6205
6206 lock(&allpLock)
6207
6208
6209
6210 for i := 0; i < len(allp); i++ {
6211 pp := allp[i]
6212 if pp == nil {
6213
6214
6215 continue
6216 }
6217 pd := &pp.sysmontick
6218 s := pp.status
6219 sysretake := false
6220 if s == _Prunning || s == _Psyscall {
6221
6222
6223
6224
6225 t := int64(pp.schedtick)
6226 if int64(pd.schedtick) != t {
6227 pd.schedtick = uint32(t)
6228 pd.schedwhen = now
6229 } else if pd.schedwhen+forcePreemptNS <= now {
6230 preemptone(pp)
6231
6232
6233 sysretake = true
6234 }
6235 }
6236 if s == _Psyscall {
6237
6238 t := int64(pp.syscalltick)
6239 if !sysretake && int64(pd.syscalltick) != t {
6240 pd.syscalltick = uint32(t)
6241 pd.syscallwhen = now
6242 continue
6243 }
6244
6245
6246
6247 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6248 continue
6249 }
6250
6251 unlock(&allpLock)
6252
6253
6254
6255
6256 incidlelocked(-1)
6257 trace := traceAcquire()
6258 if atomic.Cas(&pp.status, s, _Pidle) {
6259 if trace.ok() {
6260 trace.ProcSteal(pp, false)
6261 traceRelease(trace)
6262 }
6263 n++
6264 pp.syscalltick++
6265 handoffp(pp)
6266 } else if trace.ok() {
6267 traceRelease(trace)
6268 }
6269 incidlelocked(1)
6270 lock(&allpLock)
6271 }
6272 }
6273 unlock(&allpLock)
6274 return uint32(n)
6275 }
6276
6277
6278 // preemptall tells all running goroutines (one per P in _Prunning) that they
6279 // have been preempted and should stop. It is purely best effort: it can fail
6280 // to inform a goroutine if a processor has just started running it.
6281 // It returns true if a preemption request was issued to at least one goroutine.
6282 func preemptall() bool {
6283 res := false
6284 for _, pp := range allp {
6285 if pp.status != _Prunning {
6286 continue
6287 }
6288 if preemptone(pp) {
6289 res = true
6290 }
6291 }
6292 return res
6293 }
6294
6295
6296
6297
6298
6299 // preemptone tells the goroutine running on processor pp to stop.
6300 //
6301 // This function is purely best-effort. It can incorrectly fail to inform the
6302 // goroutine, or inform the wrong goroutine. Even if it informs the correct
6303 // goroutine, that goroutine might ignore the request if it is simultaneously
6304 // executing newstack. No lock needs to be held. Returns true if a preemption request was issued.
6305 func preemptone(pp *p) bool {
6306 mp := pp.m.ptr()
6307 if mp == nil || mp == getg().m {
6308 return false
6309 }
6310 gp := mp.curg
6311 if gp == nil || gp == mp.g0 {
6312 return false
6313 }
6314
6315 gp.preempt = true
6316
6317
6318
6319
6320
6321 gp.stackguard0 = stackPreempt
6322
6323
6324 if preemptMSupported && debug.asyncpreemptoff == 0 {
6325 pp.preempt = true
6326 preemptM(mp)
6327 }
6328
6329 return true
6330 }
6331
6332 var starttime int64
6333
6334 func schedtrace(detailed bool) {
6335 now := nanotime()
6336 if starttime == 0 {
6337 starttime = now
6338 }
6339
6340 lock(&sched.lock)
6341 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
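// The summary line looks like, for example:
//   SCHED 2013ms: gomaxprocs=8 idleprocs=8 threads=12 spinningthreads=0 needspinning=0 idlethreads=7 runqueue=0
// followed either by the detailed per-P/M/G dump below or by the per-P local
// run queue lengths in brackets.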
6342 if detailed {
6343 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6344 }
6345
6346
6347
6348 for i, pp := range allp {
6349 mp := pp.m.ptr()
6350 h := atomic.Load(&pp.runqhead)
6351 t := atomic.Load(&pp.runqtail)
6352 if detailed {
6353 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6354 if mp != nil {
6355 print(mp.id)
6356 } else {
6357 print("nil")
6358 }
6359 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n")
6360 } else {
6361
6362
6363 print(" ")
6364 if i == 0 {
6365 print("[")
6366 }
6367 print(t - h)
6368 if i == len(allp)-1 {
6369 print("]\n")
6370 }
6371 }
6372 }
6373
6374 if !detailed {
6375 unlock(&sched.lock)
6376 return
6377 }
6378
6379 for mp := allm; mp != nil; mp = mp.alllink {
6380 pp := mp.p.ptr()
6381 print(" M", mp.id, ": p=")
6382 if pp != nil {
6383 print(pp.id)
6384 } else {
6385 print("nil")
6386 }
6387 print(" curg=")
6388 if mp.curg != nil {
6389 print(mp.curg.goid)
6390 } else {
6391 print("nil")
6392 }
6393 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6394 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6395 print(lockedg.goid)
6396 } else {
6397 print("nil")
6398 }
6399 print("\n")
6400 }
6401
6402 forEachG(func(gp *g) {
6403 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6404 if gp.m != nil {
6405 print(gp.m.id)
6406 } else {
6407 print("nil")
6408 }
6409 print(" lockedm=")
6410 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6411 print(lockedm.id)
6412 } else {
6413 print("nil")
6414 }
6415 print("\n")
6416 })
6417 unlock(&sched.lock)
6418 }
6419
6420
6421
6422
6423
6424
6425 func schedEnableUser(enable bool) {
6426 lock(&sched.lock)
6427 if sched.disable.user == !enable {
6428 unlock(&sched.lock)
6429 return
6430 }
6431 sched.disable.user = !enable
6432 if enable {
6433 n := sched.disable.n
6434 sched.disable.n = 0
6435 globrunqputbatch(&sched.disable.runnable, n)
6436 unlock(&sched.lock)
6437 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6438 startm(nil, false, false)
6439 }
6440 } else {
6441 unlock(&sched.lock)
6442 }
6443 }
6444
6445
6446
6447
6448
6449 func schedEnabled(gp *g) bool {
6450 assertLockHeld(&sched.lock)
6451
6452 if sched.disable.user {
6453 return isSystemGoroutine(gp, true)
6454 }
6455 return true
6456 }
6457
6458
6459
6460
6461
6462
6463 func mput(mp *m) {
6464 assertLockHeld(&sched.lock)
6465
6466 mp.schedlink = sched.midle
6467 sched.midle.set(mp)
6468 sched.nmidle++
6469 checkdead()
6470 }
6471
6472
6473
6474
6475
6476
6477 func mget() *m {
6478 assertLockHeld(&sched.lock)
6479
6480 mp := sched.midle.ptr()
6481 if mp != nil {
6482 sched.midle = mp.schedlink
6483 sched.nmidle--
6484 }
6485 return mp
6486 }
6487
6488
6489
6490
6491
6492
6493 func globrunqput(gp *g) {
6494 assertLockHeld(&sched.lock)
6495
6496 sched.runq.pushBack(gp)
6497 sched.runqsize++
6498 }
6499
6500
6501
6502
6503
6504
6505 func globrunqputhead(gp *g) {
6506 assertLockHeld(&sched.lock)
6507
6508 sched.runq.push(gp)
6509 sched.runqsize++
6510 }
6511
6512
6513
6514
6515
6516
6517
6518 func globrunqputbatch(batch *gQueue, n int32) {
6519 assertLockHeld(&sched.lock)
6520
6521 sched.runq.pushBackAll(*batch)
6522 sched.runqsize += n
6523 *batch = gQueue{}
6524 }
6525
6526
6527
6528 func globrunqget(pp *p, max int32) *g {
6529 assertLockHeld(&sched.lock)
6530
6531 if sched.runqsize == 0 {
6532 return nil
6533 }
6534
6535 n := sched.runqsize/gomaxprocs + 1
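// Take this P's proportional share of the global queue (runqsize/gomaxprocs,
// plus one), but never more than max (when max > 0) nor more than half the
// local run queue's capacity.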
6536 if n > sched.runqsize {
6537 n = sched.runqsize
6538 }
6539 if max > 0 && n > max {
6540 n = max
6541 }
6542 if n > int32(len(pp.runq))/2 {
6543 n = int32(len(pp.runq)) / 2
6544 }
6545
6546 sched.runqsize -= n
6547
6548 gp := sched.runq.pop()
6549 n--
6550 for ; n > 0; n-- {
6551 gp1 := sched.runq.pop()
6552 runqput(pp, gp1, false)
6553 }
6554 return gp
6555 }
6556
6557 // pMask is an atomic bitmask of Ps, one bit per P, packed into uint32 words.
6558 type pMask []uint32
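// Each word covers 32 Ps. For example, P 37 maps to bit 37%32 == 5 of word
// 37/32 == 1, so read(37) tests bit 5 of p[1] and set(37) ORs in 1<<5 atomically.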
6559
6560
6561 func (p pMask) read(id uint32) bool {
6562 word := id / 32
6563 mask := uint32(1) << (id % 32)
6564 return (atomic.Load(&p[word]) & mask) != 0
6565 }
6566
6567
6568 func (p pMask) set(id int32) {
6569 word := id / 32
6570 mask := uint32(1) << (id % 32)
6571 atomic.Or(&p[word], mask)
6572 }
6573
6574
6575 func (p pMask) clear(id int32) {
6576 word := id / 32
6577 mask := uint32(1) << (id % 32)
6578 atomic.And(&p[word], ^mask)
6579 }
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591
6592 func pidleput(pp *p, now int64) int64 {
6593 assertLockHeld(&sched.lock)
6594
6595 if !runqempty(pp) {
6596 throw("pidleput: P has non-empty run queue")
6597 }
6598 if now == 0 {
6599 now = nanotime()
6600 }
6601 if pp.timers.len.Load() == 0 {
6602 timerpMask.clear(pp.id)
6603 }
6604 idlepMask.set(pp.id)
6605 pp.link = sched.pidle
6606 sched.pidle.set(pp)
6607 sched.npidle.Add(1)
6608 if !pp.limiterEvent.start(limiterEventIdle, now) {
6609 throw("must be able to track idle limiter event")
6610 }
6611 return now
6612 }
6613
6614
6615
6616
6617
6618
6619
6620
6621 func pidleget(now int64) (*p, int64) {
6622 assertLockHeld(&sched.lock)
6623
6624 pp := sched.pidle.ptr()
6625 if pp != nil {
6626
6627 if now == 0 {
6628 now = nanotime()
6629 }
6630 timerpMask.set(pp.id)
6631 idlepMask.clear(pp.id)
6632 sched.pidle = pp.link
6633 sched.npidle.Add(-1)
6634 pp.limiterEvent.stop(limiterEventIdle, now)
6635 }
6636 return pp, now
6637 }
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647
6648
6649 func pidlegetSpinning(now int64) (*p, int64) {
6650 assertLockHeld(&sched.lock)
6651
6652 pp, now := pidleget(now)
6653 if pp == nil {
6654
6655
6656
6657 sched.needspinning.Store(1)
6658 return nil, now
6659 }
6660
6661 return pp, now
6662 }
6663
6664 // runqempty reports whether pp has no Gs on its local run queue.
6665 // It never returns true spuriously.
6666 func runqempty(pp *p) bool {
6667
6668
6669
6670
6671 for {
6672 head := atomic.Load(&pp.runqhead)
6673 tail := atomic.Load(&pp.runqtail)
6674 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
6675 if tail == atomic.Load(&pp.runqtail) {
6676 return head == tail && runnext == 0
6677 }
6678 }
6679 }
6680
6681
6682
6683
6684
6685
6686
6687 // randomizeScheduler: to shake out latent assumptions about scheduling order,
6688 // we add some randomness to scheduling decisions when running with the race
6689 // detector, e.g. whether a readied G goes to runnext or the queue tail.
6690 const randomizeScheduler = raceenabled
6691
6692
6693 // runqput tries to put g on the local runnable queue.
6694 // If next is false, runqput adds g to the tail of the runnable queue.
6695 // If next is true, runqput puts g in the pp.runnext slot.
6696 // If the run queue is full, runnext puts g on the global queue. Executed only by the owner P.
6697 func runqput(pp *p, gp *g, next bool) {
6698 if !haveSysmon && next {
6699
6700
6701
6702
6703
6704
6705
6706
6707 next = false
6708 }
6709 if randomizeScheduler && next && randn(2) == 0 {
6710 next = false
6711 }
6712
6713 if next {
6714 retryNext:
6715 oldnext := pp.runnext
6716 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
6717 goto retryNext
6718 }
6719 if oldnext == 0 {
6720 return
6721 }
6722
6723 gp = oldnext.ptr()
6724 }
6725
6726 retry:
6727 h := atomic.LoadAcq(&pp.runqhead)
6728 t := pp.runqtail
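// The local run queue is a fixed-size ring indexed modulo len(pp.runq).
// runqhead is advanced by consumers (the owner's runqget and other Ps'
// runqgrab), runqtail only by the owner here; the acquire load above and the
// release store below keep the slot contents visible to stealers.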
6729 if t-h < uint32(len(pp.runq)) {
6730 pp.runq[t%uint32(len(pp.runq))].set(gp)
6731 atomic.StoreRel(&pp.runqtail, t+1)
6732 return
6733 }
6734 if runqputslow(pp, gp, h, t) {
6735 return
6736 }
6737
6738 goto retry
6739 }
6740
6741
6742
6743 func runqputslow(pp *p, gp *g, h, t uint32) bool {
6744 var batch [len(pp.runq)/2 + 1]*g
6745
6746
6747 n := t - h
6748 n = n / 2
6749 if n != uint32(len(pp.runq)/2) {
6750 throw("runqputslow: queue is not full")
6751 }
6752 for i := uint32(0); i < n; i++ {
6753 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6754 }
6755 if !atomic.CasRel(&pp.runqhead, h, h+n) {
6756 return false
6757 }
6758 batch[n] = gp
6759
6760 if randomizeScheduler {
6761 for i := uint32(1); i <= n; i++ {
6762 j := cheaprandn(i + 1)
6763 batch[i], batch[j] = batch[j], batch[i]
6764 }
6765 }
6766
6767
6768 for i := uint32(0); i < n; i++ {
6769 batch[i].schedlink.set(batch[i+1])
6770 }
6771 var q gQueue
6772 q.head.set(batch[0])
6773 q.tail.set(batch[n])
6774
6775
6776 lock(&sched.lock)
6777 globrunqputbatch(&q, int32(n+1))
6778 unlock(&sched.lock)
6779 return true
6780 }
6781
6782 // runqputbatch tries to put all the Gs on q on the local runnable queue.
6783 // If the local queue is full, the remainder is put on the global queue;
6784 // in that case this will temporarily acquire sched.lock.
6785 // Executed only by the owner P.
6786 func runqputbatch(pp *p, q *gQueue, qsize int) {
6787 h := atomic.LoadAcq(&pp.runqhead)
6788 t := pp.runqtail
6789 n := uint32(0)
6790 for !q.empty() && t-h < uint32(len(pp.runq)) {
6791 gp := q.pop()
6792 pp.runq[t%uint32(len(pp.runq))].set(gp)
6793 t++
6794 n++
6795 }
6796 qsize -= int(n)
6797
6798 if randomizeScheduler {
6799 off := func(o uint32) uint32 {
6800 return (pp.runqtail + o) % uint32(len(pp.runq))
6801 }
6802 for i := uint32(1); i < n; i++ {
6803 j := cheaprandn(i + 1)
6804 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
6805 }
6806 }
6807
6808 atomic.StoreRel(&pp.runqtail, t)
6809 if !q.empty() {
6810 lock(&sched.lock)
6811 globrunqputbatch(q, int32(qsize))
6812 unlock(&sched.lock)
6813 }
6814 }
6815
6816 // Get g from the local runnable queue.
6817 // If inheritTime is true, gp should inherit the remaining time in the
6818 // current time slice. Otherwise, it should start a new time slice.
6819 // Executed only by the owner P.
6820 func runqget(pp *p) (gp *g, inheritTime bool) {
6821 // If there's a runnext, it's the next G to run.
6822 next := pp.runnext
6823 // If the runnext CAS fails, it could only have been stolen by another P,
6824 // because other Ps can race to set runnext to 0, but only the current P
6825 // can set it to non-0. Hence, there's no need to retry this CAS if it fails.
6826 if next != 0 && pp.runnext.cas(next, 0) {
6827 return next.ptr(), true
6828 }
6829
6830 for {
6831 h := atomic.LoadAcq(&pp.runqhead)
6832 t := pp.runqtail
6833 if t == h {
6834 return nil, false
6835 }
6836 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
6837 if atomic.CasRel(&pp.runqhead, h, h+1) {
6838 return gp, false
6839 }
6840 }
6841 }
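// The local run queue is a fixed-size ring: only the owner P writes runqtail,
// while runqhead is advanced both by the owner and by stealing Ps via CAS.
// A reduced standalone sketch of that scheme; localQueue and its methods are
// invented names, sync/atomic's sequentially consistent operations stand in
// for the runtime's LoadAcq/StoreRel/CasRel, and runnext plus the
// spill-to-global path are omitted.
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	// localQueue is a fixed-size single-producer ring. The owner appends at
//	// tail; the owner and thieves both consume at head, so head moves by CAS.
//	type localQueue struct {
//		head atomic.Uint32
//		tail atomic.Uint32
//		buf  [256]int // the runtime stores guintptrs here
//	}
//
//	// put appends v, reporting false if the ring is full (the runtime then
//	// spills half of the queue to the global run queue via runqputslow).
//	func (q *localQueue) put(v int) bool {
//		h := q.head.Load()
//		t := q.tail.Load()
//		if t-h >= uint32(len(q.buf)) {
//			return false
//		}
//		q.buf[t%uint32(len(q.buf))] = v
//		q.tail.Store(t + 1) // publish the filled slot
//		return true
//	}
//
//	// get removes the oldest element, retrying if a thief won the CAS on head.
//	func (q *localQueue) get() (int, bool) {
//		for {
//			h := q.head.Load()
//			t := q.tail.Load()
//			if t == h {
//				return 0, false
//			}
//			v := q.buf[h%uint32(len(q.buf))]
//			if q.head.CompareAndSwap(h, h+1) {
//				return v, true
//			}
//		}
//	}
//
//	func main() {
//		var q localQueue
//		for i := 1; i <= 3; i++ {
//			q.put(i)
//		}
//		for v, ok := q.get(); ok; v, ok = q.get() {
//			fmt.Println(v) // 1, 2, 3
//		}
//	}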
6842
6843 // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
6844 // Executed only by the owner P.
6845 func runqdrain(pp *p) (drainQ gQueue, n uint32) {
6846 oldNext := pp.runnext
6847 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
6848 drainQ.pushBack(oldNext.ptr())
6849 n++
6850 }
6851
6852 retry:
6853 h := atomic.LoadAcq(&pp.runqhead)
6854 t := pp.runqtail
6855 qn := t - h
6856 if qn == 0 {
6857 return
6858 }
6859 if qn > uint32(len(pp.runq)) {
6860 goto retry
6861 }
6862
6863 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
6864 goto retry
6865 }
6866
6867 // We've inverted the usual order: the head pointer is advanced before the
6868 // Gs are copied out, because we don't want to mess with the Gs' state
6869 // while runqdrain and runqsteal run in parallel. By advancing the head
6870 // first we take full ownership of the drained slots, so gp.schedlink is
6871 // only written after other Ps can no longer see (and steal) those Gs from
6872 // the local queue.
6873
6874 for i := uint32(0); i < qn; i++ {
6875 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
6876 drainQ.pushBack(gp)
6877 n++
6878 }
6879 return
6880 }
6881
6882 // Grabs a batch of goroutines from pp's runnable queue into batch.
6883 // Batch is a ring buffer starting at batchHead.
6884 // Returns the number of grabbed goroutines.
6885 // Can be executed by any P.
6886 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
6887 for {
6888 h := atomic.LoadAcq(&pp.runqhead)
6889 t := atomic.LoadAcq(&pp.runqtail)
6890 n := t - h
6891 n = n - n/2
6892 if n == 0 {
6893 if stealRunNextG {
6894 // Try to steal from pp.runnext.
6895 if next := pp.runnext; next != 0 {
6896 if pp.status == _Prunning {
6897 // Sleep to ensure that pp isn't about to run the g
6898 // we are about to steal.
6899 // The important use case here is when the g running
6900 // on pp ready()s another g and then almost
6901 // immediately blocks. Instead of stealing runnext
6902 // in this window, back off to give pp a chance to
6903 // schedule runnext. This will avoid thrashing gs
6904 // between different Ps.
6905 // A sync chan send/recv takes ~50ns as of time of
6906 // writing, so 3us gives ~50x overshoot.
6907 if !osHasLowResTimer {
6908 usleep(3)
6909 } else {
6910 // On some platforms system timer granularity is
6911 // 1-15ms, which is way too much for this
6912 // optimization. So just yield.
6913 osyield()
6914 }
6915 }
6916 if !pp.runnext.cas(next, 0) {
6917 continue
6918 }
6919 batch[batchHead%uint32(len(batch))] = next
6920 return 1
6921 }
6922 }
6923 return 0
6924 }
6925 if n > uint32(len(pp.runq)/2) {
6926 continue
6927 }
6928 for i := uint32(0); i < n; i++ {
6929 g := pp.runq[(h+i)%uint32(len(pp.runq))]
6930 batch[(batchHead+i)%uint32(len(batch))] = g
6931 }
6932 if atomic.CasRel(&pp.runqhead, h, h+n) {
6933 return n
6934 }
6935 }
6936 }
6937
6938 // Steal half of the elements from the local runnable queue of p2
6939 // and put them onto the local runnable queue of pp.
6940 // Returns one of the stolen elements (or nil if the steal failed).
6941 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
6942 t := pp.runqtail
6943 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
6944 if n == 0 {
6945 return nil
6946 }
6947 n--
6948 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
6949 if n == 0 {
6950 return gp
6951 }
6952 h := atomic.LoadAcq(&pp.runqhead)
6953 if t-h+n >= uint32(len(pp.runq)) {
6954 throw("runqsteal: runq overflow")
6955 }
6956 atomic.StoreRel(&pp.runqtail, t+n)
6957 return gp
6958 }
6959
6960 // A gQueue is a queue of Gs linked through g.schedlink. A G can only
6961 // be on one gQueue or gList at a time.
6962 type gQueue struct {
6963 head guintptr
6964 tail guintptr
6965 }
6966
6967 // empty reports whether q is empty.
6968 func (q *gQueue) empty() bool {
6969 return q.head == 0
6970 }
6971
6972 // push adds gp to the head of q.
6973 func (q *gQueue) push(gp *g) {
6974 gp.schedlink = q.head
6975 q.head.set(gp)
6976 if q.tail == 0 {
6977 q.tail.set(gp)
6978 }
6979 }
6980
6981 // pushBack adds gp to the tail of q.
6982 func (q *gQueue) pushBack(gp *g) {
6983 gp.schedlink = 0
6984 if q.tail != 0 {
6985 q.tail.ptr().schedlink.set(gp)
6986 } else {
6987 q.head.set(gp)
6988 }
6989 q.tail.set(gp)
6990 }
6991
6992 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
6993 // not be used.
6994 func (q *gQueue) pushBackAll(q2 gQueue) {
6995 if q2.tail == 0 {
6996 return
6997 }
6998 q2.tail.ptr().schedlink = 0
6999 if q.tail != 0 {
7000 q.tail.ptr().schedlink = q2.head
7001 } else {
7002 q.head = q2.head
7003 }
7004 q.tail = q2.tail
7005 }
7006
7007 // pop removes and returns the head of queue q. It returns nil if
7008 // q is empty.
7009 func (q *gQueue) pop() *g {
7010 gp := q.head.ptr()
7011 if gp != nil {
7012 q.head = gp.schedlink
7013 if q.head == 0 {
7014 q.tail = 0
7015 }
7016 }
7017 return gp
7018 }
7019
7020 // popList takes all Gs in q and returns them as a gList.
7021 func (q *gQueue) popList() gList {
7022 stack := gList{q.head}
7023 *q = gQueue{}
7024 return stack
7025 }
7026
7027 // A gList is a list of Gs linked through g.schedlink. A G can only be
7028 // on one gQueue or gList at a time.
7029 type gList struct {
7030 head guintptr
7031 }
7032
7033 // empty reports whether l is empty.
7034 func (l *gList) empty() bool {
7035 return l.head == 0
7036 }
7037
7038 // push adds gp to the head of l.
7039 func (l *gList) push(gp *g) {
7040 gp.schedlink = l.head
7041 l.head.set(gp)
7042 }
7043
7044 // pushAll prepends all Gs in q to l.
7045 func (l *gList) pushAll(q gQueue) {
7046 if !q.empty() {
7047 q.tail.ptr().schedlink = l.head
7048 l.head = q.head
7049 }
7050 }
7051
7052 // pop removes and returns the head of l. If l is empty, it returns nil.
7053 func (l *gList) pop() *g {
7054 gp := l.head.ptr()
7055 if gp != nil {
7056 l.head = gp.schedlink
7057 }
7058 return gp
7059 }
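// gQueue and gList are intrusive containers: a g carries its own schedlink
// field, so queue operations never allocate and whole lists can be spliced in
// O(1). A standalone sketch with an invented node type playing the role of g.
//
//	package main
//
//	import "fmt"
//
//	// node stands in for g; next plays the role of g.schedlink, so a node can
//	// sit on a queue without any per-element allocation.
//	type node struct {
//		val  int
//		next *node
//	}
//
//	// queue is a FIFO of nodes linked through node.next, like gQueue.
//	type queue struct {
//		head, tail *node
//	}
//
//	// pushBack adds n to the tail (compare gQueue.pushBack above).
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//	}
//
//	// pushBackAll splices all of q2 onto q in O(1) (compare gQueue.pushBackAll).
//	func (q *queue) pushBackAll(q2 queue) {
//		if q2.tail == nil {
//			return
//		}
//		q2.tail.next = nil
//		if q.tail != nil {
//			q.tail.next = q2.head
//		} else {
//			q.head = q2.head
//		}
//		q.tail = q2.tail
//	}
//
//	// pop removes and returns the head, or nil (compare gQueue.pop).
//	func (q *queue) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//		}
//		return n
//	}
//
//	func main() {
//		var a, b queue
//		a.pushBack(&node{val: 1})
//		a.pushBack(&node{val: 2})
//		b.pushBack(&node{val: 3})
//		a.pushBackAll(b)
//		for n := a.pop(); n != nil; n = a.pop() {
//			fmt.Print(n.val, " ") // 1 2 3
//		}
//		fmt.Println()
//	}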
7060
7061 // setMaxThreads is the implementation of runtime/debug.SetMaxThreads.
7062 func setMaxThreads(in int) (out int) {
7063 lock(&sched.lock)
7064 out = int(sched.maxmcount)
7065 if in > 0x7fffffff {
7066 sched.maxmcount = 0x7fffffff
7067 } else {
7068 sched.maxmcount = int32(in)
7069 }
7070 checkmcount()
7071 unlock(&sched.lock)
7072 return
7073 }
7074
7075
7076
7077
7078
7079 // procPin pins the current goroutine to its P: it disables preemption
7080 // by incrementing m.locks and returns the id of the P the goroutine is
7081 // running on. The caller must call procUnpin to undo it.
7082 //
7083 // procPin is accessed via linkname by widely used third-party packages,
7084 // so its signature must not change.
7085 //
7086 //go:nosplit
7087 func procPin() int {
7088 gp := getg()
7089 mp := gp.m
7090
7091 mp.locks++
7092 return int(mp.p.ptr().id)
7093 }
7094
7095
7096
7097
7098
7099
7100
7101 // procUnpin undoes the effect of procPin: it decrements m.locks,
7102 // re-enabling preemption of the current goroutine.
7103 //
7104 // procUnpin is accessed via linkname by widely used third-party packages,
7105 // so its signature must not change.
7106 //go:nosplit
7107 func procUnpin() {
7108 gp := getg()
7109 gp.m.locks--
7110 }
7111
7112 //go:linkname sync_runtime_procPin sync.runtime_procPin
7113 //go:nosplit
7114 func sync_runtime_procPin() int {
7115 return procPin()
7116 }
7117
7118 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
7119 //go:nosplit
7120 func sync_runtime_procUnpin() {
7121 procUnpin()
7122 }
7123
7124 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
7125 //go:nosplit
7126 func sync_atomic_runtime_procPin() int {
7127 return procPin()
7128 }
7129
7130 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
7131 //go:nosplit
7132 func sync_atomic_runtime_procUnpin() {
7133 procUnpin()
7134 }
7135
7136
7137
7138
7139
7140
7141
7142 // Active spinning for sync.Mutex.
7143 //
7144 // sync_runtime_canSpin reports whether spinning makes sense at this point
7145 // for a goroutine contending on a sync.Mutex. It is accessed via linkname
7146 // by widely used packages, so do not change its signature.
7147 //
7148 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
7149 //go:nosplit
7150 func sync_runtime_canSpin(i int) bool {
7151 // sync.Mutex is cooperative, so we are conservative with spinning.
7152 // Spin only a few times and only if running on a multicore machine and
7153 // GOMAXPROCS>1 and there is at least one other running P and the local runq is empty.
7154 // As opposed to runtime mutex we don't do passive spinning here,
7155 // because there can be work on the global runq or on other Ps.
7156 if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7157 return false
7158 }
7159 if p := getg().m.p.ptr(); !runqempty(p) {
7160 return false
7161 }
7162 return true
7163 }
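// sync_runtime_canSpin and sync_runtime_doSpin are the hooks sync.Mutex uses
// to choose between a short active spin and giving up the processor. A
// standalone sketch of the general spin-then-yield pattern; spinLock and its
// budget of 4 spins are invented for the example and do not reflect how
// sync.Mutex itself is structured.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"sync/atomic"
//	)
//
//	// spinLock is a test-and-set lock that spins briefly before yielding.
//	type spinLock struct{ state atomic.Int32 }
//
//	func (l *spinLock) lock() {
//		spins := 0
//		for !l.state.CompareAndSwap(0, 1) {
//			if spins < 4 && runtime.NumCPU() > 1 {
//				// "doSpin": burn a few cycles hoping the holder releases soon.
//				for i := 0; i < 30; i++ {
//				}
//				spins++
//				continue
//			}
//			// Spinning budget exhausted ("canSpin" says no): yield instead.
//			runtime.Gosched()
//		}
//	}
//
//	func (l *spinLock) unlock() { l.state.Store(0) }
//
//	func main() {
//		var l spinLock
//		done := make(chan struct{})
//		l.lock()
//		go func() {
//			l.lock()
//			fmt.Println("goroutine acquired the lock")
//			l.unlock()
//			close(done)
//		}()
//		l.unlock()
//		<-done
//	}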
7164
7165
7166
7167
7168
7169
7170
7171
7172 // sync_runtime_doSpin performs a short busy-wait (active spin) for
7173 // sync.Mutex. It is accessed via linkname by widely used packages,
7174 // so do not change its signature.
7175 //
7176 //go:nosplit
7177 func sync_runtime_doSpin() {
7178 procyield(active_spin_cnt)
7179 }
7180
7181 var stealOrder randomOrder
7182
7183 // randomOrder/randomEnum are helper types for randomized work stealing.
7184 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
7185 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
7186 // are coprime, then a sequence of (i + X) % GOMAXPROCS gives the required enumeration.
7187 type randomOrder struct {
7188 count uint32
7189 coprimes []uint32
7190 }
7191
7192 type randomEnum struct {
7193 i uint32
7194 count uint32
7195 pos uint32
7196 inc uint32
7197 }
7198
7199 func (ord *randomOrder) reset(count uint32) {
7200 ord.count = count
7201 ord.coprimes = ord.coprimes[:0]
7202 for i := uint32(1); i <= count; i++ {
7203 if gcd(i, count) == 1 {
7204 ord.coprimes = append(ord.coprimes, i)
7205 }
7206 }
7207 }
7208
7209 func (ord *randomOrder) start(i uint32) randomEnum {
7210 return randomEnum{
7211 count: ord.count,
7212 pos: i % ord.count,
7213 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7214 }
7215 }
7216
7217 func (enum *randomEnum) done() bool {
7218 return enum.i == enum.count
7219 }
7220
7221 func (enum *randomEnum) next() {
7222 enum.i++
7223 enum.pos = (enum.pos + enum.inc) % enum.count
7224 }
7225
7226 func (enum *randomEnum) position() uint32 {
7227 return enum.pos
7228 }
7229
7230 func gcd(a, b uint32) uint32 {
7231 for b != 0 {
7232 a, b = b, a%b
7233 }
7234 return a
7235 }
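// randomOrder relies on a number-theory fact: stepping through 0..count-1 by
// a stride that is coprime with count visits every position exactly once
// before repeating. A standalone sketch demonstrating this for count = 6;
// walk is an invented helper, not part of the runtime.
//
//	package main
//
//	import "fmt"
//
//	func gcd(a, b uint32) uint32 {
//		for b != 0 {
//			a, b = b, a%b
//		}
//		return a
//	}
//
//	// walk enumerates 0..count-1 starting at start%count, stepping by a
//	// stride that is coprime with count, so every index is visited once.
//	func walk(count, start, stride uint32) []uint32 {
//		pos := start % count
//		order := make([]uint32, 0, count)
//		for i := uint32(0); i < count; i++ {
//			order = append(order, pos)
//			pos = (pos + stride) % count
//		}
//		return order
//	}
//
//	func main() {
//		const count = 6
//		for stride := uint32(1); stride <= count; stride++ {
//			if gcd(stride, count) == 1 {
//				fmt.Println(stride, walk(count, 2, stride))
//			}
//		}
//		// Each printed order contains every index 0..5 exactly once.
//	}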
7236
7237 // An initTask represents the set of initializations that need to be done
7238 // for a package.
7239 type initTask struct {
7240 state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
7241 nfns uint32
7242 // followed by nfns pcs, uintptr sized, one per init function to run
7243 }
7244
7245 // inittrace stores statistics for init functions which are
7246 // updated by malloc and newproc when active is true.
7247 var inittrace tracestat
7248
7249 type tracestat struct {
7250 active bool // init tracing activation status
7251 id uint64 // init goroutine id
7252 allocs uint64 // heap allocations
7253 bytes uint64 // heap allocated bytes
7254 }
7255
7256 func doInit(ts []*initTask) {
7257 for _, t := range ts {
7258 doInit1(t)
7259 }
7260 }
7261
7262 func doInit1(t *initTask) {
7263 switch t.state {
7264 case 2:
7265 return
7266 case 1:
7267 throw("recursive call during initialization - linker skew")
7268 default:
7269 t.state = 1
7270
7271 var (
7272 start int64
7273 before tracestat
7274 )
7275
7276 if inittrace.active {
7277 start = nanotime()
7278 // Load stats non-atomically since inittrace is updated only by this init goroutine.
7279 before = inittrace
7280 }
7281
7282 if t.nfns == 0 {
7283 // We should have pruned all of these in the linker.
7284 throw("inittask with no functions")
7285 }
7286
7287 firstFunc := add(unsafe.Pointer(t), 8)
7288 for i := uint32(0); i < t.nfns; i++ {
7289 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7290 f := *(*func())(unsafe.Pointer(&p))
7291 f()
7292 }
7293
7294 if inittrace.active {
7295 end := nanotime()
7296
7297 after := inittrace
7298
7299 f := *(*func())(unsafe.Pointer(&firstFunc))
7300 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7301
7302 var sbuf [24]byte
7303 print("init ", pkg, " @")
7304 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7305 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7306 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7307 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7308 print("\n")
7309 }
7310
7311 t.state = 2
7312 }
7313 }
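// doInit1 walks a linker-generated blob: an 8-byte header (state, nfns)
// followed immediately by nfns pointer-sized slots, one per init function,
// reached with raw pointer arithmetic. A standalone sketch of the same
// "header followed by slots" walk; task and header are invented for the
// example and store ordinary func values, whereas the real initTask slots
// hold raw PCs emitted by the linker.
//
//	package main
//
//	import (
//		"fmt"
//		"unsafe"
//	)
//
//	// header mirrors the two fixed fields of initTask.
//	type header struct {
//		state uint32
//		nfns  uint32
//	}
//
//	// task is one concrete layout: the header followed by the function slots.
//	type task struct {
//		header
//		fns [3]func()
//	}
//
//	func main() {
//		t := task{header: header{nfns: 3}}
//		for i := range t.fns {
//			i := i
//			t.fns[i] = func() { fmt.Println("init fn", i) }
//		}
//
//		// Walk the slots with pointer arithmetic, as doInit1 does: skip the
//		// 8-byte header, then step one pointer-sized slot per function.
//		first := unsafe.Add(unsafe.Pointer(&t), unsafe.Sizeof(t.header))
//		for i := uintptr(0); i < uintptr(t.nfns); i++ {
//			p := unsafe.Add(first, i*unsafe.Sizeof(t.fns[0]))
//			f := *(*func())(p)
//			f()
//		}
//	}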
7314