LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #include "runtime.h" | 5 #include "runtime.h" |
6 #include "arch_GOARCH.h" | 6 #include "arch_GOARCH.h" |
7 #include "malloc.h" | 7 #include "malloc.h" |
8 #include "stack.h" | 8 #include "stack.h" |
9 #include "race.h" | 9 #include "race.h" |
10 #include "type.h" | 10 #include "type.h" |
| 11 #include "../../cmd/ld/textflag.h" |
11 | 12 |
12 // Goroutine scheduler | 13 // Goroutine scheduler |
13 // The scheduler's job is to distribute ready-to-run goroutines over worker thre
ads. | 14 // The scheduler's job is to distribute ready-to-run goroutines over worker thre
ads. |
14 // | 15 // |
15 // The main concepts are: | 16 // The main concepts are: |
16 // G - goroutine. | 17 // G - goroutine. |
17 // M - worker thread, or machine. | 18 // M - worker thread, or machine. |
18 // P - processor, a resource that is required to execute Go code. | 19 // P - processor, a resource that is required to execute Go code. |
19 // M must have an associated P to execute Go code, however it can be | 20 // M must have an associated P to execute Go code, however it can be |
20 // blocked or in a syscall w/o an associated P. | 21 // blocked or in a syscall w/o an associated P. |
21 // | 22 // |
22 // Design doc at http://golang.org/s/go11sched. | 23 // Design doc at http://golang.org/s/go11sched. |
23 | 24 |
24 typedef struct Sched Sched; | 25 typedef struct Sched Sched; |
25 struct Sched { | 26 struct Sched { |
26 Lock; | 27 Lock; |
27 | 28 |
28 uint64 goidgen; | 29 uint64 goidgen; |
29 | 30 |
30 M* midle; // idle m's waiting for work | 31 M* midle; // idle m's waiting for work |
31 int32 nmidle; // number of idle m's waiting for work | 32 int32 nmidle; // number of idle m's waiting for work |
32 » int32» mlocked; // number of locked m's waiting for work | 33 » int32» nmidlelocked; // number of locked m's waiting for work |
33 int32 mcount; // number of m's that have been created | 34 int32 mcount; // number of m's that have been created |
34 | 35 |
35 P* pidle; // idle P's | 36 P* pidle; // idle P's |
36 uint32 npidle; | 37 uint32 npidle; |
37 uint32 nmspinning; | 38 uint32 nmspinning; |
38 | 39 |
39 // Global runnable queue. | 40 // Global runnable queue. |
40 G* runqhead; | 41 G* runqhead; |
41 G* runqtail; | 42 G* runqtail; |
42 int32 runqsize; | 43 int32 runqsize; |
(...skipping 11 matching lines...) Expand all Loading... |
54 int32 profilehz; // cpu profiling rate | 55 int32 profilehz; // cpu profiling rate |
55 }; | 56 }; |
56 | 57 |
57 // The max value of GOMAXPROCS. | 58 // The max value of GOMAXPROCS. |
58 // There are no fundamental restrictions on the value. | 59 // There are no fundamental restrictions on the value. |
59 enum { MaxGomaxprocs = 1<<8 }; | 60 enum { MaxGomaxprocs = 1<<8 }; |
60 | 61 |
61 Sched runtime·sched; | 62 Sched runtime·sched; |
62 int32 runtime·gomaxprocs; | 63 int32 runtime·gomaxprocs; |
63 uint32 runtime·needextram; | 64 uint32 runtime·needextram; |
64 bool runtime·singleproc; | |
65 bool runtime·iscgo; | 65 bool runtime·iscgo; |
66 uint8 runtime·islibrary; // set by 5l in shared library mode | 66 uint8 runtime·islibrary; // set by 5l in shared library mode |
67 uint32 runtime·gcwaiting; | 67 uint32 runtime·gcwaiting; |
68 M runtime·m0; | 68 M runtime·m0; |
69 G runtime·g0; // idle goroutine for m0 | 69 G runtime·g0; // idle goroutine for m0 |
70 G* runtime·allg; | 70 G* runtime·allg; |
71 G* runtime·lastg; | 71 G* runtime·lastg; |
72 M* runtime·allm; | 72 M* runtime·allm; |
73 M* runtime·extram; | 73 M* runtime·extram; |
74 int8* runtime·goos; | 74 int8* runtime·goos; |
75 int32 runtime·ncpu; | 75 int32 runtime·ncpu; |
76 static int32 newprocs; | 76 static int32 newprocs; |
77 | 77 |
78 void runtime·mstart(void); | 78 void runtime·mstart(void); |
79 static void runqput(P*, G*); | 79 static void runqput(P*, G*); |
80 static G* runqget(P*); | 80 static G* runqget(P*); |
81 static void runqgrow(P*); | 81 static void runqgrow(P*); |
82 static G* runqsteal(P*, P*); | 82 static G* runqsteal(P*, P*); |
83 static void mput(M*); | 83 static void mput(M*); |
84 static M* mget(void); | 84 static M* mget(void); |
85 static void mcommoninit(M*); | 85 static void mcommoninit(M*); |
86 static void schedule(void); | 86 static void schedule(void); |
87 static void procresize(int32); | 87 static void procresize(int32); |
88 static void acquirep(P*); | 88 static void acquirep(P*); |
89 static P* releasep(void); | 89 static P* releasep(void); |
90 static void newm(void(*)(void), P*); | 90 static void newm(void(*)(void), P*); |
91 static void goidle(void); | |
92 static void stopm(void); | 91 static void stopm(void); |
93 static void startm(P*, bool); | 92 static void startm(P*, bool); |
94 static void handoffp(P*); | 93 static void handoffp(P*); |
95 static void wakep(void); | 94 static void wakep(void); |
96 static void stoplockedm(void); | 95 static void stoplockedm(void); |
97 static void startlockedm(G*); | 96 static void startlockedm(G*); |
98 static void sysmon(void); | 97 static void sysmon(void); |
99 static uint32 retake(uint32*); | 98 static uint32 retake(int64); |
100 static void inclocked(int32); | 99 static void incidlelocked(int32); |
101 static void checkdead(void); | 100 static void checkdead(void); |
102 static void exitsyscall0(G*); | 101 static void exitsyscall0(G*); |
103 static void park0(G*); | 102 static void park0(G*); |
104 static void gosched0(G*); | |
105 static void goexit0(G*); | 103 static void goexit0(G*); |
106 static void gfput(P*, G*); | 104 static void gfput(P*, G*); |
107 static G* gfget(P*); | 105 static G* gfget(P*); |
108 static void gfpurge(P*); | 106 static void gfpurge(P*); |
109 static void globrunqput(G*); | 107 static void globrunqput(G*); |
110 static G* globrunqget(P*, int32); | 108 static G* globrunqget(P*, int32); |
111 static P* pidleget(void); | 109 static P* pidleget(void); |
112 static void pidleput(P*); | 110 static void pidleput(P*); |
113 static void injectglist(G*); | 111 static void injectglist(G*); |
114 static void preemptall(void); | 112 static bool preemptall(void); |
115 static void preemptone(P*); | 113 static bool preemptone(P*); |
| 114 static bool exitsyscallfast(void); |
116 | 115 |
117 // The bootstrap sequence is: | 116 // The bootstrap sequence is: |
118 // | 117 // |
119 // call osinit | 118 // call osinit |
120 // call schedinit | 119 // call schedinit |
121 // make & queue new G | 120 // make & queue new G |
122 // call runtime·mstart | 121 // call runtime·mstart |
123 // | 122 // |
124 // The new G calls runtime·main. | 123 // The new G calls runtime·main. |
125 void | 124 void |
126 runtime·schedinit(void) | 125 runtime·schedinit(void) |
127 { | 126 { |
128 int32 n, procs; | 127 int32 n, procs; |
129 byte *p; | 128 byte *p; |
130 | 129 |
131 m->nomemprof++; | 130 m->nomemprof++; |
132 runtime·mprofinit(); | 131 runtime·mprofinit(); |
133 runtime·mallocinit(); | 132 runtime·mallocinit(); |
134 mcommoninit(m); | 133 mcommoninit(m); |
135 | 134 |
136 runtime·goargs(); | 135 runtime·goargs(); |
137 runtime·goenvs(); | 136 runtime·goenvs(); |
| 137 runtime·parsedebugvars(); |
138 | 138 |
139 // Allocate internal symbol table representation now, we need it for GC
anyway. | 139 // Allocate internal symbol table representation now, we need it for GC
anyway. |
140 runtime·symtabinit(); | 140 runtime·symtabinit(); |
141 | 141 |
142 runtime·sched.lastpoll = runtime·nanotime(); | 142 runtime·sched.lastpoll = runtime·nanotime(); |
143 procs = 1; | 143 procs = 1; |
144 p = runtime·getenv("GOMAXPROCS"); | 144 p = runtime·getenv("GOMAXPROCS"); |
145 if(p != nil && (n = runtime·atoi(p)) > 0) { | 145 if(p != nil && (n = runtime·atoi(p)) > 0) { |
146 if(n > MaxGomaxprocs) | 146 if(n > MaxGomaxprocs) |
147 n = MaxGomaxprocs; | 147 n = MaxGomaxprocs; |
148 procs = n; | 148 procs = n; |
149 } | 149 } |
150 runtime·allp = runtime·malloc((MaxGomaxprocs+1)*sizeof(runtime·allp[0]))
; | 150 runtime·allp = runtime·malloc((MaxGomaxprocs+1)*sizeof(runtime·allp[0]))
; |
151 procresize(procs); | 151 procresize(procs); |
152 | 152 |
153 mstats.enablegc = 1; | 153 mstats.enablegc = 1; |
154 m->nomemprof--; | 154 m->nomemprof--; |
155 | 155 |
156 if(raceenabled) | 156 if(raceenabled) |
157 g->racectx = runtime·raceinit(); | 157 g->racectx = runtime·raceinit(); |
158 } | 158 } |
159 | 159 |
160 extern void main·init(void); | 160 extern void main·init(void); |
161 extern void main·main(void); | 161 extern void main·main(void); |
162 | 162 |
163 static FuncVal scavenger = {runtime·MHeap_Scavenger}; | 163 static FuncVal scavenger = {runtime·MHeap_Scavenger}; |
164 | 164 |
| 165 static FuncVal initDone = { runtime·unlockOSThread }; |
| 166 |
165 // Initalize runtime in shared library mode | 167 // Initalize runtime in shared library mode |
166 extern void runtime·lib_init(void); | 168 extern void runtime·lib_init(void); |
167 | 169 |
168 // On library mode, _cgo_lib_init is called on first cgo callback | 170 // On library mode, _cgo_lib_init is called on first cgo callback |
169 void *_cgo_lib_init; | 171 void *_cgo_lib_init; |
170 // _cgo_lib_setup is called in library mode to hand cgo the runtime entry point | 172 // _cgo_lib_setup is called in library mode to hand cgo the runtime entry point |
171 void *_cgo_lib_setup; | 173 void *_cgo_lib_setup; |
172 // _cgo_lib_init_done is used is used by the runtime in shared library mode to· | 174 // _cgo_lib_init_done is used is used by the runtime in shared library mode to· |
173 // signal that runtime initialization is done. | 175 // signal that runtime initialization is done. |
174 void (*_cgo_lib_init_done)(void*); | 176 void (*_cgo_lib_init_done)(void*); |
175 | 177 |
176 // The main goroutine. | 178 // The main goroutine. |
177 void | 179 void |
178 runtime·main(void) | 180 runtime·main(void) |
179 { | 181 { |
| 182 Defer d; |
| 183 |
180 newm(sysmon, nil); | 184 newm(sysmon, nil); |
181 | 185 |
182 // Lock the main goroutine onto this, the main OS thread, | 186 // Lock the main goroutine onto this, the main OS thread, |
183 // during initialization. Most programs won't care, but a few | 187 // during initialization. Most programs won't care, but a few |
184 // do require certain calls to be made by the main thread. | 188 // do require certain calls to be made by the main thread. |
185 // Those can arrange for main.main to run in the main thread | 189 // Those can arrange for main.main to run in the main thread |
186 // by calling runtime.LockOSThread during initialization | 190 // by calling runtime.LockOSThread during initialization |
187 // to preserve the lock. | 191 // to preserve the lock. |
188 runtime·lockOSThread(); | 192 runtime·lockOSThread(); |
| 193 ········ |
| 194 // Defer unlock so that runtime.Goexit during init does the unlock too. |
| 195 d.fn = &initDone; |
| 196 d.siz = 0; |
| 197 d.link = g->defer; |
| 198 d.argp = (void*)-1; |
| 199 d.special = true; |
| 200 d.free = false; |
| 201 g->defer = &d; |
| 202 |
189 if(m != &runtime·m0) | 203 if(m != &runtime·m0) |
190 runtime·throw("runtime·main not on m0"); | 204 runtime·throw("runtime·main not on m0"); |
191 runtime·newproc1(&scavenger, nil, 0, 0, runtime·main); | 205 runtime·newproc1(&scavenger, nil, 0, 0, runtime·main); |
192 main·init(); | 206 main·init(); |
| 207 |
| 208 if(g->defer != &d || d.fn != &initDone) |
| 209 runtime·throw("runtime: bad defer entry after init"); |
| 210 g->defer = d.link; |
193 runtime·unlockOSThread(); | 211 runtime·unlockOSThread(); |
194 | 212 |
195 main·main(); | 213 main·main(); |
196 if(runtime·islibrary) { | 214 if(runtime·islibrary) { |
197 // Allocate extra m now, because the foreign main program might
call into Go | 215 // Allocate extra m now, because the foreign main program might
call into Go |
198 // before Go calls any cgo functions. | 216 // before Go calls any cgo functions. |
199 if(runtime·needextram && runtime·cas(&runtime·needextram, 1, 0)) | 217 if(runtime·needextram && runtime·cas(&runtime·needextram, 1, 0)) |
200 runtime·newextram(); | 218 runtime·newextram(); |
201 runtime·asmcgocall(_cgo_lib_init_done, nil); | 219 runtime·asmcgocall(_cgo_lib_init_done, nil); |
202 return; | 220 return; |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
248 runtime·printf("goroutine %D [%s]:\n", gp->goid, status); | 266 runtime·printf("goroutine %D [%s]:\n", gp->goid, status); |
249 } | 267 } |
250 | 268 |
251 void | 269 void |
252 runtime·tracebackothers(G *me) | 270 runtime·tracebackothers(G *me) |
253 { | 271 { |
254 G *gp; | 272 G *gp; |
255 int32 traceback; | 273 int32 traceback; |
256 | 274 |
257 traceback = runtime·gotraceback(nil); | 275 traceback = runtime·gotraceback(nil); |
| 276 ········ |
| 277 // Show the current goroutine first, if we haven't already. |
| 278 if((gp = m->curg) != nil && gp != me) { |
| 279 runtime·printf("\n"); |
| 280 runtime·goroutineheader(gp); |
| 281 runtime·traceback(gp->sched.pc, gp->sched.sp, gp->sched.lr, gp); |
| 282 } |
| 283 |
258 for(gp = runtime·allg; gp != nil; gp = gp->alllink) { | 284 for(gp = runtime·allg; gp != nil; gp = gp->alllink) { |
259 » » if(gp == me || gp->status == Gdead) | 285 » » if(gp == me || gp == m->curg || gp->status == Gdead) |
260 continue; | 286 continue; |
261 if(gp->issystem && traceback < 2) | 287 if(gp->issystem && traceback < 2) |
262 continue; | 288 continue; |
263 runtime·printf("\n"); | 289 runtime·printf("\n"); |
264 runtime·goroutineheader(gp); | 290 runtime·goroutineheader(gp); |
265 » » runtime·traceback(gp->sched.pc, gp->sched.sp, 0, gp); | 291 » » if(gp->status == Grunning) { |
| 292 » » » runtime·printf("\tgoroutine running on other thread; sta
ck unavailable\n"); |
| 293 » » » runtime·printcreatedby(gp); |
| 294 » » } else |
| 295 » » » runtime·traceback(gp->sched.pc, gp->sched.sp, gp->sched.
lr, gp); |
266 } | 296 } |
267 } | 297 } |
268 | 298 |
269 static void | 299 static void |
270 mcommoninit(M *mp) | 300 mcommoninit(M *mp) |
271 { | 301 { |
272 // If there is no mcache runtime·callers() will crash, | 302 // If there is no mcache runtime·callers() will crash, |
273 // and we are most likely in sysmon thread so the stack is senseless any
way. | 303 // and we are most likely in sysmon thread so the stack is senseless any
way. |
274 if(m->mcache) | 304 if(m->mcache) |
275 runtime·callers(1, mp->createstack, nelem(mp->createstack)); | 305 runtime·callers(1, mp->createstack, nelem(mp->createstack)); |
(...skipping 22 matching lines...) Expand all Loading... |
298 m->locks++; // disable preemption because it can be holding p in a loca
l var | 328 m->locks++; // disable preemption because it can be holding p in a loca
l var |
299 if(gp->status != Gwaiting) { | 329 if(gp->status != Gwaiting) { |
300 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->sta
tus); | 330 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->sta
tus); |
301 runtime·throw("bad g->status in ready"); | 331 runtime·throw("bad g->status in ready"); |
302 } | 332 } |
303 gp->status = Grunnable; | 333 gp->status = Grunnable; |
304 runqput(m->p, gp); | 334 runqput(m->p, gp); |
305 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(
&runtime·sched.nmspinning) == 0) // TODO: fast atomic | 335 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(
&runtime·sched.nmspinning) == 0) // TODO: fast atomic |
306 wakep(); | 336 wakep(); |
307 m->locks--; | 337 m->locks--; |
| 338 if(m->locks == 0 && g->preempt) // restore the preemption request in ca
se we've cleared it in newstack |
| 339 g->stackguard0 = StackPreempt; |
308 } | 340 } |
309 | 341 |
310 int32 | 342 int32 |
311 runtime·gcprocs(void) | 343 runtime·gcprocs(void) |
312 { | 344 { |
313 int32 n; | 345 int32 n; |
314 | 346 |
315 // Figure out how many CPUs to use during GC. | 347 // Figure out how many CPUs to use during GC. |
316 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. | 348 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. |
317 runtime·lock(&runtime·sched); | 349 runtime·lock(&runtime·sched); |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
357 if(mp == nil) | 389 if(mp == nil) |
358 runtime·throw("runtime·gcprocs inconsistency"); | 390 runtime·throw("runtime·gcprocs inconsistency"); |
359 mp->helpgc = n; | 391 mp->helpgc = n; |
360 mp->mcache = runtime·allp[pos]->mcache; | 392 mp->mcache = runtime·allp[pos]->mcache; |
361 pos++; | 393 pos++; |
362 runtime·notewakeup(&mp->park); | 394 runtime·notewakeup(&mp->park); |
363 } | 395 } |
364 runtime·unlock(&runtime·sched); | 396 runtime·unlock(&runtime·sched); |
365 } | 397 } |
366 | 398 |
| 399 // Similar to stoptheworld but best-effort and can be called several times. |
| 400 // There is no reverse operation, used during crashing. |
| 401 // This function must not lock any mutexes. |
| 402 void |
| 403 runtime·freezetheworld(void) |
| 404 { |
| 405 int32 i; |
| 406 |
| 407 if(runtime·gomaxprocs == 1) |
| 408 return; |
| 409 // stopwait and preemption requests can be lost |
| 410 // due to races with concurrently executing threads, |
| 411 // so try several times |
| 412 for(i = 0; i < 5; i++) { |
| 413 // this should tell the scheduler to not start any new goroutine
s |
| 414 runtime·sched.stopwait = 0x7fffffff; |
| 415 runtime·atomicstore((uint32*)&runtime·gcwaiting, 1); |
| 416 // this should stop running goroutines |
| 417 if(!preemptall()) |
| 418 break; // no running goroutines |
| 419 runtime·usleep(1000); |
| 420 } |
| 421 // to be sure |
| 422 runtime·usleep(1000); |
| 423 preemptall(); |
| 424 runtime·usleep(1000); |
| 425 } |
| 426 |
367 void | 427 void |
368 runtime·stoptheworld(void) | 428 runtime·stoptheworld(void) |
369 { | 429 { |
370 int32 i; | 430 int32 i; |
371 uint32 s; | 431 uint32 s; |
372 P *p; | 432 P *p; |
373 bool wait; | 433 bool wait; |
374 | 434 |
375 runtime·lock(&runtime·sched); | 435 runtime·lock(&runtime·sched); |
376 runtime·sched.stopwait = runtime·gomaxprocs; | 436 runtime·sched.stopwait = runtime·gomaxprocs; |
377 runtime·atomicstore((uint32*)&runtime·gcwaiting, 1); | 437 runtime·atomicstore((uint32*)&runtime·gcwaiting, 1); |
| 438 preemptall(); |
378 // stop current P | 439 // stop current P |
379 m->p->status = Pgcstop; | 440 m->p->status = Pgcstop; |
380 runtime·sched.stopwait--; | 441 runtime·sched.stopwait--; |
381 // try to retake all P's in Psyscall status | 442 // try to retake all P's in Psyscall status |
382 for(i = 0; i < runtime·gomaxprocs; i++) { | 443 for(i = 0; i < runtime·gomaxprocs; i++) { |
383 p = runtime·allp[i]; | 444 p = runtime·allp[i]; |
384 s = p->status; | 445 s = p->status; |
385 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop)) | 446 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop)) |
386 runtime·sched.stopwait--; | 447 runtime·sched.stopwait--; |
387 } | 448 } |
388 // stop idle P's | 449 // stop idle P's |
389 while(p = pidleget()) { | 450 while(p = pidleget()) { |
390 p->status = Pgcstop; | 451 p->status = Pgcstop; |
391 runtime·sched.stopwait--; | 452 runtime·sched.stopwait--; |
392 } | 453 } |
393 wait = runtime·sched.stopwait > 0; | 454 wait = runtime·sched.stopwait > 0; |
394 runtime·unlock(&runtime·sched); | 455 runtime·unlock(&runtime·sched); |
395 | 456 |
396 » // wait for remaining P's to stop voluntary | 457 » // wait for remaining P's to stop voluntarily |
397 if(wait) { | 458 if(wait) { |
398 » » runtime·notesleep(&runtime·sched.stopnote); | 459 » » for(;;) { |
399 » » runtime·noteclear(&runtime·sched.stopnote); | 460 » » » // wait for 100us, then try to re-preempt in case of any
races |
| 461 » » » if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)
) { |
| 462 » » » » runtime·noteclear(&runtime·sched.stopnote); |
| 463 » » » » break; |
| 464 » » » } |
| 465 » » » preemptall(); |
| 466 » » } |
400 } | 467 } |
401 if(runtime·sched.stopwait) | 468 if(runtime·sched.stopwait) |
402 runtime·throw("stoptheworld: not stopped"); | 469 runtime·throw("stoptheworld: not stopped"); |
403 for(i = 0; i < runtime·gomaxprocs; i++) { | 470 for(i = 0; i < runtime·gomaxprocs; i++) { |
404 p = runtime·allp[i]; | 471 p = runtime·allp[i]; |
405 if(p->status != Pgcstop) | 472 if(p->status != Pgcstop) |
406 runtime·throw("stoptheworld: not stopped"); | 473 runtime·throw("stoptheworld: not stopped"); |
407 } | 474 } |
408 } | 475 } |
409 | 476 |
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
472 // If GC could have used another helper proc, start one now, | 539 // If GC could have used another helper proc, start one now, |
473 // in the hope that it will be available next time. | 540 // in the hope that it will be available next time. |
474 // It would have been even better to start it before the collect
ion, | 541 // It would have been even better to start it before the collect
ion, |
475 // but doing so requires allocating memory, so it's tricky to | 542 // but doing so requires allocating memory, so it's tricky to |
476 // coordinate. This lazy approach works out in practice: | 543 // coordinate. This lazy approach works out in practice: |
477 // we don't mind if the first couple gc rounds don't have quite | 544 // we don't mind if the first couple gc rounds don't have quite |
478 // the maximum number of procs. | 545 // the maximum number of procs. |
479 newm(mhelpgc, nil); | 546 newm(mhelpgc, nil); |
480 } | 547 } |
481 m->locks--; | 548 m->locks--; |
| 549 if(m->locks == 0 && g->preempt) // restore the preemption request in ca
se we've cleared it in newstack |
| 550 g->stackguard0 = StackPreempt; |
482 } | 551 } |
483 | 552 |
484 // Called to start an M. | 553 // Called to start an M. |
485 void | 554 void |
486 runtime·mstart(void) | 555 runtime·mstart(void) |
487 { | 556 { |
| 557 #ifdef GOOS_windows |
| 558 #ifdef GOARCH_386 |
488 // It is used by windows-386 only. Unfortunately, seh needs | 559 // It is used by windows-386 only. Unfortunately, seh needs |
489 // to be located on os stack, and mstart runs on os stack | 560 // to be located on os stack, and mstart runs on os stack |
490 // for both m0 and m. | 561 // for both m0 and m. |
491 SEH seh; | 562 SEH seh; |
| 563 #endif |
| 564 #endif |
492 | 565 |
493 if(g != m->g0) | 566 if(g != m->g0) |
494 runtime·throw("bad runtime·mstart"); | 567 runtime·throw("bad runtime·mstart"); |
495 | 568 |
496 // Record top of stack for use by mcall. | 569 // Record top of stack for use by mcall. |
497 // Once we call schedule we're never coming back, | 570 // Once we call schedule we're never coming back, |
498 // so other calls can reuse this stack space. | 571 // so other calls can reuse this stack space. |
499 runtime·gosave(&m->g0->sched); | 572 runtime·gosave(&m->g0->sched); |
500 m->g0->sched.pc = (uintptr)-1; // make sure it is never used | 573 m->g0->sched.pc = (uintptr)-1; // make sure it is never used |
501 m->g0->stackguard = m->g0->stackguard0; // cgo sets only stackguard0, c
opy it to stackguard | 574 m->g0->stackguard = m->g0->stackguard0; // cgo sets only stackguard0, c
opy it to stackguard |
| 575 #ifdef GOOS_windows |
| 576 #ifdef GOARCH_386 |
502 m->seh = &seh; | 577 m->seh = &seh; |
| 578 #endif |
| 579 #endif |
503 runtime·asminit(); | 580 runtime·asminit(); |
504 runtime·minit(); | 581 runtime·minit(); |
505 | 582 |
506 // Install signal handlers; after minit so that minit can | 583 // Install signal handlers; after minit so that minit can |
507 // prepare the thread to be able to handle the signals. | 584 // prepare the thread to be able to handle the signals. |
508 if(m == &runtime·m0) | 585 if(m == &runtime·m0) |
509 runtime·initsig(); | 586 runtime·initsig(); |
510 ········ | 587 ········ |
511 if(m->mstartfn) | 588 if(m->mstartfn) |
512 m->mstartfn(); | 589 m->mstartfn(); |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
561 // In case of cgo, pthread_create will make us a stack. | 638 // In case of cgo, pthread_create will make us a stack. |
562 // Windows will layout sched stack on OS stack. | 639 // Windows will layout sched stack on OS stack. |
563 if(runtime·iscgo || Windows) | 640 if(runtime·iscgo || Windows) |
564 mp->g0 = runtime·malg(-1); | 641 mp->g0 = runtime·malg(-1); |
565 else | 642 else |
566 mp->g0 = runtime·malg(8192); | 643 mp->g0 = runtime·malg(8192); |
567 | 644 |
568 if(p == m->p) | 645 if(p == m->p) |
569 releasep(); | 646 releasep(); |
570 m->locks--; | 647 m->locks--; |
| 648 if(m->locks == 0 && g->preempt) // restore the preemption request in ca
se we've cleared it in newstack |
| 649 g->stackguard0 = StackPreempt; |
571 | 650 |
572 return mp; | 651 return mp; |
573 } | 652 } |
574 | 653 |
575 static M* lockextra(bool nilokay); | 654 static M* lockextra(bool nilokay); |
576 static void unlockextra(M*); | 655 static void unlockextra(M*); |
577 | 656 |
578 // needm is called when a cgo callback happens on a | 657 // needm is called when a cgo callback happens on a |
579 // thread without an m (a thread not created by Go). | 658 // thread without an m (a thread not created by Go). |
580 // In this case, needm is expected to find an m to use | 659 // In this case, needm is expected to find an m to use |
(...skipping 20 matching lines...) Expand all Loading... |
601 // If needm finds that it has taken the last m off the list, its job | 680 // If needm finds that it has taken the last m off the list, its job |
602 // is - once it has installed its own m so that it can do things like | 681 // is - once it has installed its own m so that it can do things like |
603 // allocate memory - to create a spare m and put it on the list. | 682 // allocate memory - to create a spare m and put it on the list. |
604 // | 683 // |
605 // Each of these extra m's also has a g0 and a curg that are | 684 // Each of these extra m's also has a g0 and a curg that are |
606 // pressed into service as the scheduling stack and current | 685 // pressed into service as the scheduling stack and current |
607 // goroutine for the duration of the cgo callback. | 686 // goroutine for the duration of the cgo callback. |
608 // | 687 // |
609 // When the callback is done with the m, it calls dropm to | 688 // When the callback is done with the m, it calls dropm to |
610 // put the m back on the list. | 689 // put the m back on the list. |
611 #pragma textflag 7 | 690 #pragma textflag NOSPLIT |
612 void | 691 void |
613 runtime·needm(byte x) | 692 runtime·needm(byte x) |
614 { | 693 { |
615 M *mp; | 694 M *mp; |
616 | 695 |
617 if(runtime·needextram) { | 696 if(runtime·needextram) { |
618 if(runtime·islibrary) | 697 if(runtime·islibrary) |
619 runtime·lib_init(); | 698 runtime·lib_init(); |
620 if(runtime·needextram) { | 699 if(runtime·needextram) { |
621 // Can happen if C/C++ code calls Go from a global ctor. | 700 // Can happen if C/C++ code calls Go from a global ctor. |
(...skipping 23 matching lines...) Expand all Loading... |
645 // Install m and g (= m->g0) and set the stack bounds | 724 // Install m and g (= m->g0) and set the stack bounds |
646 // to match the current stack. We don't actually know | 725 // to match the current stack. We don't actually know |
647 // how big the stack is, like we don't know how big any | 726 // how big the stack is, like we don't know how big any |
648 // scheduling stack is, but we assume there's at least 32 kB, | 727 // scheduling stack is, but we assume there's at least 32 kB, |
649 // which is more than enough for us. | 728 // which is more than enough for us. |
650 runtime·setmg(mp, mp->g0); | 729 runtime·setmg(mp, mp->g0); |
651 g->stackbase = (uintptr)(&x + 1024); | 730 g->stackbase = (uintptr)(&x + 1024); |
652 g->stackguard = (uintptr)(&x - 32*1024); | 731 g->stackguard = (uintptr)(&x - 32*1024); |
653 g->stackguard0 = g->stackguard; | 732 g->stackguard0 = g->stackguard; |
654 | 733 |
| 734 #ifdef GOOS_windows |
| 735 #ifdef GOARCH_386 |
655 // On windows/386, we need to put an SEH frame (two words) | 736 // On windows/386, we need to put an SEH frame (two words) |
656 » // somewhere on the current stack. We are called | 737 » // somewhere on the current stack. We are called from cgocallback_gofunc |
657 » // from needm, and we know there is some available | 738 » // and we know that it will leave two unused words below m->curg->sched.
sp. |
658 » // space one word into the argument frame. Use that. | 739 » // Use those. |
659 m->seh = (SEH*)((uintptr*)&x + 1); | 740 m->seh = (SEH*)((uintptr*)&x + 1); |
| 741 #endif |
| 742 #endif |
660 | 743 |
661 // Initialize this thread to use the m. | 744 // Initialize this thread to use the m. |
662 runtime·asminit(); | 745 runtime·asminit(); |
663 runtime·minit(); | 746 runtime·minit(); |
664 } | 747 } |
665 | 748 |
666 // newextram allocates an m and puts it on the extra list. | 749 // newextram allocates an m and puts it on the extra list. |
667 // It is called with a working local m, so that it can do things | 750 // It is called with a working local m, so that it can do things |
668 // like call schedlock and allocate. | 751 // like call schedlock and allocate. |
669 void | 752 void |
670 runtime·newextram(void) | 753 runtime·newextram(void) |
671 { | 754 { |
672 M *mp, *mnext; | 755 M *mp, *mnext; |
673 G *gp; | 756 G *gp; |
674 | 757 |
675 // Create extra goroutine locked to extra m. | 758 // Create extra goroutine locked to extra m. |
676 // The goroutine is the context in which the cgo callback will run. | 759 // The goroutine is the context in which the cgo callback will run. |
677 // The sched.pc will never be returned to, but setting it to | 760 // The sched.pc will never be returned to, but setting it to |
678 // runtime.goexit makes clear to the traceback routines where | 761 // runtime.goexit makes clear to the traceback routines where |
679 // the goroutine stack ends. | 762 // the goroutine stack ends. |
680 mp = runtime·allocm(nil); | 763 mp = runtime·allocm(nil); |
681 gp = runtime·malg(4096); | 764 gp = runtime·malg(4096); |
682 gp->sched.pc = (uintptr)runtime·goexit; | 765 gp->sched.pc = (uintptr)runtime·goexit; |
683 gp->sched.sp = gp->stackbase; | 766 gp->sched.sp = gp->stackbase; |
| 767 gp->sched.lr = 0; |
684 gp->sched.g = gp; | 768 gp->sched.g = gp; |
| 769 gp->syscallpc = gp->sched.pc; |
| 770 gp->syscallsp = gp->sched.sp; |
| 771 gp->syscallstack = gp->stackbase; |
| 772 gp->syscallguard = gp->stackguard; |
685 gp->status = Gsyscall; | 773 gp->status = Gsyscall; |
686 mp->curg = gp; | 774 mp->curg = gp; |
687 mp->locked = LockInternal; | 775 mp->locked = LockInternal; |
688 mp->lockedg = gp; | 776 mp->lockedg = gp; |
689 gp->lockedm = mp; | 777 gp->lockedm = mp; |
| 778 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1); |
| 779 if(raceenabled) |
| 780 gp->racectx = runtime·racegostart(runtime·newextram); |
690 // put on allg for garbage collector | 781 // put on allg for garbage collector |
691 runtime·lock(&runtime·sched); | 782 runtime·lock(&runtime·sched); |
692 if(runtime·lastg == nil) | 783 if(runtime·lastg == nil) |
693 runtime·allg = gp; | 784 runtime·allg = gp; |
694 else | 785 else |
695 runtime·lastg->alllink = gp; | 786 runtime·lastg->alllink = gp; |
696 runtime·lastg = gp; | 787 runtime·lastg = gp; |
697 runtime·unlock(&runtime·sched); | 788 runtime·unlock(&runtime·sched); |
698 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1); | |
699 if(raceenabled) | |
700 gp->racectx = runtime·racegostart(runtime·newextram); | |
701 | 789 |
702 // Add m to the extra list. | 790 // Add m to the extra list. |
703 mnext = lockextra(true); | 791 mnext = lockextra(true); |
704 mp->schedlink = mnext; | 792 mp->schedlink = mnext; |
705 unlockextra(mp); | 793 unlockextra(mp); |
706 } | 794 } |
707 | 795 |
708 // dropm is called when a cgo callback has called needm but is now | 796 // dropm is called when a cgo callback has called needm but is now |
709 // done with the callback and returning back into the non-Go thread. | 797 // done with the callback and returning back into the non-Go thread. |
710 // It puts the current m back onto the extra list. | 798 // It puts the current m back onto the extra list. |
(...skipping 17 matching lines...) Expand all Loading... |
728 // in which dropm happens on each cgo call, is still correct too. | 816 // in which dropm happens on each cgo call, is still correct too. |
729 // We may have to keep the current version on systems with cgo | 817 // We may have to keep the current version on systems with cgo |
730 // but without pthreads, like Windows. | 818 // but without pthreads, like Windows. |
731 void | 819 void |
732 runtime·dropm(void) | 820 runtime·dropm(void) |
733 { | 821 { |
734 M *mp, *mnext; | 822 M *mp, *mnext; |
735 | 823 |
736 // Undo whatever initialization minit did during needm. | 824 // Undo whatever initialization minit did during needm. |
737 runtime·unminit(); | 825 runtime·unminit(); |
| 826 |
| 827 #ifdef GOOS_windows |
| 828 #ifdef GOARCH_386 |
738 m->seh = nil; // reset dangling typed pointer | 829 m->seh = nil; // reset dangling typed pointer |
| 830 #endif |
| 831 #endif |
739 | 832 |
740 // Clear m and g, and return m to the extra list. | 833 // Clear m and g, and return m to the extra list. |
741 // After the call to setmg we can only call nosplit functions. | 834 // After the call to setmg we can only call nosplit functions. |
742 mp = m; | 835 mp = m; |
743 runtime·setmg(nil, nil); | 836 runtime·setmg(nil, nil); |
744 | 837 |
745 mnext = lockextra(true); | 838 mnext = lockextra(true); |
746 mp->schedlink = mnext; | 839 mp->schedlink = mnext; |
747 unlockextra(mp); | 840 unlockextra(mp); |
748 } | 841 } |
749 | 842 |
750 #define MLOCKED ((M*)1) | 843 #define MLOCKED ((M*)1) |
751 | 844 |
752 // lockextra locks the extra list and returns the list head. | 845 // lockextra locks the extra list and returns the list head. |
753 // The caller must unlock the list by storing a new list head | 846 // The caller must unlock the list by storing a new list head |
754 // to runtime.extram. If nilokay is true, then lockextra will | 847 // to runtime.extram. If nilokay is true, then lockextra will |
755 // return a nil list head if that's what it finds. If nilokay is false, | 848 // return a nil list head if that's what it finds. If nilokay is false, |
756 // lockextra will keep waiting until the list head is no longer nil. | 849 // lockextra will keep waiting until the list head is no longer nil. |
757 #pragma textflag 7 | 850 #pragma textflag NOSPLIT |
758 static M* | 851 static M* |
759 lockextra(bool nilokay) | 852 lockextra(bool nilokay) |
760 { | 853 { |
761 M *mp; | 854 M *mp; |
762 void (*yield)(void); | 855 void (*yield)(void); |
763 | 856 |
764 for(;;) { | 857 for(;;) { |
765 mp = runtime·atomicloadp(&runtime·extram); | 858 mp = runtime·atomicloadp(&runtime·extram); |
766 if(mp == MLOCKED) { | 859 if(mp == MLOCKED) { |
767 yield = runtime·osyield; | 860 yield = runtime·osyield; |
768 yield(); | 861 yield(); |
769 continue; | 862 continue; |
770 } | 863 } |
771 if(mp == nil && !nilokay) { | 864 if(mp == nil && !nilokay) { |
772 runtime·usleep(1); | 865 runtime·usleep(1); |
773 continue; | 866 continue; |
774 } | 867 } |
775 if(!runtime·casp(&runtime·extram, mp, MLOCKED)) { | 868 if(!runtime·casp(&runtime·extram, mp, MLOCKED)) { |
776 yield = runtime·osyield; | 869 yield = runtime·osyield; |
777 yield(); | 870 yield(); |
778 continue; | 871 continue; |
779 } | 872 } |
780 break; | 873 break; |
781 } | 874 } |
782 return mp; | 875 return mp; |
783 } | 876 } |
784 | 877 |
785 #pragma textflag 7 | 878 #pragma textflag NOSPLIT |
786 static void | 879 static void |
787 unlockextra(M *mp) | 880 unlockextra(M *mp) |
788 { | 881 { |
789 runtime·atomicstorep(&runtime·extram, mp); | 882 runtime·atomicstorep(&runtime·extram, mp); |
790 } | 883 } |
791 | 884 |
792 | 885 |
793 // Create a new m. It will start off with a call to fn, or else the scheduler. | 886 // Create a new m. It will start off with a call to fn, or else the scheduler. |
794 static void | 887 static void |
795 newm(void(*fn)(void), P *p) | 888 newm(void(*fn)(void), P *p) |
(...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
944 { | 1037 { |
945 P *p; | 1038 P *p; |
946 | 1039 |
947 if(m->lockedg == nil || m->lockedg->lockedm != m) | 1040 if(m->lockedg == nil || m->lockedg->lockedm != m) |
948 runtime·throw("stoplockedm: inconsistent locking"); | 1041 runtime·throw("stoplockedm: inconsistent locking"); |
949 if(m->p) { | 1042 if(m->p) { |
950 // Schedule another M to run this p. | 1043 // Schedule another M to run this p. |
951 p = releasep(); | 1044 p = releasep(); |
952 handoffp(p); | 1045 handoffp(p); |
953 } | 1046 } |
954 » inclocked(1); | 1047 » incidlelocked(1); |
955 // Wait until another thread schedules lockedg again. | 1048 // Wait until another thread schedules lockedg again. |
956 runtime·notesleep(&m->park); | 1049 runtime·notesleep(&m->park); |
957 runtime·noteclear(&m->park); | 1050 runtime·noteclear(&m->park); |
958 if(m->lockedg->status != Grunnable) | 1051 if(m->lockedg->status != Grunnable) |
959 runtime·throw("stoplockedm: not runnable"); | 1052 runtime·throw("stoplockedm: not runnable"); |
960 acquirep(m->nextp); | 1053 acquirep(m->nextp); |
961 m->nextp = nil; | 1054 m->nextp = nil; |
962 } | 1055 } |
963 | 1056 |
964 // Schedules the locked m to run the locked gp. | 1057 // Schedules the locked m to run the locked gp. |
965 static void | 1058 static void |
966 startlockedm(G *gp) | 1059 startlockedm(G *gp) |
967 { | 1060 { |
968 M *mp; | 1061 M *mp; |
969 P *p; | 1062 P *p; |
970 | 1063 |
971 mp = gp->lockedm; | 1064 mp = gp->lockedm; |
972 if(mp == m) | 1065 if(mp == m) |
973 runtime·throw("startlockedm: locked to me"); | 1066 runtime·throw("startlockedm: locked to me"); |
974 if(mp->nextp) | 1067 if(mp->nextp) |
975 runtime·throw("startlockedm: m has p"); | 1068 runtime·throw("startlockedm: m has p"); |
976 // directly handoff current P to the locked m | 1069 // directly handoff current P to the locked m |
977 » inclocked(-1); | 1070 » incidlelocked(-1); |
978 p = releasep(); | 1071 p = releasep(); |
979 mp->nextp = p; | 1072 mp->nextp = p; |
980 runtime·notewakeup(&mp->park); | 1073 runtime·notewakeup(&mp->park); |
981 stopm(); | 1074 stopm(); |
982 } | 1075 } |
983 | 1076 |
984 // Stops the current m for stoptheworld. | 1077 // Stops the current m for stoptheworld. |
985 // Returns when the world is restarted. | 1078 // Returns when the world is restarted. |
986 static void | 1079 static void |
987 gcstopm(void) | 1080 gcstopm(void) |
(...skipping 20 matching lines...) Expand all Loading... |
1008 static void | 1101 static void |
1009 execute(G *gp) | 1102 execute(G *gp) |
1010 { | 1103 { |
1011 int32 hz; | 1104 int32 hz; |
1012 | 1105 |
1013 if(gp->status != Grunnable) { | 1106 if(gp->status != Grunnable) { |
1014 runtime·printf("execute: bad g status %d\n", gp->status); | 1107 runtime·printf("execute: bad g status %d\n", gp->status); |
1015 runtime·throw("execute: bad g status"); | 1108 runtime·throw("execute: bad g status"); |
1016 } | 1109 } |
1017 gp->status = Grunning; | 1110 gp->status = Grunning; |
| 1111 gp->preempt = false; |
1018 gp->stackguard0 = gp->stackguard; | 1112 gp->stackguard0 = gp->stackguard; |
1019 » m->p->tick++; | 1113 » m->p->schedtick++; |
1020 m->curg = gp; | 1114 m->curg = gp; |
1021 gp->m = m; | 1115 gp->m = m; |
1022 | 1116 |
1023 // Check whether the profiler needs to be turned on or off. | 1117 // Check whether the profiler needs to be turned on or off. |
1024 hz = runtime·sched.profilehz; | 1118 hz = runtime·sched.profilehz; |
1025 if(m->profilehz != hz) | 1119 if(m->profilehz != hz) |
1026 runtime·resetcpuprofiler(hz); | 1120 runtime·resetcpuprofiler(hz); |
1027 | 1121 |
1028 runtime·gogo(&gp->sched); | 1122 runtime·gogo(&gp->sched); |
1029 } | 1123 } |
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1133 gp->status = Grunnable; | 1227 gp->status = Grunnable; |
1134 return gp; | 1228 return gp; |
1135 } | 1229 } |
1136 injectglist(gp); | 1230 injectglist(gp); |
1137 } | 1231 } |
1138 } | 1232 } |
1139 stopm(); | 1233 stopm(); |
1140 goto top; | 1234 goto top; |
1141 } | 1235 } |
1142 | 1236 |
| 1237 static void |
| 1238 resetspinning(void) |
| 1239 { |
| 1240 int32 nmspinning; |
| 1241 |
| 1242 if(m->spinning) { |
| 1243 m->spinning = false; |
| 1244 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1); |
| 1245 if(nmspinning < 0) |
| 1246 runtime·throw("findrunnable: negative nmspinning"); |
| 1247 } else |
| 1248 nmspinning = runtime·atomicload(&runtime·sched.nmspinning); |
| 1249 |
| 1250 // M wakeup policy is deliberately somewhat conservative (see nmspinning
handling), |
| 1251 // so see if we need to wakeup another P here. |
| 1252 if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0) |
| 1253 wakep(); |
| 1254 } |
| 1255 |
1143 // Injects the list of runnable G's into the scheduler. | 1256 // Injects the list of runnable G's into the scheduler. |
1144 // Can run concurrently with GC. | 1257 // Can run concurrently with GC. |
1145 static void | 1258 static void |
1146 injectglist(G *glist) | 1259 injectglist(G *glist) |
1147 { | 1260 { |
1148 int32 n; | 1261 int32 n; |
1149 G *gp; | 1262 G *gp; |
1150 | 1263 |
1151 if(glist == nil) | 1264 if(glist == nil) |
1152 return; | 1265 return; |
(...skipping 24 matching lines...) Expand all Loading... |
1177 top: | 1290 top: |
1178 if(runtime·gcwaiting) { | 1291 if(runtime·gcwaiting) { |
1179 gcstopm(); | 1292 gcstopm(); |
1180 goto top; | 1293 goto top; |
1181 } | 1294 } |
1182 | 1295 |
1183 gp = nil; | 1296 gp = nil; |
1184 // Check the global runnable queue once in a while to ensure fairness. | 1297 // Check the global runnable queue once in a while to ensure fairness. |
1185 // Otherwise two goroutines can completely occupy the local runqueue | 1298 // Otherwise two goroutines can completely occupy the local runqueue |
1186 // by constantly respawning each other. | 1299 // by constantly respawning each other. |
1187 » tick = m->p->tick; | 1300 » tick = m->p->schedtick; |
1188 // This is a fancy way to say tick%61==0, | 1301 // This is a fancy way to say tick%61==0, |
1189 // it uses 2 MUL instructions instead of a single DIV and so is faster o
n modern processors. | 1302 // it uses 2 MUL instructions instead of a single DIV and so is faster o
n modern processors. |
1190 if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runq
size > 0) { | 1303 if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runq
size > 0) { |
1191 runtime·lock(&runtime·sched); | 1304 runtime·lock(&runtime·sched); |
1192 gp = globrunqget(m->p, 1); | 1305 gp = globrunqget(m->p, 1); |
1193 runtime·unlock(&runtime·sched); | 1306 runtime·unlock(&runtime·sched); |
| 1307 if(gp) |
| 1308 resetspinning(); |
1194 } | 1309 } |
1195 if(gp == nil) { | 1310 if(gp == nil) { |
1196 gp = runqget(m->p); | 1311 gp = runqget(m->p); |
1197 if(gp && m->spinning) | 1312 if(gp && m->spinning) |
1198 runtime·throw("schedule: spinning with local work"); | 1313 runtime·throw("schedule: spinning with local work"); |
1199 } | 1314 } |
1200 » if(gp == nil) | 1315 » if(gp == nil) { |
1201 » » gp = findrunnable(); | 1316 » » gp = findrunnable(); // blocks until work is available |
1202 | 1317 » » resetspinning(); |
1203 » if(m->spinning) { | 1318 » } |
1204 » » m->spinning = false; | |
1205 » » runtime·xadd(&runtime·sched.nmspinning, -1); | |
1206 » } | |
1207 | |
1208 » // M wakeup policy is deliberately somewhat conservative (see nmspinning
handling), | |
1209 » // so see if we need to wakeup another M here. | |
1210 » if (m->p->runqhead != m->p->runqtail && | |
1211 » » runtime·atomicload(&runtime·sched.nmspinning) == 0 && | |
1212 » » runtime·atomicload(&runtime·sched.npidle) > 0) // TODO: fast at
omic | |
1213 » » wakep(); | |
1214 | 1319 |
1215 if(gp->lockedm) { | 1320 if(gp->lockedm) { |
| 1321 // Hands off own p to the locked m, |
| 1322 // then blocks waiting for a new p. |
1216 startlockedm(gp); | 1323 startlockedm(gp); |
1217 goto top; | 1324 goto top; |
1218 } | 1325 } |
1219 | 1326 |
1220 execute(gp); | 1327 execute(gp); |
1221 } | 1328 } |
1222 | 1329 |
1223 // Puts the current goroutine into a waiting state and unlocks the lock. | 1330 // Puts the current goroutine into a waiting state and unlocks the lock. |
1224 // The goroutine can be made runnable again by calling runtime·ready(gp). | 1331 // The goroutine can be made runnable again by calling runtime·ready(gp). |
1225 void | 1332 void |
(...skipping 21 matching lines...) Expand all Loading... |
1247 stoplockedm(); | 1354 stoplockedm(); |
1248 execute(gp); // Never returns. | 1355 execute(gp); // Never returns. |
1249 } | 1356 } |
1250 schedule(); | 1357 schedule(); |
1251 } | 1358 } |
1252 | 1359 |
1253 // Scheduler yield. | 1360 // Scheduler yield. |
1254 void | 1361 void |
1255 runtime·gosched(void) | 1362 runtime·gosched(void) |
1256 { | 1363 { |
1257 » runtime·mcall(gosched0); | 1364 » runtime·mcall(runtime·gosched0); |
1258 } | 1365 } |
1259 | 1366 |
1260 // runtime·gosched continuation on g0. | 1367 // runtime·gosched continuation on g0. |
1261 static void | 1368 void |
1262 gosched0(G *gp) | 1369 runtime·gosched0(G *gp) |
1263 { | 1370 { |
1264 gp->status = Grunnable; | 1371 gp->status = Grunnable; |
1265 gp->m = nil; | 1372 gp->m = nil; |
1266 m->curg = nil; | 1373 m->curg = nil; |
1267 runtime·lock(&runtime·sched); | 1374 runtime·lock(&runtime·sched); |
1268 globrunqput(gp); | 1375 globrunqput(gp); |
1269 runtime·unlock(&runtime·sched); | 1376 runtime·unlock(&runtime·sched); |
1270 if(m->lockedg) { | 1377 if(m->lockedg) { |
1271 stoplockedm(); | 1378 stoplockedm(); |
1272 execute(gp); // Never returns. | 1379 execute(gp); // Never returns. |
1273 } | 1380 } |
1274 schedule(); | 1381 schedule(); |
1275 } | 1382 } |
1276 | 1383 |
1277 // Finishes execution of the current goroutine. | 1384 // Finishes execution of the current goroutine. |
1278 // Need to mark it as nosplit, because it runs with sp > stackbase (as runtime·l
essstack). | 1385 // Need to mark it as nosplit, because it runs with sp > stackbase (as runtime·l
essstack). |
1279 // Since it does not return it does not matter. But if it is preempted | 1386 // Since it does not return it does not matter. But if it is preempted |
1280 // at the split stack check, GC will complain about inconsistent sp. | 1387 // at the split stack check, GC will complain about inconsistent sp. |
1281 #pragma textflag 7 | 1388 #pragma textflag NOSPLIT |
1282 void | 1389 void |
1283 runtime·goexit(void) | 1390 runtime·goexit(void) |
1284 { | 1391 { |
1285 if(raceenabled) | 1392 if(raceenabled) |
1286 runtime·racegoend(); | 1393 runtime·racegoend(); |
1287 runtime·mcall(goexit0); | 1394 runtime·mcall(goexit0); |
1288 } | 1395 } |
1289 | 1396 |
1290 // runtime·goexit continuation on g0. | 1397 // runtime·goexit continuation on g0. |
1291 static void | 1398 static void |
1292 goexit0(G *gp) | 1399 goexit0(G *gp) |
1293 { | 1400 { |
1294 gp->status = Gdead; | 1401 gp->status = Gdead; |
1295 gp->m = nil; | 1402 gp->m = nil; |
1296 gp->lockedm = nil; | 1403 gp->lockedm = nil; |
1297 m->curg = nil; | 1404 m->curg = nil; |
1298 m->lockedg = nil; | 1405 m->lockedg = nil; |
1299 if(m->locked & ~LockExternal) { | 1406 if(m->locked & ~LockExternal) { |
1300 » » runtime·printf("invalid m->locked = %d", m->locked); | 1407 » » runtime·printf("invalid m->locked = %d\n", m->locked); |
1301 runtime·throw("internal lockOSThread error"); | 1408 runtime·throw("internal lockOSThread error"); |
1302 }······· | 1409 }······· |
1303 m->locked = 0; | 1410 m->locked = 0; |
1304 runtime·unwindstack(gp, nil); | 1411 runtime·unwindstack(gp, nil); |
1305 gfput(m->p, gp); | 1412 gfput(m->p, gp); |
1306 schedule(); | 1413 schedule(); |
1307 } | 1414 } |
1308 | 1415 |
| 1416 #pragma textflag NOSPLIT |
1309 static void | 1417 static void |
1310 save(void *pc, uintptr sp) | 1418 save(void *pc, uintptr sp) |
1311 { | 1419 { |
1312 g->gcpc = (uintptr)pc; | |
1313 g->gcsp = sp; | |
1314 g->sched.pc = (uintptr)pc; | 1420 g->sched.pc = (uintptr)pc; |
1315 g->sched.sp = sp; | 1421 g->sched.sp = sp; |
1316 g->sched.lr = 0; | 1422 g->sched.lr = 0; |
1317 g->sched.ret = 0; | 1423 g->sched.ret = 0; |
1318 g->sched.ctxt = 0; | 1424 g->sched.ctxt = 0; |
1319 g->sched.g = g; | 1425 g->sched.g = g; |
1320 } | 1426 } |
1321 | 1427 |
1322 // The goroutine g is about to enter a system call. | 1428 // The goroutine g is about to enter a system call. |
1323 // Record that it's not using the cpu anymore. | 1429 // Record that it's not using the cpu anymore. |
1324 // This is called only from the go syscall library and cgocall, | 1430 // This is called only from the go syscall library and cgocall, |
1325 // not from the low-level system calls used by the runtime. | 1431 // not from the low-level system calls used by the runtime. |
1326 // | 1432 // |
1327 // Entersyscall cannot split the stack: the runtime·gosave must | 1433 // Entersyscall cannot split the stack: the runtime·gosave must |
1328 // make g->sched refer to the caller's stack segment, because | 1434 // make g->sched refer to the caller's stack segment, because |
1329 // entersyscall is going to return immediately after. | 1435 // entersyscall is going to return immediately after. |
1330 #pragma textflag 7 | 1436 #pragma textflag NOSPLIT |
1331 void | 1437 void |
1332 ·entersyscall(int32 dummy) | 1438 ·entersyscall(int32 dummy) |
1333 { | 1439 { |
1334 » if(m->profilehz > 0) | 1440 » // Disable preemption because during this function g is in Gsyscall stat
us, |
1335 » » runtime·setprof(false); | 1441 » // but can have inconsistent g->sched, do not let GC observe it. |
1336 | 1442 » m->locks++; |
1337 » // Leave SP around for gc and traceback. | 1443 |
| 1444 » // Leave SP around for GC and traceback. |
1338 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1445 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); |
1339 | 1446 » g->syscallsp = g->sched.sp; |
1340 » g->gcstack = g->stackbase; | 1447 » g->syscallpc = g->sched.pc; |
1341 » g->gcguard = g->stackguard; | 1448 » g->syscallstack = g->stackbase; |
| 1449 » g->syscallguard = g->stackguard; |
1342 g->status = Gsyscall; | 1450 g->status = Gsyscall; |
1343 » if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) { | 1451 » if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->sys
callsp) { |
1344 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n", | 1452 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n", |
1345 » » //» g->gcsp, g->gcguard-StackGuard, g->gcstack); | 1453 » » //» g->syscallsp, g->syscallguard-StackGuard, g->syscallstac
k); |
1346 runtime·throw("entersyscall"); | 1454 runtime·throw("entersyscall"); |
1347 } | 1455 } |
1348 | 1456 |
1349 if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomi
c | 1457 if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomi
c |
1350 runtime·lock(&runtime·sched); | 1458 runtime·lock(&runtime·sched); |
1351 if(runtime·atomicload(&runtime·sched.sysmonwait)) { | 1459 if(runtime·atomicload(&runtime·sched.sysmonwait)) { |
1352 runtime·atomicstore(&runtime·sched.sysmonwait, 0); | 1460 runtime·atomicstore(&runtime·sched.sysmonwait, 0); |
1353 runtime·notewakeup(&runtime·sched.sysmonnote); | 1461 runtime·notewakeup(&runtime·sched.sysmonnote); |
1354 } | 1462 } |
1355 runtime·unlock(&runtime·sched); | 1463 runtime·unlock(&runtime·sched); |
1356 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1464 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); |
1357 } | 1465 } |
1358 | 1466 |
1359 m->mcache = nil; | 1467 m->mcache = nil; |
1360 m->p->tick++; | |
1361 m->p->m = nil; | 1468 m->p->m = nil; |
1362 runtime·atomicstore(&m->p->status, Psyscall); | 1469 runtime·atomicstore(&m->p->status, Psyscall); |
1363 if(runtime·gcwaiting) { | 1470 if(runtime·gcwaiting) { |
1364 runtime·lock(&runtime·sched); | 1471 runtime·lock(&runtime·sched); |
1365 if (runtime·sched.stopwait > 0 && runtime·cas(&m->p->status, Psy
scall, Pgcstop)) { | 1472 if (runtime·sched.stopwait > 0 && runtime·cas(&m->p->status, Psy
scall, Pgcstop)) { |
1366 if(--runtime·sched.stopwait == 0) | 1473 if(--runtime·sched.stopwait == 0) |
1367 runtime·notewakeup(&runtime·sched.stopnote); | 1474 runtime·notewakeup(&runtime·sched.stopnote); |
1368 } | 1475 } |
1369 runtime·unlock(&runtime·sched); | 1476 runtime·unlock(&runtime·sched); |
1370 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1477 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); |
1371 } | 1478 } |
| 1479 |
| 1480 // Goroutines must not split stacks in Gsyscall status (it would corrupt
g->sched). |
| 1481 // We set stackguard to StackPreempt so that first split stack check cal
ls morestack. |
| 1482 // Morestack detects this case and throws. |
| 1483 g->stackguard0 = StackPreempt; |
| 1484 m->locks--; |
1372 } | 1485 } |
1373 | 1486 |
1374 // The same as runtime·entersyscall(), but with a hint that the syscall is block
ing. | 1487 // The same as runtime·entersyscall(), but with a hint that the syscall is block
ing. |
1375 #pragma textflag 7 | 1488 #pragma textflag NOSPLIT |
1376 void | 1489 void |
1377 ·entersyscallblock(int32 dummy) | 1490 ·entersyscallblock(int32 dummy) |
1378 { | 1491 { |
1379 P *p; | 1492 P *p; |
1380 | 1493 |
1381 » if(m->profilehz > 0) | 1494 » m->locks++; // see comment in entersyscall |
1382 » » runtime·setprof(false); | 1495 |
1383 | 1496 » // Leave SP around for GC and traceback. |
1384 » // Leave SP around for gc and traceback. | |
1385 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1497 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); |
1386 » g->gcsp = g->sched.sp; | 1498 » g->syscallsp = g->sched.sp; |
1387 » g->gcpc = g->sched.pc; | 1499 » g->syscallpc = g->sched.pc; |
1388 » g->gcstack = g->stackbase; | 1500 » g->syscallstack = g->stackbase; |
1389 » g->gcguard = g->stackguard; | 1501 » g->syscallguard = g->stackguard; |
1390 g->status = Gsyscall; | 1502 g->status = Gsyscall; |
1391 » if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) { | 1503 » if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->sys
callsp) { |
1392 » » // runtime·printf("entersyscallblock inconsistent %p [%p,%p]\n", | 1504 » » // runtime·printf("entersyscall inconsistent %p [%p,%p]\n", |
1393 » » //» g->gcsp, g->gcguard-StackGuard, g->gcstack); | 1505 » » //» g->syscallsp, g->syscallguard-StackGuard, g->syscallstac
k); |
1394 runtime·throw("entersyscallblock"); | 1506 runtime·throw("entersyscallblock"); |
1395 } | 1507 } |
1396 | 1508 |
1397 p = releasep(); | 1509 p = releasep(); |
1398 handoffp(p); | 1510 handoffp(p); |
1399 if(g->isbackground) // do not consider blocked scavenger for deadlock d
etection | 1511 if(g->isbackground) // do not consider blocked scavenger for deadlock d
etection |
1400 » » inclocked(1); | 1512 » » incidlelocked(1); |
1401 | 1513 |
1402 // Resave for traceback during blocked call. | 1514 // Resave for traceback during blocked call. |
1403 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1515 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); |
| 1516 |
| 1517 g->stackguard0 = StackPreempt; // see comment in entersyscall |
| 1518 m->locks--; |
1404 } | 1519 } |
1405 | 1520 |
1406 // The goroutine g exited its system call. | 1521 // The goroutine g exited its system call. |
1407 // Arrange for it to run on a cpu again. | 1522 // Arrange for it to run on a cpu again. |
1408 // This is called only from the go syscall library, not | 1523 // This is called only from the go syscall library, not |
1409 // from the low-level system calls used by the runtime. | 1524 // from the low-level system calls used by the runtime. |
| 1525 #pragma textflag NOSPLIT |
1410 void | 1526 void |
1411 runtime·exitsyscall(void) | 1527 runtime·exitsyscall(void) |
1412 { | 1528 { |
1413 » P *p; | 1529 » m->locks++; // see comment in entersyscall |
1414 | 1530 |
1415 » // Check whether the profiler needs to be turned on. | 1531 » if(g->isbackground) // do not consider blocked scavenger for deadlock d
etection |
1416 » if(m->profilehz > 0) | 1532 » » incidlelocked(-1); |
1417 » » runtime·setprof(true); | 1533 |
1418 | 1534 » if(exitsyscallfast()) { |
1419 » // Try to re-acquire the last P. | |
1420 » if(m->p && m->p->status == Psyscall && runtime·cas(&m->p->status, Psysca
ll, Prunning)) { | |
1421 // There's a cpu for us, so we can run. | 1535 // There's a cpu for us, so we can run. |
1422 » » m->mcache = m->p->mcache; | 1536 » » m->p->syscalltick++; |
1423 » » m->p->m = m; | |
1424 » » m->p->tick++; | |
1425 g->status = Grunning; | 1537 g->status = Grunning; |
1426 // Garbage collector isn't running (since we are), | 1538 // Garbage collector isn't running (since we are), |
1427 // so okay to clear gcstack and gcsp. | 1539 // so okay to clear gcstack and gcsp. |
1428 » » g->gcstack = (uintptr)nil; | 1540 » » g->syscallstack = (uintptr)nil; |
1429 » » g->gcsp = (uintptr)nil; | 1541 » » g->syscallsp = (uintptr)nil; |
| 1542 » » m->locks--; |
| 1543 » » if(g->preempt) { |
| 1544 » » » // restore the preemption request in case we've cleared
it in newstack |
| 1545 » » » g->stackguard0 = StackPreempt; |
| 1546 » » } else { |
| 1547 » » » // otherwise restore the real stackguard, we've spoiled
it in entersyscall/entersyscallblock |
| 1548 » » » g->stackguard0 = g->stackguard; |
| 1549 » » } |
1430 return; | 1550 return; |
1431 } | 1551 } |
1432 | 1552 |
1433 » if(g->isbackground) // do not consider blocked scavenger for deadlock d
etection | 1553 » m->locks--; |
1434 » » inclocked(-1); | |
1435 » // Try to get any other idle P. | |
1436 » m->p = nil; | |
1437 » if(runtime·sched.pidle) { | |
1438 » » runtime·lock(&runtime·sched); | |
1439 » » p = pidleget(); | |
1440 » » runtime·unlock(&runtime·sched); | |
1441 » » if(p) { | |
1442 » » » acquirep(p); | |
1443 » » » m->p->tick++; | |
1444 » » » g->status = Grunning; | |
1445 » » » g->gcstack = (uintptr)nil; | |
1446 » » » g->gcsp = (uintptr)nil; | |
1447 » » » return; | |
1448 » » } | |
1449 » } | |
1450 | 1554 |
1451 // Call the scheduler. | 1555 // Call the scheduler. |
1452 runtime·mcall(exitsyscall0); | 1556 runtime·mcall(exitsyscall0); |
1453 | 1557 |
1454 // Scheduler returned, so we're allowed to run now. | 1558 // Scheduler returned, so we're allowed to run now. |
1455 // Delete the gcstack information that we left for | 1559 // Delete the gcstack information that we left for |
1456 // the garbage collector during the system call. | 1560 // the garbage collector during the system call. |
1457 // Must wait until now because until gosched returns | 1561 // Must wait until now because until gosched returns |
1458 // we don't know for sure that the garbage collector | 1562 // we don't know for sure that the garbage collector |
1459 // is not running. | 1563 // is not running. |
1460 » g->gcstack = (uintptr)nil; | 1564 » g->syscallstack = (uintptr)nil; |
1461 » g->gcsp = (uintptr)nil; | 1565 » g->syscallsp = (uintptr)nil; |
| 1566 » m->p->syscalltick++; |
| 1567 } |
| 1568 |
| 1569 #pragma textflag NOSPLIT |
| 1570 static bool |
| 1571 exitsyscallfast(void) |
| 1572 { |
| 1573 » P *p; |
| 1574 |
| 1575 » // Freezetheworld sets stopwait but does not retake P's. |
| 1576 » if(runtime·sched.stopwait) { |
| 1577 » » m->p = nil; |
| 1578 » » return false; |
| 1579 » } |
| 1580 |
| 1581 » // Try to re-acquire the last P. |
| 1582 » if(m->p && m->p->status == Psyscall && runtime·cas(&m->p->status, Psysca
ll, Prunning)) { |
| 1583 » » // There's a cpu for us, so we can run. |
| 1584 » » m->mcache = m->p->mcache; |
| 1585 » » m->p->m = m; |
| 1586 » » return true; |
| 1587 » } |
| 1588 » // Try to get any other idle P. |
| 1589 » m->p = nil; |
| 1590 » if(runtime·sched.pidle) { |
| 1591 » » runtime·lock(&runtime·sched); |
| 1592 » » p = pidleget(); |
| 1593 » » if(p && runtime·atomicload(&runtime·sched.sysmonwait)) { |
| 1594 » » » runtime·atomicstore(&runtime·sched.sysmonwait, 0); |
| 1595 » » » runtime·notewakeup(&runtime·sched.sysmonnote); |
| 1596 » » } |
| 1597 » » runtime·unlock(&runtime·sched); |
| 1598 » » if(p) { |
| 1599 » » » acquirep(p); |
| 1600 » » » return true; |
| 1601 » » } |
| 1602 » } |
| 1603 » return false; |
1462 } | 1604 } |
1463 | 1605 |
1464 // runtime·exitsyscall slow path on g0. | 1606 // runtime·exitsyscall slow path on g0. |
1465 // Failed to acquire P, enqueue gp as runnable. | 1607 // Failed to acquire P, enqueue gp as runnable. |
1466 static void | 1608 static void |
1467 exitsyscall0(G *gp) | 1609 exitsyscall0(G *gp) |
1468 { | 1610 { |
1469 P *p; | 1611 P *p; |
1470 | 1612 |
1471 gp->status = Grunnable; | 1613 gp->status = Grunnable; |
1472 gp->m = nil; | 1614 gp->m = nil; |
1473 m->curg = nil; | 1615 m->curg = nil; |
1474 runtime·lock(&runtime·sched); | 1616 runtime·lock(&runtime·sched); |
1475 p = pidleget(); | 1617 p = pidleget(); |
1476 if(p == nil) | 1618 if(p == nil) |
1477 globrunqput(gp); | 1619 globrunqput(gp); |
| 1620 else if(runtime·atomicload(&runtime·sched.sysmonwait)) { |
| 1621 runtime·atomicstore(&runtime·sched.sysmonwait, 0); |
| 1622 runtime·notewakeup(&runtime·sched.sysmonnote); |
| 1623 } |
1478 runtime·unlock(&runtime·sched); | 1624 runtime·unlock(&runtime·sched); |
1479 if(p) { | 1625 if(p) { |
1480 acquirep(p); | 1626 acquirep(p); |
1481 execute(gp); // Never returns. | 1627 execute(gp); // Never returns. |
1482 } | 1628 } |
1483 if(m->lockedg) { | 1629 if(m->lockedg) { |
1484 // Wait until another thread schedules gp and so m again. | 1630 // Wait until another thread schedules gp and so m again. |
1485 stoplockedm(); | 1631 stoplockedm(); |
1486 execute(gp); // Never returns. | 1632 execute(gp); // Never returns. |
1487 } | 1633 } |
1488 stopm(); | 1634 stopm(); |
1489 schedule(); // Never returns. | 1635 schedule(); // Never returns. |
| 1636 } |
| 1637 |
| 1638 // Called from syscall package before fork. |
| 1639 void |
| 1640 syscall·runtime_BeforeFork(void) |
| 1641 { |
| 1642 // Fork can hang if preempted with signals frequently enough (see issue
5517). |
| 1643 // Ensure that we stay on the same M where we disable profiling. |
| 1644 m->locks++; |
| 1645 if(m->profilehz != 0) |
| 1646 runtime·resetcpuprofiler(0); |
| 1647 } |
| 1648 |
| 1649 // Called from syscall package after fork in parent. |
| 1650 void |
| 1651 syscall·runtime_AfterFork(void) |
| 1652 { |
| 1653 int32 hz; |
| 1654 |
| 1655 hz = runtime·sched.profilehz; |
| 1656 if(hz != 0) |
| 1657 runtime·resetcpuprofiler(hz); |
| 1658 m->locks--; |
1490 } | 1659 } |
1491 | 1660 |
1492 // Hook used by runtime·malg to call runtime·stackalloc on the | 1661 // Hook used by runtime·malg to call runtime·stackalloc on the |
1493 // scheduler stack. This exists because runtime·stackalloc insists | 1662 // scheduler stack. This exists because runtime·stackalloc insists |
1494 // on being called on the scheduler stack, to avoid trying to grow | 1663 // on being called on the scheduler stack, to avoid trying to grow |
1495 // the stack while allocating a new stack segment. | 1664 // the stack while allocating a new stack segment. |
1496 static void | 1665 static void |
1497 mstackalloc(G *gp) | 1666 mstackalloc(G *gp) |
1498 { | 1667 { |
1499 gp->param = runtime·stackalloc((uintptr)gp->param); | 1668 gp->param = runtime·stackalloc((uintptr)gp->param); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1533 return newg; | 1702 return newg; |
1534 } | 1703 } |
1535 | 1704 |
1536 // Create a new g running fn with siz bytes of arguments. | 1705 // Create a new g running fn with siz bytes of arguments. |
1537 // Put it on the queue of g's waiting to run. | 1706 // Put it on the queue of g's waiting to run. |
1538 // The compiler turns a go statement into a call to this. | 1707 // The compiler turns a go statement into a call to this. |
1539 // Cannot split the stack because it assumes that the arguments | 1708 // Cannot split the stack because it assumes that the arguments |
1540 // are available sequentially after &fn; they would not be | 1709 // are available sequentially after &fn; they would not be |
1541 // copied if a stack split occurred. It's OK for this to call | 1710 // copied if a stack split occurred. It's OK for this to call |
1542 // functions that split the stack. | 1711 // functions that split the stack. |
1543 #pragma textflag 7 | 1712 #pragma textflag NOSPLIT |
1544 void | 1713 void |
1545 runtime·newproc(int32 siz, FuncVal* fn, ...) | 1714 runtime·newproc(int32 siz, FuncVal* fn, ...) |
1546 { | 1715 { |
1547 byte *argp; | 1716 byte *argp; |
1548 | 1717 |
1549 if(thechar == '5') | 1718 if(thechar == '5') |
1550 argp = (byte*)(&fn+2); // skip caller's saved LR | 1719 argp = (byte*)(&fn+2); // skip caller's saved LR |
1551 else | 1720 else |
1552 argp = (byte*)(&fn+1); | 1721 argp = (byte*)(&fn+1); |
1553 runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz)); | 1722 runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz)); |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1607 newg->gopc = (uintptr)callerpc; | 1776 newg->gopc = (uintptr)callerpc; |
1608 newg->status = Grunnable; | 1777 newg->status = Grunnable; |
1609 newg->goid = runtime·xadd64(&runtime·sched.goidgen, 1); | 1778 newg->goid = runtime·xadd64(&runtime·sched.goidgen, 1); |
1610 if(raceenabled) | 1779 if(raceenabled) |
1611 newg->racectx = runtime·racegostart((void*)callerpc); | 1780 newg->racectx = runtime·racegostart((void*)callerpc); |
1612 runqput(m->p, newg); | 1781 runqput(m->p, newg); |
1613 | 1782 |
1614 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(
&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic | 1783 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(
&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic |
1615 wakep(); | 1784 wakep(); |
1616 m->locks--; | 1785 m->locks--; |
| 1786 if(m->locks == 0 && g->preempt) // restore the preemption request in ca
se we've cleared it in newstack |
| 1787 g->stackguard0 = StackPreempt; |
1617 return newg; | 1788 return newg; |
1618 } | 1789 } |
1619 | 1790 |
1620 // Put on gfree list. | 1791 // Put on gfree list. |
1621 // If local list is too long, transfer a batch to the global list. | 1792 // If local list is too long, transfer a batch to the global list. |
1622 static void | 1793 static void |
1623 gfput(P *p, G *gp) | 1794 gfput(P *p, G *gp) |
1624 { | 1795 { |
1625 if(gp->stackguard - StackGuard != gp->stack0) | 1796 if(gp->stackguard - StackGuard != gp->stack0) |
1626 runtime·throw("invalid stack in gfput"); | 1797 runtime·throw("invalid stack in gfput"); |
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1707 if(n > MaxGomaxprocs) | 1878 if(n > MaxGomaxprocs) |
1708 n = MaxGomaxprocs; | 1879 n = MaxGomaxprocs; |
1709 runtime·lock(&runtime·sched); | 1880 runtime·lock(&runtime·sched); |
1710 ret = runtime·gomaxprocs; | 1881 ret = runtime·gomaxprocs; |
1711 if(n <= 0 || n == ret) { | 1882 if(n <= 0 || n == ret) { |
1712 runtime·unlock(&runtime·sched); | 1883 runtime·unlock(&runtime·sched); |
1713 return ret; | 1884 return ret; |
1714 } | 1885 } |
1715 runtime·unlock(&runtime·sched); | 1886 runtime·unlock(&runtime·sched); |
1716 | 1887 |
1717 » runtime·semacquire(&runtime·worldsema); | 1888 » runtime·semacquire(&runtime·worldsema, false); |
1718 m->gcing = 1; | 1889 m->gcing = 1; |
1719 runtime·stoptheworld(); | 1890 runtime·stoptheworld(); |
1720 newprocs = n; | 1891 newprocs = n; |
1721 m->gcing = 0; | 1892 m->gcing = 0; |
1722 runtime·semrelease(&runtime·worldsema); | 1893 runtime·semrelease(&runtime·worldsema); |
1723 runtime·starttheworld(); | 1894 runtime·starttheworld(); |
1724 | 1895 |
1725 return ret; | 1896 return ret; |
1726 } | 1897 } |
1727 | 1898 |
1728 static void | 1899 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below |
1729 LockOSThread(void) | 1900 // after they modify m->locked. Do not allow preemption during this call, |
| 1901 // or else the m might be different in this function than in the caller. |
| 1902 #pragma textflag NOSPLIT |
| 1903 static void |
| 1904 lockOSThread(void) |
1730 { | 1905 { |
1731 m->lockedg = g; | 1906 m->lockedg = g; |
1732 g->lockedm = m; | 1907 g->lockedm = m; |
1733 } | 1908 } |
1734 | 1909 |
1735 void | 1910 void |
1736 runtime·LockOSThread(void) | 1911 runtime·LockOSThread(void) |
1737 { | 1912 { |
1738 m->locked |= LockExternal; | 1913 m->locked |= LockExternal; |
1739 » LockOSThread(); | 1914 » lockOSThread(); |
1740 } | 1915 } |
1741 | 1916 |
1742 void | 1917 void |
1743 runtime·lockOSThread(void) | 1918 runtime·lockOSThread(void) |
1744 { | 1919 { |
1745 m->locked += LockInternal; | 1920 m->locked += LockInternal; |
1746 » LockOSThread(); | 1921 » lockOSThread(); |
1747 } | 1922 } |
1748 | 1923 |
1749 static void | 1924 |
1750 UnlockOSThread(void) | 1925 // unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread
below |
| 1926 // after they update m->locked. Do not allow preemption during this call, |
 | 1927 // or else the m might be different in this function than in the caller. |
| 1928 #pragma textflag NOSPLIT |
| 1929 static void |
| 1930 unlockOSThread(void) |
1751 { | 1931 { |
1752 if(m->locked != 0) | 1932 if(m->locked != 0) |
1753 return; | 1933 return; |
1754 m->lockedg = nil; | 1934 m->lockedg = nil; |
1755 g->lockedm = nil; | 1935 g->lockedm = nil; |
1756 } | 1936 } |
1757 | 1937 |
1758 void | 1938 void |
1759 runtime·UnlockOSThread(void) | 1939 runtime·UnlockOSThread(void) |
1760 { | 1940 { |
1761 m->locked &= ~LockExternal; | 1941 m->locked &= ~LockExternal; |
1762 » UnlockOSThread(); | 1942 » unlockOSThread(); |
1763 } | 1943 } |
1764 | 1944 |
1765 void | 1945 void |
1766 runtime·unlockOSThread(void) | 1946 runtime·unlockOSThread(void) |
1767 { | 1947 { |
1768 if(m->locked < LockInternal) | 1948 if(m->locked < LockInternal) |
1769 runtime·throw("runtime: internal error: misuse of lockOSThread/u
nlockOSThread"); | 1949 runtime·throw("runtime: internal error: misuse of lockOSThread/u
nlockOSThread"); |
1770 m->locked -= LockInternal; | 1950 m->locked -= LockInternal; |
1771 » UnlockOSThread(); | 1951 » unlockOSThread(); |
1772 } | 1952 } |
1773 | 1953 |
1774 bool | 1954 bool |
1775 runtime·lockedOSThread(void) | 1955 runtime·lockedOSThread(void) |
1776 { | 1956 { |
1777 return g->lockedm != nil && m->lockedg != nil; | 1957 return g->lockedm != nil && m->lockedg != nil; |
1778 } | 1958 } |
1779 | 1959 |
1780 // for testing of callbacks | 1960 // for testing of callbacks |
1781 void | 1961 void |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1824 { | 2004 { |
1825 runtime·throw("runtime: mcall called on m->g0 stack"); | 2005 runtime·throw("runtime: mcall called on m->g0 stack"); |
1826 } | 2006 } |
1827 | 2007 |
1828 void | 2008 void |
1829 runtime·badmcall2(void) // called from assembly | 2009 runtime·badmcall2(void) // called from assembly |
1830 { | 2010 { |
1831 runtime·throw("runtime: mcall function returned"); | 2011 runtime·throw("runtime: mcall function returned"); |
1832 } | 2012 } |
1833 | 2013 |
| 2014 void |
| 2015 runtime·badreflectcall(void) // called from assembly |
| 2016 { |
| 2017 runtime·panicstring("runtime: arg size to reflect.call more than 1GB"); |
| 2018 } |
| 2019 |
1834 static struct { | 2020 static struct { |
1835 Lock; | 2021 Lock; |
1836 void (*fn)(uintptr*, int32); | 2022 void (*fn)(uintptr*, int32); |
1837 int32 hz; | 2023 int32 hz; |
1838 uintptr pcbuf[100]; | 2024 uintptr pcbuf[100]; |
1839 } prof; | 2025 } prof; |
1840 | 2026 |
| 2027 static void |
| 2028 System(void) |
| 2029 { |
| 2030 } |
| 2031 |
1841 // Called if we receive a SIGPROF signal. | 2032 // Called if we receive a SIGPROF signal. |
1842 void | 2033 void |
1843 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp) | 2034 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp) |
1844 { | 2035 { |
1845 int32 n; | 2036 int32 n; |
1846 | 2037 » bool traceback; |
| 2038 |
| 2039 » if(prof.fn == nil || prof.hz == 0) |
| 2040 » » return; |
| 2041 » traceback = true; |
1847 // Windows does profiling in a dedicated thread w/o m. | 2042 // Windows does profiling in a dedicated thread w/o m. |
1848 if(!Windows && (m == nil || m->mcache == nil)) | 2043 if(!Windows && (m == nil || m->mcache == nil)) |
1849 » » return; | 2044 » » traceback = false; |
1850 » if(prof.fn == nil || prof.hz == 0) | 2045 » if(gp == m->g0 || gp == m->gsignal) |
1851 » » return; | 2046 » » traceback = false; |
| 2047 » // Race detector calls asmcgocall w/o entersyscall/exitsyscall, |
| 2048 » // we can not currently unwind through asmcgocall. |
| 2049 » if(m != nil && m->racecall) |
| 2050 » » traceback = false; |
1852 | 2051 |
1853 runtime·lock(&prof); | 2052 runtime·lock(&prof); |
1854 if(prof.fn == nil) { | 2053 if(prof.fn == nil) { |
1855 runtime·unlock(&prof); | 2054 runtime·unlock(&prof); |
1856 return; | 2055 return; |
1857 } | 2056 } |
1858 » n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, p
rof.pcbuf, nelem(prof.pcbuf), nil, nil); | 2057 » n = 0; |
1859 » if(n > 0) | 2058 » if(traceback) |
1860 » » prof.fn(prof.pcbuf, n); | 2059 » » n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr,
gp, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false); |
| 2060 » if(!traceback || n <= 0) { |
| 2061 » » n = 2; |
| 2062 » » prof.pcbuf[0] = (uintptr)pc; |
| 2063 » » prof.pcbuf[1] = (uintptr)System + 1; |
| 2064 » } |
| 2065 » prof.fn(prof.pcbuf, n); |
1861 runtime·unlock(&prof); | 2066 runtime·unlock(&prof); |
1862 } | 2067 } |
1863 | 2068 |
1864 // Arrange to call fn with a traceback hz times a second. | 2069 // Arrange to call fn with a traceback hz times a second. |
1865 void | 2070 void |
1866 runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz) | 2071 runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz) |
1867 { | 2072 { |
1868 // Force sane arguments. | 2073 // Force sane arguments. |
1869 if(hz < 0) | 2074 if(hz < 0) |
1870 hz = 0; | 2075 hz = 0; |
1871 if(hz == 0) | 2076 if(hz == 0) |
1872 fn = nil; | 2077 fn = nil; |
1873 if(fn == nil) | 2078 if(fn == nil) |
1874 hz = 0; | 2079 hz = 0; |
1875 | 2080 |
1876 » // Stop profiler on this cpu so that it is safe to lock prof. | 2081 » // Disable preemption, otherwise we can be rescheduled to another thread |
| 2082 » // that has profiling enabled. |
| 2083 » m->locks++; |
| 2084 |
| 2085 » // Stop profiler on this thread so that it is safe to lock prof. |
1877 // if a profiling signal came in while we had prof locked, | 2086 // if a profiling signal came in while we had prof locked, |
1878 // it would deadlock. | 2087 // it would deadlock. |
1879 runtime·resetcpuprofiler(0); | 2088 runtime·resetcpuprofiler(0); |
1880 | 2089 |
1881 runtime·lock(&prof); | 2090 runtime·lock(&prof); |
1882 prof.fn = fn; | 2091 prof.fn = fn; |
1883 prof.hz = hz; | 2092 prof.hz = hz; |
1884 runtime·unlock(&prof); | 2093 runtime·unlock(&prof); |
1885 runtime·lock(&runtime·sched); | 2094 runtime·lock(&runtime·sched); |
1886 runtime·sched.profilehz = hz; | 2095 runtime·sched.profilehz = hz; |
1887 runtime·unlock(&runtime·sched); | 2096 runtime·unlock(&runtime·sched); |
1888 | 2097 |
1889 if(hz != 0) | 2098 if(hz != 0) |
1890 runtime·resetcpuprofiler(hz); | 2099 runtime·resetcpuprofiler(hz); |
| 2100 |
| 2101 m->locks--; |
1891 } | 2102 } |
1892 | 2103 |
1893 // Change number of processors. The world is stopped, sched is locked. | 2104 // Change number of processors. The world is stopped, sched is locked. |
1894 static void | 2105 static void |
1895 procresize(int32 new) | 2106 procresize(int32 new) |
1896 { | 2107 { |
1897 int32 i, old; | 2108 int32 i, old; |
1898 G *gp; | 2109 G *gp; |
1899 P *p; | 2110 P *p; |
1900 | 2111 |
1901 old = runtime·gomaxprocs; | 2112 old = runtime·gomaxprocs; |
1902 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs) | 2113 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs) |
1903 runtime·throw("procresize: invalid arg"); | 2114 runtime·throw("procresize: invalid arg"); |
1904 // initialize new P's | 2115 // initialize new P's |
1905 for(i = 0; i < new; i++) { | 2116 for(i = 0; i < new; i++) { |
1906 p = runtime·allp[i]; | 2117 p = runtime·allp[i]; |
1907 if(p == nil) { | 2118 if(p == nil) { |
1908 » » » p = (P*)runtime·mallocgc(sizeof(*p), 0, 0, 1); | 2119 » » » p = (P*)runtime·mallocgc(sizeof(*p), 0, FlagNoInvokeGC); |
| 2120 » » » p->id = i; |
1909 p->status = Pgcstop; | 2121 p->status = Pgcstop; |
1910 runtime·atomicstorep(&runtime·allp[i], p); | 2122 runtime·atomicstorep(&runtime·allp[i], p); |
1911 } | 2123 } |
1912 if(p->mcache == nil) { | 2124 if(p->mcache == nil) { |
1913 if(old==0 && i==0) | 2125 if(old==0 && i==0) |
1914 p->mcache = m->mcache; // bootstrap | 2126 p->mcache = m->mcache; // bootstrap |
1915 else | 2127 else |
1916 p->mcache = runtime·allocmcache(); | 2128 p->mcache = runtime·allocmcache(); |
1917 } | 2129 } |
1918 if(p->runq == nil) { | 2130 if(p->runq == nil) { |
1919 p->runqsize = 128; | 2131 p->runqsize = 128; |
1920 » » » p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*),
0, 0, 1); | 2132 » » » p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*),
0, FlagNoInvokeGC); |
1921 } | 2133 } |
1922 } | 2134 } |
1923 | 2135 |
1924 // redistribute runnable G's evenly | 2136 // redistribute runnable G's evenly |
1925 for(i = 0; i < old; i++) { | 2137 for(i = 0; i < old; i++) { |
1926 p = runtime·allp[i]; | 2138 p = runtime·allp[i]; |
1927 while(gp = runqget(p)) | 2139 while(gp = runqget(p)) |
1928 globrunqput(gp); | 2140 globrunqput(gp); |
1929 } | 2141 } |
1930 // start at 1 because current M already executes some G and will acquire
allp[0] below, | 2142 // start at 1 because current M already executes some G and will acquire
allp[0] below, |
(...skipping 22 matching lines...) Expand all Loading... |
1953 m->mcache = nil; | 2165 m->mcache = nil; |
1954 p = runtime·allp[0]; | 2166 p = runtime·allp[0]; |
1955 p->m = nil; | 2167 p->m = nil; |
1956 p->status = Pidle; | 2168 p->status = Pidle; |
1957 acquirep(p); | 2169 acquirep(p); |
1958 for(i = new-1; i > 0; i--) { | 2170 for(i = new-1; i > 0; i--) { |
1959 p = runtime·allp[i]; | 2171 p = runtime·allp[i]; |
1960 p->status = Pidle; | 2172 p->status = Pidle; |
1961 pidleput(p); | 2173 pidleput(p); |
1962 } | 2174 } |
1963 runtime·singleproc = new == 1; | |
1964 runtime·atomicstore((uint32*)&runtime·gomaxprocs, new); | 2175 runtime·atomicstore((uint32*)&runtime·gomaxprocs, new); |
1965 } | 2176 } |
1966 | 2177 |
1967 // Associate p and the current m. | 2178 // Associate p and the current m. |
1968 static void | 2179 static void |
1969 acquirep(P *p) | 2180 acquirep(P *p) |
1970 { | 2181 { |
1971 if(m->p || m->mcache) | 2182 if(m->p || m->mcache) |
1972 runtime·throw("acquirep: already in go"); | 2183 runtime·throw("acquirep: already in go"); |
1973 if(p->m || p->status != Pidle) { | 2184 if(p->m || p->status != Pidle) { |
(...skipping 21 matching lines...) Expand all Loading... |
1995 runtime·throw("releasep: invalid p state"); | 2206 runtime·throw("releasep: invalid p state"); |
1996 } | 2207 } |
1997 m->p = nil; | 2208 m->p = nil; |
1998 m->mcache = nil; | 2209 m->mcache = nil; |
1999 p->m = nil; | 2210 p->m = nil; |
2000 p->status = Pidle; | 2211 p->status = Pidle; |
2001 return p; | 2212 return p; |
2002 } | 2213 } |
2003 | 2214 |
2004 static void | 2215 static void |
2005 inclocked(int32 v) | 2216 incidlelocked(int32 v) |
2006 { | 2217 { |
2007 runtime·lock(&runtime·sched); | 2218 runtime·lock(&runtime·sched); |
2008 » runtime·sched.mlocked += v; | 2219 » runtime·sched.nmidlelocked += v; |
2009 if(v > 0) | 2220 if(v > 0) |
2010 checkdead(); | 2221 checkdead(); |
2011 runtime·unlock(&runtime·sched); | 2222 runtime·unlock(&runtime·sched); |
2012 } | 2223 } |
2013 | 2224 |
2014 // Check for deadlock situation. | 2225 // Check for deadlock situation. |
2015 // The check is based on number of running M's, if 0 -> deadlock. | 2226 // The check is based on number of running M's, if 0 -> deadlock. |
2016 static void | 2227 static void |
2017 checkdead(void) | 2228 checkdead(void) |
2018 { | 2229 { |
2019 G *gp; | 2230 G *gp; |
2020 int32 run, grunning, s; | 2231 int32 run, grunning, s; |
2021 | 2232 |
2022 // -1 for sysmon | 2233 // -1 for sysmon |
2023 » run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.mlocke
d - 1; | 2234 » run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidle
locked - 1; |
2024 if(run > 0) | 2235 if(run > 0) |
2025 return; | 2236 return; |
2026 if(run < 0) { | 2237 if(run < 0) { |
2027 » » runtime·printf("checkdead: nmidle=%d mlocked=%d mcount=%d\n", | 2238 » » runtime·printf("checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n
", |
2028 » » » runtime·sched.nmidle, runtime·sched.mlocked, runtime·sch
ed.mcount); | 2239 » » » runtime·sched.nmidle, runtime·sched.nmidlelocked, runtim
e·sched.mcount); |
2029 runtime·throw("checkdead: inconsistent counts"); | 2240 runtime·throw("checkdead: inconsistent counts"); |
2030 } | 2241 } |
2031 grunning = 0; | 2242 grunning = 0; |
2032 for(gp = runtime·allg; gp; gp = gp->alllink) { | 2243 for(gp = runtime·allg; gp; gp = gp->alllink) { |
2033 if(gp->isbackground) | 2244 if(gp->isbackground) |
2034 continue; | 2245 continue; |
2035 s = gp->status; | 2246 s = gp->status; |
2036 if(s == Gwaiting) | 2247 if(s == Gwaiting) |
2037 grunning++; | 2248 grunning++; |
2038 else if(s == Grunnable || s == Grunning || s == Gsyscall) { | 2249 else if(s == Grunnable || s == Grunning || s == Gsyscall) { |
2039 runtime·printf("checkdead: find g %D in status %d\n", gp
->goid, s); | 2250 runtime·printf("checkdead: find g %D in status %d\n", gp
->goid, s); |
2040 runtime·throw("checkdead: runnable g"); | 2251 runtime·throw("checkdead: runnable g"); |
2041 } | 2252 } |
2042 } | 2253 } |
2043 if(grunning == 0) // possible if main goroutine calls runtime·Goexit() | 2254 if(grunning == 0) // possible if main goroutine calls runtime·Goexit() |
2044 runtime·exit(0); | 2255 runtime·exit(0); |
2045 m->throwing = -1; // do not dump full stacks | 2256 m->throwing = -1; // do not dump full stacks |
2046 runtime·throw("all goroutines are asleep - deadlock!"); | 2257 runtime·throw("all goroutines are asleep - deadlock!"); |
2047 } | 2258 } |
2048 | 2259 |
2049 static void | 2260 static void |
2050 sysmon(void) | 2261 sysmon(void) |
2051 { | 2262 { |
2052 uint32 idle, delay; | 2263 uint32 idle, delay; |
2053 » int64 now, lastpoll; | 2264 » int64 now, lastpoll, lasttrace; |
2054 G *gp; | 2265 G *gp; |
2055 » uint32 ticks[MaxGomaxprocs]; | 2266 |
2056 | 2267 » lasttrace = 0; |
2057 	idle = 0; // how many cycles in succession we had not woken up somebody | 2268 	idle = 0; // how many cycles in succession we had not woken up somebody |
2058 delay = 0; | 2269 delay = 0; |
2059 for(;;) { | 2270 for(;;) { |
2060 if(idle == 0) // start with 20us sleep... | 2271 if(idle == 0) // start with 20us sleep... |
2061 delay = 20; | 2272 delay = 20; |
2062 else if(idle > 50) // start doubling the sleep after 1ms... | 2273 else if(idle > 50) // start doubling the sleep after 1ms... |
2063 delay *= 2; | 2274 delay *= 2; |
2064 if(delay > 10*1000) // up to 10ms | 2275 if(delay > 10*1000) // up to 10ms |
2065 delay = 10*1000; | 2276 delay = 10*1000; |
2066 runtime·usleep(delay); | 2277 runtime·usleep(delay); |
2067 » » if(runtime·gcwaiting || runtime·atomicload(&runtime·sched.npidle
) == runtime·gomaxprocs) { // TODO: fast atomic | 2278 » » if(runtime·debug.schedtrace <= 0 && |
| 2279 » » » (runtime·gcwaiting || runtime·atomicload(&runtime·sched.
npidle) == runtime·gomaxprocs)) { // TODO: fast atomic |
2068 runtime·lock(&runtime·sched); | 2280 runtime·lock(&runtime·sched); |
2069 if(runtime·atomicload(&runtime·gcwaiting) || runtime·ato
micload(&runtime·sched.npidle) == runtime·gomaxprocs) { | 2281 if(runtime·atomicload(&runtime·gcwaiting) || runtime·ato
micload(&runtime·sched.npidle) == runtime·gomaxprocs) { |
2070 runtime·atomicstore(&runtime·sched.sysmonwait, 1
); | 2282 runtime·atomicstore(&runtime·sched.sysmonwait, 1
); |
2071 runtime·unlock(&runtime·sched); | 2283 runtime·unlock(&runtime·sched); |
2072 runtime·notesleep(&runtime·sched.sysmonnote); | 2284 runtime·notesleep(&runtime·sched.sysmonnote); |
2073 runtime·noteclear(&runtime·sched.sysmonnote); | 2285 runtime·noteclear(&runtime·sched.sysmonnote); |
2074 idle = 0; | 2286 idle = 0; |
2075 delay = 20; | 2287 delay = 20; |
2076 } else | 2288 } else |
2077 runtime·unlock(&runtime·sched); | 2289 runtime·unlock(&runtime·sched); |
2078 } | 2290 } |
2079 // poll network if not polled for more than 10ms | 2291 // poll network if not polled for more than 10ms |
2080 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll); | 2292 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll); |
2081 now = runtime·nanotime(); | 2293 now = runtime·nanotime(); |
2082 if(lastpoll != 0 && lastpoll + 10*1000*1000 > now) { | 2294 if(lastpoll != 0 && lastpoll + 10*1000*1000 > now) { |
| 2295 runtime·cas64(&runtime·sched.lastpoll, lastpoll, now); |
2083 gp = runtime·netpoll(false); // non-blocking | 2296 gp = runtime·netpoll(false); // non-blocking |
2084 » » » injectglist(gp); | 2297 » » » if(gp) { |
| 2298 » » » » // Need to decrement number of idle locked M's |
| 2299 » » » » // (pretending that one more is running) before
injectglist. |
| 2300 » » » » // Otherwise it can lead to the following situat
ion: |
| 2301 » » » » // injectglist grabs all P's but before it start
s M's to run the P's, |
| 2302 » » » » // another M returns from syscall, finishes runn
ing its G, |
| 2303 » » » » // observes that there is no work to do and no o
ther running M's |
| 2304 » » » » // and reports deadlock. |
| 2305 » » » » incidlelocked(-1); |
| 2306 » » » » injectglist(gp); |
| 2307 » » » » incidlelocked(1); |
| 2308 » » » } |
2085 } | 2309 } |
2086 // retake P's blocked in syscalls | 2310 // retake P's blocked in syscalls |
2087 » » if(retake(ticks)) | 2311 » » // and preempt long running G's |
| 2312 » » if(retake(now)) |
2088 idle = 0; | 2313 idle = 0; |
2089 else | 2314 else |
2090 idle++; | 2315 idle++; |
2091 » } | 2316 |
2092 } | 2317 » » if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.sch
edtrace*1000000ll <= now) { |
| 2318 » » » lasttrace = now; |
| 2319 » » » runtime·schedtrace(runtime·debug.scheddetail); |
| 2320 » » } |
| 2321 » } |
| 2322 } |
| 2323 |
| 2324 typedef struct Pdesc Pdesc; |
| 2325 struct Pdesc |
| 2326 { |
| 2327 » uint32» schedtick; |
| 2328 » int64» schedwhen; |
| 2329 » uint32» syscalltick; |
| 2330 » int64» syscallwhen; |
| 2331 }; |
| 2332 static Pdesc pdesc[MaxGomaxprocs]; |
2093 | 2333 |
2094 static uint32 | 2334 static uint32 |
2095 retake(uint32 *ticks) | 2335 retake(int64 now) |
2096 { | 2336 { |
2097 uint32 i, s, n; | 2337 uint32 i, s, n; |
2098 int64 t; | 2338 int64 t; |
2099 P *p; | 2339 P *p; |
| 2340 Pdesc *pd; |
2100 | 2341 |
2101 n = 0; | 2342 n = 0; |
2102 for(i = 0; i < runtime·gomaxprocs; i++) { | 2343 for(i = 0; i < runtime·gomaxprocs; i++) { |
2103 p = runtime·allp[i]; | 2344 p = runtime·allp[i]; |
2104 if(p==nil) | 2345 if(p==nil) |
2105 continue; | 2346 continue; |
2106 » » t = p->tick; | 2347 » » pd = &pdesc[i]; |
2107 » » if(ticks[i] != t) { | |
2108 » » » ticks[i] = t; | |
2109 » » » continue; | |
2110 » » } | |
2111 s = p->status; | 2348 s = p->status; |
2112 » » if(s != Psyscall) | 2349 » » if(s == Psyscall) { |
2113 » » » continue; | 2350 » » » // Retake P from syscall if it's there for more than 1 s
ysmon tick (20us). |
2114 » » if(p->runqhead == p->runqtail && runtime·atomicload(&runtime·sch
ed.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0) // TODO: fast a
tomic | 2351 » » » // But only if there is other work to do. |
2115 » » » continue; | 2352 » » » t = p->syscalltick; |
2116 » » // Need to increment number of locked M's before the CAS. | 2353 » » » if(pd->syscalltick != t) { |
2117 » » // Otherwise the M from which we retake can exit the syscall, | 2354 » » » » pd->syscalltick = t; |
2118 » » // increment nmidle and report deadlock. | 2355 » » » » pd->syscallwhen = now; |
2119 » » inclocked(-1); | 2356 » » » » continue; |
2120 » » if(runtime·cas(&p->status, s, Pidle)) { | 2357 » » » } |
2121 » » » n++; | 2358 » » » if(p->runqhead == p->runqtail && |
2122 » » » handoffp(p); | 2359 » » » » runtime·atomicload(&runtime·sched.nmspinning) +
runtime·atomicload(&runtime·sched.npidle) > 0) |
2123 » » } | 2360 » » » » continue; |
2124 » » inclocked(1); | 2361 » » » // Need to decrement number of idle locked M's |
| 2362 » » » // (pretending that one more is running) before the CAS. |
| 2363 » » » // Otherwise the M from which we retake can exit the sys
call, |
| 2364 » » » // increment nmidle and report deadlock. |
| 2365 » » » incidlelocked(-1); |
| 2366 » » » if(runtime·cas(&p->status, s, Pidle)) { |
| 2367 » » » » n++; |
| 2368 » » » » handoffp(p); |
| 2369 » » » } |
| 2370 » » » incidlelocked(1); |
| 2371 » » } else if(s == Prunning) { |
| 2372 » » » // Preempt G if it's running for more than 10ms. |
| 2373 » » » t = p->schedtick; |
| 2374 » » » if(pd->schedtick != t) { |
| 2375 » » » » pd->schedtick = t; |
| 2376 » » » » pd->schedwhen = now; |
| 2377 » » » » continue; |
| 2378 » » » } |
| 2379 » » » if(pd->schedwhen + 10*1000*1000 > now) |
| 2380 » » » » continue; |
| 2381 » » » preemptone(p); |
| 2382 » » } |
2125 } | 2383 } |
2126 return n; | 2384 return n; |
2127 } | 2385 } |
2128 | 2386 |
2129 // Tell all goroutines that they have been preempted and they should stop. | 2387 // Tell all goroutines that they have been preempted and they should stop. |
2130 // This function is purely best-effort. It can fail to inform a goroutine if a | 2388 // This function is purely best-effort. It can fail to inform a goroutine if a |
2131 // processor just started running it. | 2389 // processor just started running it. |
2132 // No locks need to be held. | 2390 // No locks need to be held. |
2133 static void | 2391 // Returns true if preemption request was issued to at least one goroutine. |
| 2392 static bool |
2134 preemptall(void) | 2393 preemptall(void) |
2135 { | 2394 { |
2136 P *p; | 2395 P *p; |
2137 int32 i; | 2396 int32 i; |
2138 | 2397 » bool res; |
| 2398 |
| 2399 » res = false; |
2139 for(i = 0; i < runtime·gomaxprocs; i++) { | 2400 for(i = 0; i < runtime·gomaxprocs; i++) { |
2140 p = runtime·allp[i]; | 2401 p = runtime·allp[i]; |
2141 if(p == nil || p->status != Prunning) | 2402 if(p == nil || p->status != Prunning) |
2142 continue; | 2403 continue; |
2143 » » preemptone(p); | 2404 » » res |= preemptone(p); |
2144 » } | 2405 » } |
| 2406 » return res; |
2145 } | 2407 } |
2146 | 2408 |
2147 // Tell the goroutine running on processor P to stop. | 2409 // Tell the goroutine running on processor P to stop. |
2148 // This function is purely best-effort. It can incorrectly fail to inform the | 2410 // This function is purely best-effort. It can incorrectly fail to inform the |
2149 // goroutine. It can inform the wrong goroutine. Even if it informs the | 2411 // goroutine. It can inform the wrong goroutine. Even if it informs the |
2150 // correct goroutine, that goroutine might ignore the request if it is | 2412 // correct goroutine, that goroutine might ignore the request if it is |
2151 // simultaneously executing runtime·newstack. | 2413 // simultaneously executing runtime·newstack. |
2152 // No lock needs to be held. | 2414 // No lock needs to be held. |
2153 static void | 2415 // Returns true if preemption request was issued. |
| 2416 static bool |
2154 preemptone(P *p) | 2417 preemptone(P *p) |
2155 { | 2418 { |
2156 M *mp; | 2419 M *mp; |
2157 G *gp; | 2420 G *gp; |
2158 | 2421 |
2159 mp = p->m; | 2422 mp = p->m; |
2160 if(mp == nil || mp == m) | 2423 if(mp == nil || mp == m) |
2161 » » return; | 2424 » » return false; |
2162 gp = mp->curg; | 2425 gp = mp->curg; |
2163 if(gp == nil || gp == mp->g0) | 2426 if(gp == nil || gp == mp->g0) |
| 2427 return false; |
| 2428 gp->preempt = true; |
| 2429 gp->stackguard0 = StackPreempt; |
| 2430 return true; |
| 2431 } |
| 2432 |
| 2433 void |
| 2434 runtime·schedtrace(bool detailed) |
| 2435 { |
| 2436 static int64 starttime; |
| 2437 int64 now; |
| 2438 int64 id1, id2, id3; |
| 2439 int32 i, q, t, h, s; |
| 2440 int8 *fmt; |
| 2441 M *mp, *lockedm; |
| 2442 G *gp, *lockedg; |
| 2443 P *p; |
| 2444 |
| 2445 now = runtime·nanotime(); |
| 2446 if(starttime == 0) |
| 2447 starttime = now; |
| 2448 |
| 2449 runtime·lock(&runtime·sched); |
| 2450 runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idleth
reads=%d runqueue=%d", |
| 2451 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidl
e, runtime·sched.mcount, |
| 2452 runtime·sched.nmidle, runtime·sched.runqsize); |
| 2453 if(detailed) { |
| 2454 runtime·printf(" gcwaiting=%d nmidlelocked=%d nmspinning=%d stop
wait=%d sysmonwait=%d\n", |
| 2455 runtime·gcwaiting, runtime·sched.nmidlelocked, runtime·s
ched.nmspinning, |
| 2456 runtime·sched.stopwait, runtime·sched.sysmonwait); |
| 2457 } |
| 2458 // We must be careful while reading data from P's, M's and G's. |
| 2459 // Even if we hold schedlock, most data can be changed concurrently. |
| 2460 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to
nil. |
| 2461 for(i = 0; i < runtime·gomaxprocs; i++) { |
| 2462 p = runtime·allp[i]; |
| 2463 if(p == nil) |
| 2464 continue; |
| 2465 mp = p->m; |
| 2466 t = p->runqtail; |
| 2467 h = p->runqhead; |
| 2468 s = p->runqsize; |
| 2469 q = t - h; |
| 2470 if(q < 0) |
| 2471 q += s; |
| 2472 if(detailed) |
| 2473 runtime·printf(" P%d: status=%d schedtick=%d syscalltic
k=%d m=%d runqsize=%d/%d gfreecnt=%d\n", |
| 2474 i, p->status, p->schedtick, p->syscalltick, mp ?
mp->id : -1, q, s, p->gfreecnt); |
| 2475 else { |
| 2476 // In non-detailed mode format lengths of per-P run queu
es as: |
| 2477 // [len1 len2 len3 len4] |
| 2478 fmt = " %d"; |
| 2479 if(runtime·gomaxprocs == 1) |
| 2480 fmt = " [%d]\n"; |
| 2481 else if(i == 0) |
| 2482 fmt = " [%d"; |
| 2483 else if(i == runtime·gomaxprocs-1) |
| 2484 fmt = " %d]\n"; |
| 2485 runtime·printf(fmt, q); |
| 2486 } |
| 2487 } |
| 2488 if(!detailed) { |
| 2489 runtime·unlock(&runtime·sched); |
2164 return; | 2490 return; |
2165 » gp->stackguard0 = StackPreempt; | 2491 » } |
| 2492 » for(mp = runtime·allm; mp; mp = mp->alllink) { |
| 2493 » » p = mp->p; |
| 2494 » » gp = mp->curg; |
| 2495 » » lockedg = mp->lockedg; |
| 2496 » » id1 = -1; |
| 2497 » » if(p) |
| 2498 » » » id1 = p->id; |
| 2499 » » id2 = -1; |
| 2500 » » if(gp) |
| 2501 » » » id2 = gp->goid; |
| 2502 » » id3 = -1; |
| 2503 » » if(lockedg) |
| 2504 » » » id3 = lockedg->goid; |
| 2505 » » runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gci
ng=%d" |
| 2506 » » » " locks=%d dying=%d helpgc=%d spinning=%d lockedg=%D\n", |
| 2507 » » » mp->id, id1, id2, |
| 2508 » » » mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->d
ying, mp->helpgc, |
| 2509 » » » mp->spinning, id3); |
| 2510 » } |
| 2511 » for(gp = runtime·allg; gp; gp = gp->alllink) { |
| 2512 » » mp = gp->m; |
| 2513 » » lockedm = gp->lockedm; |
| 2514 » » runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n", |
| 2515 » » » gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1, |
| 2516 » » » lockedm ? lockedm->id : -1); |
| 2517 » } |
| 2518 » runtime·unlock(&runtime·sched); |
2166 } | 2519 } |
2167 | 2520 |
2168 // Put mp on midle list. | 2521 // Put mp on midle list. |
2169 // Sched must be locked. | 2522 // Sched must be locked. |
2170 static void | 2523 static void |
2171 mput(M *mp) | 2524 mput(M *mp) |
2172 { | 2525 { |
2173 mp->schedlink = runtime·sched.midle; | 2526 mp->schedlink = runtime·sched.midle; |
2174 runtime·sched.midle = mp; | 2527 runtime·sched.midle = mp; |
2175 runtime·sched.nmidle++; | 2528 runtime·sched.nmidle++; |
(...skipping 288 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2464 } | 2817 } |
2465 } | 2818 } |
2466 if(s != i/2 && s != i/2+1) { | 2819 if(s != i/2 && s != i/2+1) { |
2467 runtime·printf("bad steal %d, want %d or %d, iter %d\n", | 2820 runtime·printf("bad steal %d, want %d or %d, iter %d\n", |
2468 s, i/2, i/2+1, i); | 2821 s, i/2, i/2+1, i); |
2469 runtime·throw("bad steal"); | 2822 runtime·throw("bad steal"); |
2470 } | 2823 } |
2471 } | 2824 } |
2472 } | 2825 } |
2473 | 2826 |
| 2827 extern void runtime·morestack(void); |
| 2828 |
| 2829 // Does f mark the top of a goroutine stack? |
2474 bool | 2830 bool |
2475 runtime·haszeroargs(uintptr pc) | 2831 runtime·topofstack(Func *f) |
2476 { | 2832 { |
2477 » return pc == (uintptr)runtime·goexit || | 2833 » return f->entry == (uintptr)runtime·goexit || |
2478 » » pc == (uintptr)runtime·mcall || | 2834 » » f->entry == (uintptr)runtime·mstart || |
2479 » » pc == (uintptr)runtime·mstart || | 2835 » » f->entry == (uintptr)runtime·mcall || |
2480 » » pc == (uintptr)_rt0_go; | 2836 » » f->entry == (uintptr)runtime·morestack || |
2481 } | 2837 » » f->entry == (uintptr)runtime·lessstack || |
2482 | 2838 » » f->entry == (uintptr)_rt0_go; |
| 2839 } |
LEFT | RIGHT |