LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #include "runtime.h" | 5 #include "runtime.h" |
6 #include "arch_GOARCH.h" | 6 #include "arch_GOARCH.h" |
7 #include "defs_GOOS_GOARCH.h" | 7 #include "defs_GOOS_GOARCH.h" |
8 #include "malloc.h" | 8 #include "malloc.h" |
9 #include "os_GOOS.h" | 9 #include "os_GOOS.h" |
10 #include "stack.h" | 10 #include "stack.h" |
| 11 #include "race.h" |
| 12 #include "type.h" |
11 | 13 |
12 // TODO(dvyukov): if a thread w/o mcache catches a signal (in particular SIGABOR
T), | 14 // TODO(dvyukov): if a thread w/o mcache catches a signal (in particular SIGABOR
T), |
13 // then it can't print dump. | 15 // then it can't print dump. |
14 | 16 |
15 enum { maxgomaxprocs = 1<<10 }; | 17 enum { maxgomaxprocs = 1<<10 }; |
16 enum { debug = 0 }; | 18 #define LOG if(0) runtime·printf |
17 #define LOG if(debug) runtime·printf | |
18 #define LOG1 runtime·printf | 19 #define LOG1 runtime·printf |
19 | 20 #define CHECK(cond, fmt) /*if(cond) {} else { runtime·printf fmt; runtime·throw(
"CHECK"); }*/ |
20 //FIXME: fix the comment. | |
21 | 21 |
22 // Go scheduler | 22 // Go scheduler |
23 // | 23 // |
24 // The go scheduler's job is to match ready-to-run goroutines (`g's) | 24 // The go scheduler's job is to match ready-to-run goroutines (`g's) |
25 // with waiting-for-work schedulers (`m's). If there are ready g's | 25 // with waiting-for-work schedulers (`m's). If there are ready g's |
26 // and no waiting m's, ready() will start a new m running in a new | 26 // and no waiting m's, ready() will start a new m running in a new |
27 // OS thread, so that all ready g's can run simultaneously, up to a limit. | 27 // OS thread, so that all ready g's can run simultaneously, up to a limit. |
28 // For now, m's never go away. | 28 // For now, m's never go away. |
29 // | 29 // |
30 // By default, Go keeps only one kernel thread (m) running user code | 30 // By default, Go keeps only one kernel thread (m) running user code |
31 // at a single time; other threads may be blocked in the operating system. | 31 // at a single time; other threads may be blocked in the operating system. |
32 // Setting the environment variable $GOMAXPROCS or calling | 32 // Setting the environment variable $GOMAXPROCS or calling |
33 // runtime.GOMAXPROCS() will change the number of user threads | 33 // runtime.GOMAXPROCS() will change the number of user threads |
34 // allowed to execute simultaneously. $GOMAXPROCS is thus an | 34 // allowed to execute simultaneously. $GOMAXPROCS is thus an |
35 // approximation of the maximum number of cores to use. | 35 // approximation of the maximum number of cores to use. |
36 // | 36 // |
37 // Even a program that can run without deadlock in a single process | 37 // Even a program that can run without deadlock in a single process |
38 // might use more m's if given the chance. For example, the prime | 38 // might use more m's if given the chance. For example, the prime |
39 // sieve will use as many m's as there are primes (up to runtime·sched.mmax), | 39 // sieve will use as many m's as there are primes (up to $GOMAXPROCS), |
40 // allowing different stages of the pipeline to execute in parallel. | 40 // allowing different stages of the pipeline to execute in parallel. |
41 // We could revisit this choice, only kicking off new m's for blocking | |
42 // system calls, but that would limit the amount of parallel computation | |
43 // that go would try to do. | |
44 // | |
45 // In general, one could imagine all sorts of refinements to the | |
46 // scheduler, but the goal now is just to get something working on | |
47 // Linux and OS X. | |
48 | 41 |
49 typedef struct Sched Sched; | 42 typedef struct Sched Sched; |
50 struct Sched { | 43 struct Sched { |
51 Lock; | 44 Lock; |
52 | 45 |
53 M* mhead; // m's waiting for work | 46 M* mhead; // m's waiting for work |
54 int32 mwait; // number of m's waiting for work | 47 int32 mwait; // number of m's waiting for work |
55 int32 mcount; // number of m's that have been created | 48 int32 mcount; // number of m's that have been created |
56 | 49 |
57 P* pidle; // idle P's | 50 P* pidle; // idle P's |
58 int32 npidle; | 51 int32 npidle; |
59 | 52 |
60 » G*» runq; | 53 » G*» runqhead; |
61 » int32» nrunq; | 54 » G*» runqtail; |
62 | 55 » int32» runqsize; |
| 56 |
| 57 » Lock gflock; |
63 G* gfree; | 58 G* gfree; |
64 int32 goidseq; | 59 int32 goidseq; |
| 60 |
| 61 int32 stopwait; |
| 62 Note stopnote; |
| 63 int32 sysmonwait; |
| 64 Note sysmonnote; |
65 | 65 |
66 int32 profilehz; // cpu profiling rate | 66 int32 profilehz; // cpu profiling rate |
67 | 67 |
68 bool init; // running initialization | 68 bool init; // running initialization |
69 bool lockmain; // init called runtime.LockOSThread | 69 bool lockmain; // init called runtime.LockOSThread |
70 }; | 70 }; |
71 | 71 |
72 Sched runtime·sched; | 72 Sched runtime·sched; |
73 int32 runtime·gomaxprocs; | 73 int32 runtime·gomaxprocs; |
74 bool runtime·singleproc; | 74 bool runtime·singleproc; |
75 bool runtime·iscgo; | 75 bool runtime·iscgo; |
76 int32 runtime·gcwaiting; | 76 int32 runtime·gcwaiting; |
77 M runtime·m0; | 77 M runtime·m0; |
78 G runtime·g0; // idle goroutine for m0 | 78 G runtime·g0; // idle goroutine for m0 |
79 static int32 newprocs; | 79 static int32 newprocs; |
80 | 80 |
81 // Keep trace of scavenger's goroutine for deadlock detection. | 81 // Keep trace of scavenger's goroutine for deadlock detection. |
82 static G *scvg; | 82 static G *scvg; |
83 | 83 |
84 // Scheduling helpers. Sched must be locked. | 84 // Scheduling helpers. Sched must be locked. |
85 static void runqput(P*, G*); // put/get on ghead/gtail | 85 static void runqput(P*, G*); // put/get on ghead/gtail |
86 static G* runqget(P*); | 86 static G* runqget(P*); |
87 static void runqgrow(P*); | 87 static void runqgrow(P*); |
88 static G* runqsteal(P*, P*); | 88 static G* runqsteal(P*, P*); |
89 static void globrunqput(G*); | 89 static void globrunqput(G*); |
| 90 static G* globrunqget(void); |
90 static void mput(M*); // put/get on mhead | 91 static void mput(M*); // put/get on mhead |
91 static M* mget(void); | 92 static M* mget(void); |
92 static void gfput(P*, G*); // put/get on gfree | 93 static void gfput(P*, G*); // put/get on gfree |
93 static G* gfget(P*); | 94 static G* gfget(P*); |
94 static void mcommoninit(M*); | 95 static void mcommoninit(M*); |
95 static void schedule(void); | 96 static void schedule(void); |
96 static void procresize(int32); | 97 static void procresize(int32); |
97 static void entergo(M*, P*); | 98 static void entergo(M*, P*); |
98 static void leavego(M*); | 99 static P* releasep(void); |
99 static M* newm(void(*)(void), P*, bool); | 100 static M* newm(void(*)(void), P*, bool); |
100 static void goidle(void); | 101 static void goidle(void); |
101 static void mstop(void); | 102 static void mstop(void); |
102 static void initgstack(G*, byte*, int32); | 103 static void initgstack(G*, byte*, int32); |
103 static void sysmon(void); | 104 static void sysmon(void); |
104 static void netmon(void); | |
105 static void inject(G*, int32*, int32*); | 105 static void inject(G*, int32*, int32*); |
106 static P* pidleget(void); | 106 static P* pidleget(void); |
107 static void pidleput(P*); | 107 static void pidleput(P*); |
108 | |
109 #define EN 1 | |
110 | |
111 static void | |
112 outputstats(void) | |
113 { | |
114 /* | |
115 M *mp; | |
116 SchedStats s; | |
117 int32 i; | |
118 uint64 *src, *dst; | |
119 | |
120 runtime·memclr((byte*)&s, sizeof(s)); | |
121 for(mp=runtime·allm; mp; mp=mp->alllink) { | |
122 src = (uint64*)&mp->schedstats; | |
123 dst = (uint64*)&s; | |
124 for(i=0; i<sizeof(s)/sizeof(uint64); i++) | |
125 dst[i] += src[i]; | |
126 } | |
127 runtime·printf("SchedStats:\n"); | |
128 runtime·printf("nm %D\n", s.nm); | |
129 runtime·printf("sysexitfast %D\n", s.sysexitfast); | |
130 runtime·printf("sysexitmed %D\n", s.sysexitmed); | |
131 runtime·printf("sysexitslow %D\n", s.sysexitslow); | |
132 runtime·printf("stealempty %D\n", s.stealempty); | |
133 runtime·printf("stealn %D\n", s.stealn); | |
134 runtime·printf("stealcnt %D (%D)\n", s.stealcnt, s.stea
lcnt / (s.stealn ? s.stealn : 1)); | |
135 runtime·printf("gstart %D\n", s.gstart); | |
136 runtime·printf("gend %D\n", s.gend); | |
137 runtime·printf("gfput %D\n", s.gfput); | |
138 runtime·printf("gfget %D\n", s.gfget); | |
139 runtime·printf("galloc %D\n", s.galloc); | |
140 */ | |
141 } | |
142 | 108 |
143 // The bootstrap sequence is: | 109 // The bootstrap sequence is: |
144 // | 110 // |
145 // call osinit | 111 // call osinit |
146 // call schedinit | 112 // call schedinit |
147 // make & queue new G | 113 // make & queue new G |
148 // call runtime·mstart | 114 // call runtime·mstart |
149 // | 115 // |
150 // The new G calls runtime·main. | 116 // The new G calls runtime·main. |
151 void | 117 void |
(...skipping 23 matching lines...) Expand all Loading... |
175 if(p != nil && (n = runtime·atoi(p)) > 0) { | 141 if(p != nil && (n = runtime·atoi(p)) > 0) { |
176 if(n > maxgomaxprocs) | 142 if(n > maxgomaxprocs) |
177 n = maxgomaxprocs; | 143 n = maxgomaxprocs; |
178 procs = n; | 144 procs = n; |
179 } | 145 } |
180 runtime·allp = (P**)runtime·malloc((maxgomaxprocs+1)*sizeof(runtime·allp
[0])); | 146 runtime·allp = (P**)runtime·malloc((maxgomaxprocs+1)*sizeof(runtime·allp
[0])); |
181 procresize(procs); | 147 procresize(procs); |
182 | 148 |
183 mstats.enablegc = 1; | 149 mstats.enablegc = 1; |
184 m->nomemprof--; | 150 m->nomemprof--; |
| 151 |
| 152 if(raceenabled) |
| 153 runtime·raceinit(); |
185 } | 154 } |
186 | 155 |
187 extern void main·init(void); | 156 extern void main·init(void); |
188 extern void main·main(void); | 157 extern void main·main(void); |
189 | 158 |
190 // The main goroutine. | 159 // The main goroutine. |
191 void | 160 void |
192 runtime·main(void) | 161 runtime·main(void) |
193 { | 162 { |
194 LOG("%d: runtime·main\n", m->id); | 163 LOG("%d: runtime·main\n", m->id); |
195 | 164 |
196 //TODO(dvyukov): block signals because that thread can't handle them | 165 //TODO(dvyukov): block signals because that thread can't handle them |
197 newm(sysmon, nil, false); | 166 newm(sysmon, nil, false); |
198 newm(netmon, nil, false); | |
199 | 167 |
200 // Lock the main goroutine onto this, the main OS thread, | 168 // Lock the main goroutine onto this, the main OS thread, |
201 // during initialization. Most programs won't care, but a few | 169 // during initialization. Most programs won't care, but a few |
202 // do require certain calls to be made by the main thread. | 170 // do require certain calls to be made by the main thread. |
203 // Those can arrange for main.main to run in the main thread | 171 // Those can arrange for main.main to run in the main thread |
204 // by calling runtime.LockOSThread during initialization | 172 // by calling runtime.LockOSThread during initialization |
205 // to preserve the lock. | 173 // to preserve the lock. |
206 runtime·LockOSThread(); | 174 runtime·LockOSThread(); |
207 runtime·sched.init = true; | 175 runtime·sched.init = true; |
208 if(m != &runtime·m0) | 176 if(m != &runtime·m0) |
209 runtime·throw("runtime·main not on m0"); | 177 runtime·throw("runtime·main not on m0"); |
210 scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runti
me·main); | 178 scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runti
me·main); |
211 main·init(); | 179 main·init(); |
212 runtime·sched.init = false; | 180 runtime·sched.init = false; |
213 if(!runtime·sched.lockmain) | 181 if(!runtime·sched.lockmain) |
214 runtime·UnlockOSThread(); | 182 runtime·UnlockOSThread(); |
215 | 183 |
216 main·main(); | 184 main·main(); |
217 » outputstats(); | 185 » if(raceenabled) |
| 186 » » runtime·racefini(); |
218 runtime·exit(0); | 187 runtime·exit(0); |
219 for(;;) | 188 for(;;) |
220 *(int32*)runtime·main = 0; | 189 *(int32*)runtime·main = 0; |
221 } | 190 } |
222 | 191 |
223 void | 192 void |
224 runtime·goroutineheader(G *gp) | 193 runtime·goroutineheader(G *gp) |
225 { | 194 { |
226 int8 *status; | 195 int8 *status; |
227 | 196 |
(...skipping 13 matching lines...) Expand all Loading... |
241 case Gwaiting: | 210 case Gwaiting: |
242 if(gp->waitreason) | 211 if(gp->waitreason) |
243 status = gp->waitreason; | 212 status = gp->waitreason; |
244 else | 213 else |
245 status = "waiting"; | 214 status = "waiting"; |
246 break; | 215 break; |
247 default: | 216 default: |
248 status = "???"; | 217 status = "???"; |
249 break; | 218 break; |
250 } | 219 } |
251 » runtime·printf("goroutine %p [%s]:\n", gp, status); | 220 » runtime·printf("goroutine %D [%s]:\n", gp->goid, status); |
252 } | 221 } |
253 | 222 |
254 void | 223 void |
255 runtime·tracebackothers(G *me) | 224 runtime·tracebackothers(G *me) |
256 { | 225 { |
257 G *gp; | 226 G *gp; |
258 | 227 |
259 for(gp = runtime·allg; gp != nil; gp = gp->alllink) { | 228 for(gp = runtime·allg; gp != nil; gp = gp->alllink) { |
260 if(gp == me || gp->status == Gdead) | 229 if(gp == me || gp->status == Gdead) |
261 continue; | 230 continue; |
(...skipping 15 matching lines...) Expand all Loading... |
277 //runtime·callers(1, mp->createstack, nelem(mp->createstack)); | 246 //runtime·callers(1, mp->createstack, nelem(mp->createstack)); |
278 | 247 |
279 // Add to runtime·allm so garbage collector doesn't free m | 248 // Add to runtime·allm so garbage collector doesn't free m |
280 // when it is just in a register or thread-local storage. | 249 // when it is just in a register or thread-local storage. |
281 mp->alllink = runtime·allm; | 250 mp->alllink = runtime·allm; |
282 // runtime·NumCgoCall() iterates over allm w/o locks, | 251 // runtime·NumCgoCall() iterates over allm w/o locks, |
283 // so we need to publish it safely. | 252 // so we need to publish it safely. |
284 runtime·atomicstorep(&runtime·allm, mp); | 253 runtime·atomicstorep(&runtime·allm, mp); |
285 LOG("%d: mcommoninit %d m=%p stackalloc=%p\n", m->id, mp->id, mp, mp->st
ackalloc); | 254 LOG("%d: mcommoninit %d m=%p stackalloc=%p\n", m->id, mp->id, mp, mp->st
ackalloc); |
286 runtime·unlock(&runtime·sched); | 255 runtime·unlock(&runtime·sched); |
| 256 } |
| 257 |
| 258 // Mark g ready to run. |
| 259 void |
| 260 runtime·ready(G *gp) |
| 261 { |
| 262 P *p; |
| 263 M *mp; |
| 264 |
| 265 if(gp->m) |
| 266 runtime·throw("bad g->m in ready"); |
| 267 |
| 268 // Mark runnable. |
| 269 if(gp->status == Grunnable || gp->status == Grunning) { |
| 270 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->sta
tus); |
| 271 runtime·throw("bad g->status in ready"); |
| 272 } |
| 273 gp->status = Grunnable; |
| 274 runqput(m->p, gp); |
| 275 if(runtime·sched.pidle) { |
| 276 runtime·lock(&runtime·sched); |
| 277 p = pidleget(); |
| 278 if(p) { |
| 279 mp = mget(); |
| 280 runtime·unlock(&runtime·sched); |
| 281 if(mp) { |
| 282 entergo(mp, p); |
| 283 runtime·notewakeup(&mp->park); |
| 284 } else { |
| 285 newm(runtime·mstart, p, false); |
| 286 } |
| 287 } else |
| 288 runtime·unlock(&runtime·sched); |
| 289 } |
| 290 } |
| 291 |
| 292 static void |
| 293 munpark(M *mp, P *p) |
| 294 { |
| 295 if(mp) { |
| 296 entergo(mp, p); |
| 297 runtime·notewakeup(&mp->park); |
| 298 } else |
| 299 newm(runtime·mstart, p, false); |
| 300 } |
| 301 ································ |
| 302 int32 |
| 303 runtime·gcprocs(void) |
| 304 { |
| 305 int32 n; |
| 306 |
| 307 runtime·lock(&runtime·sched); |
| 308 n = runtime·gomaxprocs; |
| 309 if(n > runtime·ncpu) |
| 310 n = runtime·ncpu; |
| 311 if(n > MaxGcproc) |
| 312 n = MaxGcproc; |
| 313 runtime·unlock(&runtime·sched); |
| 314 return n; |
| 315 } |
| 316 |
| 317 void |
| 318 runtime·helpgc(int32 nproc) |
| 319 { |
| 320 M *mp; |
| 321 int32 n, pos; |
| 322 |
| 323 LOG("%d: helpgc(%d)\n", m->id, nproc); |
| 324 runtime·lock(&runtime·sched); |
| 325 pos = 0; |
| 326 for(n = 1; n < nproc; n++) { // one M is currently running |
| 327 if(runtime·allp[pos]->mcache == m->mcache) |
| 328 pos++; |
| 329 mp = mget(); |
| 330 if(mp == nil) { |
| 331 runtime·unlock(&runtime·sched); |
| 332 newm(runtime·mstart, runtime·allp[pos], true); |
| 333 runtime·lock(&runtime·sched); |
| 334 pos++; |
| 335 continue; |
| 336 } |
| 337 mp->helpgc = 1; |
| 338 mp->mcache = runtime·allp[pos]->mcache; |
| 339 pos++; |
| 340 LOG("%d: helpgc wake %d\n", m->id, mp->id); |
| 341 runtime·notewakeup(&mp->park); |
| 342 } |
| 343 runtime·unlock(&runtime·sched); |
| 344 } |
| 345 |
| 346 void |
| 347 runtime·stoptheworld(void) |
| 348 { |
| 349 int32 i; |
| 350 uint32 s; |
| 351 P *p; |
| 352 bool wait; |
| 353 |
| 354 LOG("%d: stoptheworld\n", m->id); |
| 355 runtime·lock(&runtime·sched); |
| 356 runtime·gcwaiting = 1; |
| 357 runtime·sched.stopwait = runtime·gomaxprocs; |
| 358 m->p->status = Plocked; |
| 359 runtime·sched.stopwait--; |
| 360 for(i=0; i<runtime·gomaxprocs; i++) { |
| 361 s = runtime·allp[i]->status; |
| 362 if(s == Psyscall && runtime·cas(&runtime·allp[i]->status, s, Plo
cked)) { |
| 363 LOG(" acquired syscall %d\n", i); |
| 364 runtime·sched.stopwait--; |
| 365 } |
| 366 } |
| 367 while(runtime·sched.pidle) { |
| 368 p = pidleget(); |
| 369 p->status = Plocked; |
| 370 runtime·sched.stopwait--; |
| 371 } |
| 372 CHECK(runtime·sched.stopwait >= 0, ("")); |
| 373 wait = runtime·sched.stopwait > 0; |
| 374 runtime·unlock(&runtime·sched); |
| 375 if(wait) { |
| 376 runtime·notesleep(&runtime·sched.stopnote); |
| 377 runtime·noteclear(&runtime·sched.stopnote); |
| 378 } |
| 379 LOG("%d: stoptheworld stopped\n", m->id); |
| 380 CHECK(runtime·sched.stopwait == 0, ("stoptheworld: stopwait == %d\n", ru
ntime·sched.stopwait)); |
| 381 for(i=0; i<runtime·gomaxprocs; i++) { |
| 382 CHECK(runtime·allp[i]->status == Plocked, ("stoptheworld: not st
opped (%d)\n", runtime·allp[i]->status)); |
| 383 } |
| 384 } |
| 385 |
| 386 void |
| 387 runtime·starttheworld(void) |
| 388 { |
| 389 //G *gp; |
| 390 //P *p; |
| 391 //M *mp; |
| 392 //int32 n, w; |
| 393 |
| 394 LOG("%d: starttheworld\n", m->id); |
| 395 runtime·gcwaiting = 0; |
| 396 if(newprocs) { |
| 397 procresize(newprocs); |
| 398 newprocs = 0; |
| 399 } else { |
| 400 procresize(runtime·gomaxprocs); |
| 401 } |
| 402 runtime·lock(&runtime·sched); |
| 403 /* |
| 404 gp = runtime·netwait(0, runtime·gomaxprocs); |
| 405 n = w = 0; |
| 406 inject(gp, &w, &n); |
| 407 while(runtime·sched.pidle) { |
| 408 p = pidleget(); |
| 409 mp = mget(); |
| 410 if(mp) { |
| 411 entergo(mp, p); |
| 412 runtime·notewakeup(&mp->park); |
| 413 } else { |
| 414 runtime·unlock(&runtime·sched); |
| 415 newm(runtime·mstart, p, false); |
| 416 runtime·lock(&runtime·sched); |
| 417 } |
| 418 } |
| 419 */ |
| 420 if(runtime·sched.sysmonwait) { |
| 421 runtime·sched.sysmonwait = 0; |
| 422 runtime·notewakeup(&runtime·sched.sysmonnote); |
| 423 } |
| 424 runtime·unlock(&runtime·sched); |
| 425 } |
| 426 |
| 427 // Called to start an M. |
| 428 void |
| 429 runtime·mstart(void) |
| 430 { |
| 431 // It is used by windows-386 only. Unfortunately, seh needs |
| 432 // to be located on os stack, and mstart runs on os stack |
| 433 // for both m0 and m. |
| 434 SEH seh; |
| 435 P *p; |
| 436 |
| 437 LOG("%d: mstart m=%p\n", m->id, m); |
| 438 if(g != m->g0) |
| 439 runtime·throw("bad runtime·mstart"); |
| 440 |
| 441 // Record top of stack for use by mcall. |
| 442 // Once we call schedule we're never coming back, |
| 443 // so other calls can reuse this stack space. |
| 444 runtime·gosave(&m->g0->sched); |
| 445 m->g0->sched.pc = (void*)-1; // make sure it is never used |
| 446 m->seh = &seh; |
| 447 runtime·asminit(); |
| 448 runtime·minit(); |
| 449 |
| 450 // Install signal handlers; after minit so that minit can |
| 451 // prepare the thread to be able to handle the signals. |
| 452 if(m == &runtime·m0) |
| 453 runtime·initsig(); |
| 454 |
| 455 if(m->helpgc) { |
| 456 LOG("%d: mstart helpgc\n", m->id); |
| 457 m->helpgc = 0; |
| 458 m->mcache = m->p->mcache; |
| 459 runtime·gchelper(); |
| 460 m->mcache = nil; |
| 461 m->p = nil; |
| 462 LOG("%d: gchelper end\n", m->id); |
| 463 mstop(); |
| 464 } else if(m != &runtime·m0) { |
| 465 p = m->p; |
| 466 m->p = nil; |
| 467 entergo(m, p); |
| 468 } |
| 469 LOG("%d: calling schedule\n", m->id); |
| 470 schedule(); |
| 471 |
| 472 // TODO(brainman): This point is never reached, because scheduler |
| 473 // does not release os threads at the moment. But once this path |
| 474 // is enabled, we must remove our seh here. |
| 475 } |
| 476 |
| 477 // When running with cgo, we call libcgo_thread_start |
| 478 // to start threads for us so that we can play nicely with |
| 479 // foreign code. |
| 480 void (*libcgo_thread_start)(void*); |
| 481 |
| 482 typedef struct CgoThreadStart CgoThreadStart; |
| 483 struct CgoThreadStart |
| 484 { |
| 485 M *m; |
| 486 G *g; |
| 487 void (*fn)(void); |
| 488 }; |
| 489 |
| 490 static void |
| 491 initgstack(G *newg, byte *stk, int32 stacksize) |
| 492 { |
| 493 newg->stack0 = (uintptr)stk; |
| 494 newg->stackguard = (uintptr)stk + StackGuard; |
| 495 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop); |
| 496 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); |
| 497 } |
| 498 |
| 499 // Create a new m. It will start off with a call to runtime·mstart. |
| 500 static M* |
| 501 newm(void(*fn)(void), P *p, bool helpgc) |
| 502 { |
| 503 M *mp; |
| 504 int32 addmem,stksiz, stkoff; |
| 505 //!!!static Type *mtype; // The Go type M |
| 506 |
| 507 LOG("%d: newm\n", m->id); |
| 508 addmem = sizeof(*mp->stackalloc); |
| 509 if(runtime·gsignalstk) |
| 510 addmem += sizeof(G) + runtime·gsignalstk; |
| 511 stkoff = sizeof(M) + addmem; |
| 512 stksiz = StackSystem + (fn == runtime·mstart ? 8192 : 64*1024); |
| 513 if(!runtime·iscgo && !Windows) |
| 514 addmem += stksiz; |
| 515 //!!! all that is now non-GC, can it break something? |
| 516 mp = runtime·SysAlloc(sizeof(M) + addmem); |
| 517 mp->stackalloc = (FixAlloc*)(mp+1); |
| 518 //!!!if(mtype == nil) { |
| 519 //!!! Eface e; |
| 520 //!!! runtime·gc_m_ptr(&e); |
| 521 //!!! mtype = ((PtrType*)e.type)->elem; |
| 522 //!!!} |
| 523 //!!! mp = runtime·cnew(mtype); |
| 524 mcommoninit(mp); |
| 525 mp->g0 = &mp->g0buf; |
| 526 mp->p = p; |
| 527 mp->helpgc = helpgc; |
| 528 if(runtime·gsignalstk) { |
| 529 mp->gsignal = (G*)((byte*)mp+sizeof(*mp)+sizeof(*mp->stackalloc)
); |
| 530 initgstack(mp->gsignal, (byte*)(mp->gsignal+1), runtime·gsignals
tk); |
| 531 } |
| 532 |
| 533 if(runtime·iscgo) { |
| 534 CgoThreadStart ts; |
| 535 |
| 536 if(libcgo_thread_start == nil) |
| 537 runtime·throw("libcgo_thread_start missing"); |
| 538 // pthread_create will make us a stack. |
| 539 ts.m = mp; |
| 540 ts.g = mp->g0; |
| 541 ts.fn = fn; |
| 542 runtime·asmcgocall(libcgo_thread_start, &ts); |
| 543 } else { |
| 544 // windows will layout sched stack on os stack |
| 545 if(!Windows) |
| 546 initgstack(mp->g0, (byte*)mp+stkoff, stksiz); |
| 547 runtime·newosproc(mp, mp->g0, (byte*)mp->g0->stackbase, fn); |
| 548 } |
| 549 return mp; |
| 550 } |
| 551 |
| 552 static void |
| 553 mstop(void) |
| 554 { |
| 555 LOG("%d: mstop\n", m->id); |
| 556 CHECK(m->locks == 0, ("")); |
| 557 CHECK(m->p == nil, ("mstop: p != nil\n")); |
| 558 retry: |
| 559 runtime·lock(&runtime·sched); |
| 560 mput(m); |
| 561 runtime·unlock(&runtime·sched); |
| 562 runtime·notesleep(&m->park); |
| 563 runtime·noteclear(&m->park); |
| 564 if(m->helpgc) { |
| 565 LOG("%d: gchelper\n", m->id); |
| 566 m->helpgc = 0; |
| 567 runtime·gchelper(); |
| 568 m->mcache = nil; |
| 569 LOG("%d: gchelper end\n", m->id); |
| 570 goto retry; |
| 571 } |
| 572 LOG("%d: mstop wake\n", m->id); |
| 573 if(m->p == nil) |
| 574 runtime·throw("mstop: p == nil"); |
| 575 } |
| 576 |
| 577 // Schedules gp to run on M. Never returns. |
| 578 static void |
| 579 execute(G *gp) |
| 580 { |
| 581 int32 hz; |
| 582 |
| 583 LOG("%d: start running goroutine %p\n", m->id, gp); |
| 584 CHECK(m->locks == 0, ("")); |
| 585 CHECK(g == m->g0, ("execute: not on g0\n")); |
| 586 CHECK(m->p != nil, ("execute: no p\n")); |
| 587 CHECK(gp->status == Grunnable, ("execute: gp=%d gp->status=%d\n", gp->go
id, gp->status)); |
| 588 CHECK(gp->m == nil, ("execute: gp->m=%p\n", gp->m)); |
| 589 CHECK(gp->lockedm == nil && m->lockedg == nil || gp->lockedm == m && m->
lockedg == gp, |
| 590 ("bad locking: gp->lockedm=%p m->lockedg=%p\n", gp->lockedm, m->
lockedg)); |
| 591 m->p->tick++; |
| 592 gp->status = Grunning; |
| 593 m->curg = gp; |
| 594 gp->m = m; |
| 595 |
| 596 // Check whether the profiler needs to be turned on or off. |
| 597 hz = runtime·sched.profilehz; |
| 598 if(m->profilehz != hz) |
| 599 runtime·resetcpuprofiler(hz); |
| 600 |
| 601 if(gp->sched.pc == (byte*)runtime·goexit) // kickoff |
| 602 runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); |
| 603 runtime·gogo(&gp->sched, 0); |
| 604 } |
| 605 |
| 606 // One round of scheduler: find a goroutine and run it. |
| 607 // The argument is the goroutine that was running before |
| 608 // schedule was called, or nil if this is the first call. |
| 609 // Never returns. |
| 610 static void |
| 611 schedule(void) |
| 612 { |
| 613 int32 i, try; |
| 614 G *gp, *gp1; |
| 615 P *p; |
| 616 M *mp; |
| 617 |
| 618 LOG("%d: schedule p=%p\n", m->id, m->p); |
| 619 USED(&gp); |
| 620 CHECK(m->locks == 0, ("schedule: holding locks\n")); |
| 621 CHECK(m->lockedg == nil, ("schedule: locked M\n")); |
| 622 |
| 623 top: |
| 624 if(runtime·gcwaiting) { |
| 625 p = releasep(); |
| 626 p->status = Plocked; |
| 627 runtime·lock(&runtime·sched); |
| 628 runtime·sched.stopwait--; |
| 629 if(runtime·sched.stopwait == 0) |
| 630 runtime·notewakeup(&runtime·sched.stopnote); |
| 631 runtime·unlock(&runtime·sched); |
| 632 mstop(); |
| 633 goto top; |
| 634 } |
| 635 |
| 636 gp = runqget(m->p); |
| 637 if(gp == nil) { |
| 638 for(try=0; try<2; try++) { |
| 639 if(runtime·sched.runqsize) { |
| 640 runtime·lock(&runtime·sched); |
| 641 gp = globrunqget(); |
| 642 if(gp) { |
| 643 while(gp->schedlink != nil) { |
| 644 gp1 = gp; |
| 645 gp = gp1->schedlink; |
| 646 runqput(m->p, gp1); |
| 647 } |
| 648 } |
| 649 runtime·unlock(&runtime·sched); |
| 650 if(gp) |
| 651 goto haveg; |
| 652 } |
| 653 for(i=0; i<runtime·gomaxprocs; i++) { |
| 654 if(runtime·gcwaiting) |
| 655 goto top; |
| 656 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs]
; |
| 657 if(p == m->p) |
| 658 gp = runqget(p); |
| 659 else |
| 660 gp = runqsteal(m->p, p); |
| 661 if(gp) |
| 662 break; |
| 663 } |
| 664 if(gp) |
| 665 break; |
| 666 if(try==0 && runtime·gcwaiting == 0) |
| 667 runtime·osyield(); |
| 668 } |
| 669 if(gp == nil) { |
| 670 p = releasep(); |
| 671 runtime·lock(&runtime·sched); |
| 672 if(runtime·gcwaiting) { |
| 673 p->status = Plocked; |
| 674 runtime·sched.stopwait--; |
| 675 if(runtime·sched.stopwait == 0) |
| 676 runtime·notewakeup(&runtime·sched.stopno
te); |
| 677 runtime·unlock(&runtime·sched); |
| 678 mstop(); |
| 679 goto top; |
| 680 } |
| 681 pidleput(p); |
| 682 if(runtime·sched.runqsize) { |
| 683 p = pidleget(); |
| 684 runtime·unlock(&runtime·sched); |
| 685 entergo(m, p); |
| 686 goto top; |
| 687 } |
| 688 runtime·unlock(&runtime·sched); |
| 689 for(i=0; i<runtime·gomaxprocs; i++) { |
| 690 p = runtime·allp[i]; |
| 691 if(p && p->runqhead != p->runqtail) { |
| 692 runtime·lock(&runtime·sched); |
| 693 p = pidleget(); |
| 694 runtime·unlock(&runtime·sched); |
| 695 if(p) { |
| 696 entergo(m, p); |
| 697 goto top; |
| 698 } |
| 699 break; |
| 700 } |
| 701 } |
| 702 mstop(); |
| 703 goto top; |
| 704 } |
| 705 } |
| 706 |
| 707 haveg: |
| 708 if(gp->lockedm) { |
| 709 mp = gp->lockedm; |
| 710 p = releasep(); |
| 711 entergo(mp, p); |
| 712 runtime·notewakeup(&mp->park); |
| 713 mstop(); |
| 714 goto top; |
| 715 } |
| 716 |
| 717 execute(gp); |
| 718 } |
| 719 |
| 720 static void |
| 721 park0(G *gp) |
| 722 { |
| 723 P *p; |
| 724 M *mp; |
| 725 |
| 726 USED(&gp); |
| 727 if(m->lockedg) { |
| 728 p = releasep(); |
| 729 if(m->waitunlockf) { |
| 730 m->waitunlockf(m->waitlock); |
| 731 m->waitunlockf = nil; |
| 732 } |
| 733 // After this point another thread may schedule gp on m again. |
| 734 // Schedule another M to run P. |
| 735 runtime·lock(&runtime·sched); |
| 736 mp = mget(); |
| 737 runtime·unlock(&runtime·sched); |
| 738 munpark(mp, p); |
| 739 // Wait until another thread schedules gp and so m again. |
| 740 runtime·notesleep(&m->park); |
| 741 runtime·noteclear(&m->park); |
| 742 execute(gp); // Never returns. |
| 743 } |
| 744 if(m->waitunlockf) { |
| 745 m->waitunlockf(m->waitlock); |
| 746 m->waitunlockf = nil; |
| 747 } |
| 748 schedule(); |
| 749 } |
| 750 |
| 751 // Puts the current goroutine into a waiting state and unlocks the lock. |
| 752 // The goroutine can be made runnable again by calling runtime·ready(gp). |
| 753 void |
| 754 runtime·park(void(*unlockf)(Lock*), Lock *l, int8 *reason) |
| 755 { |
| 756 LOG("%d: park l=%p reason=%s\n", m->id, l, reason); |
| 757 CHECK(g != m->g0, ("park of g0\n")); |
| 758 m->waitlock = l; |
| 759 m->waitunlockf = unlockf; |
| 760 g->status = Gwaiting; |
| 761 g->waitreason = reason; |
| 762 g->m = nil; |
| 763 runtime·mcall(park0); |
| 764 } |
| 765 |
| 766 static void |
| 767 gosched0(G *gp) |
| 768 { |
| 769 P *p; |
| 770 M *mp; |
| 771 |
| 772 LOG("%d: gosched0 gp=%p\n", m->id, gp); |
| 773 gp->status = Grunnable; |
| 774 gp->m = nil; |
| 775 if(m->lockedg) { |
| 776 p = releasep(); |
| 777 runtime·lock(&runtime·sched); |
| 778 globrunqput(gp); |
| 779 // After this point another thread may schedule gp on m again. |
| 780 // Schedule another M to run P. |
| 781 mp = mget(); |
| 782 runtime·unlock(&runtime·sched); |
| 783 munpark(mp, p); |
| 784 // Wait until another thread schedules gp and so m again. |
| 785 runtime·notesleep(&m->park); |
| 786 runtime·noteclear(&m->park); |
| 787 execute(gp); // Never returns. |
| 788 } |
| 789 runtime·lock(&runtime·sched); |
| 790 globrunqput(gp); |
| 791 runtime·unlock(&runtime·sched); |
| 792 schedule(); |
| 793 } |
| 794 |
| 795 void |
| 796 runtime·gosched(void) |
| 797 { |
| 798 runtime·mcall(gosched0); |
| 799 } |
| 800 |
| 801 // On g0. |
| 802 static void |
| 803 goexit0(G *gp) |
| 804 { |
| 805 gp->status = Gdead; |
| 806 gp->m = nil; |
| 807 gp->lockedm = nil; |
| 808 m->lockedg = nil; |
| 809 runtime·unwindstack(gp, nil); |
| 810 gfput(m->p, gp); |
| 811 schedule(); |
| 812 } |
| 813 |
| 814 void |
| 815 runtime·goexit(void) |
| 816 { |
| 817 runtime·mcall(goexit0); |
| 818 } |
| 819 |
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
#pragma textflag 7
void
runtime·entersyscall(void)
{
	P *p;
	M *mp;

	LOG("%d: entersyscall g=%p p=%p\n", m->id, g, m->p);
	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	if(m->blockingsyscall) {
		// Known-blocking syscall (set by entersyscallblock):
		// hand our P off immediately so an idle M can run on it.
		m->blockingsyscall = false;
		p = releasep();
		runtime·lock(&runtime·sched);
		mp = mget();
		runtime·unlock(&runtime·sched);
		munpark(mp, p);
		return;
	}

	// Fast path: keep the P but publish it as Psyscall so that
	// retake (sysmon) or a stop-the-world can CAS it away if the
	// syscall turns out to block.
	m->mcache = nil;
	m->p->m = nil;
	runtime·atomicstore(&m->p->status, Psyscall);
	if(runtime·gcwaiting) {
		// A stop-the-world is in progress; donate our P to it
		// and wake the stopper once the last P is collected.
		runtime·lock(&runtime·sched);
		if (runtime·sched.stopwait > 0 && runtime·cas(&m->p->status, Psyscall, Plocked)) {
			runtime·sched.stopwait--;
			if(runtime·sched.stopwait == 0)
				runtime·notewakeup(&runtime·sched.stopnote);
		}
		runtime·unlock(&runtime·sched);
	}
}
| 874 |
// Like entersyscall, but for a syscall known to block: the flag
// makes entersyscall release the P immediately instead of leaving
// it parked in Psyscall.  Cannot split the stack (see entersyscall).
#pragma textflag 7
void
runtime·entersyscallblock(void)
{
	m->blockingsyscall = true;
	runtime·entersyscall();
}
| 882 |
// Slow path of exitsyscall, running on the g0 stack: gp lost its P
// during the system call, so find a P for it or queue it globally.
static void
exitsyscall0(G *gp)
{
	P *p;

	LOG("%d: exitsyscall0\n", m->id);
	gp->status = Grunnable;
	gp->m = nil;
	// NOTE: CHECK is currently compiled out (see its definition at
	// the top of the file), so these assertions are documentation.
	CHECK(m->park.waitm == nil, ("exitsyscall0: park is signalled\n"));
	runtime·lock(&runtime·sched);
	p = pidleget();
	if(p == nil)
		globrunqput(gp);
	runtime·unlock(&runtime·sched);
	if(p) {
		// Got an idle P: run gp on it right away.
		entergo(m, p);
		execute(gp);  // Never returns.
	}
	if(m->lockedg) {
		CHECK(m->lockedg == gp, ("exitsyscall0: inconsistent locking\n"));
		// Wait until another thread schedules gp and so m again.
		runtime·notesleep(&m->park);
		runtime·noteclear(&m->park);
		execute(gp);  // Never returns.
	}
	mstop();
	schedule();
}
| 911 |
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime·exitsyscall(void)
{
	uint32 s;
	P *p;

	LOG("%d: exitsyscall g=%p\n", m->id, g);

	// Check whether the profiler needs to be turned on.
	if(m->profilehz > 0)
		runtime·setprof(true);

	// Try to re-acquire the P.
	s = m->p ? m->p->status : Pidle;
	if(s == Psyscall && runtime·cas(&m->p->status, s, Pbusy)) {
		LOG("%d: exitsyscall fast\n", m->id);
		// There's a cpu for us, so we can run.
		m->mcache = m->p->mcache;
		m->p->m = m;
		g->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
		g->gcstack = (uintptr)nil;
		return;
	}

	// Our P was taken away while we were in the syscall
	// (by retake or a stop-the-world).  Try to get idle P.
	m->p = nil;
	if(runtime·sched.pidle) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		runtime·unlock(&runtime·sched);
		if(p) {
			entergo(m, p);
			g->gcstack = (uintptr)nil;
			return;
		}
	}

	LOG("%d: exitsyscall slow p->status=%d\n", m->id, s);

	// No P available: drop to g0 and block in exitsyscall0.
	runtime·mcall(exitsyscall0);

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	g->gcstack = (uintptr)nil;
}
| 967 |
// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment.  The function's return value is in
// m->cret.
// Copies the returned arguments back to the caller's (old) segment,
// frees the current segment if it was heap-allocated, restores the
// old stackbase/stackguard, and resumes at the saved gobuf.
void
runtime·oldstack(void)
{
	Stktop *top, old;
	uint32 argsize;
	uintptr cret;
	byte *sp;
	G *g1;

//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	old = *top;		// copy: top's segment may be freed below
	argsize = old.argsize;
	if(argsize > 0) {
		// Copy results back to where the caller expects them.
		sp -= argsize;
		runtime·memmove(top->argp, sp, argsize);
	}

	if(old.free != 0)
		runtime·stackfree((byte*)g1->stackguard - StackGuard, old.free);
	g1->stackbase = (uintptr)old.stackbase;
	g1->stackguard = (uintptr)old.stackguard;

	cret = m->cret;
	m->cret = 0;  // drop reference
	runtime·gogo(&old.gobuf, cret);
}
| 1001 |
// Called from reflect·call or from runtime·morestack when a new
// stack segment is needed.  Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
void
runtime·newstack(void)
{
	int32 framesize, argsize;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;
	bool reflectcall;
	uintptr free;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	g1 = m->curg;

	if(m->morebuf.sp < g1->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, g1->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}
	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

	// framesize==1 is a sentinel meaning the call came from
	// reflect·call with an unknown frame size (see below).
	reflectcall = framesize==1;
	if(reflectcall)
		framesize = 0;

	if(reflectcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > g1->stackguard) {
		// special case: called from reflect.call (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)g1->stackguard - StackGuard;
		free = 0;	// nothing to free in oldstack
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		stk = runtime·stackalloc(framesize);
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;
	}

//runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
//framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);

	// Record the old segment's bounds and resume point in the new
	// Stktop so runtime·oldstack can undo this split.
	top->stackbase = (byte*)g1->stackbase;
	top->stackguard = (byte*)g1->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;
	m->moreargp = nil;
	m->morebuf.pc = nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = g1->ispanic;
	g1->ispanic = false;

	g1->stackbase = (uintptr)top;
	g1->stackguard = (uintptr)stk + StackGuard;

	// Copy the arguments just below the new Stktop.
	sp = (byte*)top;
	if(argsize > 0) {
		sp -= argsize;
		runtime·memmove(sp, top->argp, argsize);
	}
	if(thechar == '5') {
		// caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = (uintptr)sp;
	label.pc = (byte*)runtime·lessstack;
	label.g = m->curg;
	runtime·gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return
}
| 1096 |
// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack.  This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.
static void
mstackalloc(G *gp)
{
	// gp->param carries the requested size in and the new stack out.
	gp->param = runtime·stackalloc((uintptr)gp->param);
	runtime·gogo(&gp->sched, 0);	// resume gp where mcall left off
}
| 1107 |
// Allocate a new g, with a stack big enough for stacksize bytes.
// A negative stacksize allocates the G structure only, no stack.
G*
runtime·malg(int32 stacksize)
{
	G *newg;
	byte *stk;

	// Sanity-check the layout constants from stack.h.
	if(StackTop < sizeof(Stktop)) {
		runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
		runtime·throw("runtime: bad stack.h");
	}

	newg = runtime·malloc(sizeof(G));
	if(stacksize >= 0) {
		if(g == m->g0) {
			// running on scheduler stack already.
			stk = runtime·stackalloc(StackSystem + stacksize);
		} else {
			// have to call stackalloc on scheduler stack.
			g->param = (void*)(StackSystem + stacksize);
			runtime·mcall(mstackalloc);
			stk = g->param;
			g->param = nil;
		}
		newg->stack0 = (uintptr)stk;
		newg->stackguard = (uintptr)stk + StackGuard;
		newg->stackbase = (uintptr)stk + StackSystem + stacksize - sizeof(Stktop);
		// Zero the initial Stktop at the base of the stack.
		runtime·memclr((byte*)newg->stackbase, sizeof(Stktop));
	}
	return newg;
}
| 1139 |
// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred.  It's OK for this to call
// functions that split the stack.
#pragma textflag 7
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	// On '5' (ARM) the caller's saved LR sits between fn and the
	// arguments, so skip one extra word.
	if(thechar == '5')
		argp = (byte*)(&fn+2);  // skip caller's saved LR
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}
| 1159 |
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results.  callerpc is the
// address of the go statement that created this.  The new g is put
// on the queue of g's waiting to run.
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	M *mp;
	P *p;
	int32 siz;
//int64 goid;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;	// round frame to 8 bytes

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	// Reuse a G from the current P's free list if possible.
	if((newg = gfget(m->p)) != nil) {
		//!!!if(raceenabled)
		//!!!	runtime·racegostart(goid, callerpc);
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		// Allocate a fresh G and link it onto the global allg list.
		newg = runtime·malg(StackMin);
		runtime·lock(&runtime·sched);
		newg->goid = ++runtime·sched.goidseq;
		if(runtime·lastg == nil)
			runtime·allg = newg;
		else
			runtime·lastg->alllink = newg;
		runtime·lastg = newg;
		runtime·unlock(&runtime·sched);
	}

	// Copy the arguments onto the new goroutine's stack.
	sp = (byte*)newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	LOG("%d: newproc %p\n", m->id, newg);
	// Set up so the goroutine "returns" to goexit when fn finishes.
	newg->sched.sp = (uintptr)sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;
	newg->status = Grunnable;
	runqput(m->p, newg);

	// If there is an idle P, kick an M awake (or create one) to run it.
	if(runtime·sched.pidle && fn != (byte*)runtime·main) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		if(p) {
			mp = mget();
			runtime·unlock(&runtime·sched);
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else
				newm(runtime·mstart, p, false);
		} else
			runtime·unlock(&runtime·sched);
	}
	return newg;
}
| 1236 |
// Go-visible entry point: trap into an attached debugger.
void
runtime·Breakpoint(void)
{
	runtime·breakpoint();
}
| 1242 |
// Go-visible entry point: yield the processor to other goroutines.
void
runtime·Gosched(void)
{
	runtime·gosched();
}
| 1248 |
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
// Returns the previous setting.  n<=0 or n==current just queries.
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;

	LOG("%d: gomaxprocsfunc %d\n", m->id, n);
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·lock(&runtime·sched);
	ret = runtime·gomaxprocs;
	if(n <= 0 || n == ret) {
		// Query only, or no change requested.
		runtime·unlock(&runtime·sched);
		return ret;
	}
	runtime·unlock(&runtime·sched);

	// Changing the P count requires a stopped world; newprocs is
	// picked up when the world is restarted.
	runtime·semacquire(&runtime·worldsema);
	m->gcing = 1;
	runtime·stoptheworld();
	newprocs = n;
	m->gcing = 0;
	runtime·semrelease(&runtime·worldsema);
	runtime·starttheworld();

	return ret;
}
| 1277 |
// Wire the calling goroutine to its current OS thread.
void
runtime·LockOSThread(void)
{
	// NOTE(review): on m0 during runtime init only the request is
	// recorded in sched.lockmain — presumably applied once init
	// completes; confirm against the scheduler init code.
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = true;
		return;
	}
	m->lockedg = g;
	g->lockedm = m;
}
| 1288 |
// Undo LockOSThread: unwire the calling goroutine from its OS thread.
void
runtime·UnlockOSThread(void)
{
	// Mirror of the m0/init special case in LockOSThread.
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = false;
		return;
	}
	m->lockedg = nil;
	g->lockedm = nil;
}
| 1299 |
| 1300 bool |
| 1301 runtime·lockedOSThread(void) |
| 1302 { |
| 1303 return g->lockedm != nil && m->lockedg != nil; |
| 1304 } |
| 1305 |
// for testing of callbacks
// Go-callable wrapper: writes the result into the caller's result
// slot and FLUSHes it so the Go caller observes the store.
void
runtime·golockedOSThread(bool ret)
{
	ret = runtime·lockedOSThread();
	FLUSH(&ret);
}
| 1313 |
// for testing of wire, unwire
// Go-callable: returns the current M's id via the result slot.
void
runtime·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}
| 1321 |
// Go-callable implementation of runtime.NumGoroutine.
// NOTE(review): currently a placeholder returning 1; the real
// count (sched.gcount) is commented out below.
void
runtime·NumGoroutine(intgo ret)
{
	//ret = runtime·sched.gcount;
	ret = 1;
	FLUSH(&ret);
}
| 1329 |
// Number of goroutines.
// NOTE(review): currently a placeholder returning 1; the real
// count (sched.gcount) is commented out below.
int32
runtime·gcount(void)
{
	//return runtime·sched.gcount;
	return 1;
}
| 1336 |
// Number of M's, as tracked by the scheduler's mcount counter.
int32
runtime·mcount(void)
{
	return runtime·sched.mcount;
}
| 1342 |
// Crash handler: mcall was invoked while already on the g0 stack.
void
runtime·badmcall(void)  // called from assembly
{
	runtime·throw("runtime: mcall called on m->g0 stack");
}
| 1348 |
// Crash handler: the function passed to mcall returned, which it
// must never do.
void
runtime·badmcall2(void)  // called from assembly
{
	runtime·throw("runtime: mcall function returned");
}
| 1354 |
// CPU profiler state; the embedded Lock guards fn, hz, and pcbuf
// against concurrent SIGPROF delivery (see runtime·sigprof).
static struct {
	Lock;
	void (*fn)(uintptr*, int32);	// callback invoked with each traceback
	int32 hz;			// samples per second; 0 means profiling off
	uintptr pcbuf[100];		// scratch buffer for one traceback
} prof;
| 1361 |
// Called if we receive a SIGPROF signal.
// Captures a traceback of the interrupted goroutine and hands it
// to the registered profiler callback.
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
{
	int32 n;

	// Racy fast-path check to avoid taking the lock on every
	// signal; rechecked under the lock below.
	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime·lock(&prof);
	if(prof.fn == nil) {
		// Profiling was turned off between the check and the lock.
		runtime·unlock(&prof);
		return;
	}
	n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
	if(n > 0)
		prof.fn(prof.pcbuf, n);
	runtime·unlock(&prof);
}
| 1381 |
// Arrange to call fn with a traceback hz times a second.
// hz==0 or fn==nil disables profiling.
void
runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime·resetcpuprofiler(0);

	runtime·lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime·unlock(&prof);
	runtime·lock(&runtime·sched);
	runtime·sched.profilehz = hz;
	runtime·unlock(&runtime·sched);

	// Restart the timer only after all state is published.
	if(hz != 0)
		runtime·resetcpuprofiler(hz);
}
| 1410 |
// Change number of processors.  The world is stopped.
// Allocates/initializes P's up to `new`, migrates queued G's off
// retired P's, binds the current m to allp[0], and idles the rest.
static void
procresize(int32 new)
{
	int32 i, old;
	G *gp;
	P *p;

	runtime·lock(&runtime·sched);  //!!!
	old = runtime·gomaxprocs;
	LOG("%d: procresize %d->%d\n", m->id, old, new);
	if(old < 0 || old > maxgomaxprocs || new <= 0 || new > maxgomaxprocs)
		runtime·throw("procresize: invalid arg");
	if(old == new) {
		// Count unchanged: just reset statuses — the caller's P
		// stays Pbusy, everything else goes back on the idle list.
		for(i=0; i<new; i++) {
			p = runtime·allp[i];
			if(p == m->p)
				p->status = Pbusy;
			else {
				p->status = Pidle;
				pidleput(p);
			}
		}
		runtime·unlock(&runtime·sched);
		return;
	}

	runtime·singleproc = new == 1;
	runtime·gomaxprocs = new;
	// Initialize any P's that are new or missing resources.
	for(i=0; i<new; i++) {
		p = runtime·allp[i];
		if(p == nil) {
			p = (P*)runtime·mallocgc(sizeof(runtime·allp[i][0]), 0, 0, 1);
			p->status = Plocked;
			runtime·allp[i] = p;	//@@@ store-release
		}
		if(p->mcache == nil) {
			if(old==0 && i==0)
				p->mcache = m->mcache;	// bootstrap: inherit m0's cache
			else
				p->mcache = runtime·allocmcache();
		}
		if(p->runq == nil) {
			p->runqsize = 1024;
			p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*), 0, 0, 1);
		}
	}

	// Drain local run queues of all but the first P into allp[0].
	for(i=1; i<old; i++) {
		for(;;) {
			gp = runqget(runtime·allp[i]);
			if(gp == nil)
				break;
			//TODO: spread more evenly.
			runqput(runtime·allp[0], gp);
		}
	}

	// Retire excess P's when shrinking.
	for(i=new; i<old; i++) {
		runtime·freemcache(runtime·allp[i]->mcache);
		runtime·allp[i]->mcache = nil;
		runtime·allp[i]->status = Pdead;
		//TODO: free freeg
	}

	// Rebind the current m to allp[0] and idle the remaining P's.
	if(m->p)
		m->p->m = nil;
	m->p = nil;
	m->mcache = nil;
	runtime·allp[0]->m = nil;
	runtime·allp[0]->status = Pidle;
	entergo(m, runtime·allp[0]);
	for(i=1; i<new; i++) {
		p = runtime·allp[i];
		p->status = Pidle;
		pidleput(p);
	}
	runtime·unlock(&runtime·sched);
}
| 1490 |
// Associate m mp with processor p: mp takes ownership of p and its
// mcache, and p becomes Pbusy.  p must be Pidle and unowned, and mp
// must not already hold a P.
static void
entergo(M *mp, P *p)
{
	LOG("%d: entergo m=%d p=%p p->m=%p, p->status=%d, p->mcache=%p\n", m->id, mp->id, p, p->m, p->status, p->mcache);
	if(mp->p || mp->mcache)
		runtime·throw("entergo: already in go");
	if(p->m || p->status != Pidle) {
		runtime·printf("entergo: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
		runtime·throw("entergo: invalid p state");
	}
	mp->mcache = p->mcache;
	mp->p = p;
	p->m = mp;
	p->status = Pbusy;
}
| 1506 |
// Disassociate the current m from its P (inverse of entergo):
// the P returns to Pidle and is handed back to the caller.
static P*
releasep(void)
{
	M *mp;
	P *p;

	mp = m;
	LOG("%d: releasep\n", mp->id);
	// sched is locked
	if(mp->p == nil || mp->mcache == nil)
		runtime·throw("releasep: invalid arg");
	p = mp->p;
	// The m<->p link must be consistent before we tear it down.
	if(p->m != mp || p->mcache != mp->mcache || p->status != Pbusy) {
		runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
			mp, mp->p, p->m, m->mcache, p->mcache, p->status);
		runtime·throw("releasep: invalid p state");
	}
	mp->p = nil;
	mp->mcache = nil;
	p->m = nil;
	p->status = Pidle;
	return p;
}
| 1530 |
// Per-P bookkeeping used by sysmon/retake: tick is the last observed
// p->tick, when is the time that observation was recorded.  A P whose
// tick has not advanced for long enough is considered for retaking.
typedef struct Pdesc Pdesc;
struct Pdesc
{
	uint32	tick;	// last observed p->tick
	int64	when;	// time (ns since sysmon start) of that observation
};
| 1537 |
// Called periodically by sysmon.  Retake P's that have sat in
// Psyscall without their tick advancing for more than 20us
// (20*1000ns), and hand each retaken P to an M so its goroutines
// can keep running.  now is nanoseconds since sysmon started.
static void
retake(int64 now, Pdesc *ps)
{
	uint32 i, s;
	int64 t;
	P *p;
	M *mp;

	for(i=0; i<runtime·gomaxprocs; i++) {
		p = runtime·allp[i];
		//!!! procresize may be in progress
		// do something if GC is in progress (help).
		if(p==nil)
			continue;
		t = p->tick;
		if(ps[i].tick != t) {
			// The P made progress; restart its idle clock.
			ps[i].tick = t;
			ps[i].when = now;
		}
		if(ps[i].when + 20*1000 > now)
			continue;
		s = p->status;
		if(s == Psyscall && runtime·cas(&p->status, s, Pidle)) {
			// Won the race against exitsyscall: this P is ours.
			LOG("retake %p(%d)\n", p, i);
			runtime·lock(&runtime·sched);
			mp = mget();
			runtime·unlock(&runtime·sched);
			munpark(mp, p);
		}
	}
}
| 1569 |
// Per-P observation state for retake, indexed like runtime·allp.
static Pdesc ps[maxgomaxprocs];

// Background monitor loop: periodically retakes P's stuck in
// syscalls, and parks itself while the garbage collector has the
// world stopped (gcwaiting).
static void
sysmon(void)
{
	int64 t0, now;

	// This is a special dedicated thread.
	// It works w/o mcache nor stackalloc, it may work concurrently with GC.
	runtime·asminit();
	runtime·minit();
	LOG("sysmon\n");
	t0 = runtime·nanotime();
	for(;;) {
		//!!! sleep more if possible
		runtime·usleep(20);
		if(runtime·gcwaiting) {
			// Double-checked under the lock: if GC still wants
			// the world stopped, sleep until it wakes us.
			runtime·lock(&runtime·sched);
			if(runtime·gcwaiting) {
				runtime·sched.sysmonwait = 1;
				runtime·unlock(&runtime·sched);
				runtime·notesleep(&runtime·sched.sysmonnote);
				runtime·noteclear(&runtime·sched.sysmonnote);
			} else
				runtime·unlock(&runtime·sched);
		}
		now = runtime·nanotime() - t0;
		retake(now, ps);
	}
}
| 1600 |
// Inject a list of goroutines (linked through schedlink) into the
// global run queue, then start up to that many idle P's — waking
// parked M's or creating new ones — to run them.  *n is incremented
// by the number of goroutines injected, *w by the number of workers
// started.
static void
inject(G *gp0, int32 *w, int32 *n)
{
	int32 nw;
	G *gp;
	M *mp;
	P *p;

	runtime·lock(&runtime·sched);
	while(gp0) {
		gp = gp0;
		gp0 = gp->schedlink;
		gp->status = Grunnable;
		globrunqput(gp);
		(*n)++;
	}
	runtime·unlock(&runtime·sched);

	// Start at most one worker per injected goroutine.
	nw = *n;
	while(runtime·sched.pidle && nw) {
		runtime·lock(&runtime·sched);
		if(runtime·sched.pidle == nil) {
			// Lost the race for the last idle P.
			runtime·unlock(&runtime·sched);
			break;
		}
		(*w)++;
		nw--;
		p = pidleget();
		mp = mget();
		runtime·unlock(&runtime·sched);
		if(mp) {
			entergo(mp, p);
			runtime·notewakeup(&mp->park);
		} else
			newm(runtime·mstart, p, false);
	}
}
| 1638 |
| 1639 static void |
| 1640 globrunqput(G *gp) |
| 1641 { |
| 1642 gp->schedlink = nil; |
| 1643 if(runtime·sched.runqtail) |
| 1644 runtime·sched.runqtail->schedlink = gp; |
| 1645 else |
| 1646 runtime·sched.runqhead = gp; |
| 1647 runtime·sched.runqtail = gp; |
| 1648 runtime·sched.runqsize++; |
| 1649 } |
| 1650 |
| 1651 static G* |
| 1652 globrunqget(void) |
| 1653 { |
| 1654 G *gp, *gp1; |
| 1655 int32 n; |
| 1656 |
| 1657 if(runtime·sched.runqsize == 0) |
| 1658 return nil; |
| 1659 n = runtime·sched.runqsize/runtime·gomaxprocs+1; |
| 1660 if(n > runtime·sched.runqsize) |
| 1661 n = runtime·sched.runqsize; |
| 1662 runtime·sched.runqsize -= n; |
| 1663 if(runtime·sched.runqsize == 0) |
| 1664 runtime·sched.runqtail = nil; |
| 1665 gp1 = nil; |
| 1666 while(n--) { |
| 1667 gp = runtime·sched.runqhead; |
| 1668 runtime·sched.runqhead = gp->schedlink; |
| 1669 gp->schedlink = gp1; |
| 1670 gp1 = gp; |
| 1671 } |
| 1672 return gp1; |
| 1673 } |
| 1674 |
| 1675 // sched is locked |
| 1676 static P* |
| 1677 pidleget(void) |
| 1678 { |
| 1679 P *p; |
| 1680 ········ |
| 1681 p = runtime·sched.pidle; |
| 1682 if(p) { |
| 1683 runtime·sched.pidle = p->link; |
| 1684 runtime·sched.npidle--; |
| 1685 } |
| 1686 return p; |
| 1687 } |
| 1688 |
| 1689 // sched is locked |
| 1690 static void |
| 1691 pidleput(P *p) |
| 1692 { |
| 1693 p->link = runtime·sched.pidle; |
| 1694 runtime·sched.pidle = p; |
| 1695 runtime·sched.npidle++; |
287 } | 1696 } |
288 | 1697 |
289 static void | 1698 static void |
290 runqgrow(P *p) | 1699 runqgrow(P *p) |
291 { | 1700 { |
292 G **q; | 1701 G **q; |
293 int32 s, t, h, t2; | 1702 int32 s, t, h, t2; |
294 | 1703 |
295 h = p->runqhead; | 1704 h = p->runqhead; |
296 t = p->runqtail; | 1705 t = p->runqtail; |
(...skipping 13 matching lines...) Expand all Loading... |
310 p->runqtail = t2; | 1719 p->runqtail = t2; |
311 p->runqsize = 2*s; | 1720 p->runqsize = 2*s; |
312 } | 1721 } |
313 | 1722 |
314 static G* | 1723 static G* |
315 runqsteal(P *p, P *p2) | 1724 runqsteal(P *p, P *p2) |
316 { | 1725 { |
317 G *gp, *gp1; | 1726 G *gp, *gp1; |
318 int32 t, h, s, t2, h2, s2, c, c1; | 1727 int32 t, h, s, t2, h2, s2, c, c1; |
319 | 1728 |
320 » if(p2->runqhead==p2->runqtail) { | 1729 » if(p2->runqhead==p2->runqtail) |
321 » » m->schedstats.stealempty++; | |
322 return nil; | 1730 return nil; |
323 } | |
324 if(p < p2) | 1731 if(p < p2) |
325 runtime·lock(p); | 1732 runtime·lock(p); |
326 runtime·lock(p2); | 1733 runtime·lock(p2); |
327 if(p2->runqhead==p2->runqtail) { | 1734 if(p2->runqhead==p2->runqtail) { |
328 runtime·unlock(p2); | 1735 runtime·unlock(p2); |
329 if(p < p2) | 1736 if(p < p2) |
330 runtime·unlock(p); | 1737 runtime·unlock(p); |
331 m->schedstats.stealempty++; | |
332 return nil; | 1738 return nil; |
333 } | 1739 } |
334 if(p >= p2) | 1740 if(p >= p2) |
335 runtime·lock(p); | 1741 runtime·lock(p); |
336 h = p->runqhead; | 1742 h = p->runqhead; |
337 t = p->runqtail; | 1743 t = p->runqtail; |
338 s = p->runqsize; | 1744 s = p->runqsize; |
339 h2 = p2->runqhead; | 1745 h2 = p2->runqhead; |
340 t2 = p2->runqtail; | 1746 t2 = p2->runqtail; |
341 s2 = p2->runqsize; | 1747 s2 = p2->runqsize; |
(...skipping 18 matching lines...) Expand all Loading... |
360 if(h2 == s2) | 1766 if(h2 == s2) |
361 h2 = 0; | 1767 h2 = 0; |
362 p->runq[t] = gp1; | 1768 p->runq[t] = gp1; |
363 t++; | 1769 t++; |
364 if(t==s) | 1770 if(t==s) |
365 t = 0; | 1771 t = 0; |
366 c1++; | 1772 c1++; |
367 } | 1773 } |
368 p->runqtail = t; | 1774 p->runqtail = t; |
369 p2->runqhead = h2; | 1775 p2->runqhead = h2; |
370 m->schedstats.stealn++; | |
371 m->schedstats.stealcnt += c1 + 1; | |
372 runtime·unlock(p2); | 1776 runtime·unlock(p2); |
373 runtime·unlock(p); | 1777 runtime·unlock(p); |
374 return gp; | 1778 return gp; |
375 } | 1779 } |
376 | 1780 |
377 // Put g on runnable queue. | 1781 // Put g on runnable queue. |
378 static void | 1782 static void |
379 runqput(P *p, G *gp) | 1783 runqput(P *p, G *gp) |
380 { | 1784 { |
381 int32 h, t, s; | 1785 int32 h, t, s; |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
438 { | 1842 { |
439 M *mp; | 1843 M *mp; |
440 | 1844 |
441 if((mp = runtime·sched.mhead) != nil){ | 1845 if((mp = runtime·sched.mhead) != nil){ |
442 runtime·sched.mhead = mp->schedlink; | 1846 runtime·sched.mhead = mp->schedlink; |
443 runtime·sched.mwait--; | 1847 runtime·sched.mwait--; |
444 } | 1848 } |
445 return mp; | 1849 return mp; |
446 } | 1850 } |
447 | 1851 |
448 // Mark g ready to run. | 1852 // Put on gfree list. |
449 void | |
450 runtime·ready(G *gp) | |
451 { | |
452 P *p; | |
453 M *mp; | |
454 | |
455 if(gp->m) | |
456 runtime·throw("bad g->m in ready"); | |
457 | |
458 // Mark runnable. | |
459 if(gp->status == Grunnable || gp->status == Grunning) { | |
460 runtime·printf("goroutine %p has status %d\n", gp, gp->status); | |
461 runtime·throw("bad g->status in ready"); | |
462 } | |
463 gp->status = Grunnable; | |
464 runqput(m->p, gp); | |
465 if(runtime·sched.pidle) { | |
466 runtime·lock(&runtime·sched); | |
467 p = pidleget(); | |
468 if(p) { | |
469 mp = mget(); | |
470 runtime·unlock(&runtime·sched); | |
471 if(mp) { | |
472 entergo(mp, p); | |
473 runtime·notewakeup(&mp->park); | |
474 } else { | |
475 newm(runtime·mstart, p, false); | |
476 } | |
477 } else | |
478 runtime·unlock(&runtime·sched); | |
479 } | |
480 } | |
481 | |
482 int32 | |
483 runtime·gcprocs(void) | |
484 { | |
485 int32 n; | |
486 | |
487 runtime·lock(&runtime·sched); | |
488 n = runtime·gomaxprocs; | |
489 if(n > runtime·ncpu) | |
490 n = runtime·ncpu; | |
491 if(n > MaxGcproc) | |
492 n = MaxGcproc; | |
493 runtime·unlock(&runtime·sched); | |
494 return n; | |
495 } | |
496 | |
497 void | |
498 runtime·helpgc(int32 nproc) | |
499 { | |
500 M *mp; | |
501 int32 n, pos; | |
502 | |
503 LOG("%d: helpgc(%d)\n", m->id, nproc); | |
504 runtime·lock(&runtime·sched); | |
505 pos = 0; | |
506 for(n = 1; n < nproc; n++) { // one M is currently running | |
507 if(runtime·allp[pos]->mcache == m->mcache) | |
508 pos++; | |
509 mp = mget(); | |
510 if(mp == nil) { | |
511 runtime·unlock(&runtime·sched); | |
512 newm(runtime·mstart, runtime·allp[pos], true); | |
513 runtime·lock(&runtime·sched); | |
514 pos++; | |
515 continue; | |
516 } | |
517 mp->helpgc = 1; | |
518 mp->mcache = runtime·allp[pos]->mcache; | |
519 pos++; | |
520 LOG("%d: helpgc wake %d\n", m->id, mp->id); | |
521 runtime·notewakeup(&mp->park); | |
522 } | |
523 runtime·unlock(&runtime·sched); | |
524 } | |
525 | |
526 void | |
527 runtime·stoptheworld(void) | |
528 { | |
529 int32 acquired, i; | |
530 uint32 s; | |
531 P *p; | |
532 | |
533 LOG("%d: stoptheworld\n", m->id); | |
534 runtime·gcwaiting = 1; //@ atomic | |
535 acquired = 1; | |
536 m->p->status = Plocked; | |
537 while(acquired != runtime·gomaxprocs) { | |
538 LOG(" idle=%p\n", runtime·sched.pidle); | |
539 for(i=0; i<runtime·gomaxprocs; i++) | |
540 LOG(" %d status=%d\n", i, runtime·allp[i]->status); | |
541 for(i=0; i<runtime·gomaxprocs; i++) { | |
542 s = runtime·allp[i]->status; | |
543 if(s == Psyscall && runtime·cas(&runtime·allp[i]->status
, s, Plocked)) { | |
544 LOG(" acquired syscall %d\n", i); | |
545 acquired++; | |
546 } | |
547 } | |
548 runtime·lock(&runtime·sched); | |
549 while(runtime·sched.pidle) { | |
550 p = pidleget(); | |
551 p->status = Plocked; | |
552 acquired++; | |
553 } | |
554 runtime·unlock(&runtime·sched); | |
555 //!!! replace with blocking | |
556 if(acquired != runtime·gomaxprocs) { | |
557 runtime·usleep(1); | |
558 } | |
559 } | |
560 LOG("%d: stoptheworld stopped\n", m->id); | |
561 } | |
562 | |
// Undo stoptheworld: clear gcwaiting, re-run procresize (picking up a
// pending GOMAXPROCS change requested while the world was stopped),
// then hand every idle P to an M -- waking a parked M when one is
// available, creating a new OS thread otherwise.  Ends with gosched()
// so the current goroutine re-enters the run queue fairly.
void
runtime·starttheworld(void)
{
	P *p;
	M *mp;

	LOG("%d: starttheworld\n", m->id);
	runtime·gcwaiting = 0;
	if(newprocs) {
		// GOMAXPROCS change was requested during the stop.
		procresize(newprocs);
		newprocs = 0;
	} else {
		procresize(runtime·gomaxprocs);
	}
	// TODO(dvyukov): re-balance G's among P's
	runtime·lock(&runtime·sched);
	while(runtime·sched.pidle) {
		p = pidleget();
		mp = mget();
		if(mp) {
			entergo(mp, p);
			runtime·notewakeup(&mp->park);
		} else {
			// newm allocates and may block; drop the lock around it.
			runtime·unlock(&runtime·sched);
			newm(runtime·mstart, p, false);
			runtime·lock(&runtime·sched);
		}
	}
	runtime·unlock(&runtime·sched);
	runtime·gosched();
}
594 | |
// Called to start an M: entry point for every OS thread created by
// newm (and for m0).  Runs on the M's g0.  Sets up the scheduler
// stack bookkeeping, per-thread init, and signal handlers, then
// either services a GC-helper request or acquires its assigned P
// before entering the scheduler loop.  Never returns.
void
runtime·mstart(void)
{
	// It is used by windows-386 only. Unfortunately, seh needs
	// to be located on os stack, and mstart runs on os stack
	// for both m0 and m.
	SEH seh;
	P *p;

	LOG("%d: mstart m=%p\n", m->id, m);
	if(g != m->g0)
		runtime·throw("bad runtime·mstart");

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	runtime·gosave(&m->g0->sched);
	m->g0->sched.pc = (void*)-1; // make sure it is never used
	m->seh = &seh;
	runtime·asminit();
	runtime·minit();

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if(m == &runtime·m0)
		runtime·initsig();

	if(m->helpgc) {
		// Created to help with a GC in progress: run one round of
		// gchelper with the P's mcache, then park in mstop.
		LOG("%d: mstart helpgc\n", m->id);
		m->helpgc = 0;
		m->mcache = m->p->mcache;
		runtime·gchelper();
		m->mcache = nil;
		m->p = nil;
		LOG("%d: gchelper end\n", m->id);
		mstop();
	} else if(m != &runtime·m0) {
		// entergo requires m->p == nil, so detach before re-attaching.
		p = m->p;
		m->p = nil;
		entergo(m, p);
	}
	LOG("%d: calling schedule\n", m->id);
	schedule();

	// TODO(brainman): This point is never reached, because scheduler
	// does not release os threads at the moment. But once this path
	// is enabled, we must remove our seh here.
}
644 | |
// When running with cgo, we call libcgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
void (*libcgo_thread_start)(void*);

// Argument block handed to libcgo_thread_start: the M and G the new
// thread should adopt, and its entry function.
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;
	G *g;
	void (*fn)(void);
};
657 | |
658 static void | |
659 initgstack(G *newg, byte *stk, int32 stacksize) | |
660 { | |
661 newg->stack0 = (uintptr)stk; | |
662 newg->stackguard = (uintptr)stk + StackGuard; | |
663 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop); | |
664 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); | |
665 } | |
666 | |
// Create a new m. It will start off with a call to runtime·mstart.
// The M, its FixAlloc, its optional gsignal G + signal stack, and
// (non-cgo, non-Windows) its g0 stack are carved out of one SysAlloc
// region:  [M][FixAlloc][gsignal G + gsignal stack][g0 stack].
// p is the P the new M should run (may be nil); helpgc marks it as a
// GC helper.
static M*
newm(void(*fn)(void), P *p, bool helpgc)
{
	M *mp;
	int32 addmem, stksiz, stkoff;

	LOG("%d: newm\n", m->id);
	m->schedstats.nm++;
	addmem = sizeof(*mp->stackalloc);
	if(runtime·gsignalstk)
		addmem += sizeof(G) + runtime·gsignalstk;
	// stkoff is fixed before the g0 stack is (maybe) appended, so it
	// points at the start of the g0 stack region.
	stkoff = sizeof(M) + addmem;
	stksiz = StackSystem + (fn == runtime·mstart ? 8192 : 64*1024);
	if(!runtime·iscgo && !Windows)
		addmem += stksiz;
	//!!! all that is now non-GC, can it break something?
	mp = runtime·SysAlloc(sizeof(M) + addmem);
	mp->stackalloc = (FixAlloc*)(mp+1);
	mcommoninit(mp);
	mp->g0 = &mp->g0buf;
	mp->p = p;
	mp->helpgc = helpgc;
	if(runtime·gsignalstk) {
		// gsignal G sits right after the FixAlloc; its stack follows it.
		mp->gsignal = (G*)((byte*)mp+sizeof(*mp)+sizeof(*mp->stackalloc));
		initgstack(mp->gsignal, (byte*)(mp->gsignal+1), runtime·gsignalstk);
	}

	if(runtime·iscgo) {
		CgoThreadStart ts;

		if(libcgo_thread_start == nil)
			runtime·throw("libcgo_thread_start missing");
		// pthread_create will make us a stack.
		ts.m = mp;
		ts.g = mp->g0;
		ts.fn = fn;
		runtime·asmcgocall(libcgo_thread_start, &ts);
	} else {
		// windows will layout sched stack on os stack
		if(!Windows)
			initgstack(mp->g0, (byte*)mp+stkoff, stksiz);
		runtime·newosproc(mp, mp->g0, (byte*)mp->g0->stackbase, fn);
	}
	return mp;
}
713 | |
// Park the current M (which must not own a P) on the idle M list
// until someone wakes it with notewakeup(&m->park).  A wakeup with
// m->helpgc set means "run one GC helper round and park again";
// otherwise the waker is expected to have attached a P (via entergo)
// before the wakeup, which we verify.
static void
mstop(void)
{
	LOG("%d: mstop\n", m->id);
	if(m->p != nil)
		runtime·throw("mstop: p != nil");
retry:
	runtime·noteclear(&m->park);
	runtime·lock(&runtime·sched);
	mput(m);
	runtime·unlock(&runtime·sched);
	runtime·notesleep(&m->park);
	if(m->helpgc) {
		LOG("%d: gchelper\n", m->id);
		m->helpgc = 0;
		runtime·gchelper();
		m->mcache = nil;
		LOG("%d: gchelper end\n", m->id);
		goto retry;
	}
	LOG("%d: mstop wake\n", m->id);
	if(m->p == nil)
		runtime·throw("mstop: p == nil");
}
738 | |
739 // One round of scheduler: find a goroutine and run it. | |
740 // The argument is the goroutine that was running before | |
741 // schedule was called, or nil if this is the first call. | |
742 // Never returns. | |
743 static void | |
744 schedule(void) | |
745 { | |
746 int32 hz, i; | |
747 G *gp; | |
748 P *p; | |
749 | |
750 LOG("%d: schedule p=%p\n", m->id, m->p); | |
751 USED(&gp); | |
752 if(m->locks != 0) | |
753 runtime·throw("schedule holding locks"); | |
754 if(gp == m->g0) | |
755 runtime·throw("schedule of g0"); | |
756 | |
757 top: | |
758 if(runtime·gcwaiting) { | |
759 leavego(m); | |
760 mstop(); | |
761 goto top; | |
762 } | |
763 | |
764 gp = runqget(m->p); | |
765 if(gp == nil) { | |
766 if(runtime·sched.runq) { | |
767 runtime·lock(&runtime·sched); | |
768 //!!! get a batch | |
769 if(runtime·sched.runq) { | |
770 gp = runtime·sched.runq; | |
771 runtime·sched.runq = gp->schedlink; | |
772 runtime·sched.nrunq--; | |
773 } | |
774 runtime·unlock(&runtime·sched); | |
775 if(gp) | |
776 goto haveg; | |
777 } | |
778 for(i=0; i<2*runtime·gomaxprocs; i++) { | |
779 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs]
; | |
780 if(p == m->p) | |
781 gp = runqget(p); | |
782 else | |
783 gp = runqsteal(m->p, p); | |
784 if(gp) | |
785 break; | |
786 } | |
787 if(gp == nil) { | |
788 leavego(m); | |
789 for(i=0; i<runtime·gomaxprocs; i++) { | |
790 p = runtime·allp[i]; | |
791 if(p && p->runqhead != p->runqtail) { | |
792 runtime·lock(&runtime·sched); | |
793 p = pidleget(); | |
794 runtime·unlock(&runtime·sched); | |
795 if(p) { | |
796 entergo(m, p); | |
797 goto top; | |
798 } | |
799 break; | |
800 } | |
801 } | |
802 mstop(); | |
803 goto top; | |
804 } | |
805 } | |
806 | |
807 haveg: | |
808 LOG("%d: start running goroutine %p\n", m->id, gp); | |
809 m->p->tick++; | |
810 gp->status = Grunning; | |
811 m->curg = gp; | |
812 gp->m = m; | |
813 | |
814 // Check whether the profiler needs to be turned on or off. | |
815 hz = runtime·sched.profilehz; | |
816 if(m->profilehz != hz) | |
817 runtime·resetcpuprofiler(hz); | |
818 | |
819 if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff | |
820 runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); | |
821 } | |
822 runtime·gogo(&gp->sched, 0); | |
823 } | |
824 | |
// Continuation of runtime·park, running on g0.  Calls the unlock
// function registered by park (releasing the lock only after the
// switch away from the parked g) and enters the scheduler.
static void
park0(G *gp)
{
	USED(&gp);
	if(m->waitunlockf) {
		m->waitunlockf(m->waitlock);
		m->waitunlockf = nil;
	}
	schedule();
}
835 | |
// Atomically parks g and unlocks the lock.
// Records the lock and unlock function for park0 (which runs on g0),
// marks g waiting with the given human-readable reason, and switches
// to the scheduler via mcall.  Must not be called on g0.
void
runtime·park(void *l, void(*unlockf)(void*), int8 *reason)
{
	LOG("%d: park l=%p reason=%s\n", m->id, l, reason);
	if(g == m->g0)
		runtime·throw("park of g0");
	m->waitlock = l;
	m->waitunlockf = unlockf;
	g->status = Gwaiting;
	g->waitreason = reason;
	g->m = nil;
	runtime·mcall(park0);
}
850 | |
// Continuation of runtime·gosched, running on g0: requeue the
// yielding goroutine on the local P and pick the next one to run.
static void
gosched0(G *gp)
{
	LOG("%d: gosched0 gp=%p\n", m->id, gp);
	gp->status = Grunnable;
	gp->m = nil;
	runqput(m->p, gp);
	schedule();
}
860 | |
861 void | |
862 runtime·gosched(void) | |
863 { | |
864 if(m->p->status != Pbusy) | |
865 runtime·throw("m->p->status != Pbusy"); | |
866 runtime·mcall(gosched0); | |
867 } | |
868 | |
// On g0.
// Finish off the exiting goroutine gp: mark it dead, break any
// LockOSThread pairing, unwind its extra stack segments, and recycle
// the G on the local P's free list before rescheduling.
static void
goexit0(G *gp)
{
	gp->status = Gdead;
	gp->m = nil;
	if(gp->lockedm) {
		gp->lockedm = nil;
		m->lockedg = nil;
	}
	runtime·unwindstack(gp, nil);
	gfput(m->p, gp);
	m->schedstats.gend++;
	m->schedstats.gfput++;
	schedule();
}

// Terminate the current goroutine.  Every G's scheduling PC starts
// out pointing here, so falling off the end of a goroutine lands in
// this function.
void
runtime·goexit(void)
{
	runtime·mcall(goexit0);
}
891 | |
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
#pragma textflag 7
void
runtime·entersyscall(void)
{
	LOG("%d: entersyscall g=%p p=%p\n", m->id, g, m->p);
	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	// Sanity check: the recorded SP must lie within the stack segment.
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Release the P into Psyscall so stoptheworld/retake can claim it
	// while we block in the kernel.
	m->mcache = nil;
	m->p->tick++;
	m->p->m = nil;
	m->p->status = Psyscall; //@@@ store-release
}
925 | |
// Slow-path continuation of runtime·exitsyscall, running on g0.
// Try once more for an idle P; if there is one, queue gp on it and
// keep running, otherwise push gp onto the global run queue and park
// this M.
static void
exitsyscall0(G *gp)
{
	P *p;

	LOG("%d: exitsyscall0\n", m->id);
	gp->status = Grunnable;
	gp->m = nil;
	runtime·lock(&runtime·sched);
	p = pidleget();
	if(p == nil)
		globrunqput(gp);
	runtime·unlock(&runtime·sched);
	if(p) {
		runqput(p, gp);
		entergo(m, p);
		schedule();	// does not return
	}
	mstop();
	schedule();
}
947 | |
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
// Fast path: CAS our old P back from Psyscall and keep running.
// Otherwise grab an idle P, or fall to exitsyscall0 on g0.
void
runtime·exitsyscall(void)
{
	uint32 s;
	P *p;

	LOG("%d: exitsyscall g=%p\n", m->id, g);

	// Check whether the profiler needs to be turned on.
	if(m->profilehz > 0)
		runtime·setprof(true);

	// Try to re-acquire the P.
	s = m->p->status;
	if(s == Psyscall && runtime·cas(&m->p->status, s, Pbusy)) {
		LOG("%d: exitsyscall fast\n", m->id);
		m->schedstats.sysexitfast++;
		// There's a cpu for us, so we can run.
		m->mcache = m->p->mcache;
		m->p->m = m;
		g->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
		g->gcstack = (uintptr)nil;
		return;
	}

	// Our P was retaken; try to get an idle P instead.
	m->p = nil;
	if(runtime·sched.pidle) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		runtime·unlock(&runtime·sched);
		if(p) {
			entergo(m, p);
			g->gcstack = (uintptr)nil;
			return;
		}
	}

	m->schedstats.sysexitslow++;
	LOG("%d: exitsyscall slow p->status=%d\n", m->id, s);

	runtime·mcall(exitsyscall0);

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	g->gcstack = (uintptr)nil;
}
1005 | |
// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment. The function's return value is in
// m->cret.  Copies the return arguments back into the caller's
// frame, frees the no-longer-needed segment, restores the previous
// stackbase/stackguard, and resumes at the saved gobuf.
void
runtime·oldstack(void)
{
	Stktop *top, old;
	uint32 argsize;
	uintptr cret;
	byte *sp;
	G *g1;

	//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	old = *top;	// copy out before the segment is freed
	argsize = old.argsize;
	if(argsize > 0) {
		// Copy the callee's arguments back below the caller's Stktop.
		sp -= argsize;
		runtime·memmove(top->argp, sp, argsize);
	}

	if(old.free != 0)
		runtime·stackfree((byte*)g1->stackguard - StackGuard, old.free);
	g1->stackbase = (uintptr)old.stackbase;
	g1->stackguard = (uintptr)old.stackguard;

	cret = m->cret;
	m->cret = 0; // drop reference
	runtime·gogo(&old.gobuf, cret);
}
1039 | |
// Called from reflect·call or from runtime·morestack when a new
// stack segment is needed. Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
void
runtime·newstack(void)
{
	int32 framesize, argsize;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;
	bool reflectcall;
	uintptr free;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	g1 = m->curg;

	if(m->morebuf.sp < g1->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, g1->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}
	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

	// framesize==1 is reflect.call's sentinel for "unknown frame size".
	reflectcall = framesize==1;
	if(reflectcall)
		framesize = 0;

	if(reflectcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > g1->stackguard) {
		// special case: called from reflect.call (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)g1->stackguard - StackGuard;
		free = 0;
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		stk = runtime·stackalloc(framesize);
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;
	}

//runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
//framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);

	// Record the old segment in the new Stktop so oldstack can unwind.
	top->stackbase = (byte*)g1->stackbase;
	top->stackguard = (byte*)g1->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;
	m->moreargp = nil;
	m->morebuf.pc = nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = g1->ispanic;
	g1->ispanic = false;

	g1->stackbase = (uintptr)top;
	g1->stackguard = (uintptr)stk + StackGuard;

	sp = (byte*)top;
	if(argsize > 0) {
		sp -= argsize;
		runtime·memmove(sp, top->argp, argsize);
	}
	if(thechar == '5') {
		// caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = (uintptr)sp;
	label.pc = (byte*)runtime·lessstack;
	label.g = m->curg;
	runtime·gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return: deliberate fault if gogocall comes back
}
1134 | |
// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack. This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.
// gp->param carries the requested size in and the allocation out.
static void
mstackalloc(G *gp)
{
	gp->param = runtime·stackalloc((uintptr)gp->param);
	runtime·gogo(&gp->sched, 0);
}
1145 | |
// Allocate a new g, with a stack big enough for stacksize bytes.
// A negative stacksize means "no stack" (caller provides one).
// When not already on g0, the stack allocation is trampolined onto
// the scheduler stack via mstackalloc.
G*
runtime·malg(int32 stacksize)
{
	G *newg;
	byte *stk;

	if(StackTop < sizeof(Stktop)) {
		runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
		runtime·throw("runtime: bad stack.h");
	}

	newg = runtime·malloc(sizeof(G));
	if(stacksize >= 0) {
		if(g == m->g0) {
			// running on scheduler stack already.
			stk = runtime·stackalloc(StackSystem + stacksize);
		} else {
			// have to call stackalloc on scheduler stack.
			g->param = (void*)(StackSystem + stacksize);
			runtime·mcall(mstackalloc);
			stk = g->param;
			g->param = nil;
		}
		newg->stack0 = (uintptr)stk;
		newg->stackguard = (uintptr)stk + StackGuard;
		newg->stackbase = (uintptr)stk + StackSystem + stacksize - sizeof(Stktop);
		runtime·memclr((byte*)newg->stackbase, sizeof(Stktop));
	}
	return newg;
}
1177 | |
// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred. It's OK for this to call
// functions that split the stack.
#pragma textflag 7
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	if(thechar == '5')
		argp = (byte*)(&fn+2); // skip caller's saved LR
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}
1197 | |
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.  Reuses a G from the free list
// when possible; otherwise allocates one and links it into allg.
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	M *mp;
	P *p;
	int32 siz;

//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	m->schedstats.gstart++;
	siz = narg + nret;
	siz = (siz+7) & ~7;	// 8-byte align the frame

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	if((newg = gfget(m->p)) != nil) {
		m->schedstats.gfget++;
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		m->schedstats.galloc++;
		newg = runtime·malg(StackMin);
		runtime·lock(&runtime·sched);
		newg->goid = ++runtime·sched.goidseq;
		newg->alllink = runtime·allg;
		runtime·atomicstorep(&runtime·allg, newg);
		runtime·unlock(&runtime·sched);
	}

	// Copy the arguments onto the top of the new stack.
	sp = (byte*)newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	LOG("%d: newproc %p\n", m->id, newg);
	// sched.pc == goexit marks a fresh G for schedule()'s kickoff path.
	newg->sched.sp = (uintptr)sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;
	newg->status = Grunnable;
	runqput(m->p, newg);

	// If a P is idle, try to wake an M to run the new work
	// (skipped for runtime·main, which must run on this M).
	if(runtime·sched.pidle && fn != (byte*)runtime·main) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		if(p) {
			mp = mget();
			runtime·unlock(&runtime·sched);
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else
				newm(runtime·mstart, p, false);
		} else
			runtime·unlock(&runtime·sched);
	}
	return newg;
}
1271 | |
// Put gp on p's local free-G cache.  When the local cache reaches 64,
// spill half of it to the global free list under sched.gflock.
static void
gfput(P *p, G *gp)
{
	if(gp->stackguard - StackGuard != gp->stack0)
		runtime·throw("invalid stack in gfput");
	gp->schedlink = p->gfree;
	p->gfree = gp;
	p->gfreecnt++;
	if(p->gfreecnt >= 64) {
		runtime·lock(&runtime·sched.gflock);
		while(p->gfreecnt >= 32) {
			p->gfreecnt--;
			gp = p->gfree;
			p->gfree = gp->schedlink;
			gp->schedlink = runtime·sched.gfree;
			runtime·sched.gfree = gp;
		}
		runtime·unlock(&runtime·sched.gflock);
	}
}
1293 | 1873 |
// Get a free G from p's local cache, refilling up to 32 from the
// global free list (under sched.gflock) when the local cache is empty.
// Returns nil when no free G is available.
static G*
gfget(P *p)
{
	G *gp;

retry:
	gp = p->gfree;
	if(gp == nil && runtime·sched.gfree) {
		runtime·lock(&runtime·sched.gflock);
		while(p->gfreecnt < 32 && runtime·sched.gfree) {
			p->gfreecnt++;
			gp = runtime·sched.gfree;
			runtime·sched.gfree = gp->schedlink;
			gp->schedlink = p->gfree;
			p->gfree = gp;
		}
		runtime·unlock(&runtime·sched.gflock);
		goto retry;
	}
	if(gp) {
		p->gfree = gp->schedlink;
		p->gfreecnt--;
	}
	return gp;
}
1320 | |
// runtime.Breakpoint: trap into the debugger.
void
runtime·Breakpoint(void)
{
	runtime·breakpoint();
}

// runtime.Gosched: yield the processor to other goroutines.
void
runtime·Gosched(void)
{
	runtime·gosched();
}
1332 | |
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
// Returns the previous setting.  n <= 0 or n == current just queries.
// The actual resize is deferred: newprocs is recorded here and
// applied by procresize when starttheworld restarts the world.
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;

	LOG("%d: gomaxprocsfunc %d\n", m->id, n);
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·lock(&runtime·sched);
	ret = runtime·gomaxprocs;
	if(n <= 0 || n == ret) {
		runtime·unlock(&runtime·sched);
		return ret;
	}
	runtime·unlock(&runtime·sched);
	// NOTE(review): gomaxprocs may change between the unlock above and
	// the stop below; ret could then be stale -- verify this window is
	// acceptable or re-read under worldsema.

	runtime·semacquire(&runtime·worldsema);
	m->gcing = 1;
	runtime·stoptheworld();
	newprocs = n;	// picked up by starttheworld -> procresize
	m->gcing = 0;
	runtime·semrelease(&runtime·worldsema);
	runtime·starttheworld();

	return ret;
}
1361 | |
// runtime.LockOSThread: wire the calling goroutine to its OS thread.
// Currently a no-op in this scheduler rewrite (body commented out).
void
runtime·LockOSThread(void)
{
//!!! implement me.
/*
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = true;
		return;
	}
	m->lockedg = g;
	g->lockedm = m;
*/
}

// runtime.UnlockOSThread: undo LockOSThread.
// Currently a no-op in this scheduler rewrite (body commented out).
void
runtime·UnlockOSThread(void)
{
/*
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = false;
		return;
	}
	m->lockedg = nil;
	g->lockedm = nil;
*/
}

// Report whether the current goroutine is wired to its OS thread.
bool
runtime·lockedOSThread(void)
{
	return g->lockedm != nil && m->lockedg != nil;
}

// for testing of callbacks
void
runtime·golockedOSThread(bool ret)
{
	ret = runtime·lockedOSThread();
	FLUSH(&ret);
}
1402 | |
// for testing of wire, unwire
// Returns the current M's id through the Go out-parameter.
void
runtime·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}

// runtime.NumGoroutine.
// NOTE(review): temporarily hard-coded to 1 while the gcount
// accounting is disabled in this scheduler rewrite.
void
runtime·NumGoroutine(int32 ret)
{
	//ret = runtime·sched.gcount;
	ret = 1;
	FLUSH(&ret);
}

// Number of goroutines; see NumGoroutine note above (stubbed to 1).
int32
runtime·gcount(void)
{
	//return runtime·sched.gcount;
	return 1;
}

// Number of M's ever created.
int32
runtime·mcount(void)
{
	return runtime·sched.mcount;
}

// Diagnostics for misuse of runtime·mcall.
void
runtime·badmcall(void)	// called from assembly
{
	runtime·throw("runtime: mcall called on m->g0 stack");
}

void
runtime·badmcall2(void)	// called from assembly
{
	runtime·throw("runtime: mcall function returned");
}
1443 | |
// CPU-profiler state: the callback, its rate, and a scratch buffer
// for tracebacks, all guarded by the embedded Lock.
static struct {
	Lock;
	void (*fn)(uintptr*, int32);
	int32 hz;
	uintptr pcbuf[100];
} prof;
1450 | |
// Called if we receive a SIGPROF signal.
// Takes a traceback of gp at pc/sp/lr and hands the PCs to the
// registered profiler callback.  fn is re-checked under prof's lock
// because setcpuprofilerate may clear it concurrently.
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
{
	int32 n;

	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime·lock(&prof);
	if(prof.fn == nil) {
		runtime·unlock(&prof);
		return;
	}
	n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
	if(n > 0)
		prof.fn(prof.pcbuf, n);
	runtime·unlock(&prof);
}
1470 | |
// Arrange to call fn with a traceback hz times a second.
// fn == nil or hz == 0 disables profiling.
void
runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime·resetcpuprofiler(0);

	runtime·lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime·unlock(&prof);
	// profilehz is read by schedule() to (re)arm each M's profiler.
	runtime·lock(&runtime·sched);
	runtime·sched.profilehz = hz;
	runtime·unlock(&runtime·sched);

	if(hz != 0)
		runtime·resetcpuprofiler(hz);
}
1499 | |
// Change number of processors. The world is stopped.
// Allocates/initializes P's up to new, drains the run queues of
// retired P's onto allp[0], frees resources of P's beyond new,
// attaches the current M to allp[0], and puts the rest on the idle
// list.  If the count is unchanged, only resets P statuses.
static void
procresize(int32 new)
{
	int32 i, old;
	G *gp;
	P *p;

	runtime·lock(&runtime·sched); //!!!
	old = runtime·gomaxprocs;
	LOG("%d: procresize %d->%d\n", m->id, old, new);
	if(old < 0 || old > maxgomaxprocs || new <= 0 || new > maxgomaxprocs)
		runtime·throw("procresize: invalid arg");
	if(old == new) {
		// No resize: just move every P back to Pbusy/Pidle.
		for(i=0; i<new; i++) {
			p = runtime·allp[i];
			if(p == m->p)
				p->status = Pbusy;
			else {
				p->status = Pidle;
				pidleput(p);
			}
		}
		runtime·unlock(&runtime·sched);
		return;
	}

	runtime·singleproc = new == 1;
	runtime·gomaxprocs = new;
	// Grow: allocate missing P's, mcaches, and run queues.
	for(i=0; i<new; i++) {
		p = runtime·allp[i];
		if(p == nil) {
			p = (P*)runtime·mallocgc(sizeof(runtime·allp[i][0]), 0, 0, 1);
			p->status = Plocked;
			runtime·allp[i] = p; //@@@ store-release
		}
		if(p->mcache == nil) {
			if(old==0 && i==0)
				p->mcache = m->mcache;	// bootstrap: adopt m0's cache
			else
				p->mcache = runtime·allocmcache();
		}
		if(p->runq == nil) {
			p->runqsize = 1024;
			p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*), 0, 0, 1);
		}
	}

	// Move all runnable G's onto allp[0]'s queue.
	for(i=1; i<old; i++) {
		for(;;) {
			gp = runqget(runtime·allp[i]);
			if(gp == nil)
				break;
			//TODO: spread more evenly.
			runqput(runtime·allp[0], gp);
		}
	}

	// Shrink: retire P's beyond the new count.
	for(i=new; i<old; i++) {
		runtime·freemcache(runtime·allp[i]->mcache);
		runtime·allp[i]->mcache = nil;
		runtime·allp[i]->status = Pdead;
		//TODO: free freeg
	}

	// Re-attach the current M to allp[0]; idle the rest.
	if(m->p)
		m->p->m = nil;
	m->p = nil;
	m->mcache = nil;
	runtime·allp[0]->m = nil;
	runtime·allp[0]->status = Pidle;
	entergo(m, runtime·allp[0]);
	for(i=1; i<new; i++) {
		p = runtime·allp[i];
		p->status = Pidle;
		pidleput(p);
	}
	runtime·unlock(&runtime·sched);
}
1579 | |
// Attach P p (which must be Pidle and unowned) to M mp, adopting its
// mcache and marking it Pbusy.  Inverse of leavego.
static void
entergo(M *mp, P *p)
{
	LOG("%d: entergo m=%d p=%p p->m=%p, p->status=%d, p->mcache=%p\n", m->id, mp->id, p, p->m, p->status, p->mcache);
	if(mp->p || mp->mcache)
		runtime·throw("entergo: already in go");
	if(p->m || p->status != Pidle) {
		runtime·printf("entergo: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
		runtime·throw("entergo: invalid p state");
	}
	mp->mcache = p->mcache;
	mp->p = p;
	p->m = mp;
	p->status = Pbusy;
}
1595 | |
1596 static void | |
1597 leavego(M *mp) | |
1598 { | |
1599 P *p; | |
1600 | |
1601 LOG("%d: leavego\n", mp->id); | |
1602 // sched is locked | |
1603 if(mp->p == nil || mp->mcache == nil) | |
1604 runtime·throw("leavego: invalid arg"); | |
1605 p = mp->p; | |
1606 if(p->m != mp || p->mcache != mp->mcache || p->status != Pbusy) { | |
1607 runtime·printf("leavego: m=%p m->p=%p p->m=%p m->mcache=%p p->mc
ache=%p p->status=%d\n", | |
1608 mp, mp->p, p->m, m->mcache, p->mcache, p->status); | |
1609 runtime·throw("leavego: invalid p state"); | |
1610 } | |
1611 mp->p = nil; | |
1612 mp->mcache = nil; | |
1613 p->m = nil; | |
1614 p->status = Pidle; | |
1615 | |
1616 runtime·lock(&runtime·sched); | |
1617 pidleput(p); | |
1618 runtime·unlock(&runtime·sched); | |
1619 } | |
1620 | |
// Pdesc records sysmon's last observation of a P, so retake can detect
// Ps that have not made progress for a while.
typedef struct Pdesc Pdesc;
struct Pdesc
{
	uint32 tick;	// last observed p->tick
	int64 when;	// time (ns since sysmon start) tick last changed
};
1627 | |
// Retake Ps whose tick has not advanced for more than 20us (20*1000 ns):
// if such a P is still in Psyscall, CAS it to Pidle and hand it to an
// idle M (or start a new M) so its queued work can run. Called
// periodically from sysmon; ps holds per-P tick/when observations
// carried across calls. now is ns since sysmon start.
static void
retake(int64 now, Pdesc *ps)
{
	uint32 i, s;
	int64 t;
	P *p;
	M *mp;

	for(i=0; i<runtime·gomaxprocs; i++) {
		p = runtime·allp[i];
		//!!! procresize may be in progress
		// do something if GC is in progress (help).
		if(p==nil)
			continue;
		t = p->tick;
		if(ps[i].tick != t) {
			// P made progress since last observation; restart timer.
			ps[i].tick = t;
			ps[i].when = now;
		}
		if(ps[i].when + 20*1000 > now)
			continue;
		s = p->status;
		if(s == Psyscall && runtime·cas(&p->status, s, Pidle)) {
			// Won the race for p: wake a parked M for it, or
			// create a fresh one if none is available.
			LOG("retake %p(%d)\n", p, i);
			runtime·lock(&runtime·sched);
			mp = mget();
			runtime·unlock(&runtime·sched);
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else {
				newm(runtime·mstart, p, false);
			}
		}
	}
}
1664 | |
//#define STATS
#ifdef STATS
// Debug-only: print per-P status and run queue length, then totals by
// status. before distinguishes the pre-retake (">>>") from the
// post-retake ("<<<") snapshot taken by sysmon. Compiled only when
// STATS is defined.
static void
sysstats(int64 now, bool before)
{
	//G *gp;
	P *p, **pp;
	int32 npidle, npbusy, npsyscall;

	npidle = npbusy = npsyscall = 0;
	int32 i = 0;
	for(pp=runtime·allp; p=*pp; pp++) {
		int32 s = p->status;
		int32 qt = p->runqtail, qh = p->runqhead, qs = p->runqsize;
		int32 ss = qt - qh;	// tail-head can be negative if wrapped; fixed up below
		if(ss < 0)
			ss = qs + ss;
		runtime·printf("P%d: status=%d runq=%d\n", i++, s, ss);
		if(s == Pidle)
			npidle++;
		else if(s == Pbusy)
			npbusy++;
		else if(s == Psyscall)
			npsyscall++;
	}
	runtime·printf("%D %s npbusy=%d npsyscall=%d npidle=%d\n",
		now/1000000, before ? ">>>" : "<<<", npbusy, npsyscall, npidle);
}
#endif
1694 | |
// Per-P observations used by retake, indexed by P number.
static Pdesc ps[maxgomaxprocs];
1696 | |
// Monitor thread entry point: loops forever, sleeping 20us and calling
// retake to reclaim Ps stuck in syscalls. With STATS it also prints
// scheduler statistics at most every 50ms, before and after retake.
static void
sysmon(void)
{
	int64 t0, now;
#ifdef STATS
	int64 laststats;
#endif

	// This is a special dedicated thread.
	// It works w/o mcache nor stackalloc, it may work concurrently with GC.
	runtime·asminit();
	runtime·minit();
	LOG("sysmon\n");
#ifdef STATS
	laststats = 0;
#endif
	t0 = runtime·nanotime();
	for(;;) {
		runtime·usleep(20);
		now = runtime·nanotime() - t0;	// ns since sysmon start
#ifdef STATS
		if(now - laststats > 50*1000*1000) {
			laststats = now;
			sysstats(now, true);
		}
#endif
		retake(now, ps);
#ifdef STATS
		// laststats == now only if we printed the "before" snapshot
		// this iteration; pair it with an "after" snapshot.
		if(laststats == now)
			sysstats(now, false);
#endif
	}
}
1730 | |
// Raise the calling thread's scheduling priority to *p.
// Defined elsewhere — presumably per-OS; confirm against os_GOOS files.
void runtime·raiseprio(int32 *p);
1732 | |
// Network poller thread entry point: repeatedly blocks in
// runtime·netwait and injects the returned list of ready goroutines
// into the scheduler. Requests OS priority 99 via runtime·raiseprio.
static void
netmon(void)
{
	G *gp;
	int32 n, w;
#ifdef STATS
	int32 i0, i1, q0, q1;
	int64 t0, t1, t2;
#endif

	runtime·asminit();
	runtime·minit();
	int32 prio = 99;
	runtime·raiseprio(&prio);
	for(;;) {
		LOG("netmon\n");
#ifdef STATS
		t0 = runtime·cputicks();
#endif
		// Blocking wait; returns a schedlink-linked list of ready gs.
		gp = runtime·netwait(1);
#ifdef STATS
		t1 = runtime·cputicks();
		i0 = runtime·sched.npidle;
		q0 = runtime·sched.nrunq;
#endif
		n = w = 0;
		inject(gp, &w, &n);
#ifdef STATS
		t2 = runtime·cputicks();
		i1 = runtime·sched.npidle;
		q1 = runtime·sched.nrunq;
		// NOTE(review): /2400 looks like a cycles->us conversion for a
		// 2.4GHz machine — hardcoded, confirm.
		runtime·printf("POLL %D\t-> %d\tINJECTS %D\t-> %d\tIDLE %d\t-> %d\t RUNQ %d\t-> %d\n", (t1-t0)/2400, n, (t2-t1)/2400, w, i0, i1, q0, q1);
#endif
	}
}
1768 | |
// Push the schedlink-linked list gp0 onto the global run queue, then
// wake (or create) Ms for idle Ps — at most one P per injected
// goroutine. On return *n is the number of gs injected and *w the
// number of Ps woken.
static void
inject(G *gp0, int32 *w, int32 *n)
{
	int32 nw;
	G *gp;
	M *mp;
	P *p;

	runtime·lock(&runtime·sched);
	while(gp0) {
		gp = gp0;
		gp0 = gp->schedlink;
		globrunqput(gp);
		(*n)++;
	}
	runtime·unlock(&runtime·sched);

	nw = *n;
	// Unlocked peek at pidle; re-checked under the lock below.
	while(runtime·sched.pidle && nw) {
		runtime·lock(&runtime·sched);
		if(runtime·sched.pidle == nil) {
			// Lost the race: someone else took the last idle P.
			runtime·unlock(&runtime·sched);
			break;
		}
		(*w)++;
		nw--;
		p = pidleget();
		mp = mget();
		runtime·unlock(&runtime·sched);
		if(mp) {
			entergo(mp, p);
			runtime·notewakeup(&mp->park);
		} else
			newm(runtime·mstart, p, false);
	}
}
1805 | |
1806 static void | |
1807 globrunqput(G *gp) | |
1808 { | |
1809 gp->schedlink = runtime·sched.runq; | |
1810 runtime·sched.runq = gp; | |
1811 runtime·sched.nrunq++; | |
1812 } | |
1813 | |
1814 // sched is locked | |
1815 static P* | |
1816 pidleget(void) | |
1817 { | |
1818 P *p; | |
1819 ········ | |
1820 p = runtime·sched.pidle; | |
1821 if(p) { | |
1822 runtime·sched.pidle = p->link; | |
1823 runtime·sched.npidle--; | |
1824 } | |
1825 return p; | |
1826 } | |
1827 | |
1828 // sched is locked | |
1829 static void | |
1830 pidleput(P *p) | |
1831 { | |
1832 p->link = runtime·sched.pidle; | |
1833 runtime·sched.pidle = p; | |
1834 runtime·sched.npidle++; | |
1835 } | |
LEFT | RIGHT |