LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #include "runtime.h" | 5 #include "runtime.h" |
6 #include "arch_GOARCH.h" | 6 #include "arch_GOARCH.h" |
7 #include "defs_GOOS_GOARCH.h" | 7 #include "defs_GOOS_GOARCH.h" |
8 #include "malloc.h" | 8 #include "malloc.h" |
9 #include "os_GOOS.h" | 9 #include "os_GOOS.h" |
10 #include "stack.h" | 10 #include "stack.h" |
| 11 #include "race.h" |
| 12 #include "type.h" |
11 | 13 |
12 // TODO(dvyukov): if a thread w/o mcache catches a signal (in particular SIGABOR
T), | 14 // TODO(dvyukov): if a thread w/o mcache catches a signal (in particular SIGABOR
T), |
13 // then it can't print dump. | 15 // then it can't print dump. |
14 | 16 |
15 enum { maxgomaxprocs = 1<<10 }; | 17 enum { maxgomaxprocs = 1<<10 }; |
16 enum { debug = 0 }; | 18 #define LOG if(0) runtime·printf |
17 #define LOG if(debug) runtime·printf | |
18 #define LOG1 runtime·printf | 19 #define LOG1 runtime·printf |
19 | 20 #define CHECK(cond, fmt) /*if(cond) {} else { runtime·printf fmt; runtime·throw(
"CHECK"); }*/ |
20 //FIXME: fix the comment. | |
21 | 21 |
22 // Go scheduler | 22 // Go scheduler |
23 // | 23 // |
24 // The go scheduler's job is to match ready-to-run goroutines (`g's) | 24 // The go scheduler's job is to match ready-to-run goroutines (`g's) |
25 // with waiting-for-work schedulers (`m's). If there are ready g's | 25 // with waiting-for-work schedulers (`m's). If there are ready g's |
26 // and no waiting m's, ready() will start a new m running in a new | 26 // and no waiting m's, ready() will start a new m running in a new |
27 // OS thread, so that all ready g's can run simultaneously, up to a limit. | 27 // OS thread, so that all ready g's can run simultaneously, up to a limit. |
28 // For now, m's never go away. | 28 // For now, m's never go away. |
29 // | 29 // |
30 // By default, Go keeps only one kernel thread (m) running user code | 30 // By default, Go keeps only one kernel thread (m) running user code |
31 // at a single time; other threads may be blocked in the operating system. | 31 // at a single time; other threads may be blocked in the operating system. |
32 // Setting the environment variable $GOMAXPROCS or calling | 32 // Setting the environment variable $GOMAXPROCS or calling |
33 // runtime.GOMAXPROCS() will change the number of user threads | 33 // runtime.GOMAXPROCS() will change the number of user threads |
34 // allowed to execute simultaneously. $GOMAXPROCS is thus an | 34 // allowed to execute simultaneously. $GOMAXPROCS is thus an |
35 // approximation of the maximum number of cores to use. | 35 // approximation of the maximum number of cores to use. |
36 // | 36 // |
37 // Even a program that can run without deadlock in a single process | 37 // Even a program that can run without deadlock in a single process |
38 // might use more m's if given the chance. For example, the prime | 38 // might use more m's if given the chance. For example, the prime |
39 // sieve will use as many m's as there are primes (up to runtime·sched.mmax), | 39 // sieve will use as many m's as there are primes (up to $GOMAXPROCS), |
40 // allowing different stages of the pipeline to execute in parallel. | 40 // allowing different stages of the pipeline to execute in parallel. |
41 // We could revisit this choice, only kicking off new m's for blocking | |
42 // system calls, but that would limit the amount of parallel computation | |
43 // that go would try to do. | |
44 // | |
45 // In general, one could imagine all sorts of refinements to the | |
46 // scheduler, but the goal now is just to get something working on | |
47 // Linux and OS X. | |
48 | 41 |
49 typedef struct Sched Sched; | 42 typedef struct Sched Sched; |
50 struct Sched { | 43 struct Sched { |
51 Lock; | 44 Lock; |
52 | 45 |
53 M* mhead; // m's waiting for work | 46 M* mhead; // m's waiting for work |
54 int32 mwait; // number of m's waiting for work | 47 int32 mwait; // number of m's waiting for work |
55 int32 mcount; // number of m's that have been created | 48 int32 mcount; // number of m's that have been created |
56 | 49 |
57 P* pidle; // idle P's | 50 P* pidle; // idle P's |
58 | 51 » int32» npidle; |
| 52 |
| 53 » G*» runqhead; |
| 54 » G*» runqtail; |
| 55 » int32» runqsize; |
| 56 |
| 57 » Lock gflock; |
59 G* gfree; | 58 G* gfree; |
| 59 int32 goidseq; |
| 60 |
| 61 int32 stopwait; |
| 62 Note stopnote; |
| 63 int32 sysmonwait; |
| 64 Note sysmonnote; |
60 | 65 |
61 int32 profilehz; // cpu profiling rate | 66 int32 profilehz; // cpu profiling rate |
62 | 67 |
63 bool init; // running initialization | 68 bool init; // running initialization |
64 bool lockmain; // init called runtime.LockOSThread | 69 bool lockmain; // init called runtime.LockOSThread |
65 }; | 70 }; |
66 | 71 |
67 Sched runtime·sched; | 72 Sched runtime·sched; |
68 int32 runtime·gomaxprocs; | 73 int32 runtime·gomaxprocs; |
69 bool runtime·singleproc; | 74 bool runtime·singleproc; |
70 bool runtime·iscgo; | 75 bool runtime·iscgo; |
71 int32 runtime·gcwaiting; | 76 int32 runtime·gcwaiting; |
72 M runtime·m0; | 77 M runtime·m0; |
73 G runtime·g0; // idle goroutine for m0 | 78 G runtime·g0; // idle goroutine for m0 |
74 static int32 newprocs; | 79 static int32 newprocs; |
75 | 80 |
76 // Keep trace of scavenger's goroutine for deadlock detection. | 81 // Keep trace of scavenger's goroutine for deadlock detection. |
77 static G *scvg; | 82 static G *scvg; |
78 | 83 |
79 // Scheduling helpers. Sched must be locked. | 84 // Scheduling helpers. Sched must be locked. |
80 static void gput(P*, G*);» // put/get on ghead/gtail | 85 static void runqput(P*, G*);» // put/get on ghead/gtail |
81 static G* gget(P*); | 86 static G* runqget(P*); |
82 static void runqgrow(P*); | 87 static void runqgrow(P*); |
83 static G* runqsteal(P*, P*); | 88 static G* runqsteal(P*, P*); |
| 89 static void globrunqput(G*); |
| 90 static G* globrunqget(void); |
84 static void mput(M*); // put/get on mhead | 91 static void mput(M*); // put/get on mhead |
85 static M* mget(void); | 92 static M* mget(void); |
86 static void gfput(P*, G*); // put/get on gfree | 93 static void gfput(P*, G*); // put/get on gfree |
87 static G* gfget(P*); | 94 static G* gfget(P*); |
88 static void mcommoninit(M*); | 95 static void mcommoninit(M*); |
89 static void schedule(void); | 96 static void schedule(void); |
90 static void procresize(int32); | 97 static void procresize(int32); |
91 static void entergo(M*, P*); | 98 static void entergo(M*, P*); |
92 static void leavego(M*, uint32); | 99 static P* releasep(void); |
93 static void newm(void(*)(void), P*, bool); | 100 static M* newm(void(*)(void), P*, bool); |
94 static void goidle(void); | 101 static void goidle(void); |
95 static void mstop(void); | 102 static void mstop(void); |
96 static void initgstack(G*, byte*, int32); | 103 static void initgstack(G*, byte*, int32); |
97 static void sysmon(void); | 104 static void sysmon(void); |
98 | 105 static void inject(G*, int32*, int32*); |
99 static void | 106 static P* pidleget(void); |
100 outputstats(void) | 107 static void pidleput(P*); |
101 { | |
102 » M *mp; | |
103 » SchedStats s; | |
104 » int32 i; | |
105 » uint64 *src, *dst; | |
106 | |
107 » runtime·memclr((byte*)&s, sizeof(s)); | |
108 » for(mp=runtime·allm; mp; mp=mp->alllink) { | |
109 » » src = (uint64*)&mp->schedstats; | |
110 » » dst = (uint64*)&s; | |
111 » » for(i=0; i<sizeof(s)/sizeof(uint64); i++) | |
112 » » » dst[i] += src[i]; | |
113 » } | |
114 » runtime·printf("SchedStats:\n"); | |
115 » runtime·printf("nm %D\n", s.nm); | |
116 » runtime·printf("sysexitfast %D\n", s.sysexitfast); | |
117 » runtime·printf("sysexitmed %D\n", s.sysexitmed); | |
118 » runtime·printf("sysexitslow %D\n", s.sysexitslow); | |
119 » runtime·printf("stealempty %D\n", s.stealempty); | |
120 » runtime·printf("stealn %D\n", s.stealn); | |
121 » runtime·printf("stealcnt %D (%D)\n", s.stealcnt, s.stea
lcnt / (s.stealn ? s.stealn : 1)); | |
122 » runtime·printf("gstart %D\n", s.gstart); | |
123 » runtime·printf("gend %D\n", s.gend); | |
124 » runtime·printf("gfput %D\n", s.gfput); | |
125 » runtime·printf("gfget %D\n", s.gfget); | |
126 » runtime·printf("galloc %D\n", s.galloc); | |
127 } | |
128 | 108 |
129 // The bootstrap sequence is: | 109 // The bootstrap sequence is: |
130 // | 110 // |
131 // call osinit | 111 // call osinit |
132 // call schedinit | 112 // call schedinit |
133 // make & queue new G | 113 // make & queue new G |
134 // call runtime·mstart | 114 // call runtime·mstart |
135 // | 115 // |
136 // The new G calls runtime·main. | 116 // The new G calls runtime·main. |
137 void | 117 void |
(...skipping 23 matching lines...) Expand all Loading... |
161 if(p != nil && (n = runtime·atoi(p)) > 0) { | 141 if(p != nil && (n = runtime·atoi(p)) > 0) { |
162 if(n > maxgomaxprocs) | 142 if(n > maxgomaxprocs) |
163 n = maxgomaxprocs; | 143 n = maxgomaxprocs; |
164 procs = n; | 144 procs = n; |
165 } | 145 } |
166 runtime·allp = (P**)runtime·malloc((maxgomaxprocs+1)*sizeof(runtime·allp
[0])); | 146 runtime·allp = (P**)runtime·malloc((maxgomaxprocs+1)*sizeof(runtime·allp
[0])); |
167 procresize(procs); | 147 procresize(procs); |
168 | 148 |
169 mstats.enablegc = 1; | 149 mstats.enablegc = 1; |
170 m->nomemprof--; | 150 m->nomemprof--; |
| 151 |
| 152 if(raceenabled) |
| 153 runtime·raceinit(); |
171 } | 154 } |
172 | 155 |
173 extern void main·init(void); | 156 extern void main·init(void); |
174 extern void main·main(void); | 157 extern void main·main(void); |
175 | 158 |
176 // The main goroutine. | 159 // The main goroutine. |
177 void | 160 void |
178 runtime·main(void) | 161 runtime·main(void) |
179 { | 162 { |
180 LOG("%d: runtime·main\n", m->id); | 163 LOG("%d: runtime·main\n", m->id); |
(...skipping 11 matching lines...) Expand all Loading... |
192 runtime·sched.init = true; | 175 runtime·sched.init = true; |
193 if(m != &runtime·m0) | 176 if(m != &runtime·m0) |
194 runtime·throw("runtime·main not on m0"); | 177 runtime·throw("runtime·main not on m0"); |
195 scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runti
me·main); | 178 scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runti
me·main); |
196 main·init(); | 179 main·init(); |
197 runtime·sched.init = false; | 180 runtime·sched.init = false; |
198 if(!runtime·sched.lockmain) | 181 if(!runtime·sched.lockmain) |
199 runtime·UnlockOSThread(); | 182 runtime·UnlockOSThread(); |
200 | 183 |
201 main·main(); | 184 main·main(); |
202 » outputstats(); | 185 » if(raceenabled) |
| 186 » » runtime·racefini(); |
203 runtime·exit(0); | 187 runtime·exit(0); |
204 for(;;) | 188 for(;;) |
205 *(int32*)runtime·main = 0; | 189 *(int32*)runtime·main = 0; |
206 } | 190 } |
207 | 191 |
208 void | 192 void |
209 runtime·goroutineheader(G *gp) | 193 runtime·goroutineheader(G *gp) |
210 { | 194 { |
211 int8 *status; | 195 int8 *status; |
212 | 196 |
(...skipping 13 matching lines...) Expand all Loading... |
226 case Gwaiting: | 210 case Gwaiting: |
227 if(gp->waitreason) | 211 if(gp->waitreason) |
228 status = gp->waitreason; | 212 status = gp->waitreason; |
229 else | 213 else |
230 status = "waiting"; | 214 status = "waiting"; |
231 break; | 215 break; |
232 default: | 216 default: |
233 status = "???"; | 217 status = "???"; |
234 break; | 218 break; |
235 } | 219 } |
236 » runtime·printf("goroutine %p [%s]:\n", gp, status); | 220 » runtime·printf("goroutine %D [%s]:\n", gp->goid, status); |
237 } | 221 } |
238 | 222 |
239 void | 223 void |
240 runtime·tracebackothers(G *me) | 224 runtime·tracebackothers(G *me) |
241 { | 225 { |
242 G *gp; | 226 G *gp; |
243 » P *p, **pp; | 227 |
244 | 228 » for(gp = runtime·allg; gp != nil; gp = gp->alllink) { |
245 » for(pp=runtime·allp; p=*pp; pp++) { | 229 » » if(gp == me || gp->status == Gdead) |
246 » » for(gp = p->allg; gp != nil; gp = gp->alllink) { | 230 » » » continue; |
247 » » » if(gp == me || gp->status == Gdead) | 231 » » runtime·printf("\n"); |
248 » » » » continue; | 232 » » runtime·goroutineheader(gp); |
249 » » » runtime·printf("\n"); | 233 » » runtime·traceback(gp->sched.pc, (byte*)gp->sched.sp, 0, gp); |
250 » » » runtime·goroutineheader(gp); | |
251 » » » runtime·traceback(gp->sched.pc, (byte*)gp->sched.sp, 0,
gp); | |
252 » » } | |
253 } | 234 } |
254 } | 235 } |
255 | 236 |
256 static void | 237 static void |
257 mcommoninit(M *mp) | 238 mcommoninit(M *mp) |
258 { | 239 { |
259 runtime·lock(&runtime·sched); | 240 runtime·lock(&runtime·sched); |
260 mp->id = runtime·sched.mcount++; | 241 mp->id = runtime·sched.mcount++; |
261 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks(); | 242 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks(); |
262 runtime·FixAlloc_Init(mp->stackalloc, FixedStack, runtime·SysAlloc, nil,
nil); | 243 runtime·FixAlloc_Init(mp->stackalloc, FixedStack, runtime·SysAlloc, nil,
nil); |
263 | 244 |
264 // does it allocate? | 245 // does it allocate? |
265 //runtime·callers(1, mp->createstack, nelem(mp->createstack)); | 246 //runtime·callers(1, mp->createstack, nelem(mp->createstack)); |
| 247 |
266 // Add to runtime·allm so garbage collector doesn't free m | 248 // Add to runtime·allm so garbage collector doesn't free m |
267 // when it is just in a register or thread-local storage. | 249 // when it is just in a register or thread-local storage. |
268 mp->alllink = runtime·allm; | 250 mp->alllink = runtime·allm; |
269 // runtime·NumCgoCall() iterates over allm w/o locks, | 251 // runtime·NumCgoCall() iterates over allm w/o locks, |
270 // so we need to publish it safely. | 252 // so we need to publish it safely. |
271 runtime·atomicstorep(&runtime·allm, mp); | 253 runtime·atomicstorep(&runtime·allm, mp); |
272 LOG("%d: mcommoninit %d m=%p stackalloc=%p\n", m->id, mp->id, mp, mp->st
ackalloc); | 254 LOG("%d: mcommoninit %d m=%p stackalloc=%p\n", m->id, mp->id, mp, mp->st
ackalloc); |
273 runtime·unlock(&runtime·sched); | 255 runtime·unlock(&runtime·sched); |
| 256 } |
| 257 |
| 258 // Mark g ready to run. |
| 259 void |
| 260 runtime·ready(G *gp) |
| 261 { |
| 262 P *p; |
| 263 M *mp; |
| 264 |
| 265 if(gp->m) |
| 266 runtime·throw("bad g->m in ready"); |
| 267 |
| 268 // Mark runnable. |
| 269 if(gp->status == Grunnable || gp->status == Grunning) { |
| 270 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->sta
tus); |
| 271 runtime·throw("bad g->status in ready"); |
| 272 } |
| 273 gp->status = Grunnable; |
| 274 runqput(m->p, gp); |
| 275 if(runtime·sched.pidle) { |
| 276 runtime·lock(&runtime·sched); |
| 277 p = pidleget(); |
| 278 if(p) { |
| 279 mp = mget(); |
| 280 runtime·unlock(&runtime·sched); |
| 281 if(mp) { |
| 282 entergo(mp, p); |
| 283 runtime·notewakeup(&mp->park); |
| 284 } else { |
| 285 newm(runtime·mstart, p, false); |
| 286 } |
| 287 } else |
| 288 runtime·unlock(&runtime·sched); |
| 289 } |
| 290 } |
| 291 |
| 292 static void |
| 293 munpark(M *mp, P *p) |
| 294 { |
| 295 if(mp) { |
| 296 entergo(mp, p); |
| 297 runtime·notewakeup(&mp->park); |
| 298 } else |
| 299 newm(runtime·mstart, p, false); |
| 300 } |
| 301 ································ |
| 302 int32 |
| 303 runtime·gcprocs(void) |
| 304 { |
| 305 int32 n; |
| 306 |
| 307 runtime·lock(&runtime·sched); |
| 308 n = runtime·gomaxprocs; |
| 309 if(n > runtime·ncpu) |
| 310 n = runtime·ncpu; |
| 311 if(n > MaxGcproc) |
| 312 n = MaxGcproc; |
| 313 runtime·unlock(&runtime·sched); |
| 314 return n; |
| 315 } |
| 316 |
| 317 void |
| 318 runtime·helpgc(int32 nproc) |
| 319 { |
| 320 M *mp; |
| 321 int32 n, pos; |
| 322 |
| 323 LOG("%d: helpgc(%d)\n", m->id, nproc); |
| 324 runtime·lock(&runtime·sched); |
| 325 pos = 0; |
| 326 for(n = 1; n < nproc; n++) { // one M is currently running |
| 327 if(runtime·allp[pos]->mcache == m->mcache) |
| 328 pos++; |
| 329 mp = mget(); |
| 330 if(mp == nil) { |
| 331 runtime·unlock(&runtime·sched); |
| 332 newm(runtime·mstart, runtime·allp[pos], true); |
| 333 runtime·lock(&runtime·sched); |
| 334 pos++; |
| 335 continue; |
| 336 } |
| 337 mp->helpgc = 1; |
| 338 mp->mcache = runtime·allp[pos]->mcache; |
| 339 pos++; |
| 340 LOG("%d: helpgc wake %d\n", m->id, mp->id); |
| 341 runtime·notewakeup(&mp->park); |
| 342 } |
| 343 runtime·unlock(&runtime·sched); |
| 344 } |
| 345 |
| 346 void |
| 347 runtime·stoptheworld(void) |
| 348 { |
| 349 int32 i; |
| 350 uint32 s; |
| 351 P *p; |
| 352 bool wait; |
| 353 |
| 354 LOG("%d: stoptheworld\n", m->id); |
| 355 runtime·lock(&runtime·sched); |
| 356 runtime·gcwaiting = 1; |
| 357 runtime·sched.stopwait = runtime·gomaxprocs; |
| 358 m->p->status = Plocked; |
| 359 runtime·sched.stopwait--; |
| 360 for(i=0; i<runtime·gomaxprocs; i++) { |
| 361 s = runtime·allp[i]->status; |
| 362 if(s == Psyscall && runtime·cas(&runtime·allp[i]->status, s, Plo
cked)) { |
| 363 LOG(" acquired syscall %d\n", i); |
| 364 runtime·sched.stopwait--; |
| 365 } |
| 366 } |
| 367 while(runtime·sched.pidle) { |
| 368 p = pidleget(); |
| 369 p->status = Plocked; |
| 370 runtime·sched.stopwait--; |
| 371 } |
| 372 CHECK(runtime·sched.stopwait >= 0, ("")); |
| 373 wait = runtime·sched.stopwait > 0; |
| 374 runtime·unlock(&runtime·sched); |
| 375 if(wait) { |
| 376 runtime·notesleep(&runtime·sched.stopnote); |
| 377 runtime·noteclear(&runtime·sched.stopnote); |
| 378 } |
| 379 LOG("%d: stoptheworld stopped\n", m->id); |
| 380 CHECK(runtime·sched.stopwait == 0, ("stoptheworld: stopwait == %d\n", ru
ntime·sched.stopwait)); |
| 381 for(i=0; i<runtime·gomaxprocs; i++) { |
| 382 CHECK(runtime·allp[i]->status == Plocked, ("stoptheworld: not st
opped (%d)\n", runtime·allp[i]->status)); |
| 383 } |
| 384 } |
| 385 |
| 386 void |
| 387 runtime·starttheworld(void) |
| 388 { |
| 389 //G *gp; |
| 390 //P *p; |
| 391 //M *mp; |
| 392 //int32 n, w; |
| 393 |
| 394 LOG("%d: starttheworld\n", m->id); |
| 395 runtime·gcwaiting = 0; |
| 396 if(newprocs) { |
| 397 procresize(newprocs); |
| 398 newprocs = 0; |
| 399 } else { |
| 400 procresize(runtime·gomaxprocs); |
| 401 } |
| 402 runtime·lock(&runtime·sched); |
| 403 /* |
| 404 gp = runtime·netwait(0, runtime·gomaxprocs); |
| 405 n = w = 0; |
| 406 inject(gp, &w, &n); |
| 407 while(runtime·sched.pidle) { |
| 408 p = pidleget(); |
| 409 mp = mget(); |
| 410 if(mp) { |
| 411 entergo(mp, p); |
| 412 runtime·notewakeup(&mp->park); |
| 413 } else { |
| 414 runtime·unlock(&runtime·sched); |
| 415 newm(runtime·mstart, p, false); |
| 416 runtime·lock(&runtime·sched); |
| 417 } |
| 418 } |
| 419 */ |
| 420 if(runtime·sched.sysmonwait) { |
| 421 runtime·sched.sysmonwait = 0; |
| 422 runtime·notewakeup(&runtime·sched.sysmonnote); |
| 423 } |
| 424 runtime·unlock(&runtime·sched); |
| 425 } |
| 426 |
| 427 // Called to start an M. |
| 428 void |
| 429 runtime·mstart(void) |
| 430 { |
| 431 // It is used by windows-386 only. Unfortunately, seh needs |
| 432 // to be located on os stack, and mstart runs on os stack |
| 433 // for both m0 and m. |
| 434 SEH seh; |
| 435 P *p; |
| 436 |
| 437 LOG("%d: mstart m=%p\n", m->id, m); |
| 438 if(g != m->g0) |
| 439 runtime·throw("bad runtime·mstart"); |
| 440 |
| 441 // Record top of stack for use by mcall. |
| 442 // Once we call schedule we're never coming back, |
| 443 // so other calls can reuse this stack space. |
| 444 runtime·gosave(&m->g0->sched); |
| 445 m->g0->sched.pc = (void*)-1; // make sure it is never used |
| 446 m->seh = &seh; |
| 447 runtime·asminit(); |
| 448 runtime·minit(); |
| 449 |
| 450 // Install signal handlers; after minit so that minit can |
| 451 // prepare the thread to be able to handle the signals. |
| 452 if(m == &runtime·m0) |
| 453 runtime·initsig(); |
| 454 |
| 455 if(m->helpgc) { |
| 456 LOG("%d: mstart helpgc\n", m->id); |
| 457 m->helpgc = 0; |
| 458 m->mcache = m->p->mcache; |
| 459 runtime·gchelper(); |
| 460 m->mcache = nil; |
| 461 m->p = nil; |
| 462 LOG("%d: gchelper end\n", m->id); |
| 463 mstop(); |
| 464 } else if(m != &runtime·m0) { |
| 465 p = m->p; |
| 466 m->p = nil; |
| 467 entergo(m, p); |
| 468 } |
| 469 LOG("%d: calling schedule\n", m->id); |
| 470 schedule(); |
| 471 |
| 472 // TODO(brainman): This point is never reached, because scheduler |
| 473 // does not release os threads at the moment. But once this path |
| 474 // is enabled, we must remove our seh here. |
| 475 } |
| 476 |
| 477 // When running with cgo, we call libcgo_thread_start |
| 478 // to start threads for us so that we can play nicely with |
| 479 // foreign code. |
| 480 void (*libcgo_thread_start)(void*); |
| 481 |
| 482 typedef struct CgoThreadStart CgoThreadStart; |
| 483 struct CgoThreadStart |
| 484 { |
| 485 M *m; |
| 486 G *g; |
| 487 void (*fn)(void); |
| 488 }; |
| 489 |
| 490 static void |
| 491 initgstack(G *newg, byte *stk, int32 stacksize) |
| 492 { |
| 493 newg->stack0 = (uintptr)stk; |
| 494 newg->stackguard = (uintptr)stk + StackGuard; |
| 495 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop); |
| 496 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); |
| 497 } |
| 498 |
| 499 // Create a new m. It will start off with a call to runtime·mstart. |
| 500 static M* |
| 501 newm(void(*fn)(void), P *p, bool helpgc) |
| 502 { |
| 503 M *mp; |
| 504 int32 addmem,stksiz, stkoff; |
| 505 //!!!static Type *mtype; // The Go type M |
| 506 |
| 507 LOG("%d: newm\n", m->id); |
| 508 addmem = sizeof(*mp->stackalloc); |
| 509 if(runtime·gsignalstk) |
| 510 addmem += sizeof(G) + runtime·gsignalstk; |
| 511 stkoff = sizeof(M) + addmem; |
| 512 stksiz = StackSystem + (fn == runtime·mstart ? 8192 : 64*1024); |
| 513 if(!runtime·iscgo && !Windows) |
| 514 addmem += stksiz; |
| 515 //!!! all that is now non-GC, can it break something? |
| 516 mp = runtime·SysAlloc(sizeof(M) + addmem); |
| 517 mp->stackalloc = (FixAlloc*)(mp+1); |
| 518 //!!!if(mtype == nil) { |
| 519 //!!! Eface e; |
| 520 //!!! runtime·gc_m_ptr(&e); |
| 521 //!!! mtype = ((PtrType*)e.type)->elem; |
| 522 //!!!} |
| 523 //!!! mp = runtime·cnew(mtype); |
| 524 mcommoninit(mp); |
| 525 mp->g0 = &mp->g0buf; |
| 526 mp->p = p; |
| 527 mp->helpgc = helpgc; |
| 528 if(runtime·gsignalstk) { |
| 529 mp->gsignal = (G*)((byte*)mp+sizeof(*mp)+sizeof(*mp->stackalloc)
); |
| 530 initgstack(mp->gsignal, (byte*)(mp->gsignal+1), runtime·gsignals
tk); |
| 531 } |
| 532 |
| 533 if(runtime·iscgo) { |
| 534 CgoThreadStart ts; |
| 535 |
| 536 if(libcgo_thread_start == nil) |
| 537 runtime·throw("libcgo_thread_start missing"); |
| 538 // pthread_create will make us a stack. |
| 539 ts.m = mp; |
| 540 ts.g = mp->g0; |
| 541 ts.fn = fn; |
| 542 runtime·asmcgocall(libcgo_thread_start, &ts); |
| 543 } else { |
| 544 // windows will layout sched stack on os stack |
| 545 if(!Windows) |
| 546 initgstack(mp->g0, (byte*)mp+stkoff, stksiz); |
| 547 runtime·newosproc(mp, mp->g0, (byte*)mp->g0->stackbase, fn); |
| 548 } |
| 549 return mp; |
| 550 } |
| 551 |
| 552 static void |
| 553 mstop(void) |
| 554 { |
| 555 LOG("%d: mstop\n", m->id); |
| 556 CHECK(m->locks == 0, ("")); |
| 557 CHECK(m->p == nil, ("mstop: p != nil\n")); |
| 558 retry: |
| 559 runtime·lock(&runtime·sched); |
| 560 mput(m); |
| 561 runtime·unlock(&runtime·sched); |
| 562 runtime·notesleep(&m->park); |
| 563 runtime·noteclear(&m->park); |
| 564 if(m->helpgc) { |
| 565 LOG("%d: gchelper\n", m->id); |
| 566 m->helpgc = 0; |
| 567 runtime·gchelper(); |
| 568 m->mcache = nil; |
| 569 LOG("%d: gchelper end\n", m->id); |
| 570 goto retry; |
| 571 } |
| 572 LOG("%d: mstop wake\n", m->id); |
| 573 if(m->p == nil) |
| 574 runtime·throw("mstop: p == nil"); |
| 575 } |
| 576 |
| 577 // Schedules gp to run on M. Never returns. |
| 578 static void |
| 579 execute(G *gp) |
| 580 { |
| 581 int32 hz; |
| 582 |
| 583 LOG("%d: start running goroutine %p\n", m->id, gp); |
| 584 CHECK(m->locks == 0, ("")); |
| 585 CHECK(g == m->g0, ("execute: not on g0\n")); |
| 586 CHECK(m->p != nil, ("execute: no p\n")); |
| 587 CHECK(gp->status == Grunnable, ("execute: gp=%d gp->status=%d\n", gp->go
id, gp->status)); |
| 588 CHECK(gp->m == nil, ("execute: gp->m=%p\n", gp->m)); |
| 589 CHECK(gp->lockedm == nil && m->lockedg == nil || gp->lockedm == m && m->
lockedg == gp, |
| 590 ("bad locking: gp->lockedm=%p m->lockedg=%p\n", gp->lockedm, m->
lockedg)); |
| 591 m->p->tick++; |
| 592 gp->status = Grunning; |
| 593 m->curg = gp; |
| 594 gp->m = m; |
| 595 |
| 596 // Check whether the profiler needs to be turned on or off. |
| 597 hz = runtime·sched.profilehz; |
| 598 if(m->profilehz != hz) |
| 599 runtime·resetcpuprofiler(hz); |
| 600 |
| 601 if(gp->sched.pc == (byte*)runtime·goexit) // kickoff |
| 602 runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); |
| 603 runtime·gogo(&gp->sched, 0); |
| 604 } |
| 605 |
| 606 // One round of scheduler: find a goroutine and run it. |
| 607 // The argument is the goroutine that was running before |
| 608 // schedule was called, or nil if this is the first call. |
| 609 // Never returns. |
| 610 static void |
| 611 schedule(void) |
| 612 { |
| 613 int32 i, try; |
| 614 G *gp, *gp1; |
| 615 P *p; |
| 616 M *mp; |
| 617 |
| 618 LOG("%d: schedule p=%p\n", m->id, m->p); |
| 619 USED(&gp); |
| 620 CHECK(m->locks == 0, ("schedule: holding locks\n")); |
| 621 CHECK(m->lockedg == nil, ("schedule: locked M\n")); |
| 622 |
| 623 top: |
| 624 if(runtime·gcwaiting) { |
| 625 p = releasep(); |
| 626 p->status = Plocked; |
| 627 runtime·lock(&runtime·sched); |
| 628 runtime·sched.stopwait--; |
| 629 if(runtime·sched.stopwait == 0) |
| 630 runtime·notewakeup(&runtime·sched.stopnote); |
| 631 runtime·unlock(&runtime·sched); |
| 632 mstop(); |
| 633 goto top; |
| 634 } |
| 635 |
| 636 gp = runqget(m->p); |
| 637 if(gp == nil) { |
| 638 for(try=0; try<2; try++) { |
| 639 if(runtime·sched.runqsize) { |
| 640 runtime·lock(&runtime·sched); |
| 641 gp = globrunqget(); |
| 642 if(gp) { |
| 643 while(gp->schedlink != nil) { |
| 644 gp1 = gp; |
| 645 gp = gp1->schedlink; |
| 646 runqput(m->p, gp1); |
| 647 } |
| 648 } |
| 649 runtime·unlock(&runtime·sched); |
| 650 if(gp) |
| 651 goto haveg; |
| 652 } |
| 653 for(i=0; i<runtime·gomaxprocs; i++) { |
| 654 if(runtime·gcwaiting) |
| 655 goto top; |
| 656 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs]
; |
| 657 if(p == m->p) |
| 658 gp = runqget(p); |
| 659 else |
| 660 gp = runqsteal(m->p, p); |
| 661 if(gp) |
| 662 break; |
| 663 } |
| 664 if(gp) |
| 665 break; |
| 666 if(try==0 && runtime·gcwaiting == 0) |
| 667 runtime·osyield(); |
| 668 } |
| 669 if(gp == nil) { |
| 670 p = releasep(); |
| 671 runtime·lock(&runtime·sched); |
| 672 if(runtime·gcwaiting) { |
| 673 p->status = Plocked; |
| 674 runtime·sched.stopwait--; |
| 675 if(runtime·sched.stopwait == 0) |
| 676 runtime·notewakeup(&runtime·sched.stopno
te); |
| 677 runtime·unlock(&runtime·sched); |
| 678 mstop(); |
| 679 goto top; |
| 680 } |
| 681 pidleput(p); |
| 682 if(runtime·sched.runqsize) { |
| 683 p = pidleget(); |
| 684 runtime·unlock(&runtime·sched); |
| 685 entergo(m, p); |
| 686 goto top; |
| 687 } |
| 688 runtime·unlock(&runtime·sched); |
| 689 for(i=0; i<runtime·gomaxprocs; i++) { |
| 690 p = runtime·allp[i]; |
| 691 if(p && p->runqhead != p->runqtail) { |
| 692 runtime·lock(&runtime·sched); |
| 693 p = pidleget(); |
| 694 runtime·unlock(&runtime·sched); |
| 695 if(p) { |
| 696 entergo(m, p); |
| 697 goto top; |
| 698 } |
| 699 break; |
| 700 } |
| 701 } |
| 702 mstop(); |
| 703 goto top; |
| 704 } |
| 705 } |
| 706 |
| 707 haveg: |
| 708 if(gp->lockedm) { |
| 709 mp = gp->lockedm; |
| 710 p = releasep(); |
| 711 entergo(mp, p); |
| 712 runtime·notewakeup(&mp->park); |
| 713 mstop(); |
| 714 goto top; |
| 715 } |
| 716 |
| 717 execute(gp); |
| 718 } |
| 719 |
| 720 static void |
| 721 park0(G *gp) |
| 722 { |
| 723 P *p; |
| 724 M *mp; |
| 725 |
| 726 USED(&gp); |
| 727 if(m->lockedg) { |
| 728 p = releasep(); |
| 729 if(m->waitunlockf) { |
| 730 m->waitunlockf(m->waitlock); |
| 731 m->waitunlockf = nil; |
| 732 } |
| 733 // After this point another thread may schedule gp on m again. |
| 734 // Schedule another M to run P. |
| 735 runtime·lock(&runtime·sched); |
| 736 mp = mget(); |
| 737 runtime·unlock(&runtime·sched); |
| 738 munpark(mp, p); |
| 739 // Wait until another thread schedules gp and so m again. |
| 740 runtime·notesleep(&m->park); |
| 741 runtime·noteclear(&m->park); |
| 742 execute(gp); // Never returns. |
| 743 } |
| 744 if(m->waitunlockf) { |
| 745 m->waitunlockf(m->waitlock); |
| 746 m->waitunlockf = nil; |
| 747 } |
| 748 schedule(); |
| 749 } |
| 750 |
| 751 // Puts the current goroutine into a waiting state and unlocks the lock. |
| 752 // The goroutine can be made runnable again by calling runtime·ready(gp). |
| 753 void |
| 754 runtime·park(void(*unlockf)(Lock*), Lock *l, int8 *reason) |
| 755 { |
| 756 LOG("%d: park l=%p reason=%s\n", m->id, l, reason); |
| 757 CHECK(g != m->g0, ("park of g0\n")); |
| 758 m->waitlock = l; |
| 759 m->waitunlockf = unlockf; |
| 760 g->status = Gwaiting; |
| 761 g->waitreason = reason; |
| 762 g->m = nil; |
| 763 runtime·mcall(park0); |
| 764 } |
| 765 |
| 766 static void |
| 767 gosched0(G *gp) |
| 768 { |
| 769 P *p; |
| 770 M *mp; |
| 771 |
| 772 LOG("%d: gosched0 gp=%p\n", m->id, gp); |
| 773 gp->status = Grunnable; |
| 774 gp->m = nil; |
| 775 if(m->lockedg) { |
| 776 p = releasep(); |
| 777 runtime·lock(&runtime·sched); |
| 778 globrunqput(gp); |
| 779 // After this point another thread may schedule gp on m again. |
| 780 // Schedule another M to run P. |
| 781 mp = mget(); |
| 782 runtime·unlock(&runtime·sched); |
| 783 munpark(mp, p); |
| 784 // Wait until another thread schedules gp and so m again. |
| 785 runtime·notesleep(&m->park); |
| 786 runtime·noteclear(&m->park); |
| 787 execute(gp); // Never returns. |
| 788 } |
| 789 runtime·lock(&runtime·sched); |
| 790 globrunqput(gp); |
| 791 runtime·unlock(&runtime·sched); |
| 792 schedule(); |
| 793 } |
| 794 |
| 795 void |
| 796 runtime·gosched(void) |
| 797 { |
| 798 runtime·mcall(gosched0); |
| 799 } |
| 800 |
| 801 // On g0. |
| 802 static void |
| 803 goexit0(G *gp) |
| 804 { |
| 805 gp->status = Gdead; |
| 806 gp->m = nil; |
| 807 gp->lockedm = nil; |
| 808 m->lockedg = nil; |
| 809 runtime·unwindstack(gp, nil); |
| 810 gfput(m->p, gp); |
| 811 schedule(); |
| 812 } |
| 813 |
| 814 void |
| 815 runtime·goexit(void) |
| 816 { |
| 817 runtime·mcall(goexit0); |
| 818 } |
| 819 |
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
#pragma textflag 7
void
runtime·entersyscall(void)
{
	P *p;
	M *mp;

	LOG("%d: entersyscall g=%p p=%p\n", m->id, g, m->p);
	// Turn the profiler off while blocked in the syscall.
	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	// Sanity check: the saved sp must lie within the current segment.
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// The caller declared the syscall will block (entersyscallblock):
	// release the P immediately and hand it to a parked M instead of
	// waiting for sysmon's retake to notice.
	if(m->blockingsyscall) {
		m->blockingsyscall = false;
		p = releasep();
		runtime·lock(&runtime·sched);
		mp = mget();
		runtime·unlock(&runtime·sched);
		munpark(mp, p);	// presumably handles mp==nil by starting a new M -- confirm in munpark
		return;
	}

	// Otherwise keep the P in Psyscall; retake() can steal it
	// via cas if we stay in the syscall too long.
	m->mcache = nil;
	m->p->m = nil;
	runtime·atomicstore(&m->p->status, Psyscall);
	if(runtime·gcwaiting) {
		// Stop-the-world in progress: surrender the P now and
		// wake the stopper if we were the last one it waited for.
		runtime·lock(&runtime·sched);
		if (runtime·sched.stopwait > 0 && runtime·cas(&m->p->status, Psyscall, Plocked)) {
			runtime·sched.stopwait--;
			if(runtime·sched.stopwait == 0)
				runtime·notewakeup(&runtime·sched.stopnote);
		}
		runtime·unlock(&runtime·sched);
	}
}
| 874 |
// Same as entersyscall, but the caller promises the syscall
// will block; the flag makes entersyscall hand the P off
// immediately instead of leaving it in Psyscall.
#pragma textflag 7
void
runtime·entersyscallblock(void)
{
	m->blockingsyscall = true;
	runtime·entersyscall();
}
| 882 |
// Slow path of exitsyscall, running on g0: gp failed to
// reacquire a P on the fast path, so either grab an idle P,
// or queue gp globally and park/stop this M.
static void
exitsyscall0(G *gp)
{
	P *p;

	LOG("%d: exitsyscall0\n", m->id);
	gp->status = Grunnable;
	gp->m = nil;
	CHECK(m->park.waitm == nil, ("exitsyscall0: park is signalled\n"));
	runtime·lock(&runtime·sched);
	p = pidleget();
	if(p == nil)
		globrunqput(gp);	// no free P: let any P pick gp up later
	runtime·unlock(&runtime·sched);
	if(p) {
		entergo(m, p);
		execute(gp);  // Never returns.
	}
	if(m->lockedg) {
		// gp is wired to this M; we cannot run anything else.
		CHECK(m->lockedg == gp, ("exitsyscall0: inconsistent locking\n"));
		// Wait until another thread schedules gp and so m again.
		runtime·notesleep(&m->park);
		runtime·noteclear(&m->park);
		execute(gp);  // Never returns.
	}
	mstop();
	schedule();
}
| 911 |
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime·exitsyscall(void)
{
	uint32 s;
	P *p;

	LOG("%d: exitsyscall g=%p\n", m->id, g);

	// Check whether the profiler needs to be turned on.
	if(m->profilehz > 0)
		runtime·setprof(true);

	// Fast path: try to re-acquire the same P we left in Psyscall.
	s = m->p ? m->p->status : Pidle;
	if(s == Psyscall && runtime·cas(&m->p->status, s, Pbusy)) {
		LOG("%d: exitsyscall fast\n", m->id);
		// There's a cpu for us, so we can run.
		m->mcache = m->p->mcache;
		m->p->m = m;
		g->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
		g->gcstack = (uintptr)nil;
		return;
	}

	// Our P was retaken while we were blocked; try to grab an idle P.
	m->p = nil;
	if(runtime·sched.pidle) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		runtime·unlock(&runtime·sched);
		if(p) {
			entergo(m, p);
			g->gcstack = (uintptr)nil;
			return;
		}
	}

	LOG("%d: exitsyscall slow p->status=%d\n", m->id, s);

	// No P available: switch to g0 and block in exitsyscall0.
	runtime·mcall(exitsyscall0);

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	g->gcstack = (uintptr)nil;
}
| 967 |
// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment. The function's return value is in
// m->cret. Copies results back into the previous segment, frees the
// current one, and resumes execution there.
void
runtime·oldstack(void)
{
	Stktop *top, old;
	uint32 argsize;
	uintptr cret;
	byte *sp;
	G *g1;

	//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	// The Stktop at the segment base describes the previous segment.
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	old = *top;	// copy before the segment is freed
	argsize = old.argsize;
	if(argsize > 0) {
		// Copy results back into the caller's frame.
		sp -= argsize;
		runtime·memmove(top->argp, sp, argsize);
	}

	// free==0 means the segment was carved out of existing stack
	// (reflect.call special case in newstack) and must not be freed.
	if(old.free != 0)
		runtime·stackfree((byte*)g1->stackguard - StackGuard, old.free);
	g1->stackbase = (uintptr)old.stackbase;
	g1->stackguard = (uintptr)old.stackguard;

	cret = m->cret;
	m->cret = 0;  // drop reference
	runtime·gogo(&old.gobuf, cret);	// resume in the previous segment
}
| 1001 |
// Called from reflect·call or from runtime·morestack when a new
// stack segment is needed. Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
void
runtime·newstack(void)
{
	int32 framesize, argsize;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;
	bool reflectcall;
	uintptr free;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	g1 = m->curg;

	if(m->morebuf.sp < g1->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, g1->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}
	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

	// framesize==1 is a sentinel from reflect.call, not a real size.
	reflectcall = framesize==1;
	if(reflectcall)
		framesize = 0;

	if(reflectcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > g1->stackguard) {
		// special case: called from reflect.call (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)g1->stackguard - StackGuard;
		free = 0;
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		stk = runtime·stackalloc(framesize);
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;	// oldstack will stackfree this much
	}

//runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
//framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);

	// Record in the new Stktop how to unwind back to the old segment.
	top->stackbase = (byte*)g1->stackbase;
	top->stackguard = (byte*)g1->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;
	m->moreargp = nil;
	m->morebuf.pc = nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = g1->ispanic;
	g1->ispanic = false;

	// Switch g1 onto the new segment.
	g1->stackbase = (uintptr)top;
	g1->stackguard = (uintptr)stk + StackGuard;

	// Copy the arguments just below the Stktop.
	sp = (byte*)top;
	if(argsize > 0) {
		sp -= argsize;
		runtime·memmove(sp, top->argp, argsize);
	}
	if(thechar == '5') {
		// caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = (uintptr)sp;
	label.pc = (byte*)runtime·lessstack;
	label.g = m->curg;
	runtime·gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return
}
| 1096 |
// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack. This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.
static void
mstackalloc(G *gp)
{
	// gp->param carries the requested size in, the stack pointer out.
	gp->param = runtime·stackalloc((uintptr)gp->param);
	runtime·gogo(&gp->sched, 0);	// switch back to gp
}
| 1107 |
// Allocate a new g, with a stack big enough for stacksize bytes.
// A negative stacksize allocates no stack at all.
G*
runtime·malg(int32 stacksize)
{
	G *newg;
	byte *stk;

	// Sanity check that stack.h and the Stktop struct agree.
	if(StackTop < sizeof(Stktop)) {
		runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
		runtime·throw("runtime: bad stack.h");
	}

	newg = runtime·malloc(sizeof(G));
	if(stacksize >= 0) {
		if(g == m->g0) {
			// running on scheduler stack already.
			stk = runtime·stackalloc(StackSystem + stacksize);
		} else {
			// have to call stackalloc on scheduler stack.
			g->param = (void*)(StackSystem + stacksize);
			runtime·mcall(mstackalloc);
			stk = g->param;
			g->param = nil;
		}
		// Stack layout: [StackGuard | usable | Stktop at base].
		newg->stack0 = (uintptr)stk;
		newg->stackguard = (uintptr)stk + StackGuard;
		newg->stackbase = (uintptr)stk + StackSystem + stacksize - sizeof(Stktop);
		runtime·memclr((byte*)newg->stackbase, sizeof(Stktop));
	}
	return newg;
}
| 1139 |
// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred. It's OK for this to call
// functions that split the stack.
#pragma textflag 7
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	// Arguments follow fn on the caller's frame; on ARM ('5')
	// the saved LR sits between fn and the arguments.
	if(thechar == '5')
		argp = (byte*)(&fn+2);  // skip caller's saved LR
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}
| 1159 |
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	M *mp;
	P *p;
	int32 siz;
	//int64 goid;

	//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;	// 8-byte align the frame

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	// Prefer recycling a dead g from the P's free list.
	if((newg = gfget(m->p)) != nil) {
		//!!!if(raceenabled)
		//!!!	runtime·racegostart(goid, callerpc);
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		newg = runtime·malg(StackMin);
		runtime·lock(&runtime·sched);
		newg->goid = ++runtime·sched.goidseq;
		// Link the new g into the global allg list.
		if(runtime·lastg == nil)
			runtime·allg = newg;
		else
			runtime·lastg->alllink = newg;
		runtime·lastg = newg;
		runtime·unlock(&runtime·sched);
	}

	// Copy the go statement's arguments onto the new stack.
	sp = (byte*)newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	LOG("%d: newproc %p\n", m->id, newg);
	// pc is goexit so the goroutine exits cleanly when fn returns;
	// presumably execute() starts fn via newg->entry -- confirm there.
	newg->sched.sp = (uintptr)sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;
	newg->status = Grunnable;
	runqput(m->p, newg);

	// If a P is idle, kick an M awake (or create one) to run it;
	// skipped for the bootstrap runtime·main goroutine.
	if(runtime·sched.pidle && fn != (byte*)runtime·main) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		if(p) {
			mp = mget();
			runtime·unlock(&runtime·sched);
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else
				newm(runtime·mstart, p, false);
		} else
			runtime·unlock(&runtime·sched);
	}
	return newg;
}
| 1236 |
// Breakpoint executes a breakpoint trap (exported to Go).
void
runtime·Breakpoint(void)
{
	runtime·breakpoint();
}
| 1242 |
// Gosched yields the processor, allowing other goroutines
// to run (exported to Go).
void
runtime·Gosched(void)
{
	runtime·gosched();
}
| 1248 |
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
// Returns the previous setting; n <= 0 queries without changing it.
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;

	LOG("%d: gomaxprocsfunc %d\n", m->id, n);
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·lock(&runtime·sched);
	ret = runtime·gomaxprocs;
	if(n <= 0 || n == ret) {
		// Query only, or no change: nothing to do.
		runtime·unlock(&runtime·sched);
		return ret;
	}
	runtime·unlock(&runtime·sched);

	// NOTE(review): gomaxprocs can change between the unlock above and
	// the stop below; presumably worldsema serializes resizes -- confirm.
	// Stop the world; starttheworld applies newprocs via procresize.
	runtime·semacquire(&runtime·worldsema);
	m->gcing = 1;
	runtime·stoptheworld();
	newprocs = n;
	m->gcing = 0;
	runtime·semrelease(&runtime·worldsema);
	runtime·starttheworld();

	return ret;
}
| 1277 |
| 1278 void |
| 1279 runtime·LockOSThread(void) |
| 1280 { |
| 1281 if(m == &runtime·m0 && runtime·sched.init) { |
| 1282 runtime·sched.lockmain = true; |
| 1283 return; |
| 1284 } |
| 1285 m->lockedg = g; |
| 1286 g->lockedm = m; |
| 1287 } |
| 1288 |
| 1289 void |
| 1290 runtime·UnlockOSThread(void) |
| 1291 { |
| 1292 if(m == &runtime·m0 && runtime·sched.init) { |
| 1293 runtime·sched.lockmain = false; |
| 1294 return; |
| 1295 } |
| 1296 m->lockedg = nil; |
| 1297 g->lockedm = nil; |
| 1298 } |
| 1299 |
| 1300 bool |
| 1301 runtime·lockedOSThread(void) |
| 1302 { |
| 1303 return g->lockedm != nil && m->lockedg != nil; |
| 1304 } |
| 1305 |
// for testing of callbacks
void
runtime·golockedOSThread(bool ret)
{
	// ret is the Go-visible return slot on the caller's frame.
	ret = runtime·lockedOSThread();
	FLUSH(&ret);
}
| 1313 |
// for testing of wire, unwire
// Returns the current M's id through the Go return slot.
void
runtime·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}
| 1321 |
// Go entry point for runtime.NumGoroutine.
// TODO: goroutine accounting is not yet wired up in this
// scheduler rewrite; reports 1 for now.
void
runtime·NumGoroutine(intgo ret)
{
	//ret = runtime·sched.gcount;
	ret = 1;
	FLUSH(&ret);
}
| 1329 |
// Number of goroutines.
// TODO: accounting not yet wired up in this rewrite; returns 1.
int32
runtime·gcount(void)
{
	//return runtime·sched.gcount;
	return 1;
}
| 1336 |
// Number of Ms (OS threads) recorded in the scheduler.
int32
runtime·mcount(void)
{
	return runtime·sched.mcount;
}
| 1342 |
// Fatal-error stub: mcall was invoked while already on g0.
void
runtime·badmcall(void)	// called from assembly
{
	runtime·throw("runtime: mcall called on m->g0 stack");
}
| 1348 |
// Fatal-error stub: an mcall'd function returned instead of
// switching away.
void
runtime·badmcall2(void)	// called from assembly
{
	runtime·throw("runtime: mcall function returned");
}
| 1354 |
// CPU-profiling state, shared with the SIGPROF handler.
static struct {
	Lock;				// protects fn/hz/pcbuf
	void (*fn)(uintptr*, int32);	// callback invoked with each traceback
	int32 hz;			// sampling rate, samples per second
	uintptr pcbuf[100];		// scratch buffer for one traceback
} prof;
| 1361 |
// Called if we receive a SIGPROF signal.
// Records a traceback of the interrupted goroutine and feeds
// it to the registered profiling callback.
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
{
	int32 n;

	// Cheap unlocked check so disabled profiling costs nothing.
	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime·lock(&prof);
	if(prof.fn == nil) {
		// Profiling was turned off while we waited for the lock.
		runtime·unlock(&prof);
		return;
	}
	n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
	if(n > 0)
		prof.fn(prof.pcbuf, n);
	runtime·unlock(&prof);
}
| 1381 |
// Arrange to call fn with a traceback hz times a second.
// hz==0 or fn==nil disables profiling.
void
runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime·resetcpuprofiler(0);

	runtime·lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime·unlock(&prof);
	// Publish the rate in sched; presumably propagated to each
	// M's profilehz (checked in entersyscall/exitsyscall) -- confirm.
	runtime·lock(&runtime·sched);
	runtime·sched.profilehz = hz;
	runtime·unlock(&runtime·sched);

	if(hz != 0)
		runtime·resetcpuprofiler(hz);
}
| 1410 |
// Change number of processors. The world is stopped.
// Grows or shrinks the runtime·allp array contents, moves queued
// goroutines off disappearing Ps, and leaves the current M bound
// to allp[0] with all other Ps idle.
static void
procresize(int32 new)
{
	int32 i, old;
	G *gp;
	P *p;

	runtime·lock(&runtime·sched); //!!!
	old = runtime·gomaxprocs;
	LOG("%d: procresize %d->%d\n", m->id, old, new);
	if(old < 0 || old > maxgomaxprocs || new <= 0 || new > maxgomaxprocs)
		runtime·throw("procresize: invalid arg");
	// Same size: just reset every P's status, keeping our own P busy.
	if(old == new) {
		for(i=0; i<new; i++) {
			p = runtime·allp[i];
			if(p == m->p)
				p->status = Pbusy;
			else {
				p->status = Pidle;
				pidleput(p);
			}
		}
		runtime·unlock(&runtime·sched);
		return;
	}

	// Initialize any P slots that are newly in use.
	runtime·singleproc = new == 1;
	runtime·gomaxprocs = new;
	for(i=0; i<new; i++) {
		p = runtime·allp[i];
		if(p == nil) {
			p = (P*)runtime·mallocgc(sizeof(runtime·allp[i][0]), 0, 0, 1);
			p->status = Plocked;
			runtime·allp[i] = p; //@@@ store-release
		}
		if(p->mcache == nil) {
			if(old==0 && i==0)
				p->mcache = m->mcache;	// bootstrap: adopt the boot M's cache
			else
				p->mcache = runtime·allocmcache();
		}
		if(p->runq == nil) {
			p->runqsize = 1024;
			p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*), 0, 0, 1);
		}
	}

	// Drain run queues of every P except allp[0] onto allp[0].
	for(i=1; i<old; i++) {
		for(;;) {
			gp = runqget(runtime·allp[i]);
			if(gp == nil)
				break;
			//TODO: spread more evenly.
			runqput(runtime·allp[0], gp);
		}
	}

	// Free resources of Ps no longer in use.
	for(i=new; i<old; i++) {
		runtime·freemcache(runtime·allp[i]->mcache);
		runtime·allp[i]->mcache = nil;
		runtime·allp[i]->status = Pdead;
		//TODO: free freeg
	}

	// Rebind the current M to allp[0]; all other Ps go idle.
	if(m->p)
		m->p->m = nil;
	m->p = nil;
	m->mcache = nil;
	runtime·allp[0]->m = nil;
	runtime·allp[0]->status = Pidle;
	entergo(m, runtime·allp[0]);
	for(i=1; i<new; i++) {
		p = runtime·allp[i];
		p->status = Pidle;
		pidleput(p);
	}
	runtime·unlock(&runtime·sched);
}
| 1490 |
// Associate M mp with P p and mark the pair running.
// p must be Pidle and unowned; throws otherwise.
// Counterpart of releasep.
static void
entergo(M *mp, P *p)
{
	LOG("%d: entergo m=%d p=%p p->m=%p, p->status=%d, p->mcache=%p\n", m->id, mp->id, p, p->m, p->status, p->mcache);
	if(mp->p || mp->mcache)
		runtime·throw("entergo: already in go");
	if(p->m || p->status != Pidle) {
		runtime·printf("entergo: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
		runtime·throw("entergo: invalid p state");
	}
	mp->mcache = p->mcache;
	mp->p = p;
	p->m = mp;
	p->status = Pbusy;
}
| 1506 |
| 1507 static P* |
| 1508 releasep(void) |
| 1509 { |
| 1510 M *mp; |
| 1511 P *p; |
| 1512 |
| 1513 mp = m; |
| 1514 LOG("%d: releasep\n", mp->id); |
| 1515 // sched is locked |
| 1516 if(mp->p == nil || mp->mcache == nil) |
| 1517 runtime·throw("releasep: invalid arg"); |
| 1518 p = mp->p; |
| 1519 if(p->m != mp || p->mcache != mp->mcache || p->status != Pbusy) { |
| 1520 runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->m
cache=%p p->status=%d\n", |
| 1521 mp, mp->p, p->m, m->mcache, p->mcache, p->status); |
| 1522 runtime·throw("releasep: invalid p state"); |
| 1523 } |
| 1524 mp->p = nil; |
| 1525 mp->mcache = nil; |
| 1526 p->m = nil; |
| 1527 p->status = Pidle; |
| 1528 return p; |
| 1529 } |
| 1530 |
typedef struct Pdesc Pdesc;
// Pdesc holds sysmon's last observation of a P, used by retake
// to decide when a P has been stuck in a syscall too long.
struct Pdesc
{
	uint32 tick;	// last observed p->tick
	int64 when;	// time (ns since sysmon start) when tick last changed
};
| 1537 |
// Retake Ps that have sat in Psyscall without progress for
// ~20us: flip them to Pidle via cas and hand each to a parked
// M. Called periodically from sysmon with its Pdesc array.
static void
retake(int64 now, Pdesc *ps)
{
	uint32 i, s;
	int64 t;
	P *p;
	M *mp;

	for(i=0; i<runtime·gomaxprocs; i++) {
		p = runtime·allp[i];
		//!!! procresize may be in progress
		// do something if GC is in progress (help).
		if(p==nil)
			continue;
		t = p->tick;
		if(ps[i].tick != t) {
			// P made progress since last look; restart its timer.
			ps[i].tick = t;
			ps[i].when = now;
		}
		// now is in ns, so this is a 20us threshold.
		if(ps[i].when + 20*1000 > now)
			continue;
		s = p->status;
		if(s == Psyscall && runtime·cas(&p->status, s, Pidle)) {
			LOG("retake %p(%d)\n", p, i);
			runtime·lock(&runtime·sched);
			mp = mget();
			runtime·unlock(&runtime·sched);
			munpark(mp, p);
		}
	}
}
| 1569 |
// Per-P observations for retake, indexed like runtime·allp.
static Pdesc ps[maxgomaxprocs];

// Background monitor loop; runs on its own dedicated thread.
static void
sysmon(void)
{
	int64 t0, now;

	// This is a special dedicated thread.
	// It works w/o mcache nor stackalloc, it may work concurrently with GC.
	runtime·asminit();
	runtime·minit();
	LOG("sysmon\n");
	t0 = runtime·nanotime();
	for(;;) {
		//!!! sleep more if possible
		runtime·usleep(20);
		// Park while the world is stopped so we don't fight GC;
		// double-checked under the sched lock.
		if(runtime·gcwaiting) {
			runtime·lock(&runtime·sched);
			if(runtime·gcwaiting) {
				runtime·sched.sysmonwait = 1;
				runtime·unlock(&runtime·sched);
				runtime·notesleep(&runtime·sched.sysmonnote);
				runtime·noteclear(&runtime·sched.sysmonnote);
			} else
				runtime·unlock(&runtime·sched);
		}
		// now is relative to sysmon startup, in ns.
		now = runtime·nanotime() - t0;
		retake(now, ps);
	}
}
| 1600 |
// Inject the list gp0 (linked through schedlink) into the global
// run queue, then wake idle Ps to run them. Increments *n once
// per injected goroutine and *w once per P started.
static void
inject(G *gp0, int32 *w, int32 *n)
{
	int32 nw;
	G *gp;
	M *mp;
	P *p;

	runtime·lock(&runtime·sched);
	while(gp0) {
		gp = gp0;
		gp0 = gp->schedlink;
		gp->status = Grunnable;
		globrunqput(gp);
		(*n)++;
	}
	runtime·unlock(&runtime·sched);

	// Wake at most one P per injected goroutine, pairing each
	// with a parked M or creating a fresh one.
	nw = *n;
	while(runtime·sched.pidle && nw) {
		runtime·lock(&runtime·sched);
		if(runtime·sched.pidle == nil) {
			// Raced with someone taking the last idle P.
			runtime·unlock(&runtime·sched);
			break;
		}
		(*w)++;
		nw--;
		p = pidleget();
		mp = mget();
		runtime·unlock(&runtime·sched);
		if(mp) {
			entergo(mp, p);
			runtime·notewakeup(&mp->park);
		} else
			newm(runtime·mstart, p, false);
	}
}
| 1638 |
| 1639 static void |
| 1640 globrunqput(G *gp) |
| 1641 { |
| 1642 gp->schedlink = nil; |
| 1643 if(runtime·sched.runqtail) |
| 1644 runtime·sched.runqtail->schedlink = gp; |
| 1645 else |
| 1646 runtime·sched.runqhead = gp; |
| 1647 runtime·sched.runqtail = gp; |
| 1648 runtime·sched.runqsize++; |
| 1649 } |
| 1650 |
// Grab a batch of goroutines from the global runnable queue,
// returned as a list linked through schedlink (in reverse of
// queue order). Caller holds the sched lock.
static G*
globrunqget(void)
{
	G *gp, *gp1;
	int32 n;

	if(runtime·sched.runqsize == 0)
		return nil;
	// Take a proportional share of the queue, at least one.
	n = runtime·sched.runqsize/runtime·gomaxprocs+1;
	if(n > runtime·sched.runqsize)
		n = runtime·sched.runqsize;
	runtime·sched.runqsize -= n;
	if(runtime·sched.runqsize == 0)
		runtime·sched.runqtail = nil;
	// Pop n goroutines off the head into a private list.
	gp1 = nil;
	while(n--) {
		gp = runtime·sched.runqhead;
		runtime·sched.runqhead = gp->schedlink;
		gp->schedlink = gp1;
		gp1 = gp;
	}
	return gp1;
}
| 1674 |
| 1675 // sched is locked |
| 1676 static P* |
| 1677 pidleget(void) |
| 1678 { |
| 1679 P *p; |
| 1680 ········ |
| 1681 p = runtime·sched.pidle; |
| 1682 if(p) { |
| 1683 runtime·sched.pidle = p->link; |
| 1684 runtime·sched.npidle--; |
| 1685 } |
| 1686 return p; |
| 1687 } |
| 1688 |
// sched is locked
// Push p onto the idle P list.
static void
pidleput(P *p)
{
	p->link = runtime·sched.pidle;
	runtime·sched.pidle = p;
	runtime·sched.npidle++;
}
275 | 1697 |
276 static void | 1698 static void |
277 runqgrow(P *p) | 1699 runqgrow(P *p) |
278 { | 1700 { |
279 G **q; | 1701 G **q; |
280 int32 s, t, h, t2; | 1702 int32 s, t, h, t2; |
281 | 1703 |
282 h = p->runqhead; | 1704 h = p->runqhead; |
283 t = p->runqtail; | 1705 t = p->runqtail; |
(...skipping 13 matching lines...) Expand all Loading... |
297 p->runqtail = t2; | 1719 p->runqtail = t2; |
298 p->runqsize = 2*s; | 1720 p->runqsize = 2*s; |
299 } | 1721 } |
300 | 1722 |
301 static G* | 1723 static G* |
302 runqsteal(P *p, P *p2) | 1724 runqsteal(P *p, P *p2) |
303 { | 1725 { |
304 G *gp, *gp1; | 1726 G *gp, *gp1; |
305 int32 t, h, s, t2, h2, s2, c, c1; | 1727 int32 t, h, s, t2, h2, s2, c, c1; |
306 | 1728 |
307 » if(p2->runqhead==p2->runqtail) { | 1729 » if(p2->runqhead==p2->runqtail) |
308 » » m->schedstats.stealempty++; | |
309 return nil; | 1730 return nil; |
310 } | |
311 if(p < p2) | 1731 if(p < p2) |
312 runtime·lock(p); | 1732 runtime·lock(p); |
313 runtime·lock(p2); | 1733 runtime·lock(p2); |
314 if(p2->runqhead==p2->runqtail) { | 1734 if(p2->runqhead==p2->runqtail) { |
315 runtime·unlock(p2); | 1735 runtime·unlock(p2); |
316 if(p < p2) | 1736 if(p < p2) |
317 runtime·unlock(p); | 1737 runtime·unlock(p); |
318 m->schedstats.stealempty++; | |
319 return nil; | 1738 return nil; |
320 } | 1739 } |
321 if(p >= p2) | 1740 if(p >= p2) |
322 runtime·lock(p); | 1741 runtime·lock(p); |
323 h = p->runqhead; | 1742 h = p->runqhead; |
324 t = p->runqtail; | 1743 t = p->runqtail; |
325 s = p->runqsize; | 1744 s = p->runqsize; |
326 h2 = p2->runqhead; | 1745 h2 = p2->runqhead; |
327 t2 = p2->runqtail; | 1746 t2 = p2->runqtail; |
328 s2 = p2->runqsize; | 1747 s2 = p2->runqsize; |
(...skipping 18 matching lines...) Expand all Loading... |
347 if(h2 == s2) | 1766 if(h2 == s2) |
348 h2 = 0; | 1767 h2 = 0; |
349 p->runq[t] = gp1; | 1768 p->runq[t] = gp1; |
350 t++; | 1769 t++; |
351 if(t==s) | 1770 if(t==s) |
352 t = 0; | 1771 t = 0; |
353 c1++; | 1772 c1++; |
354 } | 1773 } |
355 p->runqtail = t; | 1774 p->runqtail = t; |
356 p2->runqhead = h2; | 1775 p2->runqhead = h2; |
357 m->schedstats.stealn++; | |
358 m->schedstats.stealcnt += c1 + 1; | |
359 runtime·unlock(p2); | 1776 runtime·unlock(p2); |
360 runtime·unlock(p); | 1777 runtime·unlock(p); |
361 return gp; | 1778 return gp; |
362 } | 1779 } |
363 | 1780 |
// Put g on runnable queue.
// p->runq is a circular buffer guarded by p's lock,
// grown on demand when it fills up.
static void
runqput(P *p, G *gp)
{
	int32 h, t, s;

	runtime·lock(p);
retry:
	h = p->runqhead;
	t = p->runqtail;
	s = p->runqsize;
	// Full when the tail is one slot behind the head (mod s).
	if(t==h-1 || (h==0 && t==s-1)) {
		runqgrow(p);
		goto retry;
	}
	p->runq[t] = gp;
	t++;
	if(t==s)
		t = 0;
	p->runqtail = t;
	runtime·unlock(p);
}
386 | 1803 |
387 // Get g from runnable queue. | 1804 // Get g from runnable queue. |
388 static G* | 1805 static G* |
389 gget(P *p) | 1806 runqget(P *p) |
390 { | 1807 { |
391 G *gp; | 1808 G *gp; |
392 int32 t, h, s; | 1809 int32 t, h, s; |
393 | 1810 |
394 if(p->runqhead==p->runqtail) | 1811 if(p->runqhead==p->runqtail) |
395 return nil; | 1812 return nil; |
396 runtime·lock(p); | 1813 runtime·lock(p); |
397 h = p->runqhead; | 1814 h = p->runqhead; |
398 t = p->runqtail; | 1815 t = p->runqtail; |
399 s = p->runqsize; | 1816 s = p->runqsize; |
(...skipping 25 matching lines...) Expand all Loading... |
425 { | 1842 { |
426 M *mp; | 1843 M *mp; |
427 | 1844 |
428 if((mp = runtime·sched.mhead) != nil){ | 1845 if((mp = runtime·sched.mhead) != nil){ |
429 runtime·sched.mhead = mp->schedlink; | 1846 runtime·sched.mhead = mp->schedlink; |
430 runtime·sched.mwait--; | 1847 runtime·sched.mwait--; |
431 } | 1848 } |
432 return mp; | 1849 return mp; |
433 } | 1850 } |
434 | 1851 |
435 // Mark g ready to run. | 1852 // Put on gfree list. |
436 void | |
437 runtime·ready(G *gp) | |
438 { | |
439 if(gp->m) | |
440 runtime·throw("bad g->m in ready"); | |
441 | |
442 // Mark runnable. | |
443 if(gp->status == Grunnable || gp->status == Grunning) { | |
444 runtime·printf("goroutine %p has status %d\n", gp, gp->status); | |
445 runtime·throw("bad g->status in ready"); | |
446 } | |
447 gp->status = Grunnable; | |
448 gput(m->p, gp); | |
449 //!!! check pidle | |
450 } | |
451 | |
452 int32 | |
453 runtime·gcprocs(void) | |
454 { | |
455 int32 n; | |
456 | |
457 runtime·lock(&runtime·sched); | |
458 n = runtime·gomaxprocs; | |
459 if(n > runtime·ncpu) | |
460 n = runtime·ncpu; | |
461 if(n > MaxGcproc) | |
462 n = MaxGcproc; | |
463 runtime·unlock(&runtime·sched); | |
464 return n; | |
465 } | |
466 | |
467 void | |
468 runtime·helpgc(int32 nproc) | |
469 { | |
470 M *mp; | |
471 int32 n, pos; | |
472 | |
473 //!!! goidle might just take one idle m | |
474 LOG("%d: helpgc(%d)\n", m->id, nproc); | |
475 runtime·lock(&runtime·sched); | |
476 pos = 0; | |
477 for(n = 1; n < nproc; n++) { // one M is currently running | |
478 if(runtime·allp[pos]->mcache == m->mcache) | |
479 pos++; | |
480 mp = mget(); | |
481 if(mp == nil) { | |
482 runtime·unlock(&runtime·sched); | |
483 newm(runtime·mstart, runtime·allp[pos], true); | |
484 runtime·lock(&runtime·sched); | |
485 pos++; | |
486 continue; | |
487 } | |
488 mp->helpgc = 1; | |
489 mp->mcache = runtime·allp[pos]->mcache; | |
490 pos++; | |
491 LOG("%d: helpgc wake %d\n", m->id, mp->id); | |
492 runtime·notewakeup(&mp->park); | |
493 } | |
494 runtime·unlock(&runtime·sched); | |
495 } | |
496 | |
// Stop all P's so the caller (e.g. the garbage collector) gets
// exclusive access. Sets runtime·gcwaiting so running M's park in
// schedule(), then spins until every P is accounted for: our own P is
// taken directly, P's in Psyscall are CAS'd to Plocked, and idle P's
// are drained from sched.pidle.
void
runtime·stoptheworld(void)
{
	int32 acquired, i;
	uint32 s;
	P *p;

	LOG("%d: stoptheworld\n", m->id);
	runtime·gcwaiting = 1; //@ atomic
	acquired = 1;	// our own P counts as acquired
	m->p->status = Plocked;
	while(acquired != runtime·gomaxprocs) {
		LOG(" idle=%p\n", runtime·sched.pidle);
		for(i=0; i<runtime·gomaxprocs; i++)
			LOG(" %d status=%d\n", i, runtime·allp[i]->status);
		// Grab P's sitting in system calls.
		for(i=0; i<runtime·gomaxprocs; i++) {
			s = runtime·allp[i]->status;
			if(s == Psyscall && runtime·cas(&runtime·allp[i]->status, s, Plocked)) {
				LOG(" acquired syscall %d\n", i);
				acquired++;
			}
		}
		// Grab P's sitting on the idle list.
		runtime·lock(&runtime·sched);
		while (runtime·sched.pidle != nil) {
			p = runtime·sched.pidle;
			p->status = Plocked;
			runtime·sched.pidle = p->link;
			LOG(" acquired idle\n");
			acquired++;
		}
		runtime·unlock(&runtime·sched);
		//!!! replace with blocking
		if(acquired != runtime·gomaxprocs) {
			runtime·usleep(1);	// busy-wait for the stragglers
		}
	}
	LOG("%d: stoptheworld stopped\n", m->id);
}
535 | |
// Undo stoptheworld: apply any pending GOMAXPROCS change via
// procresize, then hand each idle P to an M — reusing a parked M when
// one is available, otherwise starting a fresh one.
void
runtime·starttheworld(void)
{
	P *p;
	M *mp;

	LOG("%d: starttheworld\n", m->id);
	runtime·gcwaiting = 0;
	if(newprocs) {
		// GOMAXPROCS changed while the world was stopped.
		procresize(newprocs);
		newprocs = 0;
	} else {
		procresize(runtime·gomaxprocs);
	}
	// TODO(dvyukov): re-balance G's among P's
	runtime·lock(&runtime·sched);
	while(runtime·sched.pidle) {
		// Pop an idle P and find an M to run it.
		p = runtime·sched.pidle;
		p->status = Pbusy;
		runtime·sched.pidle = p->link;
		mp = runtime·sched.mhead;
		if(mp) {
			runtime·sched.mhead = mp->schedlink;
			runtime·sched.mcount--;
		}
		if(mp) {
			// Parked M available: wire it to p and wake it.
			entergo(mp, p);
			runtime·notewakeup(&mp->park);
		} else {
			// No parked M: create one for p (drop the lock
			// because newm allocates and may relock sched).
			runtime·unlock(&runtime·sched);
			newm(runtime·mstart, p, false);
			runtime·lock(&runtime·sched);
		}
	}
	runtime·unlock(&runtime·sched);
	runtime·gosched();
}
573 | |
// Called to start an M. Entry point for every M (OS thread); runs on
// the M's g0 stack. Records the scheduler stack for mcall, runs
// per-thread init, optionally serves one round of GC help, then
// enters the scheduler loop and never returns.
void
runtime·mstart(void)
{
	// It is used by windows-386 only. Unfortunately, seh needs
	// to be located on os stack, and mstart runs on os stack
	// for both m0 and m.
	SEH seh;
	P *p;

	LOG("%d: mstart m=%p\n", m->id, m);
	if(g != m->g0)
		runtime·throw("bad runtime·mstart");

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	runtime·gosave(&m->g0->sched);
	m->g0->sched.pc = (void*)-1;  // make sure it is never used
	m->seh = &seh;
	runtime·asminit();
	runtime·minit();

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if(m == &runtime·m0)
		runtime·initsig();

	if(m->helpgc) {
		// Created to help the GC: run one round of gchelper,
		// then drop the borrowed mcache/P and park in mstop.
		LOG("%d: mstart helpgc\n", m->id);
		m->helpgc = 0;
		m->mcache = m->p->mcache;
		runtime·gchelper();
		m->mcache = nil;
		m->p = nil;
		LOG("%d: gchelper end\n", m->id);
		mstop();
	} else if(m != &runtime·m0) {
		// Wire this M to the P it was created for.
		p = m->p;
		m->p = nil;
		entergo(m, p);
	}
	LOG("%d: calling schedule\n", m->id);
	schedule();

	// TODO(brainman): This point is never reached, because scheduler
	// does not release os threads at the moment. But once this path
	// is enabled, we must remove our seh here.
}
623 | |
// When running with cgo, we call libcgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
void (*libcgo_thread_start)(void*);

// Arguments handed to libcgo_thread_start for a new thread.
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;			// the M the new thread will run
	G *g;			// that M's g0
	void (*fn)(void);	// thread entry (runtime·mstart at the call sites here)
};
636 | |
637 static void | |
638 initgstack(G *newg, byte *stk, int32 stacksize) | |
639 { | |
640 newg->stack0 = (uintptr)stk; | |
641 newg->stackguard = (uintptr)stk + StackGuard; | |
642 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop); | |
643 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); | |
644 } | |
645 | |
// Create a new m. It will start off with a call to runtime·mstart.
// The M, its FixAlloc, its gsignal G + stack (if the OS uses one) and
// its g0 stack (non-cgo, non-Windows only) are carved out of a single
// SysAlloc region, in that order.
static void
newm(void(*fn)(void), P *p, bool helpgc)
{
	M *mp;
	int32 addmem, stkoff;

	LOG("%d: newm\n", m->id);
	m->schedstats.nm++;
	addmem = sizeof(*mp->stackalloc);
	if(runtime·gsignalstk)
		addmem += sizeof(G) + runtime·gsignalstk;
	stkoff = sizeof(M) + addmem;	// g0 stack (if any) starts here
	if(!runtime·iscgo && !Windows)
		addmem += StackSystem + 8192;	// room for the g0 stack
	//!!! all that is now non-GC, can it break something?
	mp = runtime·SysAlloc(sizeof(M) + addmem);
	mp->stackalloc = (FixAlloc*)(mp+1);
	mcommoninit(mp);
	mp->g0 = &mp->g0buf;
	mp->p = p;
	mp->helpgc = helpgc;
	if(runtime·gsignalstk) {
		// gsignal G sits right after the FixAlloc; its stack follows it.
		mp->gsignal = (G*)((byte*)mp+sizeof(*mp)+sizeof(*mp->stackalloc));
		initgstack(mp->gsignal, (byte*)(mp->gsignal+1), runtime·gsignalstk);
	}

	if(runtime·iscgo) {
		CgoThreadStart ts;

		if(libcgo_thread_start == nil)
			runtime·throw("libcgo_thread_start missing");
		// pthread_create will make us a stack.
		ts.m = mp;
		ts.g = mp->g0;
		ts.fn = fn;
		runtime·asmcgocall(libcgo_thread_start, &ts);
	} else {
		// windows will layout sched stack on os stack
		if(!Windows)
			initgstack(mp->g0, (byte*)mp+stkoff, StackSystem + 8192);
		runtime·newosproc(mp, mp->g0, (byte*)mp->g0->stackbase, fn);
	}
}
690 | |
// Park the current M on the idle M list until another M wakes it with
// work. On wakeup it may instead be asked to help the garbage
// collector (m->helpgc), in which case it runs gchelper and parks
// again; a normal wakeup must have wired a P to it first (checked by
// the throw at the end).
static void
mstop(void)
{
	LOG("%d: mstop\n", m->id);
	if(m->p != nil)
		runtime·throw("mstop: p != nil");
retry:
	runtime·noteclear(&m->park);
	runtime·lock(&runtime·sched);
	mput(m);
	runtime·unlock(&runtime·sched);
	runtime·notesleep(&m->park);
	if(m->helpgc) {
		LOG("%d: gchelper\n", m->id);
		m->helpgc = 0;
		runtime·gchelper();
		m->mcache = nil;
		LOG("%d: gchelper end\n", m->id);
		goto retry;
	}
	LOG("%d: mstop wake\n", m->id);
	if(m->p == nil)
		runtime·throw("mstop: p == nil");
}
715 | |
716 // One round of scheduler: find a goroutine and run it. | |
717 // The argument is the goroutine that was running before | |
718 // schedule was called, or nil if this is the first call. | |
719 // Never returns. | |
720 static void | |
721 schedule(void) | |
722 { | |
723 int32 hz, i, try; | |
724 G *gp; | |
725 P *p; | |
726 | |
727 LOG("%d: schedule p=%p\n", m->id, m->p); | |
728 USED(&gp); | |
729 if(m->locks != 0) | |
730 runtime·throw("schedule holding locks"); | |
731 if(gp == m->g0) | |
732 runtime·throw("schedule of g0"); | |
733 | |
734 top: | |
735 if(runtime·gcwaiting) { | |
736 leavego(m, Pidle); | |
737 mstop(); | |
738 goto top; | |
739 } | |
740 | |
741 gp = gget(m->p); | |
742 if(gp == nil) { | |
743 //!!! random stealing | |
744 for(try=0; try<1; try++) { | |
745 for(i=0; i<runtime·gomaxprocs; i++) { | |
746 p = runtime·allp[i]; | |
747 if(p == m->p) | |
748 gp = gget(p); | |
749 else | |
750 gp = runqsteal(m->p, p); | |
751 if(gp) | |
752 break; | |
753 } | |
754 if(gp) | |
755 break; | |
756 //runtime·usleep(20); | |
757 } | |
758 if(gp == nil) { | |
759 leavego(m, Pidle); | |
760 for(i=0; i<runtime·gomaxprocs; i++) { | |
761 p = runtime·allp[i]; | |
762 if(p && p->runqhead != p->runqtail) { | |
763 runtime·lock(&runtime·sched); | |
764 p = runtime·sched.pidle; | |
765 if(p) | |
766 runtime·sched.pidle = p->link; | |
767 runtime·unlock(&runtime·sched); | |
768 if(p) { | |
769 p->status = Pbusy; | |
770 entergo(m, p); | |
771 goto top; | |
772 } | |
773 break; | |
774 } | |
775 } | |
776 mstop(); | |
777 goto top; | |
778 } | |
779 } | |
780 | |
781 LOG("%d: start running goroutine %p\n", m->id, gp); | |
782 m->p->tick++; | |
783 gp->status = Grunning; | |
784 m->curg = gp; | |
785 gp->m = m; | |
786 | |
787 // Check whether the profiler needs to be turned on or off. | |
788 hz = runtime·sched.profilehz; | |
789 if(m->profilehz != hz) | |
790 runtime·resetcpuprofiler(hz); | |
791 | |
792 if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff | |
793 runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); | |
794 } | |
795 runtime·gogo(&gp->sched, 0); | |
796 } | |
797 | |
// Continuation of runtime·park, running on g0: now that gp has been
// switched away from, run the deferred unlock callback (if any) and
// pick the next goroutine.
static void
park0(G *gp)
{
	USED(&gp);
	if(m->waitunlockf) {
		m->waitunlockf(m->waitlock);
		m->waitunlockf = nil;
	}
	schedule();
}
808 | |
// Atomically parks g and unlocks the lock.
// The unlock is deferred to park0 (on g0) so the g is fully parked
// before the lock is released — a waker racing on l cannot see a
// half-parked g.
void
runtime·park(void *l, void(*unlockf)(void*), int8 *reason)
{
	LOG("%d: park l=%p reason=%s\n", m->id, l, reason);
	if(g == m->g0)
		runtime·throw("park of g0");
	m->waitlock = l;
	m->waitunlockf = unlockf;
	g->status = Gwaiting;
	g->waitreason = reason;	// presumably shown in tracebacks — confirm
	g->m = nil;
	runtime·mcall(park0);
}
823 | |
// Continuation of runtime·gosched, running on g0: requeue the
// yielding goroutine on the local P and reschedule.
static void
gosched0(G *gp)
{
	LOG("%d: gosched0 gp=%p\n", m->id, gp);
	gp->status = Grunnable;
	gp->m = nil;
	gput(m->p, gp);
	schedule();
}
833 | |
// Yield the processor: put the current goroutine back on the run
// queue and let the scheduler pick another (via gosched0 on g0).
void
runtime·gosched(void)
{
	if(m->p->status != Pbusy)
		runtime·throw("m->p->status != Pbusy");
	runtime·mcall(gosched0);
}
841 | |
// On g0. Finish the current goroutine: mark it dead, sever any
// LockOSThread pairing, unwind its stack segments, recycle the G onto
// the local P's free list, and reschedule.
static void
goexit0(G *gp)
{
	gp->status = Gdead;
	gp->m = nil;
	if(gp->lockedm) {
		gp->lockedm = nil;
		m->lockedg = nil;
	}
	runtime·unwindstack(gp, nil);
	gfput(m->p, gp);
	m->schedstats.gend++;
	m->schedstats.gfput++;
	schedule();
}

// Reached when a goroutine's function returns (newproc1 arranges
// sched.pc = goexit); switches to g0 to dispose of the goroutine.
void
runtime·goexit(void)
{
	runtime·mcall(goexit0);
}
864 | |
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
#pragma textflag 7
void
runtime·entersyscall(void)
{
	LOG("%d: entersyscall g=%p p=%p\n", m->id, g, m->p);
	//m->sysenterticks = runtime·cputicks();
	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// Sanity check: the saved sp must lie within the current
		// stack segment.
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Detach from the mcache and mark the P as Psyscall so another
	// M (or stoptheworld) can take the P while we block.
	m->mcache = nil;
	m->p->tick++;
	m->p->m = nil;
	m->p->status = Psyscall; //@@@ store-release
}
899 | |
// Slow path of runtime·exitsyscall, on g0: our old P was taken away,
// so queue gp on some P's run queue and try to grab an idle P for
// this M; failing that, park in mstop until one appears.
static void
exitsyscall0(G *gp)
{
	P *p;

	LOG("%d: exitsyscall0\n", m->id);
	gp->status = Grunnable;
	gp->m = nil;
	//!!! anything better than put to allp[0]?
	gput(runtime·allp[runtime·fastrand1()%runtime·gomaxprocs], gp); //!!! runtime·gomaxprocs can be concurrently changed
	runtime·lock(&runtime·sched);
	p = runtime·sched.pidle;
	if(p)
		runtime·sched.pidle = p->link;
	runtime·unlock(&runtime·sched);
	if(p) {
		p->status = Pbusy;
		entergo(m, p);
		schedule();	// never returns
	}
	mstop();
	schedule();
}
923 | |
924 // The goroutine g exited its system call. | |
925 // Arrange for it to run on a cpu again. | |
926 // This is called only from the go syscall library, not | |
927 // from the low-level system calls used by the runtime. | |
928 void | |
929 runtime·exitsyscall(void) | |
930 { | |
931 uint32 s; | |
932 int32 other; | |
933 | |
934 other = 0; | |
935 LOG("%d: exitsyscall g=%p\n", m->id, g); | |
936 | |
937 //runtime·printf("syscall=%d\n", (int32)(runtime·cputicks() - m->sysenterticks))
; | |
938 | |
939 s = m->p->status; | |
940 if(s==Psyscall && runtime·cas(&m->p->status, s, Pbusy)) { | |
941 if(other == 0) | |
942 m->schedstats.sysexitfast++; | |
943 else | |
944 m->schedstats.sysexitmed++; | |
945 LOG("%d: exitsyscall fast\n", m->id); | |
946 // There's a cpu for us, so we can run. | |
947 m->mcache = m->p->mcache; | |
948 m->p->m = m; | |
949 g->status = Grunning; | |
950 // Garbage collector isn't running (since we are), | |
951 // so okay to clear gcstack. | |
952 g->gcstack = (uintptr)nil; | |
953 | |
954 // Check whether the profiler needs to be turned on or off. | |
955 if(m->profilehz > 0) | |
956 runtime·setprof(true); | |
957 return; | |
958 } | |
959 | |
960 //!!! try to get pidle | |
961 /* | |
962 P *idlep, *sysp, *p; | |
963 int32 i; | |
964 | |
965 idlep = nil; | |
966 sysp = nil; | |
967 for(i=0; i<runtime·gomaxprocs; i++) { | |
968 p = runtime·allp[i]; | |
969 if(p == nil) | |
970 continue; | |
971 if(p->status == Pidle) { | |
972 idlep = p; | |
973 break; | |
974 } else if(p->status == Psyscall) | |
975 sysp = p; | |
976 } | |
977 if(idlep == nil) | |
978 idlep = sysp; | |
979 if(idlep) { | |
980 other = 1; | |
981 goto retry; | |
982 } | |
983 */ | |
984 | |
985 m->schedstats.sysexitslow++; | |
986 LOG("%d: exitsyscall slow p->status=%d\n", m->id, s); | |
987 if(m->profilehz > 0) | |
988 runtime·setprof(true); | |
989 | |
990 m->p = nil; | |
991 runtime·mcall(exitsyscall0); | |
992 | |
993 // Gosched returned, so we're allowed to run now. | |
994 // Delete the gcstack information that we left for | |
995 // the garbage collector during the system call. | |
996 // Must wait until now because until gosched returns | |
997 // we don't know for sure that the garbage collector | |
998 // is not running. | |
999 g->gcstack = (uintptr)nil; | |
1000 } | |
1001 | |
// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment. The function's return value is in
// m->cret. Copies the return arguments back into the caller's
// segment, frees the old segment, restores the saved stack bounds,
// and resumes at the gobuf saved in the Stktop.
void
runtime·oldstack(void)
{
	Stktop *top, old;
	uint32 argsize;
	uintptr cret;
	byte *sp;
	G *g1;

	//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	old = *top;	// copy out: the segment may be freed below
	argsize = old.argsize;
	if(argsize > 0) {
		// Copy return args back under the caller's frame.
		sp -= argsize;
		runtime·memmove(top->argp, sp, argsize);
	}

	if(old.free != 0)
		runtime·stackfree((byte*)g1->stackguard - StackGuard, old.free);
	g1->stackbase = (uintptr)old.stackbase;
	g1->stackguard = (uintptr)old.stackguard;

	cret = m->cret;
	m->cret = 0;  // drop reference
	runtime·gogo(&old.gobuf, cret);
}
1035 | |
// Called from reflect·call or from runtime·morestack when a new
// stack segment is needed. Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
void
runtime·newstack(void)
{
	int32 framesize, argsize;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;
	bool reflectcall;
	uintptr free;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	g1 = m->curg;

	if(m->morebuf.sp < g1->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, g1->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}
	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

	// framesize==1 is a sentinel meaning "called from reflect.call".
	reflectcall = framesize==1;
	if(reflectcall)
		framesize = 0;

	if(reflectcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > g1->stackguard) {
		// special case: called from reflect.call (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)g1->stackguard - StackGuard;
		free = 0;
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		stk = runtime·stackalloc(framesize);
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;
	}

	//runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
	//framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);

	// Link the new segment's Stktop back to the old segment so
	// lessstack/oldstack can restore it.
	top->stackbase = (byte*)g1->stackbase;
	top->stackguard = (byte*)g1->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;
	m->moreargp = nil;
	m->morebuf.pc = nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = g1->ispanic;
	g1->ispanic = false;

	g1->stackbase = (uintptr)top;
	g1->stackguard = (uintptr)stk + StackGuard;

	sp = (byte*)top;
	if(argsize > 0) {
		// Copy the call arguments just below the new Stktop.
		sp -= argsize;
		runtime·memmove(sp, top->argp, argsize);
	}
	if(thechar == '5') {
		// caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = (uintptr)sp;
	label.pc = (byte*)runtime·lessstack;
	label.g = m->curg;
	runtime·gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return
}
1130 | |
// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack. This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.
// gp->param carries the requested size in and the allocated stack out.
static void
mstackalloc(G *gp)
{
	gp->param = runtime·stackalloc((uintptr)gp->param);
	runtime·gogo(&gp->sched, 0);
}
1141 | |
1142 // Allocate a new g, with a stack big enough for stacksize bytes. | |
1143 G* | |
1144 runtime·malg(int32 stacksize) | |
1145 { | |
1146 G *newg; | |
1147 byte *stk; | |
1148 | |
1149 if(StackTop < sizeof(Stktop)) { | |
1150 runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (in
t32)StackTop, (int32)sizeof(Stktop)); | |
1151 runtime·throw("runtime: bad stack.h"); | |
1152 } | |
1153 | |
1154 newg = runtime·malloc(sizeof(G)); | |
1155 if(stacksize >= 0) { | |
1156 if(g == m->g0) { | |
1157 // running on scheduler stack already. | |
1158 stk = runtime·stackalloc(StackSystem + stacksize); | |
1159 } else { | |
1160 // have to call stackalloc on scheduler stack. | |
1161 g->param = (void*)(StackSystem + stacksize); | |
1162 runtime·mcall(mstackalloc); | |
1163 stk = g->param; | |
1164 g->param = nil; | |
1165 } | |
1166 newg->stack0 = (uintptr)stk; | |
1167 newg->stackguard = (uintptr)stk + StackGuard; | |
1168 newg->stackbase = (uintptr)stk + StackSystem + stacksize - sizeo
f(Stktop); | |
1169 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); | |
1170 } | |
1171 return newg; | |
1172 } | |
1173 | |
// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred. It's OK for this to call
// functions that split the stack.
#pragma textflag 7
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	if(thechar == '5')
		argp = (byte*)(&fn+2);  // skip caller's saved LR
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}
1193 | |
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	int32 siz;

	//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	m->schedstats.gstart++;
	siz = narg + nret;
	siz = (siz+7) & ~7;	// 8-byte align the frame

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	// Reuse a recycled G when possible.
	if((newg = gfget(m->p)) != nil) {
		m->schedstats.gfget++;
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		m->schedstats.galloc++;
		LOG("%d: MALG %p\n", m->id, fn);
		newg = runtime·malg(StackMin);
		newg->alllink = m->p->allg;
		m->p->allg = newg;
	}

	// Copy the arguments onto the new goroutine's stack.
	sp = (byte*)newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Start the goroutine with goexit as its saved pc so that it is
	// cleaned up when fn returns (see the kickoff case in schedule).
	LOG("%d: newproc %p\n", m->id, newg);
	newg->sched.sp = (uintptr)sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;
	newg->status = Grunnable;
	gput(m->p, newg);
	// If there is an idle P, kick an M to run it. The unlocked check
	// is a racy fast path; the state is re-checked under the lock.
	if(runtime·sched.pidle && fn != (byte*)main·main) {
		runtime·lock(&runtime·sched);
		if(runtime·sched.pidle) {
			P *p;
			p = runtime·sched.pidle;
			runtime·sched.pidle = p->link;
			M *mp;
			mp = runtime·sched.mhead;
			if(mp) {
				runtime·sched.mhead = mp->schedlink;
				runtime·sched.mcount--;
			}
			runtime·unlock(&runtime·sched);
			p->status = Pbusy;
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else {
				newm(runtime·mstart, p, false);
			}
		} else
			runtime·unlock(&runtime·sched);
	}
	return newg;
}
1271 | |
// Put gp on p's gfree list. Takes the sched lock itself when the
// local list overflows: once it holds 64 G's, half are moved in bulk
// to the global runtime·sched.gfree list (leaving 32 behind) to
// bound per-P hoarding.
static void
gfput(P *p, G *gp)
{
	if(gp->stackguard - StackGuard != gp->stack0)
		runtime·throw("invalid stack in gfput");
	gp->schedlink = p->gfree;
	p->gfree = gp;
	p->gfreecnt++;
	if(p->gfreecnt >= 64) {
		runtime·lock(&runtime·sched);
		while(p->gfreecnt >= 32) {
			p->gfreecnt--;
			gp = p->gfree;
			p->gfree = gp->schedlink;
			gp->schedlink = runtime·sched.gfree;
			runtime·sched.gfree = gp;
		}
		runtime·unlock(&runtime·sched);
	}
}
1293 | 1873 |
// Get a recycled G from p's gfree list; returns nil if none is
// available. When the local list is empty, refills it with up to 32
// G's from the global runtime·sched.gfree list first.
static G*
gfget(P *p)
{
	G *gp;

retry:
	gp = p->gfree;
	if(gp == nil && runtime·sched.gfree) {
		runtime·lock(&runtime·sched);
		while(p->gfreecnt < 32 && runtime·sched.gfree) {
			p->gfreecnt++;
			gp = runtime·sched.gfree;
			runtime·sched.gfree = gp->schedlink;
			gp->schedlink = p->gfree;
			p->gfree = gp;
		}
		runtime·unlock(&runtime·sched);
		goto retry;
	}
	if(gp) {
		p->gfree = gp->schedlink;
		p->gfreecnt--;
	}
	return gp;
}
1320 | |
// runtime.Breakpoint: trap into the debugger.
void
runtime·Breakpoint(void)
{
	runtime·breakpoint();
}

// runtime.Gosched: yield the processor to another goroutine.
void
runtime·Gosched(void)
{
	runtime·gosched();
}
1332 | |
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
// Returns the previous setting. A change does not take effect here:
// it is recorded in newprocs and applied by starttheworld's
// procresize call after the world has been stopped.
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;

	LOG("%d: gomaxprocsfunc %d\n", m->id, n);
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;	// clamp to the fixed allp capacity
	runtime·lock(&runtime·sched);
	ret = runtime·gomaxprocs;
	if(n <= 0 || n == ret) {
		// Query only (n<=0) or no change requested.
		runtime·unlock(&runtime·sched);
		return ret;
	}
	runtime·unlock(&runtime·sched);

	runtime·semacquire(&runtime·worldsema);
	m->gcing = 1;
	runtime·stoptheworld();
	newprocs = n;	// picked up by starttheworld
	m->gcing = 0;
	runtime·semrelease(&runtime·worldsema);
	runtime·starttheworld();

	return ret;
}
1361 | |
// runtime.LockOSThread. Currently a no-op in this scheduler; the
// g<->m wiring below is disabled pending reimplementation.
void
runtime·LockOSThread(void)
{
	//!!! implement me.
	/*
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = true;
		return;
	}
	m->lockedg = g;
	g->lockedm = m;
	*/
}

// runtime.UnlockOSThread. Also a no-op, matching LockOSThread.
void
runtime·UnlockOSThread(void)
{
	/*
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = false;
		return;
	}
	m->lockedg = nil;
	g->lockedm = nil;
	*/
}

// Reports whether the current g is wired to its m.
bool
runtime·lockedOSThread(void)
{
	return g->lockedm != nil && m->lockedg != nil;
}

// for testing of callbacks
void
runtime·golockedOSThread(bool ret)
{
	ret = runtime·lockedOSThread();
	FLUSH(&ret);
}

// for testing of wire, unwire
void
runtime·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}
1410 | |
// runtime.NumGoroutine. Goroutine counting is not wired up in this
// scheduler yet (sched.gcount is commented out), so this temporarily
// reports 1.
void
runtime·NumGoroutine(int32 ret)
{
	//ret = runtime·sched.gcount;
	ret = 1;
	FLUSH(&ret);
}

// Same stub as NumGoroutine: always 1 until counting is restored.
int32
runtime·gcount(void)
{
	//return runtime·sched.gcount;
	return 1;
}

// Number of M's recorded in runtime·sched.mcount.
int32
runtime·mcount(void)
{
	return runtime·sched.mcount;
}
1431 | |
// Diagnostics for misuse of runtime·mcall.
void
runtime·badmcall(void)  // called from assembly
{
	runtime·throw("runtime: mcall called on m->g0 stack");
}

void
runtime·badmcall2(void)  // called from assembly
{
	runtime·throw("runtime: mcall function returned");
}
1443 | |
// CPU-profiling state shared between setcpuprofilerate and the
// SIGPROF handler; protected by the embedded Lock.
static struct {
	Lock;
	void (*fn)(uintptr*, int32);	// callback fed each traceback
	int32 hz;			// sampling rate; 0 = profiling off
	uintptr pcbuf[100];
} prof;

// Called if we receive a SIGPROF signal.
// Takes a traceback of the interrupted g and feeds it to prof.fn.
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
{
	int32 n;

	// Racy pre-check to skip the lock when profiling is off.
	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime·lock(&prof);
	if(prof.fn == nil) {
		// Profiling was turned off after the check above.
		runtime·unlock(&prof);
		return;
	}
	n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
	if(n > 0)
		prof.fn(prof.pcbuf, n);
	runtime·unlock(&prof);
}
1470 | |
// Arrange to call fn with a traceback hz times a second.
void
runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments: fn and hz are enabled and disabled
	// together (either both set or both cleared).
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime·resetcpuprofiler(0);

	runtime·lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime·unlock(&prof);
	runtime·lock(&runtime·sched);
	runtime·sched.profilehz = hz;	// schedule() applies this per-M
	runtime·unlock(&runtime·sched);

	if(hz != 0)
		runtime·resetcpuprofiler(hz);
}
1499 | |
// Change number of processors. The world is stopped.
// Allocates P's (with run queues and mcaches) up to new, drains
// queued G's off retiring P's onto allp[0], wires the calling M to
// allp[0], and puts every other P on the idle list.
static void
procresize(int32 new)
{
	int32 i, old;
	G *gp;
	P *p;

	runtime·lock(&runtime·sched); //!!!
	old = runtime·gomaxprocs;
	LOG("%d: procresize %d->%d\n", m->id, old, new);
	if(old < 0 || old > maxgomaxprocs || new <= 0 || new > maxgomaxprocs)
		runtime·throw("procresize: invalid arg");
	if(old == new) {
		// Count unchanged: just reset statuses and rebuild the
		// idle list out of everything except our own P.
		for(i=0; i<new; i++) {
			if(runtime·allp[i] == m->p)
				runtime·allp[i]->status = Pbusy;
			else {
				runtime·allp[i]->status = Pidle;
				runtime·allp[i]->link = runtime·sched.pidle;
				runtime·sched.pidle = runtime·allp[i];
			}
		}
		runtime·unlock(&runtime·sched);
		return;
	}

	runtime·singleproc = new == 1;
	runtime·gomaxprocs = new;
	// Allocate and initialize any P's that have never existed.
	for(i=0; i<new; i++) {
		p = runtime·allp[i];
		if(p == nil) {
			//!!! goidle can see status Pidle
			p = (P*)runtime·mallocgc(sizeof(runtime·allp[i][0]), 0, 0, 1);
			p->status = Plocked;
			runtime·allp[i] = p; //@@@ store-release
		}
		if(p->mcache == nil) {
			if(old==0 && i==0)
				p->mcache = m->mcache;	// bootstrap: inherit this M's cache
			else
				p->mcache = runtime·allocmcache();
		}
		if(p->runq == nil) {
			p->runqsize = 1024;
			p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*), 0, 0, 1);
		}
	}

	// Move queued G's off allp[1..old) onto allp[0].
	for(i=1; i<old; i++) {
		for(;;) {
			gp = gget(runtime·allp[i]);
			if(gp == nil)
				break;
			//TODO: spread more evenly.
			gput(runtime·allp[0], gp);
		}
	}

	// Retire P's beyond the new count.
	for(i=new; i<old; i++) {
		runtime·freemcache(runtime·allp[i]->mcache);
		runtime·allp[i]->mcache = nil;
		runtime·allp[i]->status = Pdead;
		//TODO: free freeg
	}

	// Rewire the calling M to allp[0]; all other P's go idle.
	if(m->p)
		m->p->m = nil;
	m->p = nil;
	m->mcache = nil;
	runtime·allp[0]->m = nil;
	runtime·allp[0]->status = Pbusy;
	entergo(m, runtime·allp[0]);
	for(i=1; i<new; i++) {
		runtime·allp[i]->status = Pidle; //@@@ store-release
		runtime·allp[i]->link = runtime·sched.pidle;
		runtime·sched.pidle = runtime·allp[i];
	}
	runtime·unlock(&runtime·sched);
}
1580 | |
1581 static void | |
1582 entergo(M *mp, P *p) | |
1583 { | |
1584 LOG("%d: entergo m=%d p=%p p->m=%p, p->status=%d, p->mcache=%p\n", m->id
, mp->id, p, p->m, p->status, p->mcache); | |
1585 if(mp->p || mp->mcache) | |
1586 runtime·throw("entergo: already in go"); | |
1587 if(p->m || p->status != Pbusy) { | |
1588 runtime·printf("entergo: p->m=%p(%d) p->status=%d\n", p->m, p->m
? p->m->id : 0, p->status); | |
1589 runtime·throw("entergo: invalid p state"); | |
1590 } | |
1591 mp->mcache = p->mcache; | |
1592 mp->p = p; | |
1593 p->m = mp; | |
1594 } | |
1595 | |
1596 static void | |
1597 leavego(M *mp, uint32 status) | |
1598 { | |
1599 P *p; | |
1600 | |
1601 LOG("%d: leavego %d\n", mp->id, status); | |
1602 // sched is locked | |
1603 if(mp->p == nil || mp->mcache == nil) | |
1604 runtime·throw("leavego: invalid arg"); | |
1605 p = mp->p; | |
1606 if(p->m != mp || p->mcache != mp->mcache || p->status != Pbusy) { | |
1607 runtime·printf("leavego: m=%p m->p=%p p->m=%p m->mcache=%p p->mc
ache=%p p->status=%d\n", | |
1608 mp, mp->p, p->m, m->mcache, p->mcache, p->status); | |
1609 runtime·throw("leavego: invalid p state"); | |
1610 } | |
1611 mp->p = nil; | |
1612 mp->mcache = nil; | |
1613 p->m = nil; | |
1614 p->status = status; //@@@ store-release | |
1615 | |
1616 runtime·lock(&runtime·sched); | |
1617 p->link = runtime·sched.pidle; | |
1618 runtime·sched.pidle = p; | |
1619 runtime·unlock(&runtime·sched); | |
1620 } | |
1621 | |
// Retake P's abandoned in system calls. Every P observed in Psyscall is
// CAS'ed to Pbusy (racing against the m returning from the syscall) and
// handed to an m: either one woken from the sched m list, or a newly
// created one.
static void
retake(void)
{
	uint32 i, s;
	P *p;
	M *mp;

	for(i=0; i<runtime·gomaxprocs; i++) {
		p = runtime·allp[i];
		//!!! procresize may be in progress
		// do something if GC is in progress (help).
		if(p==nil)
			continue;
		s = p->status;
		if(s == Psyscall && runtime·cas(&p->status, s, Pbusy)) {
			// Won the race for this P: find an m to run it on.
			LOG("retake %p(%d)\n", p, i);
			runtime·lock(&runtime·sched);
			mp = mget();
			runtime·unlock(&runtime·sched);
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else {
				newm(runtime·mstart, p, false);
			}
		}
	}
}
1650 | |
// System monitor loop: polls the network for ready goroutines,
// dispatches them to idle P's (waking or creating m's as needed), and
// retakes P's stuck in system calls. Runs forever on its own thread.
static void
sysmon(void)
{
	G *gp0, *gp;
	P *p;
	M *mp;
	bool missed;

	// This is a special dedicated thread.
	// It works w/o mcache nor stackalloc, it may work concurrently with GC.
	runtime·asminit();
	runtime·minit();
	LOG("sysmon check\n");
	for(;;) {
		LOG("sysmon check\n");
		// Poll the network; netwait returns a list of ready g's
		// linked via schedlink (timeout units per netwait's contract).
		gp0 = runtime·netwait(20);
		missed = false;
		while(gp0) {
			gp = gp0;
			gp0 = gp->schedlink;
			if(runtime·sched.pidle) {
				// Double-checked under the sched lock.
				runtime·lock(&runtime·sched);
				if(runtime·sched.pidle) {
					// Pop an idle P and, if available, an m
					// from the sched m list to run it.
					p = runtime·sched.pidle;
					runtime·sched.pidle = p->link;
					mp = runtime·sched.mhead;
					if(mp) {
						runtime·sched.mhead = mp->schedlink;
						runtime·sched.mcount--;
					}
					runtime·unlock(&runtime·sched);
					gput(p, gp);
					p->status = Pbusy;
					if(mp) {
						entergo(mp, p);
						runtime·notewakeup(&mp->park);
					} else
						newm(runtime·mstart, p, false);
					continue;
				} else
					runtime·unlock(&runtime·sched);
			}
			// No idle P: park the g on a random P's run queue and
			// remember to re-check for idle P's below.
			missed = true;
			p = runtime·allp[runtime·fastrand1() % runtime·gomaxprocs]; //!!! gomaxprocs can be changed concurrently
			gput(p, gp);
		}
		// If g's were dumped onto random P's and a P has since gone
		// idle, kick one idle P awake so that work is not stranded.
		if(missed && runtime·sched.pidle) {
			runtime·lock(&runtime·sched);
			if(runtime·sched.pidle) {
				p = runtime·sched.pidle;
				p->status = Pbusy;
				runtime·sched.pidle = p->link;
				mp = runtime·sched.mhead;
				if(mp) {
					runtime·sched.mhead = mp->schedlink;
					runtime·sched.mcount--;
				}
				runtime·unlock(&runtime·sched);
				if(mp) {
					entergo(mp, p);
					runtime·notewakeup(&mp->park);
				} else
					newm(runtime·mstart, p, false);
			} else
				runtime·unlock(&runtime·sched);
		}
		retake();
	}
}
LEFT | RIGHT |