Rietveld Code Review Tool
Help | Bug tracker | Discussion group | Source code | Sign in
(2)

Delta Between Two Patch Sets: src/pkg/runtime/proc.c

Issue 104200047: code review 104200047: runtime: stack allocator, separate from mallocgc (Closed)
Left Patch Set: diff -r 7d2e78c502ab https://khr%40golang.org@code.google.com/p/go/ Created 10 years, 9 months ago
Right Patch Set: diff -r 26db394e3aca https://khr%40golang.org@code.google.com/p/go/ Created 10 years, 8 months ago
Left:
Right:
Use n/p to move between diff chunks; N/P to move between comments. Please Sign in to add in-line comments.
Jump to:
Left: Side by side diff | Download
Right: Side by side diff | Download
« no previous file with change/comment | « src/pkg/runtime/mheap.c ('k') | src/pkg/runtime/runtime.h » ('j') | no next file with change/comment »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
LEFTRIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 #include "runtime.h" 5 #include "runtime.h"
6 #include "arch_GOARCH.h" 6 #include "arch_GOARCH.h"
7 #include "zaexperiment.h" 7 #include "zaexperiment.h"
8 #include "malloc.h" 8 #include "malloc.h"
9 #include "stack.h" 9 #include "stack.h"
10 #include "race.h" 10 #include "race.h"
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after
136 // call runtime·mstart 136 // call runtime·mstart
137 // 137 //
138 // The new G calls runtime·main. 138 // The new G calls runtime·main.
139 void 139 void
140 runtime·schedinit(void) 140 runtime·schedinit(void)
141 { 141 {
142 int32 n, procs; 142 int32 n, procs;
143 byte *p; 143 byte *p;
144 Eface i; 144 Eface i;
145 145
146 // raceinit must be the first call to race detector.
147 // In particular, it must be done before mallocinit below calls racemapshadow.
148 if(raceenabled)
149 g->racectx = runtime·raceinit();
150
146 runtime·sched.maxmcount = 10000; 151 runtime·sched.maxmcount = 10000;
147 runtime·precisestack = true; // haveexperiment("precisestack"); 152 runtime·precisestack = true; // haveexperiment("precisestack");
148 153
149 runtime·symtabinit(); 154 runtime·symtabinit();
155 runtime·stackinit();
150 runtime·mallocinit(); 156 runtime·mallocinit();
151 » mcommoninit(m); 157 » mcommoninit(g->m);
152 ········ 158 ········
153 // Initialize the itable value for newErrorCString, 159 // Initialize the itable value for newErrorCString,
154 // so that the next time it gets called, possibly 160 // so that the next time it gets called, possibly
155 // in a fault during a garbage collection, it will not 161 // in a fault during a garbage collection, it will not
156 // need to allocated memory. 162 // need to allocated memory.
157 runtime·newErrorCString(0, &i); 163 runtime·newErrorCString(0, &i);
158 ········ 164 ········
159 // Initialize the cached gotraceback value, since 165 // Initialize the cached gotraceback value, since
160 // gotraceback calls getenv, which mallocs on Plan 9. 166 // gotraceback calls getenv, which mallocs on Plan 9.
161 runtime·gotraceback(nil); 167 runtime·gotraceback(nil);
(...skipping 12 matching lines...) Expand all
174 } 180 }
175 runtime·allp = runtime·malloc((MaxGomaxprocs+1)*sizeof(runtime·allp[0])); 181 runtime·allp = runtime·malloc((MaxGomaxprocs+1)*sizeof(runtime·allp[0]));
176 procresize(procs); 182 procresize(procs);
177 183
178 runtime·copystack = runtime·precisestack; 184 runtime·copystack = runtime·precisestack;
179 p = runtime·getenv("GOCOPYSTACK"); 185 p = runtime·getenv("GOCOPYSTACK");
180 if(p != nil && !runtime·strcmp(p, (byte*)"0")) 186 if(p != nil && !runtime·strcmp(p, (byte*)"0"))
181 runtime·copystack = false; 187 runtime·copystack = false;
182 188
183 mstats.enablegc = 1; 189 mstats.enablegc = 1;
184
185 if(raceenabled)
186 g->racectx = runtime·raceinit();
187 } 190 }
188 191
189 extern void main·init(void); 192 extern void main·init(void);
190 extern void main·main(void); 193 extern void main·main(void);
191 194
192 static FuncVal scavenger = {runtime·MHeap_Scavenger}; 195 static FuncVal scavenger = {runtime·MHeap_Scavenger};
193 196
194 static FuncVal initDone = { runtime·unlockOSThread }; 197 static FuncVal initDone = { runtime·unlockOSThread };
195 198
196 // The main goroutine. 199 // The main goroutine.
(...skipping 30 matching lines...) Expand all
227 runtime·lockOSThread(); 230 runtime·lockOSThread();
228 ········ 231 ········
229 // Defer unlock so that runtime.Goexit during init does the unlock too. 232 // Defer unlock so that runtime.Goexit during init does the unlock too.
230 d.fn = &initDone; 233 d.fn = &initDone;
231 d.siz = 0; 234 d.siz = 0;
232 d.link = g->defer; 235 d.link = g->defer;
233 d.argp = NoArgs; 236 d.argp = NoArgs;
234 d.special = true; 237 d.special = true;
235 g->defer = &d; 238 g->defer = &d;
236 239
237 » if(m != &runtime·m0) 240 » if(g->m != &runtime·m0)
238 runtime·throw("runtime·main not on m0"); 241 runtime·throw("runtime·main not on m0");
239 runtime·newproc1(&scavenger, nil, 0, 0, runtime·main); 242 runtime·newproc1(&scavenger, nil, 0, 0, runtime·main);
240 main·init(); 243 main·init();
241 244
242 if(g->defer != &d || d.fn != &initDone) 245 if(g->defer != &d || d.fn != &initDone)
243 runtime·throw("runtime: bad defer entry after init"); 246 runtime·throw("runtime: bad defer entry after init");
244 g->defer = d.link; 247 g->defer = d.link;
245 runtime·unlockOSThread(); 248 runtime·unlockOSThread();
246 249
247 main·main(); 250 main·main();
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
288 default: 291 default:
289 status = "???"; 292 status = "???";
290 break; 293 break;
291 } 294 }
292 295
293 // approx time the G is blocked, in minutes 296 // approx time the G is blocked, in minutes
294 waitfor = 0; 297 waitfor = 0;
295 if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince != 0) 298 if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince != 0)
296 waitfor = (runtime·nanotime() - gp->waitsince) / (60LL*1000*1000*1000); 299 waitfor = (runtime·nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
297 300
298 » if(waitfor < 1) 301 » runtime·printf("goroutine %D [%s", gp->goid, status);
299 » » runtime·printf("goroutine %D [%s]:\n", gp->goid, status); 302 » if(waitfor >= 1)
300 » else 303 » » runtime·printf(", %D minutes", waitfor);
301 » » runtime·printf("goroutine %D [%s, %D minutes]:\n", gp->goid, status, waitfor); 304 » if(gp->lockedm != nil)
305 » » runtime·printf(", locked to thread");
306 » runtime·printf("]:\n");
302 } 307 }
303 308
304 void 309 void
305 runtime·tracebackothers(G *me) 310 runtime·tracebackothers(G *me)
306 { 311 {
307 G *gp; 312 G *gp;
308 int32 traceback; 313 int32 traceback;
309 uintptr i; 314 uintptr i;
310 315
311 traceback = runtime·gotraceback(nil); 316 traceback = runtime·gotraceback(nil);
312 ········ 317 ········
313 // Show the current goroutine first, if we haven't already. 318 // Show the current goroutine first, if we haven't already.
314 » if((gp = m->curg) != nil && gp != me) { 319 » if((gp = g->m->curg) != nil && gp != me) {
315 runtime·printf("\n"); 320 runtime·printf("\n");
316 runtime·goroutineheader(gp); 321 runtime·goroutineheader(gp);
317 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp); 322 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
318 } 323 }
319 324
320 runtime·lock(&allglock); 325 runtime·lock(&allglock);
321 for(i = 0; i < runtime·allglen; i++) { 326 for(i = 0; i < runtime·allglen; i++) {
322 gp = runtime·allg[i]; 327 gp = runtime·allg[i];
323 » » if(gp == me || gp == m->curg || gp->status == Gdead) 328 » » if(gp == me || gp == g->m->curg || gp->status == Gdead)
324 continue; 329 continue;
325 if(gp->issystem && traceback < 2) 330 if(gp->issystem && traceback < 2)
326 continue; 331 continue;
327 runtime·printf("\n"); 332 runtime·printf("\n");
328 runtime·goroutineheader(gp); 333 runtime·goroutineheader(gp);
329 if(gp->status == Grunning) { 334 if(gp->status == Grunning) {
330 runtime·printf("\tgoroutine running on other thread; stack unavailable\n"); 335 runtime·printf("\tgoroutine running on other thread; stack unavailable\n");
331 runtime·printcreatedby(gp); 336 runtime·printcreatedby(gp);
332 } else 337 } else
333 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp); 338 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
334 } 339 }
335 runtime·unlock(&allglock); 340 runtime·unlock(&allglock);
336 } 341 }
337 342
338 static void 343 static void
339 checkmcount(void) 344 checkmcount(void)
340 { 345 {
341 // sched lock is held 346 // sched lock is held
342 if(runtime·sched.mcount > runtime·sched.maxmcount) { 347 if(runtime·sched.mcount > runtime·sched.maxmcount) {
343 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount); 348 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
344 runtime·throw("thread exhaustion"); 349 runtime·throw("thread exhaustion");
345 } 350 }
346 } 351 }
347 352
348 static void 353 static void
349 mcommoninit(M *mp) 354 mcommoninit(M *mp)
350 { 355 {
351 // If there is no mcache runtime·callers() will crash, 356 // If there is no mcache runtime·callers() will crash,
352 // and we are most likely in sysmon thread so the stack is senseless anyway. 357 // and we are most likely in sysmon thread so the stack is senseless anyway.
353 » if(m->mcache) 358 » if(g->m->mcache)
354 runtime·callers(1, mp->createstack, nelem(mp->createstack)); 359 runtime·callers(1, mp->createstack, nelem(mp->createstack));
355 360
356 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks(); 361 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
357 362
358 runtime·lock(&runtime·sched); 363 runtime·lock(&runtime·sched);
359 mp->id = runtime·sched.mcount++; 364 mp->id = runtime·sched.mcount++;
360 checkmcount(); 365 checkmcount();
361 runtime·mpreinit(mp); 366 runtime·mpreinit(mp);
362 367
363 » // Add to runtime·allm so garbage collector doesn't free m 368 » // Add to runtime·allm so garbage collector doesn't free g->m
364 // when it is just in a register or thread-local storage. 369 // when it is just in a register or thread-local storage.
365 mp->alllink = runtime·allm; 370 mp->alllink = runtime·allm;
366 // runtime·NumCgoCall() iterates over allm w/o schedlock, 371 // runtime·NumCgoCall() iterates over allm w/o schedlock,
367 // so we need to publish it safely. 372 // so we need to publish it safely.
368 runtime·atomicstorep(&runtime·allm, mp); 373 runtime·atomicstorep(&runtime·allm, mp);
369 runtime·unlock(&runtime·sched); 374 runtime·unlock(&runtime·sched);
370 } 375 }
371 376
372 // Mark gp ready to run. 377 // Mark gp ready to run.
373 void 378 void
374 runtime·ready(G *gp) 379 runtime·ready(G *gp)
375 { 380 {
376 // Mark runnable. 381 // Mark runnable.
377 » m->locks++; // disable preemption because it can be holding p in a local var 382 » g->m->locks++; // disable preemption because it can be holding p in a local var
378 if(gp->status != Gwaiting) { 383 if(gp->status != Gwaiting) {
379 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->sta tus); 384 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->sta tus);
380 runtime·throw("bad g->status in ready"); 385 runtime·throw("bad g->status in ready");
381 } 386 }
382 gp->status = Grunnable; 387 gp->status = Grunnable;
383 » runqput(m->p, gp); 388 » runqput(g->m->p, gp);
384 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0) // TODO: fast atomic 389 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0) // TODO: fast atomic
385 wakep(); 390 wakep();
386 » m->locks--; 391 » g->m->locks--;
387 » if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack 392 » if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
388 g->stackguard0 = StackPreempt; 393 g->stackguard0 = StackPreempt;
389 } 394 }
390 395
391 int32 396 int32
392 runtime·gcprocs(void) 397 runtime·gcprocs(void)
393 { 398 {
394 int32 n; 399 int32 n;
395 400
396 // Figure out how many CPUs to use during GC. 401 // Figure out how many CPUs to use during GC.
397 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. 402 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
(...skipping 27 matching lines...) Expand all
425 430
426 void 431 void
427 runtime·helpgc(int32 nproc) 432 runtime·helpgc(int32 nproc)
428 { 433 {
429 M *mp; 434 M *mp;
430 int32 n, pos; 435 int32 n, pos;
431 436
432 runtime·lock(&runtime·sched); 437 runtime·lock(&runtime·sched);
433 pos = 0; 438 pos = 0;
434 for(n = 1; n < nproc; n++) { // one M is currently running 439 for(n = 1; n < nproc; n++) { // one M is currently running
435 » » if(runtime·allp[pos]->mcache == m->mcache) 440 » » if(runtime·allp[pos]->mcache == g->m->mcache)
436 pos++; 441 pos++;
437 mp = mget(); 442 mp = mget();
438 if(mp == nil) 443 if(mp == nil)
439 runtime·throw("runtime·gcprocs inconsistency"); 444 runtime·throw("runtime·gcprocs inconsistency");
440 mp->helpgc = n; 445 mp->helpgc = n;
441 mp->mcache = runtime·allp[pos]->mcache; 446 mp->mcache = runtime·allp[pos]->mcache;
442 pos++; 447 pos++;
443 runtime·notewakeup(&mp->park); 448 runtime·notewakeup(&mp->park);
444 } 449 }
445 runtime·unlock(&runtime·sched); 450 runtime·unlock(&runtime·sched);
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
479 int32 i; 484 int32 i;
480 uint32 s; 485 uint32 s;
481 P *p; 486 P *p;
482 bool wait; 487 bool wait;
483 488
484 runtime·lock(&runtime·sched); 489 runtime·lock(&runtime·sched);
485 runtime·sched.stopwait = runtime·gomaxprocs; 490 runtime·sched.stopwait = runtime·gomaxprocs;
486 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1); 491 runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
487 preemptall(); 492 preemptall();
488 // stop current P 493 // stop current P
489 » m->p->status = Pgcstop; 494 » g->m->p->status = Pgcstop;
490 runtime·sched.stopwait--; 495 runtime·sched.stopwait--;
491 // try to retake all P's in Psyscall status 496 // try to retake all P's in Psyscall status
492 for(i = 0; i < runtime·gomaxprocs; i++) { 497 for(i = 0; i < runtime·gomaxprocs; i++) {
493 p = runtime·allp[i]; 498 p = runtime·allp[i];
494 s = p->status; 499 s = p->status;
495 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop)) 500 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
496 runtime·sched.stopwait--; 501 runtime·sched.stopwait--;
497 } 502 }
498 // stop idle P's 503 // stop idle P's
499 while(p = pidleget()) { 504 while(p = pidleget()) {
(...skipping 19 matching lines...) Expand all
519 for(i = 0; i < runtime·gomaxprocs; i++) { 524 for(i = 0; i < runtime·gomaxprocs; i++) {
520 p = runtime·allp[i]; 525 p = runtime·allp[i];
521 if(p->status != Pgcstop) 526 if(p->status != Pgcstop)
522 runtime·throw("stoptheworld: not stopped"); 527 runtime·throw("stoptheworld: not stopped");
523 } 528 }
524 } 529 }
525 530
526 static void 531 static void
527 mhelpgc(void) 532 mhelpgc(void)
528 { 533 {
529 » m->helpgc = -1; 534 » g->m->helpgc = -1;
530 } 535 }
531 536
532 void 537 void
533 runtime·starttheworld(void) 538 runtime·starttheworld(void)
534 { 539 {
535 P *p, *p1; 540 P *p, *p1;
536 M *mp; 541 M *mp;
537 G *gp; 542 G *gp;
538 bool add; 543 bool add;
539 544
540 » m->locks++; // disable preemption because it can be holding p in a local var 545 » g->m->locks++; // disable preemption because it can be holding p in a local var
541 gp = runtime·netpoll(false); // non-blocking 546 gp = runtime·netpoll(false); // non-blocking
542 injectglist(gp); 547 injectglist(gp);
543 add = needaddgcproc(); 548 add = needaddgcproc();
544 runtime·lock(&runtime·sched); 549 runtime·lock(&runtime·sched);
545 if(newprocs) { 550 if(newprocs) {
546 procresize(newprocs); 551 procresize(newprocs);
547 newprocs = 0; 552 newprocs = 0;
548 } else 553 } else
549 procresize(runtime·gomaxprocs); 554 procresize(runtime·gomaxprocs);
550 runtime·sched.gcwaiting = 0; 555 runtime·sched.gcwaiting = 0;
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
587 if(add) { 592 if(add) {
588 // If GC could have used another helper proc, start one now, 593 // If GC could have used another helper proc, start one now,
589 // in the hope that it will be available next time. 594 // in the hope that it will be available next time.
590 // It would have been even better to start it before the collection, 595 // It would have been even better to start it before the collection,
591 // but doing so requires allocating memory, so it's tricky to 596 // but doing so requires allocating memory, so it's tricky to
592 // coordinate. This lazy approach works out in practice: 597 // coordinate. This lazy approach works out in practice:
593 // we don't mind if the first couple gc rounds don't have quite 598 // we don't mind if the first couple gc rounds don't have quite
594 // the maximum number of procs. 599 // the maximum number of procs.
595 newm(mhelpgc, nil); 600 newm(mhelpgc, nil);
596 } 601 }
597 » m->locks--; 602 » g->m->locks--;
598 » if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack 603 » if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
599 g->stackguard0 = StackPreempt; 604 g->stackguard0 = StackPreempt;
600 } 605 }
601 606
602 // Called to start an M. 607 // Called to start an M.
603 void 608 void
604 runtime·mstart(void) 609 runtime·mstart(void)
605 { 610 {
606 » if(g != m->g0) 611 » if(g != g->m->g0)
607 runtime·throw("bad runtime·mstart"); 612 runtime·throw("bad runtime·mstart");
608 613
609 // Record top of stack for use by mcall. 614 // Record top of stack for use by mcall.
610 // Once we call schedule we're never coming back, 615 // Once we call schedule we're never coming back,
611 // so other calls can reuse this stack space. 616 // so other calls can reuse this stack space.
612 » runtime·gosave(&m->g0->sched); 617 » runtime·gosave(&g->m->g0->sched);
613 » m->g0->sched.pc = (uintptr)-1; // make sure it is never used 618 » g->m->g0->sched.pc = (uintptr)-1; // make sure it is never used
614 » m->g0->stackguard = m->g0->stackguard0; // cgo sets only stackguard0, copy it to stackguard 619 » g->m->g0->stackguard = g->m->g0->stackguard0; // cgo sets only stackguard0, copy it to stackguard
615 runtime·asminit(); 620 runtime·asminit();
616 runtime·minit(); 621 runtime·minit();
617 622
618 // Install signal handlers; after minit so that minit can 623 // Install signal handlers; after minit so that minit can
619 // prepare the thread to be able to handle the signals. 624 // prepare the thread to be able to handle the signals.
620 » if(m == &runtime·m0) 625 » if(g->m == &runtime·m0)
621 runtime·initsig(); 626 runtime·initsig();
622 ········ 627 ········
623 » if(m->mstartfn) 628 » if(g->m->mstartfn)
624 » » m->mstartfn(); 629 » » g->m->mstartfn();
625 630
626 » if(m->helpgc) { 631 » if(g->m->helpgc) {
627 » » m->helpgc = 0; 632 » » g->m->helpgc = 0;
628 stopm(); 633 stopm();
629 » } else if(m != &runtime·m0) { 634 » } else if(g->m != &runtime·m0) {
630 » » acquirep(m->nextp); 635 » » acquirep(g->m->nextp);
631 » » m->nextp = nil; 636 » » g->m->nextp = nil;
632 } 637 }
633 schedule(); 638 schedule();
634 639
635 // TODO(brainman): This point is never reached, because scheduler 640 // TODO(brainman): This point is never reached, because scheduler
636 // does not release os threads at the moment. But once this path 641 // does not release os threads at the moment. But once this path
637 // is enabled, we must remove our seh here. 642 // is enabled, we must remove our seh here.
638 } 643 }
639 644
640 // When running with cgo, we call _cgo_thread_start 645 // When running with cgo, we call _cgo_thread_start
641 // to start threads for us so that we can play nicely with 646 // to start threads for us so that we can play nicely with
642 // foreign code. 647 // foreign code.
643 void (*_cgo_thread_start)(void*); 648 void (*_cgo_thread_start)(void*);
644 649
645 typedef struct CgoThreadStart CgoThreadStart; 650 typedef struct CgoThreadStart CgoThreadStart;
646 struct CgoThreadStart 651 struct CgoThreadStart
647 { 652 {
648 M *m;
649 G *g; 653 G *g;
650 uintptr *tls; 654 uintptr *tls;
651 void (*fn)(void); 655 void (*fn)(void);
652 }; 656 };
653 657
654 // Allocate a new m unassociated with any thread. 658 // Allocate a new m unassociated with any thread.
655 // Can use p for allocation context if needed. 659 // Can use p for allocation context if needed.
656 M* 660 M*
657 runtime·allocm(P *p) 661 runtime·allocm(P *p)
658 { 662 {
659 M *mp; 663 M *mp;
660 static Type *mtype; // The Go type M 664 static Type *mtype; // The Go type M
661 665
662 » m->locks++; // disable GC because it can be called from sysmon 666 » g->m->locks++; // disable GC because it can be called from sysmon
663 » if(m->p == nil) 667 » if(g->m->p == nil)
664 acquirep(p); // temporarily borrow p for mallocs in this function 668 acquirep(p); // temporarily borrow p for mallocs in this function
665 if(mtype == nil) { 669 if(mtype == nil) {
666 Eface e; 670 Eface e;
667 runtime·gc_m_ptr(&e); 671 runtime·gc_m_ptr(&e);
668 mtype = ((PtrType*)e.type)->elem; 672 mtype = ((PtrType*)e.type)->elem;
669 } 673 }
670 674
671 mp = runtime·cnew(mtype); 675 mp = runtime·cnew(mtype);
672 mcommoninit(mp); 676 mcommoninit(mp);
673 677
674 // In case of cgo or Solaris, pthread_create will make us a stack. 678 // In case of cgo or Solaris, pthread_create will make us a stack.
675 // Windows will layout sched stack on OS stack. 679 // Windows will layout sched stack on OS stack.
676 if(runtime·iscgo || Solaris || Windows) 680 if(runtime·iscgo || Solaris || Windows)
677 mp->g0 = runtime·malg(-1); 681 mp->g0 = runtime·malg(-1);
678 else 682 else
679 mp->g0 = runtime·malg(8192); 683 mp->g0 = runtime·malg(8192);
680 684 » mp->g0->m = mp;
681 » if(p == m->p) 685
686 » if(p == g->m->p)
682 releasep(); 687 releasep();
683 » m->locks--; 688 » g->m->locks--;
684 » if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack 689 » if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
685 g->stackguard0 = StackPreempt; 690 g->stackguard0 = StackPreempt;
686 691
687 return mp; 692 return mp;
688 } 693 }
689 694
690 static G* 695 static G*
691 allocg(void) 696 allocg(void)
692 { 697 {
693 G *gp; 698 G *gp;
694 static Type *gtype; 699 static Type *gtype;
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
761 // Set needextram when we've just emptied the list, 766 // Set needextram when we've just emptied the list,
762 // so that the eventual call into cgocallbackg will 767 // so that the eventual call into cgocallbackg will
763 // allocate a new m for the extra list. We delay the 768 // allocate a new m for the extra list. We delay the
764 // allocation until then so that it can be done 769 // allocation until then so that it can be done
765 // after exitsyscall makes sure it is okay to be 770 // after exitsyscall makes sure it is okay to be
766 // running at all (that is, there's no garbage collection 771 // running at all (that is, there's no garbage collection
767 // running right now). 772 // running right now).
768 mp->needextram = mp->schedlink == nil; 773 mp->needextram = mp->schedlink == nil;
769 unlockextra(mp->schedlink); 774 unlockextra(mp->schedlink);
770 775
771 » // Install m and g (= m->g0) and set the stack bounds 776 » // Install g (= m->g0) and set the stack bounds
772 // to match the current stack. We don't actually know 777 // to match the current stack. We don't actually know
773 // how big the stack is, like we don't know how big any 778 // how big the stack is, like we don't know how big any
774 // scheduling stack is, but we assume there's at least 32 kB, 779 // scheduling stack is, but we assume there's at least 32 kB,
775 // which is more than enough for us. 780 // which is more than enough for us.
776 » runtime·setmg(mp, mp->g0); 781 » runtime·setg(mp->g0);
777 g->stackbase = (uintptr)(&x + 1024); 782 g->stackbase = (uintptr)(&x + 1024);
778 g->stackguard = (uintptr)(&x - 32*1024); 783 g->stackguard = (uintptr)(&x - 32*1024);
779 g->stackguard0 = g->stackguard; 784 g->stackguard0 = g->stackguard;
780 785
781 // Initialize this thread to use the m. 786 // Initialize this thread to use the m.
782 runtime·asminit(); 787 runtime·asminit();
783 runtime·minit(); 788 runtime·minit();
784 } 789 }
785 790
786 // newextram allocates an m and puts it on the extra list. 791 // newextram allocates an m and puts it on the extra list.
(...skipping 14 matching lines...) Expand all
801 gp = runtime·malg(4096); 806 gp = runtime·malg(4096);
802 gp->sched.pc = (uintptr)runtime·goexit; 807 gp->sched.pc = (uintptr)runtime·goexit;
803 gp->sched.sp = gp->stackbase; 808 gp->sched.sp = gp->stackbase;
804 gp->sched.lr = 0; 809 gp->sched.lr = 0;
805 gp->sched.g = gp; 810 gp->sched.g = gp;
806 gp->syscallpc = gp->sched.pc; 811 gp->syscallpc = gp->sched.pc;
807 gp->syscallsp = gp->sched.sp; 812 gp->syscallsp = gp->sched.sp;
808 gp->syscallstack = gp->stackbase; 813 gp->syscallstack = gp->stackbase;
809 gp->syscallguard = gp->stackguard; 814 gp->syscallguard = gp->stackguard;
810 gp->status = Gsyscall; 815 gp->status = Gsyscall;
816 gp->m = mp;
811 mp->curg = gp; 817 mp->curg = gp;
812 mp->locked = LockInternal; 818 mp->locked = LockInternal;
813 mp->lockedg = gp; 819 mp->lockedg = gp;
814 gp->lockedm = mp; 820 gp->lockedm = mp;
815 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1); 821 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
816 if(raceenabled) 822 if(raceenabled)
817 gp->racectx = runtime·racegostart(runtime·newextram); 823 gp->racectx = runtime·racegostart(runtime·newextram);
818 // put on allg for garbage collector 824 // put on allg for garbage collector
819 allgadd(gp); 825 allgadd(gp);
820 826
(...skipping 29 matching lines...) Expand all
850 void 856 void
851 runtime·dropm(void) 857 runtime·dropm(void)
852 { 858 {
853 M *mp, *mnext; 859 M *mp, *mnext;
854 860
855 // Undo whatever initialization minit did during needm. 861 // Undo whatever initialization minit did during needm.
856 runtime·unminit(); 862 runtime·unminit();
857 863
858 // Clear m and g, and return m to the extra list. 864 // Clear m and g, and return m to the extra list.
859 // After the call to setmg we can only call nosplit functions. 865 // After the call to setmg we can only call nosplit functions.
860 » mp = m; 866 » mp = g->m;
861 » runtime·setmg(nil, nil); 867 » runtime·setg(nil);
862 868
863 mnext = lockextra(true); 869 mnext = lockextra(true);
864 mp->schedlink = mnext; 870 mp->schedlink = mnext;
865 unlockextra(mp); 871 unlockextra(mp);
866 } 872 }
867 873
868 #define MLOCKED ((M*)1) 874 #define MLOCKED ((M*)1)
869 875
870 // lockextra locks the extra list and returns the list head. 876 // lockextra locks the extra list and returns the list head.
871 // The caller must unlock the list by storing a new list head 877 // The caller must unlock the list by storing a new list head
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
916 922
917 mp = runtime·allocm(p); 923 mp = runtime·allocm(p);
918 mp->nextp = p; 924 mp->nextp = p;
919 mp->mstartfn = fn; 925 mp->mstartfn = fn;
920 926
921 if(runtime·iscgo) { 927 if(runtime·iscgo) {
922 CgoThreadStart ts; 928 CgoThreadStart ts;
923 929
924 if(_cgo_thread_start == nil) 930 if(_cgo_thread_start == nil)
925 runtime·throw("_cgo_thread_start missing"); 931 runtime·throw("_cgo_thread_start missing");
926 ts.m = mp;
927 ts.g = mp->g0; 932 ts.g = mp->g0;
928 ts.tls = mp->tls; 933 ts.tls = mp->tls;
929 ts.fn = runtime·mstart; 934 ts.fn = runtime·mstart;
930 runtime·asmcgocall(_cgo_thread_start, &ts); 935 runtime·asmcgocall(_cgo_thread_start, &ts);
931 return; 936 return;
932 } 937 }
933 runtime·newosproc(mp, (byte*)mp->g0->stackbase); 938 runtime·newosproc(mp, (byte*)mp->g0->stackbase);
934 } 939 }
935 940
936 // Stops execution of the current m until new work is available. 941 // Stops execution of the current m until new work is available.
937 // Returns with acquired P. 942 // Returns with acquired P.
938 static void 943 static void
939 stopm(void) 944 stopm(void)
940 { 945 {
941 » if(m->locks) 946 » if(g->m->locks)
942 runtime·throw("stopm holding locks"); 947 runtime·throw("stopm holding locks");
943 » if(m->p) 948 » if(g->m->p)
944 runtime·throw("stopm holding p"); 949 runtime·throw("stopm holding p");
945 » if(m->spinning) { 950 » if(g->m->spinning) {
946 » » m->spinning = false; 951 » » g->m->spinning = false;
947 runtime·xadd(&runtime·sched.nmspinning, -1); 952 runtime·xadd(&runtime·sched.nmspinning, -1);
948 } 953 }
949 954
950 retry: 955 retry:
951 runtime·lock(&runtime·sched); 956 runtime·lock(&runtime·sched);
952 » mput(m); 957 » mput(g->m);
953 runtime·unlock(&runtime·sched); 958 runtime·unlock(&runtime·sched);
954 » runtime·notesleep(&m->park); 959 » runtime·notesleep(&g->m->park);
955 » runtime·noteclear(&m->park); 960 » runtime·noteclear(&g->m->park);
956 » if(m->helpgc) { 961 » if(g->m->helpgc) {
957 runtime·gchelper(); 962 runtime·gchelper();
958 » » m->helpgc = 0; 963 » » g->m->helpgc = 0;
959 » » m->mcache = nil; 964 » » g->m->mcache = nil;
960 goto retry; 965 goto retry;
961 } 966 }
962 » acquirep(m->nextp); 967 » acquirep(g->m->nextp);
963 » m->nextp = nil; 968 » g->m->nextp = nil;
964 } 969 }
965 970
966 static void 971 static void
967 mspinning(void) 972 mspinning(void)
968 { 973 {
969 » m->spinning = true; 974 » g->m->spinning = true;
970 } 975 }
971 976
972 // Schedules some M to run the p (creates an M if necessary). 977 // Schedules some M to run the p (creates an M if necessary).
973 // If p==nil, tries to get an idle P, if no idle P's does nothing. 978 // If p==nil, tries to get an idle P, if no idle P's does nothing.
974 static void 979 static void
975 startm(P *p, bool spinning) 980 startm(P *p, bool spinning)
976 { 981 {
977 M *mp; 982 M *mp;
978 void (*fn)(void); 983 void (*fn)(void);
979 984
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after
1056 startm(nil, true); 1061 startm(nil, true);
1057 } 1062 }
1058 1063
1059 // Stops execution of the current m that is locked to a g until the g is runnable again. 1064 // Stops execution of the current m that is locked to a g until the g is runnable again.
1060 // Returns with acquired P. 1065 // Returns with acquired P.
1061 static void 1066 static void
1062 stoplockedm(void) 1067 stoplockedm(void)
1063 { 1068 {
1064 P *p; 1069 P *p;
1065 1070
1066 » if(m->lockedg == nil || m->lockedg->lockedm != m) 1071 » if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
1067 runtime·throw("stoplockedm: inconsistent locking"); 1072 runtime·throw("stoplockedm: inconsistent locking");
1068 » if(m->p) { 1073 » if(g->m->p) {
1069 // Schedule another M to run this p. 1074 // Schedule another M to run this p.
1070 p = releasep(); 1075 p = releasep();
1071 handoffp(p); 1076 handoffp(p);
1072 } 1077 }
1073 incidlelocked(1); 1078 incidlelocked(1);
1074 // Wait until another thread schedules lockedg again. 1079 // Wait until another thread schedules lockedg again.
1075 » runtime·notesleep(&m->park); 1080 » runtime·notesleep(&g->m->park);
1076 » runtime·noteclear(&m->park); 1081 » runtime·noteclear(&g->m->park);
1077 » if(m->lockedg->status != Grunnable) 1082 » if(g->m->lockedg->status != Grunnable)
1078 runtime·throw("stoplockedm: not runnable"); 1083 runtime·throw("stoplockedm: not runnable");
1079 » acquirep(m->nextp); 1084 » acquirep(g->m->nextp);
1080 » m->nextp = nil; 1085 » g->m->nextp = nil;
1081 } 1086 }
1082 1087
1083 // Schedules the locked m to run the locked gp. 1088 // Schedules the locked m to run the locked gp.
1084 static void 1089 static void
1085 startlockedm(G *gp) 1090 startlockedm(G *gp)
1086 { 1091 {
1087 M *mp; 1092 M *mp;
1088 P *p; 1093 P *p;
1089 1094
1090 mp = gp->lockedm; 1095 mp = gp->lockedm;
1091 » if(mp == m) 1096 » if(mp == g->m)
1092 runtime·throw("startlockedm: locked to me"); 1097 runtime·throw("startlockedm: locked to me");
1093 if(mp->nextp) 1098 if(mp->nextp)
1094 runtime·throw("startlockedm: m has p"); 1099 runtime·throw("startlockedm: m has p");
1095 // directly handoff current P to the locked m 1100 // directly handoff current P to the locked m
1096 incidlelocked(-1); 1101 incidlelocked(-1);
1097 p = releasep(); 1102 p = releasep();
1098 mp->nextp = p; 1103 mp->nextp = p;
1099 runtime·notewakeup(&mp->park); 1104 runtime·notewakeup(&mp->park);
1100 stopm(); 1105 stopm();
1101 } 1106 }
1102 1107
1103 // Stops the current m for stoptheworld. 1108 // Stops the current m for stoptheworld.
1104 // Returns when the world is restarted. 1109 // Returns when the world is restarted.
1105 static void 1110 static void
1106 gcstopm(void) 1111 gcstopm(void)
1107 { 1112 {
1108 P *p; 1113 P *p;
1109 1114
1110 if(!runtime·sched.gcwaiting) 1115 if(!runtime·sched.gcwaiting)
1111 runtime·throw("gcstopm: not waiting for gc"); 1116 runtime·throw("gcstopm: not waiting for gc");
1112 » if(m->spinning) { 1117 » if(g->m->spinning) {
1113 » » m->spinning = false; 1118 » » g->m->spinning = false;
1114 runtime·xadd(&runtime·sched.nmspinning, -1); 1119 runtime·xadd(&runtime·sched.nmspinning, -1);
1115 } 1120 }
1116 p = releasep(); 1121 p = releasep();
1117 runtime·lock(&runtime·sched); 1122 runtime·lock(&runtime·sched);
1118 p->status = Pgcstop; 1123 p->status = Pgcstop;
1119 if(--runtime·sched.stopwait == 0) 1124 if(--runtime·sched.stopwait == 0)
1120 runtime·notewakeup(&runtime·sched.stopnote); 1125 runtime·notewakeup(&runtime·sched.stopnote);
1121 runtime·unlock(&runtime·sched); 1126 runtime·unlock(&runtime·sched);
1122 stopm(); 1127 stopm();
1123 } 1128 }
1124 1129
1125 // Schedules gp to run on the current M. 1130 // Schedules gp to run on the current M.
1126 // Never returns. 1131 // Never returns.
1127 static void 1132 static void
1128 execute(G *gp) 1133 execute(G *gp)
1129 { 1134 {
1130 int32 hz; 1135 int32 hz;
1131 1136
1132 if(gp->status != Grunnable) { 1137 if(gp->status != Grunnable) {
1133 runtime·printf("execute: bad g status %d\n", gp->status); 1138 runtime·printf("execute: bad g status %d\n", gp->status);
1134 runtime·throw("execute: bad g status"); 1139 runtime·throw("execute: bad g status");
1135 } 1140 }
1136 gp->status = Grunning; 1141 gp->status = Grunning;
1137 gp->waitsince = 0; 1142 gp->waitsince = 0;
1138 gp->preempt = false; 1143 gp->preempt = false;
1139 gp->stackguard0 = gp->stackguard; 1144 gp->stackguard0 = gp->stackguard;
1140 » m->p->schedtick++; 1145 » g->m->p->schedtick++;
1141 » m->curg = gp; 1146 » g->m->curg = gp;
1142 » gp->m = m; 1147 » gp->m = g->m;
1143 1148
1144 // Check whether the profiler needs to be turned on or off. 1149 // Check whether the profiler needs to be turned on or off.
1145 hz = runtime·sched.profilehz; 1150 hz = runtime·sched.profilehz;
1146 » if(m->profilehz != hz) 1151 » if(g->m->profilehz != hz)
1147 runtime·resetcpuprofiler(hz); 1152 runtime·resetcpuprofiler(hz);
1148 1153
1149 runtime·gogo(&gp->sched); 1154 runtime·gogo(&gp->sched);
1150 } 1155 }
1151 1156
1152 // Finds a runnable goroutine to execute. 1157 // Finds a runnable goroutine to execute.
1153 // Tries to steal from other P's, get g from global queue, poll network. 1158 // Tries to steal from other P's, get g from global queue, poll network.
1154 static G* 1159 static G*
1155 findrunnable(void) 1160 findrunnable(void)
1156 { 1161 {
1157 G *gp; 1162 G *gp;
1158 P *p; 1163 P *p;
1159 int32 i; 1164 int32 i;
1160 1165
1161 top: 1166 top:
1162 if(runtime·sched.gcwaiting) { 1167 if(runtime·sched.gcwaiting) {
1163 gcstopm(); 1168 gcstopm();
1164 goto top; 1169 goto top;
1165 } 1170 }
1166 if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil) 1171 if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
1167 runtime·ready(gp); 1172 runtime·ready(gp);
1168 // local runq 1173 // local runq
1169 » gp = runqget(m->p); 1174 » gp = runqget(g->m->p);
1170 if(gp) 1175 if(gp)
1171 return gp; 1176 return gp;
1172 // global runq 1177 // global runq
1173 if(runtime·sched.runqsize) { 1178 if(runtime·sched.runqsize) {
1174 runtime·lock(&runtime·sched); 1179 runtime·lock(&runtime·sched);
1175 » » gp = globrunqget(m->p, 0); 1180 » » gp = globrunqget(g->m->p, 0);
1176 runtime·unlock(&runtime·sched); 1181 runtime·unlock(&runtime·sched);
1177 if(gp) 1182 if(gp)
1178 return gp; 1183 return gp;
1179 } 1184 }
1180 // poll network 1185 // poll network
1181 gp = runtime·netpoll(false); // non-blocking 1186 gp = runtime·netpoll(false); // non-blocking
1182 if(gp) { 1187 if(gp) {
1183 injectglist(gp->schedlink); 1188 injectglist(gp->schedlink);
1184 gp->status = Grunnable; 1189 gp->status = Grunnable;
1185 return gp; 1190 return gp;
1186 } 1191 }
1187 // If number of spinning M's >= number of busy P's, block. 1192 // If number of spinning M's >= number of busy P's, block.
1188 // This is necessary to prevent excessive CPU consumption 1193 // This is necessary to prevent excessive CPU consumption
1189 // when GOMAXPROCS>>1 but the program parallelism is low. 1194 // when GOMAXPROCS>>1 but the program parallelism is low.
1190 » if(!m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle)) // TODO: fast a tomic 1195 » if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle)) // TODO: fas t atomic
1191 goto stop; 1196 goto stop;
1192 » if(!m->spinning) { 1197 » if(!g->m->spinning) {
1193 » » m->spinning = true; 1198 » » g->m->spinning = true;
1194 runtime·xadd(&runtime·sched.nmspinning, 1); 1199 runtime·xadd(&runtime·sched.nmspinning, 1);
1195 } 1200 }
1196 // random steal from other P's 1201 // random steal from other P's
1197 for(i = 0; i < 2*runtime·gomaxprocs; i++) { 1202 for(i = 0; i < 2*runtime·gomaxprocs; i++) {
1198 if(runtime·sched.gcwaiting) 1203 if(runtime·sched.gcwaiting)
1199 goto top; 1204 goto top;
1200 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs]; 1205 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
1201 » » if(p == m->p) 1206 » » if(p == g->m->p)
1202 gp = runqget(p); 1207 gp = runqget(p);
1203 else 1208 else
1204 » » » gp = runqsteal(m->p, p); 1209 » » » gp = runqsteal(g->m->p, p);
1205 if(gp) 1210 if(gp)
1206 return gp; 1211 return gp;
1207 } 1212 }
1208 stop: 1213 stop:
1209 // return P and block 1214 // return P and block
1210 runtime·lock(&runtime·sched); 1215 runtime·lock(&runtime·sched);
1211 if(runtime·sched.gcwaiting) { 1216 if(runtime·sched.gcwaiting) {
1212 runtime·unlock(&runtime·sched); 1217 runtime·unlock(&runtime·sched);
1213 goto top; 1218 goto top;
1214 } 1219 }
1215 if(runtime·sched.runqsize) { 1220 if(runtime·sched.runqsize) {
1216 » » gp = globrunqget(m->p, 0); 1221 » » gp = globrunqget(g->m->p, 0);
1217 runtime·unlock(&runtime·sched); 1222 runtime·unlock(&runtime·sched);
1218 return gp; 1223 return gp;
1219 } 1224 }
1220 p = releasep(); 1225 p = releasep();
1221 pidleput(p); 1226 pidleput(p);
1222 runtime·unlock(&runtime·sched); 1227 runtime·unlock(&runtime·sched);
1223 » if(m->spinning) { 1228 » if(g->m->spinning) {
1224 » » m->spinning = false; 1229 » » g->m->spinning = false;
1225 runtime·xadd(&runtime·sched.nmspinning, -1); 1230 runtime·xadd(&runtime·sched.nmspinning, -1);
1226 } 1231 }
1227 // check all runqueues once again 1232 // check all runqueues once again
1228 for(i = 0; i < runtime·gomaxprocs; i++) { 1233 for(i = 0; i < runtime·gomaxprocs; i++) {
1229 p = runtime·allp[i]; 1234 p = runtime·allp[i];
1230 if(p && p->runqhead != p->runqtail) { 1235 if(p && p->runqhead != p->runqtail) {
1231 runtime·lock(&runtime·sched); 1236 runtime·lock(&runtime·sched);
1232 p = pidleget(); 1237 p = pidleget();
1233 runtime·unlock(&runtime·sched); 1238 runtime·unlock(&runtime·sched);
1234 if(p) { 1239 if(p) {
1235 acquirep(p); 1240 acquirep(p);
1236 goto top; 1241 goto top;
1237 } 1242 }
1238 break; 1243 break;
1239 } 1244 }
1240 } 1245 }
1241 // poll network 1246 // poll network
1242 if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) { 1247 if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
1243 » » if(m->p) 1248 » » if(g->m->p)
1244 runtime·throw("findrunnable: netpoll with p"); 1249 runtime·throw("findrunnable: netpoll with p");
1245 » » if(m->spinning) 1250 » » if(g->m->spinning)
1246 runtime·throw("findrunnable: netpoll with spinning"); 1251 runtime·throw("findrunnable: netpoll with spinning");
1247 gp = runtime·netpoll(true); // block until new work is availabl e 1252 gp = runtime·netpoll(true); // block until new work is availabl e
1248 runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime( )); 1253 runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime( ));
1249 if(gp) { 1254 if(gp) {
1250 runtime·lock(&runtime·sched); 1255 runtime·lock(&runtime·sched);
1251 p = pidleget(); 1256 p = pidleget();
1252 runtime·unlock(&runtime·sched); 1257 runtime·unlock(&runtime·sched);
1253 if(p) { 1258 if(p) {
1254 acquirep(p); 1259 acquirep(p);
1255 injectglist(gp->schedlink); 1260 injectglist(gp->schedlink);
1256 gp->status = Grunnable; 1261 gp->status = Grunnable;
1257 return gp; 1262 return gp;
1258 } 1263 }
1259 injectglist(gp); 1264 injectglist(gp);
1260 } 1265 }
1261 } 1266 }
1262 stopm(); 1267 stopm();
1263 goto top; 1268 goto top;
1264 } 1269 }
1265 1270
1266 static void 1271 static void
1267 resetspinning(void) 1272 resetspinning(void)
1268 { 1273 {
1269 int32 nmspinning; 1274 int32 nmspinning;
1270 1275
1271 » if(m->spinning) { 1276 » if(g->m->spinning) {
1272 » » m->spinning = false; 1277 » » g->m->spinning = false;
1273 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1); 1278 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
1274 if(nmspinning < 0) 1279 if(nmspinning < 0)
1275 runtime·throw("findrunnable: negative nmspinning"); 1280 runtime·throw("findrunnable: negative nmspinning");
1276 } else 1281 } else
1277 nmspinning = runtime·atomicload(&runtime·sched.nmspinning); 1282 nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
1278 1283
1279 // M wakeup policy is deliberately somewhat conservative (see nmspinning handling), 1284 // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
1280 // so see if we need to wakeup another P here. 1285 // so see if we need to wakeup another P here.
1281 if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0) 1286 if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
1282 wakep(); 1287 wakep();
(...skipping 23 matching lines...) Expand all
1306 } 1311 }
1307 1312
1308 // One round of scheduler: find a runnable goroutine and execute it. 1313 // One round of scheduler: find a runnable goroutine and execute it.
1309 // Never returns. 1314 // Never returns.
1310 static void 1315 static void
1311 schedule(void) 1316 schedule(void)
1312 { 1317 {
1313 G *gp; 1318 G *gp;
1314 uint32 tick; 1319 uint32 tick;
1315 1320
1316 » if(m->locks) 1321 » if(g->m->locks)
1317 runtime·throw("schedule: holding locks"); 1322 runtime·throw("schedule: holding locks");
1318 1323
1319 top: 1324 top:
1320 if(runtime·sched.gcwaiting) { 1325 if(runtime·sched.gcwaiting) {
1321 gcstopm(); 1326 gcstopm();
1322 goto top; 1327 goto top;
1323 } 1328 }
1324 1329
1325 gp = nil; 1330 gp = nil;
1326 // Check the global runnable queue once in a while to ensure fairness. 1331 // Check the global runnable queue once in a while to ensure fairness.
1327 // Otherwise two goroutines can completely occupy the local runqueue 1332 // Otherwise two goroutines can completely occupy the local runqueue
1328 // by constantly respawning each other. 1333 // by constantly respawning each other.
1329 » tick = m->p->schedtick; 1334 » tick = g->m->p->schedtick;
1330 // This is a fancy way to say tick%61==0, 1335 // This is a fancy way to say tick%61==0,
1331 // it uses 2 MUL instructions instead of a single DIV and so is faster o n modern processors. 1336 // it uses 2 MUL instructions instead of a single DIV and so is faster o n modern processors.
1332 if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runq size > 0) { 1337 if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runq size > 0) {
1333 runtime·lock(&runtime·sched); 1338 runtime·lock(&runtime·sched);
1334 » » gp = globrunqget(m->p, 1); 1339 » » gp = globrunqget(g->m->p, 1);
1335 runtime·unlock(&runtime·sched); 1340 runtime·unlock(&runtime·sched);
1336 if(gp) 1341 if(gp)
1337 resetspinning(); 1342 resetspinning();
1338 } 1343 }
1339 if(gp == nil) { 1344 if(gp == nil) {
1340 » » gp = runqget(m->p); 1345 » » gp = runqget(g->m->p);
1341 » » if(gp && m->spinning) 1346 » » if(gp && g->m->spinning)
1342 runtime·throw("schedule: spinning with local work"); 1347 runtime·throw("schedule: spinning with local work");
1343 } 1348 }
1344 if(gp == nil) { 1349 if(gp == nil) {
1345 gp = findrunnable(); // blocks until work is available 1350 gp = findrunnable(); // blocks until work is available
1346 resetspinning(); 1351 resetspinning();
1347 } 1352 }
1348 1353
1349 if(gp->lockedm) { 1354 if(gp->lockedm) {
1350 // Hands off own p to the locked m, 1355 // Hands off own p to the locked m,
1351 // then blocks waiting for a new p. 1356 // then blocks waiting for a new p.
1352 startlockedm(gp); 1357 startlockedm(gp);
1353 goto top; 1358 goto top;
1354 } 1359 }
1355 1360
1356 execute(gp); 1361 execute(gp);
1357 } 1362 }
1358 1363
1359 // Puts the current goroutine into a waiting state and calls unlockf. 1364 // Puts the current goroutine into a waiting state and calls unlockf.
1360 // If unlockf returns false, the goroutine is resumed. 1365 // If unlockf returns false, the goroutine is resumed.
1361 void 1366 void
1362 runtime·park(bool(*unlockf)(G*, void*), void *lock, int8 *reason) 1367 runtime·park(bool(*unlockf)(G*, void*), void *lock, int8 *reason)
1363 { 1368 {
1364 if(g->status != Grunning) 1369 if(g->status != Grunning)
1365 runtime·throw("bad g status"); 1370 runtime·throw("bad g status");
1366 » m->waitlock = lock; 1371 » g->m->waitlock = lock;
1367 » m->waitunlockf = unlockf; 1372 » g->m->waitunlockf = unlockf;
1368 g->waitreason = reason; 1373 g->waitreason = reason;
1369 runtime·mcall(park0); 1374 runtime·mcall(park0);
1370 } 1375 }
1371 1376
1372 static bool 1377 static bool
1373 parkunlock(G *gp, void *lock) 1378 parkunlock(G *gp, void *lock)
1374 { 1379 {
1375 USED(gp); 1380 USED(gp);
1376 runtime·unlock(lock); 1381 runtime·unlock(lock);
1377 return true; 1382 return true;
1378 } 1383 }
1379 1384
1380 // Puts the current goroutine into a waiting state and unlocks the lock. 1385 // Puts the current goroutine into a waiting state and unlocks the lock.
1381 // The goroutine can be made runnable again by calling runtime·ready(gp). 1386 // The goroutine can be made runnable again by calling runtime·ready(gp).
1382 void 1387 void
1383 runtime·parkunlock(Lock *lock, int8 *reason) 1388 runtime·parkunlock(Lock *lock, int8 *reason)
1384 { 1389 {
1385 runtime·park(parkunlock, lock, reason); 1390 runtime·park(parkunlock, lock, reason);
1386 } 1391 }
1387 1392
1388 // runtime·park continuation on g0. 1393 // runtime·park continuation on g0.
1389 static void 1394 static void
1390 park0(G *gp) 1395 park0(G *gp)
1391 { 1396 {
1392 bool ok; 1397 bool ok;
1393 1398
1394 gp->status = Gwaiting; 1399 gp->status = Gwaiting;
1395 gp->m = nil; 1400 gp->m = nil;
1396 » m->curg = nil; 1401 » g->m->curg = nil;
1397 » if(m->waitunlockf) { 1402 » if(g->m->waitunlockf) {
1398 » » ok = m->waitunlockf(gp, m->waitlock); 1403 » » ok = g->m->waitunlockf(gp, g->m->waitlock);
1399 » » m->waitunlockf = nil; 1404 » » g->m->waitunlockf = nil;
1400 » » m->waitlock = nil; 1405 » » g->m->waitlock = nil;
1401 if(!ok) { 1406 if(!ok) {
1402 gp->status = Grunnable; 1407 gp->status = Grunnable;
1403 execute(gp); // Schedule it back, never returns. 1408 execute(gp); // Schedule it back, never returns.
1404 } 1409 }
1405 } 1410 }
1406 » if(m->lockedg) { 1411 » if(g->m->lockedg) {
1407 stoplockedm(); 1412 stoplockedm();
1408 execute(gp); // Never returns. 1413 execute(gp); // Never returns.
1409 } 1414 }
1410 schedule(); 1415 schedule();
1411 } 1416 }
1412 1417
1413 // Scheduler yield. 1418 // Scheduler yield.
1414 void 1419 void
1415 runtime·gosched(void) 1420 runtime·gosched(void)
1416 { 1421 {
1417 if(g->status != Grunning) 1422 if(g->status != Grunning)
1418 runtime·throw("bad g status"); 1423 runtime·throw("bad g status");
1419 runtime·mcall(runtime·gosched0); 1424 runtime·mcall(runtime·gosched0);
1420 } 1425 }
1421 1426
1422 // runtime·gosched continuation on g0. 1427 // runtime·gosched continuation on g0.
1423 void 1428 void
1424 runtime·gosched0(G *gp) 1429 runtime·gosched0(G *gp)
1425 { 1430 {
1426 gp->status = Grunnable; 1431 gp->status = Grunnable;
1427 gp->m = nil; 1432 gp->m = nil;
1428 » m->curg = nil; 1433 » g->m->curg = nil;
1429 runtime·lock(&runtime·sched); 1434 runtime·lock(&runtime·sched);
1430 globrunqput(gp); 1435 globrunqput(gp);
1431 runtime·unlock(&runtime·sched); 1436 runtime·unlock(&runtime·sched);
1432 » if(m->lockedg) { 1437 » if(g->m->lockedg) {
1433 stoplockedm(); 1438 stoplockedm();
1434 execute(gp); // Never returns. 1439 execute(gp); // Never returns.
1435 } 1440 }
1436 schedule(); 1441 schedule();
1437 } 1442 }
1438 1443
1439 // Finishes execution of the current goroutine. 1444 // Finishes execution of the current goroutine.
1440 // Need to mark it as nosplit, because it runs with sp > stackbase (as runtime·l essstack). 1445 // Need to mark it as nosplit, because it runs with sp > stackbase (as runtime·l essstack).
1441 // Since it does not return it does not matter. But if it is preempted 1446 // Since it does not return it does not matter. But if it is preempted
1442 // at the split stack check, GC will complain about inconsistent sp. 1447 // at the split stack check, GC will complain about inconsistent sp.
(...skipping 15 matching lines...) Expand all
1458 gp->status = Gdead; 1463 gp->status = Gdead;
1459 gp->m = nil; 1464 gp->m = nil;
1460 gp->lockedm = nil; 1465 gp->lockedm = nil;
1461 gp->paniconfault = 0; 1466 gp->paniconfault = 0;
1462 gp->defer = nil; // should be true already but just in case. 1467 gp->defer = nil; // should be true already but just in case.
1463 gp->panic = nil; // non-nil for Goexit during panic. points at stack-all ocated data. 1468 gp->panic = nil; // non-nil for Goexit during panic. points at stack-all ocated data.
1464 gp->writenbuf = 0; 1469 gp->writenbuf = 0;
1465 gp->writebuf = nil; 1470 gp->writebuf = nil;
1466 gp->waitreason = nil; 1471 gp->waitreason = nil;
1467 gp->param = nil; 1472 gp->param = nil;
1468 » m->curg = nil; 1473 » g->m->curg = nil;
1469 » m->lockedg = nil; 1474 » g->m->lockedg = nil;
1470 » if(m->locked & ~LockExternal) { 1475 » if(g->m->locked & ~LockExternal) {
1471 » » runtime·printf("invalid m->locked = %d\n", m->locked); 1476 » » runtime·printf("invalid m->locked = %d\n", g->m->locked);
1472 runtime·throw("internal lockOSThread error"); 1477 runtime·throw("internal lockOSThread error");
1473 }······· 1478 }·······
1474 » m->locked = 0; 1479 » g->m->locked = 0;
1475 runtime·unwindstack(gp, nil); 1480 runtime·unwindstack(gp, nil);
1476 » gfput(m->p, gp); 1481 » gfput(g->m->p, gp);
1477 schedule(); 1482 schedule();
1478 } 1483 }
1479 1484
1480 #pragma textflag NOSPLIT 1485 #pragma textflag NOSPLIT
1481 static void 1486 static void
1482 save(void *pc, uintptr sp) 1487 save(void *pc, uintptr sp)
1483 { 1488 {
1484 g->sched.pc = (uintptr)pc; 1489 g->sched.pc = (uintptr)pc;
1485 g->sched.sp = sp; 1490 g->sched.sp = sp;
1486 g->sched.lr = 0; 1491 g->sched.lr = 0;
1487 g->sched.ret = 0; 1492 g->sched.ret = 0;
1488 g->sched.ctxt = 0; 1493 g->sched.ctxt = 0;
1489 g->sched.g = g; 1494 g->sched.g = g;
1490 } 1495 }
1491 1496
1492 // The goroutine g is about to enter a system call. 1497 // The goroutine g is about to enter a system call.
1493 // Record that it's not using the cpu anymore. 1498 // Record that it's not using the cpu anymore.
1494 // This is called only from the go syscall library and cgocall, 1499 // This is called only from the go syscall library and cgocall,
1495 // not from the low-level system calls used by the runtime. 1500 // not from the low-level system calls used by the runtime.
1496 // 1501 //
1497 // Entersyscall cannot split the stack: the runtime·gosave must 1502 // Entersyscall cannot split the stack: the runtime·gosave must
1498 // make g->sched refer to the caller's stack segment, because 1503 // make g->sched refer to the caller's stack segment, because
1499 // entersyscall is going to return immediately after. 1504 // entersyscall is going to return immediately after.
1500 #pragma textflag NOSPLIT 1505 #pragma textflag NOSPLIT
1501 void 1506 void
1502 ·entersyscall(int32 dummy) 1507 ·entersyscall(int32 dummy)
1503 { 1508 {
1504 // Disable preemption because during this function g is in Gsyscall stat us, 1509 // Disable preemption because during this function g is in Gsyscall stat us,
1505 // but can have inconsistent g->sched, do not let GC observe it. 1510 // but can have inconsistent g->sched, do not let GC observe it.
1506 » m->locks++; 1511 » g->m->locks++;
1507 1512
1508 // Leave SP around for GC and traceback. 1513 // Leave SP around for GC and traceback.
1509 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); 1514 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1510 g->syscallsp = g->sched.sp; 1515 g->syscallsp = g->sched.sp;
1511 g->syscallpc = g->sched.pc; 1516 g->syscallpc = g->sched.pc;
1512 g->syscallstack = g->stackbase; 1517 g->syscallstack = g->stackbase;
1513 g->syscallguard = g->stackguard; 1518 g->syscallguard = g->stackguard;
1514 g->status = Gsyscall; 1519 g->status = Gsyscall;
1515 if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->sys callsp) { 1520 if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->sys callsp) {
1516 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n", 1521 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
1517 // g->syscallsp, g->syscallguard-StackGuard, g->syscallstac k); 1522 // g->syscallsp, g->syscallguard-StackGuard, g->syscallstac k);
1518 runtime·throw("entersyscall"); 1523 runtime·throw("entersyscall");
1519 } 1524 }
1520 1525
1521 if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomi c 1526 if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomi c
1522 runtime·lock(&runtime·sched); 1527 runtime·lock(&runtime·sched);
1523 if(runtime·atomicload(&runtime·sched.sysmonwait)) { 1528 if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1524 runtime·atomicstore(&runtime·sched.sysmonwait, 0); 1529 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1525 runtime·notewakeup(&runtime·sched.sysmonnote); 1530 runtime·notewakeup(&runtime·sched.sysmonnote);
1526 } 1531 }
1527 runtime·unlock(&runtime·sched); 1532 runtime·unlock(&runtime·sched);
1528 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); 1533 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1529 } 1534 }
1530 1535
1531 » m->mcache = nil; 1536 » g->m->mcache = nil;
1532 » m->p->m = nil; 1537 » g->m->p->m = nil;
1533 » runtime·atomicstore(&m->p->status, Psyscall); 1538 » runtime·atomicstore(&g->m->p->status, Psyscall);
1534 if(runtime·sched.gcwaiting) { 1539 if(runtime·sched.gcwaiting) {
1535 runtime·lock(&runtime·sched); 1540 runtime·lock(&runtime·sched);
1536 » » if (runtime·sched.stopwait > 0 && runtime·cas(&m->p->status, Psy scall, Pgcstop)) { 1541 » » if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
1537 if(--runtime·sched.stopwait == 0) 1542 if(--runtime·sched.stopwait == 0)
1538 runtime·notewakeup(&runtime·sched.stopnote); 1543 runtime·notewakeup(&runtime·sched.stopnote);
1539 } 1544 }
1540 runtime·unlock(&runtime·sched); 1545 runtime·unlock(&runtime·sched);
1541 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); 1546 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1542 } 1547 }
1543 1548
1544 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 1549 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
1545 // We set stackguard to StackPreempt so that first split stack check cal ls morestack. 1550 // We set stackguard to StackPreempt so that first split stack check cal ls morestack.
1546 // Morestack detects this case and throws. 1551 // Morestack detects this case and throws.
1547 g->stackguard0 = StackPreempt; 1552 g->stackguard0 = StackPreempt;
1548 » m->locks--; 1553 » g->m->locks--;
1549 } 1554 }
1550 1555
1551 // The same as runtime·entersyscall(), but with a hint that the syscall is block ing. 1556 // The same as runtime·entersyscall(), but with a hint that the syscall is block ing.
1552 #pragma textflag NOSPLIT 1557 #pragma textflag NOSPLIT
1553 void 1558 void
1554 ·entersyscallblock(int32 dummy) 1559 ·entersyscallblock(int32 dummy)
1555 { 1560 {
1556 P *p; 1561 P *p;
1557 1562
1558 » m->locks++; // see comment in entersyscall 1563 » g->m->locks++; // see comment in entersyscall
1559 1564
1560 // Leave SP around for GC and traceback. 1565 // Leave SP around for GC and traceback.
1561 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); 1566 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1562 g->syscallsp = g->sched.sp; 1567 g->syscallsp = g->sched.sp;
1563 g->syscallpc = g->sched.pc; 1568 g->syscallpc = g->sched.pc;
1564 g->syscallstack = g->stackbase; 1569 g->syscallstack = g->stackbase;
1565 g->syscallguard = g->stackguard; 1570 g->syscallguard = g->stackguard;
1566 g->status = Gsyscall; 1571 g->status = Gsyscall;
1567 if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->sys callsp) { 1572 if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->sys callsp) {
1568 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n", 1573 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
1569 // g->syscallsp, g->syscallguard-StackGuard, g->syscallstac k); 1574 // g->syscallsp, g->syscallguard-StackGuard, g->syscallstac k);
1570 runtime·throw("entersyscallblock"); 1575 runtime·throw("entersyscallblock");
1571 } 1576 }
1572 1577
1573 p = releasep(); 1578 p = releasep();
1574 handoffp(p); 1579 handoffp(p);
1575 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection 1580 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection
1576 incidlelocked(1); 1581 incidlelocked(1);
1577 1582
1578 // Resave for traceback during blocked call. 1583 // Resave for traceback during blocked call.
1579 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); 1584 save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
1580 1585
1581 g->stackguard0 = StackPreempt; // see comment in entersyscall 1586 g->stackguard0 = StackPreempt; // see comment in entersyscall
1582 » m->locks--; 1587 » g->m->locks--;
1583 } 1588 }
1584 1589
1585 // The goroutine g exited its system call. 1590 // The goroutine g exited its system call.
1586 // Arrange for it to run on a cpu again. 1591 // Arrange for it to run on a cpu again.
1587 // This is called only from the go syscall library, not 1592 // This is called only from the go syscall library, not
1588 // from the low-level system calls used by the runtime. 1593 // from the low-level system calls used by the runtime.
1589 #pragma textflag NOSPLIT 1594 #pragma textflag NOSPLIT
1590 void 1595 void
1591 runtime·exitsyscall(void) 1596 runtime·exitsyscall(void)
1592 { 1597 {
1593 » m->locks++; // see comment in entersyscall 1598 » g->m->locks++; // see comment in entersyscall
1594 1599
1595 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection 1600 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection
1596 incidlelocked(-1); 1601 incidlelocked(-1);
1597 1602
1598 g->waitsince = 0; 1603 g->waitsince = 0;
1599 if(exitsyscallfast()) { 1604 if(exitsyscallfast()) {
1600 // There's a cpu for us, so we can run. 1605 // There's a cpu for us, so we can run.
1601 » » m->p->syscalltick++; 1606 » » g->m->p->syscalltick++;
1602 g->status = Grunning; 1607 g->status = Grunning;
1603 // Garbage collector isn't running (since we are), 1608 // Garbage collector isn't running (since we are),
1604 // so okay to clear gcstack and gcsp. 1609 // so okay to clear gcstack and gcsp.
1605 g->syscallstack = (uintptr)nil; 1610 g->syscallstack = (uintptr)nil;
1606 g->syscallsp = (uintptr)nil; 1611 g->syscallsp = (uintptr)nil;
1607 » » m->locks--; 1612 » » g->m->locks--;
1608 if(g->preempt) { 1613 if(g->preempt) {
1609 // restore the preemption request in case we've cleared it in newstack 1614 // restore the preemption request in case we've cleared it in newstack
1610 g->stackguard0 = StackPreempt; 1615 g->stackguard0 = StackPreempt;
1611 } else { 1616 } else {
1612 // otherwise restore the real stackguard, we've spoiled it in entersyscall/entersyscallblock 1617 // otherwise restore the real stackguard, we've spoiled it in entersyscall/entersyscallblock
1613 g->stackguard0 = g->stackguard; 1618 g->stackguard0 = g->stackguard;
1614 } 1619 }
1615 return; 1620 return;
1616 } 1621 }
1617 1622
1618 » m->locks--; 1623 » g->m->locks--;
1619 1624
1620 // Call the scheduler. 1625 // Call the scheduler.
1621 runtime·mcall(exitsyscall0); 1626 runtime·mcall(exitsyscall0);
1622 1627
1623 // Scheduler returned, so we're allowed to run now. 1628 // Scheduler returned, so we're allowed to run now.
1624 // Delete the gcstack information that we left for 1629 // Delete the gcstack information that we left for
1625 // the garbage collector during the system call. 1630 // the garbage collector during the system call.
1626 // Must wait until now because until gosched returns 1631 // Must wait until now because until gosched returns
1627 // we don't know for sure that the garbage collector 1632 // we don't know for sure that the garbage collector
1628 // is not running. 1633 // is not running.
1629 g->syscallstack = (uintptr)nil; 1634 g->syscallstack = (uintptr)nil;
1630 g->syscallsp = (uintptr)nil; 1635 g->syscallsp = (uintptr)nil;
1631 » m->p->syscalltick++; 1636 » g->m->p->syscalltick++;
1632 } 1637 }
1633 1638
1634 #pragma textflag NOSPLIT 1639 #pragma textflag NOSPLIT
1635 static bool 1640 static bool
1636 exitsyscallfast(void) 1641 exitsyscallfast(void)
1637 { 1642 {
1638 P *p; 1643 P *p;
1639 1644
1640 // Freezetheworld sets stopwait but does not retake P's. 1645 // Freezetheworld sets stopwait but does not retake P's.
1641 if(runtime·sched.stopwait) { 1646 if(runtime·sched.stopwait) {
1642 » » m->p = nil; 1647 » » g->m->p = nil;
1643 return false; 1648 return false;
1644 } 1649 }
1645 1650
1646 // Try to re-acquire the last P. 1651 // Try to re-acquire the last P.
1647 » if(m->p && m->p->status == Psyscall && runtime·cas(&m->p->status, Psysca ll, Prunning)) { 1652 » if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->statu s, Psyscall, Prunning)) {
1648 // There's a cpu for us, so we can run. 1653 // There's a cpu for us, so we can run.
1649 » » m->mcache = m->p->mcache; 1654 » » g->m->mcache = g->m->p->mcache;
1650 » » m->p->m = m; 1655 » » g->m->p->m = g->m;
1651 return true; 1656 return true;
1652 } 1657 }
1653 // Try to get any other idle P. 1658 // Try to get any other idle P.
1654 » m->p = nil; 1659 » g->m->p = nil;
1655 if(runtime·sched.pidle) { 1660 if(runtime·sched.pidle) {
1656 runtime·lock(&runtime·sched); 1661 runtime·lock(&runtime·sched);
1657 p = pidleget(); 1662 p = pidleget();
1658 if(p && runtime·atomicload(&runtime·sched.sysmonwait)) { 1663 if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
1659 runtime·atomicstore(&runtime·sched.sysmonwait, 0); 1664 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1660 runtime·notewakeup(&runtime·sched.sysmonnote); 1665 runtime·notewakeup(&runtime·sched.sysmonnote);
1661 } 1666 }
1662 runtime·unlock(&runtime·sched); 1667 runtime·unlock(&runtime·sched);
1663 if(p) { 1668 if(p) {
1664 acquirep(p); 1669 acquirep(p);
1665 return true; 1670 return true;
1666 } 1671 }
1667 } 1672 }
1668 return false; 1673 return false;
1669 } 1674 }
1670 1675
1671 // runtime·exitsyscall slow path on g0. 1676 // runtime·exitsyscall slow path on g0.
1672 // Failed to acquire P, enqueue gp as runnable. 1677 // Failed to acquire P, enqueue gp as runnable.
1673 static void 1678 static void
1674 exitsyscall0(G *gp) 1679 exitsyscall0(G *gp)
1675 { 1680 {
1676 P *p; 1681 P *p;
1677 1682
1678 gp->status = Grunnable; 1683 gp->status = Grunnable;
1679 gp->m = nil; 1684 gp->m = nil;
1680 » m->curg = nil; 1685 » g->m->curg = nil;
1681 runtime·lock(&runtime·sched); 1686 runtime·lock(&runtime·sched);
1682 p = pidleget(); 1687 p = pidleget();
1683 if(p == nil) 1688 if(p == nil)
1684 globrunqput(gp); 1689 globrunqput(gp);
1685 else if(runtime·atomicload(&runtime·sched.sysmonwait)) { 1690 else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
1686 runtime·atomicstore(&runtime·sched.sysmonwait, 0); 1691 runtime·atomicstore(&runtime·sched.sysmonwait, 0);
1687 runtime·notewakeup(&runtime·sched.sysmonnote); 1692 runtime·notewakeup(&runtime·sched.sysmonnote);
1688 } 1693 }
1689 runtime·unlock(&runtime·sched); 1694 runtime·unlock(&runtime·sched);
1690 if(p) { 1695 if(p) {
1691 acquirep(p); 1696 acquirep(p);
1692 execute(gp); // Never returns. 1697 execute(gp); // Never returns.
1693 } 1698 }
1694 » if(m->lockedg) { 1699 » if(g->m->lockedg) {
1695 // Wait until another thread schedules gp and so m again. 1700 // Wait until another thread schedules gp and so m again.
1696 stoplockedm(); 1701 stoplockedm();
1697 execute(gp); // Never returns. 1702 execute(gp); // Never returns.
1698 } 1703 }
1699 stopm(); 1704 stopm();
1700 schedule(); // Never returns. 1705 schedule(); // Never returns.
1701 } 1706 }
1702 1707
1703 // Called from syscall package before fork. 1708 // Called from syscall package before fork.
1704 #pragma textflag NOSPLIT 1709 #pragma textflag NOSPLIT
1705 void 1710 void
1706 syscall·runtime_BeforeFork(void) 1711 syscall·runtime_BeforeFork(void)
1707 { 1712 {
1708 // Fork can hang if preempted with signals frequently enough (see issue 5517). 1713 // Fork can hang if preempted with signals frequently enough (see issue 5517).
1709 // Ensure that we stay on the same M where we disable profiling. 1714 // Ensure that we stay on the same M where we disable profiling.
1710 » m->locks++; 1715 » g->m->locks++;
1711 » if(m->profilehz != 0) 1716 » if(g->m->profilehz != 0)
1712 runtime·resetcpuprofiler(0); 1717 runtime·resetcpuprofiler(0);
1713 1718
1714 // This function is called before fork in syscall package. 1719 // This function is called before fork in syscall package.
1715 // Code between fork and exec must not allocate memory nor even try to g row stack. 1720 // Code between fork and exec must not allocate memory nor even try to g row stack.
1716 // Here we spoil g->stackguard to reliably detect any attempts to grow s tack. 1721 // Here we spoil g->stackguard to reliably detect any attempts to grow s tack.
1717 // runtime_AfterFork will undo this in parent process, but not in child. 1722 // runtime_AfterFork will undo this in parent process, but not in child.
1718 » m->forkstackguard = g->stackguard; 1723 » g->m->forkstackguard = g->stackguard;
1719 g->stackguard0 = StackPreempt-1; 1724 g->stackguard0 = StackPreempt-1;
1720 g->stackguard = StackPreempt-1; 1725 g->stackguard = StackPreempt-1;
1721 } 1726 }
1722 1727
1723 // Called from syscall package after fork in parent. 1728 // Called from syscall package after fork in parent.
1724 #pragma textflag NOSPLIT 1729 #pragma textflag NOSPLIT
1725 void 1730 void
1726 syscall·runtime_AfterFork(void) 1731 syscall·runtime_AfterFork(void)
1727 { 1732 {
1728 int32 hz; 1733 int32 hz;
1729 1734
1730 // See the comment in runtime_BeforeFork. 1735 // See the comment in runtime_BeforeFork.
1731 » g->stackguard0 = m->forkstackguard; 1736 » g->stackguard0 = g->m->forkstackguard;
1732 » g->stackguard = m->forkstackguard; 1737 » g->stackguard = g->m->forkstackguard;
1733 » m->forkstackguard = 0; 1738 » g->m->forkstackguard = 0;
1734 1739
1735 hz = runtime·sched.profilehz; 1740 hz = runtime·sched.profilehz;
1736 if(hz != 0) 1741 if(hz != 0)
1737 runtime·resetcpuprofiler(hz); 1742 runtime·resetcpuprofiler(hz);
1738 » m->locks--; 1743 » g->m->locks--;
1739 } 1744 }
1740 1745
1741 // Hook used by runtime·malg to call runtime·stackalloc on the 1746 // Hook used by runtime·malg to call runtime·stackalloc on the
1742 // scheduler stack. This exists because runtime·stackalloc insists 1747 // scheduler stack. This exists because runtime·stackalloc insists
1743 // on being called on the scheduler stack, to avoid trying to grow 1748 // on being called on the scheduler stack, to avoid trying to grow
1744 // the stack while allocating a new stack segment. 1749 // the stack while allocating a new stack segment.
1745 static void 1750 static void
1746 mstackalloc(G *gp) 1751 mstackalloc(G *gp)
1747 { 1752 {
1748 G *newg; 1753 G *newg;
(...skipping 14 matching lines...) Expand all
1763 byte *stk; 1768 byte *stk;
1764 1769
1765 if(StackTop < sizeof(Stktop)) { 1770 if(StackTop < sizeof(Stktop)) {
1766 runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (in t32)StackTop, (int32)sizeof(Stktop)); 1771 runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (in t32)StackTop, (int32)sizeof(Stktop));
1767 runtime·throw("runtime: bad stack.h"); 1772 runtime·throw("runtime: bad stack.h");
1768 } 1773 }
1769 1774
1770 newg = allocg(); 1775 newg = allocg();
1771 if(stacksize >= 0) { 1776 if(stacksize >= 0) {
1772 stacksize = runtime·round2(StackSystem + stacksize); 1777 stacksize = runtime·round2(StackSystem + stacksize);
1773 » » if(g == m->g0) { 1778 » » if(g == g->m->g0) {
1774 // running on scheduler stack already. 1779 // running on scheduler stack already.
1775 stk = runtime·stackalloc(newg, stacksize); 1780 stk = runtime·stackalloc(newg, stacksize);
1776 } else { 1781 } else {
1777 // have to call stackalloc on scheduler stack. 1782 // have to call stackalloc on scheduler stack.
1778 newg->stacksize = stacksize; 1783 newg->stacksize = stacksize;
1779 g->param = newg; 1784 g->param = newg;
1780 runtime·mcall(mstackalloc); 1785 runtime·mcall(mstackalloc);
1781 stk = g->param; 1786 stk = g->param;
1782 g->param = nil; 1787 g->param = nil;
1783 } 1788 }
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
1816 G* 1821 G*
1817 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc ) 1822 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc )
1818 { 1823 {
1819 byte *sp; 1824 byte *sp;
1820 G *newg; 1825 G *newg;
1821 P *p; 1826 P *p;
1822 int32 siz; 1827 int32 siz;
1823 1828
1824 //runtime·printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret); 1829 //runtime·printf("newproc1 %p %p narg=%d nret=%d\n", fn->fn, argp, narg, nret);
1825 if(fn == nil) { 1830 if(fn == nil) {
1826 » » m->throwing = -1; // do not dump full stacks 1831 » » g->m->throwing = -1; // do not dump full stacks
1827 runtime·throw("go of nil func value"); 1832 runtime·throw("go of nil func value");
1828 } 1833 }
1829 » m->locks++; // disable preemption because it can be holding p in a loca l var 1834 » g->m->locks++; // disable preemption because it can be holding p in a l ocal var
1830 siz = narg + nret; 1835 siz = narg + nret;
1831 siz = (siz+7) & ~7; 1836 siz = (siz+7) & ~7;
1832 1837
1833 // We could instead create a secondary stack frame 1838 // We could instead create a secondary stack frame
1834 // and make it look like goexit was on the original but 1839 // and make it look like goexit was on the original but
1835 // the call to the actual goroutine function was split. 1840 // the call to the actual goroutine function was split.
1836 // Not worth it: this is almost always an error. 1841 // Not worth it: this is almost always an error.
1837 if(siz > StackMin - 1024) 1842 if(siz > StackMin - 1024)
1838 runtime·throw("runtime.newproc: function arguments too large for new goroutine"); 1843 runtime·throw("runtime.newproc: function arguments too large for new goroutine");
1839 1844
1840 » p = m->p; 1845 » p = g->m->p;
1841 if((newg = gfget(p)) != nil) { 1846 if((newg = gfget(p)) != nil) {
1842 if(newg->stackguard - StackGuard != newg->stack0) 1847 if(newg->stackguard - StackGuard != newg->stack0)
1843 runtime·throw("invalid stack in newg"); 1848 runtime·throw("invalid stack in newg");
1844 } else { 1849 } else {
1845 newg = runtime·malg(StackMin); 1850 newg = runtime·malg(StackMin);
1846 allgadd(newg); 1851 allgadd(newg);
1847 } 1852 }
1848 1853
1849 sp = (byte*)newg->stackbase; 1854 sp = (byte*)newg->stackbase;
1850 sp -= siz; 1855 sp -= siz;
(...skipping 16 matching lines...) Expand all
1867 p->goidcacheend = p->goidcache + GoidCacheBatch; 1872 p->goidcacheend = p->goidcache + GoidCacheBatch;
1868 } 1873 }
1869 newg->goid = p->goidcache++; 1874 newg->goid = p->goidcache++;
1870 newg->panicwrap = 0; 1875 newg->panicwrap = 0;
1871 if(raceenabled) 1876 if(raceenabled)
1872 newg->racectx = runtime·racegostart((void*)callerpc); 1877 newg->racectx = runtime·racegostart((void*)callerpc);
1873 runqput(p, newg); 1878 runqput(p, newg);
1874 1879
1875 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic 1880 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
1876 wakep(); 1881 wakep();
1877 » m->locks--; 1882 » g->m->locks--;
1878 » if(m->locks == 0 && g->preempt) // restore the preemption request in ca se we've cleared it in newstack 1883 » if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
1879 g->stackguard0 = StackPreempt; 1884 g->stackguard0 = StackPreempt;
1880 return newg; 1885 return newg;
1881 } 1886 }
1882 1887
1883 static void 1888 static void
1884 allgadd(G *gp) 1889 allgadd(G *gp)
1885 { 1890 {
1886 G **new; 1891 G **new;
1887 uintptr cap; 1892 uintptr cap;
1888 1893
(...skipping 26 matching lines...) Expand all
1915 1920
1916 if(gp->stackguard - StackGuard != gp->stack0) 1921 if(gp->stackguard - StackGuard != gp->stack0)
1917 runtime·throw("invalid stack in gfput"); 1922 runtime·throw("invalid stack in gfput");
1918 stksize = gp->stackbase + sizeof(Stktop) - gp->stack0; 1923 stksize = gp->stackbase + sizeof(Stktop) - gp->stack0;
1919 if(stksize != gp->stacksize) { 1924 if(stksize != gp->stacksize) {
1920 runtime·printf("runtime: bad stacksize, goroutine %D, remain=%d, last=%d\n", 1925 runtime·printf("runtime: bad stacksize, goroutine %D, remain=%d, last=%d\n",
1921 gp->goid, (int32)gp->stacksize, (int32)stksize); 1926 gp->goid, (int32)gp->stacksize, (int32)stksize);
1922 runtime·throw("gfput: bad stacksize"); 1927 runtime·throw("gfput: bad stacksize");
1923 } 1928 }
1924 top = (Stktop*)gp->stackbase; 1929 top = (Stktop*)gp->stackbase;
1925 » if(stksize != StackMin) { 1930 » if(stksize != FixedStack) {
1926 // non-standard stack size - free it. 1931 // non-standard stack size - free it.
1927 runtime·stackfree(gp, (void*)gp->stack0, top); 1932 runtime·stackfree(gp, (void*)gp->stack0, top);
1928 gp->stack0 = 0; 1933 gp->stack0 = 0;
1929 gp->stackguard = 0; 1934 gp->stackguard = 0;
1930 gp->stackguard0 = 0; 1935 gp->stackguard0 = 0;
1931 gp->stackbase = 0; 1936 gp->stackbase = 0;
1932 } 1937 }
1933 gp->schedlink = p->gfree; 1938 gp->schedlink = p->gfree;
1934 p->gfree = gp; 1939 p->gfree = gp;
1935 p->gfreecnt++; 1940 p->gfreecnt++;
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
1967 } 1972 }
1968 runtime·unlock(&runtime·sched.gflock); 1973 runtime·unlock(&runtime·sched.gflock);
1969 goto retry; 1974 goto retry;
1970 } 1975 }
1971 if(gp) { 1976 if(gp) {
1972 p->gfree = gp->schedlink; 1977 p->gfree = gp->schedlink;
1973 p->gfreecnt--; 1978 p->gfreecnt--;
1974 1979
1975 if(gp->stack0 == 0) { 1980 if(gp->stack0 == 0) {
1976 // Stack was deallocated in gfput. Allocate a new one. 1981 // Stack was deallocated in gfput. Allocate a new one.
1977 » » » if(g == m->g0) { 1982 » » » if(g == g->m->g0) {
1978 stk = runtime·stackalloc(gp, FixedStack); 1983 stk = runtime·stackalloc(gp, FixedStack);
1979 } else { 1984 } else {
1980 gp->stacksize = FixedStack; 1985 gp->stacksize = FixedStack;
1981 g->param = gp; 1986 g->param = gp;
1982 runtime·mcall(mstackalloc); 1987 runtime·mcall(mstackalloc);
1983 stk = g->param; 1988 stk = g->param;
1984 g->param = nil; 1989 g->param = nil;
1985 } 1990 }
1986 gp->stack0 = (uintptr)stk; 1991 gp->stack0 = (uintptr)stk;
1987 gp->stackbase = (uintptr)stk + FixedStack - sizeof(Stkto p); 1992 gp->stackbase = (uintptr)stk + FixedStack - sizeof(Stkto p);
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
2032 n = MaxGomaxprocs; 2037 n = MaxGomaxprocs;
2033 runtime·lock(&runtime·sched); 2038 runtime·lock(&runtime·sched);
2034 ret = runtime·gomaxprocs; 2039 ret = runtime·gomaxprocs;
2035 if(n <= 0 || n == ret) { 2040 if(n <= 0 || n == ret) {
2036 runtime·unlock(&runtime·sched); 2041 runtime·unlock(&runtime·sched);
2037 return ret; 2042 return ret;
2038 } 2043 }
2039 runtime·unlock(&runtime·sched); 2044 runtime·unlock(&runtime·sched);
2040 2045
2041 runtime·semacquire(&runtime·worldsema, false); 2046 runtime·semacquire(&runtime·worldsema, false);
2042 » m->gcing = 1; 2047 » g->m->gcing = 1;
2043 runtime·stoptheworld(); 2048 runtime·stoptheworld();
2044 newprocs = n; 2049 newprocs = n;
2045 » m->gcing = 0; 2050 » g->m->gcing = 0;
2046 runtime·semrelease(&runtime·worldsema); 2051 runtime·semrelease(&runtime·worldsema);
2047 runtime·starttheworld(); 2052 runtime·starttheworld();
2048 2053
2049 return ret; 2054 return ret;
2050 } 2055 }
2051 2056
2052 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below 2057 // lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
2053 // after they modify m->locked. Do not allow preemption during this call, 2058 // after they modify m->locked. Do not allow preemption during this call,
2054 // or else the m might be different in this function than in the caller. 2059 // or else the m might be different in this function than in the caller.
2055 #pragma textflag NOSPLIT 2060 #pragma textflag NOSPLIT
2056 static void 2061 static void
2057 lockOSThread(void) 2062 lockOSThread(void)
2058 { 2063 {
2059 » m->lockedg = g; 2064 » g->m->lockedg = g;
2060 » g->lockedm = m; 2065 » g->lockedm = g->m;
2061 } 2066 }
2062 2067
2063 void 2068 void
2064 runtime·LockOSThread(void) 2069 runtime·LockOSThread(void)
2065 { 2070 {
2066 » m->locked |= LockExternal; 2071 » g->m->locked |= LockExternal;
2067 lockOSThread(); 2072 lockOSThread();
2068 } 2073 }
2069 2074
2070 void 2075 void
2071 runtime·lockOSThread(void) 2076 runtime·lockOSThread(void)
2072 { 2077 {
2073 » m->locked += LockInternal; 2078 » g->m->locked += LockInternal;
2074 lockOSThread(); 2079 lockOSThread();
2075 } 2080 }
2076 2081
2077 2082
2078 // unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below 2083 // unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
2079 // after they update m->locked. Do not allow preemption during this call, 2084 // after they update m->locked. Do not allow preemption during this call,
2080 // or else the m might be in different in this function than in the caller. 2085 // or else the m might be in different in this function than in the caller.
2081 #pragma textflag NOSPLIT 2086 #pragma textflag NOSPLIT
2082 static void 2087 static void
2083 unlockOSThread(void) 2088 unlockOSThread(void)
2084 { 2089 {
2085 » if(m->locked != 0) 2090 » if(g->m->locked != 0)
2086 return; 2091 return;
2087 » m->lockedg = nil; 2092 » g->m->lockedg = nil;
2088 g->lockedm = nil; 2093 g->lockedm = nil;
2089 } 2094 }
2090 2095
2091 void 2096 void
2092 runtime·UnlockOSThread(void) 2097 runtime·UnlockOSThread(void)
2093 { 2098 {
2094 » m->locked &= ~LockExternal; 2099 » g->m->locked &= ~LockExternal;
2095 unlockOSThread(); 2100 unlockOSThread();
2096 } 2101 }
2097 2102
2098 void 2103 void
2099 runtime·unlockOSThread(void) 2104 runtime·unlockOSThread(void)
2100 { 2105 {
2101 » if(m->locked < LockInternal) 2106 » if(g->m->locked < LockInternal)
2102 runtime·throw("runtime: internal error: misuse of lockOSThread/u nlockOSThread"); 2107 runtime·throw("runtime: internal error: misuse of lockOSThread/u nlockOSThread");
2103 » m->locked -= LockInternal; 2108 » g->m->locked -= LockInternal;
2104 unlockOSThread(); 2109 unlockOSThread();
2105 } 2110 }
2106 2111
2107 bool 2112 bool
2108 runtime·lockedOSThread(void) 2113 runtime·lockedOSThread(void)
2109 { 2114 {
2110 » return g->lockedm != nil && m->lockedg != nil; 2115 » return g->lockedm != nil && g->m->lockedg != nil;
2111 } 2116 }
2112 2117
2113 int32 2118 int32
2114 runtime·gcount(void) 2119 runtime·gcount(void)
2115 { 2120 {
2116 G *gp; 2121 G *gp;
2117 int32 n, s; 2122 int32 n, s;
2118 uintptr i; 2123 uintptr i;
2119 2124
2120 n = 0; 2125 n = 0;
(...skipping 199 matching lines...) Expand 10 before | Expand all | Expand 10 after
2320 // Force sane arguments. 2325 // Force sane arguments.
2321 if(hz < 0) 2326 if(hz < 0)
2322 hz = 0; 2327 hz = 0;
2323 if(hz == 0) 2328 if(hz == 0)
2324 fn = nil; 2329 fn = nil;
2325 if(fn == nil) 2330 if(fn == nil)
2326 hz = 0; 2331 hz = 0;
2327 2332
2328 // Disable preemption, otherwise we can be rescheduled to another thread 2333 // Disable preemption, otherwise we can be rescheduled to another thread
2329 // that has profiling enabled. 2334 // that has profiling enabled.
2330 » m->locks++; 2335 » g->m->locks++;
2331 2336
2332 // Stop profiler on this thread so that it is safe to lock prof. 2337 // Stop profiler on this thread so that it is safe to lock prof.
2333 // if a profiling signal came in while we had prof locked, 2338 // if a profiling signal came in while we had prof locked,
2334 // it would deadlock. 2339 // it would deadlock.
2335 runtime·resetcpuprofiler(0); 2340 runtime·resetcpuprofiler(0);
2336 2341
2337 runtime·lock(&prof); 2342 runtime·lock(&prof);
2338 prof.fn = fn; 2343 prof.fn = fn;
2339 prof.hz = hz; 2344 prof.hz = hz;
2340 runtime·unlock(&prof); 2345 runtime·unlock(&prof);
2341 runtime·lock(&runtime·sched); 2346 runtime·lock(&runtime·sched);
2342 runtime·sched.profilehz = hz; 2347 runtime·sched.profilehz = hz;
2343 runtime·unlock(&runtime·sched); 2348 runtime·unlock(&runtime·sched);
2344 2349
2345 if(hz != 0) 2350 if(hz != 0)
2346 runtime·resetcpuprofiler(hz); 2351 runtime·resetcpuprofiler(hz);
2347 2352
2348 » m->locks--; 2353 » g->m->locks--;
2349 } 2354 }
2350 2355
2351 // Change number of processors. The world is stopped, sched is locked. 2356 // Change number of processors. The world is stopped, sched is locked.
2352 static void 2357 static void
2353 procresize(int32 new) 2358 procresize(int32 new)
2354 { 2359 {
2355 int32 i, old; 2360 int32 i, old;
2356 bool empty; 2361 bool empty;
2357 G *gp; 2362 G *gp;
2358 P *p; 2363 P *p;
2359 2364
2360 old = runtime·gomaxprocs; 2365 old = runtime·gomaxprocs;
2361 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs) 2366 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs)
2362 runtime·throw("procresize: invalid arg"); 2367 runtime·throw("procresize: invalid arg");
2363 // initialize new P's 2368 // initialize new P's
2364 for(i = 0; i < new; i++) { 2369 for(i = 0; i < new; i++) {
2365 p = runtime·allp[i]; 2370 p = runtime·allp[i];
2366 if(p == nil) { 2371 if(p == nil) {
2367 p = (P*)runtime·mallocgc(sizeof(*p), 0, FlagNoInvokeGC); 2372 p = (P*)runtime·mallocgc(sizeof(*p), 0, FlagNoInvokeGC);
2368 p->id = i; 2373 p->id = i;
2369 p->status = Pgcstop; 2374 p->status = Pgcstop;
2370 runtime·atomicstorep(&runtime·allp[i], p); 2375 runtime·atomicstorep(&runtime·allp[i], p);
2371 } 2376 }
2372 if(p->mcache == nil) { 2377 if(p->mcache == nil) {
2373 if(old==0 && i==0) 2378 if(old==0 && i==0)
2374 » » » » p->mcache = m->mcache; // bootstrap 2379 » » » » p->mcache = g->m->mcache; // bootstrap
2375 else 2380 else
2376 p->mcache = runtime·allocmcache(); 2381 p->mcache = runtime·allocmcache();
2377 } 2382 }
2378 } 2383 }
2379 2384
2380 // redistribute runnable G's evenly 2385 // redistribute runnable G's evenly
2381 // collect all runnable goroutines in global queue preserving FIFO order 2386 // collect all runnable goroutines in global queue preserving FIFO order
2382 // FIFO order is required to ensure fairness even during frequent GCs 2387 // FIFO order is required to ensure fairness even during frequent GCs
2383 // see http://golang.org/issue/7126 2388 // see http://golang.org/issue/7126
2384 empty = false; 2389 empty = false;
(...skipping 30 matching lines...) Expand all
2415 // free unused P's 2420 // free unused P's
2416 for(i = new; i < old; i++) { 2421 for(i = new; i < old; i++) {
2417 p = runtime·allp[i]; 2422 p = runtime·allp[i];
2418 runtime·freemcache(p->mcache); 2423 runtime·freemcache(p->mcache);
2419 p->mcache = nil; 2424 p->mcache = nil;
2420 gfpurge(p); 2425 gfpurge(p);
2421 p->status = Pdead; 2426 p->status = Pdead;
2422 // can't free P itself because it can be referenced by an M in s yscall 2427 // can't free P itself because it can be referenced by an M in s yscall
2423 } 2428 }
2424 2429
2425 » if(m->p) 2430 » if(g->m->p)
2426 » » m->p->m = nil; 2431 » » g->m->p->m = nil;
2427 » m->p = nil; 2432 » g->m->p = nil;
2428 » m->mcache = nil; 2433 » g->m->mcache = nil;
2429 p = runtime·allp[0]; 2434 p = runtime·allp[0];
2430 p->m = nil; 2435 p->m = nil;
2431 p->status = Pidle; 2436 p->status = Pidle;
2432 acquirep(p); 2437 acquirep(p);
2433 for(i = new-1; i > 0; i--) { 2438 for(i = new-1; i > 0; i--) {
2434 p = runtime·allp[i]; 2439 p = runtime·allp[i];
2435 p->status = Pidle; 2440 p->status = Pidle;
2436 pidleput(p); 2441 pidleput(p);
2437 } 2442 }
2438 runtime·atomicstore((uint32*)&runtime·gomaxprocs, new); 2443 runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
2439 } 2444 }
2440 2445
2441 // Associate p and the current m. 2446 // Associate p and the current m.
2442 #pragma textflag NOSPLIT
2443 static void 2447 static void
2444 acquirep(P *p) 2448 acquirep(P *p)
2445 { 2449 {
2446 » if(m->p || m->mcache) 2450 » if(g->m->p || g->m->mcache)
2447 runtime·throw("acquirep: already in go"); 2451 runtime·throw("acquirep: already in go");
2448 if(p->m || p->status != Pidle) { 2452 if(p->m || p->status != Pidle) {
2449 » » //runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p ->m ? p->m->id : 0, p->status); 2453 » » runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p-> m ? p->m->id : 0, p->status);
2450 runtime·throw("acquirep: invalid p state"); 2454 runtime·throw("acquirep: invalid p state");
2451 } 2455 }
2452 » m->mcache = p->mcache; 2456 » g->m->mcache = p->mcache;
2453 » m->p = p; 2457 » g->m->p = p;
2454 » p->m = m; 2458 » p->m = g->m;
2455 p->status = Prunning; 2459 p->status = Prunning;
2456 } 2460 }
2457 2461
2458 // Disassociate p and the current m. 2462 // Disassociate p and the current m.
2459 static P* 2463 static P*
2460 releasep(void) 2464 releasep(void)
2461 { 2465 {
2462 P *p; 2466 P *p;
2463 2467
2464 » if(m->p == nil || m->mcache == nil) 2468 » if(g->m->p == nil || g->m->mcache == nil)
2465 runtime·throw("releasep: invalid arg"); 2469 runtime·throw("releasep: invalid arg");
2466 » p = m->p; 2470 » p = g->m->p;
2467 » if(p->m != m || p->mcache != m->mcache || p->status != Prunning) { 2471 » if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
2468 runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->m cache=%p p->status=%d\n", 2472 runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->m cache=%p p->status=%d\n",
2469 » » » m, m->p, p->m, m->mcache, p->mcache, p->status); 2473 » » » g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status) ;
2470 runtime·throw("releasep: invalid p state"); 2474 runtime·throw("releasep: invalid p state");
2471 } 2475 }
2472 » m->p = nil; 2476 » g->m->p = nil;
2473 » m->mcache = nil; 2477 » g->m->mcache = nil;
2474 p->m = nil; 2478 p->m = nil;
2475 p->status = Pidle; 2479 p->status = Pidle;
2476 return p; 2480 return p;
2477 } 2481 }
2478 2482
2479 static void 2483 static void
2480 incidlelocked(int32 v) 2484 incidlelocked(int32 v)
2481 { 2485 {
2482 runtime·lock(&runtime·sched); 2486 runtime·lock(&runtime·sched);
2483 runtime·sched.nmidlelocked += v; 2487 runtime·sched.nmidlelocked += v;
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
2521 grunning++; 2525 grunning++;
2522 else if(s == Grunnable || s == Grunning || s == Gsyscall) { 2526 else if(s == Grunnable || s == Grunning || s == Gsyscall) {
2523 runtime·unlock(&allglock); 2527 runtime·unlock(&allglock);
2524 runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s); 2528 runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
2525 runtime·throw("checkdead: runnable g"); 2529 runtime·throw("checkdead: runnable g");
2526 } 2530 }
2527 } 2531 }
2528 runtime·unlock(&allglock); 2532 runtime·unlock(&allglock);
2529 if(grunning == 0) // possible if main goroutine calls runtime·Goexit() 2533 if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
2530 runtime·throw("no goroutines (main called runtime.Goexit) - dead lock!"); 2534 runtime·throw("no goroutines (main called runtime.Goexit) - dead lock!");
2531 » m->throwing = -1; // do not dump full stacks 2535 » g->m->throwing = -1; // do not dump full stacks
2532 runtime·throw("all goroutines are asleep - deadlock!"); 2536 runtime·throw("all goroutines are asleep - deadlock!");
2533 } 2537 }
2534 2538
2535 static void 2539 static void
2536 sysmon(void) 2540 sysmon(void)
2537 { 2541 {
2538 uint32 idle, delay; 2542 uint32 idle, delay;
2539 int64 now, lastpoll, lasttrace; 2543 int64 now, lastpoll, lasttrace;
2540 G *gp; 2544 G *gp;
2541 2545
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
2692 // simultaneously executing runtime·newstack. 2696 // simultaneously executing runtime·newstack.
2693 // No lock needs to be held. 2697 // No lock needs to be held.
2694 // Returns true if preemption request was issued. 2698 // Returns true if preemption request was issued.
2695 static bool 2699 static bool
2696 preemptone(P *p) 2700 preemptone(P *p)
2697 { 2701 {
2698 M *mp; 2702 M *mp;
2699 G *gp; 2703 G *gp;
2700 2704
2701 mp = p->m; 2705 mp = p->m;
2702 » if(mp == nil || mp == m) 2706 » if(mp == nil || mp == g->m)
2703 return false; 2707 return false;
2704 gp = mp->curg; 2708 gp = mp->curg;
2705 if(gp == nil || gp == mp->g0) 2709 if(gp == nil || gp == mp->g0)
2706 return false; 2710 return false;
2707 gp->preempt = true; 2711 gp->preempt = true;
2708 gp->stackguard0 = StackPreempt; 2712 gp->stackguard0 = StackPreempt;
2709 return true; 2713 return true;
2710 } 2714 }
2711 2715
2712 void 2716 void
2713 runtime·schedtrace(bool detailed) 2717 runtime·schedtrace(bool detailed)
2714 { 2718 {
2715 static int64 starttime; 2719 static int64 starttime;
2716 int64 now; 2720 int64 now;
2717 int64 id1, id2, id3; 2721 int64 id1, id2, id3;
2718 int32 i, t, h; 2722 int32 i, t, h;
2719 uintptr gi; 2723 uintptr gi;
2720 int8 *fmt; 2724 int8 *fmt;
2721 M *mp, *lockedm; 2725 M *mp, *lockedm;
2722 G *gp, *lockedg; 2726 G *gp, *lockedg;
2723 P *p; 2727 P *p;
2724 2728
2725 now = runtime·nanotime(); 2729 now = runtime·nanotime();
2726 if(starttime == 0) 2730 if(starttime == 0)
2727 starttime = now; 2731 starttime = now;
2728 2732
2729 runtime·lock(&runtime·sched); 2733 runtime·lock(&runtime·sched);
2730 » runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idleth reads=%d runqueue=%d", 2734 » runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinni ngthreads=%d idlethreads=%d runqueue=%d",
2731 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidl e, runtime·sched.mcount, 2735 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidl e, runtime·sched.mcount,
2732 » » runtime·sched.nmidle, runtime·sched.runqsize); 2736 » » runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.ru nqsize);
2733 if(detailed) { 2737 if(detailed) {
2734 » » runtime·printf(" gcwaiting=%d nmidlelocked=%d nmspinning=%d stop wait=%d sysmonwait=%d\n", 2738 » » runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmon wait=%d\n",
2735 » » » runtime·sched.gcwaiting, runtime·sched.nmidlelocked, run time·sched.nmspinning, 2739 » » » runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
2736 runtime·sched.stopwait, runtime·sched.sysmonwait); 2740 runtime·sched.stopwait, runtime·sched.sysmonwait);
2737 } 2741 }
2738 // We must be careful while reading data from P's, M's and G's. 2742 // We must be careful while reading data from P's, M's and G's.
2739 // Even if we hold schedlock, most data can be changed concurrently. 2743 // Even if we hold schedlock, most data can be changed concurrently.
2740 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 2744 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
2741 for(i = 0; i < runtime·gomaxprocs; i++) { 2745 for(i = 0; i < runtime·gomaxprocs; i++) {
2742 p = runtime·allp[i]; 2746 p = runtime·allp[i];
2743 if(p == nil) 2747 if(p == nil)
2744 continue; 2748 continue;
2745 mp = p->m; 2749 mp = p->m;
(...skipping 29 matching lines...) Expand all
2775 id2 = -1; 2779 id2 = -1;
2776 if(gp) 2780 if(gp)
2777 id2 = gp->goid; 2781 id2 = gp->goid;
2778 id3 = -1; 2782 id3 = -1;
2779 if(lockedg) 2783 if(lockedg)
2780 id3 = lockedg->goid; 2784 id3 = lockedg->goid;
2781 runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gci ng=%d" 2785 runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gci ng=%d"
2782 " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d loc kedg=%D\n", 2786 " locks=%d dying=%d helpgc=%d spinning=%d blocked=%d loc kedg=%D\n",
2783 mp->id, id1, id2, 2787 mp->id, id1, id2,
2784 mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->d ying, mp->helpgc, 2788 mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->d ying, mp->helpgc,
2785 » » » mp->spinning, m->blocked, id3); 2789 » » » mp->spinning, g->m->blocked, id3);
2786 } 2790 }
2787 runtime·lock(&allglock); 2791 runtime·lock(&allglock);
2788 for(gi = 0; gi < runtime·allglen; gi++) { 2792 for(gi = 0; gi < runtime·allglen; gi++) {
2789 gp = runtime·allg[gi]; 2793 gp = runtime·allg[gi];
2790 mp = gp->m; 2794 mp = gp->m;
2791 lockedm = gp->lockedm; 2795 lockedm = gp->lockedm;
2792 runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n", 2796 runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n",
2793 gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1, 2797 gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1,
2794 lockedm ? lockedm->id : -1); 2798 lockedm ? lockedm->id : -1);
2795 } 2799 }
(...skipping 339 matching lines...) Expand 10 before | Expand all | Expand 10 after
3135 if(experiment[i+j] != name[j]) 3139 if(experiment[i+j] != name[j])
3136 goto nomatch; 3140 goto nomatch;
3137 if(experiment[i+j] != '\0' && experiment[i+j] != ',') 3141 if(experiment[i+j] != '\0' && experiment[i+j] != ',')
3138 goto nomatch; 3142 goto nomatch;
3139 return 1; 3143 return 1;
3140 } 3144 }
3141 nomatch:; 3145 nomatch:;
3142 } 3146 }
3143 return 0; 3147 return 0;
3144 } 3148 }
LEFTRIGHT

Powered by Google App Engine
RSS Feeds Recent Issues | This issue
This is Rietveld f62528b