Rietveld Code Review Tool
Help | Bug tracker | Discussion group | Source code | Sign in
(3427)

Delta Between Two Patch Sets: src/pkg/runtime/proc.c

Issue 10136043: code review 10136043: runtime: refactor mallocgc (Closed)
Left Patch Set: diff -r 71375a634b9a https://dvyukov%40google.com@code.google.com/p/go/ Created 11 years, 9 months ago
Right Patch Set: diff -r 654ca7de0282 https://dvyukov%40google.com@code.google.com/p/go/ Created 11 years, 8 months ago
Left:
Right:
Use n/p to move between diff chunks; N/P to move between comments. Please Sign in to add in-line comments.
Jump to:
Left: Side by side diff | Download
Right: Side by side diff | Download
« no previous file with change/comment | « src/pkg/runtime/mgc0.c ('k') | src/pkg/runtime/stack.c » ('j') | no next file with change/comment »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
LEFTRIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 #include "runtime.h" 5 #include "runtime.h"
6 #include "arch_GOARCH.h" 6 #include "arch_GOARCH.h"
7 #include "malloc.h" 7 #include "malloc.h"
8 #include "stack.h" 8 #include "stack.h"
9 #include "race.h" 9 #include "race.h"
10 #include "type.h" 10 #include "type.h"
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
80 static void runqgrow(P*); 80 static void runqgrow(P*);
81 static G* runqsteal(P*, P*); 81 static G* runqsteal(P*, P*);
82 static void mput(M*); 82 static void mput(M*);
83 static M* mget(void); 83 static M* mget(void);
84 static void mcommoninit(M*); 84 static void mcommoninit(M*);
85 static void schedule(void); 85 static void schedule(void);
86 static void procresize(int32); 86 static void procresize(int32);
87 static void acquirep(P*); 87 static void acquirep(P*);
88 static P* releasep(void); 88 static P* releasep(void);
89 static void newm(void(*)(void), P*); 89 static void newm(void(*)(void), P*);
90 static void goidle(void);
91 static void stopm(void); 90 static void stopm(void);
92 static void startm(P*, bool); 91 static void startm(P*, bool);
93 static void handoffp(P*); 92 static void handoffp(P*);
94 static void wakep(void); 93 static void wakep(void);
95 static void stoplockedm(void); 94 static void stoplockedm(void);
96 static void startlockedm(G*); 95 static void startlockedm(G*);
97 static void sysmon(void); 96 static void sysmon(void);
98 static uint32 retake(uint32*); 97 static uint32 retake(int64);
99 static void inclocked(int32); 98 static void inclocked(int32);
100 static void checkdead(void); 99 static void checkdead(void);
101 static void exitsyscall0(G*); 100 static void exitsyscall0(G*);
102 static void park0(G*); 101 static void park0(G*);
103 static void gosched0(G*);
104 static void goexit0(G*); 102 static void goexit0(G*);
105 static void gfput(P*, G*); 103 static void gfput(P*, G*);
106 static G* gfget(P*); 104 static G* gfget(P*);
107 static void gfpurge(P*); 105 static void gfpurge(P*);
108 static void globrunqput(G*); 106 static void globrunqput(G*);
109 static G* globrunqget(P*); 107 static G* globrunqget(P*, int32);
110 static P* pidleget(void); 108 static P* pidleget(void);
111 static void pidleput(P*); 109 static void pidleput(P*);
112 static void injectglist(G*); 110 static void injectglist(G*);
113 static void preemptall(void); 111 static void preemptall(void);
114 static void preemptone(P*); 112 static void preemptone(P*);
115 113
116 // The bootstrap sequence is: 114 // The bootstrap sequence is:
117 // 115 //
118 // call osinit 116 // call osinit
119 // call schedinit 117 // call schedinit
120 // make & queue new G 118 // make & queue new G
121 // call runtime·mstart 119 // call runtime·mstart
122 // 120 //
123 // The new G calls runtime·main. 121 // The new G calls runtime·main.
124 void 122 void
125 runtime·schedinit(void) 123 runtime·schedinit(void)
126 { 124 {
127 int32 n, procs; 125 int32 n, procs;
128 byte *p; 126 byte *p;
129 127
130 m->nomemprof++; 128 m->nomemprof++;
131 runtime·mprofinit(); 129 runtime·mprofinit();
132 runtime·mallocinit(); 130 runtime·mallocinit();
133 mcommoninit(m); 131 mcommoninit(m);
134 132
135 runtime·goargs(); 133 runtime·goargs();
136 runtime·goenvs(); 134 runtime·goenvs();
135 runtime·parsedebugvars();
137 136
138 // Allocate internal symbol table representation now, we need it for GC anyway. 137 // Allocate internal symbol table representation now, we need it for GC anyway.
139 runtime·symtabinit(); 138 runtime·symtabinit();
140 139
141 runtime·sched.lastpoll = runtime·nanotime(); 140 runtime·sched.lastpoll = runtime·nanotime();
142 procs = 1; 141 procs = 1;
143 p = runtime·getenv("GOMAXPROCS"); 142 p = runtime·getenv("GOMAXPROCS");
144 if(p != nil && (n = runtime·atoi(p)) > 0) { 143 if(p != nil && (n = runtime·atoi(p)) > 0) {
145 if(n > MaxGomaxprocs) 144 if(n > MaxGomaxprocs)
146 n = MaxGomaxprocs; 145 n = MaxGomaxprocs;
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
227 runtime·printf("goroutine %D [%s]:\n", gp->goid, status); 226 runtime·printf("goroutine %D [%s]:\n", gp->goid, status);
228 } 227 }
229 228
230 void 229 void
231 runtime·tracebackothers(G *me) 230 runtime·tracebackothers(G *me)
232 { 231 {
233 G *gp; 232 G *gp;
234 int32 traceback; 233 int32 traceback;
235 234
236 traceback = runtime·gotraceback(nil); 235 traceback = runtime·gotraceback(nil);
236 ········
237 // Show the current goroutine first, if we haven't already.
238 if((gp = m->curg) != nil && gp != me) {
239 runtime·printf("\n");
240 runtime·goroutineheader(gp);
241 runtime·traceback(gp->sched.pc, gp->sched.sp, gp->sched.lr, gp);
242 }
243
237 for(gp = runtime·allg; gp != nil; gp = gp->alllink) { 244 for(gp = runtime·allg; gp != nil; gp = gp->alllink) {
238 » » if(gp == me || gp->status == Gdead) 245 » » if(gp == me || gp == m->curg || gp->status == Gdead)
239 continue; 246 continue;
240 if(gp->issystem && traceback < 2) 247 if(gp->issystem && traceback < 2)
241 continue; 248 continue;
242 runtime·printf("\n"); 249 runtime·printf("\n");
243 runtime·goroutineheader(gp); 250 runtime·goroutineheader(gp);
244 » » runtime·traceback(gp->sched.pc, gp->sched.sp, 0, gp); 251 » » if(gp->status == Grunning)
252 » » » runtime·printf("\tgoroutine running on other thread; stack unavailable\n");
253 » » else
254 » » » runtime·traceback(gp->sched.pc, gp->sched.sp, gp->sched. lr, gp);
245 } 255 }
246 } 256 }
247 257
248 static void 258 static void
249 mcommoninit(M *mp) 259 mcommoninit(M *mp)
250 { 260 {
251 // If there is no mcache runtime·callers() will crash, 261 // If there is no mcache runtime·callers() will crash,
252 // and we are most likely in sysmon thread so the stack is senseless anyway. 262 // and we are most likely in sysmon thread so the stack is senseless anyway.
253 if(m->mcache) 263 if(m->mcache)
254 runtime·callers(1, mp->createstack, nelem(mp->createstack)); 264 runtime·callers(1, mp->createstack, nelem(mp->createstack));
(...skipping 22 matching lines...) Expand all
277 m->locks++; // disable preemption because it can be holding p in a local var 287 m->locks++; // disable preemption because it can be holding p in a local var
278 if(gp->status != Gwaiting) { 288 if(gp->status != Gwaiting) {
279 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->status); 289 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->status);
280 runtime·throw("bad g->status in ready"); 290 runtime·throw("bad g->status in ready");
281 } 291 }
282 gp->status = Grunnable; 292 gp->status = Grunnable;
283 runqput(m->p, gp); 293 runqput(m->p, gp);
284 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0) // TODO: fast atomic 294 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0) // TODO: fast atomic
285 wakep(); 295 wakep();
286 m->locks--; 296 m->locks--;
297 if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
298 g->stackguard0 = StackPreempt;
287 } 299 }
288 300
289 int32 301 int32
290 runtime·gcprocs(void) 302 runtime·gcprocs(void)
291 { 303 {
292 int32 n; 304 int32 n;
293 305
294 // Figure out how many CPUs to use during GC. 306 // Figure out how many CPUs to use during GC.
295 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. 307 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
296 runtime·lock(&runtime·sched); 308 runtime·lock(&runtime·sched);
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
347 runtime·stoptheworld(void) 359 runtime·stoptheworld(void)
348 { 360 {
349 int32 i; 361 int32 i;
350 uint32 s; 362 uint32 s;
351 P *p; 363 P *p;
352 bool wait; 364 bool wait;
353 365
354 runtime·lock(&runtime·sched); 366 runtime·lock(&runtime·sched);
355 runtime·sched.stopwait = runtime·gomaxprocs; 367 runtime·sched.stopwait = runtime·gomaxprocs;
356 runtime·atomicstore((uint32*)&runtime·gcwaiting, 1); 368 runtime·atomicstore((uint32*)&runtime·gcwaiting, 1);
369 preemptall();
357 // stop current P 370 // stop current P
358 m->p->status = Pgcstop; 371 m->p->status = Pgcstop;
359 runtime·sched.stopwait--; 372 runtime·sched.stopwait--;
360 // try to retake all P's in Psyscall status 373 // try to retake all P's in Psyscall status
361 for(i = 0; i < runtime·gomaxprocs; i++) { 374 for(i = 0; i < runtime·gomaxprocs; i++) {
362 p = runtime·allp[i]; 375 p = runtime·allp[i];
363 s = p->status; 376 s = p->status;
364 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop)) 377 if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
365 runtime·sched.stopwait--; 378 runtime·sched.stopwait--;
366 } 379 }
367 // stop idle P's 380 // stop idle P's
368 while(p = pidleget()) { 381 while(p = pidleget()) {
369 p->status = Pgcstop; 382 p->status = Pgcstop;
370 runtime·sched.stopwait--; 383 runtime·sched.stopwait--;
371 } 384 }
372 wait = runtime·sched.stopwait > 0; 385 wait = runtime·sched.stopwait > 0;
373 runtime·unlock(&runtime·sched); 386 runtime·unlock(&runtime·sched);
374 387
375 » // wait for remaining P's to stop voluntary 388 » // wait for remaining P's to stop voluntarily
376 if(wait) { 389 if(wait) {
377 » » runtime·notesleep(&runtime·sched.stopnote); 390 » » for(;;) {
378 » » runtime·noteclear(&runtime·sched.stopnote); 391 » » » // wait for 100us, then try to re-preempt in case of any races
392 » » » if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
393 » » » » runtime·noteclear(&runtime·sched.stopnote);
394 » » » » break;
395 » » » }
396 » » » preemptall();
397 » » }
379 } 398 }
380 if(runtime·sched.stopwait) 399 if(runtime·sched.stopwait)
381 runtime·throw("stoptheworld: not stopped"); 400 runtime·throw("stoptheworld: not stopped");
382 for(i = 0; i < runtime·gomaxprocs; i++) { 401 for(i = 0; i < runtime·gomaxprocs; i++) {
383 p = runtime·allp[i]; 402 p = runtime·allp[i];
384 if(p->status != Pgcstop) 403 if(p->status != Pgcstop)
385 runtime·throw("stoptheworld: not stopped"); 404 runtime·throw("stoptheworld: not stopped");
386 } 405 }
387 } 406 }
388 407
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
451 // If GC could have used another helper proc, start one now, 470 // If GC could have used another helper proc, start one now,
452 // in the hope that it will be available next time. 471 // in the hope that it will be available next time.
453 // It would have been even better to start it before the collection, 472 // It would have been even better to start it before the collection,
454 // but doing so requires allocating memory, so it's tricky to 473 // but doing so requires allocating memory, so it's tricky to
455 // coordinate. This lazy approach works out in practice: 474 // coordinate. This lazy approach works out in practice:
456 // we don't mind if the first couple gc rounds don't have quite 475 // we don't mind if the first couple gc rounds don't have quite
457 // the maximum number of procs. 476 // the maximum number of procs.
458 newm(mhelpgc, nil); 477 newm(mhelpgc, nil);
459 } 478 }
460 m->locks--; 479 m->locks--;
480 if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
481 g->stackguard0 = StackPreempt;
461 } 482 }
462 483
463 // Called to start an M. 484 // Called to start an M.
464 void 485 void
465 runtime·mstart(void) 486 runtime·mstart(void)
466 { 487 {
488 #ifdef GOOS_windows
489 #ifdef GOARCH_386
467 // It is used by windows-386 only. Unfortunately, seh needs 490 // It is used by windows-386 only. Unfortunately, seh needs
468 // to be located on os stack, and mstart runs on os stack 491 // to be located on os stack, and mstart runs on os stack
469 // for both m0 and m. 492 // for both m0 and m.
470 SEH seh; 493 SEH seh;
494 #endif
495 #endif
471 496
472 if(g != m->g0) 497 if(g != m->g0)
473 runtime·throw("bad runtime·mstart"); 498 runtime·throw("bad runtime·mstart");
474 499
475 // Record top of stack for use by mcall. 500 // Record top of stack for use by mcall.
476 // Once we call schedule we're never coming back, 501 // Once we call schedule we're never coming back,
477 // so other calls can reuse this stack space. 502 // so other calls can reuse this stack space.
478 runtime·gosave(&m->g0->sched); 503 runtime·gosave(&m->g0->sched);
479 m->g0->sched.pc = (uintptr)-1; // make sure it is never used 504 m->g0->sched.pc = (uintptr)-1; // make sure it is never used
480 m->g0->stackguard = m->g0->stackguard0; // cgo sets only stackguard0, c opy it to stackguard 505 m->g0->stackguard = m->g0->stackguard0; // cgo sets only stackguard0, c opy it to stackguard
506 #ifdef GOOS_windows
507 #ifdef GOARCH_386
481 m->seh = &seh; 508 m->seh = &seh;
509 #endif
510 #endif
482 runtime·asminit(); 511 runtime·asminit();
483 runtime·minit(); 512 runtime·minit();
484 513
485 // Install signal handlers; after minit so that minit can 514 // Install signal handlers; after minit so that minit can
486 // prepare the thread to be able to handle the signals. 515 // prepare the thread to be able to handle the signals.
487 if(m == &runtime·m0) 516 if(m == &runtime·m0)
488 runtime·initsig(); 517 runtime·initsig();
489 ········ 518 ········
490 if(m->mstartfn) 519 if(m->mstartfn)
491 m->mstartfn(); 520 m->mstartfn();
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
540 // In case of cgo, pthread_create will make us a stack. 569 // In case of cgo, pthread_create will make us a stack.
541 // Windows will layout sched stack on OS stack. 570 // Windows will layout sched stack on OS stack.
542 if(runtime·iscgo || Windows) 571 if(runtime·iscgo || Windows)
543 mp->g0 = runtime·malg(-1); 572 mp->g0 = runtime·malg(-1);
544 else 573 else
545 mp->g0 = runtime·malg(8192); 574 mp->g0 = runtime·malg(8192);
546 575
547 if(p == m->p) 576 if(p == m->p)
548 releasep(); 577 releasep();
549 m->locks--; 578 m->locks--;
579 if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
580 g->stackguard0 = StackPreempt;
550 581
551 return mp; 582 return mp;
552 } 583 }
553 584
554 static M* lockextra(bool nilokay); 585 static M* lockextra(bool nilokay);
555 static void unlockextra(M*); 586 static void unlockextra(M*);
556 587
557 // needm is called when a cgo callback happens on a 588 // needm is called when a cgo callback happens on a
558 // thread without an m (a thread not created by Go). 589 // thread without an m (a thread not created by Go).
559 // In this case, needm is expected to find an m to use 590 // In this case, needm is expected to find an m to use
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
620 // Install m and g (= m->g0) and set the stack bounds 651 // Install m and g (= m->g0) and set the stack bounds
621 // to match the current stack. We don't actually know 652 // to match the current stack. We don't actually know
622 // how big the stack is, like we don't know how big any 653 // how big the stack is, like we don't know how big any
623 // scheduling stack is, but we assume there's at least 32 kB, 654 // scheduling stack is, but we assume there's at least 32 kB,
624 // which is more than enough for us. 655 // which is more than enough for us.
625 runtime·setmg(mp, mp->g0); 656 runtime·setmg(mp, mp->g0);
626 g->stackbase = (uintptr)(&x + 1024); 657 g->stackbase = (uintptr)(&x + 1024);
627 g->stackguard = (uintptr)(&x - 32*1024); 658 g->stackguard = (uintptr)(&x - 32*1024);
628 g->stackguard0 = g->stackguard; 659 g->stackguard0 = g->stackguard;
629 660
661 #ifdef GOOS_windows
662 #ifdef GOARCH_386
630 // On windows/386, we need to put an SEH frame (two words) 663 // On windows/386, we need to put an SEH frame (two words)
631 » // somewhere on the current stack. We are called 664 » // somewhere on the current stack. We are called from cgocallback_gofunc
632 » // from needm, and we know there is some available 665 » // and we know that it will leave two unused words below m->curg->sched.sp.
633 » // space one word into the argument frame. Use that. 666 » // Use those.
634 m->seh = (SEH*)((uintptr*)&x + 1); 667 m->seh = (SEH*)((uintptr*)&x + 1);
668 #endif
669 #endif
635 670
636 // Initialize this thread to use the m. 671 // Initialize this thread to use the m.
637 runtime·asminit(); 672 runtime·asminit();
638 runtime·minit(); 673 runtime·minit();
639 } 674 }
640 675
641 // newextram allocates an m and puts it on the extra list. 676 // newextram allocates an m and puts it on the extra list.
642 // It is called with a working local m, so that it can do things 677 // It is called with a working local m, so that it can do things
643 // like call schedlock and allocate. 678 // like call schedlock and allocate.
644 void 679 void
645 runtime·newextram(void) 680 runtime·newextram(void)
646 { 681 {
647 M *mp, *mnext; 682 M *mp, *mnext;
648 G *gp; 683 G *gp;
649 684
650 // Create extra goroutine locked to extra m. 685 // Create extra goroutine locked to extra m.
651 // The goroutine is the context in which the cgo callback will run. 686 // The goroutine is the context in which the cgo callback will run.
652 // The sched.pc will never be returned to, but setting it to 687 // The sched.pc will never be returned to, but setting it to
653 // runtime.goexit makes clear to the traceback routines where 688 // runtime.goexit makes clear to the traceback routines where
654 // the goroutine stack ends. 689 // the goroutine stack ends.
655 mp = runtime·allocm(nil); 690 mp = runtime·allocm(nil);
656 gp = runtime·malg(4096); 691 gp = runtime·malg(4096);
657 gp->sched.pc = (uintptr)runtime·goexit; 692 gp->sched.pc = (uintptr)runtime·goexit;
658 gp->sched.sp = gp->stackbase; 693 gp->sched.sp = gp->stackbase;
694 gp->sched.lr = 0;
659 gp->sched.g = gp; 695 gp->sched.g = gp;
660 gp->status = Gsyscall; 696 gp->status = Gsyscall;
661 mp->curg = gp; 697 mp->curg = gp;
662 mp->locked = LockInternal; 698 mp->locked = LockInternal;
663 mp->lockedg = gp; 699 mp->lockedg = gp;
664 gp->lockedm = mp; 700 gp->lockedm = mp;
665 // put on allg for garbage collector 701 // put on allg for garbage collector
666 runtime·lock(&runtime·sched); 702 runtime·lock(&runtime·sched);
667 if(runtime·lastg == nil) 703 if(runtime·lastg == nil)
668 runtime·allg = gp; 704 runtime·allg = gp;
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
703 // in which dropm happens on each cgo call, is still correct too. 739 // in which dropm happens on each cgo call, is still correct too.
704 // We may have to keep the current version on systems with cgo 740 // We may have to keep the current version on systems with cgo
705 // but without pthreads, like Windows. 741 // but without pthreads, like Windows.
706 void 742 void
707 runtime·dropm(void) 743 runtime·dropm(void)
708 { 744 {
709 M *mp, *mnext; 745 M *mp, *mnext;
710 746
711 // Undo whatever initialization minit did during needm. 747 // Undo whatever initialization minit did during needm.
712 runtime·unminit(); 748 runtime·unminit();
749
750 #ifdef GOOS_windows
751 #ifdef GOARCH_386
713 m->seh = nil; // reset dangling typed pointer 752 m->seh = nil; // reset dangling typed pointer
753 #endif
754 #endif
714 755
715 // Clear m and g, and return m to the extra list. 756 // Clear m and g, and return m to the extra list.
716 // After the call to setmg we can only call nosplit functions. 757 // After the call to setmg we can only call nosplit functions.
717 mp = m; 758 mp = m;
718 runtime·setmg(nil, nil); 759 runtime·setmg(nil, nil);
719 760
720 mnext = lockextra(true); 761 mnext = lockextra(true);
721 mp->schedlink = mnext; 762 mp->schedlink = mnext;
722 unlockextra(mp); 763 unlockextra(mp);
723 } 764 }
(...skipping 259 matching lines...) Expand 10 before | Expand all | Expand 10 after
983 static void 1024 static void
984 execute(G *gp) 1025 execute(G *gp)
985 { 1026 {
986 int32 hz; 1027 int32 hz;
987 1028
988 if(gp->status != Grunnable) { 1029 if(gp->status != Grunnable) {
989 runtime·printf("execute: bad g status %d\n", gp->status); 1030 runtime·printf("execute: bad g status %d\n", gp->status);
990 runtime·throw("execute: bad g status"); 1031 runtime·throw("execute: bad g status");
991 } 1032 }
992 gp->status = Grunning; 1033 gp->status = Grunning;
1034 gp->preempt = false;
993 gp->stackguard0 = gp->stackguard; 1035 gp->stackguard0 = gp->stackguard;
994 m->p->tick++; 1036 m->p->tick++;
995 m->curg = gp; 1037 m->curg = gp;
996 gp->m = m; 1038 gp->m = m;
997 1039
998 // Check whether the profiler needs to be turned on or off. 1040 // Check whether the profiler needs to be turned on or off.
999 hz = runtime·sched.profilehz; 1041 hz = runtime·sched.profilehz;
1000 if(m->profilehz != hz) 1042 if(m->profilehz != hz)
1001 runtime·resetcpuprofiler(hz); 1043 runtime·resetcpuprofiler(hz);
1002 1044
(...skipping 14 matching lines...) Expand all
1017 gcstopm(); 1059 gcstopm();
1018 goto top; 1060 goto top;
1019 } 1061 }
1020 // local runq 1062 // local runq
1021 gp = runqget(m->p); 1063 gp = runqget(m->p);
1022 if(gp) 1064 if(gp)
1023 return gp; 1065 return gp;
1024 // global runq 1066 // global runq
1025 if(runtime·sched.runqsize) { 1067 if(runtime·sched.runqsize) {
1026 runtime·lock(&runtime·sched); 1068 runtime·lock(&runtime·sched);
1027 » » gp = globrunqget(m->p); 1069 » » gp = globrunqget(m->p, 0);
1028 runtime·unlock(&runtime·sched); 1070 runtime·unlock(&runtime·sched);
1029 if(gp) 1071 if(gp)
1030 return gp; 1072 return gp;
1031 } 1073 }
1032 // poll network 1074 // poll network
1033 gp = runtime·netpoll(false); // non-blocking 1075 gp = runtime·netpoll(false); // non-blocking
1034 if(gp) { 1076 if(gp) {
1035 injectglist(gp->schedlink); 1077 injectglist(gp->schedlink);
1036 gp->status = Grunnable; 1078 gp->status = Grunnable;
1037 return gp; 1079 return gp;
(...skipping 20 matching lines...) Expand all
1058 return gp; 1100 return gp;
1059 } 1101 }
1060 stop: 1102 stop:
1061 // return P and block 1103 // return P and block
1062 runtime·lock(&runtime·sched); 1104 runtime·lock(&runtime·sched);
1063 if(runtime·gcwaiting) { 1105 if(runtime·gcwaiting) {
1064 runtime·unlock(&runtime·sched); 1106 runtime·unlock(&runtime·sched);
1065 goto top; 1107 goto top;
1066 } 1108 }
1067 if(runtime·sched.runqsize) { 1109 if(runtime·sched.runqsize) {
1068 » » gp = globrunqget(m->p); 1110 » » gp = globrunqget(m->p, 0);
1069 runtime·unlock(&runtime·sched); 1111 runtime·unlock(&runtime·sched);
1070 return gp; 1112 return gp;
1071 } 1113 }
1072 p = releasep(); 1114 p = releasep();
1073 pidleput(p); 1115 pidleput(p);
1074 runtime·unlock(&runtime·sched); 1116 runtime·unlock(&runtime·sched);
1075 if(m->spinning) { 1117 if(m->spinning) {
1076 m->spinning = false; 1118 m->spinning = false;
1077 runtime·xadd(&runtime·sched.nmspinning, -1); 1119 runtime·xadd(&runtime·sched.nmspinning, -1);
1078 } 1120 }
(...skipping 29 matching lines...) Expand all
1108 gp->status = Grunnable; 1150 gp->status = Grunnable;
1109 return gp; 1151 return gp;
1110 } 1152 }
1111 injectglist(gp); 1153 injectglist(gp);
1112 } 1154 }
1113 } 1155 }
1114 stopm(); 1156 stopm();
1115 goto top; 1157 goto top;
1116 } 1158 }
1117 1159
1160 static void
1161 resetspinning(void)
1162 {
1163 int32 nmspinning;
1164
1165 if(m->spinning) {
1166 m->spinning = false;
1167 nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
1168 if(nmspinning < 0)
1169 runtime·throw("findrunnable: negative nmspinning");
1170 } else
1171 nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
1172
1173 // M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
1174 // so see if we need to wakeup another P here.
1175 if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
1176 wakep();
1177 }
1178
1118 // Injects the list of runnable G's into the scheduler. 1179 // Injects the list of runnable G's into the scheduler.
1119 // Can run concurrently with GC. 1180 // Can run concurrently with GC.
1120 static void 1181 static void
1121 injectglist(G *glist) 1182 injectglist(G *glist)
1122 { 1183 {
1123 int32 n; 1184 int32 n;
1124 G *gp; 1185 G *gp;
1125 1186
1126 if(glist == nil) 1187 if(glist == nil)
1127 return; 1188 return;
1128 runtime·lock(&runtime·sched); 1189 runtime·lock(&runtime·sched);
1129 for(n = 0; glist; n++) { 1190 for(n = 0; glist; n++) {
1130 gp = glist; 1191 gp = glist;
1131 glist = gp->schedlink; 1192 glist = gp->schedlink;
1132 gp->status = Grunnable; 1193 gp->status = Grunnable;
1133 globrunqput(gp); 1194 globrunqput(gp);
1134 } 1195 }
1135 runtime·unlock(&runtime·sched); 1196 runtime·unlock(&runtime·sched);
1136 1197
1137 for(; n && runtime·sched.npidle; n--) 1198 for(; n && runtime·sched.npidle; n--)
1138 startm(nil, false); 1199 startm(nil, false);
1139 } 1200 }
1140 1201
1141 // One round of scheduler: find a runnable goroutine and execute it. 1202 // One round of scheduler: find a runnable goroutine and execute it.
1142 // Never returns. 1203 // Never returns.
1143 static void 1204 static void
1144 schedule(void) 1205 schedule(void)
1145 { 1206 {
1146 G *gp; 1207 G *gp;
1208 uint32 tick;
1147 1209
1148 if(m->locks) 1210 if(m->locks)
1149 runtime·throw("schedule: holding locks"); 1211 runtime·throw("schedule: holding locks");
1150 1212
1151 top: 1213 top:
1152 if(runtime·gcwaiting) { 1214 if(runtime·gcwaiting) {
1153 gcstopm(); 1215 gcstopm();
1154 goto top; 1216 goto top;
1155 } 1217 }
1156 1218
1157 » gp = runqget(m->p); 1219 » gp = nil;
1158 » if(gp && m->spinning) 1220 » // Check the global runnable queue once in a while to ensure fairness.
1159 » » runtime·throw("schedule: spinning with local work"); 1221 » // Otherwise two goroutines can completely occupy the local runqueue
1160 » if(gp == nil) 1222 » // by constantly respawning each other.
1161 » » gp = findrunnable(); 1223 » tick = m->p->tick;
1162 1224 » // This is a fancy way to say tick%61==0,
1163 » if(m->spinning) { 1225 » // it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
1164 » » m->spinning = false; 1226 » if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
1165 » » runtime·xadd(&runtime·sched.nmspinning, -1); 1227 » » runtime·lock(&runtime·sched);
1166 » } 1228 » » gp = globrunqget(m->p, 1);
1167 1229 » » runtime·unlock(&runtime·sched);
1168 » // M wakeup policy is deliberately somewhat conservative (see nmspinning handling), 1230 » » if(gp)
1169 » // so see if we need to wakeup another M here. 1231 » » » resetspinning();
1170 » if (m->p->runqhead != m->p->runqtail && 1232 » }
1171 » » runtime·atomicload(&runtime·sched.nmspinning) == 0 && 1233 » if(gp == nil) {
1172 » » runtime·atomicload(&runtime·sched.npidle) > 0) // TODO: fast atomic 1234 » » gp = runqget(m->p);
1173 » » wakep(); 1235 » » if(gp && m->spinning)
1236 » » » runtime·throw("schedule: spinning with local work");
1237 » }
1238 » if(gp == nil) {
1239 » » gp = findrunnable(); // blocks until work is available
1240 » » resetspinning();
1241 » }
1174 1242
1175 if(gp->lockedm) { 1243 if(gp->lockedm) {
1244 // Hands off own p to the locked m,
1245 // then blocks waiting for a new p.
1176 startlockedm(gp); 1246 startlockedm(gp);
1177 goto top; 1247 goto top;
1178 } 1248 }
1179 1249
1180 execute(gp); 1250 execute(gp);
1181 } 1251 }
1182 1252
1183 // Puts the current goroutine into a waiting state and unlocks the lock. 1253 // Puts the current goroutine into a waiting state and unlocks the lock.
1184 // The goroutine can be made runnable again by calling runtime·ready(gp). 1254 // The goroutine can be made runnable again by calling runtime·ready(gp).
1185 void 1255 void
(...skipping 21 matching lines...) Expand all
1207 stoplockedm(); 1277 stoplockedm();
1208 execute(gp); // Never returns. 1278 execute(gp); // Never returns.
1209 } 1279 }
1210 schedule(); 1280 schedule();
1211 } 1281 }
1212 1282
1213 // Scheduler yield. 1283 // Scheduler yield.
1214 void 1284 void
1215 runtime·gosched(void) 1285 runtime·gosched(void)
1216 { 1286 {
1217 » runtime·mcall(gosched0); 1287 » runtime·mcall(runtime·gosched0);
1218 } 1288 }
1219 1289
1220 // runtime·gosched continuation on g0. 1290 // runtime·gosched continuation on g0.
1221 static void 1291 void
1222 gosched0(G *gp) 1292 runtime·gosched0(G *gp)
1223 { 1293 {
1224 gp->status = Grunnable; 1294 gp->status = Grunnable;
1225 gp->m = nil; 1295 gp->m = nil;
1226 m->curg = nil; 1296 m->curg = nil;
1227 runtime·lock(&runtime·sched); 1297 runtime·lock(&runtime·sched);
1228 globrunqput(gp); 1298 globrunqput(gp);
1229 runtime·unlock(&runtime·sched); 1299 runtime·unlock(&runtime·sched);
1230 if(m->lockedg) { 1300 if(m->lockedg) {
1231 stoplockedm(); 1301 stoplockedm();
1232 execute(gp); // Never returns. 1302 execute(gp); // Never returns.
(...skipping 17 matching lines...) Expand all
1250 // runtime·goexit continuation on g0. 1320 // runtime·goexit continuation on g0.
1251 static void 1321 static void
1252 goexit0(G *gp) 1322 goexit0(G *gp)
1253 { 1323 {
1254 gp->status = Gdead; 1324 gp->status = Gdead;
1255 gp->m = nil; 1325 gp->m = nil;
1256 gp->lockedm = nil; 1326 gp->lockedm = nil;
1257 m->curg = nil; 1327 m->curg = nil;
1258 m->lockedg = nil; 1328 m->lockedg = nil;
1259 if(m->locked & ~LockExternal) { 1329 if(m->locked & ~LockExternal) {
1260 » » runtime·printf("invalid m->locked = %d", m->locked); 1330 » » runtime·printf("invalid m->locked = %d\n", m->locked);
1261 runtime·throw("internal lockOSThread error"); 1331 runtime·throw("internal lockOSThread error");
1262 }······· 1332 }·······
1263 m->locked = 0; 1333 m->locked = 0;
1264 runtime·unwindstack(gp, nil); 1334 runtime·unwindstack(gp, nil);
1265 gfput(m->p, gp); 1335 gfput(m->p, gp);
1266 schedule(); 1336 schedule();
1267 } 1337 }
1268 1338
1269 static void 1339 static void
1270 save(void *pc, uintptr sp) 1340 save(void *pc, uintptr sp)
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after
1380 if(m->p && m->p->status == Psyscall && runtime·cas(&m->p->status, Psyscall, Prunning)) { 1450 if(m->p && m->p->status == Psyscall && runtime·cas(&m->p->status, Psyscall, Prunning)) {
1381 // There's a cpu for us, so we can run. 1451 // There's a cpu for us, so we can run.
1382 m->mcache = m->p->mcache; 1452 m->mcache = m->p->mcache;
1383 m->p->m = m; 1453 m->p->m = m;
1384 m->p->tick++; 1454 m->p->tick++;
1385 g->status = Grunning; 1455 g->status = Grunning;
1386 // Garbage collector isn't running (since we are), 1456 // Garbage collector isn't running (since we are),
1387 // so okay to clear gcstack and gcsp. 1457 // so okay to clear gcstack and gcsp.
1388 g->gcstack = (uintptr)nil; 1458 g->gcstack = (uintptr)nil;
1389 g->gcsp = (uintptr)nil; 1459 g->gcsp = (uintptr)nil;
1460 if(g->preempt) // restore the preemption request in case we've cleared it in newstack
1461 g->stackguard0 = StackPreempt;
1390 return; 1462 return;
1391 } 1463 }
1392 1464
1393 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection 1465 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection
1394 inclocked(-1); 1466 inclocked(-1);
1395 // Try to get any other idle P. 1467 // Try to get any other idle P.
1396 m->p = nil; 1468 m->p = nil;
1397 if(runtime·sched.pidle) { 1469 if(runtime·sched.pidle) {
1398 runtime·lock(&runtime·sched); 1470 runtime·lock(&runtime·sched);
1399 p = pidleget(); 1471 p = pidleget();
1400 runtime·unlock(&runtime·sched); 1472 runtime·unlock(&runtime·sched);
1401 if(p) { 1473 if(p) {
1402 acquirep(p); 1474 acquirep(p);
1403 m->p->tick++; 1475 m->p->tick++;
1404 g->status = Grunning; 1476 g->status = Grunning;
1405 g->gcstack = (uintptr)nil; 1477 g->gcstack = (uintptr)nil;
1406 g->gcsp = (uintptr)nil; 1478 g->gcsp = (uintptr)nil;
1479 if(g->preempt) // restore the preemption request in cas e we've cleared it in newstack
1480 g->stackguard0 = StackPreempt;
1407 return; 1481 return;
1408 } 1482 }
1409 } 1483 }
1410 1484
1411 // Call the scheduler. 1485 // Call the scheduler.
1412 runtime·mcall(exitsyscall0); 1486 runtime·mcall(exitsyscall0);
1413 1487
1414 // Scheduler returned, so we're allowed to run now. 1488 // Scheduler returned, so we're allowed to run now.
1415 // Delete the gcstack information that we left for 1489 // Delete the gcstack information that we left for
1416 // the garbage collector during the system call. 1490 // the garbage collector during the system call.
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
1567 newg->gopc = (uintptr)callerpc; 1641 newg->gopc = (uintptr)callerpc;
1568 newg->status = Grunnable; 1642 newg->status = Grunnable;
1569 newg->goid = runtime·xadd64(&runtime·sched.goidgen, 1); 1643 newg->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
1570 if(raceenabled) 1644 if(raceenabled)
1571 newg->racectx = runtime·racegostart((void*)callerpc); 1645 newg->racectx = runtime·racegostart((void*)callerpc);
1572 runqput(m->p, newg); 1646 runqput(m->p, newg);
1573 1647
1574 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic 1648 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
1575 wakep(); 1649 wakep();
1576 m->locks--; 1650 m->locks--;
1651 if(m->locks == 0 && g->preempt) // restore the preemption request in ca se we've cleared it in newstack
1652 g->stackguard0 = StackPreempt;
1577 return newg; 1653 return newg;
1578 } 1654 }
1579 1655
1580 // Put on gfree list. 1656 // Put on gfree list.
1581 // If local list is too long, transfer a batch to the global list. 1657 // If local list is too long, transfer a batch to the global list.
1582 static void 1658 static void
1583 gfput(P *p, G *gp) 1659 gfput(P *p, G *gp)
1584 { 1660 {
1585 if(gp->stackguard - StackGuard != gp->stack0) 1661 if(gp->stackguard - StackGuard != gp->stack0)
1586 runtime·throw("invalid stack in gfput"); 1662 runtime·throw("invalid stack in gfput");
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after
1808 if(!Windows && (m == nil || m->mcache == nil)) 1884 if(!Windows && (m == nil || m->mcache == nil))
1809 return; 1885 return;
1810 if(prof.fn == nil || prof.hz == 0) 1886 if(prof.fn == nil || prof.hz == 0)
1811 return; 1887 return;
1812 1888
1813 runtime·lock(&prof); 1889 runtime·lock(&prof);
1814 if(prof.fn == nil) { 1890 if(prof.fn == nil) {
1815 runtime·unlock(&prof); 1891 runtime·unlock(&prof);
1816 return; 1892 return;
1817 } 1893 }
1818 » n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, p rof.pcbuf, nelem(prof.pcbuf), nil, nil); 1894 » n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, p rof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
1819 if(n > 0) 1895 if(n > 0)
1820 prof.fn(prof.pcbuf, n); 1896 prof.fn(prof.pcbuf, n);
1821 runtime·unlock(&prof); 1897 runtime·unlock(&prof);
1822 } 1898 }
1823 1899
1824 // Arrange to call fn with a traceback hz times a second. 1900 // Arrange to call fn with a traceback hz times a second.
1825 void 1901 void
1826 runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz) 1902 runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
1827 { 1903 {
1828 // Force sane arguments. 1904 // Force sane arguments.
(...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after
2005 m->throwing = -1; // do not dump full stacks 2081 m->throwing = -1; // do not dump full stacks
2006 runtime·throw("all goroutines are asleep - deadlock!"); 2082 runtime·throw("all goroutines are asleep - deadlock!");
2007 } 2083 }
2008 2084
2009 static void 2085 static void
2010 sysmon(void) 2086 sysmon(void)
2011 { 2087 {
2012 uint32 idle, delay; 2088 uint32 idle, delay;
2013 int64 now, lastpoll; 2089 int64 now, lastpoll;
2014 G *gp; 2090 G *gp;
2015 uint32 ticks[MaxGomaxprocs];
2016 2091
2017 idle = 0; // how many cycles in succession we had not wokeup somebody 2092 idle = 0; // how many cycles in succession we had not wokeup somebody
2018 delay = 0; 2093 delay = 0;
2019 for(;;) { 2094 for(;;) {
2020 if(idle == 0) // start with 20us sleep... 2095 if(idle == 0) // start with 20us sleep...
2021 delay = 20; 2096 delay = 20;
2022 else if(idle > 50) // start doubling the sleep after 1ms... 2097 else if(idle > 50) // start doubling the sleep after 1ms...
2023 delay *= 2; 2098 delay *= 2;
2024 if(delay > 10*1000) // up to 10ms 2099 if(delay > 10*1000) // up to 10ms
2025 delay = 10*1000; 2100 delay = 10*1000;
2026 runtime·usleep(delay); 2101 runtime·usleep(delay);
2027 if(runtime·gcwaiting || runtime·atomicload(&runtime·sched.npidle ) == runtime·gomaxprocs) { // TODO: fast atomic 2102 if(runtime·gcwaiting || runtime·atomicload(&runtime·sched.npidle ) == runtime·gomaxprocs) { // TODO: fast atomic
2028 runtime·lock(&runtime·sched); 2103 runtime·lock(&runtime·sched);
2029 if(runtime·atomicload(&runtime·gcwaiting) || runtime·ato micload(&runtime·sched.npidle) == runtime·gomaxprocs) { 2104 if(runtime·atomicload(&runtime·gcwaiting) || runtime·ato micload(&runtime·sched.npidle) == runtime·gomaxprocs) {
2030 runtime·atomicstore(&runtime·sched.sysmonwait, 1 ); 2105 runtime·atomicstore(&runtime·sched.sysmonwait, 1 );
2031 runtime·unlock(&runtime·sched); 2106 runtime·unlock(&runtime·sched);
2032 runtime·notesleep(&runtime·sched.sysmonnote); 2107 runtime·notesleep(&runtime·sched.sysmonnote);
2033 runtime·noteclear(&runtime·sched.sysmonnote); 2108 runtime·noteclear(&runtime·sched.sysmonnote);
2034 idle = 0; 2109 idle = 0;
2035 delay = 20; 2110 delay = 20;
2036 } else 2111 } else
2037 runtime·unlock(&runtime·sched); 2112 runtime·unlock(&runtime·sched);
2038 } 2113 }
2039 // poll network if not polled for more than 10ms 2114 // poll network if not polled for more than 10ms
2040 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll); 2115 lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
2041 now = runtime·nanotime(); 2116 now = runtime·nanotime();
2042 if(lastpoll != 0 && lastpoll + 10*1000*1000 > now) { 2117 if(lastpoll != 0 && lastpoll + 10*1000*1000 > now) {
2118 runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
2043 gp = runtime·netpoll(false); // non-blocking 2119 gp = runtime·netpoll(false); // non-blocking
2044 injectglist(gp); 2120 injectglist(gp);
2045 } 2121 }
2046 // retake P's blocked in syscalls 2122 // retake P's blocked in syscalls
2047 » » if(retake(ticks)) 2123 » » // and preempt long running G's
2124 » » if(retake(now))
2048 idle = 0; 2125 idle = 0;
2049 else 2126 else
2050 idle++; 2127 idle++;
2051 } 2128 }
2052 } 2129 }
2053 2130
2131 typedef struct Pdesc Pdesc;
2132 struct Pdesc
2133 {
2134 uint32 tick;
2135 int64 when;
2136 };
2137 static Pdesc pdesc[MaxGomaxprocs];
2138
2054 static uint32 2139 static uint32
2055 retake(uint32 *ticks) 2140 retake(int64 now)
2056 { 2141 {
2057 uint32 i, s, n; 2142 uint32 i, s, n;
2058 int64 t; 2143 int64 t;
2059 P *p; 2144 P *p;
2145 Pdesc *pd;
2060 2146
2061 n = 0; 2147 n = 0;
2062 for(i = 0; i < runtime·gomaxprocs; i++) { 2148 for(i = 0; i < runtime·gomaxprocs; i++) {
2063 p = runtime·allp[i]; 2149 p = runtime·allp[i];
2064 if(p==nil) 2150 if(p==nil)
2065 continue; 2151 continue;
2066 t = p->tick; 2152 t = p->tick;
2067 » » if(ticks[i] != t) { 2153 » » pd = &pdesc[i];
2068 » » » ticks[i] = t; 2154 » » if(pd->tick != t) {
2155 » » » pd->tick = t;
2156 » » » pd->when = now;
2069 continue; 2157 continue;
2070 } 2158 }
2071 s = p->status; 2159 s = p->status;
2072 » » if(s != Psyscall) 2160 » » if(s == Psyscall) {
2073 » » » continue; 2161 » » » // Retake P from syscall if it's there for more than 1 s ysmon tick (20us).
2074 » » if(p->runqhead == p->runqtail && runtime·atomicload(&runtime·sch ed.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0) // TODO: fast a tomic 2162 » » » // But only if there is other work to do.
2075 » » » continue; 2163 » » » if(p->runqhead == p->runqtail &&
2076 » » // Need to increment number of locked M's before the CAS. 2164 » » » » runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0)
2077 » » // Otherwise the M from which we retake can exit the syscall, 2165 » » » » continue;
2078 » » // increment nmidle and report deadlock. 2166 » » » // Need to increment number of locked M's before the CAS .
2079 » » inclocked(-1); 2167 » » » // Otherwise the M from which we retake can exit the sys call,
2080 » » if(runtime·cas(&p->status, s, Pidle)) { 2168 » » » // increment nmidle and report deadlock.
2081 » » » n++; 2169 » » » inclocked(-1);
2082 » » » handoffp(p); 2170 » » » if(runtime·cas(&p->status, s, Pidle)) {
2083 » » } 2171 » » » » n++;
2084 » » inclocked(1); 2172 » » » » handoffp(p);
2173 » » » }
2174 » » » inclocked(1);
2175 » » } else if(s == Prunning) {
2176 » » » // Preempt G if it's running for more than 10ms.
2177 » » » if(pd->when + 10*1000*1000 > now)
2178 » » » » continue;
2179 » » » preemptone(p);
2180 » » }
2085 } 2181 }
2086 return n; 2182 return n;
2087 } 2183 }
2088 2184
2089 // Tell all goroutines that they have been preempted and they should stop. 2185 // Tell all goroutines that they have been preempted and they should stop.
2090 // This function is purely best-effort. It can fail to inform a goroutine if a 2186 // This function is purely best-effort. It can fail to inform a goroutine if a
2091 // processor just started running it. 2187 // processor just started running it.
2092 // No locks need to be held. 2188 // No locks need to be held.
2093 static void 2189 static void
2094 preemptall(void) 2190 preemptall(void)
(...skipping 14 matching lines...) Expand all
2109 // goroutine. It can send inform the wrong goroutine. Even if it informs the 2205 // goroutine. It can send inform the wrong goroutine. Even if it informs the
2110 // correct goroutine, that goroutine might ignore the request if it is 2206 // correct goroutine, that goroutine might ignore the request if it is
2111 // simultaneously executing runtime·newstack. 2207 // simultaneously executing runtime·newstack.
2112 // No lock needs to be held. 2208 // No lock needs to be held.
2113 static void 2209 static void
2114 preemptone(P *p) 2210 preemptone(P *p)
2115 { 2211 {
2116 M *mp; 2212 M *mp;
2117 G *gp; 2213 G *gp;
2118 2214
2215 // Preemption requires more robust traceback routines.
2216 // For now, disable.
2217 // The if(1) silences a compiler warning about the rest of the
2218 // function being unreachable.
2219 if(1) return;
2220
2119 mp = p->m; 2221 mp = p->m;
2120 if(mp == nil || mp == m) 2222 if(mp == nil || mp == m)
2121 return; 2223 return;
2122 gp = mp->curg; 2224 gp = mp->curg;
2123 if(gp == nil || gp == mp->g0) 2225 if(gp == nil || gp == mp->g0)
2124 return; 2226 return;
2227 gp->preempt = true;
2125 gp->stackguard0 = StackPreempt; 2228 gp->stackguard0 = StackPreempt;
2126 } 2229 }
2127 2230
2128 // Put mp on midle list. 2231 // Put mp on midle list.
2129 // Sched must be locked. 2232 // Sched must be locked.
2130 static void 2233 static void
2131 mput(M *mp) 2234 mput(M *mp)
2132 { 2235 {
2133 mp->schedlink = runtime·sched.midle; 2236 mp->schedlink = runtime·sched.midle;
2134 runtime·sched.midle = mp; 2237 runtime·sched.midle = mp;
(...skipping 25 matching lines...) Expand all
2160 runtime·sched.runqtail->schedlink = gp; 2263 runtime·sched.runqtail->schedlink = gp;
2161 else 2264 else
2162 runtime·sched.runqhead = gp; 2265 runtime·sched.runqhead = gp;
2163 runtime·sched.runqtail = gp; 2266 runtime·sched.runqtail = gp;
2164 runtime·sched.runqsize++; 2267 runtime·sched.runqsize++;
2165 } 2268 }
2166 2269
2167 // Try get a batch of G's from the global runnable queue. 2270 // Try get a batch of G's from the global runnable queue.
2168 // Sched must be locked. 2271 // Sched must be locked.
2169 static G* 2272 static G*
2170 globrunqget(P *p) 2273 globrunqget(P *p, int32 max)
2171 { 2274 {
2172 G *gp, *gp1; 2275 G *gp, *gp1;
2173 int32 n; 2276 int32 n;
2174 2277
2175 if(runtime·sched.runqsize == 0) 2278 if(runtime·sched.runqsize == 0)
2176 return nil; 2279 return nil;
2177 n = runtime·sched.runqsize/runtime·gomaxprocs+1; 2280 n = runtime·sched.runqsize/runtime·gomaxprocs+1;
2178 if(n > runtime·sched.runqsize) 2281 if(n > runtime·sched.runqsize)
2179 n = runtime·sched.runqsize; 2282 n = runtime·sched.runqsize;
2283 if(max > 0 && n > max)
2284 n = max;
2180 runtime·sched.runqsize -= n; 2285 runtime·sched.runqsize -= n;
2181 if(runtime·sched.runqsize == 0) 2286 if(runtime·sched.runqsize == 0)
2182 runtime·sched.runqtail = nil; 2287 runtime·sched.runqtail = nil;
2183 gp = runtime·sched.runqhead; 2288 gp = runtime·sched.runqhead;
2184 runtime·sched.runqhead = gp->schedlink; 2289 runtime·sched.runqhead = gp->schedlink;
2185 n--; 2290 n--;
2186 while(n--) { 2291 while(n--) {
2187 gp1 = runtime·sched.runqhead; 2292 gp1 = runtime·sched.runqhead;
2188 runtime·sched.runqhead = gp1->schedlink; 2293 runtime·sched.runqhead = gp1->schedlink;
2189 runqput(p, gp1); 2294 runqput(p, gp1);
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after
2422 } 2527 }
2423 } 2528 }
2424 if(s != i/2 && s != i/2+1) { 2529 if(s != i/2 && s != i/2+1) {
2425 runtime·printf("bad steal %d, want %d or %d, iter %d\n", 2530 runtime·printf("bad steal %d, want %d or %d, iter %d\n",
2426 s, i/2, i/2+1, i); 2531 s, i/2, i/2+1, i);
2427 runtime·throw("bad steal"); 2532 runtime·throw("bad steal");
2428 } 2533 }
2429 } 2534 }
2430 } 2535 }
2431 2536
2537 extern void runtime·morestack(void);
2538
2539 // Does f mark the top of a goroutine stack?
2432 bool 2540 bool
2433 runtime·haszeroargs(uintptr pc) 2541 runtime·topofstack(Func *f)
2434 { 2542 {
2435 » return pc == (uintptr)runtime·goexit || 2543 » return f->entry == (uintptr)runtime·goexit ||
2436 » » pc == (uintptr)runtime·mcall || 2544 » » f->entry == (uintptr)runtime·mstart ||
2437 » » pc == (uintptr)runtime·mstart || 2545 » » f->entry == (uintptr)runtime·mcall ||
2438 » » pc == (uintptr)_rt0_go; 2546 » » f->entry == (uintptr)runtime·morestack ||
2439 } 2547 » » f->entry == (uintptr)runtime·lessstack ||
2440 2548 » » f->entry == (uintptr)_rt0_go;
2549 }
LEFTRIGHT

Powered by Google App Engine
RSS Feeds Recent Issues | This issue
This is Rietveld f62528b