Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/proc.c

Issue 137070043: code review 137070043: runtime: adapt race detector for runtime written in Go (Closed)
Left Patch Set: diff -r bc9245d02338ac13d7b3ca552c6e0bbd87bba65d https://dvyukov%40google.com@code.google.com/p/go/ Created 10 years, 6 months ago
Right Patch Set: diff -r babfcf4bc45863ec4b48f016fb970ad509c84251 https://dvyukov%40google.com@code.google.com/p/go/ Created 10 years, 6 months ago
LEFT | RIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 #include "runtime.h" 5 #include "runtime.h"
6 #include "arch_GOARCH.h" 6 #include "arch_GOARCH.h"
7 #include "zaexperiment.h" 7 #include "zaexperiment.h"
8 #include "malloc.h" 8 #include "malloc.h"
9 #include "stack.h" 9 #include "stack.h"
10 #include "race.h" 10 #include "race.h"
(...skipping 110 matching lines...)
121 static void globrunqputbatch(G*, G*, int32); 121 static void globrunqputbatch(G*, G*, int32);
122 static G* globrunqget(P*, int32); 122 static G* globrunqget(P*, int32);
123 static P* pidleget(void); 123 static P* pidleget(void);
124 static void pidleput(P*); 124 static void pidleput(P*);
125 static void injectglist(G*); 125 static void injectglist(G*);
126 static bool preemptall(void); 126 static bool preemptall(void);
127 static bool preemptone(P*); 127 static bool preemptone(P*);
128 static bool exitsyscallfast(void); 128 static bool exitsyscallfast(void);
129 static bool haveexperiment(int8*); 129 static bool haveexperiment(int8*);
130 static void allgadd(G*); 130 static void allgadd(G*);
131 static void dropg(void);
131 132
132 extern String runtime·buildVersion; 133 extern String runtime·buildVersion;
133 134
134 // The bootstrap sequence is: 135 // The bootstrap sequence is:
135 // 136 //
136 // call osinit 137 // call osinit
137 // call schedinit 138 // call schedinit
138 // make & queue new G 139 // make & queue new G
139 // call runtime·mstart 140 // call runtime·mstart
140 // 141 //
(...skipping 37 matching lines...)
178 n = MaxGomaxprocs; 179 n = MaxGomaxprocs;
179 procs = n; 180 procs = n;
180 } 181 }
181 procresize(procs); 182 procresize(procs);
182 183
183 runtime·copystack = runtime·precisestack; 184 runtime·copystack = runtime·precisestack;
184 p = runtime·getenv("GOCOPYSTACK"); 185 p = runtime·getenv("GOCOPYSTACK");
185 if(p != nil && !runtime·strcmp(p, (byte*)"0")) 186 if(p != nil && !runtime·strcmp(p, (byte*)"0"))
186 runtime·copystack = false; 187 runtime·copystack = false;
187 188
188 mstats.enablegc = 1;
189
190 if(runtime·buildVersion.str == nil) { 189 if(runtime·buildVersion.str == nil) {
191 // Condition should never trigger. This code just serves 190 // Condition should never trigger. This code just serves
192 // to ensure runtime·buildVersion is kept in the resulting binary. 191 // to ensure runtime·buildVersion is kept in the resulting binary.
193 runtime·buildVersion.str = (uint8*)"unknown"; 192 runtime·buildVersion.str = (uint8*)"unknown";
194 runtime·buildVersion.len = 7; 193 runtime·buildVersion.len = 7;
195 } 194 }
196 } 195 }
197 196
198 extern void main·init(void); 197 extern void main·init(void);
199 extern void runtime·init(void); 198 extern void runtime·init(void);
(...skipping 41 matching lines...)
241 // Defer unlock so that runtime.Goexit during init does the unlock too. 240 // Defer unlock so that runtime.Goexit during init does the unlock too.
242 d.fn = &initDone; 241 d.fn = &initDone;
243 d.siz = 0; 242 d.siz = 0;
244 d.link = g->defer; 243 d.link = g->defer;
245 d.argp = NoArgs; 244 d.argp = NoArgs;
246 d.special = true; 245 d.special = true;
247 g->defer = &d; 246 g->defer = &d;
248 247
249 if(g->m != &runtime·m0) 248 if(g->m != &runtime·m0)
250 runtime·throw("runtime·main not on m0"); 249 runtime·throw("runtime·main not on m0");
250
251 runtime·init(); 251 runtime·init();
252 mstats.enablegc = 1; // now that runtime is initialized, GC is okay
253
252 main·init(); 254 main·init();
253 255
254 if(g->defer != &d || d.fn != &initDone) 256 if(g->defer != &d || d.fn != &initDone)
255 runtime·throw("runtime: bad defer entry after init"); 257 runtime·throw("runtime: bad defer entry after init");
256 g->defer = d.link; 258 g->defer = d.link;
257 runtime·unlockOSThread(); 259 runtime·unlockOSThread();
258 260
259 main·main(); 261 main·main();
260 if(raceenabled) 262 if(raceenabled)
261 runtime·racefini(); 263 runtime·racefini();
262 264
263 // Make racy client program work: if panicking on 265 // Make racy client program work: if panicking on
264 // another goroutine at the same time as main returns, 266 // another goroutine at the same time as main returns,
265 // let the other goroutine finish printing the panic trace. 267 // let the other goroutine finish printing the panic trace.
266 // Once it does, it will exit. See issue 3934. 268 // Once it does, it will exit. See issue 3934.
267 if(runtime·panicking) 269 if(runtime·panicking)
268 runtime·park(nil, nil, runtime·gostringnocopy((byte*)"panicwait")); 270 runtime·park(nil, nil, runtime·gostringnocopy((byte*)"panicwait"));
269 271
270 runtime·exit(0); 272 runtime·exit(0);
271 for(;;) 273 for(;;)
272 *(int32*)runtime·main = 0; 274 *(int32*)runtime·main = 0;
273 } 275 }
274 276
275 void
276 runtime·goroutineheader(G *gp)
277 {
278 String status;
279 int64 waitfor;
280 uint32 gpstatus;
281
282 gpstatus = runtime·readgstatus(gp);
283 switch(gpstatus) {
284 case Gidle:
285 status = runtime·gostringnocopy((byte*)"idle");
286 break;
287 case Grunnable:
288 status = runtime·gostringnocopy((byte*)"runnable");
289 break;
290 case Grunning:
291 status = runtime·gostringnocopy((byte*)"running");
292 break;
293 case Gsyscall:
294 status = runtime·gostringnocopy((byte*)"syscall");
295 break;
296 case Gwaiting:
297 if(gp->waitreason.str != nil)
298 status = gp->waitreason;
299 else
300 status = runtime·gostringnocopy((byte*)"waiting");
301 break;
302 case Gscan:
303 status = runtime·gostringnocopy((byte*)"scan");
304 break;
305 case Gscanrunnable:
306 status = runtime·gostringnocopy((byte*)"scanrunnable");
307 break;
308 case Gscanrunning:
309 status = runtime·gostringnocopy((byte*)"scanrunning");
310 break;
311 case Gscansyscall:
312 status = runtime·gostringnocopy((byte*)"scansyscall");
313 break;
314 case Gscanenqueue:
315 status = runtime·gostringnocopy((byte*)"scanenqueue");
316 break;
317 case Gscanwaiting:
318 if(gp->waitreason.str != nil)
319 status = gp->waitreason;
320 else
321 status = runtime·gostringnocopy((byte*)"scanwaiting");
322 break;
323 case Gcopystack:
324 status = runtime·gostringnocopy((byte*)"copystack");
325 break;
326 default:
327 status = runtime·gostringnocopy((byte*)"???");
328 break;
329 }
330
331 // approx time the G is blocked, in minutes
332 waitfor = 0;
333 gpstatus = gpstatus&~Gscan; // drop the scan bit
334 if((gpstatus == Gwaiting || gpstatus == Gsyscall) && gp->waitsince != 0)
335 waitfor = (runtime·nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
336
337 runtime·printf("goroutine %D [%S", gp->goid, status);
338 if(waitfor >= 1)
339 runtime·printf(", %D minutes", waitfor);
340 if(gp->lockedm != nil)
341 runtime·printf(", locked to thread");
342 runtime·printf("]:\n");
343 }
344
345 static void 277 static void
346 dumpgstatus(G* gp) 278 dumpgstatus(G* gp)
347 { 279 {
348 » runtime·printf("runtime: gp=%p, goid=%D, gp->atomicstatus=%d\n", gp, gp->goid, runtime·readgstatus(gp)); 280 » runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
349 } 281 » runtime·printf("runtime: g: g=%p, goid=%D, g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
350
351 void
352 runtime·tracebackothers(G *me)
353 {
354 » G *gp;
355 » int32 traceback;
356 » uintptr i;
357 » uint32 status;
358
359 » traceback = runtime·gotraceback(nil);
360 »·······
361 » // Show the current goroutine first, if we haven't already.
362 » if((gp = g->m->curg) != nil && gp != me) {
363 » » runtime·printf("\n");
364 » » runtime·goroutineheader(gp);
365 » » runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
366 » }
367
368 » runtime·lock(&allglock);
369 » for(i = 0; i < runtime·allglen; i++) {
370 » » gp = runtime·allg[i];
371 » » if(gp == me || gp == g->m->curg || runtime·readgstatus(gp) == Gdead)
372 » » » continue;
373 » » if(gp->issystem && traceback < 2)
374 » » » continue;
375 » » runtime·printf("\n");
376 » » runtime·goroutineheader(gp);
377 » » status = runtime·readgstatus(gp);
378 » » if((status&~Gscan) == Grunning){
379 » » » runtime·printf("\tgoroutine running on other thread; stack unavailable\n");
380 » » » runtime·printcreatedby(gp);
381 » » } else
382 » » » runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
383 » }
384 » runtime·unlock(&allglock);
385 } 282 }
386 283
387 static void 284 static void
388 checkmcount(void) 285 checkmcount(void)
389 { 286 {
390 // sched lock is held 287 // sched lock is held
391 if(runtime·sched.mcount > runtime·sched.maxmcount){ 288 if(runtime·sched.mcount > runtime·sched.maxmcount){
392 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount); 289 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
393 runtime·throw("thread exhaustion"); 290 runtime·throw("thread exhaustion");
394 } 291 }
(...skipping 215 matching lines...)
610 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that· 507 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that·
611 // put it in the Gscan state is finished. 508 // put it in the Gscan state is finished.
612 void 509 void
613 runtime·casgstatus(G *gp, uint32 oldval, uint32 newval) 510 runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
614 { 511 {
615 if(isscanstatus(oldval) || isscanstatus(newval) || oldval == newval) { 512 if(isscanstatus(oldval) || isscanstatus(newval) || oldval == newval) {
616 runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, new val); 513 runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, new val);
617 runtime·throw("casgstatus: bad incoming values"); 514 runtime·throw("casgstatus: bad incoming values");
618 } 515 }
619 516
517 // loop if gp->atomicstatus is in a scan state giving
518 // GC time to finish and change the state to oldval.
620 while(!runtime·cas(&gp->atomicstatus, oldval, newval)) { 519 while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
621 » » // loop if gp->atomicstatus is in a scan state giving 520 » » // Help GC if needed.·
622 » » // GC time to finish and change the state to oldval. 521 » » if(gp->preemptscan && !gp->gcworkdone && (oldval == Grunning || oldval == Gsyscall)) {
623 » } 522 » » » gp->preemptscan = false;
523 » » » runtime·gcphasework(gp);
524 » » }
525 » }»······
526 }
527
528 // stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
529 // or in the context of a moving collector the pointers can be flipped from pointing·
530 // to old object to pointing to new objects.·
531 // If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
532 // the caller calls restartg.
533 // If stopg returns false, the caller is not responsible for calling restartg. This can happen
534 // if another thread, either the gp itself or another GC thread is taking the responsibility·
535 // to do the GC work related to this thread.
536 bool
537 runtime·stopg(G *gp)
538 {
539 » uint32 s;
540
541 » for(;;) {
542 » » if(gp->gcworkdone)
543 » » » return false;
544
545 » » s = runtime·readgstatus(gp);
546 » » switch(s) {
547 » » default:
548 » » » dumpgstatus(gp);
549 » » » runtime·throw("stopg: gp->atomicstatus is not valid");
550
551 » » case Gdead:
552 » » » return false;
553
554 » » case Gcopystack:
555 » » » // Loop until a new stack is in place.
556 » » » break;
557
558 » » case Grunnable:
559 » » case Gsyscall:
560 » » case Gwaiting:
561 » » » // Claim goroutine by setting scan bit.
562 » » » if(!runtime·castogscanstatus(gp, s, s|Gscan))
563 » » » » break;
564 » » » // In scan state, do work.
565 » » » runtime·gcphasework(gp);
566 » » » return true;
567
568 » » case Gscanrunnable:
569 » » case Gscanwaiting:
570 » » case Gscansyscall:
571 » » » // Goroutine already claimed by another GC helper.
572 » » » return false;
573
574 » » case Grunning:
575 » » » // Claim goroutine, so we aren't racing with a status
576 » » » // transition away from Grunning.
577 » » » if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning) )
578 » » » » break;
579
580 » » » // Mark gp for preemption.
581 » » » if(!gp->gcworkdone) {
582 » » » » gp->preemptscan = true;
583 » » » » gp->preempt = true;
584 » » » » gp->stackguard0 = StackPreempt;
585 » » » }
586
587 » » » // Unclaim.
588 » » » runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
589 » » » return false;
590 » » }
591 » }
592 » // Should not be here....
593 }
594
595 // The GC requests that this routine be moved from a scanmumble state to a mumble state.
596 void·
597 runtime·restartg (G *gp)
598 {
599 » uint32 s;
600
601 » s = runtime·readgstatus(gp);
602 » switch(s) {
603 » default:
604 » » dumpgstatus(gp);·
605 » » runtime·throw("restartg: unexpected status");
606
607 » case Gdead:
608 » » break;
609
610 » case Gscanrunnable:
611 » case Gscanwaiting:
612 » case Gscansyscall:
613 » » runtime·casfromgscanstatus(gp, s, s&~Gscan);
614 » » break;
615
616 » case Gscanenqueue:
617 » » // Scan is now completed.
618 » » // Goroutine now needs to be made runnable.
619 » » // We put it on the global run queue; ready blocks on the global scheduler lock.
620 » » runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
621 » » if(gp != g->m->curg)
622 » » » runtime·throw("processing Gscanenqueue on wrong m");
623 » » dropg();
624 » » runtime·ready(gp);
625 » » break;
626 » }
627 }
628
629 static void
630 stopscanstart(G* gp)
631 {
632 » if(g == gp)
633 » » runtime·throw("GC not moved to G0");
634 » if(runtime·stopg(gp)) {
635 » » if(!isscanstatus(runtime·readgstatus(gp))) {
636 » » » dumpgstatus(gp);
637 » » » runtime·throw("GC not in scan state");
638 » » }
639 » » runtime·restartg(gp);
640 » }
641 }
642
643 // Runs on g0 and does the actual work after putting the g back on the run queue.
644 static void
645 mquiesce(G *gpmaster)
646 {
647 » G* gp;
648 » uint32 i;
649 » uint32 status;
650 » uint32 activeglen;
651
652 » activeglen = runtime·allglen;
653 » // enqueue the calling goroutine.
654 » runtime·restartg(gpmaster);
655 » for(i = 0; i < activeglen; i++) {
656 » » gp = runtime·allg[i];
657 » » if(runtime·readgstatus(gp) == Gdead)·
658 » » » gp->gcworkdone = true; // noop scan.
659 » » else·
660 » » » gp->gcworkdone = false;·
661 » » stopscanstart(gp);·
662 » }
663
664 » // Check that the G's gcwork (such as scanning) has been done. If not do it now.·
665 » // You can end up doing work here if the page trap on a Grunning Goroutine has
666 » // not been sprung or in some race situations. For example a runnable goes dead
667 » // and is started up again with a gp->gcworkdone set to false.
668 » for(i = 0; i < activeglen; i++) {
669 » » gp = runtime·allg[i];
670 » » while (!gp->gcworkdone) {
671 » » » status = runtime·readgstatus(gp);
672 » » » if(status == Gdead) {
673 » » » » gp->gcworkdone = true; // scan is a noop
674 » » » » break;
675 » » » » //do nothing, scan not needed.·
676 » » » }
677 » » » if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg·
678 » » » » runtime·noteclear(&runtime·sched.stopnote);
679 » » » else·
680 » » » » stopscanstart(gp);
681 » » }
682 » }
683
684 » for(i = 0; i < activeglen; i++) {
685 » » gp = runtime·allg[i];
686 » » status = runtime·readgstatus(gp);
687 » » if(isscanstatus(status)) {
688 » » » runtime·printf("mstopandscang:bottom: post scan bad status gp=%p has status %x\n", gp, status);
689 » » » dumpgstatus(gp);
690 » » }
691 » » if(!gp->gcworkdone && status != Gdead) {
692 » » » runtime·printf("mstopandscang:bottom: post scan gp=%p->gcworkdone still false\n", gp);
693 » » » dumpgstatus(gp);
694 » » }
695 » }
696
697 » schedule(); // Never returns.
698 }
699
700 // quiesce moves all the goroutines to a GC safepoint which for now is a at preemption point.
701 // If the global runtime·gcphase is GCmark quiesce will ensure that all of the goroutine's stacks
702 // have been scanned before it returns.
703 void
704 runtime·quiesce(G* mastergp)
705 {
706 » void (*fn)(G*);
707
708 » runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
709 » // Now move this to the g0 (aka m) stack.
710 » // g0 will potentially scan this thread and put mastergp on the runqueue ·
711 » fn = mquiesce;
712 » runtime·mcall(&fn);
624 } 713 }
625 714
626 // This is used by the GC as well as the routines that do stack dumps. In the case 715 // This is used by the GC as well as the routines that do stack dumps. In the case
627 // of GC all the routines can be reliably stopped. This is not always the case 716 // of GC all the routines can be reliably stopped. This is not always the case
628 // when the system is in panic or being exited. 717 // when the system is in panic or being exited.
629 void 718 void
630 runtime·stoptheworld(void) 719 runtime·stoptheworld(void)
631 { 720 {
632 int32 i; 721 int32 i;
633 uint32 s; 722 uint32 s;
(...skipping 905 matching lines...)
1539 g->m->curg->m = nil; 1628 g->m->curg->m = nil;
1540 g->m->curg = nil; 1629 g->m->curg = nil;
1541 } 1630 }
1542 } 1631 }
1543 1632
1544 // Puts the current goroutine into a waiting state and calls unlockf. 1633 // Puts the current goroutine into a waiting state and calls unlockf.
1545 // If unlockf returns false, the goroutine is resumed. 1634 // If unlockf returns false, the goroutine is resumed.
1546 void 1635 void
1547 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason) 1636 runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
1548 { 1637 {
1638 void (*fn)(G*);
1639
1549 g->m->waitlock = lock; 1640 g->m->waitlock = lock;
1550 g->m->waitunlockf = unlockf; 1641 g->m->waitunlockf = unlockf;
1551 g->waitreason = reason; 1642 g->waitreason = reason;
1552 » runtime·mcall(runtime·park_m); 1643 » fn = runtime·park_m;
1644 » runtime·mcall(&fn);
1553 } 1645 }
1554 1646
1555 bool 1647 bool
1556 runtime·parkunlock_c(G *gp, void *lock) 1648 runtime·parkunlock_c(G *gp, void *lock)
1557 { 1649 {
1558 USED(gp); 1650 USED(gp);
1559 runtime·unlock(lock); 1651 runtime·unlock(lock);
1560 return true; 1652 return true;
1561 } 1653 }
1562 1654
(...skipping 24 matching lines...)
1587 } 1679 }
1588 } 1680 }
1589 1681
1590 schedule(); 1682 schedule();
1591 } 1683 }
1592 1684
1593 // Scheduler yield. 1685 // Scheduler yield.
1594 void 1686 void
1595 runtime·gosched(void) 1687 runtime·gosched(void)
1596 { 1688 {
1597 » runtime·mcall(runtime·gosched_m); 1689 » void (*fn)(G*);
1690 »·······
1691 » fn = runtime·gosched_m;
1692 » runtime·mcall(&fn);
1598 } 1693 }
1599 1694
1600 // runtime·gosched continuation on g0. 1695 // runtime·gosched continuation on g0.
1601 void 1696 void
1602 runtime·gosched_m(G *gp) 1697 runtime·gosched_m(G *gp)
1603 { 1698 {
1604 uint32 status; 1699 uint32 status;
1605 1700
1606 status = runtime·readgstatus(gp); 1701 status = runtime·readgstatus(gp);
1607 » if ((status&~Gscan) != Grunning){ 1702 » if((status&~Gscan) != Grunning){
1608 dumpgstatus(gp); 1703 dumpgstatus(gp);
1609 runtime·throw("bad g status"); 1704 runtime·throw("bad g status");
1610 } 1705 }
1611 runtime·casgstatus(gp, Grunning, Grunnable); 1706 runtime·casgstatus(gp, Grunning, Grunnable);
1612 dropg(); 1707 dropg();
1613 runtime·lock(&runtime·sched.lock); 1708 runtime·lock(&runtime·sched.lock);
1614 globrunqput(gp); 1709 globrunqput(gp);
1615 runtime·unlock(&runtime·sched.lock); 1710 runtime·unlock(&runtime·sched.lock);
1616 1711
1617 schedule(); 1712 schedule();
1618 } 1713 }
1619 1714
1620 // Finishes execution of the current goroutine. 1715 // Finishes execution of the current goroutine.
1621 // Need to mark it as nosplit, because it runs with sp > stackbase (as runtime·lessstack). 1716 // Need to mark it as nosplit, because it runs with sp > stackbase (as runtime·lessstack).
1622 // Since it does not return it does not matter. But if it is preempted 1717 // Since it does not return it does not matter. But if it is preempted
1623 // at the split stack check, GC will complain about inconsistent sp. 1718 // at the split stack check, GC will complain about inconsistent sp.
1624 #pragma textflag NOSPLIT 1719 #pragma textflag NOSPLIT
1625 void 1720 void
1626 runtime·goexit(void) 1721 runtime·goexit(void)
1627 { 1722 {
1723 void (*fn)(G*);
1724
1628 if(raceenabled) 1725 if(raceenabled)
1629 runtime·racegoend(); 1726 runtime·racegoend();
1630 » runtime·mcall(goexit0); 1727 » fn = goexit0;
1728 » runtime·mcall(&fn);
1631 } 1729 }
1632 1730
1633 // runtime·goexit continuation on g0. 1731 // runtime·goexit continuation on g0.
1634 static void 1732 static void
1635 goexit0(G *gp) 1733 goexit0(G *gp)
1636 { 1734 {
1637 runtime·casgstatus(gp, Grunning, Gdead); 1735 runtime·casgstatus(gp, Grunning, Gdead);
1638 gp->m = nil; 1736 gp->m = nil;
1639 gp->lockedm = nil; 1737 gp->lockedm = nil;
1640 g->m->lockedg = nil; 1738 g->m->lockedg = nil;
(...skipping 148 matching lines...)
1789 } 1887 }
1790 1888
1791 // The goroutine g exited its system call. 1889 // The goroutine g exited its system call.
1792 // Arrange for it to run on a cpu again. 1890 // Arrange for it to run on a cpu again.
1793 // This is called only from the go syscall library, not 1891 // This is called only from the go syscall library, not
1794 // from the low-level system calls used by the runtime. 1892 // from the low-level system calls used by the runtime.
1795 #pragma textflag NOSPLIT 1893 #pragma textflag NOSPLIT
1796 void 1894 void
1797 runtime·exitsyscall(void) 1895 runtime·exitsyscall(void)
1798 { 1896 {
1897 void (*fn)(G*);
1898
1799 g->m->locks++; // see comment in entersyscall 1899 g->m->locks++; // see comment in entersyscall
1800 1900
1801 g->waitsince = 0; 1901 g->waitsince = 0;
1802 if(exitsyscallfast()) { 1902 if(exitsyscallfast()) {
1803 // There's a cpu for us, so we can run. 1903 // There's a cpu for us, so we can run.
1804 g->m->p->syscalltick++; 1904 g->m->p->syscalltick++;
1805 // We need to cas the status and scan before resuming... 1905 // We need to cas the status and scan before resuming...
1806 runtime·casgstatus(g, Gsyscall, Grunning); 1906 runtime·casgstatus(g, Gsyscall, Grunning);
1807 1907
1808 // Garbage collector isn't running (since we are), 1908 // Garbage collector isn't running (since we are),
1809 // so okay to clear gcstack and gcsp. 1909 // so okay to clear gcstack and gcsp.
1810 g->syscallstack = (uintptr)nil; 1910 g->syscallstack = (uintptr)nil;
1811 g->syscallsp = (uintptr)nil; 1911 g->syscallsp = (uintptr)nil;
1812 g->m->locks--; 1912 g->m->locks--;
1813 if(g->preempt) { 1913 if(g->preempt) {
1814 // restore the preemption request in case we've cleared it in newstack 1914 // restore the preemption request in case we've cleared it in newstack
1815 g->stackguard0 = StackPreempt; 1915 g->stackguard0 = StackPreempt;
1816 } else { 1916 } else {
1817 // otherwise restore the real stackguard, we've spoiled it in entersyscall/entersyscallblock 1917 // otherwise restore the real stackguard, we've spoiled it in entersyscall/entersyscallblock
1818 g->stackguard0 = g->stackguard; 1918 g->stackguard0 = g->stackguard;
1819 } 1919 }
1820 return; 1920 return;
1821 } 1921 }
1822 1922
1823 g->m->locks--; 1923 g->m->locks--;
1824 1924
1825 // Call the scheduler. 1925 // Call the scheduler.
1826 » runtime·mcall(exitsyscall0); 1926 » fn = exitsyscall0;
1927 » runtime·mcall(&fn);
1827 1928
1828 // Scheduler returned, so we're allowed to run now. 1929 // Scheduler returned, so we're allowed to run now.
1829 // Delete the gcstack information that we left for 1930 // Delete the gcstack information that we left for
1830 // the garbage collector during the system call. 1931 // the garbage collector during the system call.
1831 // Must wait until now because until gosched returns 1932 // Must wait until now because until gosched returns
1832 // we don't know for sure that the garbage collector 1933 // we don't know for sure that the garbage collector
1833 // is not running. 1934 // is not running.
1834 g->syscallstack = (uintptr)nil; 1935 g->syscallstack = (uintptr)nil;
1835 g->syscallsp = (uintptr)nil; 1936 g->syscallsp = (uintptr)nil;
1836 g->m->p->syscalltick++; 1937 g->m->p->syscalltick++;
(...skipping 121 matching lines...)
1958 gp->param = runtime·stackalloc(newg, size); 2059 gp->param = runtime·stackalloc(newg, size);
1959 runtime·gogo(&gp->sched); 2060 runtime·gogo(&gp->sched);
1960 } 2061 }
1961 2062
1962 // Allocate a new g, with a stack big enough for stacksize bytes. 2063 // Allocate a new g, with a stack big enough for stacksize bytes.
1963 G* 2064 G*
1964 runtime·malg(int32 stacksize) 2065 runtime·malg(int32 stacksize)
1965 { 2066 {
1966 G *newg; 2067 G *newg;
1967 byte *stk; 2068 byte *stk;
2069 void (*fn)(G*);
1968 2070
1969 if(StackTop < sizeof(Stktop)) { 2071 if(StackTop < sizeof(Stktop)) {
1970 runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop)); 2072 runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
1971 runtime·throw("runtime: bad stack.h"); 2073 runtime·throw("runtime: bad stack.h");
1972 } 2074 }
1973 2075
1974 newg = allocg(); 2076 newg = allocg();
1975 if(stacksize >= 0) { 2077 if(stacksize >= 0) {
1976 stacksize = runtime·round2(StackSystem + stacksize); 2078 stacksize = runtime·round2(StackSystem + stacksize);
1977 if(g == g->m->g0) { 2079 if(g == g->m->g0) {
1978 // running on scheduler stack already. 2080 // running on scheduler stack already.
1979 stk = runtime·stackalloc(newg, stacksize); 2081 stk = runtime·stackalloc(newg, stacksize);
1980 } else { 2082 } else {
1981 // have to call stackalloc on scheduler stack. 2083 // have to call stackalloc on scheduler stack.
1982 newg->stacksize = stacksize; 2084 newg->stacksize = stacksize;
1983 g->param = newg; 2085 g->param = newg;
1984 » » » runtime·mcall(mstackalloc); 2086 » » » fn = mstackalloc;
2087 » » » runtime·mcall(&fn);
1985 stk = g->param; 2088 stk = g->param;
1986 g->param = nil; 2089 g->param = nil;
1987 } 2090 }
1988 newg->stack0 = (uintptr)stk; 2091 newg->stack0 = (uintptr)stk;
1989 newg->stackguard = (uintptr)stk + StackGuard; 2092 newg->stackguard = (uintptr)stk + StackGuard;
1990 newg->stackguard0 = newg->stackguard; 2093 newg->stackguard0 = newg->stackguard;
1991 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop); 2094 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop);
1992 } 2095 }
1993 return newg; 2096 return newg;
1994 } 2097 }
(...skipping 20 matching lines...)
2015 // Put it on the queue of g's waiting to run. 2118 // Put it on the queue of g's waiting to run.
2016 // The compiler turns a go statement into a call to this. 2119 // The compiler turns a go statement into a call to this.
2017 // Cannot split the stack because it assumes that the arguments 2120 // Cannot split the stack because it assumes that the arguments
2018 // are available sequentially after &fn; they would not be 2121 // are available sequentially after &fn; they would not be
2019 // copied if a stack split occurred. 2122 // copied if a stack split occurred.
2020 #pragma textflag NOSPLIT 2123 #pragma textflag NOSPLIT
2021 void 2124 void
2022 runtime·newproc(int32 siz, FuncVal* fn, ...) 2125 runtime·newproc(int32 siz, FuncVal* fn, ...)
2023 { 2126 {
2024 byte *argp; 2127 byte *argp;
2128 void (*mfn)(void);
2025 2129
2026 if(thechar == '5') 2130 if(thechar == '5')
2027 argp = (byte*)(&fn+2); // skip caller's saved LR 2131 argp = (byte*)(&fn+2); // skip caller's saved LR
2028 else 2132 else
2029 argp = (byte*)(&fn+1); 2133 argp = (byte*)(&fn+1);
2030 2134
2031 g->m->locks++; 2135 g->m->locks++;
2032 g->m->scalararg[0] = siz; 2136 g->m->scalararg[0] = siz;
2033 g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz); 2137 g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
2034 g->m->ptrarg[0] = argp; 2138 g->m->ptrarg[0] = argp;
2035 g->m->ptrarg[1] = fn; 2139 g->m->ptrarg[1] = fn;
2036 » runtime·onM(newproc_m); 2140 » mfn = newproc_m;
2141 » runtime·onM(&mfn);
2037 g->m->locks--; 2142 g->m->locks--;
2038 } 2143 }
2039 2144
2040 // Create a new g running fn with narg bytes of arguments starting 2145 // Create a new g running fn with narg bytes of arguments starting
2041 // at argp and returning nret bytes of results. callerpc is the 2146 // at argp and returning nret bytes of results. callerpc is the
2042 // address of the go statement that created this. The new g is put 2147 // address of the go statement that created this. The new g is put
2043 // on the queue of g's waiting to run. 2148 // on the queue of g's waiting to run.
2044 G* 2149 G*
2045 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc) 2150 runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
2046 { 2151 {
(...skipping 68 matching lines...)
2115 g->stackguard0 = StackPreempt; 2220 g->stackguard0 = StackPreempt;
2116 return newg; 2221 return newg;
2117 } 2222 }
2118 2223
2119 static void 2224 static void
2120 allgadd(G *gp) 2225 allgadd(G *gp)
2121 { 2226 {
2122 G **new; 2227 G **new;
2123 uintptr cap; 2228 uintptr cap;
2124 2229
2125 » if (runtime·readgstatus(gp) == Gidle)· 2230 » if(runtime·readgstatus(gp) == Gidle)·
2126 runtime·throw("allgadd: bad status Gidle"); 2231 runtime·throw("allgadd: bad status Gidle");
2127 2232
2128 runtime·lock(&allglock); 2233 runtime·lock(&allglock);
2129 if(runtime·allglen >= allgcap) { 2234 if(runtime·allglen >= allgcap) {
2130 cap = 4096/sizeof(new[0]); 2235 cap = 4096/sizeof(new[0]);
2131 if(cap < 2*allgcap) 2236 if(cap < 2*allgcap)
2132 cap = 2*allgcap; 2237 cap = 2*allgcap;
2133 new = runtime·mallocgc(cap*sizeof(new[0]), nil, 0); 2238 new = runtime·mallocgc(cap*sizeof(new[0]), nil, 0);
2134 if(new == nil) 2239 if(new == nil)
2135 runtime·throw("runtime: cannot allocate memory"); 2240 runtime·throw("runtime: cannot allocate memory");
(...skipping 10 matching lines...)
2146 } 2251 }
2147 2252
2148 // Put on gfree list. 2253 // Put on gfree list.
2149 // If local list is too long, transfer a batch to the global list. 2254 // If local list is too long, transfer a batch to the global list.
2150 static void 2255 static void
2151 gfput(P *p, G *gp) 2256 gfput(P *p, G *gp)
2152 { 2257 {
2153 uintptr stksize; 2258 uintptr stksize;
2154 Stktop *top; 2259 Stktop *top;
2155 2260
2156 » if (runtime·readgstatus(gp) != Gdead)· 2261 » if(runtime·readgstatus(gp) != Gdead)·
2157 runtime·throw("gfput: bad status (not Gdead)"); 2262 runtime·throw("gfput: bad status (not Gdead)");
2158 2263
2159 if(gp->stackguard - StackGuard != gp->stack0) 2264 if(gp->stackguard - StackGuard != gp->stack0)
2160 runtime·throw("invalid stack in gfput"); 2265 runtime·throw("invalid stack in gfput");
2161 stksize = gp->stackbase + sizeof(Stktop) - gp->stack0; 2266 stksize = gp->stackbase + sizeof(Stktop) - gp->stack0;
2162 if(stksize != gp->stacksize) { 2267 if(stksize != gp->stacksize) {
2163 runtime·printf("runtime: bad stacksize, goroutine %D, remain=%d, last=%d\n", 2268 runtime·printf("runtime: bad stacksize, goroutine %D, remain=%d, last=%d\n",
2164 gp->goid, (int32)gp->stacksize, (int32)stksize); 2269 gp->goid, (int32)gp->stacksize, (int32)stksize);
2165 runtime·throw("gfput: bad stacksize"); 2270 runtime·throw("gfput: bad stacksize");
2166 } 2271 }
(...skipping 23 matching lines...)
2190 } 2295 }
2191 } 2296 }
2192 2297
2193 // Get from gfree list. 2298 // Get from gfree list.
2194 // If local list is empty, grab a batch from global list. 2299 // If local list is empty, grab a batch from global list.
2195 static G* 2300 static G*
2196 gfget(P *p) 2301 gfget(P *p)
2197 { 2302 {
2198 G *gp; 2303 G *gp;
2199 byte *stk; 2304 byte *stk;
2305 void (*fn)(G*);
2200 2306
2201 retry: 2307 retry:
2202 gp = p->gfree; 2308 gp = p->gfree;
2203 if(gp == nil && runtime·sched.gfree) { 2309 if(gp == nil && runtime·sched.gfree) {
2204 runtime·lock(&runtime·sched.gflock); 2310 runtime·lock(&runtime·sched.gflock);
2205 while(p->gfreecnt < 32 && runtime·sched.gfree != nil) { 2311 while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
2206 p->gfreecnt++; 2312 p->gfreecnt++;
2207 gp = runtime·sched.gfree; 2313 gp = runtime·sched.gfree;
2208 runtime·sched.gfree = gp->schedlink; 2314 runtime·sched.gfree = gp->schedlink;
2209 runtime·sched.ngfree--; 2315 runtime·sched.ngfree--;
2210 gp->schedlink = p->gfree; 2316 gp->schedlink = p->gfree;
2211 p->gfree = gp; 2317 p->gfree = gp;
2212 } 2318 }
2213 runtime·unlock(&runtime·sched.gflock); 2319 runtime·unlock(&runtime·sched.gflock);
2214 goto retry; 2320 goto retry;
2215 } 2321 }
2216 if(gp) { 2322 if(gp) {
2217 p->gfree = gp->schedlink; 2323 p->gfree = gp->schedlink;
2218 p->gfreecnt--; 2324 p->gfreecnt--;
2219 2325
2220 if(gp->stack0 == 0) { 2326 if(gp->stack0 == 0) {
2221 // Stack was deallocated in gfput. Allocate a new one. 2327 // Stack was deallocated in gfput. Allocate a new one.
2222 if(g == g->m->g0) { 2328 if(g == g->m->g0) {
2223 stk = runtime·stackalloc(gp, FixedStack); 2329 stk = runtime·stackalloc(gp, FixedStack);
2224 } else { 2330 } else {
2225 gp->stacksize = FixedStack; 2331 gp->stacksize = FixedStack;
2226 g->param = gp; 2332 g->param = gp;
2227 » » » » runtime·mcall(mstackalloc); 2333 » » » » fn = mstackalloc;
2334 » » » » runtime·mcall(&fn);
2228 stk = g->param; 2335 stk = g->param;
2229 g->param = nil; 2336 g->param = nil;
2230 } 2337 }
2231 gp->stack0 = (uintptr)stk; 2338 gp->stack0 = (uintptr)stk;
2232 gp->stackbase = (uintptr)stk + FixedStack - sizeof(Stktop); 2339 gp->stackbase = (uintptr)stk + FixedStack - sizeof(Stktop);
2233 gp->stackguard = (uintptr)stk + StackGuard; 2340 gp->stackguard = (uintptr)stk + StackGuard;
2234 gp->stackguard0 = gp->stackguard; 2341 gp->stackguard0 = gp->stackguard;
2235 } else { 2342 } else {
2236 if(raceenabled) 2343 if(raceenabled)
2237 runtime·racemalloc((void*)gp->stack0, gp->stackbase + sizeof(Stktop) - gp->stack0); 2344 runtime·racemalloc((void*)gp->stack0, gp->stackbase + sizeof(Stktop) - gp->stack0);
(...skipping 603 matching lines...)
2841 // and preempt long running G's 2948 // and preempt long running G's
2842 if(retake(now)) 2949 if(retake(now))
2843 idle = 0; 2950 idle = 0;
2844 else 2951 else
2845 idle++; 2952 idle++;
2846 2953
2847 // check if we need to force a GC 2954 // check if we need to force a GC
2848 lastgc = runtime·atomicload64(&mstats.last_gc); 2955 lastgc = runtime·atomicload64(&mstats.last_gc);
2849 if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) { 2956 if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
2850 runtime·lock(&runtime·forcegc.lock); 2957 runtime·lock(&runtime·forcegc.lock);
2851 » » » runtime·forcegc.idle = 0; 2958 » » » if(runtime·forcegc.g != nil) {
2852 » » » runtime·forcegc.g->schedlink = nil; 2959 » » » » // Goroutine may be started but has not initiali zed g yet.
2853 » » » injectglist(runtime·forcegc.g); 2960 » » » » runtime·forcegc.idle = 0;
2961 » » » » runtime·forcegc.g->schedlink = nil;
2962 » » » » injectglist(runtime·forcegc.g);
2963 » » » }
2854 runtime·unlock(&runtime·forcegc.lock); 2964 runtime·unlock(&runtime·forcegc.lock);
2855 } 2965 }
2856 2966
2857 // scavenge heap once in a while 2967 // scavenge heap once in a while
2858 if(lastscavenge + scavengelimit/2 < now) { 2968 if(lastscavenge + scavengelimit/2 < now) {
2859 runtime·MHeap_Scavenge(nscavenge, now, scavengelimit); 2969 runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
2860 lastscavenge = now; 2970 lastscavenge = now;
2861 nscavenge++; 2971 nscavenge++;
2862 } 2972 }
2863 2973
(...skipping 501 matching lines...)
3365 } 3475 }
3366 } 3476 }
3367 if(s != i/2 && s != i/2+1) { 3477 if(s != i/2 && s != i/2+1) {
3368 runtime·printf("bad steal %d, want %d or %d, iter %d\n", 3478 runtime·printf("bad steal %d, want %d or %d, iter %d\n",
3369 s, i/2, i/2+1, i); 3479 s, i/2, i/2+1, i);
3370 runtime·throw("bad steal"); 3480 runtime·throw("bad steal");
3371 } 3481 }
3372 } 3482 }
3373 } 3483 }
3374 3484
3375 extern void runtime·morestack(void);
3376 uintptr runtime·externalthreadhandlerp;
3377
3378 // Does f mark the top of a goroutine stack?
3379 bool
3380 runtime·topofstack(Func *f)
3381 {
3382 return f->entry == (uintptr)runtime·goexit ||
3383 f->entry == (uintptr)runtime·mstart ||
3384 f->entry == (uintptr)runtime·mcall ||
3385 f->entry == (uintptr)runtime·onM ||
3386 f->entry == (uintptr)runtime·morestack ||
3387 f->entry == (uintptr)runtime·lessstack ||
3388 f->entry == (uintptr)_rt0_go ||
3389 (runtime·externalthreadhandlerp != 0 && f->entry == runtime·externalthreadhandlerp);
3390 }
3391
3392 void 3485 void
3393 runtime·setmaxthreads_m(void) 3486 runtime·setmaxthreads_m(void)
3394 { 3487 {
3395 int32 in; 3488 int32 in;
3396 int32 out; 3489 int32 out;
3397 3490
3398 in = g->m->scalararg[0]; 3491 in = g->m->scalararg[0];
3399 3492
3400 runtime·lock(&runtime·sched.lock); 3493 runtime·lock(&runtime·sched.lock);
3401 out = runtime·sched.maxmcount; 3494 out = runtime·sched.maxmcount;
(...skipping 37 matching lines...)
3439 p = mp->p->id; 3532 p = mp->p->id;
3440 FLUSH(&p); 3533 FLUSH(&p);
3441 } 3534 }
3442 3535
3443 #pragma textflag NOSPLIT 3536 #pragma textflag NOSPLIT
3444 void 3537 void
3445 sync·runtime_procUnpin() 3538 sync·runtime_procUnpin()
3446 { 3539 {
3447 g->m->locks--; 3540 g->m->locks--;
3448 } 3541 }
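Note on the recurring right-hand-side change in this delta: direct calls such as runtime·mcall(runtime·park_m) become a two-step form that stores the continuation in a local function variable and passes its address (fn = runtime·park_m; runtime·mcall(&fn)), and runtime·onM call sites change the same way, presumably so the argument has the shape of a Go function value for the runtime being rewritten in Go. Below is a minimal sketch of the call-site pattern only; it assumes the runtime.h context for G and runtime·mcall, and the names parkdemo and parkdemo_m are hypothetical, not part of this patch set.

	static void parkdemo_m(G *gp);	// hypothetical continuation that runs on g0

	static void
	parkdemo(void)
	{
		void (*fn)(G*);

		// Old form (left column): pass the function directly.
		//	runtime·mcall(parkdemo_m);

		// New form (right column): store the function in a local
		// variable and pass its address, matching the updated
		// runtime·mcall and runtime·onM call sites in this patch set.
		fn = parkdemo_m;
		runtime·mcall(&fn);
	}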
