Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/proc.c

Issue 5279048: code review 5279048: runtime: faster and more scalable GC (Closed)
Left Patch Set: diff -r fd80a4497037 https://go.googlecode.com/hg/ Created 13 years, 3 months ago
Right Patch Set: diff -r f44057cc01b2 https://go.googlecode.com/hg/ Created 12 years, 11 months ago
LEFT | RIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 #include "runtime.h" 5 #include "runtime.h"
6 #include "arch.h" 6 #include "arch_GOARCH.h"
7 #include "defs.h" 7 #include "defs_GOOS_GOARCH.h"
8 #include "malloc.h" 8 #include "malloc.h"
9 #include "os.h" 9 #include "os_GOOS.h"
10 #include "stack.h" 10 #include "stack.h"
11 11
12 bool runtime·iscgo; 12 bool runtime·iscgo;
13 13
14 static void unwindstack(G*, byte*); 14 static void unwindstack(G*, byte*);
15 static void schedule(G*); 15 static void schedule(G*);
16 16
17 typedef struct Sched Sched; 17 typedef struct Sched Sched;
18 18
19 M runtime·m0; 19 M runtime·m0;
(...skipping 137 matching lines...)
157 for(;;) { 157 for(;;) {
158 v = runtime·sched.atomic; 158 v = runtime·sched.atomic;
159 w = v; 159 w = v;
160 w &= ~(mcpuMask<<mcpumaxShift); 160 w &= ~(mcpuMask<<mcpumaxShift);
161 w |= n<<mcpumaxShift; 161 w |= n<<mcpumaxShift;
162 if(runtime·cas(&runtime·sched.atomic, v, w)) 162 if(runtime·cas(&runtime·sched.atomic, v, w))
163 break; 163 break;
164 } 164 }
165 } 165 }
166 166
167 // Keep trace of scavenger's goroutine for deadlock detection.
168 static G *scvg;
169
167 // The bootstrap sequence is: 170 // The bootstrap sequence is:
168 // 171 //
169 // call osinit 172 // call osinit
170 // call schedinit 173 // call schedinit
171 // make & queue new G 174 // make & queue new G
172 // call runtime·mstart 175 // call runtime·mstart
173 // 176 //
174 // The new G calls runtime·main. 177 // The new G calls runtime·main.
175 void 178 void
176 runtime·schedinit(void) 179 runtime·schedinit(void)
(...skipping 13 matching lines...)
190 // so that we don't need to call malloc when we crash. 193 // so that we don't need to call malloc when we crash.
191 // runtime·findfunc(0); 194 // runtime·findfunc(0);
192 195
193 runtime·gomaxprocs = 1; 196 runtime·gomaxprocs = 1;
194 p = runtime·getenv("GOMAXPROCS"); 197 p = runtime·getenv("GOMAXPROCS");
195 if(p != nil && (n = runtime·atoi(p)) != 0) { 198 if(p != nil && (n = runtime·atoi(p)) != 0) {
196 if(n > maxgomaxprocs) 199 if(n > maxgomaxprocs)
197 n = maxgomaxprocs; 200 n = maxgomaxprocs;
198 runtime·gomaxprocs = n; 201 runtime·gomaxprocs = n;
199 } 202 }
200 » setmcpumax(runtime·gomaxprocs); 203 » // wait for the main goroutine to start before taking
204 » // GOMAXPROCS into account.
205 » setmcpumax(1);
201 runtime·singleproc = runtime·gomaxprocs == 1; 206 runtime·singleproc = runtime·gomaxprocs == 1;
202 207
203 canaddmcpu(); // mcpu++ to account for bootstrap m 208 canaddmcpu(); // mcpu++ to account for bootstrap m
204 m->helpgc = 1; // flag to tell schedule() to mcpu-- 209 m->helpgc = 1; // flag to tell schedule() to mcpu--
205 runtime·sched.grunning++; 210 runtime·sched.grunning++;
206 211
207 mstats.enablegc = 1; 212 mstats.enablegc = 1;
208 m->nomemprof--; 213 m->nomemprof--;
209 } 214 }
210 215
211 extern void main·init(void); 216 extern void main·init(void);
212 extern void main·main(void); 217 extern void main·main(void);
213 218
214 // The main goroutine. 219 // The main goroutine.
215 void 220 void
216 runtime·main(void) 221 runtime·main(void)
217 { 222 {
218 // Lock the main goroutine onto this, the main OS thread, 223 // Lock the main goroutine onto this, the main OS thread,
219 // during initialization. Most programs won't care, but a few 224 // during initialization. Most programs won't care, but a few
220 // do require certain calls to be made by the main thread. 225 // do require certain calls to be made by the main thread.
221 // Those can arrange for main.main to run in the main thread 226 // Those can arrange for main.main to run in the main thread
222 // by calling runtime.LockOSThread during initialization 227 // by calling runtime.LockOSThread during initialization
223 // to preserve the lock. 228 // to preserve the lock.
224 runtime·LockOSThread(); 229 runtime·LockOSThread();
230 // From now on, newgoroutines may use non-main threads.
231 setmcpumax(runtime·gomaxprocs);
225 runtime·sched.init = true; 232 runtime·sched.init = true;
233 	scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runtime·main);
226 main·init(); 234 main·init();
227 runtime·sched.init = false; 235 runtime·sched.init = false;
228 if(!runtime·sched.lockmain) 236 if(!runtime·sched.lockmain)
229 runtime·UnlockOSThread(); 237 runtime·UnlockOSThread();
230 238
231 main·main(); 239 main·main();
232 runtime·exit(0); 240 runtime·exit(0);
233 for(;;) 241 for(;;)
234 *(int32*)runtime·main = 0; 242 *(int32*)runtime·main = 0;
235 } 243 }
(...skipping 81 matching lines...)
317 runtime·idlegoroutine(void) 325 runtime·idlegoroutine(void)
318 { 326 {
319 if(g->idlem != nil) 327 if(g->idlem != nil)
320 runtime·throw("g is already an idle goroutine"); 328 runtime·throw("g is already an idle goroutine");
321 g->idlem = m; 329 g->idlem = m;
322 } 330 }
323 331
324 static void 332 static void
325 mcommoninit(M *m) 333 mcommoninit(M *m)
326 { 334 {
335 m->id = runtime·sched.mcount++;
336 m->fastrand = 0x49f6428aUL + m->id + runtime·cputicks();
337 m->stackalloc = runtime·malloc(sizeof(*m->stackalloc));
338 runtime·FixAlloc_Init(m->stackalloc, FixedStack, runtime·SysAlloc, nil, nil);
339
340 if(m->mcache == nil)
341 m->mcache = runtime·allocmcache();
342
343 runtime·callers(1, m->createstack, nelem(m->createstack));
344
327 // Add to runtime·allm so garbage collector doesn't free m 345 // Add to runtime·allm so garbage collector doesn't free m
328 // when it is just in a register or thread-local storage. 346 // when it is just in a register or thread-local storage.
329 m->alllink = runtime·allm; 347 m->alllink = runtime·allm;
330 » // runtime·Cgocalls() iterates over allm w/o schedlock, 348 » // runtime·NumCgoCall() iterates over allm w/o schedlock,
331 // so we need to publish it safely. 349 // so we need to publish it safely.
332 runtime·atomicstorep(&runtime·allm, m); 350 runtime·atomicstorep(&runtime·allm, m);
333
334 m->id = runtime·sched.mcount++;
335 m->fastrand = 0x49f6428aUL + m->id;
336 m->stackalloc = runtime·malloc(sizeof(*m->stackalloc));
337 runtime·FixAlloc_Init(m->stackalloc, FixedStack, runtime·SysAlloc, nil, nil);
338
339 if(m->mcache == nil)
340 m->mcache = runtime·allocmcache();
341 } 351 }
342 352
343 // Try to increment mcpu. Report whether succeeded. 353 // Try to increment mcpu. Report whether succeeded.
344 static bool 354 static bool
345 canaddmcpu(void) 355 canaddmcpu(void)
346 { 356 {
347 uint32 v; 357 uint32 v;
348 358
349 for(;;) { 359 for(;;) {
350 v = runtime·sched.atomic; 360 v = runtime·sched.atomic;
(...skipping 222 matching lines...)
573 // We hold the sched lock, so no one else is manipulating the 583 // We hold the sched lock, so no one else is manipulating the
574 // g queue or changing mcpumax. Entersyscall can decrement 584 // g queue or changing mcpumax. Entersyscall can decrement
575 // mcpu, but if does so when there is something on the g queue, 585 // mcpu, but if does so when there is something on the g queue,
576 // the gwait bit will be set, so entersyscall will take the slow path 586 // the gwait bit will be set, so entersyscall will take the slow path
577 		// and use the sched lock. So it cannot invalidate our decision. 587 		// and use the sched lock. So it cannot invalidate our decision.
578 // 588 //
579 // Wait on global m queue. 589 // Wait on global m queue.
580 mput(m); 590 mput(m);
581 } 591 }
582 592
583 » v = runtime·atomicload(&runtime·sched.atomic); 593 » // Look for deadlock situation.
584 » if(runtime·sched.grunning == 0) 594 » if((scvg == nil && runtime·sched.grunning == 0) ||
595 » (scvg != nil && runtime·sched.grunning == 1 && runtime·sched.gwait == 0 &&
596 » (scvg->status == Grunning || scvg->status == Gsyscall))) {
585 runtime·throw("all goroutines are asleep - deadlock!"); 597 runtime·throw("all goroutines are asleep - deadlock!");
598 }
599
586 m->nextg = nil; 600 m->nextg = nil;
587 m->waitnextg = 1; 601 m->waitnextg = 1;
588 runtime·noteclear(&m->havenextg); 602 runtime·noteclear(&m->havenextg);
589 603
590 // Stoptheworld is waiting for all but its cpu to go to stop. 604 // Stoptheworld is waiting for all but its cpu to go to stop.
591 // Entersyscall might have decremented mcpu too, but if so 605 // Entersyscall might have decremented mcpu too, but if so
592 // it will see the waitstop and take the slow path. 606 // it will see the waitstop and take the slow path.
593 // Exitsyscall never increments mcpu beyond mcpumax. 607 // Exitsyscall never increments mcpu beyond mcpumax.
608 v = runtime·atomicload(&runtime·sched.atomic);
594 if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) { 609 if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) {
595 // set waitstop = 0 (known to be 1) 610 // set waitstop = 0 (known to be 1)
596 runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift); 611 runtime·xadd(&runtime·sched.atomic, -1<<waitstopShift);
597 runtime·notewakeup(&runtime·sched.stopped); 612 runtime·notewakeup(&runtime·sched.stopped);
598 } 613 }
599 schedunlock(); 614 schedunlock();
600 615
601 runtime·notesleep(&m->havenextg); 616 runtime·notesleep(&m->havenextg);
602 if(m->helpgc) { 617 if(m->helpgc) {
603 runtime·gchelper(); 618 runtime·gchelper();
(...skipping 18 matching lines...)
622 if(n > runtime·ncpu) 637 if(n > runtime·ncpu)
623 n = runtime·ncpu; 638 n = runtime·ncpu;
624 if(n > MaxGcproc) 639 if(n > MaxGcproc)
625 n = MaxGcproc; 640 n = MaxGcproc;
626 if(n > runtime·sched.mwait+1) // one M is currently running 641 if(n > runtime·sched.mwait+1) // one M is currently running
627 n = runtime·sched.mwait+1; 642 n = runtime·sched.mwait+1;
628 return n; 643 return n;
629 } 644 }
630 645
631 void 646 void
632 runtime·helpgc(void) 647 runtime·helpgc(int32 nproc)
633 { 648 {
634 M *mp; 649 M *mp;
635 int32 n; 650 int32 n;
636 651
637 runtime·lock(&runtime·sched); 652 runtime·lock(&runtime·sched);
638 » for(n = runtime·gcprocs()-1; n; n--) { 653 » for(n = nproc-1; n; n--) {
639 mp = mget(nil); 654 mp = mget(nil);
640 if(mp == nil) 655 if(mp == nil)
641 runtime·throw("runtime·gcprocs inconsistency"); 656 runtime·throw("runtime·gcprocs inconsistency");
642 mp->helpgc = 1; 657 mp->helpgc = 1;
643 mp->waitnextg = 0; 658 mp->waitnextg = 0;
644 runtime·notewakeup(&mp->havenextg); 659 runtime·notewakeup(&mp->havenextg);
645 } 660 }
646 runtime·unlock(&runtime·sched); 661 runtime·unlock(&runtime·sched);
647 } 662 }
648 663
(...skipping 64 matching lines...)
713 // but m is not running a specific goroutine, 728 // but m is not running a specific goroutine,
714 // so set the helpgc flag as a signal to m's 729 // so set the helpgc flag as a signal to m's
715 // first schedule(nil) to mcpu-- and grunning--. 730 // first schedule(nil) to mcpu-- and grunning--.
716 m = runtime·newm(); 731 m = runtime·newm();
717 m->helpgc = 1; 732 m->helpgc = 1;
718 runtime·sched.grunning++; 733 runtime·sched.grunning++;
719 } 734 }
720 schedunlock(); 735 schedunlock();
721 } 736 }
722 737
738 // Tests that GC helper procs are properly started.
739 // That is, on one hand number of GC procs eventually reaches the maximum,
740 // and on the other hand no unbounded proc creation takes place.
741 void
742 runtime·CTestGcprocs(bool isShort)
743 {
744 int32 ncpu, i, procs, mwait0, gcprocs;
745
746 USED(isShort);
747
748 	// temporarily bump ncpu, restore before return
749 ncpu = runtime·ncpu;
750 runtime·ncpu = MaxGcproc;
751 procs = runtime·gomaxprocsfunc(-1);
752 gcprocs = 0;
753
754 // set GOMAXPROCS=1, so new procs should not be created
755 runtime·gomaxprocsfunc(1);
756 mwait0 = runtime·sched.mwait;
757
758 for(i=0; i<MaxGcproc*2; i++) {
759 // emulate GC cycle
760 runtime·stoptheworld();
761 gcprocs = runtime·gcprocs();
762 runtime·starttheworld();
763
764 if(gcprocs != 1) {
765 runtime·printf("gcprocs=%d\n", gcprocs);
766 runtime·panicstring("gcprocs != 1");
767 }
768
769 		// give a new proc a chance to start
770 g->status = Gwaiting;
771 g->waitreason = "test sleep";
772 runtime·tsleep(10000);
773 }
774
775 if(runtime·sched.mwait != mwait0) {
776 		runtime·printf("mwait=%d, mwait0=%d\n", runtime·sched.mwait, mwait0);
777 runtime·panicstring("new procs started when GOMAXPROCS=1");
778 }
779
780 // set GOMAXPROCS=MaxGcproc, so new procs can be created
781 runtime·gomaxprocsfunc(MaxGcproc);
782 mwait0 = runtime·sched.mwait;
783
784 for(i=0; i<MaxGcproc*2; i++) {
785 // emulate GC cycle
786 runtime·stoptheworld();
787 gcprocs = runtime·gcprocs();
788 runtime·starttheworld();
789
790 		// give a new proc a chance to start
791 g->status = Gwaiting;
792 g->waitreason = "test sleep";
793 runtime·tsleep(10000);
794 }
795
796 if(gcprocs != MaxGcproc) {
797 		runtime·printf("gcprocs=%d, MaxGcproc=%d\n", gcprocs, MaxGcproc);
798 runtime·panicstring("new procs are not started");
799 }
800
801 if(runtime·sched.mwait > mwait0 + MaxGcproc - 1) {
802 runtime·printf("MaxGcproc=%d, mwait=%d, mwait0=%d\n",
803 MaxGcproc, runtime·sched.mwait, mwait0);
804 runtime·panicstring("a way too many procs started");
805 }
806
807 runtime·gomaxprocsfunc(procs);
808 runtime·ncpu = ncpu;
809 }
810
723 // Called to start an M. 811 // Called to start an M.
724 void 812 void
725 runtime·mstart(void) 813 runtime·mstart(void)
726 { 814 {
727 if(g != m->g0) 815 if(g != m->g0)
728 runtime·throw("bad runtime·mstart"); 816 runtime·throw("bad runtime·mstart");
729 817
730 // Record top of stack for use by mcall. 818 // Record top of stack for use by mcall.
731 // Once we call schedule we're never coming back, 819 // Once we call schedule we're never coming back,
732 // so other calls can reuse this stack space. 820 // so other calls can reuse this stack space.
733 runtime·gosave(&m->g0->sched); 821 runtime·gosave(&m->g0->sched);
734 m->g0->sched.pc = (void*)-1; // make sure it is never used 822 m->g0->sched.pc = (void*)-1; // make sure it is never used
735 823 » runtime·asminit();
736 runtime·minit(); 824 runtime·minit();
825
826 // Install signal handlers; after minit so that minit can
827 // prepare the thread to be able to handle the signals.
828 if(m == &runtime·m0)
829 runtime·initsig();
830
737 schedule(nil); 831 schedule(nil);
738 } 832 }
739 833
740 // When running with cgo, we call libcgo_thread_start 834 // When running with cgo, we call libcgo_thread_start
741 // to start threads for us so that we can play nicely with 835 // to start threads for us so that we can play nicely with
742 // foreign code. 836 // foreign code.
743 void (*libcgo_thread_start)(void*); 837 void (*libcgo_thread_start)(void*);
744 838
745 typedef struct CgoThreadStart CgoThreadStart; 839 typedef struct CgoThreadStart CgoThreadStart;
746 struct CgoThreadStart 840 struct CgoThreadStart
(...skipping 165 matching lines...)
912 // entersyscall is going to return immediately after. 1006 // entersyscall is going to return immediately after.
913 // It's okay to call matchmg and notewakeup even after 1007 // It's okay to call matchmg and notewakeup even after
914 // decrementing mcpu, because we haven't released the 1008 // decrementing mcpu, because we haven't released the
915 // sched lock yet, so the garbage collector cannot be running. 1009 // sched lock yet, so the garbage collector cannot be running.
916 #pragma textflag 7 1010 #pragma textflag 7
917 void 1011 void
918 runtime·entersyscall(void) 1012 runtime·entersyscall(void)
919 { 1013 {
920 uint32 v; 1014 uint32 v;
921 1015
1016 if(m->profilehz > 0)
1017 runtime·setprof(false);
1018
922 // Leave SP around for gc and traceback. 1019 // Leave SP around for gc and traceback.
923 runtime·gosave(&g->sched); 1020 runtime·gosave(&g->sched);
924 g->gcsp = g->sched.sp; 1021 g->gcsp = g->sched.sp;
925 g->gcstack = g->stackbase; 1022 g->gcstack = g->stackbase;
926 g->gcguard = g->stackguard; 1023 g->gcguard = g->stackguard;
927 g->status = Gsyscall; 1024 g->status = Gsyscall;
928 if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) { 1025 if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
929 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n", 1026 // runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
930 // g->gcsp, g->gcguard-StackGuard, g->gcstack); 1027 // g->gcsp, g->gcguard-StackGuard, g->gcstack);
931 runtime·throw("entersyscall"); 1028 runtime·throw("entersyscall");
(...skipping 43 matching lines...)
975 // find that we still have mcpu <= mcpumax, then we can 1072 // find that we still have mcpu <= mcpumax, then we can
976 // start executing Go code immediately, without having to 1073 // start executing Go code immediately, without having to
977 // schedlock/schedunlock. 1074 // schedlock/schedunlock.
978 v = runtime·xadd(&runtime·sched.atomic, (1<<mcpuShift)); 1075 v = runtime·xadd(&runtime·sched.atomic, (1<<mcpuShift));
979 	if(m->profilehz == runtime·sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) { 1076 	if(m->profilehz == runtime·sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) {
980 // There's a cpu for us, so we can run. 1077 // There's a cpu for us, so we can run.
981 g->status = Grunning; 1078 g->status = Grunning;
982 // Garbage collector isn't running (since we are), 1079 // Garbage collector isn't running (since we are),
983 // so okay to clear gcstack. 1080 // so okay to clear gcstack.
984 g->gcstack = nil; 1081 g->gcstack = nil;
1082
1083 if(m->profilehz > 0)
1084 runtime·setprof(true);
985 return; 1085 return;
986 } 1086 }
987 1087
988 // Tell scheduler to put g back on the run queue: 1088 // Tell scheduler to put g back on the run queue:
989 // mostly equivalent to g->status = Grunning, 1089 // mostly equivalent to g->status = Grunning,
990 // but keeps the garbage collector from thinking 1090 // but keeps the garbage collector from thinking
991 // that g is running right now, which it's not. 1091 // that g is running right now, which it's not.
992 g->readyonstop = 1; 1092 g->readyonstop = 1;
993 1093
994 // All the cpus are taken. 1094 // All the cpus are taken.
(...skipping 12 matching lines...)
1007 } 1107 }
1008 1108
1009 // Called from runtime·lessstack when returning from a function which 1109 // Called from runtime·lessstack when returning from a function which
1010 // allocated a new stack segment. The function's return value is in 1110 // allocated a new stack segment. The function's return value is in
1011 // m->cret. 1111 // m->cret.
1012 void 1112 void
1013 runtime·oldstack(void) 1113 runtime·oldstack(void)
1014 { 1114 {
1015 Stktop *top, old; 1115 Stktop *top, old;
1016 uint32 argsize; 1116 uint32 argsize;
1117 uintptr cret;
1017 byte *sp; 1118 byte *sp;
1018 G *g1; 1119 G *g1;
1019 int32 goid; 1120 int32 goid;
1020 1121
1021 //printf("oldstack m->cret=%p\n", m->cret); 1122 //printf("oldstack m->cret=%p\n", m->cret);
1022 1123
1023 g1 = m->curg; 1124 g1 = m->curg;
1024 top = (Stktop*)g1->stackbase; 1125 top = (Stktop*)g1->stackbase;
1025 sp = (byte*)top; 1126 sp = (byte*)top;
1026 old = *top; 1127 old = *top;
1027 argsize = old.argsize; 1128 argsize = old.argsize;
1028 if(argsize > 0) { 1129 if(argsize > 0) {
1029 sp -= argsize; 1130 sp -= argsize;
1030 runtime·memmove(top->argp, sp, argsize); 1131 runtime·memmove(top->argp, sp, argsize);
1031 } 1132 }
1032 goid = old.gobuf.g->goid; // fault if g is bad, before gogo 1133 goid = old.gobuf.g->goid; // fault if g is bad, before gogo
1033 USED(goid); 1134 USED(goid);
1034 1135
1035 if(old.free != 0) 1136 if(old.free != 0)
1036 runtime·stackfree(g1->stackguard - StackGuard, old.free); 1137 runtime·stackfree(g1->stackguard - StackGuard, old.free);
1037 g1->stackbase = old.stackbase; 1138 g1->stackbase = old.stackbase;
1038 g1->stackguard = old.stackguard; 1139 g1->stackguard = old.stackguard;
1039 1140
1040 » runtime·gogo(&old.gobuf, m->cret); 1141 » cret = m->cret;
1142 » m->cret = 0; // drop reference
1143 » runtime·gogo(&old.gobuf, cret);
1041 } 1144 }
1042 1145
1043 // Called from reflect·call or from runtime·morestack when a new 1146 // Called from reflect·call or from runtime·morestack when a new
1044 // stack segment is needed. Allocate a new stack big enough for 1147 // stack segment is needed. Allocate a new stack big enough for
1045 // m->moreframesize bytes, copy m->moreargsize bytes to the new frame, 1148 // m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
1046 // and then act as though runtime·lessstack called the function at 1149 // and then act as though runtime·lessstack called the function at
1047 // m->morepc. 1150 // m->morepc.
1048 void 1151 void
1049 runtime·newstack(void) 1152 runtime·newstack(void)
1050 { 1153 {
(...skipping 45 matching lines...)
1096 1199
1097 //runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n", 1200 //runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
1098 //framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase); 1201 //framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);
1099 1202
1100 top->stackbase = g1->stackbase; 1203 top->stackbase = g1->stackbase;
1101 top->stackguard = g1->stackguard; 1204 top->stackguard = g1->stackguard;
1102 top->gobuf = m->morebuf; 1205 top->gobuf = m->morebuf;
1103 top->argp = m->moreargp; 1206 top->argp = m->moreargp;
1104 top->argsize = argsize; 1207 top->argsize = argsize;
1105 top->free = free; 1208 top->free = free;
1209 m->moreargp = nil;
1210 m->morebuf.pc = nil;
1211 m->morebuf.sp = nil;
1106 1212
1107 // copy flag from panic 1213 // copy flag from panic
1108 top->panic = g1->ispanic; 1214 top->panic = g1->ispanic;
1109 g1->ispanic = false; 1215 g1->ispanic = false;
1110 1216
1111 g1->stackbase = (byte*)top; 1217 g1->stackbase = (byte*)top;
1112 g1->stackguard = stk + StackGuard; 1218 g1->stackguard = stk + StackGuard;
1113 1219
1114 sp = (byte*)top; 1220 sp = (byte*)top;
1115 if(argsize > 0) { 1221 if(argsize > 0) {
1116 sp -= argsize; 1222 sp -= argsize;
1117 » » runtime·memmove(sp, m->moreargp, argsize); 1223 » » runtime·memmove(sp, top->argp, argsize);
1118 } 1224 }
1119 if(thechar == '5') { 1225 if(thechar == '5') {
1120 // caller would have saved its LR below args. 1226 // caller would have saved its LR below args.
1121 sp -= sizeof(void*); 1227 sp -= sizeof(void*);
1122 *(void**)sp = nil; 1228 *(void**)sp = nil;
1123 } 1229 }
1124 1230
1125 // Continue as if lessstack had just called m->morepc 1231 // Continue as if lessstack had just called m->morepc
1126 // (the PC that decided to grow the stack). 1232 // (the PC that decided to grow the stack).
1127 label.sp = sp; 1233 label.sp = sp;
(...skipping 14 matching lines...)
1142 gp->param = runtime·stackalloc((uintptr)gp->param); 1248 gp->param = runtime·stackalloc((uintptr)gp->param);
1143 runtime·gogo(&gp->sched, 0); 1249 runtime·gogo(&gp->sched, 0);
1144 } 1250 }
1145 1251
1146 // Allocate a new g, with a stack big enough for stacksize bytes. 1252 // Allocate a new g, with a stack big enough for stacksize bytes.
1147 G* 1253 G*
1148 runtime·malg(int32 stacksize) 1254 runtime·malg(int32 stacksize)
1149 { 1255 {
1150 G *newg; 1256 G *newg;
1151 byte *stk; 1257 byte *stk;
1258
1259 if(StackTop < sizeof(Stktop)) {
1260 		runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
1261 runtime·throw("runtime: bad stack.h");
1262 }
1152 1263
1153 newg = runtime·malloc(sizeof(G)); 1264 newg = runtime·malloc(sizeof(G));
1154 if(stacksize >= 0) { 1265 if(stacksize >= 0) {
1155 if(g == m->g0) { 1266 if(g == m->g0) {
1156 // running on scheduler stack already. 1267 // running on scheduler stack already.
1157 stk = runtime·stackalloc(StackSystem + stacksize); 1268 stk = runtime·stackalloc(StackSystem + stacksize);
1158 } else { 1269 } else {
1159 // have to call stackalloc on scheduler stack. 1270 // have to call stackalloc on scheduler stack.
1160 g->param = (void*)(StackSystem + stacksize); 1271 g->param = (void*)(StackSystem + stacksize);
1161 runtime·mcall(mstackalloc); 1272 runtime·mcall(mstackalloc);
(...skipping 486 matching lines...)
1648 1759
1649 // for testing of wire, unwire 1760 // for testing of wire, unwire
1650 void 1761 void
1651 runtime·mid(uint32 ret) 1762 runtime·mid(uint32 ret)
1652 { 1763 {
1653 ret = m->id; 1764 ret = m->id;
1654 FLUSH(&ret); 1765 FLUSH(&ret);
1655 } 1766 }
1656 1767
1657 void 1768 void
1658 runtime·Goroutines(int32 ret) 1769 runtime·NumGoroutine(int32 ret)
1659 { 1770 {
1660 ret = runtime·sched.gcount; 1771 ret = runtime·sched.gcount;
1661 FLUSH(&ret); 1772 FLUSH(&ret);
1773 }
1774
1775 int32
1776 runtime·gcount(void)
1777 {
1778 return runtime·sched.gcount;
1662 } 1779 }
1663 1780
1664 int32 1781 int32
1665 runtime·mcount(void) 1782 runtime·mcount(void)
1666 { 1783 {
1667 return runtime·sched.mcount; 1784 return runtime·sched.mcount;
1668 } 1785 }
1669 1786
1670 void 1787 void
1671 runtime·badmcall(void) // called from assembly 1788 runtime·badmcall(void) // called from assembly
(...skipping 80 matching lines...)
1752 arg[0][k.len] = 0; 1869 arg[0][k.len] = 0;
1753 1870
1754 arg[1] = runtime·malloc(v.len + 1); 1871 arg[1] = runtime·malloc(v.len + 1);
1755 runtime·memmove(arg[1], v.str, v.len); 1872 runtime·memmove(arg[1], v.str, v.len);
1756 arg[1][v.len] = 0; 1873 arg[1][v.len] = 0;
1757 1874
1758 runtime·asmcgocall((void*)libcgo_setenv, arg); 1875 runtime·asmcgocall((void*)libcgo_setenv, arg);
1759 runtime·free(arg[0]); 1876 runtime·free(arg[0]);
1760 runtime·free(arg[1]); 1877 runtime·free(arg[1]);
1761 } 1878 }