Rietveld Code Review Tool
Help | Bug tracker | Discussion group | Source code | Sign in
(393)

Delta Between Two Patch Sets: src/pkg/runtime/proc.c

Issue 46970043: code review 46970043: runtime: allocate goroutine ids in batches (Closed)
Left Patch Set: diff -r d5dbdcc7f614 https://dvyukov%40google.com@code.google.com/p/go/ Created 11 years, 2 months ago
Right Patch Set: diff -r 72c0dfd50949 https://dvyukov%40google.com@code.google.com/p/go/ Created 11 years, 1 month ago
Left:
Right:
Use n/p to move between diff chunks; N/P to move between comments. Please Sign in to add in-line comments.
Jump to:
Left: Side by side diff | Download
Right: Side by side diff | Download
« no previous file with change/comment | « no previous file | src/pkg/runtime/runtime.h » ('j') | no next file with change/comment »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
LEFTRIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 #include "runtime.h" 5 #include "runtime.h"
6 #include "arch_GOARCH.h" 6 #include "arch_GOARCH.h"
7 #include "zaexperiment.h" 7 #include "zaexperiment.h"
8 #include "malloc.h" 8 #include "malloc.h"
9 #include "stack.h" 9 #include "stack.h"
10 #include "race.h" 10 #include "race.h"
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
51 uint32 gcwaiting; // gc is waiting to run 51 uint32 gcwaiting; // gc is waiting to run
52 int32 stopwait; 52 int32 stopwait;
53 Note stopnote; 53 Note stopnote;
54 uint32 sysmonwait; 54 uint32 sysmonwait;
55 Note sysmonnote; 55 Note sysmonnote;
56 uint64 lastpoll; 56 uint64 lastpoll;
57 57
58 int32 profilehz; // cpu profiling rate 58 int32 profilehz; // cpu profiling rate
59 }; 59 };
60 60
61 // The max value of GOMAXPROCS. 61 enum
62 // There are no fundamental restrictions on the value. 62 {
63 enum { MaxGomaxprocs = 1<<8 }; 63 » // The max value of GOMAXPROCS.
64 » // There are no fundamental restrictions on the value.
65 » MaxGomaxprocs = 1<<8,
66
67 » // Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
68 » // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
69 » GoidCacheBatch = 16,
70 };
64 71
65 Sched runtime·sched; 72 Sched runtime·sched;
66 int32 runtime·gomaxprocs; 73 int32 runtime·gomaxprocs;
67 uint32 runtime·needextram; 74 uint32 runtime·needextram;
68 bool runtime·iscgo; 75 bool runtime·iscgo;
69 M runtime·m0; 76 M runtime·m0;
70 G» runtime·g0;» // idle goroutine for m0 77 G» runtime·g0;» // idle goroutine for m0
71 G*» runtime·allg;
72 G* runtime·lastg; 78 G* runtime·lastg;
73 M* runtime·allm; 79 M* runtime·allm;
74 M* runtime·extram; 80 M* runtime·extram;
75 int8* runtime·goos; 81 int8* runtime·goos;
76 int32 runtime·ncpu; 82 int32 runtime·ncpu;
77 static int32 newprocs; 83 static int32 newprocs;
78 84
85 static Lock allglock; // the following vars are protected by this lock or by stoptheworld
86 G** runtime·allg;
87 uintptr runtime·allglen;
88 static uintptr allgcap;
89
79 void runtime·mstart(void); 90 void runtime·mstart(void);
80 static void runqput(P*, G*); 91 static void runqput(P*, G*);
81 static G* runqget(P*); 92 static G* runqget(P*);
82 static void runqgrow(P*); 93 static bool runqputslow(P*, G*, uint32, uint32);
83 static G* runqsteal(P*, P*); 94 static G* runqsteal(P*, P*);
84 static void mput(M*); 95 static void mput(M*);
85 static M* mget(void); 96 static M* mget(void);
86 static void mcommoninit(M*); 97 static void mcommoninit(M*);
87 static void schedule(void); 98 static void schedule(void);
88 static void procresize(int32); 99 static void procresize(int32);
89 static void acquirep(P*); 100 static void acquirep(P*);
90 static P* releasep(void); 101 static P* releasep(void);
91 static void newm(void(*)(void), P*); 102 static void newm(void(*)(void), P*);
92 static void stopm(void); 103 static void stopm(void);
93 static void startm(P*, bool); 104 static void startm(P*, bool);
94 static void handoffp(P*); 105 static void handoffp(P*);
95 static void wakep(void); 106 static void wakep(void);
96 static void stoplockedm(void); 107 static void stoplockedm(void);
97 static void startlockedm(G*); 108 static void startlockedm(G*);
98 static void sysmon(void); 109 static void sysmon(void);
99 static uint32 retake(int64); 110 static uint32 retake(int64);
100 static void incidlelocked(int32); 111 static void incidlelocked(int32);
101 static void checkdead(void); 112 static void checkdead(void);
102 static void exitsyscall0(G*); 113 static void exitsyscall0(G*);
103 static void park0(G*); 114 static void park0(G*);
104 static void goexit0(G*); 115 static void goexit0(G*);
105 static void gfput(P*, G*); 116 static void gfput(P*, G*);
106 static G* gfget(P*); 117 static G* gfget(P*);
107 static void gfpurge(P*); 118 static void gfpurge(P*);
108 static void globrunqput(G*); 119 static void globrunqput(G*);
120 static void globrunqputbatch(G*, G*, int32);
109 static G* globrunqget(P*, int32); 121 static G* globrunqget(P*, int32);
110 static P* pidleget(void); 122 static P* pidleget(void);
111 static void pidleput(P*); 123 static void pidleput(P*);
112 static void injectglist(G*); 124 static void injectglist(G*);
113 static bool preemptall(void); 125 static bool preemptall(void);
114 static bool preemptone(P*); 126 static bool preemptone(P*);
115 static bool exitsyscallfast(void); 127 static bool exitsyscallfast(void);
116 static bool haveexperiment(int8*); 128 static bool haveexperiment(int8*);
129 static void allgadd(G*);
117 130
118 // The bootstrap sequence is: 131 // The bootstrap sequence is:
119 // 132 //
120 // call osinit 133 // call osinit
121 // call schedinit 134 // call schedinit
122 // make & queue new G 135 // make & queue new G
123 // call runtime·mstart 136 // call runtime·mstart
124 // 137 //
125 // The new G calls runtime·main. 138 // The new G calls runtime·main.
126 void 139 void
127 runtime·schedinit(void) 140 runtime·schedinit(void)
128 { 141 {
129 int32 n, procs; 142 int32 n, procs;
130 byte *p; 143 byte *p;
131 Eface i; 144 Eface i;
132 145
133 runtime·sched.maxmcount = 10000; 146 runtime·sched.maxmcount = 10000;
134 runtime·precisestack = haveexperiment("precisestack"); 147 runtime·precisestack = haveexperiment("precisestack");
135 148
136 runtime·mprofinit();
137 runtime·mallocinit(); 149 runtime·mallocinit();
138 mcommoninit(m); 150 mcommoninit(m);
139 ········ 151 ········
140 // Initialize the itable value for newErrorCString, 152 // Initialize the itable value for newErrorCString,
141 // so that the next time it gets called, possibly 153 // so that the next time it gets called, possibly
142 // in a fault during a garbage collection, it will not 154 // in a fault during a garbage collection, it will not
143 // need to allocated memory. 155 // need to allocated memory.
144 runtime·newErrorCString(0, &i); 156 runtime·newErrorCString(0, &i);
145 157
146 runtime·goargs(); 158 runtime·goargs();
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
197 // by calling runtime.LockOSThread during initialization 209 // by calling runtime.LockOSThread during initialization
198 // to preserve the lock. 210 // to preserve the lock.
199 runtime·lockOSThread(); 211 runtime·lockOSThread();
200 ········ 212 ········
201 // Defer unlock so that runtime.Goexit during init does the unlock too. 213 // Defer unlock so that runtime.Goexit during init does the unlock too.
202 d.fn = &initDone; 214 d.fn = &initDone;
203 d.siz = 0; 215 d.siz = 0;
204 d.link = g->defer; 216 d.link = g->defer;
205 d.argp = (void*)-1; 217 d.argp = (void*)-1;
206 d.special = true; 218 d.special = true;
207 d.free = false;
208 g->defer = &d; 219 g->defer = &d;
209 220
210 if(m != &runtime·m0) 221 if(m != &runtime·m0)
211 runtime·throw("runtime·main not on m0"); 222 runtime·throw("runtime·main not on m0");
212 runtime·newproc1(&scavenger, nil, 0, 0, runtime·main); 223 runtime·newproc1(&scavenger, nil, 0, 0, runtime·main);
213 main·init(); 224 main·init();
214 225
215 if(g->defer != &d || d.fn != &initDone) 226 if(g->defer != &d || d.fn != &initDone)
216 runtime·throw("runtime: bad defer entry after init"); 227 runtime·throw("runtime: bad defer entry after init");
217 g->defer = d.link; 228 g->defer = d.link;
(...skipping 12 matching lines...) Expand all
230 241
231 runtime·exit(0); 242 runtime·exit(0);
232 for(;;) 243 for(;;)
233 *(int32*)runtime·main = 0; 244 *(int32*)runtime·main = 0;
234 } 245 }
235 246
236 void 247 void
237 runtime·goroutineheader(G *gp) 248 runtime·goroutineheader(G *gp)
238 { 249 {
239 int8 *status; 250 int8 *status;
251 int64 waitfor;
240 252
241 switch(gp->status) { 253 switch(gp->status) {
242 case Gidle: 254 case Gidle:
243 status = "idle"; 255 status = "idle";
244 break; 256 break;
245 case Grunnable: 257 case Grunnable:
246 status = "runnable"; 258 status = "runnable";
247 break; 259 break;
248 case Grunning: 260 case Grunning:
249 status = "running"; 261 status = "running";
250 break; 262 break;
251 case Gsyscall: 263 case Gsyscall:
252 status = "syscall"; 264 status = "syscall";
253 break; 265 break;
254 case Gwaiting: 266 case Gwaiting:
255 if(gp->waitreason) 267 if(gp->waitreason)
256 status = gp->waitreason; 268 status = gp->waitreason;
257 else 269 else
258 status = "waiting"; 270 status = "waiting";
259 break; 271 break;
260 default: 272 default:
261 status = "???"; 273 status = "???";
262 break; 274 break;
263 } 275 }
264 » runtime·printf("goroutine %D [%s]:\n", gp->goid, status); 276
277 » // approx time the G is blocked, in minutes
278 » waitfor = 0;
279 » if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince != 0)
280 » » waitfor = (runtime·nanotime() - gp->waitsince) / (60LL*1000*1000*1000);
281
282 » if(waitfor < 1)
283 » » runtime·printf("goroutine %D [%s]:\n", gp->goid, status);
284 » else
285 » » runtime·printf("goroutine %D [%s, %D minutes]:\n", gp->goid, status, waitfor);
265 } 286 }
266 287
267 void 288 void
268 runtime·tracebackothers(G *me) 289 runtime·tracebackothers(G *me)
269 { 290 {
270 G *gp; 291 G *gp;
271 int32 traceback; 292 int32 traceback;
293 uintptr i;
272 294
273 traceback = runtime·gotraceback(nil); 295 traceback = runtime·gotraceback(nil);
274 ········ 296 ········
275 // Show the current goroutine first, if we haven't already. 297 // Show the current goroutine first, if we haven't already.
276 if((gp = m->curg) != nil && gp != me) { 298 if((gp = m->curg) != nil && gp != me) {
277 runtime·printf("\n"); 299 runtime·printf("\n");
278 runtime·goroutineheader(gp); 300 runtime·goroutineheader(gp);
279 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp); 301 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
280 } 302 }
281 303
282 » for(gp = runtime·allg; gp != nil; gp = gp->alllink) { 304 » runtime·lock(&allglock);
305 » for(i = 0; i < runtime·allglen; i++) {
306 » » gp = runtime·allg[i];
283 if(gp == me || gp == m->curg || gp->status == Gdead) 307 if(gp == me || gp == m->curg || gp->status == Gdead)
284 continue; 308 continue;
285 if(gp->issystem && traceback < 2) 309 if(gp->issystem && traceback < 2)
286 continue; 310 continue;
287 runtime·printf("\n"); 311 runtime·printf("\n");
288 runtime·goroutineheader(gp); 312 runtime·goroutineheader(gp);
289 if(gp->status == Grunning) { 313 if(gp->status == Grunning) {
290 runtime·printf("\tgoroutine running on other thread; stack unavailable\n"); 314 runtime·printf("\tgoroutine running on other thread; stack unavailable\n");
291 runtime·printcreatedby(gp); 315 runtime·printcreatedby(gp);
292 } else 316 } else
293 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp); 317 runtime·traceback(~(uintptr)0, ~(uintptr)0, 0, gp);
294 } 318 }
319 runtime·unlock(&allglock);
295 } 320 }
296 321
297 static void 322 static void
298 checkmcount(void) 323 checkmcount(void)
299 { 324 {
300 // sched lock is held 325 // sched lock is held
301 if(runtime·sched.mcount > runtime·sched.maxmcount) { 326 if(runtime·sched.mcount > runtime·sched.maxmcount) {
302 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount); 327 runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
303 runtime·throw("thread exhaustion"); 328 runtime·throw("thread exhaustion");
304 } 329 }
(...skipping 308 matching lines...) Expand 10 before | Expand all | Expand 10 after
613 // When running with cgo, we call _cgo_thread_start 638 // When running with cgo, we call _cgo_thread_start
614 // to start threads for us so that we can play nicely with 639 // to start threads for us so that we can play nicely with
615 // foreign code. 640 // foreign code.
616 void (*_cgo_thread_start)(void*); 641 void (*_cgo_thread_start)(void*);
617 642
618 typedef struct CgoThreadStart CgoThreadStart; 643 typedef struct CgoThreadStart CgoThreadStart;
619 struct CgoThreadStart 644 struct CgoThreadStart
620 { 645 {
621 M *m; 646 M *m;
622 G *g; 647 G *g;
648 uintptr *tls;
623 void (*fn)(void); 649 void (*fn)(void);
624 }; 650 };
625 651
626 // Allocate a new m unassociated with any thread. 652 // Allocate a new m unassociated with any thread.
627 // Can use p for allocation context if needed. 653 // Can use p for allocation context if needed.
628 M* 654 M*
629 runtime·allocm(P *p) 655 runtime·allocm(P *p)
630 { 656 {
631 M *mp; 657 M *mp;
632 static Type *mtype; // The Go type M 658 static Type *mtype; // The Go type M
633 659
634 m->locks++; // disable GC because it can be called from sysmon 660 m->locks++; // disable GC because it can be called from sysmon
635 if(m->p == nil) 661 if(m->p == nil)
636 acquirep(p); // temporarily borrow p for mallocs in this function 662 acquirep(p); // temporarily borrow p for mallocs in this function
637 if(mtype == nil) { 663 if(mtype == nil) {
638 Eface e; 664 Eface e;
639 runtime·gc_m_ptr(&e); 665 runtime·gc_m_ptr(&e);
640 mtype = ((PtrType*)e.type)->elem; 666 mtype = ((PtrType*)e.type)->elem;
641 } 667 }
642 668
643 mp = runtime·cnew(mtype); 669 mp = runtime·cnew(mtype);
644 mcommoninit(mp); 670 mcommoninit(mp);
645 671
646 » // In case of cgo, pthread_create will make us a stack. 672 » // In case of cgo or Solaris, pthread_create will make us a stack.
647 // Windows will layout sched stack on OS stack. 673 // Windows will layout sched stack on OS stack.
648 » if(runtime·iscgo || Windows) 674 » if(runtime·iscgo || Solaris || Windows)
649 mp->g0 = runtime·malg(-1); 675 mp->g0 = runtime·malg(-1);
650 else 676 else
651 mp->g0 = runtime·malg(8192); 677 mp->g0 = runtime·malg(8192);
652 678
653 if(p == m->p) 679 if(p == m->p)
654 releasep(); 680 releasep();
655 m->locks--; 681 m->locks--;
656 if(m->locks == 0 && g->preempt) // restore the preemption request in ca se we've cleared it in newstack 682 if(m->locks == 0 && g->preempt) // restore the preemption request in ca se we've cleared it in newstack
657 g->stackguard0 = StackPreempt; 683 g->stackguard0 = StackPreempt;
658 684
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after
776 gp->syscallguard = gp->stackguard; 802 gp->syscallguard = gp->stackguard;
777 gp->status = Gsyscall; 803 gp->status = Gsyscall;
778 mp->curg = gp; 804 mp->curg = gp;
779 mp->locked = LockInternal; 805 mp->locked = LockInternal;
780 mp->lockedg = gp; 806 mp->lockedg = gp;
781 gp->lockedm = mp; 807 gp->lockedm = mp;
782 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1); 808 gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
783 if(raceenabled) 809 if(raceenabled)
784 gp->racectx = runtime·racegostart(runtime·newextram); 810 gp->racectx = runtime·racegostart(runtime·newextram);
785 // put on allg for garbage collector 811 // put on allg for garbage collector
786 » runtime·lock(&runtime·sched); 812 » allgadd(gp);
787 » if(runtime·lastg == nil)
788 » » runtime·allg = gp;
789 » else
790 » » runtime·lastg->alllink = gp;
791 » runtime·lastg = gp;
792 » runtime·unlock(&runtime·sched);
793 813
794 // Add m to the extra list. 814 // Add m to the extra list.
795 mnext = lockextra(true); 815 mnext = lockextra(true);
796 mp->schedlink = mnext; 816 mp->schedlink = mnext;
797 unlockextra(mp); 817 unlockextra(mp);
798 } 818 }
799 819
800 // dropm is called when a cgo callback has called needm but is now 820 // dropm is called when a cgo callback has called needm but is now
801 // done with the callback and returning back into the non-Go thread. 821 // done with the callback and returning back into the non-Go thread.
802 // It puts the current m back onto the extra list. 822 // It puts the current m back onto the extra list.
(...skipping 94 matching lines...) Expand 10 before | Expand all | Expand 10 after
897 mp->nextp = p; 917 mp->nextp = p;
898 mp->mstartfn = fn; 918 mp->mstartfn = fn;
899 919
900 if(runtime·iscgo) { 920 if(runtime·iscgo) {
901 CgoThreadStart ts; 921 CgoThreadStart ts;
902 922
903 if(_cgo_thread_start == nil) 923 if(_cgo_thread_start == nil)
904 runtime·throw("_cgo_thread_start missing"); 924 runtime·throw("_cgo_thread_start missing");
905 ts.m = mp; 925 ts.m = mp;
906 ts.g = mp->g0; 926 ts.g = mp->g0;
927 ts.tls = mp->tls;
907 ts.fn = runtime·mstart; 928 ts.fn = runtime·mstart;
908 runtime·asmcgocall(_cgo_thread_start, &ts); 929 runtime·asmcgocall(_cgo_thread_start, &ts);
909 return; 930 return;
910 } 931 }
911 runtime·newosproc(mp, (byte*)mp->g0->stackbase); 932 runtime·newosproc(mp, (byte*)mp->g0->stackbase);
912 } 933 }
913 934
914 // Stops execution of the current m until new work is available. 935 // Stops execution of the current m until new work is available.
915 // Returns with acquired P. 936 // Returns with acquired P.
916 static void 937 static void
(...skipping 24 matching lines...) Expand all
941 m->nextp = nil; 962 m->nextp = nil;
942 } 963 }
943 964
944 static void 965 static void
945 mspinning(void) 966 mspinning(void)
946 { 967 {
947 m->spinning = true; 968 m->spinning = true;
948 } 969 }
949 970
950 // Schedules some M to run the p (creates an M if necessary). 971 // Schedules some M to run the p (creates an M if necessary).
951 // If p==nil, tries to get an idle P, if no idle P's returns false. 972 // If p==nil, tries to get an idle P, if no idle P's does nothing.
952 static void 973 static void
953 startm(P *p, bool spinning) 974 startm(P *p, bool spinning)
954 { 975 {
955 M *mp; 976 M *mp;
956 void (*fn)(void); 977 void (*fn)(void);
957 978
958 runtime·lock(&runtime·sched); 979 runtime·lock(&runtime·sched);
959 if(p == nil) { 980 if(p == nil) {
960 p = pidleget(); 981 p = pidleget();
961 if(p == nil) { 982 if(p == nil) {
(...skipping 143 matching lines...) Expand 10 before | Expand all | Expand 10 after
1105 static void 1126 static void
1106 execute(G *gp) 1127 execute(G *gp)
1107 { 1128 {
1108 int32 hz; 1129 int32 hz;
1109 1130
1110 if(gp->status != Grunnable) { 1131 if(gp->status != Grunnable) {
1111 runtime·printf("execute: bad g status %d\n", gp->status); 1132 runtime·printf("execute: bad g status %d\n", gp->status);
1112 runtime·throw("execute: bad g status"); 1133 runtime·throw("execute: bad g status");
1113 } 1134 }
1114 gp->status = Grunning; 1135 gp->status = Grunning;
1136 gp->waitsince = 0;
1115 gp->preempt = false; 1137 gp->preempt = false;
1116 gp->stackguard0 = gp->stackguard; 1138 gp->stackguard0 = gp->stackguard;
1117 m->p->schedtick++; 1139 m->p->schedtick++;
1118 m->curg = gp; 1140 m->curg = gp;
1119 gp->m = m; 1141 gp->m = m;
1120 1142
1121 // Check whether the profiler needs to be turned on or off. 1143 // Check whether the profiler needs to be turned on or off.
1122 hz = runtime·sched.profilehz; 1144 hz = runtime·sched.profilehz;
1123 if(m->profilehz != hz) 1145 if(m->profilehz != hz)
1124 runtime·resetcpuprofiler(hz); 1146 runtime·resetcpuprofiler(hz);
(...skipping 403 matching lines...) Expand 10 before | Expand all | Expand 10 after
1528 // from the low-level system calls used by the runtime. 1550 // from the low-level system calls used by the runtime.
1529 #pragma textflag NOSPLIT 1551 #pragma textflag NOSPLIT
1530 void 1552 void
1531 runtime·exitsyscall(void) 1553 runtime·exitsyscall(void)
1532 { 1554 {
1533 m->locks++; // see comment in entersyscall 1555 m->locks++; // see comment in entersyscall
1534 1556
1535 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection 1557 if(g->isbackground) // do not consider blocked scavenger for deadlock d etection
1536 incidlelocked(-1); 1558 incidlelocked(-1);
1537 1559
1560 g->waitsince = 0;
1538 if(exitsyscallfast()) { 1561 if(exitsyscallfast()) {
1539 // There's a cpu for us, so we can run. 1562 // There's a cpu for us, so we can run.
1540 m->p->syscalltick++; 1563 m->p->syscalltick++;
1541 g->status = Grunning; 1564 g->status = Grunning;
1542 // Garbage collector isn't running (since we are), 1565 // Garbage collector isn't running (since we are),
1543 // so okay to clear gcstack and gcsp. 1566 // so okay to clear gcstack and gcsp.
1544 g->syscallstack = (uintptr)nil; 1567 g->syscallstack = (uintptr)nil;
1545 g->syscallsp = (uintptr)nil; 1568 g->syscallsp = (uintptr)nil;
1546 m->locks--; 1569 m->locks--;
1547 if(g->preempt) { 1570 if(g->preempt) {
(...skipping 202 matching lines...) Expand 10 before | Expand all | Expand 10 after
1750 // Not worth it: this is almost always an error. 1773 // Not worth it: this is almost always an error.
1751 if(siz > StackMin - 1024) 1774 if(siz > StackMin - 1024)
1752 runtime·throw("runtime.newproc: function arguments too large for new goroutine"); 1775 runtime·throw("runtime.newproc: function arguments too large for new goroutine");
1753 1776
1754 p = m->p; 1777 p = m->p;
1755 if((newg = gfget(p)) != nil) { 1778 if((newg = gfget(p)) != nil) {
1756 if(newg->stackguard - StackGuard != newg->stack0) 1779 if(newg->stackguard - StackGuard != newg->stack0)
1757 runtime·throw("invalid stack in newg"); 1780 runtime·throw("invalid stack in newg");
1758 } else { 1781 } else {
1759 newg = runtime·malg(StackMin); 1782 newg = runtime·malg(StackMin);
1760 » » runtime·lock(&runtime·sched); 1783 » » allgadd(newg);
1761 » » if(runtime·lastg == nil)
1762 » » » runtime·allg = newg;
1763 » » else
1764 » » » runtime·lastg->alllink = newg;
1765 » » runtime·lastg = newg;
1766 » » runtime·unlock(&runtime·sched);
1767 } 1784 }
1768 1785
1769 sp = (byte*)newg->stackbase; 1786 sp = (byte*)newg->stackbase;
1770 sp -= siz; 1787 sp -= siz;
1771 runtime·memmove(sp, argp, narg); 1788 runtime·memmove(sp, argp, narg);
1772 if(thechar == '5') { 1789 if(thechar == '5') {
1773 // caller's LR 1790 // caller's LR
1774 sp -= sizeof(void*); 1791 sp -= sizeof(void*);
1775 *(void**)sp = nil; 1792 *(void**)sp = nil;
1776 } 1793 }
1777 1794
1778 runtime·memclr((byte*)&newg->sched, sizeof newg->sched); 1795 runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
1779 newg->sched.sp = (uintptr)sp; 1796 newg->sched.sp = (uintptr)sp;
1780 newg->sched.pc = (uintptr)runtime·goexit; 1797 newg->sched.pc = (uintptr)runtime·goexit;
1781 newg->sched.g = newg; 1798 newg->sched.g = newg;
1782 runtime·gostartcallfn(&newg->sched, fn); 1799 runtime·gostartcallfn(&newg->sched, fn);
1783 newg->gopc = (uintptr)callerpc; 1800 newg->gopc = (uintptr)callerpc;
1784 newg->status = Grunnable; 1801 newg->status = Grunnable;
1785 if(p->goidcache == p->goidcacheend) { 1802 if(p->goidcache == p->goidcacheend) {
1786 » » p->goidcache = runtime·xadd64(&runtime·sched.goidgen, 16); 1803 » » p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
1787 » » p->goidcacheend = p->goidcache + 16; 1804 » » p->goidcacheend = p->goidcache + GoidCacheBatch;
1788 } 1805 }
1789 newg->goid = p->goidcache++; 1806 newg->goid = p->goidcache++;
1790 newg->panicwrap = 0; 1807 newg->panicwrap = 0;
1791 if(raceenabled) 1808 if(raceenabled)
1792 newg->racectx = runtime·racegostart((void*)callerpc); 1809 newg->racectx = runtime·racegostart((void*)callerpc);
1793 runqput(p, newg); 1810 runqput(p, newg);
1794 1811
1795 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic 1812 if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload( &runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
1796 wakep(); 1813 wakep();
1797 m->locks--; 1814 m->locks--;
1798 if(m->locks == 0 && g->preempt) // restore the preemption request in ca se we've cleared it in newstack 1815 if(m->locks == 0 && g->preempt) // restore the preemption request in ca se we've cleared it in newstack
1799 g->stackguard0 = StackPreempt; 1816 g->stackguard0 = StackPreempt;
1800 return newg; 1817 return newg;
1818 }
1819
1820 static void
1821 allgadd(G *gp)
1822 {
1823 G **new;
1824 uintptr cap;
1825
1826 runtime·lock(&allglock);
1827 if(runtime·allglen >= allgcap) {
1828 cap = 4096/sizeof(new[0]);
1829 if(cap < 2*allgcap)
1830 cap = 2*allgcap;
1831 new = runtime·malloc(cap*sizeof(new[0]));
1832 if(new == nil)
1833 runtime·throw("runtime: cannot allocate memory");
1834 if(runtime·allg != nil) {
1835 runtime·memmove(new, runtime·allg, runtime·allglen*sizeof(new[0]));
1836 runtime·free(runtime·allg);
1837 }
1838 runtime·allg = new;
1839 allgcap = cap;
1840 }
1841 runtime·allg[runtime·allglen++] = gp;
1842 runtime·unlock(&allglock);
1801 } 1843 }
1802 1844
1803 // Put on gfree list. 1845 // Put on gfree list.
1804 // If local list is too long, transfer a batch to the global list. 1846 // If local list is too long, transfer a batch to the global list.
1805 static void 1847 static void
1806 gfput(P *p, G *gp) 1848 gfput(P *p, G *gp)
1807 { 1849 {
1808 if(gp->stackguard - StackGuard != gp->stack0) 1850 if(gp->stackguard - StackGuard != gp->stack0)
1809 runtime·throw("invalid stack in gfput"); 1851 runtime·throw("invalid stack in gfput");
1810 gp->schedlink = p->gfree; 1852 gp->schedlink = p->gfree;
(...skipping 171 matching lines...) Expand 10 before | Expand all | Expand 10 after
1982 { 2024 {
1983 ret = runtime·gcount(); 2025 ret = runtime·gcount();
1984 FLUSH(&ret); 2026 FLUSH(&ret);
1985 } 2027 }
1986 2028
1987 int32 2029 int32
1988 runtime·gcount(void) 2030 runtime·gcount(void)
1989 { 2031 {
1990 G *gp; 2032 G *gp;
1991 int32 n, s; 2033 int32 n, s;
2034 uintptr i;
1992 2035
1993 n = 0; 2036 n = 0;
1994 » runtime·lock(&runtime·sched); 2037 » runtime·lock(&allglock);
1995 // TODO(dvyukov): runtime.NumGoroutine() is O(N). 2038 // TODO(dvyukov): runtime.NumGoroutine() is O(N).
1996 // We do not want to increment/decrement centralized counter in newproc/ goexit, 2039 // We do not want to increment/decrement centralized counter in newproc/ goexit,
1997 // just to make runtime.NumGoroutine() faster. 2040 // just to make runtime.NumGoroutine() faster.
1998 // Compromise solution is to introduce per-P counters of active goroutin es. 2041 // Compromise solution is to introduce per-P counters of active goroutin es.
1999 » for(gp = runtime·allg; gp; gp = gp->alllink) { 2042 » for(i = 0; i < runtime·allglen; i++) {
2043 » » gp = runtime·allg[i];
2000 s = gp->status; 2044 s = gp->status;
2001 if(s == Grunnable || s == Grunning || s == Gsyscall || s == Gwai ting) 2045 if(s == Grunnable || s == Grunning || s == Gsyscall || s == Gwai ting)
2002 n++; 2046 n++;
2003 } 2047 }
2004 » runtime·unlock(&runtime·sched); 2048 » runtime·unlock(&allglock);
2005 return n; 2049 return n;
2006 } 2050 }
2007 2051
2008 int32 2052 int32
2009 runtime·mcount(void) 2053 runtime·mcount(void)
2010 { 2054 {
2011 return runtime·sched.mcount; 2055 return runtime·sched.mcount;
2012 } 2056 }
2013 2057
2014 void 2058 void
(...skipping 23 matching lines...) Expand all
2038 uintptr pcbuf[100]; 2082 uintptr pcbuf[100];
2039 } prof; 2083 } prof;
2040 2084
2041 static void 2085 static void
2042 System(void) 2086 System(void)
2043 { 2087 {
2044 } 2088 }
2045 2089
2046 // Called if we receive a SIGPROF signal. 2090 // Called if we receive a SIGPROF signal.
2047 void 2091 void
2048 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp) 2092 runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
2049 { 2093 {
2050 int32 n; 2094 int32 n;
2051 bool traceback; 2095 bool traceback;
2096 MCache *mcache;
2097 // Do not use global m in this function, use mp instead.
2098 // On windows one m is sending reports about all the g's, so m means a wrong thing.
2099 byte m;
2100
2101 m = 0;
2102 USED(m);
2052 2103
2053 if(prof.fn == nil || prof.hz == 0) 2104 if(prof.fn == nil || prof.hz == 0)
2054 return; 2105 return;
2055 » traceback = true; 2106
2056 » // Windows does profiling in a dedicated thread w/o m. 2107 » // Profiling runs concurrently with GC, so it must not allocate.
2057 » if(!Windows && (m == nil || m->mcache == nil)) 2108 » mcache = mp->mcache;
2058 » » traceback = false; 2109 » mp->mcache = nil;
2059 »······· 2110
2060 // Define that a "user g" is a user-created goroutine, and a "system g" 2111 // Define that a "user g" is a user-created goroutine, and a "system g"
2061 // is one that is m->g0 or m->gsignal. We've only made sure that we 2112 // is one that is m->g0 or m->gsignal. We've only made sure that we
2062 // can unwind user g's, so exclude the system g's. 2113 // can unwind user g's, so exclude the system g's.
2063 // 2114 //
2064 // It is not quite as easy as testing gp == m->curg (the current user g) 2115 // It is not quite as easy as testing gp == m->curg (the current user g)
2065 // because we might be interrupted for profiling halfway through a 2116 // because we might be interrupted for profiling halfway through a
2066 // goroutine switch. The switch involves updating three (or four) values : 2117 // goroutine switch. The switch involves updating three (or four) values :
2067 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 2118 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
2068 // because once it gets updated the new g is running. 2119 // because once it gets updated the new g is running.
2069 // 2120 //
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
2122 // The biggest drawback to this solution is that it requires that we can tell 2173 // The biggest drawback to this solution is that it requires that we can tell
2123 // whether it's safe to read from the memory pointed at by PC. 2174 // whether it's safe to read from the memory pointed at by PC.
2124 // In a correct program, we can test PC == nil and otherwise read, 2175 // In a correct program, we can test PC == nil and otherwise read,
2125 // but if a profiling signal happens at the instant that a program executes 2176 // but if a profiling signal happens at the instant that a program executes
2126 // a bad jump (before the program manages to handle the resulting fault) 2177 // a bad jump (before the program manages to handle the resulting fault)
2127 // the profiling handler could fault trying to read nonexistent memory. 2178 // the profiling handler could fault trying to read nonexistent memory.
2128 // 2179 //
2129 // To recap, there are no constraints on the assembly being used for the 2180 // To recap, there are no constraints on the assembly being used for the
2130 // transition. We simply require that g and SP match and that the PC is not 2181 // transition. We simply require that g and SP match and that the PC is not
2131 // in runtime.gogo. 2182 // in runtime.gogo.
2132 » // 2183 » traceback = true;
2133 » // On Windows, one m is sending reports about all the g's, so gp == m->curg 2184 » if(gp == nil || gp != mp->curg ||
2134 » // is not a useful comparison. The profilem function in os_windows.c has
2135 » // already checked that gp is a user g.
2136 » if(gp == nil ||
2137 » (!Windows && gp != m->curg) ||
2138 (uintptr)sp < gp->stackguard - StackGuard || gp->stackbase < (uintptr)sp || 2185 (uintptr)sp < gp->stackguard - StackGuard || gp->stackbase < (uintptr)sp ||
2139 ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes)) 2186 ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
2140 traceback = false; 2187 traceback = false;
2141 2188
2142 // Race detector calls asmcgocall w/o entersyscall/exitsyscall, 2189 // Race detector calls asmcgocall w/o entersyscall/exitsyscall,
2143 // we can not currently unwind through asmcgocall. 2190 // we can not currently unwind through asmcgocall.
2144 » if(m != nil && m->racecall) 2191 » if(mp != nil && mp->racecall)
2145 traceback = false; 2192 traceback = false;
2146 2193
2147 runtime·lock(&prof); 2194 runtime·lock(&prof);
2148 if(prof.fn == nil) { 2195 if(prof.fn == nil) {
2149 runtime·unlock(&prof); 2196 runtime·unlock(&prof);
2197 mp->mcache = mcache;
2150 return; 2198 return;
2151 } 2199 }
2152 n = 0; 2200 n = 0;
2153 if(traceback) 2201 if(traceback)
2154 n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false); 2202 n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf), nil, nil, false);
2155 if(!traceback || n <= 0) { 2203 if(!traceback || n <= 0) {
2156 n = 2; 2204 n = 2;
2157 prof.pcbuf[0] = (uintptr)pc; 2205 prof.pcbuf[0] = (uintptr)pc;
2158 prof.pcbuf[1] = (uintptr)System + 1; 2206 prof.pcbuf[1] = (uintptr)System + 1;
2159 } 2207 }
2160 prof.fn(prof.pcbuf, n); 2208 prof.fn(prof.pcbuf, n);
2161 runtime·unlock(&prof); 2209 runtime·unlock(&prof);
2210 mp->mcache = mcache;
2162 } 2211 }
2163 2212
2164 // Arrange to call fn with a traceback hz times a second. 2213 // Arrange to call fn with a traceback hz times a second.
2165 void 2214 void
2166 runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz) 2215 runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
2167 { 2216 {
2168 // Force sane arguments. 2217 // Force sane arguments.
2169 if(hz < 0) 2218 if(hz < 0)
2170 hz = 0; 2219 hz = 0;
2171 if(hz == 0) 2220 if(hz == 0)
(...skipping 22 matching lines...) Expand all
2194 runtime·resetcpuprofiler(hz); 2243 runtime·resetcpuprofiler(hz);
2195 2244
2196 m->locks--; 2245 m->locks--;
2197 } 2246 }
2198 2247
2199 // Change number of processors. The world is stopped, sched is locked. 2248 // Change number of processors. The world is stopped, sched is locked.
2200 static void 2249 static void
2201 procresize(int32 new) 2250 procresize(int32 new)
2202 { 2251 {
2203 int32 i, old; 2252 int32 i, old;
2253 bool empty;
2204 G *gp; 2254 G *gp;
2205 P *p; 2255 P *p;
2206 2256
2207 old = runtime·gomaxprocs; 2257 old = runtime·gomaxprocs;
2208 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs) 2258 if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs)
2209 runtime·throw("procresize: invalid arg"); 2259 runtime·throw("procresize: invalid arg");
2210 // initialize new P's 2260 // initialize new P's
2211 for(i = 0; i < new; i++) { 2261 for(i = 0; i < new; i++) {
2212 p = runtime·allp[i]; 2262 p = runtime·allp[i];
2213 if(p == nil) { 2263 if(p == nil) {
2214 p = (P*)runtime·mallocgc(sizeof(*p), 0, FlagNoInvokeGC); 2264 p = (P*)runtime·mallocgc(sizeof(*p), 0, FlagNoInvokeGC);
2215 p->id = i; 2265 p->id = i;
2216 p->status = Pgcstop; 2266 p->status = Pgcstop;
2217 runtime·atomicstorep(&runtime·allp[i], p); 2267 runtime·atomicstorep(&runtime·allp[i], p);
2218 } 2268 }
2219 if(p->mcache == nil) { 2269 if(p->mcache == nil) {
2220 if(old==0 && i==0) 2270 if(old==0 && i==0)
2221 p->mcache = m->mcache; // bootstrap 2271 p->mcache = m->mcache; // bootstrap
2222 else 2272 else
2223 p->mcache = runtime·allocmcache(); 2273 p->mcache = runtime·allocmcache();
2224 } 2274 }
2225 if(p->runq == nil) {
2226 p->runqsize = 128;
2227 p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*), 0, FlagNoInvokeGC);
2228 }
2229 } 2275 }
2230 2276
2231 // redistribute runnable G's evenly 2277 // redistribute runnable G's evenly
2232 » for(i = 0; i < old; i++) { 2278 » // collect all runnable goroutines in global queue preserving FIFO order
2233 » » p = runtime·allp[i]; 2279 » // FIFO order is required to ensure fairness even during frequent GCs
2234 » » while(gp = runqget(p)) 2280 » // see http://golang.org/issue/7126
2235 » » » globrunqput(gp); 2281 » empty = false;
2236 » } 2282 » while(!empty) {
2283 » » empty = true;
2284 » » for(i = 0; i < old; i++) {
2285 » » » p = runtime·allp[i];
2286 » » » if(p->runqhead == p->runqtail)
2287 » » » » continue;
2288 » » » empty = false;
2289 » » » // pop from tail of local queue
2290 » » » p->runqtail--;
2291 » » » gp = p->runq[p->runqtail%nelem(p->runq)];
2292 » » » // push onto head of global queue
2293 » » » gp->schedlink = runtime·sched.runqhead;
2294 » » » runtime·sched.runqhead = gp;
2295 » » » if(runtime·sched.runqtail == nil)
2296 » » » » runtime·sched.runqtail = gp;
2297 » » » runtime·sched.runqsize++;
2298 » » }
2299 » }
2300 » // fill local queues with at most nelem(p->runq)/2 goroutines
2237 // start at 1 because current M already executes some G and will acquire allp[0] below, 2301 // start at 1 because current M already executes some G and will acquire allp[0] below,
2238 // so if we have a spare G we want to put it into allp[1]. 2302 // so if we have a spare G we want to put it into allp[1].
2239 » for(i = 1; runtime·sched.runqhead; i++) { 2303 » for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++ ) {
2240 gp = runtime·sched.runqhead; 2304 gp = runtime·sched.runqhead;
2241 runtime·sched.runqhead = gp->schedlink; 2305 runtime·sched.runqhead = gp->schedlink;
2306 if(runtime·sched.runqhead == nil)
2307 runtime·sched.runqtail = nil;
2308 runtime·sched.runqsize--;
2242 runqput(runtime·allp[i%new], gp); 2309 runqput(runtime·allp[i%new], gp);
2243 } 2310 }
2244 runtime·sched.runqtail = nil;
2245 runtime·sched.runqsize = 0;
2246 2311
2247 // free unused P's 2312 // free unused P's
2248 for(i = new; i < old; i++) { 2313 for(i = new; i < old; i++) {
2249 p = runtime·allp[i]; 2314 p = runtime·allp[i];
2250 runtime·freemcache(p->mcache); 2315 runtime·freemcache(p->mcache);
2251 p->mcache = nil; 2316 p->mcache = nil;
2252 gfpurge(p); 2317 gfpurge(p);
2253 p->status = Pdead; 2318 p->status = Pdead;
2254 // can't free P itself because it can be referenced by an M in s yscall 2319 // can't free P itself because it can be referenced by an M in s yscall
2255 } 2320 }
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
2317 runtime·unlock(&runtime·sched); 2382 runtime·unlock(&runtime·sched);
2318 } 2383 }
2319 2384
2320 // Check for deadlock situation. 2385 // Check for deadlock situation.
2321 // The check is based on number of running M's, if 0 -> deadlock. 2386 // The check is based on number of running M's, if 0 -> deadlock.
2322 static void 2387 static void
2323 checkdead(void) 2388 checkdead(void)
2324 { 2389 {
2325 G *gp; 2390 G *gp;
2326 int32 run, grunning, s; 2391 int32 run, grunning, s;
2392 uintptr i;
2327 2393
2328 // -1 for sysmon 2394 // -1 for sysmon
2329 run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidle locked - 1; 2395 run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidle locked - 1;
2330 if(run > 0) 2396 if(run > 0)
2331 return; 2397 return;
2332 if(run < 0) { 2398 if(run < 0) {
2333 runtime·printf("checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n ", 2399 runtime·printf("checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n ",
2334 runtime·sched.nmidle, runtime·sched.nmidlelocked, runtim e·sched.mcount); 2400 runtime·sched.nmidle, runtime·sched.nmidlelocked, runtim e·sched.mcount);
2335 runtime·throw("checkdead: inconsistent counts"); 2401 runtime·throw("checkdead: inconsistent counts");
2336 } 2402 }
2337 grunning = 0; 2403 grunning = 0;
2338 » for(gp = runtime·allg; gp; gp = gp->alllink) { 2404 » runtime·lock(&allglock);
2405 » for(i = 0; i < runtime·allglen; i++) {
2406 » » gp = runtime·allg[i];
2339 if(gp->isbackground) 2407 if(gp->isbackground)
2340 continue; 2408 continue;
2341 s = gp->status; 2409 s = gp->status;
2342 if(s == Gwaiting) 2410 if(s == Gwaiting)
2343 grunning++; 2411 grunning++;
2344 else if(s == Grunnable || s == Grunning || s == Gsyscall) { 2412 else if(s == Grunnable || s == Grunning || s == Gsyscall) {
2413 runtime·unlock(&allglock);
2345 runtime·printf("checkdead: find g %D in status %d\n", gp ->goid, s); 2414 runtime·printf("checkdead: find g %D in status %d\n", gp ->goid, s);
2346 runtime·throw("checkdead: runnable g"); 2415 runtime·throw("checkdead: runnable g");
2347 } 2416 }
2348 } 2417 }
2418 runtime·unlock(&allglock);
2349 if(grunning == 0) // possible if main goroutine calls runtime·Goexit() 2419 if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
2350 runtime·exit(0); 2420 runtime·exit(0);
2351 m->throwing = -1; // do not dump full stacks 2421 m->throwing = -1; // do not dump full stacks
2352 runtime·throw("all goroutines are asleep - deadlock!"); 2422 runtime·throw("all goroutines are asleep - deadlock!");
2353 } 2423 }
2354 2424
2355 static void 2425 static void
2356 sysmon(void) 2426 sysmon(void)
2357 { 2427 {
2358 uint32 idle, delay; 2428 uint32 idle, delay;
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
2524 gp->stackguard0 = StackPreempt; 2594 gp->stackguard0 = StackPreempt;
2525 return true; 2595 return true;
2526 } 2596 }
2527 2597
2528 void 2598 void
2529 runtime·schedtrace(bool detailed) 2599 runtime·schedtrace(bool detailed)
2530 { 2600 {
2531 static int64 starttime; 2601 static int64 starttime;
2532 int64 now; 2602 int64 now;
2533 int64 id1, id2, id3; 2603 int64 id1, id2, id3;
2534 » int32 i, q, t, h, s; 2604 » int32 i, t, h;
2605 » uintptr gi;
2535 int8 *fmt; 2606 int8 *fmt;
2536 M *mp, *lockedm; 2607 M *mp, *lockedm;
2537 G *gp, *lockedg; 2608 G *gp, *lockedg;
2538 P *p; 2609 P *p;
2539 2610
2540 now = runtime·nanotime(); 2611 now = runtime·nanotime();
2541 if(starttime == 0) 2612 if(starttime == 0)
2542 starttime = now; 2613 starttime = now;
2543 2614
2544 runtime·lock(&runtime·sched); 2615 runtime·lock(&runtime·sched);
2545 runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idleth reads=%d runqueue=%d", 2616 runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d idleth reads=%d runqueue=%d",
2546 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidl e, runtime·sched.mcount, 2617 (now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidl e, runtime·sched.mcount,
2547 runtime·sched.nmidle, runtime·sched.runqsize); 2618 runtime·sched.nmidle, runtime·sched.runqsize);
2548 if(detailed) { 2619 if(detailed) {
2549 runtime·printf(" gcwaiting=%d nmidlelocked=%d nmspinning=%d stop wait=%d sysmonwait=%d\n", 2620 runtime·printf(" gcwaiting=%d nmidlelocked=%d nmspinning=%d stop wait=%d sysmonwait=%d\n",
2550 runtime·sched.gcwaiting, runtime·sched.nmidlelocked, run time·sched.nmspinning, 2621 runtime·sched.gcwaiting, runtime·sched.nmidlelocked, run time·sched.nmspinning,
2551 runtime·sched.stopwait, runtime·sched.sysmonwait); 2622 runtime·sched.stopwait, runtime·sched.sysmonwait);
2552 } 2623 }
2553 // We must be careful while reading data from P's, M's and G's. 2624 // We must be careful while reading data from P's, M's and G's.
2554 // Even if we hold schedlock, most data can be changed concurrently. 2625 // Even if we hold schedlock, most data can be changed concurrently.
2555 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 2626 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
2556 for(i = 0; i < runtime·gomaxprocs; i++) { 2627 for(i = 0; i < runtime·gomaxprocs; i++) {
2557 p = runtime·allp[i]; 2628 p = runtime·allp[i];
2558 if(p == nil) 2629 if(p == nil)
2559 continue; 2630 continue;
2560 mp = p->m; 2631 mp = p->m;
2561 » » t = p->runqtail; 2632 » » h = runtime·atomicload(&p->runqhead);
2562 » » h = p->runqhead; 2633 » » t = runtime·atomicload(&p->runqtail);
2563 » » s = p->runqsize;
2564 » » q = t - h;
2565 » » if(q < 0)
2566 » » » q += s;
2567 if(detailed) 2634 if(detailed)
2568 » » » runtime·printf(" P%d: status=%d schedtick=%d syscalltic k=%d m=%d runqsize=%d/%d gfreecnt=%d\n", 2635 » » » runtime·printf(" P%d: status=%d schedtick=%d syscalltic k=%d m=%d runqsize=%d gfreecnt=%d\n",
2569 » » » » i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, q, s, p->gfreecnt); 2636 » » » » i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
2570 else { 2637 else {
2571 // In non-detailed mode format lengths of per-P run queu es as: 2638 // In non-detailed mode format lengths of per-P run queu es as:
2572 // [len1 len2 len3 len4] 2639 // [len1 len2 len3 len4]
2573 fmt = " %d"; 2640 fmt = " %d";
2574 if(runtime·gomaxprocs == 1) 2641 if(runtime·gomaxprocs == 1)
2575 fmt = " [%d]\n"; 2642 fmt = " [%d]\n";
2576 else if(i == 0) 2643 else if(i == 0)
2577 fmt = " [%d"; 2644 fmt = " [%d";
2578 else if(i == runtime·gomaxprocs-1) 2645 else if(i == runtime·gomaxprocs-1)
2579 fmt = " %d]\n"; 2646 fmt = " %d]\n";
2580 » » » runtime·printf(fmt, q); 2647 » » » runtime·printf(fmt, t-h);
2581 } 2648 }
2582 } 2649 }
2583 if(!detailed) { 2650 if(!detailed) {
2584 runtime·unlock(&runtime·sched); 2651 runtime·unlock(&runtime·sched);
2585 return; 2652 return;
2586 } 2653 }
2587 for(mp = runtime·allm; mp; mp = mp->alllink) { 2654 for(mp = runtime·allm; mp; mp = mp->alllink) {
2588 p = mp->p; 2655 p = mp->p;
2589 gp = mp->curg; 2656 gp = mp->curg;
2590 lockedg = mp->lockedg; 2657 lockedg = mp->lockedg;
2591 id1 = -1; 2658 id1 = -1;
2592 if(p) 2659 if(p)
2593 id1 = p->id; 2660 id1 = p->id;
2594 id2 = -1; 2661 id2 = -1;
2595 if(gp) 2662 if(gp)
2596 id2 = gp->goid; 2663 id2 = gp->goid;
2597 id3 = -1; 2664 id3 = -1;
2598 if(lockedg) 2665 if(lockedg)
2599 id3 = lockedg->goid; 2666 id3 = lockedg->goid;
2600 runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gci ng=%d" 2667 runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gci ng=%d"
2601 " locks=%d dying=%d helpgc=%d spinning=%d lockedg=%D\n", 2668 " locks=%d dying=%d helpgc=%d spinning=%d lockedg=%D\n",
2602 mp->id, id1, id2, 2669 mp->id, id1, id2,
2603 mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->d ying, mp->helpgc, 2670 mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->d ying, mp->helpgc,
2604 mp->spinning, id3); 2671 mp->spinning, id3);
2605 } 2672 }
2606 » for(gp = runtime·allg; gp; gp = gp->alllink) { 2673 » runtime·lock(&allglock);
2674 » for(gi = 0; gi < runtime·allglen; gi++) {
2675 » » gp = runtime·allg[gi];
2607 mp = gp->m; 2676 mp = gp->m;
2608 lockedm = gp->lockedm; 2677 lockedm = gp->lockedm;
2609 runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n", 2678 runtime·printf(" G%D: status=%d(%s) m=%d lockedm=%d\n",
2610 gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1, 2679 gp->goid, gp->status, gp->waitreason, mp ? mp->id : -1,
2611 lockedm ? lockedm->id : -1); 2680 lockedm ? lockedm->id : -1);
2612 } 2681 }
2682 runtime·unlock(&allglock);
2613 runtime·unlock(&runtime·sched); 2683 runtime·unlock(&runtime·sched);
2614 } 2684 }
2615 2685
2616 // Put mp on midle list. 2686 // Put mp on midle list.
2617 // Sched must be locked. 2687 // Sched must be locked.
2618 static void 2688 static void
2619 mput(M *mp) 2689 mput(M *mp)
2620 { 2690 {
2621 mp->schedlink = runtime·sched.midle; 2691 mp->schedlink = runtime·sched.midle;
2622 runtime·sched.midle = mp; 2692 runtime·sched.midle = mp;
(...skipping 22 matching lines...) Expand all
2645 { 2715 {
2646 gp->schedlink = nil; 2716 gp->schedlink = nil;
2647 if(runtime·sched.runqtail) 2717 if(runtime·sched.runqtail)
2648 runtime·sched.runqtail->schedlink = gp; 2718 runtime·sched.runqtail->schedlink = gp;
2649 else 2719 else
2650 runtime·sched.runqhead = gp; 2720 runtime·sched.runqhead = gp;
2651 runtime·sched.runqtail = gp; 2721 runtime·sched.runqtail = gp;
2652 runtime·sched.runqsize++; 2722 runtime·sched.runqsize++;
2653 } 2723 }
2654 2724
2725 // Put a batch of runnable goroutines on the global runnable queue.
2726 // Sched must be locked.
2727 static void
2728 globrunqputbatch(G *ghead, G *gtail, int32 n)
2729 {
2730 gtail->schedlink = nil;
2731 if(runtime·sched.runqtail)
2732 runtime·sched.runqtail->schedlink = ghead;
2733 else
2734 runtime·sched.runqhead = ghead;
2735 runtime·sched.runqtail = gtail;
2736 runtime·sched.runqsize += n;
2737 }
2738
2655 // Try get a batch of G's from the global runnable queue. 2739 // Try get a batch of G's from the global runnable queue.
2656 // Sched must be locked. 2740 // Sched must be locked.
2657 static G* 2741 static G*
2658 globrunqget(P *p, int32 max) 2742 globrunqget(P *p, int32 max)
2659 { 2743 {
2660 G *gp, *gp1; 2744 G *gp, *gp1;
2661 int32 n; 2745 int32 n;
2662 2746
2663 if(runtime·sched.runqsize == 0) 2747 if(runtime·sched.runqsize == 0)
2664 return nil; 2748 return nil;
2665 n = runtime·sched.runqsize/runtime·gomaxprocs+1; 2749 n = runtime·sched.runqsize/runtime·gomaxprocs+1;
2666 if(n > runtime·sched.runqsize) 2750 if(n > runtime·sched.runqsize)
2667 n = runtime·sched.runqsize; 2751 n = runtime·sched.runqsize;
2668 if(max > 0 && n > max) 2752 if(max > 0 && n > max)
2669 n = max; 2753 n = max;
2754 if(n > nelem(p->runq)/2)
2755 n = nelem(p->runq)/2;
2670 runtime·sched.runqsize -= n; 2756 runtime·sched.runqsize -= n;
2671 if(runtime·sched.runqsize == 0) 2757 if(runtime·sched.runqsize == 0)
2672 runtime·sched.runqtail = nil; 2758 runtime·sched.runqtail = nil;
2673 gp = runtime·sched.runqhead; 2759 gp = runtime·sched.runqhead;
2674 runtime·sched.runqhead = gp->schedlink; 2760 runtime·sched.runqhead = gp->schedlink;
2675 n--; 2761 n--;
2676 while(n--) { 2762 while(n--) {
2677 gp1 = runtime·sched.runqhead; 2763 gp1 = runtime·sched.runqhead;
2678 runtime·sched.runqhead = gp1->schedlink; 2764 runtime·sched.runqhead = gp1->schedlink;
2679 runqput(p, gp1); 2765 runqput(p, gp1);
(...skipping 19 matching lines...) Expand all
2699 P *p; 2785 P *p;
2700 2786
2701 p = runtime·sched.pidle; 2787 p = runtime·sched.pidle;
2702 if(p) { 2788 if(p) {
2703 runtime·sched.pidle = p->link; 2789 runtime·sched.pidle = p->link;
2704 runtime·xadd(&runtime·sched.npidle, -1); // TODO: fast atomic 2790 runtime·xadd(&runtime·sched.npidle, -1); // TODO: fast atomic
2705 } 2791 }
2706 return p; 2792 return p;
2707 } 2793 }
2708 2794
2709 // Put g on local runnable queue. 2795 // Try to put g on local runnable queue.
2710 // TODO(dvyukov): consider using lock-free queue. 2796 // If it's full, put onto global queue.
2797 // Executed only by the owner P.
2711 static void 2798 static void
2712 runqput(P *p, G *gp) 2799 runqput(P *p, G *gp)
2713 { 2800 {
2714 » int32 h, t, s; 2801 » uint32 h, t;
2715 2802
2716 » runtime·lock(p);
2717 retry: 2803 retry:
2718 » h = p->runqhead; 2804 » h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
2719 t = p->runqtail; 2805 t = p->runqtail;
2720 » s = p->runqsize; 2806 » if(t - h < nelem(p->runq)) {
2721 » if(t == h-1 || (h == 0 && t == s-1)) { 2807 » » p->runq[t%nelem(p->runq)] = gp;
2722 » » runqgrow(p); 2808 » » runtime·atomicstore(&p->runqtail, t+1); // store-release, makes the item available for consumption
2723 » » goto retry; 2809 » » return;
2724 » } 2810 » }
2725 » p->runq[t++] = gp; 2811 » if(runqputslow(p, gp, h, t))
2726 » if(t == s) 2812 » » return;
2727 » » t = 0; 2813 » // the queue is not full, now the put above must suceed
2728 » p->runqtail = t; 2814 » goto retry;
2729 » runtime·unlock(p); 2815 }
2816
2817 // Put g and a batch of work from local runnable queue on global queue.
2818 // Executed only by the owner P.
2819 static bool
2820 runqputslow(P *p, G *gp, uint32 h, uint32 t)
2821 {
2822 » G *batch[nelem(p->runq)/2+1];
2823 » uint32 n, i;
2824
2825 » // First, grab a batch from local queue.
2826 » n = t-h;
2827 » n = n/2;
2828 » if(n != nelem(p->runq)/2)
2829 » » runtime·throw("runqputslow: queue is not full");
2830 » for(i=0; i<n; i++)
2831 » » batch[i] = p->runq[(h+i)%nelem(p->runq)];
2832 » if(!runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
2833 » » return false;
2834 » batch[n] = gp;
2835 » // Link the goroutines.
2836 » for(i=0; i<n; i++)
2837 » » batch[i]->schedlink = batch[i+1];
2838 » // Now put the batch on global queue.
2839 » runtime·lock(&runtime·sched);
2840 » globrunqputbatch(batch[0], batch[n], n+1);
2841 » runtime·unlock(&runtime·sched);
2842 » return true;
2730 } 2843 }
2731 2844
2732 // Get g from local runnable queue. 2845 // Get g from local runnable queue.
2846 // Executed only by the owner P.
2733 static G* 2847 static G*
2734 runqget(P *p) 2848 runqget(P *p)
2735 { 2849 {
2736 G *gp; 2850 G *gp;
2737 » int32 t, h, s; 2851 » uint32 t, h;
2738 2852
2739 » if(p->runqhead == p->runqtail) 2853 » for(;;) {
2740 » » return nil; 2854 » » h = runtime·atomicload(&p->runqhead); // load-acquire, synchron ize with other consumers
2741 » runtime·lock(p); 2855 » » t = p->runqtail;
2742 » h = p->runqhead; 2856 » » if(t == h)
2743 » t = p->runqtail; 2857 » » » return nil;
2744 » s = p->runqsize; 2858 » » gp = p->runq[h%nelem(p->runq)];
2745 » if(t == h) { 2859 » » if(runtime·cas(&p->runqhead, h, h+1)) // cas-release, commits c onsume
2746 » » runtime·unlock(p); 2860 » » » return gp;
2747 » » return nil; 2861 » }
2748 » } 2862 }
2749 » gp = p->runq[h++]; 2863
2750 » if(h == s) 2864 // Grabs a batch of goroutines from local runnable queue.
2751 » » h = 0; 2865 // batch array must be of size nelem(p->runq)/2. Returns number of grabbed gorou tines.
2752 » p->runqhead = h; 2866 // Can be executed by any P.
2753 » runtime·unlock(p); 2867 static uint32
2754 » return gp; 2868 runqgrab(P *p, G **batch)
2755 } 2869 {
2756 2870 » uint32 t, h, n, i;
2757 // Grow local runnable queue. 2871
2758 // TODO(dvyukov): consider using fixed-size array 2872 » for(;;) {
2759 // and transfer excess to the global list (local queue can grow way too big). 2873 » » h = runtime·atomicload(&p->runqhead); // load-acquire, synchron ize with other consumers
2760 static void 2874 » » t = runtime·atomicload(&p->runqtail); // load-acquire, synchron ize with the producer
2761 runqgrow(P *p) 2875 » » n = t-h;
2762 { 2876 » » n = n - n/2;
2763 » G **q; 2877 » » if(n == 0)
2764 » int32 s, t, h, t2; 2878 » » » break;
2765 2879 » » if(n > nelem(p->runq)/2) // read inconsistent h and t
2766 » h = p->runqhead; 2880 » » » continue;
2767 » t = p->runqtail; 2881 » » for(i=0; i<n; i++)
2768 » s = p->runqsize; 2882 » » » batch[i] = p->runq[(h+i)%nelem(p->runq)];
2769 » t2 = 0; 2883 » » if(runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits c onsume
2770 » q = runtime·malloc(2*s*sizeof(*q)); 2884 » » » break;
2771 » while(t != h) { 2885 » }
2772 » » q[t2++] = p->runq[h++]; 2886 » return n;
2773 » » if(h == s)
2774 » » » h = 0;
2775 » }
2776 » runtime·free(p->runq);
2777 » p->runq = q;
2778 » p->runqhead = 0;
2779 » p->runqtail = t2;
2780 » p->runqsize = 2*s;
2781 } 2887 }
2782 2888
2783 // Steal half of elements from local runnable queue of p2 2889 // Steal half of elements from local runnable queue of p2
2784 // and put onto local runnable queue of p. 2890 // and put onto local runnable queue of p.
2785 // Returns one of the stolen elements (or nil if failed). 2891 // Returns one of the stolen elements (or nil if failed).
2786 static G* 2892 static G*
2787 runqsteal(P *p, P *p2) 2893 runqsteal(P *p, P *p2)
2788 { 2894 {
2789 » G *gp, *gp1; 2895 » G *gp;
2790 » int32 t, h, s, t2, h2, s2, c, i; 2896 » G *batch[nelem(p->runq)/2];
2791 2897 » uint32 t, h, n, i;
2792 » if(p2->runqhead == p2->runqtail) 2898
2899 » n = runqgrab(p2, batch);
2900 » if(n == 0)
2793 return nil; 2901 return nil;
2794 » // sort locks to prevent deadlocks 2902 » n--;
2795 » if(p < p2) 2903 » gp = batch[n];
2796 » » runtime·lock(p); 2904 » if(n == 0)
2797 » runtime·lock(p2); 2905 » » return gp;
2798 » if(p2->runqhead == p2->runqtail) { 2906 » h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
2799 » » runtime·unlock(p2);
2800 » » if(p < p2)
2801 » » » runtime·unlock(p);
2802 » » return nil;
2803 » }
2804 » if(p >= p2)
2805 » » runtime·lock(p);
2806 » // now we've locked both queues and know the victim is not empty
2807 » h = p->runqhead;
2808 t = p->runqtail; 2907 t = p->runqtail;
2809 » s = p->runqsize; 2908 » if(t - h + n >= nelem(p->runq))
2810 » h2 = p2->runqhead; 2909 » » runtime·throw("runqsteal: runq overflow");
2811 » t2 = p2->runqtail; 2910 » for(i=0; i<n; i++, t++)
2812 » s2 = p2->runqsize; 2911 » » p->runq[t%nelem(p->runq)] = batch[i];
2813 » gp = p2->runq[h2++]; // return value 2912 » runtime·atomicstore(&p->runqtail, t); // store-release, makes the item available for consumption
2814 » if(h2 == s2)
2815 » » h2 = 0;
2816 » // steal roughly half
2817 » if(t2 > h2)
2818 » » c = (t2 - h2) / 2;
2819 » else
2820 » » c = (s2 - h2 + t2) / 2;
2821 » // copy
2822 » for(i = 0; i != c; i++) {
2823 » » // the target queue is full?
2824 » » if(t == h-1 || (h == 0 && t == s-1))
2825 » » » break;
2826 » » // the victim queue is empty?
2827 » » if(t2 == h2)
2828 » » » break;
2829 » » gp1 = p2->runq[h2++];
2830 » » if(h2 == s2)
2831 » » » h2 = 0;
2832 » » p->runq[t++] = gp1;
2833 » » if(t == s)
2834 » » » t = 0;
2835 » }
2836 » p->runqtail = t;
2837 » p2->runqhead = h2;
2838 » runtime·unlock(p2);
2839 » runtime·unlock(p);
2840 return gp; 2913 return gp;
2841 } 2914 }
2842 2915
2843 void 2916 void
2844 runtime·testSchedLocalQueue(void) 2917 runtime·testSchedLocalQueue(void)
2845 { 2918 {
2846 P p; 2919 P p;
2847 » G gs[1000]; 2920 » G gs[nelem(p.runq)];
2848 int32 i, j; 2921 int32 i, j;
2849 2922
2850 runtime·memclr((byte*)&p, sizeof(p)); 2923 runtime·memclr((byte*)&p, sizeof(p));
2851 p.runqsize = 1;
2852 p.runqhead = 0;
2853 p.runqtail = 0;
2854 p.runq = runtime·malloc(p.runqsize*sizeof(*p.runq));
2855 2924
2856 for(i = 0; i < nelem(gs); i++) { 2925 for(i = 0; i < nelem(gs); i++) {
2857 if(runqget(&p) != nil) 2926 if(runqget(&p) != nil)
2858 runtime·throw("runq is not empty initially"); 2927 runtime·throw("runq is not empty initially");
2859 for(j = 0; j < i; j++) 2928 for(j = 0; j < i; j++)
2860 runqput(&p, &gs[i]); 2929 runqput(&p, &gs[i]);
2861 for(j = 0; j < i; j++) { 2930 for(j = 0; j < i; j++) {
2862 if(runqget(&p) != &gs[i]) { 2931 if(runqget(&p) != &gs[i]) {
2863 runtime·printf("bad element at iter %d/%d\n", i, j); 2932 runtime·printf("bad element at iter %d/%d\n", i, j);
2864 runtime·throw("bad element"); 2933 runtime·throw("bad element");
2865 } 2934 }
2866 } 2935 }
2867 if(runqget(&p) != nil) 2936 if(runqget(&p) != nil)
2868 runtime·throw("runq is not empty afterwards"); 2937 runtime·throw("runq is not empty afterwards");
2869 } 2938 }
2870 } 2939 }
2871 2940
2872 void 2941 void
2873 runtime·testSchedLocalQueueSteal(void) 2942 runtime·testSchedLocalQueueSteal(void)
2874 { 2943 {
2875 P p1, p2; 2944 P p1, p2;
2876 » G gs[1000], *gp; 2945 » G gs[nelem(p1.runq)], *gp;
2877 int32 i, j, s; 2946 int32 i, j, s;
2878 2947
2879 runtime·memclr((byte*)&p1, sizeof(p1)); 2948 runtime·memclr((byte*)&p1, sizeof(p1));
2880 p1.runqsize = 1;
2881 p1.runqhead = 0;
2882 p1.runqtail = 0;
2883 p1.runq = runtime·malloc(p1.runqsize*sizeof(*p1.runq));
2884
2885 runtime·memclr((byte*)&p2, sizeof(p2)); 2949 runtime·memclr((byte*)&p2, sizeof(p2));
2886 p2.runqsize = nelem(gs);
2887 p2.runqhead = 0;
2888 p2.runqtail = 0;
2889 p2.runq = runtime·malloc(p2.runqsize*sizeof(*p2.runq));
2890 2950
2891 for(i = 0; i < nelem(gs); i++) { 2951 for(i = 0; i < nelem(gs); i++) {
2892 for(j = 0; j < i; j++) { 2952 for(j = 0; j < i; j++) {
2893 gs[j].sig = 0; 2953 gs[j].sig = 0;
2894 runqput(&p1, &gs[j]); 2954 runqput(&p1, &gs[j]);
2895 } 2955 }
2896 gp = runqsteal(&p2, &p1); 2956 gp = runqsteal(&p2, &p1);
2897 s = 0; 2957 s = 0;
2898 if(gp) { 2958 if(gp) {
2899 s++; 2959 s++;
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
2957 if(experiment[i+j] != name[j]) 3017 if(experiment[i+j] != name[j])
2958 goto nomatch; 3018 goto nomatch;
2959 if(experiment[i+j] != '\0' && experiment[i+j] != ',') 3019 if(experiment[i+j] != '\0' && experiment[i+j] != ',')
2960 goto nomatch; 3020 goto nomatch;
2961 return 1; 3021 return 1;
2962 } 3022 }
2963 nomatch:; 3023 nomatch:;
2964 } 3024 }
2965 return 0; 3025 return 0;
2966 } 3026 }
LEFTRIGHT

Powered by Google App Engine
RSS Feeds Recent Issues | This issue
This is Rietveld f62528b