LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #include "runtime.h" | 5 #include "runtime.h" |
6 #include "arch_GOARCH.h" | 6 #include "arch_GOARCH.h" |
7 #include "defs_GOOS_GOARCH.h" | 7 #include "defs_GOOS_GOARCH.h" |
8 #include "malloc.h" | 8 #include "malloc.h" |
9 #include "os_GOOS.h" | 9 #include "os_GOOS.h" |
10 #include "stack.h" | 10 #include "stack.h" |
| 11 #include "race.h" |
| 12 #include "type.h" |
| 13 |
| 14 // TODO(dvyukov): if a thread w/o mcache catches a signal (in particular SIGABOR
T), |
| 15 // then it can't print dump. |
11 | 16 |
12 enum { maxgomaxprocs = 1<<10 }; | 17 enum { maxgomaxprocs = 1<<10 }; |
13 | 18 #define LOG if(0) runtime·printf |
14 enum { debug = 0 }; | |
15 #define LOG if(debug) runtime·printf | |
16 #define LOG1 runtime·printf | 19 #define LOG1 runtime·printf |
17 | 20 #define CHECK(cond, fmt) /*if(cond) {} else { runtime·printf fmt; runtime·throw(
"CHECK"); }*/ |
18 //FIXME: fix the comment. | |
19 | 21 |
20 // Go scheduler | 22 // Go scheduler |
21 // | 23 // |
22 // The go scheduler's job is to match ready-to-run goroutines (`g's) | 24 // The go scheduler's job is to match ready-to-run goroutines (`g's) |
23 // with waiting-for-work schedulers (`m's). If there are ready g's | 25 // with waiting-for-work schedulers (`m's). If there are ready g's |
24 // and no waiting m's, ready() will start a new m running in a new | 26 // and no waiting m's, ready() will start a new m running in a new |
25 // OS thread, so that all ready g's can run simultaneously, up to a limit. | 27 // OS thread, so that all ready g's can run simultaneously, up to a limit. |
26 // For now, m's never go away. | 28 // For now, m's never go away. |
27 // | 29 // |
28 // By default, Go keeps only one kernel thread (m) running user code | 30 // By default, Go keeps only one kernel thread (m) running user code |
29 // at a single time; other threads may be blocked in the operating system. | 31 // at a single time; other threads may be blocked in the operating system. |
30 // Setting the environment variable $GOMAXPROCS or calling | 32 // Setting the environment variable $GOMAXPROCS or calling |
31 // runtime.GOMAXPROCS() will change the number of user threads | 33 // runtime.GOMAXPROCS() will change the number of user threads |
32 // allowed to execute simultaneously. $GOMAXPROCS is thus an | 34 // allowed to execute simultaneously. $GOMAXPROCS is thus an |
33 // approximation of the maximum number of cores to use. | 35 // approximation of the maximum number of cores to use. |
34 // | 36 // |
35 // Even a program that can run without deadlock in a single process | 37 // Even a program that can run without deadlock in a single process |
36 // might use more m's if given the chance. For example, the prime | 38 // might use more m's if given the chance. For example, the prime |
37 // sieve will use as many m's as there are primes (up to runtime·sched.mmax), | 39 // sieve will use as many m's as there are primes (up to $GOMAXPROCS), |
38 // allowing different stages of the pipeline to execute in parallel. | 40 // allowing different stages of the pipeline to execute in parallel. |
39 // We could revisit this choice, only kicking off new m's for blocking | |
40 // system calls, but that would limit the amount of parallel computation | |
41 // that go would try to do. | |
42 // | |
43 // In general, one could imagine all sorts of refinements to the | |
44 // scheduler, but the goal now is just to get something working on | |
45 // Linux and OS X. | |
46 | 41 |
47 typedef struct Sched Sched; | 42 typedef struct Sched Sched; |
48 struct Sched { | 43 struct Sched { |
49 Lock; | 44 Lock; |
50 | 45 |
51 M* mhead; // m's waiting for work | 46 M* mhead; // m's waiting for work |
52 int32 mwait; // number of m's waiting for work | 47 int32 mwait; // number of m's waiting for work |
53 int32 mcount; // number of m's that have been created | 48 int32 mcount; // number of m's that have been created |
54 | 49 |
55 P* pidle; // idle P's | 50 P* pidle; // idle P's |
56 | 51 » int32» npidle; |
| 52 |
| 53 » G*» runqhead; |
| 54 » G*» runqtail; |
| 55 » int32» runqsize; |
| 56 |
| 57 » Lock gflock; |
57 G* gfree; | 58 G* gfree; |
| 59 int32 goidseq; |
| 60 |
| 61 int32 stopwait; |
| 62 Note stopnote; |
| 63 int32 sysmonwait; |
| 64 Note sysmonnote; |
58 | 65 |
59 int32 profilehz; // cpu profiling rate | 66 int32 profilehz; // cpu profiling rate |
60 | 67 |
61 bool init; // running initialization | 68 bool init; // running initialization |
62 bool lockmain; // init called runtime.LockOSThread | 69 bool lockmain; // init called runtime.LockOSThread |
63 }; | 70 }; |
64 | 71 |
65 Sched runtime·sched; | 72 Sched runtime·sched; |
66 int32 runtime·gomaxprocs; | 73 int32 runtime·gomaxprocs; |
67 bool runtime·singleproc; | 74 bool runtime·singleproc; |
68 bool runtime·iscgo; | 75 bool runtime·iscgo; |
69 int32 runtime·gcwaiting; | 76 int32 runtime·gcwaiting; |
70 M» » runtime·m0; | 77 M» runtime·m0; |
71 G» » runtime·g0;» // idle goroutine for m0 | 78 G» runtime·g0;» // idle goroutine for m0 |
72 static int32 newprocs; | 79 static int32 newprocs; |
73 | |
74 static M sysmonm; | |
75 static byte sysmonstk[64*1024]; | |
76 | 80 |
 77 // Keep track of scavenger's goroutine for deadlock detection. | 81 // Keep track of scavenger's goroutine for deadlock detection. |
78 static G *scvg; | 82 static G *scvg; |
79 | 83 |
80 // Scheduling helpers. Sched must be locked. | 84 // Scheduling helpers. Sched must be locked. |
81 static void gput(P*, G*);» // put/get on ghead/gtail | 85 static void runqput(P*, G*);» // put/get on ghead/gtail |
82 static G* gget(P*); | 86 static G* runqget(P*); |
83 static void runqgrow(P*); | 87 static void runqgrow(P*); |
84 static G* runqsteal(P*, P*); | 88 static G* runqsteal(P*, P*); |
| 89 static void globrunqput(G*); |
| 90 static G* globrunqget(void); |
85 static void mput(M*); // put/get on mhead | 91 static void mput(M*); // put/get on mhead |
86 static M* mget(void); | 92 static M* mget(void); |
87 static void gfput(P*, G*); // put/get on gfree | 93 static void gfput(P*, G*); // put/get on gfree |
88 static G* gfget(P*); | 94 static G* gfget(P*); |
89 static void mcommoninit(M*); | 95 static void mcommoninit(M*); |
90 static void schedule(void); | 96 static void schedule(void); |
91 static void procresize(int32); | 97 static void procresize(int32); |
92 static void entergo(M*, P*); | 98 static void entergo(M*, P*); |
93 static void leavego(M*, uint32); | 99 static P* releasep(void); |
94 static void newm(P*, bool); | 100 static M* newm(void(*)(void), P*, bool); |
95 static void goidle(void); | 101 static void goidle(void); |
96 static void mstop(void); | 102 static void mstop(void); |
97 static void initgstack(G*, byte*, int32); | 103 static void initgstack(G*, byte*, int32); |
98 static void sysmon(void); | 104 static void sysmon(void); |
99 | 105 static void inject(G*, int32*, int32*); |
100 static void | 106 static P* pidleget(void); |
101 outputstats(void) | 107 static void pidleput(P*); |
102 { | |
103 » M *mp; | |
104 » SchedStats s; | |
105 » int32 i; | |
106 » uint64 *src, *dst; | |
107 | |
108 » runtime·memclr((byte*)&s, sizeof(s)); | |
109 » for(mp=runtime·allm; mp; mp=mp->alllink) { | |
110 » » src = (uint64*)&mp->schedstats; | |
111 » » dst = (uint64*)&s; | |
112 » » for(i=0; i<sizeof(s)/sizeof(uint64); i++) | |
113 » » » dst[i] += src[i]; | |
114 » } | |
115 » runtime·printf("SchedStats:\n"); | |
116 » runtime·printf("nm %D\n", s.nm); | |
117 » runtime·printf("sysexitfast %D\n", s.sysexitfast); | |
118 » runtime·printf("sysexitmed %D\n", s.sysexitmed); | |
119 » runtime·printf("sysexitslow %D\n", s.sysexitslow); | |
120 » runtime·printf("stealempty %D\n", s.stealempty); | |
121 » runtime·printf("stealn %D\n", s.stealn); | |
122 » runtime·printf("stealcnt %D (%D)\n", s.stealcnt, s.stea
lcnt / (s.stealn ? s.stealn : 1)); | |
123 » runtime·printf("gstart %D\n", s.gstart); | |
124 » runtime·printf("gend %D\n", s.gend); | |
125 » runtime·printf("gfput %D\n", s.gfput); | |
126 » runtime·printf("gfget %D\n", s.gfget); | |
127 » runtime·printf("galloc %D\n", s.galloc); | |
128 } | |
129 | 108 |
130 // The bootstrap sequence is: | 109 // The bootstrap sequence is: |
131 // | 110 // |
132 // call osinit | 111 // call osinit |
133 // call schedinit | 112 // call schedinit |
134 // make & queue new G | 113 // make & queue new G |
135 // call runtime·mstart | 114 // call runtime·mstart |
136 // | 115 // |
137 // The new G calls runtime·main. | 116 // The new G calls runtime·main. |
138 void | 117 void |
139 runtime·schedinit(void) | 118 runtime·schedinit(void) |
140 { | 119 { |
141 int32 n, procs; | 120 int32 n, procs; |
142 byte *p; | 121 byte *p; |
143 | 122 |
144 LOG("%d: runtime·schedinit\n", m->id); | 123 LOG("%d: runtime·schedinit\n", m->id); |
145 m->nomemprof++; | 124 m->nomemprof++; |
146 runtime·mallocinit(); | 125 runtime·mallocinit(); |
| 126 m->stackalloc = runtime·malloc(sizeof(*m->stackalloc)); //!!! it may be
GCed |
147 mcommoninit(m); | 127 mcommoninit(m); |
148 m->stackalloc = runtime·malloc(sizeof(*m->stackalloc)); | |
149 runtime·FixAlloc_Init(m->stackalloc, FixedStack, runtime·SysAlloc, nil,
nil); | |
150 if(runtime·gsignalstk) | 128 if(runtime·gsignalstk) |
151 m->gsignal = runtime·malg(runtime·gsignalstk); | 129 m->gsignal = runtime·malg(runtime·gsignalstk); |
152 | 130 |
153 runtime·goargs(); | 131 runtime·goargs(); |
154 runtime·goenvs(); | 132 runtime·goenvs(); |
155 | 133 |
156 // For debugging: | 134 // For debugging: |
157 // Allocate internal symbol table representation now, | 135 // Allocate internal symbol table representation now, |
158 // so that we don't need to call malloc when we crash. | 136 // so that we don't need to call malloc when we crash. |
159 // runtime·findfunc(0); | 137 // runtime·findfunc(0); |
160 | 138 |
161 procs = 1; | 139 procs = 1; |
162 p = runtime·getenv("GOMAXPROCS"); | 140 p = runtime·getenv("GOMAXPROCS"); |
163 if(p != nil && (n = runtime·atoi(p)) > 0) { | 141 if(p != nil && (n = runtime·atoi(p)) > 0) { |
164 if(n > maxgomaxprocs) | 142 if(n > maxgomaxprocs) |
165 n = maxgomaxprocs; | 143 n = maxgomaxprocs; |
166 procs = n; | 144 procs = n; |
167 } | 145 } |
168 runtime·allp = (P**)runtime·malloc((maxgomaxprocs+1)*sizeof(runtime·allp
[0])); | 146 runtime·allp = (P**)runtime·malloc((maxgomaxprocs+1)*sizeof(runtime·allp
[0])); |
169 procresize(procs); | 147 procresize(procs); |
170 | 148 |
171 mstats.enablegc = 1; | 149 mstats.enablegc = 1; |
172 m->nomemprof--; | 150 m->nomemprof--; |
| 151 |
| 152 if(raceenabled) |
| 153 runtime·raceinit(); |
173 } | 154 } |
174 | 155 |
175 extern void main·init(void); | 156 extern void main·init(void); |
176 extern void main·main(void); | 157 extern void main·main(void); |
177 | 158 |
178 // The main goroutine. | 159 // The main goroutine. |
179 void | 160 void |
180 runtime·main(void) | 161 runtime·main(void) |
181 { | 162 { |
182 G *sysmong; | |
183 | |
184 LOG("%d: runtime·main\n", m->id); | 163 LOG("%d: runtime·main\n", m->id); |
185 | 164 |
186 //TODO(dvyukov): block signals because that thread can't handle them | 165 //TODO(dvyukov): block signals because that thread can't handle them |
187 » sysmong = &sysmonm.g0buf; | 166 » newm(sysmon, nil, false); |
188 » sysmonm.g0 = sysmong; | |
189 » mcommoninit(&sysmonm); | |
190 » if(runtime·gsignalstk) | |
191 » » sysmonm.gsignal = runtime·malg(runtime·gsignalstk); | |
192 » initgstack(sysmong, sysmonstk, nelem(sysmonstk)); | |
193 » runtime·newosproc(&sysmonm, sysmong, (void*)sysmong->stackbase, sysmon); | |
194 | 167 |
195 // Lock the main goroutine onto this, the main OS thread, | 168 // Lock the main goroutine onto this, the main OS thread, |
196 // during initialization. Most programs won't care, but a few | 169 // during initialization. Most programs won't care, but a few |
197 // do require certain calls to be made by the main thread. | 170 // do require certain calls to be made by the main thread. |
198 // Those can arrange for main.main to run in the main thread | 171 // Those can arrange for main.main to run in the main thread |
199 // by calling runtime.LockOSThread during initialization | 172 // by calling runtime.LockOSThread during initialization |
200 // to preserve the lock. | 173 // to preserve the lock. |
201 runtime·LockOSThread(); | 174 runtime·LockOSThread(); |
202 runtime·sched.init = true; | 175 runtime·sched.init = true; |
203 if(m != &runtime·m0) | 176 if(m != &runtime·m0) |
204 runtime·throw("runtime·main not on m0"); | 177 runtime·throw("runtime·main not on m0"); |
205 scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runti
me·main); | 178 scvg = runtime·newproc1((byte*)runtime·MHeap_Scavenger, nil, 0, 0, runti
me·main); |
206 main·init(); | 179 main·init(); |
207 runtime·sched.init = false; | 180 runtime·sched.init = false; |
208 if(!runtime·sched.lockmain) | 181 if(!runtime·sched.lockmain) |
209 runtime·UnlockOSThread(); | 182 runtime·UnlockOSThread(); |
210 | 183 |
211 main·main(); | 184 main·main(); |
212 » outputstats(); | 185 » if(raceenabled) |
| 186 » » runtime·racefini(); |
213 runtime·exit(0); | 187 runtime·exit(0); |
214 for(;;) | 188 for(;;) |
215 *(int32*)runtime·main = 0; | 189 *(int32*)runtime·main = 0; |
216 } | 190 } |
217 | 191 |
218 void | 192 void |
219 runtime·goroutineheader(G *gp) | 193 runtime·goroutineheader(G *gp) |
220 { | 194 { |
221 int8 *status; | 195 int8 *status; |
222 | 196 |
(...skipping 13 matching lines...) Expand all Loading... |
236 case Gwaiting: | 210 case Gwaiting: |
237 if(gp->waitreason) | 211 if(gp->waitreason) |
238 status = gp->waitreason; | 212 status = gp->waitreason; |
239 else | 213 else |
240 status = "waiting"; | 214 status = "waiting"; |
241 break; | 215 break; |
242 default: | 216 default: |
243 status = "???"; | 217 status = "???"; |
244 break; | 218 break; |
245 } | 219 } |
246 » runtime·printf("goroutine %p [%s]:\n", gp, status); | 220 » runtime·printf("goroutine %D [%s]:\n", gp->goid, status); |
247 } | 221 } |
248 | 222 |
249 void | 223 void |
250 runtime·tracebackothers(G *me) | 224 runtime·tracebackothers(G *me) |
251 { | 225 { |
252 G *gp; | 226 G *gp; |
253 » P *p, **pp; | 227 |
254 | 228 » for(gp = runtime·allg; gp != nil; gp = gp->alllink) { |
255 » for(pp=runtime·allp; p=*pp; pp++) { | 229 » » if(gp == me || gp->status == Gdead) |
256 » » for(gp = p->allg; gp != nil; gp = gp->alllink) { | 230 » » » continue; |
257 » » » if(gp == me || gp->status == Gdead) | 231 » » runtime·printf("\n"); |
258 » » » » continue; | 232 » » runtime·goroutineheader(gp); |
259 » » » runtime·printf("\n"); | 233 » » runtime·traceback(gp->sched.pc, (byte*)gp->sched.sp, 0, gp); |
260 » » » runtime·goroutineheader(gp); | |
261 » » » runtime·traceback(gp->sched.pc, (byte*)gp->sched.sp, 0,
gp); | |
262 » » } | |
263 } | 234 } |
264 } | 235 } |
265 | 236 |
266 static void | 237 static void |
267 mcommoninit(M *mp) | 238 mcommoninit(M *mp) |
268 { | 239 { |
269 runtime·lock(&runtime·sched); | 240 runtime·lock(&runtime·sched); |
270 mp->id = runtime·sched.mcount++; | 241 mp->id = runtime·sched.mcount++; |
271 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks(); | 242 mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks(); |
| 243 runtime·FixAlloc_Init(mp->stackalloc, FixedStack, runtime·SysAlloc, nil,
nil); |
272 | 244 |
273 // does it allocate? | 245 // does it allocate? |
274 //runtime·callers(1, mp->createstack, nelem(mp->createstack)); | 246 //runtime·callers(1, mp->createstack, nelem(mp->createstack)); |
275 | 247 |
276 // Add to runtime·allm so garbage collector doesn't free m | 248 // Add to runtime·allm so garbage collector doesn't free m |
277 // when it is just in a register or thread-local storage. | 249 // when it is just in a register or thread-local storage. |
278 mp->alllink = runtime·allm; | 250 mp->alllink = runtime·allm; |
279 // runtime·NumCgoCall() iterates over allm w/o locks, | 251 // runtime·NumCgoCall() iterates over allm w/o locks, |
280 // so we need to publish it safely. | 252 // so we need to publish it safely. |
281 runtime·atomicstorep(&runtime·allm, mp); | 253 runtime·atomicstorep(&runtime·allm, mp); |
| 254 LOG("%d: mcommoninit %d m=%p stackalloc=%p\n", m->id, mp->id, mp, mp->st
ackalloc); |
282 runtime·unlock(&runtime·sched); | 255 runtime·unlock(&runtime·sched); |
| 256 } |
| 257 |
| 258 // Mark g ready to run. |
| 259 void |
| 260 runtime·ready(G *gp) |
| 261 { |
| 262 P *p; |
| 263 M *mp; |
| 264 |
| 265 if(gp->m) |
| 266 runtime·throw("bad g->m in ready"); |
| 267 |
| 268 // Mark runnable. |
| 269 if(gp->status == Grunnable || gp->status == Grunning) { |
| 270 runtime·printf("goroutine %D has status %d\n", gp->goid, gp->sta
tus); |
| 271 runtime·throw("bad g->status in ready"); |
| 272 } |
| 273 gp->status = Grunnable; |
| 274 runqput(m->p, gp); |
| 275 if(runtime·sched.pidle) { |
| 276 runtime·lock(&runtime·sched); |
| 277 p = pidleget(); |
| 278 if(p) { |
| 279 mp = mget(); |
| 280 runtime·unlock(&runtime·sched); |
| 281 if(mp) { |
| 282 entergo(mp, p); |
| 283 runtime·notewakeup(&mp->park); |
| 284 } else { |
| 285 newm(runtime·mstart, p, false); |
| 286 } |
| 287 } else |
| 288 runtime·unlock(&runtime·sched); |
| 289 } |
| 290 } |
| 291 |
| 292 static void |
| 293 munpark(M *mp, P *p) |
| 294 { |
| 295 if(mp) { |
| 296 entergo(mp, p); |
| 297 runtime·notewakeup(&mp->park); |
| 298 } else |
| 299 newm(runtime·mstart, p, false); |
| 300 } |
| 301 ································ |
| 302 int32 |
| 303 runtime·gcprocs(void) |
| 304 { |
| 305 int32 n; |
| 306 |
| 307 runtime·lock(&runtime·sched); |
| 308 n = runtime·gomaxprocs; |
| 309 if(n > runtime·ncpu) |
| 310 n = runtime·ncpu; |
| 311 if(n > MaxGcproc) |
| 312 n = MaxGcproc; |
| 313 runtime·unlock(&runtime·sched); |
| 314 return n; |
| 315 } |
| 316 |
| 317 void |
| 318 runtime·helpgc(int32 nproc) |
| 319 { |
| 320 M *mp; |
| 321 int32 n, pos; |
| 322 |
| 323 LOG("%d: helpgc(%d)\n", m->id, nproc); |
| 324 runtime·lock(&runtime·sched); |
| 325 pos = 0; |
| 326 for(n = 1; n < nproc; n++) { // one M is currently running |
| 327 if(runtime·allp[pos]->mcache == m->mcache) |
| 328 pos++; |
| 329 mp = mget(); |
| 330 if(mp == nil) { |
| 331 runtime·unlock(&runtime·sched); |
| 332 newm(runtime·mstart, runtime·allp[pos], true); |
| 333 runtime·lock(&runtime·sched); |
| 334 pos++; |
| 335 continue; |
| 336 } |
| 337 mp->helpgc = 1; |
| 338 mp->mcache = runtime·allp[pos]->mcache; |
| 339 pos++; |
| 340 LOG("%d: helpgc wake %d\n", m->id, mp->id); |
| 341 runtime·notewakeup(&mp->park); |
| 342 } |
| 343 runtime·unlock(&runtime·sched); |
| 344 } |
| 345 |
| 346 void |
| 347 runtime·stoptheworld(void) |
| 348 { |
| 349 int32 i; |
| 350 uint32 s; |
| 351 P *p; |
| 352 bool wait; |
| 353 |
| 354 LOG("%d: stoptheworld\n", m->id); |
| 355 runtime·lock(&runtime·sched); |
| 356 runtime·gcwaiting = 1; |
| 357 runtime·sched.stopwait = runtime·gomaxprocs; |
| 358 m->p->status = Plocked; |
| 359 runtime·sched.stopwait--; |
| 360 for(i=0; i<runtime·gomaxprocs; i++) { |
| 361 s = runtime·allp[i]->status; |
| 362 if(s == Psyscall && runtime·cas(&runtime·allp[i]->status, s, Plo
cked)) { |
| 363 LOG(" acquired syscall %d\n", i); |
| 364 runtime·sched.stopwait--; |
| 365 } |
| 366 } |
| 367 while(runtime·sched.pidle) { |
| 368 p = pidleget(); |
| 369 p->status = Plocked; |
| 370 runtime·sched.stopwait--; |
| 371 } |
| 372 CHECK(runtime·sched.stopwait >= 0, ("")); |
| 373 wait = runtime·sched.stopwait > 0; |
| 374 runtime·unlock(&runtime·sched); |
| 375 if(wait) { |
| 376 runtime·notesleep(&runtime·sched.stopnote); |
| 377 runtime·noteclear(&runtime·sched.stopnote); |
| 378 } |
| 379 LOG("%d: stoptheworld stopped\n", m->id); |
| 380 CHECK(runtime·sched.stopwait == 0, ("stoptheworld: stopwait == %d\n", ru
ntime·sched.stopwait)); |
| 381 for(i=0; i<runtime·gomaxprocs; i++) { |
| 382 CHECK(runtime·allp[i]->status == Plocked, ("stoptheworld: not st
opped (%d)\n", runtime·allp[i]->status)); |
| 383 } |
| 384 } |
| 385 |
| 386 void |
| 387 runtime·starttheworld(void) |
| 388 { |
| 389 //G *gp; |
| 390 //P *p; |
| 391 //M *mp; |
| 392 //int32 n, w; |
| 393 |
| 394 LOG("%d: starttheworld\n", m->id); |
| 395 runtime·gcwaiting = 0; |
| 396 if(newprocs) { |
| 397 procresize(newprocs); |
| 398 newprocs = 0; |
| 399 } else { |
| 400 procresize(runtime·gomaxprocs); |
| 401 } |
| 402 runtime·lock(&runtime·sched); |
| 403 /* |
| 404 gp = runtime·netwait(0, runtime·gomaxprocs); |
| 405 n = w = 0; |
| 406 inject(gp, &w, &n); |
| 407 while(runtime·sched.pidle) { |
| 408 p = pidleget(); |
| 409 mp = mget(); |
| 410 if(mp) { |
| 411 entergo(mp, p); |
| 412 runtime·notewakeup(&mp->park); |
| 413 } else { |
| 414 runtime·unlock(&runtime·sched); |
| 415 newm(runtime·mstart, p, false); |
| 416 runtime·lock(&runtime·sched); |
| 417 } |
| 418 } |
| 419 */ |
| 420 if(runtime·sched.sysmonwait) { |
| 421 runtime·sched.sysmonwait = 0; |
| 422 runtime·notewakeup(&runtime·sched.sysmonnote); |
| 423 } |
| 424 runtime·unlock(&runtime·sched); |
| 425 } |
| 426 |
| 427 // Called to start an M. |
| 428 void |
| 429 runtime·mstart(void) |
| 430 { |
| 431 // It is used by windows-386 only. Unfortunately, seh needs |
| 432 // to be located on os stack, and mstart runs on os stack |
| 433 // for both m0 and m. |
| 434 SEH seh; |
| 435 P *p; |
| 436 |
| 437 LOG("%d: mstart m=%p\n", m->id, m); |
| 438 if(g != m->g0) |
| 439 runtime·throw("bad runtime·mstart"); |
| 440 |
| 441 // Record top of stack for use by mcall. |
| 442 // Once we call schedule we're never coming back, |
| 443 // so other calls can reuse this stack space. |
| 444 runtime·gosave(&m->g0->sched); |
| 445 m->g0->sched.pc = (void*)-1; // make sure it is never used |
| 446 m->seh = &seh; |
| 447 runtime·asminit(); |
| 448 runtime·minit(); |
| 449 |
| 450 // Install signal handlers; after minit so that minit can |
| 451 // prepare the thread to be able to handle the signals. |
| 452 if(m == &runtime·m0) |
| 453 runtime·initsig(); |
| 454 |
| 455 if(m->helpgc) { |
| 456 LOG("%d: mstart helpgc\n", m->id); |
| 457 m->helpgc = 0; |
| 458 m->mcache = m->p->mcache; |
| 459 runtime·gchelper(); |
| 460 m->mcache = nil; |
| 461 m->p = nil; |
| 462 LOG("%d: gchelper end\n", m->id); |
| 463 mstop(); |
| 464 } else if(m != &runtime·m0) { |
| 465 p = m->p; |
| 466 m->p = nil; |
| 467 entergo(m, p); |
| 468 } |
| 469 LOG("%d: calling schedule\n", m->id); |
| 470 schedule(); |
| 471 |
| 472 // TODO(brainman): This point is never reached, because scheduler |
| 473 // does not release os threads at the moment. But once this path |
| 474 // is enabled, we must remove our seh here. |
| 475 } |
| 476 |
| 477 // When running with cgo, we call libcgo_thread_start |
| 478 // to start threads for us so that we can play nicely with |
| 479 // foreign code. |
| 480 void (*libcgo_thread_start)(void*); |
| 481 |
| 482 typedef struct CgoThreadStart CgoThreadStart; |
| 483 struct CgoThreadStart |
| 484 { |
| 485 M *m; |
| 486 G *g; |
| 487 void (*fn)(void); |
| 488 }; |
| 489 |
| 490 static void |
| 491 initgstack(G *newg, byte *stk, int32 stacksize) |
| 492 { |
| 493 newg->stack0 = (uintptr)stk; |
| 494 newg->stackguard = (uintptr)stk + StackGuard; |
| 495 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop); |
| 496 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); |
| 497 } |
| 498 |
| 499 // Create a new m. It will start off with a call to runtime·mstart. |
| 500 static M* |
| 501 newm(void(*fn)(void), P *p, bool helpgc) |
| 502 { |
| 503 M *mp; |
| 504 int32 addmem,stksiz, stkoff; |
| 505 //!!!static Type *mtype; // The Go type M |
| 506 |
| 507 LOG("%d: newm\n", m->id); |
| 508 addmem = sizeof(*mp->stackalloc); |
| 509 if(runtime·gsignalstk) |
| 510 addmem += sizeof(G) + runtime·gsignalstk; |
| 511 stkoff = sizeof(M) + addmem; |
| 512 stksiz = StackSystem + (fn == runtime·mstart ? 8192 : 64*1024); |
| 513 if(!runtime·iscgo && !Windows) |
| 514 addmem += stksiz; |
| 515 //!!! all that is now non-GC, can it break something? |
| 516 mp = runtime·SysAlloc(sizeof(M) + addmem); |
| 517 mp->stackalloc = (FixAlloc*)(mp+1); |
| 518 //!!!if(mtype == nil) { |
| 519 //!!! Eface e; |
| 520 //!!! runtime·gc_m_ptr(&e); |
| 521 //!!! mtype = ((PtrType*)e.type)->elem; |
| 522 //!!!} |
| 523 //!!! mp = runtime·cnew(mtype); |
| 524 mcommoninit(mp); |
| 525 mp->g0 = &mp->g0buf; |
| 526 mp->p = p; |
| 527 mp->helpgc = helpgc; |
| 528 if(runtime·gsignalstk) { |
| 529 mp->gsignal = (G*)((byte*)mp+sizeof(*mp)+sizeof(*mp->stackalloc)
); |
| 530 initgstack(mp->gsignal, (byte*)(mp->gsignal+1), runtime·gsignals
tk); |
| 531 } |
| 532 |
| 533 if(runtime·iscgo) { |
| 534 CgoThreadStart ts; |
| 535 |
| 536 if(libcgo_thread_start == nil) |
| 537 runtime·throw("libcgo_thread_start missing"); |
| 538 // pthread_create will make us a stack. |
| 539 ts.m = mp; |
| 540 ts.g = mp->g0; |
| 541 ts.fn = fn; |
| 542 runtime·asmcgocall(libcgo_thread_start, &ts); |
| 543 } else { |
| 544 // windows will layout sched stack on os stack |
| 545 if(!Windows) |
| 546 initgstack(mp->g0, (byte*)mp+stkoff, stksiz); |
| 547 runtime·newosproc(mp, mp->g0, (byte*)mp->g0->stackbase, fn); |
| 548 } |
| 549 return mp; |
| 550 } |
| 551 |
| 552 static void |
| 553 mstop(void) |
| 554 { |
| 555 LOG("%d: mstop\n", m->id); |
| 556 CHECK(m->locks == 0, ("")); |
| 557 CHECK(m->p == nil, ("mstop: p != nil\n")); |
| 558 retry: |
| 559 runtime·lock(&runtime·sched); |
| 560 mput(m); |
| 561 runtime·unlock(&runtime·sched); |
| 562 runtime·notesleep(&m->park); |
| 563 runtime·noteclear(&m->park); |
| 564 if(m->helpgc) { |
| 565 LOG("%d: gchelper\n", m->id); |
| 566 m->helpgc = 0; |
| 567 runtime·gchelper(); |
| 568 m->mcache = nil; |
| 569 LOG("%d: gchelper end\n", m->id); |
| 570 goto retry; |
| 571 } |
| 572 LOG("%d: mstop wake\n", m->id); |
| 573 if(m->p == nil) |
| 574 runtime·throw("mstop: p == nil"); |
| 575 } |
| 576 |
| 577 // Schedules gp to run on M. Never returns. |
| 578 static void |
| 579 execute(G *gp) |
| 580 { |
| 581 int32 hz; |
| 582 |
| 583 LOG("%d: start running goroutine %p\n", m->id, gp); |
| 584 CHECK(m->locks == 0, ("")); |
| 585 CHECK(g == m->g0, ("execute: not on g0\n")); |
| 586 CHECK(m->p != nil, ("execute: no p\n")); |
| 587 CHECK(gp->status == Grunnable, ("execute: gp=%d gp->status=%d\n", gp->go
id, gp->status)); |
| 588 CHECK(gp->m == nil, ("execute: gp->m=%p\n", gp->m)); |
| 589 CHECK(gp->lockedm == nil && m->lockedg == nil || gp->lockedm == m && m->
lockedg == gp, |
| 590 ("bad locking: gp->lockedm=%p m->lockedg=%p\n", gp->lockedm, m->
lockedg)); |
| 591 m->p->tick++; |
| 592 gp->status = Grunning; |
| 593 m->curg = gp; |
| 594 gp->m = m; |
| 595 |
| 596 // Check whether the profiler needs to be turned on or off. |
| 597 hz = runtime·sched.profilehz; |
| 598 if(m->profilehz != hz) |
| 599 runtime·resetcpuprofiler(hz); |
| 600 |
| 601 if(gp->sched.pc == (byte*)runtime·goexit) // kickoff |
| 602 runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); |
| 603 runtime·gogo(&gp->sched, 0); |
| 604 } |
| 605 |
| 606 // One round of scheduler: find a goroutine and run it. |
| 607 // The argument is the goroutine that was running before |
| 608 // schedule was called, or nil if this is the first call. |
| 609 // Never returns. |
| 610 static void |
| 611 schedule(void) |
| 612 { |
| 613 int32 i, try; |
| 614 G *gp, *gp1; |
| 615 P *p; |
| 616 M *mp; |
| 617 |
| 618 LOG("%d: schedule p=%p\n", m->id, m->p); |
| 619 USED(&gp); |
| 620 CHECK(m->locks == 0, ("schedule: holding locks\n")); |
| 621 CHECK(m->lockedg == nil, ("schedule: locked M\n")); |
| 622 |
| 623 top: |
| 624 if(runtime·gcwaiting) { |
| 625 p = releasep(); |
| 626 p->status = Plocked; |
| 627 runtime·lock(&runtime·sched); |
| 628 runtime·sched.stopwait--; |
| 629 if(runtime·sched.stopwait == 0) |
| 630 runtime·notewakeup(&runtime·sched.stopnote); |
| 631 runtime·unlock(&runtime·sched); |
| 632 mstop(); |
| 633 goto top; |
| 634 } |
| 635 |
| 636 gp = runqget(m->p); |
| 637 if(gp == nil) { |
| 638 for(try=0; try<2; try++) { |
| 639 if(runtime·sched.runqsize) { |
| 640 runtime·lock(&runtime·sched); |
| 641 gp = globrunqget(); |
| 642 if(gp) { |
| 643 while(gp->schedlink != nil) { |
| 644 gp1 = gp; |
| 645 gp = gp1->schedlink; |
| 646 runqput(m->p, gp1); |
| 647 } |
| 648 } |
| 649 runtime·unlock(&runtime·sched); |
| 650 if(gp) |
| 651 goto haveg; |
| 652 } |
| 653 for(i=0; i<runtime·gomaxprocs; i++) { |
| 654 if(runtime·gcwaiting) |
| 655 goto top; |
| 656 p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs]
; |
| 657 if(p == m->p) |
| 658 gp = runqget(p); |
| 659 else |
| 660 gp = runqsteal(m->p, p); |
| 661 if(gp) |
| 662 break; |
| 663 } |
| 664 if(gp) |
| 665 break; |
| 666 if(try==0 && runtime·gcwaiting == 0) |
| 667 runtime·osyield(); |
| 668 } |
| 669 if(gp == nil) { |
| 670 p = releasep(); |
| 671 runtime·lock(&runtime·sched); |
| 672 if(runtime·gcwaiting) { |
| 673 p->status = Plocked; |
| 674 runtime·sched.stopwait--; |
| 675 if(runtime·sched.stopwait == 0) |
| 676 runtime·notewakeup(&runtime·sched.stopno
te); |
| 677 runtime·unlock(&runtime·sched); |
| 678 mstop(); |
| 679 goto top; |
| 680 } |
| 681 pidleput(p); |
| 682 if(runtime·sched.runqsize) { |
| 683 p = pidleget(); |
| 684 runtime·unlock(&runtime·sched); |
| 685 entergo(m, p); |
| 686 goto top; |
| 687 } |
| 688 runtime·unlock(&runtime·sched); |
| 689 for(i=0; i<runtime·gomaxprocs; i++) { |
| 690 p = runtime·allp[i]; |
| 691 if(p && p->runqhead != p->runqtail) { |
| 692 runtime·lock(&runtime·sched); |
| 693 p = pidleget(); |
| 694 runtime·unlock(&runtime·sched); |
| 695 if(p) { |
| 696 entergo(m, p); |
| 697 goto top; |
| 698 } |
| 699 break; |
| 700 } |
| 701 } |
| 702 mstop(); |
| 703 goto top; |
| 704 } |
| 705 } |
| 706 |
| 707 haveg: |
| 708 if(gp->lockedm) { |
| 709 mp = gp->lockedm; |
| 710 p = releasep(); |
| 711 entergo(mp, p); |
| 712 runtime·notewakeup(&mp->park); |
| 713 mstop(); |
| 714 goto top; |
| 715 } |
| 716 |
| 717 execute(gp); |
| 718 } |
| 719 |
| 720 static void |
| 721 park0(G *gp) |
| 722 { |
| 723 P *p; |
| 724 M *mp; |
| 725 |
| 726 USED(&gp); |
| 727 if(m->lockedg) { |
| 728 p = releasep(); |
| 729 if(m->waitunlockf) { |
| 730 m->waitunlockf(m->waitlock); |
| 731 m->waitunlockf = nil; |
| 732 } |
| 733 // After this point another thread may schedule gp on m again. |
| 734 // Schedule another M to run P. |
| 735 runtime·lock(&runtime·sched); |
| 736 mp = mget(); |
| 737 runtime·unlock(&runtime·sched); |
| 738 munpark(mp, p); |
| 739 // Wait until another thread schedules gp and so m again. |
| 740 runtime·notesleep(&m->park); |
| 741 runtime·noteclear(&m->park); |
| 742 execute(gp); // Never returns. |
| 743 } |
| 744 if(m->waitunlockf) { |
| 745 m->waitunlockf(m->waitlock); |
| 746 m->waitunlockf = nil; |
| 747 } |
| 748 schedule(); |
| 749 } |
| 750 |
| 751 // Puts the current goroutine into a waiting state and unlocks the lock. |
| 752 // The goroutine can be made runnable again by calling runtime·ready(gp). |
| 753 void |
| 754 runtime·park(void(*unlockf)(Lock*), Lock *l, int8 *reason) |
| 755 { |
| 756 LOG("%d: park l=%p reason=%s\n", m->id, l, reason); |
| 757 CHECK(g != m->g0, ("park of g0\n")); |
| 758 m->waitlock = l; |
| 759 m->waitunlockf = unlockf; |
| 760 g->status = Gwaiting; |
| 761 g->waitreason = reason; |
| 762 g->m = nil; |
| 763 runtime·mcall(park0); |
| 764 } |
| 765 |
// gosched0 continues runtime·gosched on the g0 stack: put gp (now
// Grunnable) on the global run queue and pick something else to run.
static void
gosched0(G *gp)
{
	P *p;
	M *mp;

	LOG("%d: gosched0 gp=%p\n", m->id, gp);
	gp->status = Grunnable;
	gp->m = nil;
	if(m->lockedg) {
		// gp is locked to this m: give the P away, publish gp on the
		// global queue, then wait until gp is handed back to this m.
		p = releasep();
		runtime·lock(&runtime·sched);
		globrunqput(gp);
		// After this point another thread may schedule gp on m again.
		// Schedule another M to run P.
		mp = mget();
		runtime·unlock(&runtime·sched);
		munpark(mp, p);
		// Wait until another thread schedules gp and so m again.
		runtime·notesleep(&m->park);
		runtime·noteclear(&m->park);
		execute(gp); // Never returns.
	}
	runtime·lock(&runtime·sched);
	globrunqput(gp);
	runtime·unlock(&runtime·sched);
	schedule();
}
| 794 |
// Yield the processor: requeue the current goroutine and run the
// scheduler (continues in gosched0 on the g0 stack).
void
runtime·gosched(void)
{
	runtime·mcall(gosched0);
}
| 800 |
// On g0.
// goexit0 finishes runtime·goexit: mark gp dead, sever any OS-thread
// locking in both directions, unwind its stack, and cache the g on
// this P's gfree list for reuse by newproc1.
static void
goexit0(G *gp)
{
	gp->status = Gdead;
	gp->m = nil;
	gp->lockedm = nil;
	m->lockedg = nil;
	runtime·unwindstack(gp, nil);
	gfput(m->p, gp);
	schedule();
}
| 813 |
// Finishes execution of the current goroutine. Switches to the g0
// stack (goexit0) so the dying goroutine's stack can be recycled.
void
runtime·goexit(void)
{
	runtime·mcall(goexit0);
}
| 819 |
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
#pragma textflag 7
void
runtime·entersyscall(void)
{
	P *p;
	M *mp;

	LOG("%d: entersyscall g=%p p=%p\n", m->id, g, m->p);
	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	if(m->blockingsyscall) {
		// Expected to block for a long time (entersyscallblock):
		// hand the P to another M right away.
		m->blockingsyscall = false;
		p = releasep();
		runtime·lock(&runtime·sched);
		mp = mget();
		runtime·unlock(&runtime·sched);
		munpark(mp, p);
		return;
	}

	// Leave the P in Psyscall, detached from this M's cache; it can
	// be retaken by sysmon (retake) or re-acquired in exitsyscall.
	m->mcache = nil;
	m->p->m = nil;
	runtime·atomicstore(&m->p->status, Psyscall);
	if(runtime·gcwaiting) {
		// A stop-the-world is in progress: hand this P over to it
		// directly (Plocked) and credit the stopwait accounting.
		runtime·lock(&runtime·sched);
		if (runtime·sched.stopwait > 0 && runtime·cas(&m->p->status, Psyscall, Plocked)) {
			runtime·sched.stopwait--;
			if(runtime·sched.stopwait == 0)
				runtime·notewakeup(&runtime·sched.stopnote);
		}
		runtime·unlock(&runtime·sched);
	}
}
| 874 |
// Like entersyscall, but for a call known to block for a long time:
// sets m->blockingsyscall so entersyscall hands the P to another M
// immediately instead of leaving it parked in Psyscall.
#pragma textflag 7
void
runtime·entersyscallblock(void)
{
	m->blockingsyscall = true;
	runtime·entersyscall();
}
| 882 |
// exitsyscall0 is the slow path of runtime·exitsyscall, on the g0
// stack: gp could not retake a P directly, so either grab an idle P
// and keep running gp, or queue gp globally and stop this M.
static void
exitsyscall0(G *gp)
{
	P *p;

	LOG("%d: exitsyscall0\n", m->id);
	gp->status = Grunnable;
	gp->m = nil;
	CHECK(m->park.waitm == nil, ("exitsyscall0: park is signalled\n"));
	runtime·lock(&runtime·sched);
	p = pidleget();
	if(p == nil)
		globrunqput(gp);
	runtime·unlock(&runtime·sched);
	if(p) {
		entergo(m, p);
		execute(gp); // Never returns.
	}
	if(m->lockedg) {
		// gp is locked to this m; it must come back here.
		CHECK(m->lockedg == gp, ("exitsyscall0: inconsistent locking\n"));
		// Wait until another thread schedules gp and so m again.
		runtime·notesleep(&m->park);
		runtime·noteclear(&m->park);
		execute(gp); // Never returns.
	}
	mstop();
	schedule();
}
| 911 |
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
void
runtime·exitsyscall(void)
{
	uint32 s;
	P *p;

	LOG("%d: exitsyscall g=%p\n", m->id, g);

	// Check whether the profiler needs to be turned on.
	if(m->profilehz > 0)
		runtime·setprof(true);

	// Try to re-acquire the P.
	// Fast path: our old P is still in Psyscall (nobody retook it),
	// so CAS it straight back to Pbusy.
	s = m->p ? m->p->status : Pidle;
	if(s == Psyscall && runtime·cas(&m->p->status, s, Pbusy)) {
		LOG("%d: exitsyscall fast\n", m->id);
		// There's a cpu for us, so we can run.
		m->mcache = m->p->mcache;
		m->p->m = m;
		g->status = Grunning;
		// Garbage collector isn't running (since we are),
		// so okay to clear gcstack.
		g->gcstack = (uintptr)nil;
		return;
	}

	// Try to get idle P.
	m->p = nil;
	if(runtime·sched.pidle) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		runtime·unlock(&runtime·sched);
		if(p) {
			entergo(m, p);
			g->gcstack = (uintptr)nil;
			return;
		}
	}

	LOG("%d: exitsyscall slow p->status=%d\n", m->id, s);

	// Slow path: hand off to exitsyscall0 on the g0 stack.
	runtime·mcall(exitsyscall0);

	// Gosched returned, so we're allowed to run now.
	// Delete the gcstack information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	g->gcstack = (uintptr)nil;
}
| 967 |
// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment. The function's return value is in
// m->cret.
void
runtime·oldstack(void)
{
	Stktop *top, old;
	uint32 argsize;
	uintptr cret;
	byte *sp;
	G *g1;

	//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	old = *top;
	argsize = old.argsize;
	if(argsize > 0) {
		// Copy the return arguments back into the caller's frame
		// in the old segment (top->argp).
		sp -= argsize;
		runtime·memmove(top->argp, sp, argsize);
	}

	// Free the segment if one was allocated for this call
	// (old.free is the allocation size, 0 for the in-place case).
	if(old.free != 0)
		runtime·stackfree((byte*)g1->stackguard - StackGuard, old.free);
	// Restore the caller's segment bounds.
	g1->stackbase = (uintptr)old.stackbase;
	g1->stackguard = (uintptr)old.stackguard;

	cret = m->cret;
	m->cret = 0;  // drop reference
	runtime·gogo(&old.gobuf, cret);
}
| 1001 |
// Called from reflect·call or from runtime·morestack when a new
// stack segment is needed. Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
void
runtime·newstack(void)
{
	int32 framesize, argsize;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;
	bool reflectcall;
	uintptr free;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	g1 = m->curg;

	if(m->morebuf.sp < g1->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, g1->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}
	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

	// framesize==1 is a sentinel from reflect·call meaning
	// "arbitrary frame, just need the Stktop bookkeeping".
	reflectcall = framesize==1;
	if(reflectcall)
		framesize = 0;

	if(reflectcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > g1->stackguard) {
		// special case: called from reflect.call (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)g1->stackguard - StackGuard;
		free = 0;
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		stk = runtime·stackalloc(framesize);
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;
	}

//runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
//framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);

	// Record the old segment's bounds and resume point in the new
	// Stktop so oldstack/lessstack can unwind later.
	top->stackbase = (byte*)g1->stackbase;
	top->stackguard = (byte*)g1->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;
	m->moreargp = nil;
	m->morebuf.pc = nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = g1->ispanic;
	g1->ispanic = false;

	g1->stackbase = (uintptr)top;
	g1->stackguard = (uintptr)stk + StackGuard;

	// Copy the arguments just below the new Stktop.
	sp = (byte*)top;
	if(argsize > 0) {
		sp -= argsize;
		runtime·memmove(sp, top->argp, argsize);
	}
	if(thechar == '5') {
		// caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = (uintptr)sp;
	label.pc = (byte*)runtime·lessstack;
	label.g = m->curg;
	runtime·gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return
}
| 1096 |
// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack. This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.
static void
mstackalloc(G *gp)
{
	// gp->param carries the requested size in and the allocated
	// stack pointer out.
	gp->param = runtime·stackalloc((uintptr)gp->param);
	runtime·gogo(&gp->sched, 0);
}
| 1107 |
// Allocate a new g, with a stack big enough for stacksize bytes.
// A negative stacksize allocates the G descriptor with no stack.
G*
runtime·malg(int32 stacksize)
{
	G *newg;
	byte *stk;

	if(StackTop < sizeof(Stktop)) {
		runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
		runtime·throw("runtime: bad stack.h");
	}

	newg = runtime·malloc(sizeof(G));
	if(stacksize >= 0) {
		if(g == m->g0) {
			// running on scheduler stack already.
			stk = runtime·stackalloc(StackSystem + stacksize);
		} else {
			// have to call stackalloc on scheduler stack.
			g->param = (void*)(StackSystem + stacksize);
			runtime·mcall(mstackalloc);
			stk = g->param;
			g->param = nil;
		}
		// stackbase points at the Stktop at the high end of the segment.
		newg->stack0 = (uintptr)stk;
		newg->stackguard = (uintptr)stk + StackGuard;
		newg->stackbase = (uintptr)stk + StackSystem + stacksize - sizeof(Stktop);
		runtime·memclr((byte*)newg->stackbase, sizeof(Stktop));
	}
	return newg;
}
| 1139 |
// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred. It's OK for this to call
// functions that split the stack.
#pragma textflag 7
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	if(thechar == '5')
		argp = (byte*)(&fn+2);  // skip caller's saved LR
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}
| 1159 |
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
G*
runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
	byte *sp;
	G *newg;
	M *mp;
	P *p;
	int32 siz;
	//int64 goid;

	//printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret);
	siz = narg + nret;
	siz = (siz+7) & ~7;	// round frame size up to 8 bytes

	// We could instead create a secondary stack frame
	// and make it look like goexit was on the original but
	// the call to the actual goroutine function was split.
	// Not worth it: this is almost always an error.
	if(siz > StackMin - 1024)
		runtime·throw("runtime.newproc: function arguments too large for new goroutine");

	if((newg = gfget(m->p)) != nil) {
		// Reuse a dead g cached on this P's gfree list (see goexit0).
		//!!!if(raceenabled)
		//!!!	runtime·racegostart(goid, callerpc);
		if(newg->stackguard - StackGuard != newg->stack0)
			runtime·throw("invalid stack in newg");
	} else {
		newg = runtime·malg(StackMin);
		// Assign a goid and link the fresh g onto the global allg
		// list under the sched lock.
		runtime·lock(&runtime·sched);
		newg->goid = ++runtime·sched.goidseq;
		if(runtime·lastg == nil)
			runtime·allg = newg;
		else
			runtime·lastg->alllink = newg;
		runtime·lastg = newg;
		runtime·unlock(&runtime·sched);
	}

	// Copy the arguments onto the new goroutine's stack.
	sp = (byte*)newg->stackbase;
	sp -= siz;
	runtime·memmove(sp, argp, narg);
	if(thechar == '5') {
		// caller's LR
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	LOG("%d: newproc %p\n", m->id, newg);
	// Arrange for the goroutine to "return" into runtime·goexit
	// when fn finishes.
	newg->sched.sp = (uintptr)sp;
	newg->sched.pc = (byte*)runtime·goexit;
	newg->sched.g = newg;
	newg->entry = fn;
	newg->gopc = (uintptr)callerpc;
	newg->status = Grunnable;
	runqput(m->p, newg);

	// If Ps are sitting idle, wake or create an M to run the new
	// work (skipped while starting runtime·main itself).
	if(runtime·sched.pidle && fn != (byte*)runtime·main) {
		runtime·lock(&runtime·sched);
		p = pidleget();
		if(p) {
			mp = mget();
			runtime·unlock(&runtime·sched);
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else
				newm(runtime·mstart, p, false);
		} else
			runtime·unlock(&runtime·sched);
	}
	return newg;
}
| 1236 |
// Go entry point for runtime.Breakpoint: execute a breakpoint trap.
void
runtime·Breakpoint(void)
{
	runtime·breakpoint();
}
| 1242 |
// Go entry point for runtime.Gosched: yield the processor.
void
runtime·Gosched(void)
{
	runtime·gosched();
}
| 1248 |
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
// Returns the previous setting; n <= 0 just queries without changing.
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;

	LOG("%d: gomaxprocsfunc %d\n", m->id, n);
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·lock(&runtime·sched);
	ret = runtime·gomaxprocs;
	if(n <= 0 || n == ret) {
		runtime·unlock(&runtime·sched);
		return ret;
	}
	runtime·unlock(&runtime·sched);

	// Stop the world to apply the change.
	runtime·semacquire(&runtime·worldsema);
	m->gcing = 1;
	runtime·stoptheworld();
	// newprocs is applied while the world is stopped — presumably
	// consumed by starttheworld via procresize; confirm.
	newprocs = n;
	m->gcing = 0;
	runtime·semrelease(&runtime·worldsema);
	runtime·starttheworld();

	return ret;
}
| 1277 |
| 1278 void |
| 1279 runtime·LockOSThread(void) |
| 1280 { |
| 1281 if(m == &runtime·m0 && runtime·sched.init) { |
| 1282 runtime·sched.lockmain = true; |
| 1283 return; |
| 1284 } |
| 1285 m->lockedg = g; |
| 1286 g->lockedm = m; |
| 1287 } |
| 1288 |
| 1289 void |
| 1290 runtime·UnlockOSThread(void) |
| 1291 { |
| 1292 if(m == &runtime·m0 && runtime·sched.init) { |
| 1293 runtime·sched.lockmain = false; |
| 1294 return; |
| 1295 } |
| 1296 m->lockedg = nil; |
| 1297 g->lockedm = nil; |
| 1298 } |
| 1299 |
| 1300 bool |
| 1301 runtime·lockedOSThread(void) |
| 1302 { |
| 1303 return g->lockedm != nil && m->lockedg != nil; |
| 1304 } |
| 1305 |
// for testing of callbacks
// Go-callable wrapper: the result is written into the out-argument
// slot and published with FLUSH.
void
runtime·golockedOSThread(bool ret)
{
	ret = runtime·lockedOSThread();
	FLUSH(&ret);
}
| 1313 |
// for testing of wire, unwire
// Returns the current M's id via the Go out-argument slot.
void
runtime·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}
| 1321 |
// Go entry point for runtime.NumGoroutine.
// NOTE(review): stubbed to 1 while sched.gcount accounting is
// disabled (see commented-out line) — restore real counting.
void
runtime·NumGoroutine(intgo ret)
{
	//ret = runtime·sched.gcount;
	ret = 1;
	FLUSH(&ret);
}
| 1329 |
// Number of goroutines.
// NOTE(review): stubbed to 1 — sched.gcount accounting is disabled.
int32
runtime·gcount(void)
{
	//return runtime·sched.gcount;
	return 1;
}
| 1336 |
// Number of Ms, as tracked in sched.mcount.
int32
runtime·mcount(void)
{
	return runtime·sched.mcount;
}
| 1342 |
// Fatal-error path reached from assembly when mcall is invoked while
// already on the g0 stack.
void
runtime·badmcall(void)	// called from assembly
{
	runtime·throw("runtime: mcall called on m->g0 stack");
}
| 1348 |
// Fatal-error path reached from assembly when an mcall'd function
// returns; mcall targets must never return.
void
runtime·badmcall2(void)	// called from assembly
{
	runtime·throw("runtime: mcall function returned");
}
| 1354 |
// CPU profiling state. The embedded Lock serializes sigprof against
// setcpuprofilerate.
static struct {
	Lock;
	void (*fn)(uintptr*, int32);	// callback receiving each traceback; nil when off
	int32 hz;			// sampling frequency; 0 disables profiling
	uintptr pcbuf[100];		// scratch buffer for one traceback
} prof;
| 1361 |
// Called if we receive a SIGPROF signal.
// Collects a traceback of gp into prof.pcbuf and delivers it to
// prof.fn under the prof lock.
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
{
	int32 n;

	// Racy pre-check avoids taking the lock on every signal while
	// profiling is off; re-checked under the lock below.
	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime·lock(&prof);
	if(prof.fn == nil) {
		runtime·unlock(&prof);
		return;
	}
	n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
	if(n > 0)
		prof.fn(prof.pcbuf, n);
	runtime·unlock(&prof);
}
| 1381 |
// Arrange to call fn with a traceback hz times a second.
// fn==nil or hz==0 disables profiling.
void
runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime·resetcpuprofiler(0);

	runtime·lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime·unlock(&prof);
	// Also publish the rate in the scheduler state.
	runtime·lock(&runtime·sched);
	runtime·sched.profilehz = hz;
	runtime·unlock(&runtime·sched);

	// Restart the per-cpu profiler timer at the new rate.
	if(hz != 0)
		runtime·resetcpuprofiler(hz);
}
| 1410 |
// Change number of processors. The world is stopped.
// Allocates/initializes Ps up to new, drains run queues of retired
// Ps onto allp[0], and re-associates the calling M with allp[0].
static void
procresize(int32 new)
{
	int32 i, old;
	G *gp;
	P *p;

	runtime·lock(&runtime·sched); //!!!
	old = runtime·gomaxprocs;
	LOG("%d: procresize %d->%d\n", m->id, old, new);
	if(old < 0 || old > maxgomaxprocs || new <= 0 || new > maxgomaxprocs)
		runtime·throw("procresize: invalid arg");
	if(old == new) {
		// Count unchanged: just reset P states, keeping the current
		// M's P busy and idling the rest.
		for(i=0; i<new; i++) {
			p = runtime·allp[i];
			if(p == m->p)
				p->status = Pbusy;
			else {
				p->status = Pidle;
				pidleput(p);
			}
		}
		runtime·unlock(&runtime·sched);
		return;
	}

	runtime·singleproc = new == 1;
	runtime·gomaxprocs = new;
	// Allocate and initialize any P slots that are new or incomplete.
	for(i=0; i<new; i++) {
		p = runtime·allp[i];
		if(p == nil) {
			p = (P*)runtime·mallocgc(sizeof(runtime·allp[i][0]), 0, 0, 1);
			p->status = Plocked;
			runtime·allp[i] = p; //@@@ store-release
		}
		if(p->mcache == nil) {
			// Bootstrap case: the very first P inherits this M's cache.
			if(old==0 && i==0)
				p->mcache = m->mcache;
			else
				p->mcache = runtime·allocmcache();
		}
		if(p->runq == nil) {
			p->runqsize = 1024;
			p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*), 0, 0, 1);
		}
	}

	// Collect all queued goroutines onto allp[0]'s run queue.
	for(i=1; i<old; i++) {
		for(;;) {
			gp = runqget(runtime·allp[i]);
			if(gp == nil)
				break;
			//TODO: spread more evenly.
			runqput(runtime·allp[0], gp);
		}
	}

	// Retire Ps beyond the new count.
	for(i=new; i<old; i++) {
		runtime·freemcache(runtime·allp[i]->mcache);
		runtime·allp[i]->mcache = nil;
		runtime·allp[i]->status = Pdead;
		//TODO: free freeg
	}

	// Re-associate the calling M with allp[0]; idle the rest.
	if(m->p)
		m->p->m = nil;
	m->p = nil;
	m->mcache = nil;
	runtime·allp[0]->m = nil;
	runtime·allp[0]->status = Pidle;
	entergo(m, runtime·allp[0]);
	for(i=1; i<new; i++) {
		p = runtime·allp[i];
		p->status = Pidle;
		pidleput(p);
	}
	runtime·unlock(&runtime·sched);
}
| 1490 |
// Associate M mp with P p: mp adopts p's mcache and p becomes Pbusy.
// p must be Pidle and unowned; mp must not already hold a P.
static void
entergo(M *mp, P *p)
{
	LOG("%d: entergo m=%d p=%p p->m=%p, p->status=%d, p->mcache=%p\n", m->id, mp->id, p, p->m, p->status, p->mcache);
	if(mp->p || mp->mcache)
		runtime·throw("entergo: already in go");
	if(p->m || p->status != Pidle) {
		runtime·printf("entergo: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
		runtime·throw("entergo: invalid p state");
	}
	mp->mcache = p->mcache;
	mp->p = p;
	p->m = mp;
	p->status = Pbusy;
}
| 1506 |
// Disassociate the current M from its P and return the P in Pidle
// state; the caller decides where the released P goes.
static P*
releasep(void)
{
	M *mp;
	P *p;

	mp = m;
	LOG("%d: releasep\n", mp->id);
	// sched is locked
	// NOTE(review): some callers (e.g. park0) appear to call this
	// before taking the sched lock — confirm the locking contract.
	if(mp->p == nil || mp->mcache == nil)
		runtime·throw("releasep: invalid arg");
	p = mp->p;
	if(p->m != mp || p->mcache != mp->mcache || p->status != Pbusy) {
		runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
			mp, mp->p, p->m, m->mcache, p->mcache, p->status);
		runtime·throw("releasep: invalid p state");
	}
	mp->p = nil;
	mp->mcache = nil;
	p->m = nil;
	p->status = Pidle;
	return p;
}
| 1530 |
// Pdesc holds per-P observation state for the sysmon retake loop.
typedef struct Pdesc Pdesc;
struct Pdesc
{
	uint32 tick;	// last observed value of p->tick
	int64 when;	// time (ns since sysmon start) tick last changed
};
| 1537 |
// Retake Ps stuck in system calls: if a P's tick has not advanced in
// ~20us and it is still Psyscall, take it from the syscalling M and
// hand it to another M. ps carries per-P state between calls.
static void
retake(int64 now, Pdesc *ps)
{
	uint32 i, s;
	int64 t;
	P *p;
	M *mp;

	for(i=0; i<runtime·gomaxprocs; i++) {
		p = runtime·allp[i];
		//!!! procresize may be in progress
		// do something if GC is in progress (help).
		if(p==nil)
			continue;
		t = p->tick;
		if(ps[i].tick != t) {
			// P made progress; restart its idle clock.
			ps[i].tick = t;
			ps[i].when = now;
		}
		if(ps[i].when + 20*1000 > now)	// 20us, now is in ns
			continue;
		s = p->status;
		if(s == Psyscall && runtime·cas(&p->status, s, Pidle)) {
			LOG("retake %p(%d)\n", p, i);
			runtime·lock(&runtime·sched);
			mp = mget();
			runtime·unlock(&runtime·sched);
			munpark(mp, p);
		}
	}
}
| 1569 |
// Per-P observation state for retake, indexed like runtime·allp.
static Pdesc ps[maxgomaxprocs];

// sysmon is the system-monitor loop: it periodically retakes Ps that
// are stuck in system calls, and parks itself while GC is stopping
// the world.
static void
sysmon(void)
{
	int64 t0, now;

	// This is a special dedicated thread.
	// It works w/o mcache nor stackalloc, it may work concurrently with GC.
	runtime·asminit();
	runtime·minit();
	LOG("sysmon\n");
	t0 = runtime·nanotime();
	for(;;) {
		//!!! sleep more if possible
		runtime·usleep(20);
		if(runtime·gcwaiting) {
			// GC wants the world stopped: park on sysmonnote
			// (gcwaiting double-checked under the sched lock).
			runtime·lock(&runtime·sched);
			if(runtime·gcwaiting) {
				runtime·sched.sysmonwait = 1;
				runtime·unlock(&runtime·sched);
				runtime·notesleep(&runtime·sched.sysmonnote);
				runtime·noteclear(&runtime·sched.sysmonnote);
			} else
				runtime·unlock(&runtime·sched);
		}
		now = runtime·nanotime() - t0;
		retake(now, ps);
	}
}
| 1600 |
// Make the list of goroutines gp0 (linked through schedlink) runnable
// on the global queue, then wake or create Ms for idle Ps, at most
// one per injected goroutine. Increments *n once per injected g and
// *w once per woken/created M.
static void
inject(G *gp0, int32 *w, int32 *n)
{
	int32 nw;
	G *gp;
	M *mp;
	P *p;

	runtime·lock(&runtime·sched);
	while(gp0) {
		gp = gp0;
		gp0 = gp->schedlink;
		gp->status = Grunnable;
		globrunqput(gp);
		(*n)++;
	}
	runtime·unlock(&runtime·sched);

	nw = *n;
	while(runtime·sched.pidle && nw) {
		runtime·lock(&runtime·sched);
		if(runtime·sched.pidle == nil) {
			// Raced with someone taking the last idle P.
			runtime·unlock(&runtime·sched);
			break;
		}
		(*w)++;
		nw--;
		p = pidleget();
		mp = mget();
		runtime·unlock(&runtime·sched);
		if(mp) {
			entergo(mp, p);
			runtime·notewakeup(&mp->park);
		} else
			newm(runtime·mstart, p, false);
	}
}
| 1638 |
| 1639 static void |
| 1640 globrunqput(G *gp) |
| 1641 { |
| 1642 gp->schedlink = nil; |
| 1643 if(runtime·sched.runqtail) |
| 1644 runtime·sched.runqtail->schedlink = gp; |
| 1645 else |
| 1646 runtime·sched.runqhead = gp; |
| 1647 runtime·sched.runqtail = gp; |
| 1648 runtime·sched.runqsize++; |
| 1649 } |
| 1650 |
// Grab a batch of goroutines from the global runnable queue — about
// runqsize/gomaxprocs+1 of them — returned as a chain linked through
// schedlink (in reverse removal order).
// NOTE(review): mutates runtime·sched without locking here; callers
// presumably hold the sched lock — confirm.
static G*
globrunqget(void)
{
	G *gp, *gp1;
	int32 n;

	if(runtime·sched.runqsize == 0)
		return nil;
	n = runtime·sched.runqsize/runtime·gomaxprocs+1;
	if(n > runtime·sched.runqsize)
		n = runtime·sched.runqsize;
	runtime·sched.runqsize -= n;
	if(runtime·sched.runqsize == 0)
		runtime·sched.runqtail = nil;
	gp1 = nil;
	while(n--) {
		// Pop from the head, push onto the front of gp1.
		gp = runtime·sched.runqhead;
		runtime·sched.runqhead = gp->schedlink;
		gp->schedlink = gp1;
		gp1 = gp;
	}
	return gp1;
}
| 1674 |
| 1675 // sched is locked |
| 1676 static P* |
| 1677 pidleget(void) |
| 1678 { |
| 1679 P *p; |
| 1680 ········ |
| 1681 p = runtime·sched.pidle; |
| 1682 if(p) { |
| 1683 runtime·sched.pidle = p->link; |
| 1684 runtime·sched.npidle--; |
| 1685 } |
| 1686 return p; |
| 1687 } |
| 1688 |
| 1689 // sched is locked |
| 1690 static void |
| 1691 pidleput(P *p) |
| 1692 { |
| 1693 p->link = runtime·sched.pidle; |
| 1694 runtime·sched.pidle = p; |
| 1695 runtime·sched.npidle++; |
283 } | 1696 } |
284 | 1697 |
285 static void | 1698 static void |
286 runqgrow(P *p) | 1699 runqgrow(P *p) |
287 { | 1700 { |
288 G **q; | 1701 G **q; |
289 int32 s, t, h, t2; | 1702 int32 s, t, h, t2; |
290 | 1703 |
291 h = p->runqhead; | 1704 h = p->runqhead; |
292 t = p->runqtail; | 1705 t = p->runqtail; |
(...skipping 13 matching lines...) Expand all Loading... |
306 p->runqtail = t2; | 1719 p->runqtail = t2; |
307 p->runqsize = 2*s; | 1720 p->runqsize = 2*s; |
308 } | 1721 } |
309 | 1722 |
310 static G* | 1723 static G* |
311 runqsteal(P *p, P *p2) | 1724 runqsteal(P *p, P *p2) |
312 { | 1725 { |
313 G *gp, *gp1; | 1726 G *gp, *gp1; |
314 int32 t, h, s, t2, h2, s2, c, c1; | 1727 int32 t, h, s, t2, h2, s2, c, c1; |
315 | 1728 |
316 » if(p2->runqhead==p2->runqtail) { | 1729 » if(p2->runqhead==p2->runqtail) |
317 » » m->schedstats.stealempty++; | |
318 return nil; | 1730 return nil; |
319 } | |
320 if(p < p2) | 1731 if(p < p2) |
321 runtime·lock(p); | 1732 runtime·lock(p); |
322 runtime·lock(p2); | 1733 runtime·lock(p2); |
323 if(p2->runqhead==p2->runqtail) { | 1734 if(p2->runqhead==p2->runqtail) { |
324 runtime·unlock(p2); | 1735 runtime·unlock(p2); |
325 if(p < p2) | 1736 if(p < p2) |
326 runtime·unlock(p); | 1737 runtime·unlock(p); |
327 m->schedstats.stealempty++; | |
328 return nil; | 1738 return nil; |
329 } | 1739 } |
330 if(p >= p2) | 1740 if(p >= p2) |
331 runtime·lock(p); | 1741 runtime·lock(p); |
332 h = p->runqhead; | 1742 h = p->runqhead; |
333 t = p->runqtail; | 1743 t = p->runqtail; |
334 s = p->runqsize; | 1744 s = p->runqsize; |
335 h2 = p2->runqhead; | 1745 h2 = p2->runqhead; |
336 t2 = p2->runqtail; | 1746 t2 = p2->runqtail; |
337 s2 = p2->runqsize; | 1747 s2 = p2->runqsize; |
(...skipping 18 matching lines...) Expand all Loading... |
356 if(h2 == s2) | 1766 if(h2 == s2) |
357 h2 = 0; | 1767 h2 = 0; |
358 p->runq[t] = gp1; | 1768 p->runq[t] = gp1; |
359 t++; | 1769 t++; |
360 if(t==s) | 1770 if(t==s) |
361 t = 0; | 1771 t = 0; |
362 c1++; | 1772 c1++; |
363 } | 1773 } |
364 p->runqtail = t; | 1774 p->runqtail = t; |
365 p2->runqhead = h2; | 1775 p2->runqhead = h2; |
366 m->schedstats.stealn++; | |
367 m->schedstats.stealcnt += c1 + 1; | |
368 runtime·unlock(p2); | 1776 runtime·unlock(p2); |
369 runtime·unlock(p); | 1777 runtime·unlock(p); |
370 return gp; | 1778 return gp; |
371 } | 1779 } |
372 | 1780 |
// Put g on runnable queue.
// p->runq is a circular buffer [head, tail); the queue is grown when
// advancing tail would collide with head.
static void
runqput(P *p, G *gp)
{
	int32 h, t, s;

	runtime·lock(p);
retry:
	h = p->runqhead;
	t = p->runqtail;
	s = p->runqsize;
	if(t==h-1 || (h==0 && t==s-1)) {
		// Buffer full: enlarge it and recompute indices.
		runqgrow(p);
		goto retry;
	}
	p->runq[t] = gp;
	t++;
	if(t==s)	// wrap around
		t = 0;
	p->runqtail = t;
	runtime·unlock(p);
}
395 | 1803 |
396 // Get g from runnable queue. | 1804 // Get g from runnable queue. |
397 static G* | 1805 static G* |
398 gget(P *p) | 1806 runqget(P *p) |
399 { | 1807 { |
400 G *gp; | 1808 G *gp; |
401 int32 t, h, s; | 1809 int32 t, h, s; |
402 | 1810 |
403 if(p->runqhead==p->runqtail) | 1811 if(p->runqhead==p->runqtail) |
404 return nil; | 1812 return nil; |
405 runtime·lock(p); | 1813 runtime·lock(p); |
406 h = p->runqhead; | 1814 h = p->runqhead; |
407 t = p->runqtail; | 1815 t = p->runqtail; |
408 s = p->runqsize; | 1816 s = p->runqsize; |
(...skipping 25 matching lines...) Expand all Loading... |
434 { | 1842 { |
435 M *mp; | 1843 M *mp; |
436 | 1844 |
437 if((mp = runtime·sched.mhead) != nil){ | 1845 if((mp = runtime·sched.mhead) != nil){ |
438 runtime·sched.mhead = mp->schedlink; | 1846 runtime·sched.mhead = mp->schedlink; |
439 runtime·sched.mwait--; | 1847 runtime·sched.mwait--; |
440 } | 1848 } |
441 return mp; | 1849 return mp; |
442 } | 1850 } |
443 | 1851 |
// Mark g ready to run.  gp must not be attached to any M.
void
runtime·ready(G *gp)
{
	if(gp->m)
		runtime·throw("bad g->m in ready");

	// Mark runnable.  A g that is already runnable or running
	// must not be readied a second time.
	if(gp->status == Grunnable || gp->status == Grunning) {
		runtime·printf("goroutine %p has status %d\n", gp, gp->status);
		runtime·throw("bad g->status in ready");
	}
	gp->status = Grunnable;
	// Queue on the current M's P.
	gput(m->p, gp);
	//!!! check pidle (should an idle P be woken here?)
}
460 | |
461 int32 | |
462 runtime·gcprocs(void) | |
463 { | |
464 int32 n; | |
465 | |
466 runtime·lock(&runtime·sched); | |
467 // Figure out how many CPUs to use during GC. | |
468 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. | |
469 n = runtime·gomaxprocs; | |
470 if(n > runtime·ncpu) | |
471 n = runtime·ncpu; | |
472 if(n > MaxGcproc) | |
473 n = MaxGcproc; | |
474 if(n > runtime·sched.mwait+1) // one M is currently running | |
475 n = runtime·sched.mwait+1; | |
476 runtime·unlock(&runtime·sched); | |
477 return n; | |
478 } | |
479 | |
// Wake up nproc-1 idle M's to help with garbage collection.
// Each helper borrows the mcache/stackalloc of a distinct P,
// skipping the P whose caches the current M already uses.
void
runtime·helpgc(int32 nproc)
{
	M *mp;
	int32 n, pos;

	//!!! goidle might just take one idle m
	LOG("%d: helpgc(%d)\n", m->id, nproc);
	runtime·lock(&runtime·sched);
	pos = 0;
	for(n = 1; n < nproc; n++) {  // one M is currently running
		mp = mget();
		if(mp == nil)
			runtime·throw("runtime·gcprocs inconsistency");
		mp->helpgc = 1;
		// Don't hand out the cache this M is using itself.
		if(runtime·allp[pos]->mcache == m->mcache)
			pos++;
		mp->mcache = runtime·allp[pos]->mcache;
		mp->stackalloc = runtime·allp[pos]->stackalloc;
		pos++;
		LOG("%d: helpgc wake %d\n", m->id, mp->id);
		runtime·notewakeup(&mp->park);
	}
	runtime·unlock(&runtime·sched);
}
505 | |
506 void | |
507 runtime·stoptheworld(void) | |
508 { | |
509 int32 acquired, i; | |
510 uint32 s; | |
511 P *p; | |
512 | |
513 LOG("%d: stoptheworld\n", m->id); | |
514 runtime·gcwaiting = 1; //@ atomic | |
515 acquired = 1; | |
516 m->p->status = Plocked; | |
517 while(acquired != runtime·gomaxprocs) { | |
518 LOG(" idle=%p\n", runtime·sched.pidle); | |
519 for(i=0; i<runtime·gomaxprocs; i++) | |
520 LOG(" %d status=%d\n", i, runtime·allp[i]->status); | |
521 for(i=0; i<runtime·gomaxprocs; i++) { | |
522 s = runtime·allp[i]->status; | |
523 if(s == Psyscall && runtime·cas(&runtime·allp[i]->status
, s, Plocked)) { | |
524 LOG(" acquired syscall %d\n", i); | |
525 acquired++; | |
526 } | |
527 } | |
528 runtime·lock(&runtime·sched); | |
529 while (runtime·sched.pidle != nil) { | |
530 p = runtime·sched.pidle; | |
531 p->status = Plocked; | |
532 runtime·sched.pidle = p->link; | |
533 LOG(" acquired idle\n"); | |
534 acquired++;············· | |
535 } | |
536 runtime·unlock(&runtime·sched); | |
537 //!!! replace with blocking | |
538 if(acquired != runtime·gomaxprocs) { | |
539 //for(i=0; i<runtime·gomaxprocs; i++) | |
540 // runtime·printf("p%d %d (%d-%d)\n", i, runtime·al
lp[i]->status, acquired, runtime·gomaxprocs); | |
541 runtime·usleep(1); | |
542 } | |
543 } | |
544 LOG("%d: stoptheworld stopped\n", m->id); | |
545 } | |
546 | |
547 void | |
548 runtime·starttheworld(void) | |
549 { | |
550 LOG("%d: starttheworld\n", m->id); | |
551 runtime·gcwaiting = 0; | |
552 if(newprocs) { | |
553 procresize(newprocs); | |
554 newprocs = 0; | |
555 } else { | |
556 procresize(runtime·gomaxprocs); | |
557 } | |
558 runtime·gosched(); | |
559 } | |
560 | |
// Called to start an M.  Runs on the M's g0/OS stack; never returns.
void
runtime·mstart(void)
{
	// It is used by windows-386 only.  Unfortunately, seh needs
	// to be located on os stack, and mstart runs on os stack
	// for both m0 and m.
	SEH seh;
	P *p;

	LOG("%d: mstart m=%p\n", m->id, m);
	if(g != m->g0)
		runtime·throw("bad runtime·mstart");

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	runtime·gosave(&m->g0->sched);
	m->g0->sched.pc = (void*)-1;  // make sure it is never used
	m->seh = &seh;
	runtime·asminit();
	runtime·minit();

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if(m == &runtime·m0)
		runtime·initsig();

	if(m->helpgc) {
		// Created to assist GC: borrow the P's caches, run the
		// helper, then park on the idle M list via mstop.
		LOG("%d: mstart helpgc\n", m->id);
		m->helpgc = 0;
		m->mcache = m->p->mcache;
		m->stackalloc = m->p->stackalloc;
		runtime·gchelper();
		m->mcache = nil;
		m->stackalloc = nil;
		m->p = nil;
		LOG("%d: gchelper end\n", m->id);
		mstop();
	} else if(m != &runtime·m0) {
		// Regular new M: formally bind to the P handed to us in newm.
		p = m->p;
		m->p = nil;
		entergo(m, p);
	}
	LOG("%d: calling schedule\n", m->id);
	schedule();

	// TODO(brainman): This point is never reached, because scheduler
	// does not release os threads at the moment.  But once this path
	// is enabled, we must remove our seh here.
}
612 | |
// When running with cgo, we call libcgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
void (*libcgo_thread_start)(void*);

// Arguments passed to libcgo_thread_start; see newm.
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;			// the new M to run on the thread
	G *g;			// its scheduler goroutine (set to mp->g0 by newm)
	void (*fn)(void);	// thread entry point (runtime·mstart)
};
625 | |
626 static void | |
627 initgstack(G *newg, byte *stk, int32 stacksize) | |
628 { | |
629 newg->stack0 = (uintptr)stk; | |
630 newg->stackguard = (uintptr)stk + StackGuard; | |
631 newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop); | |
632 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); | |
633 } | |
634 | |
// Create a new m.  It will start off with a call to runtime·mstart.
static void
newm(P *p, bool helpgc)
{
	M *mp;
	int32 addmem, stkoff;

	LOG("%d: newm\n", m->id);
	m->schedstats.nm++;
	// Compute extra memory to allocate behind the M itself:
	// optionally a signal-handling G plus its stack, and (when we
	// create the thread stack ourselves, i.e. not cgo and not
	// Windows) the g0 scheduler stack.
	addmem = 0;
	if(runtime·gsignalstk)
		addmem = sizeof(G) + runtime·gsignalstk;
	stkoff = sizeof(M) + addmem;	// offset of the g0 stack, if any
	if(!runtime·iscgo && !Windows)
		addmem += StackSystem + 8192;
	//!!! all that is now non-GC, can it break something?
	mp = runtime·SysAlloc(sizeof(M) + addmem);
	mcommoninit(mp);
	mp->g0 = &mp->g0buf;
	mp->p = p;
	mp->helpgc = helpgc;
	if(runtime·gsignalstk) {
		// The signal G and its stack live immediately after the M.
		mp->gsignal = (G*)(mp+1);
		initgstack(mp->gsignal, (byte*)(mp->gsignal+1), runtime·gsignalstk);
	}

	if(runtime·iscgo) {
		CgoThreadStart ts;

		if(libcgo_thread_start == nil)
			runtime·throw("libcgo_thread_start missing");
		// pthread_create will make us a stack.
		ts.m = mp;
		ts.g = mp->g0;
		ts.fn = runtime·mstart;
		runtime·asmcgocall(libcgo_thread_start, &ts);
	} else {
		// windows will layout sched stack on os stack
		if(!Windows)
			initgstack(mp->g0, (byte*)mp+stkoff, StackSystem + 8192);
		runtime·newosproc(mp, mp->g0, (byte*)mp->g0->stackbase, runtime·mstart);
	}
}
678 | |
// Park the current M on the idle M list until another thread wakes it.
// If woken with m->helpgc set, run the GC helper and park again;
// otherwise the waker must have handed us a P (m->p != nil on return).
static void
mstop(void)
{
	LOG("%d: mstop\n", m->id);
	if(m->p != nil)
		runtime·throw("mstop: p != nil");
retry:
	runtime·noteclear(&m->park);
	runtime·lock(&runtime·sched);
	mput(m);
	runtime·unlock(&runtime·sched);
	runtime·notesleep(&m->park);
	if(m->helpgc) {
		// Woken only to assist the GC; caches were set by the waker.
		LOG("%d: gchelper\n", m->id);
		m->helpgc = 0;
		runtime·gchelper();
		m->mcache = nil;
		m->stackalloc = nil;
		LOG("%d: gchelper end\n", m->id);
		goto retry;
	}
	LOG("%d: mstop wake\n", m->id);
	if(m->p == nil)
		runtime·throw("mstop: p == nil");
}
704 | |
705 // One round of scheduler: find a goroutine and run it. | |
706 // The argument is the goroutine that was running before | |
707 // schedule was called, or nil if this is the first call. | |
708 // Never returns. | |
709 static void | |
710 schedule(void) | |
711 { | |
712 int32 hz, i, try; | |
713 G *gp; | |
714 P *p; | |
715 | |
716 LOG("%d: schedule p=%p\n", m->id, m->p); | |
717 USED(&gp); | |
718 if(m->locks != 0) | |
719 runtime·throw("schedule holding locks"); | |
720 if(gp == m->g0) | |
721 runtime·throw("schedule of g0"); | |
722 | |
723 top: | |
724 if(runtime·gcwaiting) { | |
725 leavego(m, Pidle); | |
726 mstop(); | |
727 goto top; | |
728 } | |
729 | |
730 gp = gget(m->p); | |
731 if(gp == nil) { | |
732 //!!! random stealing | |
733 for(try=0; try<1; try++) { | |
734 for(i=0; i<runtime·gomaxprocs; i++) { | |
735 p = runtime·allp[i]; | |
736 if(p == m->p) | |
737 gp = gget(p); | |
738 else | |
739 gp = runqsteal(m->p, p); | |
740 if(gp) | |
741 break; | |
742 } | |
743 if(gp) | |
744 break; | |
745 //runtime·usleep(20); | |
746 } | |
747 if(gp == nil) { | |
748 leavego(m, Pidle); | |
749 for(i=0; i<runtime·gomaxprocs; i++) { | |
750 p = runtime·allp[i]; | |
751 if(p && p->runqhead != p->runqtail) { | |
752 runtime·lock(&runtime·sched); | |
753 p = runtime·sched.pidle; | |
754 if(p) | |
755 runtime·sched.pidle = p->link; | |
756 runtime·unlock(&runtime·sched); | |
757 if(p) { | |
758 p->status = Pbusy; | |
759 entergo(m, p); | |
760 goto top; | |
761 } | |
762 break; | |
763 } | |
764 } | |
765 mstop(); | |
766 goto top; | |
767 } | |
768 } | |
769 | |
770 LOG("%d: start running goroutine %p\n", m->id, gp); | |
771 m->p->tick++; | |
772 gp->status = Grunning; | |
773 m->curg = gp; | |
774 gp->m = m; | |
775 | |
776 // Check whether the profiler needs to be turned on or off. | |
777 hz = runtime·sched.profilehz; | |
778 if(m->profilehz != hz) | |
779 runtime·resetcpuprofiler(hz); | |
780 | |
781 if(gp->sched.pc == (byte*)runtime·goexit) { // kickoff | |
782 runtime·gogocall(&gp->sched, (void(*)(void))gp->entry); | |
783 } | |
784 runtime·gogo(&gp->sched, 0); | |
785 } | |
786 | |
// Continuation of runtime·park, running on g0: release the lock the
// parking goroutine held, then find something else to run.
static void
park0(G *gp)
{
	USED(&gp);
	if(m->waitunlockf) {
		m->waitunlockf(m->waitlock);
		m->waitunlockf = nil;
	}
	schedule();
}

// Atomically parks g and unlocks the lock.
void
runtime·park(void *l, void(*unlockf)(void*), int8 *reason)
{
	LOG("%d: park l=%p reason=%s\n", m->id, l, reason);
	if(g == m->g0)
		runtime·throw("park of g0");
	// Stash the unlock callback; park0 invokes it on g0 so the
	// unlock happens only after g has stopped running.
	m->waitlock = l;
	m->waitunlockf = unlockf;
	g->status = Gwaiting;
	g->waitreason = reason;
	g->m = nil;
	runtime·mcall(park0);
}
812 | |
// Continuation of runtime·gosched, on g0: requeue gp as runnable
// on the current P and pick the next goroutine.
static void
gosched0(G *gp)
{
	LOG("%d: gosched0 gp=%p\n", m->id, gp);
	gp->status = Grunnable;
	gp->m = nil;
	gput(m->p, gp);
	schedule();
}

// Voluntarily yield the processor to other goroutines.
void
runtime·gosched(void)
{
	if(m->p->status != Pbusy)
		runtime·throw("m->p->status != Pbusy");
	runtime·mcall(gosched0);
}
830 | |
// On g0.  Finish off the exiting goroutine gp and reschedule.
static void
goexit0(G *gp)
{
	gp->status = Gdead;
	gp->m = nil;
	// Undo any thread-locking the goroutine held.
	if(gp->lockedm) {
		gp->lockedm = nil;
		m->lockedg = nil;
	}
	runtime·unwindstack(gp, nil);
	// Recycle the G through the per-P free list.
	gfput(m->p, gp);
	m->schedstats.gend++;
	m->schedstats.gfput++;
	schedule();
}

// Finishes execution of the current goroutine.
void
runtime·goexit(void)
{
	runtime·mcall(goexit0);
}
853 | |
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
#pragma textflag 7
void
runtime·entersyscall(void)
{
	LOG("%d: entersyscall g=%p p=%p\n", m->id, g, m->p);
	//m->sysenterticks = runtime·cputicks();
	if(m->profilehz > 0)
		runtime·setprof(false);

	// Leave SP around for gc and traceback.
	runtime·gosave(&g->sched);
	g->gcsp = g->sched.sp;
	g->gcstack = g->stackbase;
	g->gcguard = g->stackguard;
	g->status = Gsyscall;
	// Sanity-check that the saved SP lies inside the stack segment.
	if(g->gcsp < g->gcguard-StackGuard || g->gcstack < g->gcsp) {
		// runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
		//	g->gcsp, g->gcguard-StackGuard, g->gcstack);
		runtime·throw("entersyscall");
	}

	// Detach from the P's resources; setting Psyscall last lets
	// stoptheworld/exitsyscall CAS the P away while we block.
	m->mcache = nil;
	m->stackalloc = nil;
	m->p->tick++;
	m->p->m = nil;
	m->p->status = Psyscall; //@@@ store-release
}
889 | |
// Slow-path continuation of runtime·exitsyscall, on g0: our old P
// was taken away, so requeue gp and try to acquire some P.
static void
exitsyscall0(G *gp)
{
	P *p;

	LOG("%d: exitsyscall0\n", m->id);
	gp->status = Grunnable;
	gp->m = nil;
	//!!! anything better than put to allp[0]?
	gput(runtime·allp[0], gp);
	runtime·lock(&runtime·sched);
	p = runtime·sched.pidle;
	if(p)
		runtime·sched.pidle = p->link;
	runtime·unlock(&runtime·sched);
	if(p) {
		// Got an idle P: run on it.
		p->status = Pbusy;
		entergo(m, p);
		schedule();
	}
	// No P available: park this M until one is handed to us.
	mstop();
	schedule();
}
913 | |
914 // The goroutine g exited its system call. | |
915 // Arrange for it to run on a cpu again. | |
916 // This is called only from the go syscall library, not | |
917 // from the low-level system calls used by the runtime. | |
918 void | |
919 runtime·exitsyscall(void) | |
920 { | |
921 uint32 s; | |
922 int32 other; | |
923 | |
924 other = 0; | |
925 LOG("%d: exitsyscall g=%p\n", m->id, g); | |
926 | |
927 //runtime·printf("syscall=%d\n", (int32)(runtime·cputicks() - m->sysenterticks))
; | |
928 | |
929 s = m->p->status; | |
930 if(s==Psyscall && runtime·cas(&m->p->status, s, Pbusy)) { | |
931 if(other == 0) | |
932 m->schedstats.sysexitfast++; | |
933 else | |
934 m->schedstats.sysexitmed++; | |
935 LOG("%d: exitsyscall fast\n", m->id); | |
936 // There's a cpu for us, so we can run. | |
937 m->mcache = m->p->mcache; | |
938 m->stackalloc = m->p->stackalloc; | |
939 m->p->m = m; | |
940 g->status = Grunning; | |
941 // Garbage collector isn't running (since we are), | |
942 // so okay to clear gcstack. | |
943 g->gcstack = (uintptr)nil; | |
944 | |
945 // Check whether the profiler needs to be turned on or off. | |
946 if(m->profilehz > 0) | |
947 runtime·setprof(true); | |
948 return; | |
949 } | |
950 | |
951 //!!! try to get pidle | |
952 /* | |
953 P *idlep, *sysp, *p; | |
954 int32 i; | |
955 | |
956 idlep = nil; | |
957 sysp = nil; | |
958 for(i=0; i<runtime·gomaxprocs; i++) { | |
959 p = runtime·allp[i]; | |
960 if(p == nil) | |
961 continue; | |
962 if(p->status == Pidle) { | |
963 idlep = p; | |
964 break; | |
965 } else if(p->status == Psyscall) | |
966 sysp = p; | |
967 } | |
968 if(idlep == nil) | |
969 idlep = sysp; | |
970 if(idlep) { | |
971 other = 1; | |
972 goto retry; | |
973 } | |
974 */ | |
975 | |
976 m->schedstats.sysexitslow++; | |
977 LOG("%d: exitsyscall slow p->status=%d\n", m->id, s); | |
978 if(m->profilehz > 0) | |
979 runtime·setprof(true); | |
980 | |
981 m->p = nil; | |
982 runtime·mcall(exitsyscall0); | |
983 | |
984 // Gosched returned, so we're allowed to run now. | |
985 // Delete the gcstack information that we left for | |
986 // the garbage collector during the system call. | |
987 // Must wait until now because until gosched returns | |
988 // we don't know for sure that the garbage collector | |
989 // is not running. | |
990 g->gcstack = (uintptr)nil; | |
991 } | |
992 | |
// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment.  The function's return value is in
// m->cret.
void
runtime·oldstack(void)
{
	Stktop *top, old;
	uint32 argsize;
	uintptr cret;
	byte *sp;
	G *g1;

	//printf("oldstack m->cret=%p\n", m->cret);

	g1 = m->curg;
	top = (Stktop*)g1->stackbase;
	sp = (byte*)top;
	// Copy the Stktop out before the segment may be freed below.
	old = *top;
	argsize = old.argsize;
	if(argsize > 0) {
		// Copy the results back to the caller's frame.
		sp -= argsize;
		runtime·memmove(top->argp, sp, argsize);
	}

	if(old.free != 0)
		runtime·stackfree((byte*)g1->stackguard - StackGuard, old.free);
	// Restore the previous segment's bounds.
	g1->stackbase = (uintptr)old.stackbase;
	g1->stackguard = (uintptr)old.stackguard;

	cret = m->cret;
	m->cret = 0;  // drop reference
	runtime·gogo(&old.gobuf, cret);
}
1026 | |
// Called from reflect·call or from runtime·morestack when a new
// stack segment is needed.  Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
void
runtime·newstack(void)
{
	int32 framesize, argsize;
	Stktop *top;
	byte *stk, *sp;
	G *g1;
	Gobuf label;
	bool reflectcall;
	uintptr free;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	g1 = m->curg;

	if(m->morebuf.sp < g1->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, g1->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}
	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

	// reflect.call passes a frame size of 1 as a sentinel.
	reflectcall = framesize==1;
	if(reflectcall)
		framesize = 0;

	if(reflectcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > g1->stackguard) {
		// special case: called from reflect.call (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)g1->stackguard - StackGuard;
		free = 0;
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		stk = runtime·stackalloc(framesize);
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;
	}

	//runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
	//framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, g1->stackbase);

	// Record the old segment in the new Stktop so runtime·oldstack
	// can unwind back to it.
	top->stackbase = (byte*)g1->stackbase;
	top->stackguard = (byte*)g1->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;
	m->moreargp = nil;
	m->morebuf.pc = nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = g1->ispanic;
	g1->ispanic = false;

	g1->stackbase = (uintptr)top;
	g1->stackguard = (uintptr)stk + StackGuard;

	// Copy the arguments onto the top of the new segment.
	sp = (byte*)top;
	if(argsize > 0) {
		sp -= argsize;
		runtime·memmove(sp, top->argp, argsize);
	}
	if(thechar == '5') {
		// caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = (uintptr)sp;
	label.pc = (byte*)runtime·lessstack;
	label.g = m->curg;
	runtime·gogocall(&label, m->morepc);

	*(int32*)345 = 123;	// never return
}
1121 | |
// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack.  This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.
static void
mstackalloc(G *gp)
{
	// gp->param carries the requested size in; the allocated
	// stack pointer goes back out through the same field.
	gp->param = runtime·stackalloc((uintptr)gp->param);
	runtime·gogo(&gp->sched, 0);
}
1132 | |
1133 // Allocate a new g, with a stack big enough for stacksize bytes. | |
1134 G* | |
1135 runtime·malg(int32 stacksize) | |
1136 { | |
1137 G *newg; | |
1138 byte *stk; | |
1139 | |
1140 if(StackTop < sizeof(Stktop)) { | |
1141 runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (in
t32)StackTop, (int32)sizeof(Stktop)); | |
1142 runtime·throw("runtime: bad stack.h"); | |
1143 } | |
1144 | |
1145 newg = runtime·malloc(sizeof(G)); | |
1146 if(stacksize >= 0) { | |
1147 if(g == m->g0) { | |
1148 // running on scheduler stack already. | |
1149 stk = runtime·stackalloc(StackSystem + stacksize); | |
1150 } else { | |
1151 // have to call stackalloc on scheduler stack. | |
1152 g->param = (void*)(StackSystem + stacksize); | |
1153 runtime·mcall(mstackalloc); | |
1154 stk = g->param; | |
1155 g->param = nil; | |
1156 } | |
1157 newg->stack0 = (uintptr)stk; | |
1158 newg->stackguard = (uintptr)stk + StackGuard; | |
1159 newg->stackbase = (uintptr)stk + StackSystem + stacksize - sizeo
f(Stktop); | |
1160 runtime·memclr((byte*)newg->stackbase, sizeof(Stktop)); | |
1161 } | |
1162 return newg; | |
1163 } | |
1164 | |
// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred.  It's OK for this to call
// functions that split the stack.
#pragma textflag 7
void
runtime·newproc(int32 siz, byte* fn, ...)
{
	byte *argp;

	if(thechar == '5')
		argp = (byte*)(&fn+2);  // skip caller's saved LR
	else
		argp = (byte*)(&fn+1);
	runtime·newproc1(fn, argp, siz, 0, runtime·getcallerpc(&siz));
}
1184 | |
1185 // Create a new g running fn with narg bytes of arguments starting | |
1186 // at argp and returning nret bytes of results. callerpc is the | |
1187 // address of the go statement that created this. The new g is put | |
1188 // on the queue of g's waiting to run. | |
1189 G* | |
1190 runtime·newproc1(byte *fn, byte *argp, int32 narg, int32 nret, void *callerpc) | |
1191 { | |
1192 byte *sp; | |
1193 G *newg; | |
1194 int32 siz; | |
1195 | |
1196 //printf("newproc1 %p %p narg=%d nret=%d\n", fn, argp, narg, nret); | |
1197 m->schedstats.gstart++; | |
1198 siz = narg + nret; | |
1199 siz = (siz+7) & ~7; | |
1200 | |
1201 // We could instead create a secondary stack frame | |
1202 // and make it look like goexit was on the original but | |
1203 // the call to the actual goroutine function was split. | |
1204 // Not worth it: this is almost always an error. | |
1205 if(siz > StackMin - 1024) | |
1206 runtime·throw("runtime.newproc: function arguments too large for
new goroutine"); | |
1207 | |
1208 if((newg = gfget(m->p)) != nil) { | |
1209 m->schedstats.gfget++; | |
1210 if(newg->stackguard - StackGuard != newg->stack0) | |
1211 runtime·throw("invalid stack in newg"); | |
1212 } else { | |
1213 m->schedstats.galloc++; | |
1214 LOG("%d: MALG %p\n", m->id, fn); | |
1215 newg = runtime·malg(StackMin); | |
1216 newg->alllink = m->p->allg; | |
1217 m->p->allg = newg; | |
1218 } | |
1219 | |
1220 sp = (byte*)newg->stackbase; | |
1221 sp -= siz; | |
1222 runtime·memmove(sp, argp, narg); | |
1223 if(thechar == '5') { | |
1224 // caller's LR | |
1225 sp -= sizeof(void*); | |
1226 *(void**)sp = nil; | |
1227 } | |
1228 | |
1229 LOG("%d: newproc %p\n", m->id, newg); | |
1230 newg->sched.sp = (uintptr)sp; | |
1231 newg->sched.pc = (byte*)runtime·goexit; | |
1232 newg->sched.g = newg; | |
1233 newg->entry = fn; | |
1234 newg->gopc = (uintptr)callerpc; | |
1235 newg->status = Grunnable; | |
1236 gput(m->p, newg); | |
1237 if(runtime·sched.pidle && fn != (byte*)main·main) { | |
1238 runtime·lock(&runtime·sched); | |
1239 if(runtime·sched.pidle) { | |
1240 P *p; | |
1241 p = runtime·sched.pidle; | |
1242 runtime·sched.pidle = p->link; | |
1243 M *mp; | |
1244 mp = runtime·sched.mhead; | |
1245 if(mp) { | |
1246 runtime·sched.mhead = mp->schedlink; | |
1247 runtime·sched.mcount--; | |
1248 } | |
1249 runtime·unlock(&runtime·sched); | |
1250 p->status = Pbusy; | |
1251 if(mp) { | |
1252 entergo(mp, p); | |
1253 runtime·notewakeup(&mp->park); | |
1254 } else { | |
1255 newm(p, false); | |
1256 } | |
1257 } else | |
1258 runtime·unlock(&runtime·sched); | |
1259 } | |
1260 return newg; | |
1261 } | |
1262 | |
// Put gp on the per-P gfree list.  (Caller must NOT hold the sched
// lock: gfput acquires it itself when spilling to the global list.)
static void
gfput(P *p, G *gp)
{
	// The recycled g must still own a pristine stack segment.
	if(gp->stackguard - StackGuard != gp->stack0)
		runtime·throw("invalid stack in gfput");
	gp->schedlink = p->gfree;
	p->gfree = gp;
	p->gfreecnt++;
	// If the per-P cache grows too large, move half of it to the
	// global free list under the sched lock.
	if(p->gfreecnt >= 64) {
		runtime·lock(&runtime·sched);
		while(p->gfreecnt >= 32) {
			p->gfreecnt--;
			gp = p->gfree;
			p->gfree = gp->schedlink;
			gp->schedlink = runtime·sched.gfree;
			runtime·sched.gfree = gp;
		}
		runtime·unlock(&runtime·sched);
	}
}
1284 | 1873 |
// Get a g from the per-P gfree list, refilling from the global list
// if the local cache is empty.  (Caller must NOT hold the sched lock:
// gfget acquires it itself when refilling.)
static G*
gfget(P *p)
{
	G *gp;

retry:
	gp = p->gfree;
	// Local cache empty: batch up to 32 g's from the global list.
	if(gp == nil && runtime·sched.gfree) {
		runtime·lock(&runtime·sched);
		while(p->gfreecnt < 32 && runtime·sched.gfree) {
			p->gfreecnt++;
			gp = runtime·sched.gfree;
			runtime·sched.gfree = gp->schedlink;
			gp->schedlink = p->gfree;
			p->gfree = gp;
		}
		runtime·unlock(&runtime·sched);
		goto retry;
	}
	if(gp) {
		p->gfree = gp->schedlink;
		p->gfreecnt--;
	}
	return gp;
}
1311 | |
// runtime.Breakpoint: trap into the debugger.
void
runtime·Breakpoint(void)
{
	runtime·breakpoint();
}

// runtime.Gosched: yield the processor to other goroutines.
void
runtime·Gosched(void)
{
	runtime·gosched();
}
1323 | |
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is stronger
int32
runtime·gomaxprocsfunc(int32 n)
{
	int32 ret;

	LOG("%d: gomaxprocsfunc %d\n", m->id, n);
	if(n > maxgomaxprocs)
		n = maxgomaxprocs;
	runtime·lock(&runtime·sched);
	ret = runtime·gomaxprocs;
	if(n <= 0 || n == ret) {
		// Query only, or no change: just report the current value.
		runtime·unlock(&runtime·sched);
		return ret;
	}
	runtime·unlock(&runtime·sched);

	// Changing the count requires stopping the world; the new value
	// is applied by starttheworld via the newprocs variable.
	runtime·semacquire(&runtime·worldsema);
	m->gcing = 1;
	runtime·stoptheworld();
	newprocs = n;
	m->gcing = 0;
	runtime·semrelease(&runtime·worldsema);
	runtime·starttheworld();

	return ret;
}
1352 | |
// runtime.LockOSThread: currently a no-op in this scheduler rewrite;
// the old implementation is kept below for reference.
void
runtime·LockOSThread(void)
{
	//!!! implement me.
	/*
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = true;
		return;
	}
	m->lockedg = g;
	g->lockedm = m;
	*/
}

// runtime.UnlockOSThread: currently a no-op; see LockOSThread above.
void
runtime·UnlockOSThread(void)
{
	/*
	if(m == &runtime·m0 && runtime·sched.init) {
		runtime·sched.lockmain = false;
		return;
	}
	m->lockedg = nil;
	g->lockedm = nil;
	*/
}
1379 | |
1380 bool | |
1381 runtime·lockedOSThread(void) | |
1382 { | |
1383 return g->lockedm != nil && m->lockedg != nil; | |
1384 } | |
1385 | |
// for testing of callbacks
void
runtime·golockedOSThread(bool ret)
{
	ret = runtime·lockedOSThread();
	FLUSH(&ret);
}

// for testing of wire, unwire
void
runtime·mid(uint32 ret)
{
	ret = m->id;
	FLUSH(&ret);
}

// runtime.NumGoroutine.
// NOTE(review): stubbed to 1 while sched.gcount is unavailable in
// this rewrite — the real implementation is commented out below.
void
runtime·NumGoroutine(int32 ret)
{
	//ret = runtime·sched.gcount;
	ret = 1;
	FLUSH(&ret);
}

// Stubbed goroutine count; real implementation commented out.
int32
runtime·gcount(void)
{
	//return runtime·sched.gcount;
	return 1;
}

// Reports runtime·sched.mcount.
int32
runtime·mcount(void)
{
	return runtime·sched.mcount;
}
1422 | |
// Reported when mcall is invoked while already on the g0 stack.
void
runtime·badmcall(void)  // called from assembly
{
	runtime·throw("runtime: mcall called on m->g0 stack");
}

// Reported when an mcall'ed function returns instead of rescheduling.
void
runtime·badmcall2(void)  // called from assembly
{
	runtime·throw("runtime: mcall function returned");
}
1434 | |
// CPU-profiling state, protected by the embedded Lock.
static struct {
	Lock;
	void (*fn)(uintptr*, int32);	// callback invoked with each traceback
	int32 hz;			// sampling rate, in samples per second
	uintptr pcbuf[100];		// scratch buffer for traceback PCs
} prof;
1441 | |
// Called if we receive a SIGPROF signal.
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp)
{
	int32 n;

	// Unlocked pre-check to avoid taking the lock on every tick.
	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime·lock(&prof);
	if(prof.fn == nil) {
		// Profiling was turned off between the check and the lock.
		runtime·unlock(&prof);
		return;
	}
	n = runtime·gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
	if(n > 0)
		prof.fn(prof.pcbuf, n);
	runtime·unlock(&prof);
}
1461 | |
// Arrange to call fn with a traceback hz times a second.
// fn == nil or hz == 0 disables profiling.
void
runtime·setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime·resetcpuprofiler(0);

	runtime·lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime·unlock(&prof);
	// Publish the rate so schedule() can reset per-M profilers.
	runtime·lock(&runtime·sched);
	runtime·sched.profilehz = hz;
	runtime·unlock(&runtime·sched);

	if(hz != 0)
		runtime·resetcpuprofiler(hz);
}
1490 | |
1491 // Change number of processors. The world is stopped. | |
1492 static void | |
1493 procresize(int32 new) | |
1494 { | |
1495 int32 i, old; | |
1496 G *gp; | |
1497 P *p; | |
1498 | |
1499 runtime·lock(&runtime·sched); //!!! | |
1500 old = runtime·gomaxprocs; | |
1501 LOG("%d: procresize %d->%d\n", m->id, old, new); | |
1502 if(old < 0 || old > maxgomaxprocs || new <= 0 || new > maxgomaxprocs) | |
1503 runtime·throw("procresize: invalid arg"); | |
1504 if(old == new) { | |
1505 for(i=0; i<new; i++) { | |
1506 if(runtime·allp[i] == m->p) | |
1507 runtime·allp[i]->status = Pbusy; | |
1508 else { | |
1509 runtime·allp[i]->status = Pidle; | |
1510 runtime·allp[i]->link = runtime·sched.pidle; | |
1511 runtime·sched.pidle = runtime·allp[i]; | |
1512 } | |
1513 } | |
1514 runtime·unlock(&runtime·sched); | |
1515 return; | |
1516 } | |
1517 | |
1518 runtime·singleproc = new == 1; | |
1519 runtime·gomaxprocs = new; | |
1520 for(i=0; i<new; i++) { | |
1521 p = runtime·allp[i]; | |
1522 if(p == nil) { | |
1523 //!!! goidle can see status Pidle | |
1524 p = (P*)runtime·mallocgc(sizeof(runtime·allp[i][0]), 0,
0, 1); | |
1525 p->status = Plocked; | |
1526 runtime·allp[i] = p; //@@@ store-release | |
1527 } | |
1528 if(p->mcache == nil) { | |
1529 if(old==0 && i==0) | |
1530 p->mcache = m->mcache; | |
1531 else | |
1532 p->mcache = runtime·allocmcache(); | |
1533 } | |
1534 if(p->stackalloc == nil) { | |
1535 if(old==0 && i==0) | |
1536 p->stackalloc = m->stackalloc; | |
1537 else { | |
1538 p->stackalloc = runtime·malloc(sizeof(*p->stacka
lloc)); | |
1539 runtime·FixAlloc_Init(p->stackalloc, FixedStack,
runtime·SysAlloc, nil, nil); | |
1540 } | |
1541 } | |
1542 if(p->runq == nil) { | |
1543 p->runqsize = 128; | |
1544 p->runq = (G**)runtime·mallocgc(p->runqsize*sizeof(G*),
0, 0, 1); | |
1545 } | |
1546 } | |
1547 | |
1548 for(i=1; i<old; i++) { | |
1549 for(;;) { | |
1550 gp = gget(runtime·allp[i]); | |
1551 if(gp == nil) | |
1552 break; | |
1553 //TODO: spread more evenly. | |
1554 gput(runtime·allp[0], gp); | |
1555 } | |
1556 } | |
1557 | |
1558 for(i=new; i<old; i++) { | |
1559 runtime·freemcache(runtime·allp[i]->mcache); | |
1560 runtime·allp[i]->mcache = nil; | |
1561 runtime·allp[i]->status = Pdead; | |
1562 //TODO: free freeg | |
1563 //TODO: free stackalloc | |
1564 } | |
1565 | |
1566 if(m->p) | |
1567 m->p->m = nil; | |
1568 m->p = nil; | |
1569 m->mcache = nil; | |
1570 m->stackalloc = nil; | |
1571 runtime·allp[0]->m = nil; | |
1572 runtime·allp[0]->status = Pbusy; | |
1573 entergo(m, runtime·allp[0]); | |
1574 for(i=1; i<new; i++) { | |
1575 runtime·allp[i]->status = Pidle; //@@@ store-release | |
1576 runtime·allp[i]->link = runtime·sched.pidle; | |
1577 runtime·sched.pidle = runtime·allp[i]; | |
1578 } | |
1579 runtime·unlock(&runtime·sched); | |
1580 } | |
1581 | |
// entergo binds m mp to P p: mp takes over p's mcache and stackalloc
// and the two point at each other.  p must be in state Pbusy with no
// m attached, and mp must not already own a P.
static void
entergo(M *mp, P *p)
{
	LOG("%d: entergo m=%d p=%p p->m=%p, p->status=%d, p->mcache=%p\n", m->id, mp->id, p, p->m, p->status, p->mcache);
	if(mp->p || mp->mcache)
		runtime·throw("entergo: already in go");
	if(p->m || p->status != Pbusy) {
		runtime·printf("entergo: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
		runtime·throw("entergo: invalid p state");
	}
	// Hand the P's per-processor allocators to the m, then link the two.
	mp->mcache = p->mcache;
	mp->stackalloc = p->stackalloc;
	mp->p = p;
	p->m = mp;
}
1597 | |
1598 static void | |
1599 leavego(M *mp, uint32 status) | |
1600 { | |
1601 P *p; | |
1602 | |
1603 LOG("%d: leavego %d\n", mp->id, status); | |
1604 // sched is locked | |
1605 if(mp->p == nil || mp->mcache == nil) | |
1606 runtime·throw("leavego: invalid arg"); | |
1607 p = mp->p; | |
1608 if(p->m != mp || p->mcache != mp->mcache || p->status != Pbusy) { | |
1609 runtime·printf("leavego: m=%p m->p=%p p->m=%p m->mcache=%p p->mc
ache=%p p->status=%d\n", | |
1610 mp, mp->p, p->m, m->mcache, p->mcache, p->status); | |
1611 runtime·throw("leavego: invalid p state"); | |
1612 } | |
1613 mp->p = nil; | |
1614 mp->mcache = nil; | |
1615 mp->stackalloc = nil; | |
1616 p->m = nil; | |
1617 p->status = status; //@@@ store-release | |
1618 | |
1619 runtime·lock(&runtime·sched); | |
1620 p->link = runtime·sched.pidle; | |
1621 runtime·sched.pidle = p; | |
1622 runtime·unlock(&runtime·sched); | |
1623 } | |
1624 | |
// retake scans all P's and reclaims any that were abandoned in Psyscall
// (their m entered a system call).  A successful CAS Psyscall->Pbusy
// claims the P; it is then handed to an idle m (woken from its park
// note) or, if none is available, to a freshly created m.
// NOTE(review): i is uint32 but runtime·gomaxprocs is int32 — the
// comparison mixes signedness; harmless while gomaxprocs >= 0, but
// worth normalizing.
static void
retake(void)
{
	uint32 i, s;
	P *p;
	M *mp;

	for(i=0; i<runtime·gomaxprocs; i++) {
		p = runtime·allp[i];
		//!!! procresize may be in progress
		// do something if GC is in progress (help).
		if(p==nil)
			continue;
		s = p->status;
		// CAS claims the P so only one retaker (or the returning
		// syscall m) wins.
		if(s == Psyscall && runtime·cas(&p->status, s, Pbusy)) {
			LOG("retake %p(%d)\n", p, i);
			runtime·lock(&runtime·sched);
			mp = mget();
			runtime·unlock(&runtime·sched);
			if(mp) {
				entergo(mp, p);
				runtime·notewakeup(&mp->park);
			} else {
				newm(p, false);
			}
		}
	}
}
1653 | |
// sysmon is the system monitor: it polls the network (netwait) and
// retakes P's abandoned in system calls.  textflag 7 keeps it off the
// split-stack machinery.
#pragma textflag 7
static void
sysmon(void)
{
	G *gp;

	// This is a special dedicated thread.
	// It works w/o mcache nor stackalloc, it may work concurrently with GC.
	runtime·asminit();
	runtime·minit();
	LOG("sysmon check\n");
	for(;;) {
		LOG("sysmon check\n");
		gp = runtime·netwait(20);
		retake();
		// NOTE(review): this loop only walks the list returned by
		// netwait via schedlink — the network-ready goroutines are
		// dropped, not made runnable.  Marked //!!! by the author;
		// presumably unfinished (see "can't grow runq").  TODO confirm.
		while(gp) {
			//!!! can't grow runq
			gp = gp->schedlink;
		}
	}
}
LEFT | RIGHT |