Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 6114046: code review 6114046: runtime, reflect, ld, gc: garbage collection precision ... (Closed)
Left Patch Set: diff -r ecab7a7e7c7e https://go.googlecode.com/hg/ Created 11 years, 2 months ago
Right Patch Set: diff -r d6e06d0f3c29 https://code.google.com/p/go/ Created 10 years, 10 months ago
Unified diff between the two patch sets; lines marked '-' appear only in the left patch set, lines marked '+' only in the right, everything else is common context.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// TODO(rsc): double-check stats.

package runtime
#include "runtime.h"
#include "arch_GOARCH.h"
-#include "stack.h"
#include "malloc.h"
-#include "defs_GOOS_GOARCH.h"
#include "type.h"
#include "typekind.h"
#include "race.h"

-#pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */
-MHeap runtime·mheap;
+MHeap *runtime·mheap;
+
+int32   runtime·checking;

extern MStats mstats;   // defined in zruntime_def_$GOOS_$GOARCH.go

extern volatile intgo runtime·MemProfileRate;
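
The declaration change above is the heart of this patch set: runtime·mheap stops being a statically allocated MHeap, hidden from the collector with "#pragma dataflag 16", and becomes a pointer that mallocinit (below) fills in via SysAlloc. A plausible reading, given the CL title, is that once the collector scans the data segment precisely, keeping a large untyped structure there is no longer attractive; memory obtained from SysAlloc sits outside the scanned region entirely. A minimal standalone C sketch of the pattern, not the runtime's code (sys_alloc stands in for runtime·SysAlloc):

#include <stdio.h>
#include <stdlib.h>

typedef struct Heap { unsigned char bitmap[1 << 20]; } Heap;

static Heap *heap;   /* was: static Heap heap; in the left patch set */

/* stand-in for an OS-level allocator such as runtime·SysAlloc */
static void *sys_alloc(size_t n) { return calloc(1, n); }

int main(void) {
        if((heap = sys_alloc(sizeof *heap)) == NULL) {
                fprintf(stderr, "cannot allocate heap metadata\n");
                return 1;
        }
        printf("heap metadata at %p, %zu bytes kept off the data segment\n",
               (void*)heap, sizeof *heap);
        free(heap);
        return 0;
}
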
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
void*
runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
{
        int32 sizeclass;
        intgo rate;
        MCache *c;
        uintptr npages;
        MSpan *s;
        void *v;

-       if(runtime·gcwaiting && g != m->g0 && m->locks == 0)
+       if(runtime·gcwaiting && g != m->g0 && m->locks == 0 && dogc)
                runtime·gosched();
        if(m->mallocing)
                runtime·throw("malloc/free - deadlock");
-       if(runtime·gcing)
-               runtime·throw("malloc called while gcing");
        m->mallocing = 1;
        if(size == 0)
                size = 1;

        if(DebugTypeAtBlockEnd)
                size += sizeof(uintptr);

        c = m->mcache;
        c->local_nmalloc++;
        if(size <= MaxSmallSize) {
                // Allocate from mcache free lists.
-               sizeclass = runtime·SizeToClass(size);
+               // Inlined version of SizeToClass().
+               if(size <= 1024-8)
+                       sizeclass = runtime·size_to_class8[(size+7)>>3];
+               else
+                       sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
                size = runtime·class_to_size[sizeclass];
                v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
                if(v == nil)
                        runtime·throw("out of memory");
                c->local_alloc += size;
                c->local_total_alloc += size;
                c->local_by_size[sizeclass].nmalloc++;
        } else {
                // TODO(rsc): Report tracebacks for very large allocations.

                // Allocate directly from heap.
                npages = size >> PageShift;
                if((size & PageMask) != 0)
                        npages++;
-               s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, zeroed);
+               s = runtime·MHeap_Alloc(runtime·mheap, npages, 0, 1, zeroed);
                if(s == nil)
                        runtime·throw("out of memory");
                size = npages<<PageShift;
                c->local_alloc += size;
                c->local_total_alloc += size;
                v = (void*)(s->start << PageShift);

                // setup for mark sweep
                runtime·markspan(v, 0, 0, true);
        }

        if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
                // purge cache stats to prevent overflow
-               runtime·lock(&runtime·mheap);
+               runtime·lock(runtime·mheap);
                runtime·purgecachedstats(c);
-               runtime·unlock(&runtime·mheap);
+               runtime·unlock(runtime·mheap);
        }

        if(!(flag & FlagNoGC))
                runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);

        if(DebugTypeAtBlockEnd)
                *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;

        m->mallocing = 0;

(...skipping 58 matching lines...)

        if(raceenabled)
                runtime·racefree(v);

        // Find size class for v.
        sizeclass = s->sizeclass;
        c = m->mcache;
        if(sizeclass == 0) {
                // Large object.
                size = s->npages<<PageShift;
-               *(uintptr*)(s->start<<PageShift) = 1;   // mark as "needs to be zeroed"
+               *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;       // mark as "needs to be zeroed"
                // Must mark v freed before calling unmarkspan and MHeap_Free:
                // they might coalesce v into other spans and change the bitmap further.
                runtime·markfreed(v, size);
                runtime·unmarkspan(v, 1<<PageShift);
-               runtime·MHeap_Free(&runtime·mheap, s, 1);
+               runtime·MHeap_Free(runtime·mheap, s, 1);
        } else {
                // Small object.
                size = runtime·class_to_size[sizeclass];
                if(size > sizeof(uintptr))
-                       ((uintptr*)v)[1] = 1;   // mark as "needs to be zeroed"
+                       ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;       // mark as "needs to be zeroed"
                // Must mark v freed before calling MCache_Free:
                // it might coalesce v and other blocks into a bigger span
                // and change the bitmap further.
                runtime·markfreed(v, size);
                c->local_by_size[sizeclass].nfree++;
                runtime·MCache_Free(c, v, sizeclass, size);
        }
        c->local_nfree++;
        c->local_alloc -= size;
        if(prof)
                runtime·MProf_Free(v, size);
        m->mallocing = 0;
}
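
The right patch set inlines SizeToClass into mallocgc as two table lookups: sizes up to 1024-8 bytes are mapped in 8-byte steps through size_to_class8, larger small sizes in 128-byte steps through size_to_class128. The standalone sketch below reproduces just the index arithmetic; the class tables here are toy values (the real ones are generated in the runtime's msize.c), so treat the output as illustrative:

#include <stdio.h>

enum { MaxSmallSize = 32*1024 };

static int size_to_class8[1024/8 + 1];
static int size_to_class128[(MaxSmallSize-1024)/128 + 1];

int main(void) {
        int i;
        size_t sizes[] = { 1, 8, 9, 1016, 1017, 2048, 32768 };

        /* Toy tables: pretend class n covers sizes up to 8*n bytes,
         * and class 128+n covers sizes up to 1024+128*n bytes. */
        for(i = 0; i < 1024/8 + 1; i++)
                size_to_class8[i] = i;
        for(i = 0; i < (MaxSmallSize-1024)/128 + 1; i++)
                size_to_class128[i] = 1024/8 + i;

        for(i = 0; i < (int)(sizeof sizes/sizeof sizes[0]); i++) {
                size_t size = sizes[i];
                int sizeclass;
                /* the same index arithmetic the patch inlines */
                if(size <= 1024-8)
                        sizeclass = size_to_class8[(size+7)>>3];
                else
                        sizeclass = size_to_class128[(size-1024+127)>>7];
                printf("size %5zu -> class %d\n", size, sizeclass);
        }
        return 0;
}
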

int32
runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
        uintptr n, i;
        byte *p;
        MSpan *s;

        m->mcache->local_nlookup++;
        if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
                // purge cache stats to prevent overflow
-               runtime·lock(&runtime·mheap);
+               runtime·lock(runtime·mheap);
                runtime·purgecachedstats(m->mcache);
-               runtime·unlock(&runtime·mheap);
+               runtime·unlock(runtime·mheap);
        }

-       s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
+       s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
        if(sp)
                *sp = s;
        if(s == nil) {
                runtime·checkfreed(v, 1);
                if(base)
                        *base = nil;
                if(size)
                        *size = 0;
                return 0;
        }
(...skipping 23 matching lines...)

        return 1;
}

MCache*
runtime·allocmcache(void)
{
        intgo rate;
        MCache *c;

-       runtime·lock(&runtime·mheap);
-       c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
-       mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
-       mstats.mcache_sys = runtime·mheap.cachealloc.sys;
-       runtime·unlock(&runtime·mheap);
+       runtime·lock(runtime·mheap);
+       c = runtime·FixAlloc_Alloc(&runtime·mheap->cachealloc);
+       mstats.mcache_inuse = runtime·mheap->cachealloc.inuse;
+       mstats.mcache_sys = runtime·mheap->cachealloc.sys;
+       runtime·unlock(runtime·mheap);
        runtime·memclr((byte*)c, sizeof(*c));

        // Set first allocation sample size.
        rate = runtime·MemProfileRate;
        if(rate > 0x3fffffff)   // make 2*rate not overflow
                rate = 0x3fffffff;
        if(rate != 0)
                c->next_sample = runtime·fastrand1() % (2*rate);

        return c;
}
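
The sample-size logic above draws next_sample uniformly from [0, 2*rate), which makes the expected gap between heap-profile samples equal to MemProfileRate while decorrelating samples from regular allocation patterns; the clamp to 0x3fffffff keeps 2*rate from overflowing a 32-bit value. A standalone sketch of the arithmetic (rand_u32 is a stand-in for runtime·fastrand1):

#include <stdio.h>
#include <stdlib.h>

static unsigned rand_u32(void) {
        /* widen rand() so the modulus below is effectively unbiased */
        return ((unsigned)rand() << 16) ^ (unsigned)rand();
}

int main(void) {
        long rate = 512*1024;        /* default MemProfileRate: 512 kB */
        long long sum = 0;
        int i, n = 1000000;

        if(rate > 0x3fffffff)        /* make 2*rate not overflow */
                rate = 0x3fffffff;
        for(i = 0; i < n; i++)
                sum += rand_u32() % (2*rate);
        printf("mean sample gap: %lld bytes (MemProfileRate = %ld)\n",
               sum/n, rate);
        return 0;
}
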

void
runtime·freemcache(MCache *c)
{
        runtime·MCache_ReleaseAll(c);
-       runtime·lock(&runtime·mheap);
+       runtime·lock(runtime·mheap);
        runtime·purgecachedstats(c);
-       runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
-       runtime·unlock(&runtime·mheap);
+       runtime·FixAlloc_Free(&runtime·mheap->cachealloc, c);
+       runtime·unlock(runtime·mheap);
}

void
runtime·purgecachedstats(MCache *c)
{
+       int32 i;
+
        // Protected by either heap or GC lock.
        mstats.heap_alloc += c->local_cachealloc;
        c->local_cachealloc = 0;
        mstats.heap_objects += c->local_objects;
        c->local_objects = 0;
        mstats.nmalloc += c->local_nmalloc;
        c->local_nmalloc = 0;
        mstats.nfree += c->local_nfree;
        c->local_nfree = 0;
        mstats.nlookup += c->local_nlookup;
        c->local_nlookup = 0;
        mstats.alloc += c->local_alloc;
        c->local_alloc= 0;
        mstats.total_alloc += c->local_total_alloc;
        c->local_total_alloc= 0;
+       for(i=0; i<nelem(c->local_by_size); i++) {
+               mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
+               c->local_by_size[i].nmalloc = 0;
+               mstats.by_size[i].nfree += c->local_by_size[i].nfree;
+               c->local_by_size[i].nfree = 0;
+       }
}
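
purgecachedstats exists because the per-cache counters are word-sized: on a 32-bit build they would wrap, so callers fold them into the 64-bit mstats fields under the heap lock once they approach 1<<30, as seen in mallocgc and mlookup above. A toy illustration of that pattern, not the runtime's code:

#include <stdio.h>
#include <stdint.h>

static uint64_t global_total;   /* mstats-style 64-bit accumulator */

static void purge(uint32_t *local) {
        /* the real runtime holds the heap lock here */
        global_total += *local;
        *local = 0;
}

int main(void) {
        uint32_t local_total = 0;   /* word-sized per-thread counter */
        int i;

        for(i = 0; i < 6; i++) {
                local_total += 1u << 29;       /* count 512 MB allocated */
                if(local_total >= 1u << 30)    /* flush before it can wrap */
                        purge(&local_total);
        }
        purge(&local_total);
        printf("total allocated: %llu bytes\n",
               (unsigned long long)global_total);   /* 3 GB, no overflow */
        return 0;
}
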

uintptr runtime·sizeof_C_MStats = sizeof(MStats);

#define MaxArena32 (2U<<30)

void
runtime·mallocinit(void)
{
        byte *p;
        uintptr arena_size, bitmap_size;
        extern byte end[];
        byte *want;
        uintptr limit;

        p = nil;
        arena_size = 0;
        bitmap_size = 0;

        // for 64-bit build
        USED(p);
        USED(arena_size);
        USED(bitmap_size);

+       if((runtime·mheap = runtime·SysAlloc(sizeof(*runtime·mheap))) == nil)
+               runtime·throw("runtime: cannot allocate heap metadata");
+
        runtime·InitSizes();

-       limit = runtime·memlimit();
+       // limit = runtime·memlimit();
+       // See https://code.google.com/p/go/issues/detail?id=5049
+       // TODO(rsc): Fix after 1.1.
+       limit = 0;

        // Set up the allocation arena, a contiguous area of memory where
        // allocated data will be found. The arena begins with a bitmap large
        // enough to hold 4 bits per allocated word.
        if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
                // On a 64-bit machine, allocate from a single contiguous reservation.
                // 128 GB (MaxMem) should be big enough for now.
                //
                // The code will work with the reservation at any address, but ask
                // SysReserve to use 0x000000c000000000 if possible.
(...skipping 55 matching lines...)
                want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
                p = runtime·SysReserve(want, bitmap_size + arena_size);
                if(p == nil)
                        runtime·throw("runtime: cannot reserve arena virtual address space");
                if((uintptr)p & (((uintptr)1<<PageShift)-1))
                        runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
        }
        if((uintptr)p & (((uintptr)1<<PageShift)-1))
                runtime·throw("runtime: SysReserve returned unaligned address");

-       runtime·mheap.bitmap = p;
-       runtime·mheap.arena_start = p + bitmap_size;
-       runtime·mheap.arena_used = runtime·mheap.arena_start;
-       runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size;
+       runtime·mheap->bitmap = p;
+       runtime·mheap->arena_start = p + bitmap_size;
+       runtime·mheap->arena_used = runtime·mheap->arena_start;
+       runtime·mheap->arena_end = runtime·mheap->arena_start + arena_size;

        // Initialize the rest of the allocator.
-       runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc);
+       runtime·MHeap_Init(runtime·mheap, runtime·SysAlloc);
        m->mcache = runtime·allocmcache();

        // See if it works.
        runtime·free(runtime·malloc(1));
}
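
The arena comment above fixes the bitmap at 4 bits per allocated word, so on a 64-bit build the bitmap occupies arena_size/16 bytes: an 8 GB reservation in front of the 128 GB (MaxMem) arena. The worked numbers below are illustrative arithmetic assuming the 64-bit case, not the exact expressions from the runtime headers:

#include <stdio.h>

int main(void) {
        unsigned long long arena_size = 128ULL << 30;   /* 128 GB (MaxMem) */
        unsigned long long wordsize = sizeof(void*);    /* 8 on 64-bit */
        unsigned long long bits_per_word = 4;
        unsigned long long bitmap_size =
                arena_size / (wordsize * 8 / bits_per_word);

        printf("arena : %llu GB of reserved address space\n", arena_size >> 30);
        printf("bitmap: %llu GB at the base of the reservation\n",
               bitmap_size >> 30);
        return 0;
}

Note that this is reserved address space, not committed memory; pages are mapped in only as the arena grows.
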

void*
runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
{
        byte *p;
(...skipping 77 matching lines...)
        while(buf < endbuf) {
                v = (void*)*buf;
                if(false) *buf = 0;
                buf++;
                typ = *buf;
                buf++;

                // (Manually inlined copy of runtime·MHeap_Lookup)
                p = (uintptr)v>>PageShift;
                if(sizeof(void*) == 8)
-                       p -= (uintptr)runtime·mheap.arena_start >> PageShift;
-               s = runtime·mheap.map[p];
+                       p -= (uintptr)runtime·mheap->arena_start >> PageShift;
+               s = runtime·mheap->map[p];

                if(s->sizeclass == 0) {
                        s->types.compression = MTypes_Single;
                        s->types.data = typ;
                        continue;
                }

                size = s->elemsize;
                ofs = ((uintptr)v - (s->start<<PageShift)) / size;

                switch(s->types.compression) {
                case MTypes_Empty:
                        ntypes = (s->npages << PageShift) / size;
                        nbytes3 = 8*sizeof(uintptr) + 1*ntypes;

                        if(!sysalloc) {
-                               data3 = runtime·mallocgc(nbytes3, FlagNoPointers, 0, 1);
+                               data3 = runtime·mallocgc(nbytes3, FlagNoProfiling|FlagNoPointers, 0, 1);
                        } else {
                                data3 = runtime·SysAlloc(nbytes3);
+                               if(data3 == nil)
+                                       runtime·throw("runtime: cannot allocate memory");
                                if(0) runtime·printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3);
                        }

                        s->types.compression = MTypes_Bytes;
                        s->types.sysalloc = sysalloc;
                        s->types.data = (uintptr)data3;

                        ((uintptr*)data3)[1] = typ;
                        data3[8*sizeof(uintptr) + ofs] = 1;
                        break;
(...skipping 13 matching lines...)
                                        break;
                                }
                        }
                        if(j < 8) {
                                data3[8*sizeof(uintptr) + ofs] = j;
                        } else {
                                ntypes = (s->npages << PageShift) / size;
                                nbytes2 = ntypes * sizeof(uintptr);

                                if(!sysalloc) {
-                                       data2 = runtime·mallocgc(nbytes2, FlagNoPointers, 0, 1);
+                                       data2 = runtime·mallocgc(nbytes2, FlagNoProfiling|FlagNoPointers, 0, 1);
                                } else {
                                        data2 = runtime·SysAlloc(nbytes2);
+                                       if(data2 == nil)
+                                               runtime·throw("runtime: cannot allocate memory");
                                        if(0) runtime·printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
                                }

                                sysalloc3 = s->types.sysalloc;

                                s->types.compression = MTypes_Words;
                                s->types.sysalloc = sysalloc;
                                s->types.data = (uintptr)data2;

                                // Move the contents of data3 to data2. Then deallocate data3.
(...skipping 37 matching lines...)
        buf[i+0] = (uintptr)v;
        buf[i+1] = t;
        i += 2;
        mp->settype_bufsize = i;

        if(i == nelem(mp->settype_buf)) {
                runtime·settype_flush(mp, false);
        }

        if(DebugTypeAtBlockEnd) {
-               s = runtime·MHeap_Lookup(&runtime·mheap, v);
+               s = runtime·MHeap_Lookup(runtime·mheap, v);
                *(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
        }
}
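
The "(Manually inlined copy of runtime·MHeap_Lookup)" block in settype_flush converts an address to a page index, rebased on arena_start on 64-bit where the arena does not begin at address 0, and indexes a flat per-page MSpan* table. A toy, self-contained version of that lookup (the real PageShift lives in the runtime headers; 12, i.e. 4 kB pages, is assumed here):

#include <stdio.h>
#include <stdint.h>

enum { PageShift = 12 };

typedef struct MSpan { int sizeclass; } MSpan;

int main(void) {
        static MSpan span = { 3 };
        static MSpan *map[16];                /* one entry per arena page */
        uintptr_t arena_start = 0x1000000;
        uintptr_t v = arena_start + 5*(1<<PageShift) + 200;  /* some object */
        uintptr_t p;

        /* map pages 5..7 of the arena to one three-page span */
        for(p = 5; p < 8; p++)
                map[p] = &span;

        p = v >> PageShift;
        p -= arena_start >> PageShift;        /* 64-bit: rebase on arena_start */
        printf("v=%#lx -> page %lu -> span with sizeclass %d\n",
               (unsigned long)v, (unsigned long)p, map[p]->sizeclass);
        return 0;
}
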

void
runtime·settype_sysfree(MSpan *s)
{
        uintptr ntypes, nbytes;

        if(!s->types.sysalloc)
(...skipping 18 matching lines...)
        }
}
646 665
647 uintptr 666 uintptr
648 runtime·gettype(void *v) 667 runtime·gettype(void *v)
649 { 668 {
650 MSpan *s; 669 MSpan *s;
651 uintptr t, ofs; 670 uintptr t, ofs;
652 byte *data; 671 byte *data;
653 672
654 » s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); 673 » s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
655 if(s != nil) { 674 if(s != nil) {
656 t = 0; 675 t = 0;
657 switch(s->types.compression) { 676 switch(s->types.compression) {
658 case MTypes_Empty: 677 case MTypes_Empty:
659 break; 678 break;
660 case MTypes_Single: 679 case MTypes_Single:
661 t = s->types.data; 680 t = s->types.data;
662 break; 681 break;
663 case MTypes_Words: 682 case MTypes_Words:
664 ofs = (uintptr)v - (s->start<<PageShift); 683 ofs = (uintptr)v - (s->start<<PageShift);
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after
743 if(false) { 762 if(false) {
744 runtime·printf("new %S: %p\n", *typ->string, ret ); 763 runtime·printf("new %S: %p\n", *typ->string, ret );
745 } 764 }
746 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObjec t); 765 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObjec t);
747 } 766 }
748 } 767 }
749 768
750 return ret; 769 return ret;
751 } 770 }
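
gettype above decodes the per-span type information that settype_flush builds: MTypes_Single records one type for the whole span, MTypes_Words a full uintptr per object, and MTypes_Bytes the compressed form with an eight-entry type table followed by one index byte per object (the data3 layout visible in settype_flush). A sketch of the decoding, with illustrative types rather than the runtime's structs:

#include <stdio.h>
#include <stdint.h>

enum { MTypes_Empty, MTypes_Single, MTypes_Words, MTypes_Bytes };

typedef struct MTypes {
        int compression;
        uintptr_t data;   /* meaning depends on compression */
} MTypes;

static uintptr_t gettype(MTypes *t, uintptr_t ofs) {
        uint8_t *data;
        switch(t->compression) {
        case MTypes_Single:
                return t->data;
        case MTypes_Words:
                return ((uintptr_t*)t->data)[ofs];
        case MTypes_Bytes:
                data = (uint8_t*)t->data;
                /* table of 8 uintptrs, then one index byte per object */
                return ((uintptr_t*)data)[data[8*sizeof(uintptr_t) + ofs]];
        }
        return 0;         /* MTypes_Empty: no information recorded */
}

int main(void) {
        /* backing store kept in a uintptr_t array so the table is aligned */
        static uintptr_t store[8 + 16];
        uint8_t *buf = (uint8_t*)store;
        MTypes t = { MTypes_Bytes, (uintptr_t)buf };

        ((uintptr_t*)buf)[1] = 0xdead;        /* type table slot 1 */
        buf[8*sizeof(uintptr_t) + 5] = 1;     /* object 5 uses slot 1 */
        printf("type of object 5: %#lx\n", (unsigned long)gettype(&t, 5));
        return 0;
}
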

-typedef struct StackCacheNode StackCacheNode;
-struct StackCacheNode
-{
-       StackCacheNode *next;
-       void*   batch[StackCacheBatch-1];
-};
-
-static StackCacheNode *stackcache;
-static Lock stackcachemu;
-
-// stackcacherefill/stackcacherelease implement global cache of stack segments.
-// The cache is required to prevent unlimited growth of per-thread caches.
-static void
-stackcacherefill(void)
-{
-       StackCacheNode *n;
-       int32 i, pos;
-
-       runtime·lock(&stackcachemu);
-       n = stackcache;
-       if(n)
-               stackcache = n->next;
-       runtime·unlock(&stackcachemu);
-       if(n == nil) {
-               n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch);
-               if(n == nil)
-                       runtime·throw("out of memory (stackcacherefill)");
-               runtime·xadd64(&mstats.stacks_sys, FixedStack*StackCacheBatch);
-               for(i = 0; i < StackCacheBatch-1; i++)
-                       n->batch[i] = (byte*)n + (i+1)*FixedStack;
-       }
-       pos = m->stackcachepos;
-       for(i = 0; i < StackCacheBatch-1; i++) {
-               m->stackcache[pos] = n->batch[i];
-               pos = (pos + 1) % StackCacheSize;
-       }
-       m->stackcache[pos] = n;
-       pos = (pos + 1) % StackCacheSize;
-       m->stackcachepos = pos;
-       m->stackcachecnt += StackCacheBatch;
-}
-
-static void
-stackcacherelease(void)
-{
-       StackCacheNode *n;
-       uint32 i, pos;
-
-       pos = (m->stackcachepos - m->stackcachecnt) % StackCacheSize;
-       n = (StackCacheNode*)m->stackcache[pos];
-       pos = (pos + 1) % StackCacheSize;
-       for(i = 0; i < StackCacheBatch-1; i++) {
-               n->batch[i] = m->stackcache[pos];
-               pos = (pos + 1) % StackCacheSize;
-       }
-       m->stackcachecnt -= StackCacheBatch;
-       runtime·lock(&stackcachemu);
-       n->next = stackcache;
-       stackcache = n;
-       runtime·unlock(&stackcachemu);
-}
-
-void*
-runtime·stackalloc(uint32 n)
-{
-       uint32 pos;
-       void *v;
-
-       // Stackalloc must be called on scheduler stack, so that we
-       // never try to grow the stack during the code that stackalloc runs.
-       // Doing so would cause a deadlock (issue 1547).
-       if(g != m->g0)
-               runtime·throw("stackalloc not on scheduler stack");
-
-       // Stack allocator uses malloc/free most of the time,
-       // but if we're in the middle of malloc and need stack,
-       // we have to do something else to avoid deadlock.
-       // In that case, we fall back on a fixed-size free-list
-       // allocator, assuming that inside malloc all the stack
-       // frames are small, so that all the stack allocations
-       // will be a single size, the minimum (right now, 5k).
-       if(n == FixedStack || m->mallocing || m->gcing) {
-               if(n != FixedStack) {
-                       runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
-                       runtime·throw("stackalloc");
-               }
-               if(m->stackcachecnt == 0)
-                       stackcacherefill();
-               pos = m->stackcachepos;
-               pos = (pos - 1) % StackCacheSize;
-               v = m->stackcache[pos];
-               m->stackcachepos = pos;
-               m->stackcachecnt--;
-               m->stackinuse++;
-               return v;
-       }
-       return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
-}
-
-void
-runtime·stackfree(void *v, uintptr n)
-{
-       uint32 pos;
-
-       if(n == FixedStack || m->mallocing || m->gcing) {
-               if(m->stackcachecnt == StackCacheSize)
-                       stackcacherelease();
-               pos = m->stackcachepos;
-               m->stackcache[pos] = v;
-               m->stackcachepos = (pos + 1) % StackCacheSize;
-               m->stackcachecnt++;
-               m->stackinuse--;
-               return;
-       }
-       runtime·free(v);
-}
-
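
The right patch set drops the entire stack-segment cache from this file (stackcacherefill/stackcacherelease and the stackalloc/stackfree fallback), along with the stack.h include at the top; presumably the code was relocated rather than deleted outright, since the runtime still needs a malloc-free path for stacks while mallocing or gcing. The deleted code refills a per-thread ring from a global list one StackCacheBatch at a time, bounding both lock traffic and per-thread hoarding. A single-threaded toy of that batching idea (so the global lock is omitted; the ring size is a power of two so unsigned wraparound in the index arithmetic stays correct, as in the deleted code):

#include <stdio.h>
#include <stdlib.h>

enum { Batch = 4, RingSize = 8, FixedStack = 4096 };

static void *ring[RingSize];
static unsigned pos, cnt;

static void refill(void) {
        int i;
        /* global list empty in this toy: carve a fresh block of segments */
        char *block = malloc((size_t)FixedStack * Batch);
        for(i = 0; i < Batch; i++) {
                ring[pos] = block + i*FixedStack;
                pos = (pos + 1) % RingSize;
        }
        cnt += Batch;
}

static void *stackalloc(void) {
        if(cnt == 0)
                refill();
        pos = (pos - 1) % RingSize;   /* unsigned wrap is safe: 8 | 2^32 */
        cnt--;
        return ring[pos];
}

int main(void) {
        void *a = stackalloc(), *b = stackalloc();
        printf("segments: %p %p (still cached: %u)\n", a, b, cnt);
        return 0;
}
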
func GC() {
        runtime·gc(1);
}

func SetFinalizer(obj Eface, finalizer Eface) {
        byte *base;
        uintptr size;
        FuncType *ft;
        int32 i;
        uintptr nret;
(...skipping 32 matching lines...)
                runtime·printf("runtime.SetFinalizer: finalizer already set\n");
                goto throw;
        }
        return;

badfunc:
        runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
throw:
        runtime·throw("runtime.SetFinalizer");
}
