Rietveld Code Review Tool
Help | Bug tracker | Discussion group | Source code | Sign in
(1546)

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 6114046: code review 6114046: runtime, reflect, ld, gc: garbage collection precision ... (Closed)
Left Patch Set: diff -r 8f9b0fbf4c15 https://go.googlecode.com/hg/ Created 11 years, 1 month ago
Right Patch Set: diff -r d6e06d0f3c29 https://code.google.com/p/go/ Created 10 years, 10 months ago
Left:
Right:
Use n/p to move between diff chunks; N/P to move between comments. Please Sign in to add in-line comments.
Jump to:
Left: Side by side diff | Download
Right: Side by side diff | Download
« no previous file with change/comment | « no previous file | src/pkg/runtime/mgc0.c » ('j') | no next file with change/comment »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
LEFTRIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 // See malloc.h for overview. 5 // See malloc.h for overview.
6 // 6 //
7 // TODO(rsc): double-check stats. 7 // TODO(rsc): double-check stats.
8 8
9 package runtime 9 package runtime
10 #include "runtime.h" 10 #include "runtime.h"
11 #include "arch_GOARCH.h" 11 #include "arch_GOARCH.h"
12 #include "stack.h"
13 #include "malloc.h" 12 #include "malloc.h"
14 #include "defs_GOOS_GOARCH.h"
15 #include "type.h" 13 #include "type.h"
16 #include "typekind.h" 14 #include "typekind.h"
17 #include "race.h" 15 #include "race.h"
18 16
19 #pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */ 17 MHeap *runtime·mheap;
20 MHeap runtime·mheap;
21 18
22 int32 runtime·checking; 19 int32 runtime·checking;
23 20
24 extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go 21 extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
25 22
26 extern volatile intgo runtime·MemProfileRate; 23 extern volatile intgo runtime·MemProfileRate;
27 24
28 // Allocate an object of at least size bytes. 25 // Allocate an object of at least size bytes.
29 // Small objects are allocated from the per-thread cache's free lists. 26 // Small objects are allocated from the per-thread cache's free lists.
30 // Large objects (> 32 kB) are allocated straight from the heap. 27 // Large objects (> 32 kB) are allocated straight from the heap.
31 void* 28 void*
32 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) 29 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
33 { 30 {
34 int32 sizeclass; 31 int32 sizeclass;
35 intgo rate; 32 intgo rate;
36 MCache *c; 33 MCache *c;
37 uintptr npages; 34 uintptr npages;
38 MSpan *s; 35 MSpan *s;
39 void *v; 36 void *v;
40 37
41 » if(runtime·gcwaiting && g != m->g0 && m->locks == 0) 38 » if(runtime·gcwaiting && g != m->g0 && m->locks == 0 && dogc)
42 runtime·gosched(); 39 runtime·gosched();
43 if(m->mallocing) 40 if(m->mallocing)
44 runtime·throw("malloc/free - deadlock"); 41 runtime·throw("malloc/free - deadlock");
45 if(runtime·gcing)
46 runtime·throw("malloc called while gcing");
47 m->mallocing = 1; 42 m->mallocing = 1;
48 if(size == 0) 43 if(size == 0)
49 size = 1; 44 size = 1;
50 45
51 if(DebugTypeAtBlockEnd) 46 if(DebugTypeAtBlockEnd)
52 size += sizeof(uintptr); 47 size += sizeof(uintptr);
53 48
54 c = m->mcache; 49 c = m->mcache;
55 c->local_nmalloc++; 50 c->local_nmalloc++;
56 if(size <= MaxSmallSize) { 51 if(size <= MaxSmallSize) {
57 // Allocate from mcache free lists. 52 // Allocate from mcache free lists.
58 » » sizeclass = runtime·SizeToClass(size); 53 » » // Inlined version of SizeToClass().
54 » » if(size <= 1024-8)
55 » » » sizeclass = runtime·size_to_class8[(size+7)>>3];
56 » » else
57 » » » sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
59 size = runtime·class_to_size[sizeclass]; 58 size = runtime·class_to_size[sizeclass];
60 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed); 59 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
61 if(v == nil) 60 if(v == nil)
62 runtime·throw("out of memory"); 61 runtime·throw("out of memory");
63 c->local_alloc += size; 62 c->local_alloc += size;
64 c->local_total_alloc += size; 63 c->local_total_alloc += size;
65 c->local_by_size[sizeclass].nmalloc++; 64 c->local_by_size[sizeclass].nmalloc++;
66 } else { 65 } else {
67 // TODO(rsc): Report tracebacks for very large allocations. 66 // TODO(rsc): Report tracebacks for very large allocations.
68 67
69 // Allocate directly from heap. 68 // Allocate directly from heap.
70 npages = size >> PageShift; 69 npages = size >> PageShift;
71 if((size & PageMask) != 0) 70 if((size & PageMask) != 0)
72 npages++; 71 npages++;
73 » » s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, zeroed); 72 » » s = runtime·MHeap_Alloc(runtime·mheap, npages, 0, 1, zeroed);
74 if(s == nil) 73 if(s == nil)
75 runtime·throw("out of memory"); 74 runtime·throw("out of memory");
76 size = npages<<PageShift; 75 size = npages<<PageShift;
77 c->local_alloc += size; 76 c->local_alloc += size;
78 c->local_total_alloc += size; 77 c->local_total_alloc += size;
79 v = (void*)(s->start << PageShift); 78 v = (void*)(s->start << PageShift);
80 79
81 // setup for mark sweep 80 // setup for mark sweep
82 runtime·markspan(v, 0, 0, true); 81 runtime·markspan(v, 0, 0, true);
83 } 82 }
84 83
85 if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) { 84 if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
86 // purge cache stats to prevent overflow 85 // purge cache stats to prevent overflow
87 » » runtime·lock(&runtime·mheap); 86 » » runtime·lock(runtime·mheap);
88 runtime·purgecachedstats(c); 87 runtime·purgecachedstats(c);
89 » » runtime·unlock(&runtime·mheap); 88 » » runtime·unlock(runtime·mheap);
90 } 89 }
91 90
92 if(!(flag & FlagNoGC)) 91 if(!(flag & FlagNoGC))
93 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0); 92 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);
94 93
95 if(DebugTypeAtBlockEnd) 94 if(DebugTypeAtBlockEnd)
96 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0; 95 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;
97 96
98 m->mallocing = 0; 97 m->mallocing = 0;
99 98
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
158 157
159 if(raceenabled) 158 if(raceenabled)
160 runtime·racefree(v); 159 runtime·racefree(v);
161 160
162 // Find size class for v. 161 // Find size class for v.
163 sizeclass = s->sizeclass; 162 sizeclass = s->sizeclass;
164 c = m->mcache; 163 c = m->mcache;
165 if(sizeclass == 0) { 164 if(sizeclass == 0) {
166 // Large object. 165 // Large object.
167 size = s->npages<<PageShift; 166 size = s->npages<<PageShift;
168 » » *(uintptr*)(s->start<<PageShift) = 1;» // mark as "needs to be zeroed" 167 » » *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;» // mark as "needs to be zeroed"
169 // Must mark v freed before calling unmarkspan and MHeap_Free: 168 // Must mark v freed before calling unmarkspan and MHeap_Free:
170 // they might coalesce v into other spans and change the bitmap further. 169 // they might coalesce v into other spans and change the bitmap further.
171 runtime·markfreed(v, size); 170 runtime·markfreed(v, size);
172 runtime·unmarkspan(v, 1<<PageShift); 171 runtime·unmarkspan(v, 1<<PageShift);
173 » » runtime·MHeap_Free(&runtime·mheap, s, 1); 172 » » runtime·MHeap_Free(runtime·mheap, s, 1);
174 } else { 173 } else {
175 // Small object. 174 // Small object.
176 size = runtime·class_to_size[sizeclass]; 175 size = runtime·class_to_size[sizeclass];
177 if(size > sizeof(uintptr)) 176 if(size > sizeof(uintptr))
178 » » » ((uintptr*)v)[1] = 1;» // mark as "needs to be zeroed" 177 » » » ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;» // mark as "needs to be zeroed"
179 // Must mark v freed before calling MCache_Free: 178 // Must mark v freed before calling MCache_Free:
180 // it might coalesce v and other blocks into a bigger span 179 // it might coalesce v and other blocks into a bigger span
181 // and change the bitmap further. 180 // and change the bitmap further.
182 runtime·markfreed(v, size); 181 runtime·markfreed(v, size);
183 c->local_by_size[sizeclass].nfree++; 182 c->local_by_size[sizeclass].nfree++;
184 runtime·MCache_Free(c, v, sizeclass, size); 183 runtime·MCache_Free(c, v, sizeclass, size);
185 } 184 }
186 c->local_nfree++; 185 c->local_nfree++;
187 c->local_alloc -= size; 186 c->local_alloc -= size;
188 if(prof) 187 if(prof)
189 runtime·MProf_Free(v, size); 188 runtime·MProf_Free(v, size);
190 m->mallocing = 0; 189 m->mallocing = 0;
191 } 190 }
192 191
193 int32 192 int32
194 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp) 193 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
195 { 194 {
196 uintptr n, i; 195 uintptr n, i;
197 byte *p; 196 byte *p;
198 MSpan *s; 197 MSpan *s;
199 198
200 m->mcache->local_nlookup++; 199 m->mcache->local_nlookup++;
201 if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) { 200 if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
202 // purge cache stats to prevent overflow 201 // purge cache stats to prevent overflow
203 » » runtime·lock(&runtime·mheap); 202 » » runtime·lock(runtime·mheap);
204 runtime·purgecachedstats(m->mcache); 203 runtime·purgecachedstats(m->mcache);
205 » » runtime·unlock(&runtime·mheap); 204 » » runtime·unlock(runtime·mheap);
206 » } 205 » }
207 206
208 » s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); 207 » s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
209 if(sp) 208 if(sp)
210 *sp = s; 209 *sp = s;
211 if(s == nil) { 210 if(s == nil) {
212 runtime·checkfreed(v, 1); 211 runtime·checkfreed(v, 1);
213 if(base) 212 if(base)
214 *base = nil; 213 *base = nil;
215 if(size) 214 if(size)
216 *size = 0; 215 *size = 0;
217 return 0; 216 return 0;
218 } 217 }
(...skipping 23 matching lines...) Expand all
242 241
243 return 1; 242 return 1;
244 } 243 }
245 244
246 MCache* 245 MCache*
247 runtime·allocmcache(void) 246 runtime·allocmcache(void)
248 { 247 {
249 intgo rate; 248 intgo rate;
250 MCache *c; 249 MCache *c;
251 250
252 » runtime·lock(&runtime·mheap); 251 » runtime·lock(runtime·mheap);
253 » c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc); 252 » c = runtime·FixAlloc_Alloc(&runtime·mheap->cachealloc);
254 » mstats.mcache_inuse = runtime·mheap.cachealloc.inuse; 253 » mstats.mcache_inuse = runtime·mheap->cachealloc.inuse;
255 » mstats.mcache_sys = runtime·mheap.cachealloc.sys; 254 » mstats.mcache_sys = runtime·mheap->cachealloc.sys;
256 » runtime·unlock(&runtime·mheap); 255 » runtime·unlock(runtime·mheap);
257 runtime·memclr((byte*)c, sizeof(*c)); 256 runtime·memclr((byte*)c, sizeof(*c));
258 257
259 // Set first allocation sample size. 258 // Set first allocation sample size.
260 rate = runtime·MemProfileRate; 259 rate = runtime·MemProfileRate;
261 if(rate > 0x3fffffff) // make 2*rate not overflow 260 if(rate > 0x3fffffff) // make 2*rate not overflow
262 rate = 0x3fffffff; 261 rate = 0x3fffffff;
263 if(rate != 0) 262 if(rate != 0)
264 c->next_sample = runtime·fastrand1() % (2*rate); 263 c->next_sample = runtime·fastrand1() % (2*rate);
265 264
266 return c; 265 return c;
267 } 266 }
268 267
269 void 268 void
270 runtime·freemcache(MCache *c) 269 runtime·freemcache(MCache *c)
271 { 270 {
272 runtime·MCache_ReleaseAll(c); 271 runtime·MCache_ReleaseAll(c);
273 » runtime·lock(&runtime·mheap); 272 » runtime·lock(runtime·mheap);
274 runtime·purgecachedstats(c); 273 runtime·purgecachedstats(c);
275 » runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c); 274 » runtime·FixAlloc_Free(&runtime·mheap->cachealloc, c);
276 » runtime·unlock(&runtime·mheap); 275 » runtime·unlock(runtime·mheap);
277 } 276 }
278 277
279 void 278 void
280 runtime·purgecachedstats(MCache *c) 279 runtime·purgecachedstats(MCache *c)
281 { 280 {
281 int32 i;
282
282 // Protected by either heap or GC lock. 283 // Protected by either heap or GC lock.
283 mstats.heap_alloc += c->local_cachealloc; 284 mstats.heap_alloc += c->local_cachealloc;
284 c->local_cachealloc = 0; 285 c->local_cachealloc = 0;
285 mstats.heap_objects += c->local_objects; 286 mstats.heap_objects += c->local_objects;
286 c->local_objects = 0; 287 c->local_objects = 0;
287 mstats.nmalloc += c->local_nmalloc; 288 mstats.nmalloc += c->local_nmalloc;
288 c->local_nmalloc = 0; 289 c->local_nmalloc = 0;
289 mstats.nfree += c->local_nfree; 290 mstats.nfree += c->local_nfree;
290 c->local_nfree = 0; 291 c->local_nfree = 0;
291 mstats.nlookup += c->local_nlookup; 292 mstats.nlookup += c->local_nlookup;
292 c->local_nlookup = 0; 293 c->local_nlookup = 0;
293 mstats.alloc += c->local_alloc; 294 mstats.alloc += c->local_alloc;
294 c->local_alloc= 0; 295 c->local_alloc= 0;
295 mstats.total_alloc += c->local_total_alloc; 296 mstats.total_alloc += c->local_total_alloc;
296 c->local_total_alloc= 0; 297 c->local_total_alloc= 0;
298 for(i=0; i<nelem(c->local_by_size); i++) {
299 mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
300 c->local_by_size[i].nmalloc = 0;
301 mstats.by_size[i].nfree += c->local_by_size[i].nfree;
302 c->local_by_size[i].nfree = 0;
303 }
297 } 304 }
298 305
299 uintptr runtime·sizeof_C_MStats = sizeof(MStats); 306 uintptr runtime·sizeof_C_MStats = sizeof(MStats);
300 307
301 #define MaxArena32 (2U<<30) 308 #define MaxArena32 (2U<<30)
302 309
303 void 310 void
304 runtime·mallocinit(void) 311 runtime·mallocinit(void)
305 { 312 {
306 byte *p; 313 byte *p;
307 uintptr arena_size, bitmap_size; 314 uintptr arena_size, bitmap_size;
308 extern byte end[]; 315 extern byte end[];
309 byte *want; 316 byte *want;
310 uintptr limit; 317 uintptr limit;
311 318
312 p = nil; 319 p = nil;
313 arena_size = 0; 320 arena_size = 0;
314 bitmap_size = 0; 321 bitmap_size = 0;
315 ········ 322 ········
316 // for 64-bit build 323 // for 64-bit build
317 USED(p); 324 USED(p);
318 USED(arena_size); 325 USED(arena_size);
319 USED(bitmap_size); 326 USED(bitmap_size);
320 327
328 if((runtime·mheap = runtime·SysAlloc(sizeof(*runtime·mheap))) == nil)
329 runtime·throw("runtime: cannot allocate heap metadata");
330
321 runtime·InitSizes(); 331 runtime·InitSizes();
322 332
323 » limit = runtime·memlimit(); 333 » // limit = runtime·memlimit();
334 » // See https://code.google.com/p/go/issues/detail?id=5049
335 » // TODO(rsc): Fix after 1.1.
336 » limit = 0;
324 337
325 // Set up the allocation arena, a contiguous area of memory where 338 // Set up the allocation arena, a contiguous area of memory where
326 // allocated data will be found. The arena begins with a bitmap large 339 // allocated data will be found. The arena begins with a bitmap large
327 // enough to hold 4 bits per allocated word. 340 // enough to hold 4 bits per allocated word.
328 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) { 341 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
329 // On a 64-bit machine, allocate from a single contiguous reservation. 342 // On a 64-bit machine, allocate from a single contiguous reservation.
330 // 128 GB (MaxMem) should be big enough for now. 343 // 128 GB (MaxMem) should be big enough for now.
331 // 344 //
332 // The code will work with the reservation at any address, but ask 345 // The code will work with the reservation at any address, but ask
333 // SysReserve to use 0x000000c000000000 if possible. 346 // SysReserve to use 0x000000c000000000 if possible.
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
389 want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1)); 402 want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
390 p = runtime·SysReserve(want, bitmap_size + arena_size); 403 p = runtime·SysReserve(want, bitmap_size + arena_size);
391 if(p == nil) 404 if(p == nil)
392 runtime·throw("runtime: cannot reserve arena virtual address space"); 405 runtime·throw("runtime: cannot reserve arena virtual address space");
393 if((uintptr)p & (((uintptr)1<<PageShift)-1)) 406 if((uintptr)p & (((uintptr)1<<PageShift)-1))
394 runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size); 407 runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
395 } 408 }
396 if((uintptr)p & (((uintptr)1<<PageShift)-1)) 409 if((uintptr)p & (((uintptr)1<<PageShift)-1))
397 runtime·throw("runtime: SysReserve returned unaligned address"); 410 runtime·throw("runtime: SysReserve returned unaligned address");
398 411
399 » runtime·mheap.bitmap = p; 412 » runtime·mheap->bitmap = p;
400 » runtime·mheap.arena_start = p + bitmap_size; 413 » runtime·mheap->arena_start = p + bitmap_size;
401 » runtime·mheap.arena_used = runtime·mheap.arena_start; 414 » runtime·mheap->arena_used = runtime·mheap->arena_start;
402 » runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size; 415 » runtime·mheap->arena_end = runtime·mheap->arena_start + arena_size;
403 416
404 // Initialize the rest of the allocator.········ 417 // Initialize the rest of the allocator.········
405 » runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc); 418 » runtime·MHeap_Init(runtime·mheap, runtime·SysAlloc);
406 m->mcache = runtime·allocmcache(); 419 m->mcache = runtime·allocmcache();
407 420
408 // See if it works. 421 // See if it works.
409 runtime·free(runtime·malloc(1)); 422 runtime·free(runtime·malloc(1));
410 } 423 }
411 424
412 void* 425 void*
413 runtime·MHeap_SysAlloc(MHeap *h, uintptr n) 426 runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
414 { 427 {
415 byte *p; 428 byte *p;
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
493 while(buf < endbuf) { 506 while(buf < endbuf) {
494 v = (void*)*buf; 507 v = (void*)*buf;
495 if(false) *buf = 0; 508 if(false) *buf = 0;
496 buf++; 509 buf++;
497 typ = *buf; 510 typ = *buf;
498 buf++; 511 buf++;
499 512
500 // (Manually inlined copy of runtime·MHeap_Lookup) 513 // (Manually inlined copy of runtime·MHeap_Lookup)
501 p = (uintptr)v>>PageShift; 514 p = (uintptr)v>>PageShift;
502 if(sizeof(void*) == 8) 515 if(sizeof(void*) == 8)
503 » » » p -= (uintptr)runtime·mheap.arena_start >> PageShift; 516 » » » p -= (uintptr)runtime·mheap->arena_start >> PageShift;
504 » » s = runtime·mheap.map[p]; 517 » » s = runtime·mheap->map[p];
505 518
506 if(s->sizeclass == 0) { 519 if(s->sizeclass == 0) {
507 s->types.compression = MTypes_Single; 520 s->types.compression = MTypes_Single;
508 s->types.data = typ; 521 s->types.data = typ;
509 continue; 522 continue;
510 } 523 }
511 524
512 size = s->elemsize; 525 size = s->elemsize;
513 ofs = ((uintptr)v - (s->start<<PageShift)) / size; 526 ofs = ((uintptr)v - (s->start<<PageShift)) / size;
514 527
515 switch(s->types.compression) { 528 switch(s->types.compression) {
516 case MTypes_Empty: 529 case MTypes_Empty:
517 ntypes = (s->npages << PageShift) / size; 530 ntypes = (s->npages << PageShift) / size;
518 nbytes3 = 8*sizeof(uintptr) + 1*ntypes; 531 nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
519 532
520 if(!sysalloc) { 533 if(!sysalloc) {
521 » » » » data3 = runtime·mallocgc(nbytes3, FlagNoPointers, 0, 1); 534 » » » » data3 = runtime·mallocgc(nbytes3, FlagNoProfiling|FlagNoPointers, 0, 1);
522 } else { 535 } else {
523 data3 = runtime·SysAlloc(nbytes3); 536 data3 = runtime·SysAlloc(nbytes3);
537 if(data3 == nil)
538 runtime·throw("runtime: cannot allocate memory");
524 if(0) runtime·printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3); 539 if(0) runtime·printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3);
525 } 540 }
526 541
527 s->types.compression = MTypes_Bytes; 542 s->types.compression = MTypes_Bytes;
528 s->types.sysalloc = sysalloc; 543 s->types.sysalloc = sysalloc;
529 s->types.data = (uintptr)data3; 544 s->types.data = (uintptr)data3;
530 545
531 ((uintptr*)data3)[1] = typ; 546 ((uintptr*)data3)[1] = typ;
532 data3[8*sizeof(uintptr) + ofs] = 1; 547 data3[8*sizeof(uintptr) + ofs] = 1;
533 break; 548 break;
(...skipping 13 matching lines...) Expand all
547 break; 562 break;
548 } 563 }
549 } 564 }
550 if(j < 8) { 565 if(j < 8) {
551 data3[8*sizeof(uintptr) + ofs] = j; 566 data3[8*sizeof(uintptr) + ofs] = j;
552 } else { 567 } else {
553 ntypes = (s->npages << PageShift) / size; 568 ntypes = (s->npages << PageShift) / size;
554 nbytes2 = ntypes * sizeof(uintptr); 569 nbytes2 = ntypes * sizeof(uintptr);
555 570
556 if(!sysalloc) { 571 if(!sysalloc) {
557 » » » » » data2 = runtime·mallocgc(nbytes2, FlagNoPointers, 0, 1); 572 » » » » » data2 = runtime·mallocgc(nbytes2, FlagNoProfiling|FlagNoPointers, 0, 1);
558 } else { 573 } else {
559 data2 = runtime·SysAlloc(nbytes2); 574 data2 = runtime·SysAlloc(nbytes2);
575 if(data2 == nil)
576 runtime·throw("runtime: cannot allocate memory");
560 if(0) runtime·printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2); 577 if(0) runtime·printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
561 } 578 }
562 579
563 sysalloc3 = s->types.sysalloc; 580 sysalloc3 = s->types.sysalloc;
564 581
565 s->types.compression = MTypes_Words; 582 s->types.compression = MTypes_Words;
566 s->types.sysalloc = sysalloc; 583 s->types.sysalloc = sysalloc;
567 s->types.data = (uintptr)data2; 584 s->types.data = (uintptr)data2;
568 585
569 // Move the contents of data3 to data2. Then deallocate data3. 586 // Move the contents of data3 to data2. Then deallocate data3.
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
607 buf[i+0] = (uintptr)v; 624 buf[i+0] = (uintptr)v;
608 buf[i+1] = t; 625 buf[i+1] = t;
609 i += 2; 626 i += 2;
610 mp->settype_bufsize = i; 627 mp->settype_bufsize = i;
611 628
612 if(i == nelem(mp->settype_buf)) { 629 if(i == nelem(mp->settype_buf)) {
613 runtime·settype_flush(mp, false); 630 runtime·settype_flush(mp, false);
614 } 631 }
615 632
616 if(DebugTypeAtBlockEnd) { 633 if(DebugTypeAtBlockEnd) {
617 » » s = runtime·MHeap_Lookup(&runtime·mheap, v); 634 » » s = runtime·MHeap_Lookup(runtime·mheap, v);
618 *(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t; 635 *(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
619 } 636 }
620 } 637 }
621 638
622 void 639 void
623 runtime·settype_sysfree(MSpan *s) 640 runtime·settype_sysfree(MSpan *s)
624 { 641 {
625 uintptr ntypes, nbytes; 642 uintptr ntypes, nbytes;
626 643
627 if(!s->types.sysalloc) 644 if(!s->types.sysalloc)
(...skipping 18 matching lines...) Expand all
646 } 663 }
647 } 664 }
648 665
649 uintptr 666 uintptr
650 runtime·gettype(void *v) 667 runtime·gettype(void *v)
651 { 668 {
652 MSpan *s; 669 MSpan *s;
653 uintptr t, ofs; 670 uintptr t, ofs;
654 byte *data; 671 byte *data;
655 672
656 » s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); 673 » s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
657 if(s != nil) { 674 if(s != nil) {
658 t = 0; 675 t = 0;
659 switch(s->types.compression) { 676 switch(s->types.compression) {
660 case MTypes_Empty: 677 case MTypes_Empty:
661 break; 678 break;
662 case MTypes_Single: 679 case MTypes_Single:
663 t = s->types.data; 680 t = s->types.data;
664 break; 681 break;
665 case MTypes_Words: 682 case MTypes_Words:
666 ofs = (uintptr)v - (s->start<<PageShift); 683 ofs = (uintptr)v - (s->start<<PageShift);
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after
745 if(false) { 762 if(false) {
746 runtime·printf("new %S: %p\n", *typ->string, ret ); 763 runtime·printf("new %S: %p\n", *typ->string, ret );
747 } 764 }
748 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObjec t); 765 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObjec t);
749 } 766 }
750 } 767 }
751 768
752 return ret; 769 return ret;
753 } 770 }
754 771
755 typedef struct StackCacheNode StackCacheNode;
756 struct StackCacheNode
757 {
758 StackCacheNode *next;
759 void* batch[StackCacheBatch-1];
760 };
761
762 static StackCacheNode *stackcache;
763 static Lock stackcachemu;
764
765 // stackcacherefill/stackcacherelease implement global cache of stack segments.
766 // The cache is required to prevent unlimited growth of per-thread caches.
767 static void
768 stackcacherefill(void)
769 {
770 StackCacheNode *n;
771 int32 i, pos;
772
773 runtime·lock(&stackcachemu);
774 n = stackcache;
775 if(n)
776 stackcache = n->next;
777 runtime·unlock(&stackcachemu);
778 if(n == nil) {
779 n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch);
780 if(n == nil)
781 runtime·throw("out of memory (stackcacherefill)");
782 runtime·xadd64(&mstats.stacks_sys, FixedStack*StackCacheBatch);
783 for(i = 0; i < StackCacheBatch-1; i++)
784 n->batch[i] = (byte*)n + (i+1)*FixedStack;
785 }
786 pos = m->stackcachepos;
787 for(i = 0; i < StackCacheBatch-1; i++) {
788 m->stackcache[pos] = n->batch[i];
789 pos = (pos + 1) % StackCacheSize;
790 }
791 m->stackcache[pos] = n;
792 pos = (pos + 1) % StackCacheSize;
793 m->stackcachepos = pos;
794 m->stackcachecnt += StackCacheBatch;
795 }
796
797 static void
798 stackcacherelease(void)
799 {
800 StackCacheNode *n;
801 uint32 i, pos;
802
803 pos = (m->stackcachepos - m->stackcachecnt) % StackCacheSize;
804 n = (StackCacheNode*)m->stackcache[pos];
805 pos = (pos + 1) % StackCacheSize;
806 for(i = 0; i < StackCacheBatch-1; i++) {
807 n->batch[i] = m->stackcache[pos];
808 pos = (pos + 1) % StackCacheSize;
809 }
810 m->stackcachecnt -= StackCacheBatch;
811 runtime·lock(&stackcachemu);
812 n->next = stackcache;
813 stackcache = n;
814 runtime·unlock(&stackcachemu);
815 }
816
817 void*
818 runtime·stackalloc(uint32 n)
819 {
820 uint32 pos;
821 void *v;
822
823 // Stackalloc must be called on scheduler stack, so that we
824 // never try to grow the stack during the code that stackalloc runs.
825 // Doing so would cause a deadlock (issue 1547).
826 if(g != m->g0)
827 runtime·throw("stackalloc not on scheduler stack");
828
829 // Stack allocator uses malloc/free most of the time,
830 // but if we're in the middle of malloc and need stack,
831 // we have to do something else to avoid deadlock.
832 // In that case, we fall back on a fixed-size free-list
833 // allocator, assuming that inside malloc all the stack
834 // frames are small, so that all the stack allocations
835 // will be a single size, the minimum (right now, 5k).
836 if(n == FixedStack || m->mallocing || m->gcing) {
837 if(n != FixedStack) {
838 runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
839 runtime·throw("stackalloc");
840 }
841 if(m->stackcachecnt == 0)
842 stackcacherefill();
843 pos = m->stackcachepos;
844 pos = (pos - 1) % StackCacheSize;
845 v = m->stackcache[pos];
846 m->stackcachepos = pos;
847 m->stackcachecnt--;
848 m->stackinuse++;
849 return v;
850 }
851 return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
852 }
853
854 void
855 runtime·stackfree(void *v, uintptr n)
856 {
857 uint32 pos;
858
859 if(n == FixedStack || m->mallocing || m->gcing) {
860 if(m->stackcachecnt == StackCacheSize)
861 stackcacherelease();
862 pos = m->stackcachepos;
863 m->stackcache[pos] = v;
864 m->stackcachepos = (pos + 1) % StackCacheSize;
865 m->stackcachecnt++;
866 m->stackinuse--;
867 return;
868 }
869 runtime·free(v);
870 }
871
872 func GC() { 772 func GC() {
873 runtime·gc(1); 773 runtime·gc(1);
874 } 774 }
875 775
876 func SetFinalizer(obj Eface, finalizer Eface) { 776 func SetFinalizer(obj Eface, finalizer Eface) {
877 byte *base; 777 byte *base;
878 uintptr size; 778 uintptr size;
879 FuncType *ft; 779 FuncType *ft;
880 int32 i; 780 int32 i;
881 uintptr nret; 781 uintptr nret;
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
914 runtime·printf("runtime.SetFinalizer: finalizer already set\n"); 814 runtime·printf("runtime.SetFinalizer: finalizer already set\n");
915 goto throw; 815 goto throw;
916 } 816 }
917 return; 817 return;
918 818
919 badfunc: 819 badfunc:
920 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S )\n", *finalizer.type->string, *obj.type->string); 820 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S )\n", *finalizer.type->string, *obj.type->string);
921 throw: 821 throw:
922 runtime·throw("runtime.SetFinalizer"); 822 runtime·throw("runtime.SetFinalizer");
923 } 823 }
LEFTRIGHT

Powered by Google App Engine
RSS Feeds Recent Issues | This issue
This is Rietveld f62528b