Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 6441097: runtime: improved scheduler (Closed)
Left Patch Set: diff -r 2518eee18c4f https://go.googlecode.com/hg/ Created 11 years, 7 months ago
Right Patch Set: diff -r 107e46216b58 https://dvyukov%40google.com@code.google.com/p/go/ Created 11 years, 4 months ago
LEFT | RIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 // See malloc.h for overview. 5 // See malloc.h for overview.
6 // 6 //
7 // TODO(rsc): double-check stats. 7 // TODO(rsc): double-check stats.
8 8
9 package runtime 9 package runtime
10 #include "runtime.h" 10 #include "runtime.h"
11 #include "arch_GOARCH.h" 11 #include "arch_GOARCH.h"
12 #include "stack.h" 12 #include "stack.h"
13 #include "malloc.h" 13 #include "malloc.h"
14 #include "defs_GOOS_GOARCH.h" 14 #include "defs_GOOS_GOARCH.h"
15 #include "type.h" 15 #include "type.h"
16 #include "typekind.h" 16 #include "typekind.h"
17 #include "race.h"
17 18
18 #pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */ 19 #pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */
19 MHeap runtime·mheap; 20 MHeap runtime·mheap;
20 21
21 extern MStats mstats; // defined in extern.go 22 extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
22 23
23 extern volatile int32 runtime·MemProfileRate; 24 extern volatile intgo runtime·MemProfileRate;
24 25
25 // Allocate an object of at least size bytes. 26 // Allocate an object of at least size bytes.
26 // Small objects are allocated from the per-thread cache's free lists. 27 // Small objects are allocated from the per-thread cache's free lists.
27 // Large objects (> 32 kB) are allocated straight from the heap. 28 // Large objects (> 32 kB) are allocated straight from the heap.
28 void* 29 void*
29 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) 30 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
30 { 31 {
31 int32 sizeclass, rate; 32 int32 sizeclass;
33 intgo rate;
32 MCache *c; 34 MCache *c;
33 uintptr npages; 35 uintptr npages;
34 MSpan *s; 36 MSpan *s;
35 void *v; 37 void *v;
36 38
37 if(m->mcache == nil) 39 if(m->mcache == nil) {
40 runtime·printf("NOMCACHE\n");
41 runtime·usleep(1000000000);
42 *(int32*)111 = 111;
38 runtime·throw("mallocgc: no mcache"); 43 runtime·throw("mallocgc: no mcache");
44 }
39 if(runtime·gcwaiting && g != m->g0 && m->locks == 0) 45 if(runtime·gcwaiting && g != m->g0 && m->locks == 0)
40 runtime·gosched(); 46 runtime·gosched();
41 if(m->mallocing) 47 if(m->mallocing)
42 runtime·throw(m->mallocing==1 ? "malloc/malloc - deadlock" : "free/malloc - deadlock"); 48 runtime·throw(m->mallocing==1 ? "malloc/malloc - deadlock" : "free/malloc - deadlock");
43 m->mallocing = 1; 49 m->mallocing = 1;
44 if(size == 0) 50 if(size == 0)
45 size = 1; 51 size = 1;
52
53 if(DebugTypeAtBlockEnd)
54 size += sizeof(uintptr);
46 55
47 c = m->mcache; 56 c = m->mcache;
48 c->local_nmalloc++; 57 c->local_nmalloc++;
49 if(size <= MaxSmallSize) { 58 if(size <= MaxSmallSize) {
50 // Allocate from mcache free lists. 59 // Allocate from mcache free lists.
51 sizeclass = runtime·SizeToClass(size); 60 sizeclass = runtime·SizeToClass(size);
52 size = runtime·class_to_size[sizeclass]; 61 size = runtime·class_to_size[sizeclass];
53 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed); 62 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
54 if(v == nil) 63 if(v == nil)
55 runtime·throw("out of memory"); 64 runtime·throw("out of memory");
(... 22 lines skipped ...)
78 if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) { 87 if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
79 // purge cache stats to prevent overflow 88 // purge cache stats to prevent overflow
80 runtime·lock(&runtime·mheap); 89 runtime·lock(&runtime·mheap);
81 runtime·purgecachedstats(c); 90 runtime·purgecachedstats(c);
82 runtime·unlock(&runtime·mheap); 91 runtime·unlock(&runtime·mheap);
83 } 92 }
84 93
85 if(!(flag & FlagNoGC)) 94 if(!(flag & FlagNoGC))
86 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0); 95 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);
87 96
97 if(DebugTypeAtBlockEnd)
98 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;
99
88 m->mallocing = 0; 100 m->mallocing = 0;
89 101
90 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) { 102 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
91 if(size >= rate) 103 if(size >= rate)
92 goto profile; 104 goto profile;
93 if(m->mcache->next_sample > size) 105 if(m->mcache->next_sample > size)
94 m->mcache->next_sample -= size; 106 m->mcache->next_sample -= size;
95 else { 107 else {
96 // pick next profile time 108 // pick next profile time
97 // If you change this, also change allocmcache. 109 // If you change this, also change allocmcache.
98 if(rate > 0x3fffffff) // make 2*rate not overflow 110 if(rate > 0x3fffffff) // make 2*rate not overflow
99 rate = 0x3fffffff; 111 rate = 0x3fffffff;
100 m->mcache->next_sample = runtime·fastrand1() % (2*rate); 112 m->mcache->next_sample = runtime·fastrand1() % (2*rate);
101 profile: 113 profile:
102 runtime·setblockspecial(v, true); 114 runtime·setblockspecial(v, true);
103 runtime·MProf_Malloc(v, size); 115 runtime·MProf_Malloc(v, size);
104 } 116 }
105 } 117 }
106 118
107 if(dogc && mstats.heap_alloc >= mstats.next_gc) 119 if(dogc && mstats.heap_alloc >= mstats.next_gc)
108 runtime·gc(0); 120 runtime·gc(0);
121
122 if(raceenabled) {
123 runtime·racemalloc(v, size, m->racepc);
124 m->racepc = nil;
125 }
109 return v; 126 return v;
110 } 127 }
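
The profiling branch above implements sampled heap profiling: each mcache carries a byte countdown (next_sample), every allocation is charged against it, and a block is recorded when the countdown runs out; blocks of at least MemProfileRate bytes are always recorded. Re-arming the countdown uniformly in [0, 2*rate) gives a mean sampling period of rate bytes. A minimal standalone sketch of that policy, in plain C with illustrative values (a small rate so rand()'s range suffices; this is not the runtime's code):

    #include <stdio.h>
    #include <stdlib.h>

    static long rate = 4096;        /* stand-in for runtime·MemProfileRate */
    static long next_sample = 0;    /* cf. mcache->next_sample */

    static int should_sample(long size)
    {
        if (size >= rate)
            return 1;                /* big blocks are always profiled */
        if (next_sample > size) {
            next_sample -= size;     /* charge this allocation */
            return 0;
        }
        /* Re-arm: uniform in [0, 2*rate) has mean rate, so on average
           one sample per rate bytes allocated. */
        next_sample = rand() % (2*rate);
        return 1;
    }

    int main(void)
    {
        long i, sampled = 0;
        for (i = 0; i < 1000000; i++)
            sampled += should_sample(64);
        /* expect roughly 64*1000000/4096 = 15625 samples */
        printf("sampled %ld of 1000000 allocations\n", sampled);
        return 0;
    }

With 64-byte allocations this samples about one allocation in 64, matching rate/size.
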
111 128
112 void* 129 void*
113 runtime·malloc(uintptr size) 130 runtime·malloc(uintptr size)
114 { 131 {
115 return runtime·mallocgc(size, 0, 0, 1); 132 return runtime·mallocgc(size, 0, 0, 1);
116 } 133 }
117 134
118 // Free the object whose base pointer is v. 135 // Free the object whose base pointer is v.
(... 14 lines skipped ...)
133 150
134 if(m->mallocing) 151 if(m->mallocing)
135 runtime·throw(m->mallocing==1 ? "malloc/free - deadlock" : "free/free - deadlock"); 152 runtime·throw(m->mallocing==1 ? "malloc/free - deadlock" : "free/free - deadlock");
136 m->mallocing = 2; 153 m->mallocing = 2;
137 154
138 if(!runtime·mlookup(v, nil, nil, &s)) { 155 if(!runtime·mlookup(v, nil, nil, &s)) {
139 runtime·printf("free %p: not an allocated block\n", v); 156 runtime·printf("free %p: not an allocated block\n", v);
140 runtime·throw("free runtime·mlookup"); 157 runtime·throw("free runtime·mlookup");
141 } 158 }
142 prof = runtime·blockspecial(v); 159 prof = runtime·blockspecial(v);
160
161 if(raceenabled)
162 runtime·racefree(v);
143 163
144 // Find size class for v. 164 // Find size class for v.
145 sizeclass = s->sizeclass; 165 sizeclass = s->sizeclass;
146 c = m->mcache; 166 c = m->mcache;
147 if(sizeclass == 0) { 167 if(sizeclass == 0) {
148 // Large object. 168 // Large object.
149 size = s->npages<<PageShift; 169 size = s->npages<<PageShift;
150 *(uintptr*)(s->start<<PageShift) = 1; // mark as "needs to be zeroed" 170 *(uintptr*)(s->start<<PageShift) = 1; // mark as "needs to be zeroed"
151 // Must mark v freed before calling unmarkspan and MHeap_Free: 171 // Must mark v freed before calling unmarkspan and MHeap_Free:
152 // they might coalesce v into other spans and change the bitmap further. 172 // they might coalesce v into other spans and change the bitmap further.
(... 54 lines skipped ...)
207 if(size) 227 if(size)
208 *size = s->npages<<PageShift; 228 *size = s->npages<<PageShift;
209 return 1; 229 return 1;
210 } 230 }
211 231
212 if((byte*)v >= (byte*)s->limit) { 232 if((byte*)v >= (byte*)s->limit) {
213 // pointers past the last block do not count as pointers. 233 // pointers past the last block do not count as pointers.
214 return 0; 234 return 0;
215 } 235 }
216 236
217 n = runtime·class_to_size[s->sizeclass]; 237 n = s->elemsize;
218 if(base) { 238 if(base) {
219 i = ((byte*)v - p)/n; 239 i = ((byte*)v - p)/n;
220 *base = p + i*n; 240 *base = p + i*n;
221 } 241 }
222 if(size) 242 if(size)
223 *size = n; 243 *size = n;
224 244
225 return 1; 245 return 1;
226 } 246 }
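
The small-object branch of mlookup reduces to fixed-size index arithmetic: with span base p and element size n (now taken from s->elemsize instead of the class_to_size table), the block containing an interior pointer v starts at p + ((v-p)/n)*n. A tiny sketch with made-up numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uintptr_t p = 0x100000;        /* span start (made up) */
        uintptr_t n = 48;              /* element size for the size class */
        uintptr_t v = p + 5*n + 17;    /* interior pointer into block 5 */
        uintptr_t i = (v - p) / n;     /* block index, as in mlookup */
        uintptr_t base = p + i*n;

        printf("v=%#lx base=%#lx size=%lu\n",
               (unsigned long)v, (unsigned long)base, (unsigned long)n);
        return 0;
    }
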
227 247
228 MCache* 248 MCache*
229 runtime·allocmcache(void) 249 runtime·allocmcache(void)
230 { 250 {
231 int32 rate; 251 intgo rate;
232 MCache *c; 252 MCache *c;
233 253
234 runtime·lock(&runtime·mheap); 254 runtime·lock(&runtime·mheap);
235 c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc); 255 c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
236 mstats.mcache_inuse = runtime·mheap.cachealloc.inuse; 256 mstats.mcache_inuse = runtime·mheap.cachealloc.inuse;
237 mstats.mcache_sys = runtime·mheap.cachealloc.sys; 257 mstats.mcache_sys = runtime·mheap.cachealloc.sys;
238 runtime·unlock(&runtime·mheap); 258 runtime·unlock(&runtime·mheap);
239 runtime·memclr((byte*)c, sizeof(*c)); 259 runtime·memclr((byte*)c, sizeof(*c));
240 260
241 // Set first allocation sample size. 261 // Set first allocation sample size.
(... 60 lines skipped ...)
302 322
303 runtime·InitSizes(); 323 runtime·InitSizes();
304 324
305 limit = runtime·memlimit(); 325 limit = runtime·memlimit();
306 326
307 // Set up the allocation arena, a contiguous area of memory where 327 // Set up the allocation arena, a contiguous area of memory where
308 // allocated data will be found. The arena begins with a bitmap large 328 // allocated data will be found. The arena begins with a bitmap large
309 // enough to hold 4 bits per allocated word. 329 // enough to hold 4 bits per allocated word.
310 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) { 330 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
311 // On a 64-bit machine, allocate from a single contiguous reservation. 331 // On a 64-bit machine, allocate from a single contiguous reservation.
312 // 16 GB should be big enough for now. 332 // 128 GB (MaxMem) should be big enough for now.
313 // 333 //
314 // The code will work with the reservation at any address, but ask 334 // The code will work with the reservation at any address, but ask
315 // SysReserve to use 0x000000f800000000 if possible. 335 // SysReserve to use 0x000000c000000000 if possible.
316 // Allocating a 16 GB region takes away 36 bits, and the amd64 336 // Allocating a 128 GB region takes away 37 bits, and the amd64
317 // doesn't let us choose the top 17 bits, so that leaves the 11 bits 337 // doesn't let us choose the top 17 bits, so that leaves the 11 bits
318 // in the middle of 0x00f8 for us to choose.  Choosing 0x00f8 means 338 // in the middle of 0x00c0 for us to choose.  Choosing 0x00c0 means
319 // that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb. 339 // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
320 // None of the bytes f8 f9 fa fb can appear in valid UTF-8, and 340 // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
321 // they are otherwise as far from ff (likely a common byte) as possible. 341 // UTF-8 sequences, and they are otherwise as far away from
322 // Choosing 0x00 for the leading 6 bits was more arbitrary, but it 342 // ff (likely a common byte) as possible.  An earlier attempt to use 0x11f8
323 // is not a common ASCII code point either.  Using 0x11f8 instead
324 // caused out of memory errors on OS X during thread allocations. 343 // caused out of memory errors on OS X during thread allocations.
325 // These choices are both for debuggability and to reduce the 344 // These choices are both for debuggability and to reduce the
326 // odds of the conservative garbage collector not collecting memory 345 // odds of the conservative garbage collector not collecting memory
327 // because some non-pointer block of memory had a bit pattern 346 // because some non-pointer block of memory had a bit pattern
328 // that matched a memory address. 347 // that matched a memory address.
329 // 348 //
330 // Actually we reserve 17 GB (because the bitmap ends up being 1 GB) 349 // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
331 // but it hardly matters: fc is not valid UTF-8 either, and we have to 350 // but it hardly matters: e0 00 is not valid UTF-8 either.
332 // allocate 15 GB before we get that far.
333 // 351 //
334 // If this fails we fall back to the 32 bit memory mechanism 352 // If this fails we fall back to the 32 bit memory mechanism
335 arena_size = 16LL<<30; 353 arena_size = MaxMem;
336 bitmap_size = arena_size / (sizeof(void*)*8/4); 354 bitmap_size = arena_size / (sizeof(void*)*8/4);
337 p = runtime·SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size); 355 p = runtime·SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
338 } 356 }
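
The sizes in the rewritten comment check out: the bitmap holds 4 bits per allocated word, so on amd64 it is arena_size/16, and a 128 GB (MaxMem) arena needs an 8 GB bitmap, 136 GB reserved in total. A throwaway sketch of the arithmetic (word size fixed at 8 bytes, as on amd64):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t word = 8;                               /* amd64 word size */
        uint64_t arena_size = 128ULL << 30;              /* MaxMem */
        uint64_t bitmap_size = arena_size / (word*8/4);  /* 4 bits per word */

        printf("arena  %llu GB\n", (unsigned long long)(arena_size >> 30));
        printf("bitmap %llu GB\n", (unsigned long long)(bitmap_size >> 30));
        printf("total  %llu GB\n",
               (unsigned long long)((arena_size + bitmap_size) >> 30));
        return 0;
    }
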
339 if (p == nil) { 357 if (p == nil) {
340 // On a 32-bit machine, we can't typically get away 358 // On a 32-bit machine, we can't typically get away
341 // with a giant virtual address space reservation. 359 // with a giant virtual address space reservation.
342 // Instead we map the memory information bitmap 360 // Instead we map the memory information bitmap
343 // immediately after the data segment, large enough 361 // immediately after the data segment, large enough
344 // to handle another 2GB of mappings (256 MB), 362 // to handle another 2GB of mappings (256 MB),
345 // along with a reservation for another 512 MB of memory. 363 // along with a reservation for another 512 MB of memory.
346 // When that gets used up, we'll start asking the kernel 364 // When that gets used up, we'll start asking the kernel
347 // for any memory anywhere and hope it's in the 2GB 365 // for any memory anywhere and hope it's in the 2GB
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
413 if(p == h->arena_end) 431 if(p == h->arena_end)
414 h->arena_end = new_end; 432 h->arena_end = new_end;
415 } 433 }
416 } 434 }
417 if(n <= h->arena_end - h->arena_used) { 435 if(n <= h->arena_end - h->arena_used) {
418 // Keep taking from our reservation. 436 // Keep taking from our reservation.
419 p = h->arena_used; 437 p = h->arena_used;
420 runtime·SysMap(p, n); 438 runtime·SysMap(p, n);
421 h->arena_used += n; 439 h->arena_used += n;
422 runtime·MHeap_MapBits(h); 440 runtime·MHeap_MapBits(h);
441 if(raceenabled)
442 runtime·racemapshadow(p, n);
423 return p; 443 return p;
424 } 444 }
425 445
426 // If using 64-bit, our reservation is all we have. 446 // If using 64-bit, our reservation is all we have.
427 if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU) 447 if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
428 return nil; 448 return nil;
429 449
430 // On 32-bit, once the reservation is gone we can 450 // On 32-bit, once the reservation is gone we can
431 // try to get memory at a location chosen by the OS 451 // try to get memory at a location chosen by the OS
432 // and hope that it is in the range we allocated bitmap for. 452 // and hope that it is in the range we allocated bitmap for.
433 p = runtime·SysAlloc(n); 453 p = runtime·SysAlloc(n);
434 if(p == nil) 454 if(p == nil)
435 return nil; 455 return nil;
436 456
437 if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) { 457 if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
438 runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n", 458 runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
439 p, h->arena_start, h->arena_start+MaxArena32); 459 p, h->arena_start, h->arena_start+MaxArena32);
440 runtime·SysFree(p, n); 460 runtime·SysFree(p, n);
441 return nil; 461 return nil;
442 } 462 }
443 463
444 if(p+n > h->arena_used) { 464 if(p+n > h->arena_used) {
445 h->arena_used = p+n; 465 h->arena_used = p+n;
446 if(h->arena_used > h->arena_end) 466 if(h->arena_used > h->arena_end)
447 h->arena_end = h->arena_used; 467 h->arena_end = h->arena_used;
448 runtime·MHeap_MapBits(h); 468 runtime·MHeap_MapBits(h);
469 if(raceenabled)
470 runtime·racemapshadow(p, n);
449 } 471 }
450 472
451 return p; 473 return p;
452 } 474 }
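
When the 32-bit fallback accepts OS-chosen memory above, the usability test is a plain interval check against the window the bitmap was sized for. A standalone sketch of that predicate, with an illustrative 2 GB window standing in for MaxArena32 (not the runtime's code):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_ARENA32 (2ULL << 30)   /* illustrative 2 GB window */

    /* Mirrors the check above: p..p+n is usable only if it lies in
       [arena_start, arena_start+MAX_ARENA32). */
    static int usable(uint64_t arena_start, uint64_t p, uint64_t n)
    {
        return p >= arena_start && p + n - arena_start < MAX_ARENA32;
    }

    int main(void)
    {
        uint64_t start = 0x10000000ULL;

        printf("inside window:  %d\n", usable(start, start + (1ULL<<20), 4096));
        printf("outside window: %d\n", usable(start, start + (3ULL<<30), 4096));
        return 0;
    }
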
453 475
476 static Lock settype_lock;
477
478 void
479 runtime·settype_flush(M *m, bool sysalloc)
480 {
481 uintptr *buf, *endbuf;
482 uintptr size, ofs, j, t;
483 uintptr ntypes, nbytes2, nbytes3;
484 uintptr *data2;
485 byte *data3;
486 bool sysalloc3;
487 void *v;
488 uintptr typ, p;
489 MSpan *s;
490
491 buf = m->settype_buf;
492 endbuf = buf + m->settype_bufsize;
493
494 runtime·lock(&settype_lock);
495 while(buf < endbuf) {
496 v = (void*)*buf;
497 *buf = 0;
498 buf++;
499 typ = *buf;
500 buf++;
501
502 // (Manually inlined copy of runtime·MHeap_Lookup)
503 p = (uintptr)v>>PageShift;
504 if(sizeof(void*) == 8)
505 p -= (uintptr)runtime·mheap.arena_start >> PageShift;
506 s = runtime·mheap.map[p];
507
508 if(s->sizeclass == 0) {
509 s->types.compression = MTypes_Single;
510 s->types.data = typ;
511 continue;
512 }
513
514 size = s->elemsize;
515 ofs = ((uintptr)v - (s->start<<PageShift)) / size;
516
517 switch(s->types.compression) {
518 case MTypes_Empty:
519 ntypes = (s->npages << PageShift) / size;
520 nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
521
522 if(!sysalloc) {
523 data3 = runtime·mallocgc(nbytes3, FlagNoPointers, 0, 1);
524 } else {
525 data3 = runtime·SysAlloc(nbytes3);
526 if(0) runtime·printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3);
527 }
528
529 s->types.compression = MTypes_Bytes;
530 s->types.sysalloc = sysalloc;
531 s->types.data = (uintptr)data3;
532
533 ((uintptr*)data3)[1] = typ;
534 data3[8*sizeof(uintptr) + ofs] = 1;
535 break;
536
537 case MTypes_Words:
538 ((uintptr*)s->types.data)[ofs] = typ;
539 break;
540
541 case MTypes_Bytes:
542 data3 = (byte*)s->types.data;
543 for(j=1; j<8; j++) {
544 if(((uintptr*)data3)[j] == typ) {
545 break;
546 }
547 if(((uintptr*)data3)[j] == 0) {
548 ((uintptr*)data3)[j] = typ;
549 break;
550 }
551 }
552 if(j < 8) {
553 data3[8*sizeof(uintptr) + ofs] = j;
554 } else {
555 ntypes = (s->npages << PageShift) / size;
556 nbytes2 = ntypes * sizeof(uintptr);
557
558 if(!sysalloc) {
559 data2 = runtime·mallocgc(nbytes2, FlagNoPointers, 0, 1);
560 } else {
561 data2 = runtime·SysAlloc(nbytes2);
562 if(0) runtime·printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
563 }
564
565 sysalloc3 = s->types.sysalloc;
566
567 s->types.compression = MTypes_Words;
568 s->types.sysalloc = sysalloc;
569 s->types.data = (uintptr)data2;
570
571 // Move the contents of data3 to data2. Then deallocate data3.
572 for(j=0; j<ntypes; j++) {
573 t = data3[8*sizeof(uintptr) + j];
574 t = ((uintptr*)data3)[t];
575 data2[j] = t;
576 }
577 if(sysalloc3) {
578 nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
579 if(0) runtime·printf("settype.(3->2): SysFree(%p,%x)\n", data3, (uint32)nbytes3);
580 runtime·SysFree(data3, nbytes3);
581 }
582
583 data2[ofs] = typ;
584 }
585 break;
586 }
587 }
588 runtime·unlock(&settype_lock);
589
590 m->settype_bufsize = 0;
591 }
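
settype_flush stores per-object type words for a span in one of several encodings. The MTypes_Bytes case keeps an 8-slot table of type words followed by one index byte per object, and once more than 7 distinct types appear the span is upgraded to MTypes_Words (a full word per object). A self-contained sketch of the byte encoding, with the layout simplified to two arrays and illustrative names:

    #include <stdio.h>
    #include <stdint.h>

    #define NOBJ 16

    static uintptr_t table[8];        /* slot 0 stays 0: "no type recorded" */
    static unsigned char idx[NOBJ];   /* one index byte per object slot */

    /* Record typ for object obj.  Returns 0 when more than 7 distinct
       types are present, i.e. the span would have to be upgraded to
       the one-word-per-object (MTypes_Words) form. */
    static int set_type(int obj, uintptr_t typ)
    {
        int j;
        for (j = 1; j < 8; j++) {
            if (table[j] == typ || table[j] == 0) {
                table[j] = typ;
                idx[obj] = (unsigned char)j;
                return 1;
            }
        }
        return 0;
    }

    static uintptr_t get_type(int obj)
    {
        return table[idx[obj]];   /* 0 when nothing was recorded */
    }

    int main(void)
    {
        set_type(3, 0x1000);
        set_type(7, 0x2000);
        set_type(9, 0x1000);      /* reuses the 0x1000 table slot */
        printf("obj 3 -> %#lx\n", (unsigned long)get_type(3));
        printf("obj 9 -> %#lx\n", (unsigned long)get_type(9));
        printf("obj 0 -> %#lx\n", (unsigned long)get_type(0));
        return 0;
    }

In the real code the 8*sizeof(uintptr) header on data3 is exactly this table, with slot 0 reserved so that an index byte of 0 means "no type".
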
592
593 // It is forbidden to use this function if it is possible that
594 // explicit deallocation via calling runtime·free(v) may happen.
595 void
596 runtime·settype(void *v, uintptr t)
597 {
598 M *m1;
599 uintptr *buf;
600 uintptr i;
601 MSpan *s;
602
603 if(t == 0)
604 runtime·throw("settype: zero type");
605
606 m1 = m;
607 buf = m1->settype_buf;
608 i = m1->settype_bufsize;
609 buf[i+0] = (uintptr)v;
610 buf[i+1] = t;
611 i += 2;
612 m1->settype_bufsize = i;
613
614 if(i == nelem(m1->settype_buf)) {
615 runtime·settype_flush(m1, false);
616 }
617
618 if(DebugTypeAtBlockEnd) {
619 s = runtime·MHeap_Lookup(&runtime·mheap, v);
620 *(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
621 }
622 }
623
624 void
625 runtime·settype_sysfree(MSpan *s)
626 {
627 uintptr ntypes, nbytes;
628
629 if(!s->types.sysalloc)
630 return;
631
632 nbytes = (uintptr)-1;
633
634 switch (s->types.compression) {
635 case MTypes_Words:
636 ntypes = (s->npages << PageShift) / s->elemsize;
637 nbytes = ntypes * sizeof(uintptr);
638 break;
639 case MTypes_Bytes:
640 ntypes = (s->npages << PageShift) / s->elemsize;
641 nbytes = 8*sizeof(uintptr) + 1*ntypes;
642 break;
643 }
644
645 if(nbytes != (uintptr)-1) {
646 if(0) runtime·printf("settype: SysFree(%p,%x)\n", (void*)s->types.data, (uint32)nbytes);
647 runtime·SysFree((void*)s->types.data, nbytes);
648 }
649 }
650
651 uintptr
652 runtime·gettype(void *v)
653 {
654 MSpan *s;
655 uintptr t, ofs;
656 byte *data;
657
658 s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
659 if(s != nil) {
660 t = 0;
661 switch(s->types.compression) {
662 case MTypes_Empty:
663 break;
664 case MTypes_Single:
665 t = s->types.data;
666 break;
667 case MTypes_Words:
668 ofs = (uintptr)v - (s->start<<PageShift);
669 t = ((uintptr*)s->types.data)[ofs/s->elemsize];
670 break;
671 case MTypes_Bytes:
672 ofs = (uintptr)v - (s->start<<PageShift);
673 data = (byte*)s->types.data;
674 t = data[8*sizeof(uintptr) + ofs/s->elemsize];
675 t = ((uintptr*)data)[t];
676 break;
677 default:
678 runtime·throw("runtime·gettype: invalid compression kind");
679 }
680 if(0) {
681 runtime·lock(&settype_lock);
682 runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
683 runtime·unlock(&settype_lock);
684 }
685 return t;
686 }
687 return 0;
688 }
689
454 // Runtime stubs. 690 // Runtime stubs.
455 691
456 void* 692 void*
457 runtime·mal(uintptr n) 693 runtime·mal(uintptr n)
458 { 694 {
459 return runtime·mallocgc(n, 0, 1, 1); 695 return runtime·mallocgc(n, 0, 1, 1);
460 } 696 }
461 697
462 func new(typ *Type) (ret *uint8) { 698 #pragma textflag 7
463 uint32 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0; 699 void
700 runtime·new(Type *typ, uint8 *ret)
701 {
702 uint32 flag;
703
704 if(raceenabled)
705 m->racepc = runtime·getcallerpc(&typ);
706 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
464 ret = runtime·mallocgc(typ->size, flag, 1, 1); 707 ret = runtime·mallocgc(typ->size, flag, 1, 1);
708
709 if(UseSpanType && !flag) {
710 if(false) {
711 runtime·printf("new %S: %p\n", *typ->string, ret);
712 }
713 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
714 }
715
465 FLUSH(&ret); 716 FLUSH(&ret);
717 }
718
719 // same as runtime·new, but callable from C
720 void*
721 runtime·cnew(Type *typ)
722 {
723 uint32 flag;
724 void *ret;
725
726 if(raceenabled)
727 m->racepc = runtime·getcallerpc(&typ);
728 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
729 ret = runtime·mallocgc(typ->size, flag, 1, 1);
730
731 if(UseSpanType && !flag) {
732 if(false) {
733 runtime·printf("new %S: %p\n", *typ->string, ret);
734 }
735 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
736 }
737 return ret;
466 } 738 }
467 739
468 void* 740 void*
469 runtime·stackalloc(uint32 n) 741 runtime·stackalloc(uint32 n)
470 { 742 {
471 // Stackalloc must be called on scheduler stack, so that we 743 // Stackalloc must be called on scheduler stack, so that we
472 // never try to grow the stack during the code that stackalloc runs. 744 // never try to grow the stack during the code that stackalloc runs.
473 // Doing so would cause a deadlock (issue 1547). 745 // Doing so would cause a deadlock (issue 1547).
474 if(g != m->g0) 746 if(g != m->g0)
475 runtime·throw("stackalloc not on scheduler stack"); 747 runtime·throw("stackalloc not on scheduler stack");
(...skipping 26 matching lines...) Expand all
502 } 774 }
503 775
504 func GC() { 776 func GC() {
505 runtime·gc(1); 777 runtime·gc(1);
506 } 778 }
507 779
508 func SetFinalizer(obj Eface, finalizer Eface) { 780 func SetFinalizer(obj Eface, finalizer Eface) {
509 byte *base; 781 byte *base;
510 uintptr size; 782 uintptr size;
511 FuncType *ft; 783 FuncType *ft;
512 int32 i, nret; 784 int32 i;
785 uintptr nret;
513 Type *t; 786 Type *t;
514 787
515 if(obj.type == nil) { 788 if(obj.type == nil) {
516 runtime·printf("runtime.SetFinalizer: first argument is nil interface\n"); 789 runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
517 goto throw; 790 goto throw;
518 } 791 }
519 if(obj.type->kind != KindPtr) { 792 if(obj.type->kind != KindPtr) {
520 runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string); 793 runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
521 goto throw; 794 goto throw;
522 } 795 }
(... 22 lines skipped ...)
545 runtime·printf("runtime.SetFinalizer: finalizer already set\n"); 818 runtime·printf("runtime.SetFinalizer: finalizer already set\n");
546 goto throw; 819 goto throw;
547 } 820 }
548 return; 821 return;
549 822
550 badfunc: 823 badfunc:
551 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string); 824 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
552 throw: 825 throw:
553 runtime·throw("runtime.SetFinalizer"); 826 runtime·throw("runtime.SetFinalizer");
554 } 827 }
