Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 55100044: code review 55100044: runtime: use custom thunks for race calls instead of cgo (Closed)
Left Patch Set: diff -r 5f58c6d04b6d https://dvyukov%40google.com@code.google.com/p/go/ Created 10 years, 1 month ago
Right Patch Set: diff -r 340da08f5f54 https://dvyukov%40google.com@code.google.com/p/go/ Created 10 years ago
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
 // Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
 // See malloc.h for overview.
 //
 // TODO(rsc): double-check stats.
 
 package runtime
 #include "runtime.h"
 #include "arch_GOARCH.h"
 #include "malloc.h"
 #include "type.h"
 #include "typekind.h"
 #include "race.h"
 #include "stack.h"
 #include "../../cmd/ld/textflag.h"
 
 // Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
 #pragma dataflag NOPTR
 MHeap runtime·mheap;
 MStats mstats;
 
 int32 runtime·checking;
 
 extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go
 
 extern volatile intgo runtime·MemProfileRate;
 
-static void* largealloc(uint32, uintptr*);
+static MSpan* largealloc(uint32, uintptr*);
 static void profilealloc(void *v, uintptr size, uintptr typ);
+static void settype(MSpan *s, void *v, uintptr typ);
 
 // Allocate an object of at least size bytes.
 // Small objects are allocated from the per-thread cache's free lists.
 // Large objects (> 32 kB) are allocated straight from the heap.
 // If the block will be freed with runtime·free(), typ must be 0.
 void*
 runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
 {
 	int32 sizeclass;
 	uintptr tinysize, size1;
 	intgo rate;
 	MCache *c;
-	MCacheList *l;
+	MSpan *s;
 	MLink *v, *next;
 	byte *tiny;
 
 	if(size == 0) {
 		// All 0-length allocations use this pointer.
 		// The language does not require the allocations to
 		// have distinct values.
 		return &runtime·zerobase;
 	}
 	if(m->mallocing)
 		runtime·throw("malloc/free - deadlock");
-	// Disable preemption during settype_flush.
-	// We can not use m->mallocing for this, because settype_flush calls mallocgc.
+	// Disable preemption during settype.
+	// We can not use m->mallocing for this, because settype calls mallocgc.
 	m->locks++;
 	m->mallocing = 1;
 
 	if(DebugTypeAtBlockEnd)
 		size += sizeof(uintptr);
 
 	c = m->mcache;
 	if(!runtime·debug.efence && size <= MaxSmallSize) {
 		if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) {
 			// Tiny allocator.
(...skipping 43 matching lines...)
 					c->tiny += size1;
 					c->tinysize -= size1;
 					m->mallocing = 0;
 					m->locks--;
 					if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
 						g->stackguard0 = StackPreempt;
 					return v;
 				}
 			}
 			// Allocate a new TinySize block.
-			l = &c->list[TinySizeClass];
-			if(l->list == nil)
-				runtime·MCache_Refill(c, TinySizeClass);
-			v = l->list;
+			s = c->alloc[TinySizeClass];
+			if(s->freelist == nil)
+				s = runtime·MCache_Refill(c, TinySizeClass);
+			v = s->freelist;
 			next = v->next;
+			s->freelist = next;
+			s->ref++;
 			if(next != nil)  // prefetching nil leads to a DTLB miss
 				PREFETCH(next);
-			l->list = next;
-			l->nlist--;
 			((uint64*)v)[0] = 0;
 			((uint64*)v)[1] = 0;
 			// See if we need to replace the existing tiny block with the new one
 			// based on amount of remaining free space.
 			if(TinySize-size > tinysize) {
 				c->tiny = (byte*)v + size;
 				c->tinysize = TinySize - size;
 			}
 			size = TinySize;
 			goto done;
 		}
 		// Allocate from mcache free lists.
 		// Inlined version of SizeToClass().
 		if(size <= 1024-8)
 			sizeclass = runtime·size_to_class8[(size+7)>>3];
 		else
 			sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
 		size = runtime·class_to_size[sizeclass];
-		l = &c->list[sizeclass];
-		if(l->list == nil)
-			runtime·MCache_Refill(c, sizeclass);
-		v = l->list;
+		s = c->alloc[sizeclass];
+		if(s->freelist == nil)
+			s = runtime·MCache_Refill(c, sizeclass);
+		v = s->freelist;
 		next = v->next;
+		s->freelist = next;
+		s->ref++;
 		if(next != nil)  // prefetching nil leads to a DTLB miss
 			PREFETCH(next);
-		l->list = next;
-		l->nlist--;
 		if(!(flag & FlagNoZero)) {
 			v->next = nil;
 			// block is zeroed iff second word is zero ...
 			if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
 				runtime·memclr((byte*)v, size);
 		}
 	done:
 		c->local_cachealloc += size;
 	} else {
 		// Allocate directly from heap.
-		v = largealloc(flag, &size);
+		s = largealloc(flag, &size);
+		v = (void*)(s->start << PageShift);
 	}
 
 	if(flag & FlagNoGC)
 		runtime·marknogc(v);
 	else if(!(flag & FlagNoScan))
 		runtime·markscan(v);
 
 	if(DebugTypeAtBlockEnd)
 		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;
 
+	m->mallocing = 0;
 	// TODO: save type even if FlagNoScan?  Potentially expensive but might help
 	// heap profiling/tracing.
-	if(UseSpanType && !(flag & FlagNoScan) && typ != 0) {
-		uintptr *buf, i;
-
-		buf = m->settype_buf;
-		i = m->settype_bufsize;
-		buf[i++] = (uintptr)v;
-		buf[i++] = typ;
-		m->settype_bufsize = i;
-	}
-
-	m->mallocing = 0;
-	if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
-		runtime·settype_flush(m);
+	if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
+		settype(s, v, typ);
+
 	if(raceenabled)
 		runtime·racemalloc(v, size);
 
 	if(runtime·debug.allocfreetrace)
 		goto profile;
 
 	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
 		if(size < rate && size < c->next_sample)
 			c->next_sample -= size;
 		else {
 		profile:
 			profilealloc(v, size, typ);
 		}
 	}
 
 	m->locks--;
 	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
 		g->stackguard0 = StackPreempt;
 
 	if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
 		runtime·gc(0);
 
 	return v;
 }
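The core change to runtime·mallocgc above: the cache no longer keeps per-sizeclass MCacheList free lists. c->alloc[sizeclass] now points at an MSpan, allocation pops objects directly off the span's freelist, and s->ref counts live objects in the span. A minimal self-contained sketch of that pop-or-refill pattern follows; Link, Span, Cache, refill, ElemSize, and NElem are illustrative stand-ins, not the runtime's real definitions:

#include <stddef.h>
#include <stdlib.h>

// Illustrative stand-ins; the runtime's MSpan/MCache carry much more state.
typedef struct Link Link;
struct Link { Link *next; };

typedef struct Span Span;
struct Span { Link *freelist; int ref; };

typedef struct Cache Cache;
struct Cache { Span *alloc[64]; };

enum { ElemSize = 64, NElem = 16 };

// Stand-in for runtime·MCache_Refill: hand the cache a span whose
// freelist is threaded through a freshly allocated block.
static Span*
refill(Cache *c, int sizeclass)
{
	Span *s = malloc(sizeof *s);
	char *block = malloc(NElem * ElemSize);
	int i;

	s->freelist = NULL;
	s->ref = 0;
	for(i = NElem-1; i >= 0; i--) {
		Link *l = (Link*)(block + i*ElemSize);
		l->next = s->freelist;
		s->freelist = l;
	}
	c->alloc[sizeclass] = s;
	return s;
}

// The pattern the new fast path uses: pop the first free object off
// the span's freelist and count it as live in the span itself.
static void*
alloc_from_class(Cache *c, int sizeclass)
{
	Span *s = c->alloc[sizeclass];
	Link *v;

	if(s == NULL || s->freelist == NULL)
		s = refill(c, sizeclass);
	v = s->freelist;
	s->freelist = v->next;
	s->ref++;
	return v;
}

Keeping the freelist in the span itself is what lets the free path further down return an object with a plain list push when the same cache still owns the span.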
 
-static void*
+static MSpan*
 largealloc(uint32 flag, uintptr *sizep)
 {
 	uintptr npages, size;
 	MSpan *s;
 	void *v;
 
 	// Allocate directly from heap.
 	size = *sizep;
 	if(size + PageSize < size)
 		runtime·throw("out of memory");
 	npages = size >> PageShift;
 	if((size & PageMask) != 0)
 		npages++;
 	s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
 	if(s == nil)
 		runtime·throw("out of memory");
 	s->limit = (byte*)(s->start<<PageShift) + size;
 	*sizep = npages<<PageShift;
 	v = (void*)(s->start << PageShift);
 	// setup for mark sweep
 	runtime·markspan(v, 0, 0, true);
-	return v;
+	return s;
 }
 
 static void
 profilealloc(void *v, uintptr size, uintptr typ)
 {
 	uintptr rate;
 	int32 next;
 	MCache *c;
 
 	c = m->mcache;
(...skipping 57 matching lines...)
 
 	if(s->specials != nil)
 		runtime·freeallspecials(s, v, size);
 
 	c = m->mcache;
 	if(sizeclass == 0) {
 		// Large object.
 		s->needzero = 1;
 		// Must mark v freed before calling unmarkspan and MHeap_Free:
 		// they might coalesce v into other spans and change the bitmap further.
-		runtime·markfreed(v, size);
+		runtime·markfreed(v);
 		runtime·unmarkspan(v, 1<<PageShift);
 		if(runtime·debug.efence)
 			runtime·SysFree((void*)(s->start<<PageShift), size, &mstats.heap_sys);
 		else
 			runtime·MHeap_Free(&runtime·mheap, s, 1);
 		c->local_nlargefree++;
 		c->local_largefree += size;
 	} else {
 		// Small object.
 		if(size > 2*sizeof(uintptr))
 			((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;	// mark as "needs to be zeroed"
 		else if(size > sizeof(uintptr))
 			((uintptr*)v)[1] = 0;
 		// Must mark v freed before calling MCache_Free:
 		// it might coalesce v and other blocks into a bigger span
 		// and change the bitmap further.
-		runtime·markfreed(v, size);
 		c->local_nsmallfree[sizeclass]++;
-		runtime·MCache_Free(c, v, sizeclass, size);
+		c->local_cachealloc -= size;
+		if(c->alloc[sizeclass] == s) {
+			// We own the span, so we can just add v to the freelist
+			runtime·markfreed(v);
+			((MLink*)v)->next = s->freelist;
+			s->freelist = v;
+			s->ref--;
+		} else {
+			// Someone else owns this span. Add to free queue.
+			runtime·MCache_Free(c, v, sizeclass, size);
+		}
 	}
 	m->mallocing = 0;
 }
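The matching change in the small-object free path above: rather than always routing through runtime·MCache_Free, the code first checks whether this cache still owns the span (c->alloc[sizeclass] == s) and, if so, pushes the object straight back onto the span's freelist and decrements s->ref. A sketch of just that branch, reusing the same illustrative stand-in types as the allocation sketch; queue_free is a hypothetical stub standing in for the MCache_Free slow path:

// Illustrative stand-ins, as in the allocation sketch above.
typedef struct Link Link;
struct Link { Link *next; };

typedef struct Span Span;
struct Span { Link *freelist; int ref; };

typedef struct Cache Cache;
struct Cache { Span *alloc[64]; };

// Stub: the real runtime·MCache_Free queues v so the span's current
// owner can absorb it later.
static void
queue_free(Cache *c, void *v, int sizeclass)
{
	(void)c; (void)v; (void)sizeclass;
}

// Fast path: if this cache owns the span, the object goes straight
// back onto the span's freelist; otherwise it is queued for the owner.
static void
free_small(Cache *c, Span *s, void *v, int sizeclass)
{
	if(c->alloc[sizeclass] == s) {
		((Link*)v)->next = s->freelist;
		s->freelist = v;
		s->ref--;
	} else {
		queue_free(c, v, sizeclass);
	}
}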
 
 int32
 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
 {
 	uintptr n, i;
 	byte *p;
 	MSpan *s;
(...skipping 30 matching lines...)
 
 	n = s->elemsize;
 	if(base) {
 		i = ((byte*)v - p)/n;
 		*base = p + i*n;
 	}
 	if(size)
 		*size = n;
 
 	return 1;
-}
-
-MCache*
-runtime·allocmcache(void)
-{
-	intgo rate;
-	MCache *c;
-
-	runtime·lock(&runtime·mheap);
-	c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
-	runtime·unlock(&runtime·mheap);
-	runtime·memclr((byte*)c, sizeof(*c));
-
-	// Set first allocation sample size.
-	rate = runtime·MemProfileRate;
-	if(rate > 0x3fffffff)	// make 2*rate not overflow
-		rate = 0x3fffffff;
-	if(rate != 0)
-		c->next_sample = runtime·fastrand1() % (2*rate);
-
-	return c;
-}
-
-void
-runtime·freemcache(MCache *c)
-{
-	runtime·MCache_ReleaseAll(c);
-	runtime·lock(&runtime·mheap);
-	runtime·purgecachedstats(c);
-	runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
-	runtime·unlock(&runtime·mheap);
 }
 
 void
 runtime·purgecachedstats(MCache *c)
 {
 	MHeap *h;
 	int32 i;
 
 	// Protected by either heap or GC lock.
 	h = &runtime·mheap;
(...skipping 257 matching lines...)
 	persistent.pos += size;
 	runtime·unlock(&persistent);
 	if(stat != &mstats.other_sys) {
 		// reaccount the allocation against provided stat
 		runtime·xadd64(stat, size);
 		runtime·xadd64(&mstats.other_sys, -(uint64)size);
 	}
 	return p;
 }
 
-static Lock settype_lock;
-
-void
-runtime·settype_flush(M *mp)
+static void
+settype(MSpan *s, void *v, uintptr typ)
 {
-	uintptr *buf, *endbuf;
 	uintptr size, ofs, j, t;
 	uintptr ntypes, nbytes2, nbytes3;
 	uintptr *data2;
 	byte *data3;
-	void *v;
-	uintptr typ, p;
-	MSpan *s;
-
-	buf = mp->settype_buf;
-	endbuf = buf + mp->settype_bufsize;
-
-	runtime·lock(&settype_lock);
-	while(buf < endbuf) {
-		v = (void*)*buf;
-		*buf = 0;
-		buf++;
-		typ = *buf;
-		buf++;
-
-		// (Manually inlined copy of runtime·MHeap_Lookup)
-		p = (uintptr)v>>PageShift;
-		p -= (uintptr)runtime·mheap.arena_start >> PageShift;
-		s = runtime·mheap.spans[p];
-
-		if(s->sizeclass == 0) {
-			s->types.compression = MTypes_Single;
-			s->types.data = typ;
-			continue;
-		}
-
-		size = s->elemsize;
-		ofs = ((uintptr)v - (s->start<<PageShift)) / size;
-
-		switch(s->types.compression) {
-		case MTypes_Empty:
-			ntypes = (s->npages << PageShift) / size;
-			nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
-			data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
-			s->types.compression = MTypes_Bytes;
-			s->types.data = (uintptr)data3;
-			((uintptr*)data3)[1] = typ;
-			data3[8*sizeof(uintptr) + ofs] = 1;
-			break;
-
-		case MTypes_Words:
-			((uintptr*)s->types.data)[ofs] = typ;
-			break;
-
-		case MTypes_Bytes:
-			data3 = (byte*)s->types.data;
-			for(j=1; j<8; j++) {
-				if(((uintptr*)data3)[j] == typ) {
-					break;
-				}
-				if(((uintptr*)data3)[j] == 0) {
-					((uintptr*)data3)[j] = typ;
-					break;
-				}
-			}
-			if(j < 8) {
-				data3[8*sizeof(uintptr) + ofs] = j;
-			} else {
-				ntypes = (s->npages << PageShift) / size;
-				nbytes2 = ntypes * sizeof(uintptr);
-				data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
-				s->types.compression = MTypes_Words;
-				s->types.data = (uintptr)data2;
-
-				// Move the contents of data3 to data2. Then deallocate data3.
-				for(j=0; j<ntypes; j++) {
-					t = data3[8*sizeof(uintptr) + j];
-					t = ((uintptr*)data3)[t];
-					data2[j] = t;
-				}
-				data2[ofs] = typ;
-			}
-			break;
-		}
-	}
-	runtime·unlock(&settype_lock);
-
-	mp->settype_bufsize = 0;
+
+	if(s->sizeclass == 0) {
+		s->types.compression = MTypes_Single;
+		s->types.data = typ;
+		return;
+	}
+	size = s->elemsize;
+	ofs = ((uintptr)v - (s->start<<PageShift)) / size;
+
+	switch(s->types.compression) {
+	case MTypes_Empty:
+		ntypes = (s->npages << PageShift) / size;
+		nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
+		data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+		s->types.compression = MTypes_Bytes;
+		s->types.data = (uintptr)data3;
+		((uintptr*)data3)[1] = typ;
+		data3[8*sizeof(uintptr) + ofs] = 1;
+		break;
+
+	case MTypes_Words:
+		((uintptr*)s->types.data)[ofs] = typ;
+		break;
+
+	case MTypes_Bytes:
+		data3 = (byte*)s->types.data;
+		for(j=1; j<8; j++) {
+			if(((uintptr*)data3)[j] == typ) {
+				break;
+			}
+			if(((uintptr*)data3)[j] == 0) {
+				((uintptr*)data3)[j] = typ;
+				break;
+			}
+		}
+		if(j < 8) {
+			data3[8*sizeof(uintptr) + ofs] = j;
+		} else {
+			ntypes = (s->npages << PageShift) / size;
+			nbytes2 = ntypes * sizeof(uintptr);
+			data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
+			s->types.compression = MTypes_Words;
+			s->types.data = (uintptr)data2;
+
+			// Move the contents of data3 to data2. Then deallocate data3.
+			for(j=0; j<ntypes; j++) {
+				t = data3[8*sizeof(uintptr) + j];
+				t = ((uintptr*)data3)[t];
+				data2[j] = t;
+			}
+			data2[ofs] = typ;
+		}
+		break;
+	}
 }
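The settype function above is the old buffered settype_flush logic moved inline: it now runs against a span the caller already holds, with preemption disabled in mallocgc, so the settype_lock and the per-M settype buffer disappear. The MTypes_Bytes case it maintains is a small dictionary compression: the span's type data begins with eight uintptr dictionary slots (slot 0 reserved to mean "no type"), followed by one index byte per object; once more than seven distinct types show up, the code falls back to a full word per object (MTypes_Words). A standalone sketch of just that byte-encoded store and lookup; bytes_store and bytes_lookup are illustrative names, and the casts assume the buffer is word-aligned, as the real allocation guarantees:

#include <stdint.h>
#include <stddef.h>

typedef uintptr_t uintptr;

// Layout sketched here: [8 uintptr dictionary][1 index byte per object].
// Returns the type word recorded for object number ofs.
static uintptr
bytes_lookup(unsigned char *data, size_t ofs)
{
	unsigned char idx = data[8*sizeof(uintptr) + ofs];
	return ((uintptr*)data)[idx];
}

// Records typ for object number ofs. Returns 0 when the dictionary is
// already full of other types, i.e. the caller must switch to the
// one-word-per-object encoding.
static int
bytes_store(unsigned char *data, size_t ofs, uintptr typ)
{
	size_t j;

	for(j = 1; j < 8; j++) {
		if(((uintptr*)data)[j] == typ)
			break;			// typ already in the dictionary
		if(((uintptr*)data)[j] == 0) {
			((uintptr*)data)[j] = typ;	// claim an empty slot
			break;
		}
	}
	if(j == 8)
		return 0;
	data[8*sizeof(uintptr) + ofs] = (unsigned char)j;
	return 1;
}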
 
 uintptr
 runtime·gettype(void *v)
 {
 	MSpan *s;
 	uintptr t, ofs;
 	byte *data;
 
 	s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
(...skipping 12 matching lines...)
 		case MTypes_Bytes:
 			ofs = (uintptr)v - (s->start<<PageShift);
 			data = (byte*)s->types.data;
 			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
 			t = ((uintptr*)data)[t];
 			break;
 		default:
 			runtime·throw("runtime·gettype: invalid compression kind");
 		}
 		if(0) {
-			runtime·lock(&settype_lock);
 			runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
-			runtime·unlock(&settype_lock);
 		}
 		return t;
 	}
 	return 0;
 }
 
 // Runtime stubs.
 
 void*
 runtime·mal(uintptr n)
(...skipping 23 matching lines...)
 	return cnew(typ, 1, TypeInfo_SingleObject);
 }
 
 void*
 runtime·cnewarray(Type *typ, intgo n)
 {
 	return cnew(typ, n, TypeInfo_Array);
 }
 
 func GC() {
+	// We assume that the user expects unused memory to have
+	// been freed when GC returns. To ensure this, run gc(1) twice.
+	// The first will do a collection, and the second will force the
+	// first's sweeping to finish before doing a second collection.
+	// The second collection is overkill, but we assume the user
+	// has a good reason for calling runtime.GC and can stand the
+	// expense. At the least, this fixes all the calls to runtime.GC in
+	// tests that expect finalizers to start running when GC returns.
+	runtime·gc(1);
 	runtime·gc(1);
 }
 
 func SetFinalizer(obj Eface, finalizer Eface) {
 	byte *base;
 	uintptr size;
 	FuncType *ft;
 	int32 i;
 	uintptr nret;
 	Type *t;
(...skipping 57 matching lines...)
 		// NOTE: asking to remove a finalizer when there currently isn't one set is OK.
 		runtime·removefinalizer(obj.data);
 	}
 	return;
 
 badfunc:
 	runtime·printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.type->string, *finalizer.type->string);
 throw:
 	runtime·throw("runtime.SetFinalizer");
 }
