Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 55100044: code review 55100044: runtime: use custom thunks for race calls instead of cgo (Closed)
Left Patch Set: diff -r efb71a1d099d https://dvyukov%40google.com@code.google.com/p/go/ Created 10 years, 1 month ago
Right Patch Set: diff -r 340da08f5f54 https://dvyukov%40google.com@code.google.com/p/go/ Created 10 years ago
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 // See malloc.h for overview. 5 // See malloc.h for overview.
6 // 6 //
7 // TODO(rsc): double-check stats. 7 // TODO(rsc): double-check stats.
8 8
9 package runtime 9 package runtime
10 #include "runtime.h" 10 #include "runtime.h"
11 #include "arch_GOARCH.h" 11 #include "arch_GOARCH.h"
12 #include "malloc.h" 12 #include "malloc.h"
13 #include "type.h" 13 #include "type.h"
14 #include "typekind.h" 14 #include "typekind.h"
15 #include "race.h" 15 #include "race.h"
16 #include "stack.h" 16 #include "stack.h"
17 #include "../../cmd/ld/textflag.h" 17 #include "../../cmd/ld/textflag.h"
18 18
19 // Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K. 19 // Mark mheap as 'no pointers', it does not contain interesting pointers but occupies ~45K.
20 #pragma dataflag NOPTR 20 #pragma dataflag NOPTR
21 MHeap runtime·mheap; 21 MHeap runtime·mheap;
22 MStats mstats; 22 MStats mstats;
23 23
24 int32 runtime·checking; 24 int32 runtime·checking;
25 25
26 extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go 26 extern MStats mstats; // defined in zruntime_def_$GOOS_$GOARCH.go
27 27
28 extern volatile intgo runtime·MemProfileRate; 28 extern volatile intgo runtime·MemProfileRate;
29 29
30 static void* largealloc(uint32, uintptr*); 30 static MSpan* largealloc(uint32, uintptr*);
31 static void profilealloc(void *v, uintptr size, uintptr typ);
32 static void settype(MSpan *s, void *v, uintptr typ);
31 33
32 // Allocate an object of at least size bytes. 34 // Allocate an object of at least size bytes.
33 // Small objects are allocated from the per-thread cache's free lists. 35 // Small objects are allocated from the per-thread cache's free lists.
34 // Large objects (> 32 kB) are allocated straight from the heap. 36 // Large objects (> 32 kB) are allocated straight from the heap.
35 // If the block will be freed with runtime·free(), typ must be 0. 37 // If the block will be freed with runtime·free(), typ must be 0.
36 void* 38 void*
37 runtime·mallocgc(uintptr size, uintptr typ, uint32 flag) 39 runtime·mallocgc(uintptr size, uintptr typ, uint32 flag)
38 { 40 {
39 int32 sizeclass; 41 int32 sizeclass;
40 uintptr tinysize, size1; 42 uintptr tinysize, size1;
41 intgo rate; 43 intgo rate;
42 MCache *c; 44 MCache *c;
43 » MCacheList *l; 45 » MSpan *s;
44 MLink *v, *next; 46 MLink *v, *next;
45 byte *tiny; 47 byte *tiny;
46 48
47 if(size == 0) { 49 if(size == 0) {
48 // All 0-length allocations use this pointer. 50 // All 0-length allocations use this pointer.
49 // The language does not require the allocations to 51 // The language does not require the allocations to
50 // have distinct values. 52 // have distinct values.
51 return &runtime·zerobase; 53 return &runtime·zerobase;
52 } 54 }
53 if(m->mallocing) 55 if(m->mallocing)
54 runtime·throw("malloc/free - deadlock"); 56 runtime·throw("malloc/free - deadlock");
55 » // Disable preemption during settype_flush. 57 » // Disable preemption during settype.
56 » // We can not use m->mallocing for this, because settype_flush calls mallocgc. 58 » // We can not use m->mallocing for this, because settype calls mallocgc.
57 m->locks++; 59 m->locks++;
58 m->mallocing = 1; 60 m->mallocing = 1;
59 61
60 if(DebugTypeAtBlockEnd) 62 if(DebugTypeAtBlockEnd)
61 size += sizeof(uintptr); 63 size += sizeof(uintptr);
62 64
63 c = m->mcache; 65 c = m->mcache;
64 if(!runtime·debug.efence && size <= MaxSmallSize) { 66 if(!runtime·debug.efence && size <= MaxSmallSize) {
65 if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) { 67 if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) {
66 // Tiny allocator. 68 // Tiny allocator.
(...skipping 43 matching lines...)
110 c->tiny += size1; 112 c->tiny += size1;
111 c->tinysize -= size1; 113 c->tinysize -= size1;
112 m->mallocing = 0; 114 m->mallocing = 0;
113 m->locks--; 115 m->locks--;
114 if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack 116 if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
115 g->stackguard0 = StackPreempt; 117 g->stackguard0 = StackPreempt;
116 return v; 118 return v;
117 } 119 }
118 } 120 }
119 // Allocate a new TinySize block. 121 // Allocate a new TinySize block.
120 » » » l = &c->list[TinySizeClass]; 122 » » » s = c->alloc[TinySizeClass];
121 » » » if(l->list == nil) 123 » » » if(s->freelist == nil)
122 » » » » runtime·MCache_Refill(c, TinySizeClass); 124 » » » » s = runtime·MCache_Refill(c, TinySizeClass);
123 » » » v = l->list; 125 » » » v = s->freelist;
124 next = v->next; 126 next = v->next;
127 s->freelist = next;
128 s->ref++;
125 if(next != nil) // prefetching nil leads to a DTLB miss 129 if(next != nil) // prefetching nil leads to a DTLB miss
126 PREFETCH(next); 130 PREFETCH(next);
127 l->list = next;
128 l->nlist--;
129 ((uint64*)v)[0] = 0; 131 ((uint64*)v)[0] = 0;
130 ((uint64*)v)[1] = 0; 132 ((uint64*)v)[1] = 0;
131 // See if we need to replace the existing tiny block with the new one 133 // See if we need to replace the existing tiny block with the new one
132 // based on amount of remaining free space. 134 // based on amount of remaining free space.
133 if(TinySize-size > tinysize) { 135 if(TinySize-size > tinysize) {
134 c->tiny = (byte*)v + size; 136 c->tiny = (byte*)v + size;
135 c->tinysize = TinySize - size; 137 c->tinysize = TinySize - size;
136 } 138 }
137 size = TinySize; 139 size = TinySize;
138 goto done; 140 goto done;
139 } 141 }
140 // Allocate from mcache free lists. 142 // Allocate from mcache free lists.
141 // Inlined version of SizeToClass(). 143 // Inlined version of SizeToClass().
142 if(size <= 1024-8) 144 if(size <= 1024-8)
143 sizeclass = runtime·size_to_class8[(size+7)>>3]; 145 sizeclass = runtime·size_to_class8[(size+7)>>3];
144 else 146 else
145 sizeclass = runtime·size_to_class128[(size-1024+127) >> 7]; 147 sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
146 size = runtime·class_to_size[sizeclass]; 148 size = runtime·class_to_size[sizeclass];
147 » » l = &c->list[sizeclass]; 149 » » s = c->alloc[sizeclass];
148 » » if(l->list == nil) 150 » » if(s->freelist == nil)
149 » » » runtime·MCache_Refill(c, sizeclass); 151 » » » s = runtime·MCache_Refill(c, sizeclass);
150 » » v = l->list; 152 » » v = s->freelist;
151 next = v->next; 153 next = v->next;
154 s->freelist = next;
155 s->ref++;
152 if(next != nil) // prefetching nil leads to a DTLB miss 156 if(next != nil) // prefetching nil leads to a DTLB miss
153 PREFETCH(next); 157 PREFETCH(next);
154 l->list = next;
155 l->nlist--;
156 if(!(flag & FlagNoZero)) { 158 if(!(flag & FlagNoZero)) {
157 v->next = nil; 159 v->next = nil;
158 // block is zeroed iff second word is zero ... 160 // block is zeroed iff second word is zero ...
159 if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0) 161 if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
160 runtime·memclr((byte*)v, size); 162 runtime·memclr((byte*)v, size);
161 } 163 }
162 done: 164 done:
163 c->local_cachealloc += size; 165 c->local_cachealloc += size;
164 } else { 166 } else {
165 // Allocate directly from heap. 167 // Allocate directly from heap.
166 » » v = largealloc(flag, &size); 168 » » s = largealloc(flag, &size);
169 » » v = (void*)(s->start << PageShift);
167 } 170 }
168 171
169 if(flag & FlagNoGC) 172 if(flag & FlagNoGC)
170 runtime·marknogc(v); 173 runtime·marknogc(v);
171 else if(!(flag & FlagNoScan)) 174 else if(!(flag & FlagNoScan))
172 runtime·markscan(v); 175 runtime·markscan(v);
173 176
174 if(DebugTypeAtBlockEnd) 177 if(DebugTypeAtBlockEnd)
175 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ; 178 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;
176 179
180 m->mallocing = 0;
177 // TODO: save type even if FlagNoScan? Potentially expensive but might help 181 // TODO: save type even if FlagNoScan? Potentially expensive but might help
178 // heap profiling/tracing. 182 // heap profiling/tracing.
179 » if(UseSpanType && !(flag & FlagNoScan) && typ != 0) { 183 » if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
180 » » uintptr *buf, i; 184 » » settype(s, v, typ);
181 185
182 » » buf = m->settype_buf;
183 » » i = m->settype_bufsize;
184 » » buf[i++] = (uintptr)v;
185 » » buf[i++] = typ;
186 » » m->settype_bufsize = i;
187 » }
188
189 » m->mallocing = 0;
190 » if(UseSpanType && !(flag & FlagNoScan) && typ != 0 && m->settype_bufsize == nelem(m->settype_buf))
191 » » runtime·settype_flush(m);
192 if(raceenabled) 186 if(raceenabled)
193 runtime·racemalloc(v, size); 187 runtime·racemalloc(v, size);
188
189 if(runtime·debug.allocfreetrace)
190 goto profile;
191
192 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
193 if(size < rate && size < c->next_sample)
194 c->next_sample -= size;
195 else {
196 profile:
197 profilealloc(v, size, typ);
198 }
199 }
200
194 m->locks--; 201 m->locks--;
195 if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack 202 if(m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
196 g->stackguard0 = StackPreempt; 203 g->stackguard0 = StackPreempt;
197 204
198 if(runtime·debug.allocfreetrace)
199 goto profile;
200
201 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
202 if(size >= rate)
203 goto profile;
204 if(m->mcache->next_sample > size)
205 m->mcache->next_sample -= size;
206 else {
207 // pick next profile time
208 // If you change this, also change allocmcache.
209 if(rate > 0x3fffffff) // make 2*rate not overflow
210 rate = 0x3fffffff;
211 m->mcache->next_sample = runtime·fastrand1() % (2*rate);
212 profile:
213 runtime·MProf_Malloc(v, size, typ);
214 }
215 }
216
217 if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc) 205 if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
218 runtime·gc(0); 206 runtime·gc(0);
219 207
220 return v; 208 return v;
221 } 209 }
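For readers tracing the new fast path: the patch drops the per-class MCacheList and points the MCache directly at an MSpan (c->alloc[sizeclass]), so a small allocation maps its size to a class, pops the head of the owning span's freelist, and bumps the span's live-object count inline. Below is a minimal sketch of that shape in portable C, with invented Span/Link types and placeholder class tables (the runtime generates the real tables at startup; this is not its actual API).

#include <stddef.h>

typedef struct Link Link;
struct Link { Link *next; };   /* free objects are chained through their first word */

typedef struct Span {
    Link  *freelist;           /* free objects remaining in this span */
    size_t ref;                /* objects handed out and still live */
} Span;

/* Placeholder tables standing in for runtime·size_to_class8 and
 * runtime·class_to_size; all-zero here, filled in by the real runtime. */
static const int    size_to_class8[129];
static const size_t class_to_size[64];

static void *alloc_small(Span **alloc, size_t size)
{
    int sizeclass;
    Span *s;
    Link *v;

    sizeclass = size_to_class8[(size + 7) >> 3];  /* inlined SizeToClass */
    size = class_to_size[sizeclass];              /* round up to the class size */
    (void)size;                                   /* would feed stats and zeroing */
    s = alloc[sizeclass];
    v = s->freelist;
    if (v == NULL)
        return NULL;       /* span exhausted: caller must refill (MCache_Refill) */
    s->freelist = v->next; /* pop the freelist head */
    s->ref++;              /* one more live object in this span */
    return v;
}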
222 210
223 static void* 211 static MSpan*
224 largealloc(uint32 flag, uintptr *sizep) 212 largealloc(uint32 flag, uintptr *sizep)
225 { 213 {
226 uintptr npages, size; 214 uintptr npages, size;
227 MSpan *s; 215 MSpan *s;
228 void *v; 216 void *v;
229 217
230 // Allocate directly from heap. 218 // Allocate directly from heap.
231 size = *sizep; 219 size = *sizep;
232 if(size + PageSize < size) 220 if(size + PageSize < size)
233 runtime·throw("out of memory"); 221 runtime·throw("out of memory");
234 npages = size >> PageShift; 222 npages = size >> PageShift;
235 if((size & PageMask) != 0) 223 if((size & PageMask) != 0)
236 npages++; 224 npages++;
237 s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero)); 225 s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, !(flag & FlagNoZero));
238 if(s == nil) 226 if(s == nil)
239 runtime·throw("out of memory"); 227 runtime·throw("out of memory");
240 s->limit = (byte*)(s->start<<PageShift) + size; 228 s->limit = (byte*)(s->start<<PageShift) + size;
241 *sizep = npages<<PageShift; 229 *sizep = npages<<PageShift;
242 v = (void*)(s->start << PageShift); 230 v = (void*)(s->start << PageShift);
243 // setup for mark sweep 231 // setup for mark sweep
244 runtime·markspan(v, 0, 0, true); 232 runtime·markspan(v, 0, 0, true);
245 » return v; 233 » return s;
234 }
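Aside on the arithmetic just above: largealloc rounds the request up to whole pages before calling MHeap_Alloc. A standalone version of that rounding, assuming 4 KB pages for the demo (the runtime's actual PageShift may differ):

#include <stdint.h>

enum { PageShift = 12, PageSize = 1 << PageShift, PageMask = PageSize - 1 };

/* Round a byte count up to whole pages, as largealloc does.
 * E.g. 40000 bytes -> 10 pages -> 40960 bytes reported back via *sizep. */
static uintptr_t round_to_pages(uintptr_t size)
{
    uintptr_t npages = size >> PageShift;  /* whole pages */
    if ((size & PageMask) != 0)            /* partial trailing page? */
        npages++;
    return npages << PageShift;
}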
235
236 static void
237 profilealloc(void *v, uintptr size, uintptr typ)
238 {
239 » uintptr rate;
240 » int32 next;
241 » MCache *c;
242
243 » c = m->mcache;
244 » rate = runtime·MemProfileRate;
245 » if(size < rate) {
246 » » // pick next profile time
247 » » // If you change this, also change allocmcache.
248 » » if(rate > 0x3fffffff)» // make 2*rate not overflow
249 » » » rate = 0x3fffffff;
250 » » next = runtime·fastrand1() % (2*rate);
251 » » // Subtract the "remainder" of the current allocation.
252 » » // Otherwise objects that are close in size to sampling rate
253 » » // will be under-sampled, because we consistently discard this remainder.
254 » » next -= (size - c->next_sample);
255 » » if(next < 0)
256 » » » next = 0;
257 » » c->next_sample = next;
258 » }
259 » runtime·MProf_Malloc(v, size, typ);
246 } 260 }
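The new profilealloc helper keeps the sampling threshold in the MCache instead of on the M. Drawing the next threshold uniformly from [0, 2*rate) makes samples occur on average every rate bytes, and subtracting the remainder of the triggering allocation avoids under-sampling objects whose size is close to the rate. A self-contained sketch of the scheme, with rand() standing in for runtime·fastrand1 and a plain static for the per-cache counter:

#include <stdlib.h>

static long next_sample;   /* bytes of allocation left until the next sample */

/* Return 1 if an allocation of `size` bytes should be sampled, sampling
 * on average once per `rate` bytes allocated (cf. profilealloc above). */
static int should_sample(long size, long rate)
{
    long next;

    if (size < rate && size < next_sample) {
        next_sample -= size;   /* fast path: just count down */
        return 0;
    }
    if (size < rate) {
        if (rate > 0x3fffffff)         /* make 2*rate not overflow */
            rate = 0x3fffffff;
        next = rand() % (2 * rate);    /* mean of uniform [0, 2*rate) is rate */
        /* Subtract the remainder of the current allocation; otherwise
         * objects close in size to the rate are consistently under-sampled. */
        next -= (size - next_sample);
        if (next < 0)
            next = 0;
        next_sample = next;
    }
    return 1;   /* record this allocation in the profile */
}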
247 261
248 void* 262 void*
249 runtime·malloc(uintptr size) 263 runtime·malloc(uintptr size)
250 { 264 {
251 return runtime·mallocgc(size, 0, FlagNoInvokeGC); 265 return runtime·mallocgc(size, 0, FlagNoInvokeGC);
252 } 266 }
253 267
254 // Free the object whose base pointer is v. 268 // Free the object whose base pointer is v.
255 void 269 void
(...skipping 18 matching lines...)
274 runtime·printf("free %p: not an allocated block\n", v); 288 runtime·printf("free %p: not an allocated block\n", v);
275 runtime·throw("free runtime·mlookup"); 289 runtime·throw("free runtime·mlookup");
276 } 290 }
277 size = s->elemsize; 291 size = s->elemsize;
278 sizeclass = s->sizeclass; 292 sizeclass = s->sizeclass;
279 // Objects that are smaller than TinySize can be allocated using tiny alloc, 293 // Objects that are smaller than TinySize can be allocated using tiny alloc,
280 // if then such object is combined with an object with finalizer, we will crash. 294 // if then such object is combined with an object with finalizer, we will crash.
281 if(size < TinySize) 295 if(size < TinySize)
282 runtime·throw("freeing too small block"); 296 runtime·throw("freeing too small block");
283 297
298 // Ensure that the span is swept.
299 // If we free into an unswept span, we will corrupt GC bitmaps.
300 runtime·MSpan_EnsureSwept(s);
301
284 if(s->specials != nil) 302 if(s->specials != nil)
285 runtime·freeallspecials(s, v, size); 303 runtime·freeallspecials(s, v, size);
286 304
287 c = m->mcache; 305 c = m->mcache;
288 if(sizeclass == 0) { 306 if(sizeclass == 0) {
289 // Large object. 307 // Large object.
290 » » *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;» // mark as "needs to be zeroed" 308 » » s->needzero = 1;
291 // Must mark v freed before calling unmarkspan and MHeap_Free: 309 // Must mark v freed before calling unmarkspan and MHeap_Free:
292 // they might coalesce v into other spans and change the bitmap further. 310 // they might coalesce v into other spans and change the bitmap further.
293 » » runtime·markfreed(v, size); 311 » » runtime·markfreed(v);
294 runtime·unmarkspan(v, 1<<PageShift); 312 runtime·unmarkspan(v, 1<<PageShift);
295 if(runtime·debug.efence) 313 if(runtime·debug.efence)
296 runtime·SysFree((void*)(s->start<<PageShift), size, &mstats.heap_sys); 314 runtime·SysFree((void*)(s->start<<PageShift), size, &mstats.heap_sys);
297 else 315 else
298 runtime·MHeap_Free(&runtime·mheap, s, 1); 316 runtime·MHeap_Free(&runtime·mheap, s, 1);
299 c->local_nlargefree++; 317 c->local_nlargefree++;
300 c->local_largefree += size; 318 c->local_largefree += size;
301 } else { 319 } else {
302 // Small object. 320 // Small object.
303 if(size > 2*sizeof(uintptr)) 321 if(size > 2*sizeof(uintptr))
304 ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed" 322 ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll; // mark as "needs to be zeroed"
305 else if(size > sizeof(uintptr)) 323 else if(size > sizeof(uintptr))
306 ((uintptr*)v)[1] = 0; 324 ((uintptr*)v)[1] = 0;
307 // Must mark v freed before calling MCache_Free: 325 // Must mark v freed before calling MCache_Free:
308 // it might coalesce v and other blocks into a bigger span 326 // it might coalesce v and other blocks into a bigger span
309 // and change the bitmap further. 327 // and change the bitmap further.
310 runtime·markfreed(v, size);
311 c->local_nsmallfree[sizeclass]++; 328 c->local_nsmallfree[sizeclass]++;
312 » » runtime·MCache_Free(c, v, sizeclass, size); 329 » » c->local_cachealloc -= size;
330 » » if(c->alloc[sizeclass] == s) {
331 » » » // We own the span, so we can just add v to the freelist
332 » » » runtime·markfreed(v);
333 » » » ((MLink*)v)->next = s->freelist;
334 » » » s->freelist = v;
335 » » » s->ref--;
336 » » } else {
337 » » » // Someone else owns this span. Add to free queue.
338 » » » runtime·MCache_Free(c, v, sizeclass, size);
339 » » }
313 } 340 }
314 m->mallocing = 0; 341 m->mallocing = 0;
315 } 342 }
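The rewritten small-object free path hinges on span ownership: if the freeing thread's cache currently holds the object's span for that size class (c->alloc[sizeclass] == s), the object can be pushed straight onto the span's freelist; otherwise it must go through MCache_Free so the owner (or the sweeper) reclaims it. A sketch of that branch, with the same invented Span/Link types as in the earlier sketch (redeclared so this fragment stands alone) and a stub for the deferred path:

#include <stddef.h>

typedef struct Link Link;
struct Link { Link *next; };

typedef struct Span {
    Link  *freelist;
    size_t ref;
} Span;

static void defer_free(Span *s, Link *v)
{
    /* Stub standing in for runtime·MCache_Free: queue v until the
     * span's owner can take it back. */
    (void)s; (void)v;
}

/* Free one small object v living in span s; `owned` is this cache's
 * current span for the object's size class. */
static void free_small(Span *owned, Span *s, Link *v)
{
    if (owned == s) {
        v->next = s->freelist;   /* we own the span: push onto its freelist */
        s->freelist = v;
        s->ref--;                /* one fewer live object */
    } else {
        defer_free(s, v);        /* someone else owns it: defer the free */
    }
}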
316 343
317 int32 344 int32
318 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp) 345 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
319 { 346 {
320 uintptr n, i; 347 uintptr n, i;
321 byte *p; 348 byte *p;
322 MSpan *s; 349 MSpan *s;
(...skipping 30 matching lines...)
353 380
354 n = s->elemsize; 381 n = s->elemsize;
355 if(base) { 382 if(base) {
356 i = ((byte*)v - p)/n; 383 i = ((byte*)v - p)/n;
357 *base = p + i*n; 384 *base = p + i*n;
358 } 385 }
359 if(size) 386 if(size)
360 *size = n; 387 *size = n;
361 388
362 return 1; 389 return 1;
363 }
364
365 MCache*
366 runtime·allocmcache(void)
367 {
368 intgo rate;
369 MCache *c;
370
371 runtime·lock(&runtime·mheap);
372 c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc);
373 runtime·unlock(&runtime·mheap);
374 runtime·memclr((byte*)c, sizeof(*c));
375
376 // Set first allocation sample size.
377 rate = runtime·MemProfileRate;
378 if(rate > 0x3fffffff) // make 2*rate not overflow
379 rate = 0x3fffffff;
380 if(rate != 0)
381 c->next_sample = runtime·fastrand1() % (2*rate);
382
383 return c;
384 }
385
386 void
387 runtime·freemcache(MCache *c)
388 {
389 runtime·MCache_ReleaseAll(c);
390 runtime·lock(&runtime·mheap);
391 runtime·purgecachedstats(c);
392 runtime·FixAlloc_Free(&runtime·mheap.cachealloc, c);
393 runtime·unlock(&runtime·mheap);
394 } 390 }
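In the mlookup hunk above, the base computation rounds a possibly interior pointer down to the start of its object. A tiny standalone version of that arithmetic:

#include <stddef.h>
#include <stdint.h>

/* Given a span of objects of size n starting at p, return the base of
 * the object containing the (possibly interior) pointer v. */
static uint8_t *object_base(uint8_t *p, size_t n, uint8_t *v)
{
    size_t i = (size_t)(v - p) / n;   /* index of v's object in the span */
    return p + i * n;                 /* that object's base address */
}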
395 391
396 void 392 void
397 runtime·purgecachedstats(MCache *c) 393 runtime·purgecachedstats(MCache *c)
398 { 394 {
399 MHeap *h; 395 MHeap *h;
400 int32 i; 396 int32 i;
401 397
402 // Protected by either heap or GC lock. 398 // Protected by either heap or GC lock.
403 h = &runtime·mheap; 399 h = &runtime·mheap;
(...skipping 257 matching lines...)
661 persistent.pos += size; 657 persistent.pos += size;
662 runtime·unlock(&persistent); 658 runtime·unlock(&persistent);
663 if(stat != &mstats.other_sys) { 659 if(stat != &mstats.other_sys) {
664 // reaccount the allocation against provided stat 660 // reaccount the allocation against provided stat
665 runtime·xadd64(stat, size); 661 runtime·xadd64(stat, size);
666 runtime·xadd64(&mstats.other_sys, -(uint64)size); 662 runtime·xadd64(&mstats.other_sys, -(uint64)size);
667 } 663 }
668 return p; 664 return p;
669 } 665 }
670 666
671 static Lock settype_lock; 667 static void
672 668 settype(MSpan *s, void *v, uintptr typ)
673 void 669 {
674 runtime·settype_flush(M *mp)
675 {
676 » uintptr *buf, *endbuf;
677 uintptr size, ofs, j, t; 670 uintptr size, ofs, j, t;
678 uintptr ntypes, nbytes2, nbytes3; 671 uintptr ntypes, nbytes2, nbytes3;
679 uintptr *data2; 672 uintptr *data2;
680 byte *data3; 673 byte *data3;
681 » void *v; 674
682 » uintptr typ, p; 675 » if(s->sizeclass == 0) {
683 » MSpan *s; 676 » » s->types.compression = MTypes_Single;
684 677 » » s->types.data = typ;
685 » buf = mp->settype_buf; 678 » » return;
686 » endbuf = buf + mp->settype_bufsize; 679 » }
687 680 » size = s->elemsize;
688 » runtime·lock(&settype_lock); 681 » ofs = ((uintptr)v - (s->start<<PageShift)) / size;
689 » while(buf < endbuf) { 682
690 » » v = (void*)*buf; 683 » switch(s->types.compression) {
691 » » *buf = 0; 684 » case MTypes_Empty:
692 » » buf++; 685 » » ntypes = (s->npages << PageShift) / size;
693 » » typ = *buf; 686 » » nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
694 » » buf++; 687 » » data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
695 688 » » s->types.compression = MTypes_Bytes;
696 » » // (Manually inlined copy of runtime·MHeap_Lookup) 689 » » s->types.data = (uintptr)data3;
697 » » p = (uintptr)v>>PageShift; 690 » » ((uintptr*)data3)[1] = typ;
698 » » p -= (uintptr)runtime·mheap.arena_start >> PageShift; 691 » » data3[8*sizeof(uintptr) + ofs] = 1;
699 » » s = runtime·mheap.spans[p]; 692 » » break;
700 693 » »·······
701 » » if(s->sizeclass == 0) { 694 » case MTypes_Words:
702 » » » s->types.compression = MTypes_Single; 695 » » ((uintptr*)s->types.data)[ofs] = typ;
703 » » » s->types.data = typ; 696 » » break;
704 » » » continue; 697 » »·······
705 » » } 698 » case MTypes_Bytes:
706 699 » » data3 = (byte*)s->types.data;
707 » » size = s->elemsize; 700 » » for(j=1; j<8; j++) {
708 » » ofs = ((uintptr)v - (s->start<<PageShift)) / size; 701 » » » if(((uintptr*)data3)[j] == typ) {
709 702 » » » » break;
710 » » switch(s->types.compression) { 703 » » » }
711 » » case MTypes_Empty: 704 » » » if(((uintptr*)data3)[j] == 0) {
705 » » » » ((uintptr*)data3)[j] = typ;
706 » » » » break;
707 » » » }
708 » » }
709 » » if(j < 8) {
710 » » » data3[8*sizeof(uintptr) + ofs] = j;
711 » » } else {
712 ntypes = (s->npages << PageShift) / size; 712 ntypes = (s->npages << PageShift) / size;
713 » » » nbytes3 = 8*sizeof(uintptr) + 1*ntypes; 713 » » » nbytes2 = ntypes * sizeof(uintptr);
714 » » » data3 = runtime·mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC); 714 » » » data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
715 » » » s->types.compression = MTypes_Bytes; 715 » » » s->types.compression = MTypes_Words;
716 » » » s->types.data = (uintptr)data3; 716 » » » s->types.data = (uintptr)data2;
717 » » » ((uintptr*)data3)[1] = typ; 717 » » »·······
718 » » » data3[8*sizeof(uintptr) + ofs] = 1; 718 » » » // Move the contents of data3 to data2. Then deallocate data3.
719 » » » break; 719 » » » for(j=0; j<ntypes; j++) {
720 720 » » » » t = data3[8*sizeof(uintptr) + j];
721 » » case MTypes_Words: 721 » » » » t = ((uintptr*)data3)[t];
722 » » » ((uintptr*)s->types.data)[ofs] = typ; 722 » » » » data2[j] = t;
723 » » » break;
724
725 » » case MTypes_Bytes:
726 » » » data3 = (byte*)s->types.data;
727 » » » for(j=1; j<8; j++) {
728 » » » » if(((uintptr*)data3)[j] == typ) {
729 » » » » » break;
730 » » » » }
731 » » » » if(((uintptr*)data3)[j] == 0) {
732 » » » » » ((uintptr*)data3)[j] = typ;
733 » » » » » break;
734 » » » » }
735 } 723 }
736 » » » if(j < 8) { 724 » » » data2[ofs] = typ;
737 » » » » data3[8*sizeof(uintptr) + ofs] = j; 725 » » }
738 » » » } else { 726 » » break;
739 » » » » ntypes = (s->npages << PageShift) / size; 727 » }
740 » » » » nbytes2 = ntypes * sizeof(uintptr);
741 » » » » data2 = runtime·mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
742 » » » » s->types.compression = MTypes_Words;
743 » » » » s->types.data = (uintptr)data2;
744
745 » » » » // Move the contents of data3 to data2. Then deallocate data3.
746 » » » » for(j=0; j<ntypes; j++) {
747 » » » » » t = data3[8*sizeof(uintptr) + j];
748 » » » » » t = ((uintptr*)data3)[t];
749 » » » » » data2[j] = t;
750 » » » » }
751 » » » » data2[ofs] = typ;
752 » » » }
753 » » » break;
754 » » }
755 » }
756 » runtime·unlock(&settype_lock);
757
758 » mp->settype_bufsize = 0;
759 } 728 }
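The settype function above records per-object type words using a small compression scheme: MTypes_Single for one-object spans, MTypes_Bytes while a span holds at most 8 distinct type words (a table of 8 uintptrs followed by one index byte per object), and MTypes_Words (a full uintptr per object) once a ninth distinct type appears. A minimal sketch of the Bytes encoding and its read-back (layout only, with a fixed-size demo struct; not the runtime's allocator plumbing):

#include <stddef.h>
#include <stdint.h>

/* MTypes_Bytes-style layout: 8 type words, then one index byte per
 * object.  Table slot 0 stays zero, so index 0 means "no type". */
typedef struct {
    uintptr_t table[8];   /* distinct type words seen in this span */
    uint8_t   idx[64];    /* one byte per object; 64 objects for the demo */
} BytesTypes;

/* Record `typ` for object `ofs`.  Returns 0 when the table is full,
 * i.e. the span must be upgraded to the word-per-object encoding. */
static int settype_bytes(BytesTypes *b, size_t ofs, uintptr_t typ)
{
    size_t j;

    for (j = 1; j < 8; j++) {
        if (b->table[j] == typ || b->table[j] == 0) {
            b->table[j] = typ;          /* reuse or claim a table slot */
            b->idx[ofs] = (uint8_t)j;   /* point the object at it */
            return 1;
        }
    }
    return 0;   /* more than 8 distinct types in this span */
}

/* Read the type word back for object `ofs` (cf. runtime·gettype). */
static uintptr_t gettype_bytes(const BytesTypes *b, size_t ofs)
{
    return b->table[b->idx[ofs]];
}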
760 729
761 uintptr 730 uintptr
762 runtime·gettype(void *v) 731 runtime·gettype(void *v)
763 { 732 {
764 MSpan *s; 733 MSpan *s;
765 uintptr t, ofs; 734 uintptr t, ofs;
766 byte *data; 735 byte *data;
767 736
768 s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); 737 s = runtime·MHeap_LookupMaybe(&runtime·mheap, v);
(...skipping 12 matching lines...)
781 case MTypes_Bytes: 750 case MTypes_Bytes:
782 ofs = (uintptr)v - (s->start<<PageShift); 751 ofs = (uintptr)v - (s->start<<PageShift);
783 data = (byte*)s->types.data; 752 data = (byte*)s->types.data;
784 t = data[8*sizeof(uintptr) + ofs/s->elemsize]; 753 t = data[8*sizeof(uintptr) + ofs/s->elemsize];
785 t = ((uintptr*)data)[t]; 754 t = ((uintptr*)data)[t];
786 break; 755 break;
787 default: 756 default:
788 runtime·throw("runtime·gettype: invalid compression kind"); 757 runtime·throw("runtime·gettype: invalid compression kind");
789 } 758 }
790 if(0) { 759 if(0) {
791 runtime·lock(&settype_lock);
792 runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t); 760 runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
793 runtime·unlock(&settype_lock);
794 } 761 }
795 return t; 762 return t;
796 } 763 }
797 return 0; 764 return 0;
798 } 765 }
799 766
800 // Runtime stubs. 767 // Runtime stubs.
801 768
802 void* 769 void*
803 runtime·mal(uintptr n) 770 runtime·mal(uintptr n)
804 { 771 {
805 return runtime·mallocgc(n, 0, 0); 772 return runtime·mallocgc(n, 0, 0);
806 } 773 }
807 774
808 #pragma textflag NOSPLIT 775 #pragma textflag NOSPLIT
809 void 776 func new(typ *Type) (ret *uint8) {
810 runtime·new(Type *typ, uint8 *ret)
811 {
812 ret = runtime·mallocgc(typ->size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0); 777 ret = runtime·mallocgc(typ->size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
813 FLUSH(&ret);
814 } 778 }
815 779
816 static void* 780 static void*
817 cnew(Type *typ, intgo n, int32 objtyp) 781 cnew(Type *typ, intgo n, int32 objtyp)
818 { 782 {
819 if((objtyp&(PtrSize-1)) != objtyp) 783 if((objtyp&(PtrSize-1)) != objtyp)
820 runtime·throw("runtime: invalid objtyp"); 784 runtime·throw("runtime: invalid objtyp");
821 if(n < 0 || (typ->size > 0 && n > MaxMem/typ->size)) 785 if(n < 0 || (typ->size > 0 && n > MaxMem/typ->size))
822 runtime·panicstring("runtime: allocation size out of range"); 786 runtime·panicstring("runtime: allocation size out of range");
823 return runtime·mallocgc(typ->size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0); 787 return runtime·mallocgc(typ->size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
824 } 788 }
825 789
826 // same as runtime·new, but callable from C 790 // same as runtime·new, but callable from C
827 void* 791 void*
828 runtime·cnew(Type *typ) 792 runtime·cnew(Type *typ)
829 { 793 {
830 return cnew(typ, 1, TypeInfo_SingleObject); 794 return cnew(typ, 1, TypeInfo_SingleObject);
831 } 795 }
832 796
833 void* 797 void*
834 runtime·cnewarray(Type *typ, intgo n) 798 runtime·cnewarray(Type *typ, intgo n)
835 { 799 {
836 return cnew(typ, n, TypeInfo_Array); 800 return cnew(typ, n, TypeInfo_Array);
837 } 801 }
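runtime·new and cnew pack extra information into mallocgc's typ argument: since a Type* is at least word-aligned, its low bits are free to carry a small tag such as TypeInfo_SingleObject or TypeInfo_Array, which is exactly what the `(objtyp&(PtrSize-1)) != objtyp` check in cnew enforces. A sketch of that pointer-tagging trick, with hypothetical tag values:

#include <assert.h>
#include <stdint.h>

/* Low-bit pointer tagging: an aligned pointer's bottom bits are zero,
 * so a tag smaller than the alignment can ride along in them. */
enum { TagSingleObject = 0, TagArray = 1, TagMask = sizeof(void*) - 1 };

static uintptr_t pack(const void *type_ptr, uintptr_t tag)
{
    assert(((uintptr_t)type_ptr & TagMask) == 0);  /* pointer is aligned */
    assert((tag & TagMask) == tag);                /* tag fits in the low bits */
    return (uintptr_t)type_ptr | tag;
}

static const void *unpack_ptr(uintptr_t typ)
{
    return (const void *)(typ & ~(uintptr_t)TagMask);
}

static uintptr_t unpack_tag(uintptr_t typ)
{
    return typ & TagMask;
}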
838 802
839 func GC() { 803 func GC() {
804 // We assume that the user expects unused memory to have
805 // been freed when GC returns. To ensure this, run gc(1) twice.
806 // The first will do a collection, and the second will force the
807 // first's sweeping to finish before doing a second collection.
808 // The second collection is overkill, but we assume the user
809 // has a good reason for calling runtime.GC and can stand the
810 // expense. At the least, this fixes all the calls to runtime.GC in
811 // tests that expect finalizers to start running when GC returns.
812 runtime·gc(1);
840 runtime·gc(1); 813 runtime·gc(1);
841 } 814 }
842 815
843 func SetFinalizer(obj Eface, finalizer Eface) { 816 func SetFinalizer(obj Eface, finalizer Eface) {
844 byte *base; 817 byte *base;
845 uintptr size; 818 uintptr size;
846 FuncType *ft; 819 FuncType *ft;
847 int32 i; 820 int32 i;
848 uintptr nret; 821 uintptr nret;
849 Type *t; 822 Type *t;
(...skipping 57 matching lines...)
907 // NOTE: asking to remove a finalizer when there currently isn't one set is OK. 880 // NOTE: asking to remove a finalizer when there currently isn't one set is OK.
908 runtime·removefinalizer(obj.data); 881 runtime·removefinalizer(obj.data);
909 } 882 }
910 return; 883 return;
911 884
912 badfunc: 885 badfunc:
913 runtime·printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.type->string, *finalizer.type->string); 886 runtime·printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.type->string, *finalizer.type->string);
914 throw: 887 throw:
915 runtime·throw("runtime.SetFinalizer"); 888 runtime·throw("runtime.SetFinalizer");
916 } 889 }
