Rietveld Code Review Tool
Help | Bug tracker | Discussion group | Source code | Sign in
(2611)

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 6114046: code review 6114046: runtime, reflect, ld, gc: garbage collection precision ... (Closed)
Left Patch Set: diff -r 9e5ed0741dc8 https://go.googlecode.com/hg/ Created 11 years, 10 months ago
Right Patch Set: diff -r d6e06d0f3c29 https://code.google.com/p/go/ Created 10 years, 10 months ago
Left:
Right:
Use n/p to move between diff chunks; N/P to move between comments. Please Sign in to add in-line comments.
Jump to:
Left: Side by side diff | Download
Right: Side by side diff | Download
« no previous file with change/comment | « no previous file | src/pkg/runtime/mgc0.c » ('j') | no next file with change/comment »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
LEFTRIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 // See malloc.h for overview. 5 // See malloc.h for overview.
6 // 6 //
7 // TODO(rsc): double-check stats. 7 // TODO(rsc): double-check stats.
8 8
9 package runtime 9 package runtime
10 #include "runtime.h" 10 #include "runtime.h"
11 #include "arch_GOARCH.h" 11 #include "arch_GOARCH.h"
12 #include "stack.h"
13 #include "malloc.h" 12 #include "malloc.h"
14 #include "defs_GOOS_GOARCH.h"
15 #include "type.h" 13 #include "type.h"
16 14 #include "typekind.h"
17 #pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */ 15 #include "race.h"
18 MHeap runtime·mheap; 16
19 17 MHeap *runtime·mheap;
20 extern MStats mstats;» // defined in extern.go 18
21 19 int32» runtime·checking;
22 extern volatile int32 runtime·MemProfileRate; 20
21 extern MStats mstats;» // defined in zruntime_def_$GOOS_$GOARCH.go
22
23 extern volatile intgo runtime·MemProfileRate;
23 24
24 // Allocate an object of at least size bytes. 25 // Allocate an object of at least size bytes.
25 // Small objects are allocated from the per-thread cache's free lists. 26 // Small objects are allocated from the per-thread cache's free lists.
26 // Large objects (> 32 kB) are allocated straight from the heap. 27 // Large objects (> 32 kB) are allocated straight from the heap.
27 void* 28 void*
28 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) 29 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
29 { 30 {
30 » int32 sizeclass, rate; 31 » int32 sizeclass;
32 » intgo rate;
31 MCache *c; 33 MCache *c;
32 uintptr npages; 34 uintptr npages;
33 MSpan *s; 35 MSpan *s;
34 void *v; 36 void *v;
35 37
36 » if(runtime·gcwaiting && g != m->g0 && m->locks == 0) 38 » if(runtime·gcwaiting && g != m->g0 && m->locks == 0 && dogc)
37 runtime·gosched(); 39 runtime·gosched();
38 if(m->mallocing) 40 if(m->mallocing)
39 runtime·throw("malloc/free - deadlock"); 41 runtime·throw("malloc/free - deadlock");
40 m->mallocing = 1; 42 m->mallocing = 1;
41 if(size == 0) 43 if(size == 0)
42 size = 1; 44 size = 1;
43 45
44 if(DebugTypeAtBlockEnd) 46 if(DebugTypeAtBlockEnd)
45 size += sizeof(uintptr); 47 size += sizeof(uintptr);
46 48
47 c = m->mcache; 49 c = m->mcache;
48 c->local_nmalloc++; 50 c->local_nmalloc++;
49 if(size <= MaxSmallSize) { 51 if(size <= MaxSmallSize) {
50 // Allocate from mcache free lists. 52 // Allocate from mcache free lists.
51 » » sizeclass = runtime·SizeToClass(size); 53 » » // Inlined version of SizeToClass().
54 » » if(size <= 1024-8)
55 » » » sizeclass = runtime·size_to_class8[(size+7)>>3];
56 » » else
57 » » » sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
52 size = runtime·class_to_size[sizeclass]; 58 size = runtime·class_to_size[sizeclass];
53 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed); 59 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
54 if(v == nil) 60 if(v == nil)
55 runtime·throw("out of memory"); 61 runtime·throw("out of memory");
56 c->local_alloc += size; 62 c->local_alloc += size;
57 c->local_total_alloc += size; 63 c->local_total_alloc += size;
58 c->local_by_size[sizeclass].nmalloc++; 64 c->local_by_size[sizeclass].nmalloc++;
59 } else { 65 } else {
60 // TODO(rsc): Report tracebacks for very large allocations. 66 // TODO(rsc): Report tracebacks for very large allocations.
61 67
62 // Allocate directly from heap. 68 // Allocate directly from heap.
63 npages = size >> PageShift; 69 npages = size >> PageShift;
64 if((size & PageMask) != 0) 70 if((size & PageMask) != 0)
65 npages++; 71 npages++;
66 » » s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1, zeroed); 72 » » s = runtime·MHeap_Alloc(runtime·mheap, npages, 0, 1, zeroed);
67 if(s == nil) 73 if(s == nil)
68 runtime·throw("out of memory"); 74 runtime·throw("out of memory");
69 size = npages<<PageShift; 75 size = npages<<PageShift;
70 c->local_alloc += size; 76 c->local_alloc += size;
71 c->local_total_alloc += size; 77 c->local_total_alloc += size;
72 v = (void*)(s->start << PageShift); 78 v = (void*)(s->start << PageShift);
73 79
74 // setup for mark sweep 80 // setup for mark sweep
75 runtime·markspan(v, 0, 0, true); 81 runtime·markspan(v, 0, 0, true);
76 } 82 }
83
84 if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
85 // purge cache stats to prevent overflow
86 runtime·lock(runtime·mheap);
87 runtime·purgecachedstats(c);
88 runtime·unlock(runtime·mheap);
89 }
90
77 if(!(flag & FlagNoGC)) 91 if(!(flag & FlagNoGC))
78 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0); 92 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);
79 93
80 if(DebugTypeAtBlockEnd) 94 if(DebugTypeAtBlockEnd)
81 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0; 95 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;
82 96
83 m->mallocing = 0; 97 m->mallocing = 0;
84 98
85 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) { 99 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
86 if(size >= rate) 100 if(size >= rate)
87 goto profile; 101 goto profile;
88 if(m->mcache->next_sample > size) 102 if(m->mcache->next_sample > size)
89 m->mcache->next_sample -= size; 103 m->mcache->next_sample -= size;
90 else { 104 else {
91 // pick next profile time 105 // pick next profile time
92 // If you change this, also change allocmcache. 106 // If you change this, also change allocmcache.
93 if(rate > 0x3fffffff) // make 2*rate not overflow 107 if(rate > 0x3fffffff) // make 2*rate not overflow
94 rate = 0x3fffffff; 108 rate = 0x3fffffff;
95 m->mcache->next_sample = runtime·fastrand1() % (2*rate); 109 m->mcache->next_sample = runtime·fastrand1() % (2*rate);
96 profile: 110 profile:
97 runtime·setblockspecial(v, true); 111 runtime·setblockspecial(v, true);
98 runtime·MProf_Malloc(v, size); 112 runtime·MProf_Malloc(v, size);
99 } 113 }
100 } 114 }
101 115
102 if(dogc && mstats.heap_alloc >= mstats.next_gc) 116 if(dogc && mstats.heap_alloc >= mstats.next_gc)
103 runtime·gc(0); 117 runtime·gc(0);
118
119 if(raceenabled) {
120 runtime·racemalloc(v, size, m->racepc);
121 m->racepc = nil;
122 }
104 return v; 123 return v;
105 } 124 }
106 125
107 void* 126 void*
108 runtime·malloc(uintptr size) 127 runtime·malloc(uintptr size)
109 { 128 {
110 return runtime·mallocgc(size, 0, 0, 1); 129 return runtime·mallocgc(size, 0, 0, 1);
111 } 130 }
112 131
113 // Free the object whose base pointer is v. 132 // Free the object whose base pointer is v.
(...skipping 14 matching lines...) Expand all
128 147
129 if(m->mallocing) 148 if(m->mallocing)
130 runtime·throw("malloc/free - deadlock"); 149 runtime·throw("malloc/free - deadlock");
131 m->mallocing = 1; 150 m->mallocing = 1;
132 151
133 if(!runtime·mlookup(v, nil, nil, &s)) { 152 if(!runtime·mlookup(v, nil, nil, &s)) {
134 runtime·printf("free %p: not an allocated block\n", v); 153 runtime·printf("free %p: not an allocated block\n", v);
135 runtime·throw("free runtime·mlookup"); 154 runtime·throw("free runtime·mlookup");
136 } 155 }
137 prof = runtime·blockspecial(v); 156 prof = runtime·blockspecial(v);
157
158 if(raceenabled)
159 runtime·racefree(v);
138 160
139 // Find size class for v. 161 // Find size class for v.
140 sizeclass = s->sizeclass; 162 sizeclass = s->sizeclass;
141 c = m->mcache; 163 c = m->mcache;
142 if(sizeclass == 0) { 164 if(sizeclass == 0) {
143 // Large object. 165 // Large object.
144 size = s->npages<<PageShift; 166 size = s->npages<<PageShift;
145 » » *(uintptr*)(s->start<<PageShift) = 1;» // mark as "needs to be zeroed" 167 » » *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;» // mark as "needs to be zeroed"
146 // Must mark v freed before calling unmarkspan and MHeap_Free: 168 // Must mark v freed before calling unmarkspan and MHeap_Free:
147 // they might coalesce v into other spans and change the bitmap further. 169 // they might coalesce v into other spans and change the bitmap further.
148 runtime·markfreed(v, size); 170 runtime·markfreed(v, size);
149 runtime·unmarkspan(v, 1<<PageShift); 171 runtime·unmarkspan(v, 1<<PageShift);
150 » » runtime·MHeap_Free(&runtime·mheap, s, 1); 172 » » runtime·MHeap_Free(runtime·mheap, s, 1);
151 } else { 173 } else {
152 // Small object. 174 // Small object.
153 size = runtime·class_to_size[sizeclass]; 175 size = runtime·class_to_size[sizeclass];
154 if(size > sizeof(uintptr)) 176 if(size > sizeof(uintptr))
155 » » » ((uintptr*)v)[1] = 1;» // mark as "needs to be zeroed" 177 » » » ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;» // mark as "needs to be zeroed"
156 // Must mark v freed before calling MCache_Free: 178 // Must mark v freed before calling MCache_Free:
157 // it might coalesce v and other blocks into a bigger span 179 // it might coalesce v and other blocks into a bigger span
158 // and change the bitmap further. 180 // and change the bitmap further.
159 runtime·markfreed(v, size); 181 runtime·markfreed(v, size);
160 c->local_by_size[sizeclass].nfree++; 182 c->local_by_size[sizeclass].nfree++;
161 runtime·MCache_Free(c, v, sizeclass, size); 183 runtime·MCache_Free(c, v, sizeclass, size);
162 } 184 }
163 c->local_nfree++; 185 c->local_nfree++;
164 c->local_alloc -= size; 186 c->local_alloc -= size;
165 if(prof) 187 if(prof)
166 runtime·MProf_Free(v, size); 188 runtime·MProf_Free(v, size);
167 m->mallocing = 0; 189 m->mallocing = 0;
168 } 190 }
169 191
170 int32 192 int32
171 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp) 193 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
172 { 194 {
173 uintptr n, i; 195 uintptr n, i;
174 byte *p; 196 byte *p;
175 MSpan *s; 197 MSpan *s;
176 198
177 m->mcache->local_nlookup++; 199 m->mcache->local_nlookup++;
178 » s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); 200 » if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
201 » » // purge cache stats to prevent overflow
202 » » runtime·lock(runtime·mheap);
203 » » runtime·purgecachedstats(m->mcache);
204 » » runtime·unlock(runtime·mheap);
205 » }
206
207 » s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
179 if(sp) 208 if(sp)
180 *sp = s; 209 *sp = s;
181 if(s == nil) { 210 if(s == nil) {
182 runtime·checkfreed(v, 1); 211 runtime·checkfreed(v, 1);
183 if(base) 212 if(base)
184 *base = nil; 213 *base = nil;
185 if(size) 214 if(size)
186 *size = 0; 215 *size = 0;
187 return 0; 216 return 0;
188 } 217 }
(...skipping 20 matching lines...) Expand all
209 } 238 }
210 if(size) 239 if(size)
211 *size = n; 240 *size = n;
212 241
213 return 1; 242 return 1;
214 } 243 }
215 244
216 MCache* 245 MCache*
217 runtime·allocmcache(void) 246 runtime·allocmcache(void)
218 { 247 {
219 » int32 rate; 248 » intgo rate;
220 MCache *c; 249 MCache *c;
221 250
222 » runtime·lock(&runtime·mheap); 251 » runtime·lock(runtime·mheap);
223 » c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc); 252 » c = runtime·FixAlloc_Alloc(&runtime·mheap->cachealloc);
224 » mstats.mcache_inuse = runtime·mheap.cachealloc.inuse; 253 » mstats.mcache_inuse = runtime·mheap->cachealloc.inuse;
225 » mstats.mcache_sys = runtime·mheap.cachealloc.sys; 254 » mstats.mcache_sys = runtime·mheap->cachealloc.sys;
226 » runtime·unlock(&runtime·mheap); 255 » runtime·unlock(runtime·mheap);
256 » runtime·memclr((byte*)c, sizeof(*c));
227 257
228 // Set first allocation sample size. 258 // Set first allocation sample size.
229 rate = runtime·MemProfileRate; 259 rate = runtime·MemProfileRate;
230 if(rate > 0x3fffffff) // make 2*rate not overflow 260 if(rate > 0x3fffffff) // make 2*rate not overflow
231 rate = 0x3fffffff; 261 rate = 0x3fffffff;
232 if(rate != 0) 262 if(rate != 0)
233 c->next_sample = runtime·fastrand1() % (2*rate); 263 c->next_sample = runtime·fastrand1() % (2*rate);
234 264
235 return c; 265 return c;
236 } 266 }
237 267
238 void 268 void
239 runtime·purgecachedstats(M* m) 269 runtime·freemcache(MCache *c)
240 { 270 {
241 » MCache *c; 271 » runtime·MCache_ReleaseAll(c);
272 » runtime·lock(runtime·mheap);
273 » runtime·purgecachedstats(c);
274 » runtime·FixAlloc_Free(&runtime·mheap->cachealloc, c);
275 » runtime·unlock(runtime·mheap);
276 }
277
278 void
279 runtime·purgecachedstats(MCache *c)
280 {
281 » int32 i;
242 282
243 // Protected by either heap or GC lock. 283 // Protected by either heap or GC lock.
244 c = m->mcache;
245 mstats.heap_alloc += c->local_cachealloc; 284 mstats.heap_alloc += c->local_cachealloc;
246 c->local_cachealloc = 0; 285 c->local_cachealloc = 0;
247 mstats.heap_objects += c->local_objects; 286 mstats.heap_objects += c->local_objects;
248 c->local_objects = 0; 287 c->local_objects = 0;
249 mstats.nmalloc += c->local_nmalloc; 288 mstats.nmalloc += c->local_nmalloc;
250 c->local_nmalloc = 0; 289 c->local_nmalloc = 0;
251 mstats.nfree += c->local_nfree; 290 mstats.nfree += c->local_nfree;
252 c->local_nfree = 0; 291 c->local_nfree = 0;
253 mstats.nlookup += c->local_nlookup; 292 mstats.nlookup += c->local_nlookup;
254 c->local_nlookup = 0; 293 c->local_nlookup = 0;
255 mstats.alloc += c->local_alloc; 294 mstats.alloc += c->local_alloc;
256 c->local_alloc= 0; 295 c->local_alloc= 0;
257 mstats.total_alloc += c->local_total_alloc; 296 mstats.total_alloc += c->local_total_alloc;
258 c->local_total_alloc= 0; 297 c->local_total_alloc= 0;
298 for(i=0; i<nelem(c->local_by_size); i++) {
299 mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
300 c->local_by_size[i].nmalloc = 0;
301 mstats.by_size[i].nfree += c->local_by_size[i].nfree;
302 c->local_by_size[i].nfree = 0;
303 }
259 } 304 }
260 305
261 uintptr runtime·sizeof_C_MStats = sizeof(MStats); 306 uintptr runtime·sizeof_C_MStats = sizeof(MStats);
262 307
263 #define MaxArena32 (2U<<30) 308 #define MaxArena32 (2U<<30)
264 309
265 void 310 void
266 runtime·mallocinit(void) 311 runtime·mallocinit(void)
267 { 312 {
268 byte *p; 313 byte *p;
269 uintptr arena_size, bitmap_size; 314 uintptr arena_size, bitmap_size;
270 extern byte end[]; 315 extern byte end[];
271 byte *want; 316 byte *want;
272 uintptr limit; 317 uintptr limit;
273 318
274 p = nil; 319 p = nil;
275 arena_size = 0; 320 arena_size = 0;
276 bitmap_size = 0; 321 bitmap_size = 0;
277 ········ 322 ········
278 // for 64-bit build 323 // for 64-bit build
279 USED(p); 324 USED(p);
280 USED(arena_size); 325 USED(arena_size);
281 USED(bitmap_size); 326 USED(bitmap_size);
282 327
328 if((runtime·mheap = runtime·SysAlloc(sizeof(*runtime·mheap))) == nil)
329 runtime·throw("runtime: cannot allocate heap metadata");
330
283 runtime·InitSizes(); 331 runtime·InitSizes();
284 332
285 » limit = runtime·memlimit(); 333 » // limit = runtime·memlimit();
334 » // See https://code.google.com/p/go/issues/detail?id=5049
335 » // TODO(rsc): Fix after 1.1.
336 » limit = 0;
286 337
287 // Set up the allocation arena, a contiguous area of memory where 338 // Set up the allocation arena, a contiguous area of memory where
288 // allocated data will be found. The arena begins with a bitmap large 339 // allocated data will be found. The arena begins with a bitmap large
289 // enough to hold 4 bits per allocated word. 340 // enough to hold 4 bits per allocated word.
290 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) { 341 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
291 // On a 64-bit machine, allocate from a single contiguous reservation. 342 // On a 64-bit machine, allocate from a single contiguous reservation.
292 » » // 16 GB should be big enough for now. 343 » » // 128 GB (MaxMem) should be big enough for now.
293 // 344 //
294 // The code will work with the reservation at any address, but ask 345 // The code will work with the reservation at any address, but ask
295 » » // SysReserve to use 0x000000f800000000 if possible. 346 » » // SysReserve to use 0x000000c000000000 if possible.
296 » » // Allocating a 16 GB region takes away 36 bits, and the amd64 347 » » // Allocating a 128 GB region takes away 37 bits, and the amd64
297 // doesn't let us choose the top 17 bits, so that leaves the 11 bits 348 // doesn't let us choose the top 17 bits, so that leaves the 11 bits
298 » » // in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means 349 » » // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
299 » » // that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb. 350 » » // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
300 » » // None of the bytes f8 f9 fa fb can appear in valid UTF-8, and 351 » » // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
301 » » // they are otherwise as far from ff (likely a common byte) as possible. 352 » » // UTF-8 sequences, and they are otherwise as far away from·
302 » » // Choosing 0x00 for the leading 6 bits was more arbitrary, but it 353 » » // ff (likely a common byte) as possible. An earlier attempt to use 0x11f8·
303 » » // is not a common ASCII code point either. Using 0x11f8 instead
304 // caused out of memory errors on OS X during thread allocations. 354 // caused out of memory errors on OS X during thread allocations.
305 // These choices are both for debuggability and to reduce the 355 // These choices are both for debuggability and to reduce the
306 // odds of the conservative garbage collector not collecting memory 356 // odds of the conservative garbage collector not collecting memory
307 // because some non-pointer block of memory had a bit pattern 357 // because some non-pointer block of memory had a bit pattern
308 // that matched a memory address. 358 // that matched a memory address.
309 // 359 //
310 » » // Actually we reserve 17 GB (because the bitmap ends up being 1 GB) 360 » » // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
311 » » // but it hardly matters: fc is not valid UTF-8 either, and we have to 361 » » // but it hardly matters: e0 00 is not valid UTF-8 either.
312 » » // allocate 15 GB before we get that far.
313 // 362 //
314 // If this fails we fall back to the 32 bit memory mechanism 363 // If this fails we fall back to the 32 bit memory mechanism
315 » » arena_size = 16LL<<30; 364 » » arena_size = MaxMem;
316 bitmap_size = arena_size / (sizeof(void*)*8/4); 365 bitmap_size = arena_size / (sizeof(void*)*8/4);
317 » » p = runtime·SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size); 366 » » p = runtime·SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
318 } 367 }
319 if (p == nil) { 368 if (p == nil) {
320 // On a 32-bit machine, we can't typically get away 369 // On a 32-bit machine, we can't typically get away
321 // with a giant virtual address space reservation. 370 // with a giant virtual address space reservation.
322 // Instead we map the memory information bitmap 371 // Instead we map the memory information bitmap
323 // immediately after the data segment, large enough 372 // immediately after the data segment, large enough
324 // to handle another 2GB of mappings (256 MB), 373 // to handle another 2GB of mappings (256 MB),
325 // along with a reservation for another 512 MB of memory. 374 // along with a reservation for another 512 MB of memory.
326 // When that gets used up, we'll start asking the kernel 375 // When that gets used up, we'll start asking the kernel
327 // for any memory anywhere and hope it's in the 2GB 376 // for any memory anywhere and hope it's in the 2GB
(...skipping 25 matching lines...) Expand all
353 want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1)); 402 want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
354 p = runtime·SysReserve(want, bitmap_size + arena_size); 403 p = runtime·SysReserve(want, bitmap_size + arena_size);
355 if(p == nil) 404 if(p == nil)
356 runtime·throw("runtime: cannot reserve arena virtual address space"); 405 runtime·throw("runtime: cannot reserve arena virtual address space");
357 if((uintptr)p & (((uintptr)1<<PageShift)-1)) 406 if((uintptr)p & (((uintptr)1<<PageShift)-1))
358 runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size); 407 runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
359 } 408 }
360 if((uintptr)p & (((uintptr)1<<PageShift)-1)) 409 if((uintptr)p & (((uintptr)1<<PageShift)-1))
361 runtime·throw("runtime: SysReserve returned unaligned address"); 410 runtime·throw("runtime: SysReserve returned unaligned address");
362 411
363 » runtime·mheap.bitmap = p; 412 » runtime·mheap->bitmap = p;
364 » runtime·mheap.arena_start = p + bitmap_size; 413 » runtime·mheap->arena_start = p + bitmap_size;
365 » runtime·mheap.arena_used = runtime·mheap.arena_start; 414 » runtime·mheap->arena_used = runtime·mheap->arena_start;
366 » runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size; 415 » runtime·mheap->arena_end = runtime·mheap->arena_start + arena_size;
367 416
368 // Initialize the rest of the allocator.········ 417 // Initialize the rest of the allocator.········
369 » runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc); 418 » runtime·MHeap_Init(runtime·mheap, runtime·SysAlloc);
370 m->mcache = runtime·allocmcache(); 419 m->mcache = runtime·allocmcache();
371 420
372 // See if it works. 421 // See if it works.
373 runtime·free(runtime·malloc(1)); 422 runtime·free(runtime·malloc(1));
374 } 423 }
375 424
376 void* 425 void*
377 runtime·MHeap_SysAlloc(MHeap *h, uintptr n) 426 runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
378 { 427 {
379 byte *p; 428 byte *p;
(...skipping 13 matching lines...) Expand all
393 if(p == h->arena_end) 442 if(p == h->arena_end)
394 h->arena_end = new_end; 443 h->arena_end = new_end;
395 } 444 }
396 } 445 }
397 if(n <= h->arena_end - h->arena_used) { 446 if(n <= h->arena_end - h->arena_used) {
398 // Keep taking from our reservation. 447 // Keep taking from our reservation.
399 p = h->arena_used; 448 p = h->arena_used;
400 runtime·SysMap(p, n); 449 runtime·SysMap(p, n);
401 h->arena_used += n; 450 h->arena_used += n;
402 runtime·MHeap_MapBits(h); 451 runtime·MHeap_MapBits(h);
452 if(raceenabled)
453 runtime·racemapshadow(p, n);
403 return p; 454 return p;
404 } 455 }
405 ········ 456 ········
406 // If using 64-bit, our reservation is all we have. 457 // If using 64-bit, our reservation is all we have.
407 if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU) 458 if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
408 return nil; 459 return nil;
409 460
410 // On 32-bit, once the reservation is gone we can 461 // On 32-bit, once the reservation is gone we can
411 // try to get memory at a location chosen by the OS 462 // try to get memory at a location chosen by the OS
412 // and hope that it is in the range we allocated bitmap for. 463 // and hope that it is in the range we allocated bitmap for.
413 p = runtime·SysAlloc(n); 464 p = runtime·SysAlloc(n);
414 if(p == nil) 465 if(p == nil)
415 return nil; 466 return nil;
416 467
417 if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) { 468 if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
418 runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n", 469 runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
419 p, h->arena_start, h->arena_start+MaxArena32); 470 p, h->arena_start, h->arena_start+MaxArena32);
420 runtime·SysFree(p, n); 471 runtime·SysFree(p, n);
421 return nil; 472 return nil;
422 } 473 }
423 474
424 if(p+n > h->arena_used) { 475 if(p+n > h->arena_used) {
425 h->arena_used = p+n; 476 h->arena_used = p+n;
426 if(h->arena_used > h->arena_end) 477 if(h->arena_used > h->arena_end)
427 h->arena_end = h->arena_used; 478 h->arena_end = h->arena_used;
428 runtime·MHeap_MapBits(h); 479 runtime·MHeap_MapBits(h);
480 if(raceenabled)
481 runtime·racemapshadow(p, n);
429 } 482 }
430 ········ 483 ········
431 return p; 484 return p;
432 } 485 }
433 486
434 static Lock settype_lock; 487 static Lock settype_lock;
435 488
436 void 489 void
437 runtime·settype_flush(M *m, bool sysalloc) 490 runtime·settype_flush(M *mp, bool sysalloc)
438 { 491 {
439 » uintptr *buf = m->settype_buf; 492 » uintptr *buf, *endbuf;
440 » uintptr *endbuf = buf+m->settype_bufsize; 493 » uintptr size, ofs, j, t;
494 » uintptr ntypes, nbytes2, nbytes3;
495 » uintptr *data2;
496 » byte *data3;
497 » bool sysalloc3;
498 » void *v;
499 » uintptr typ, p;
500 » MSpan *s;
501
502 » buf = mp->settype_buf;
503 » endbuf = buf + mp->settype_bufsize;
441 504
442 runtime·lock(&settype_lock); 505 runtime·lock(&settype_lock);
443 while(buf < endbuf) { 506 while(buf < endbuf) {
444 void *v;
445 uintptr typ, p;
446 MSpan *s;
447 ················
448 v = (void*)*buf; 507 v = (void*)*buf;
449 » » *buf = 0; // Garbage collector may later find the pointer in th e buffer, so it is better to reset it 508 » » if(false) *buf = 0;
450 buf++; 509 buf++;
451 typ = *buf; 510 typ = *buf;
452 buf++; 511 buf++;
453 512
454 // (Manually inlined copy of runtime·MHeap_Lookup) 513 // (Manually inlined copy of runtime·MHeap_Lookup)
455 p = (uintptr)v>>PageShift; 514 p = (uintptr)v>>PageShift;
456 if(sizeof(void*) == 8) 515 if(sizeof(void*) == 8)
457 » » » p -= (uintptr)runtime·mheap.arena_start >> PageShift; 516 » » » p -= (uintptr)runtime·mheap->arena_start >> PageShift;
458 » » s = runtime·mheap.map[p]; 517 » » s = runtime·mheap->map[p];
459 518
460 » » if(s->sizeclass != 0) { 519 » » if(s->sizeclass == 0) {
461 » » » uintptr size = s->elemsize; 520 » » » s->types.compression = MTypes_Single;
462 » » » uintptr ofs = ((uintptr)v - (s->start<<PageShift)) / siz e; 521 » » » s->types.data = typ;
463 522 » » » continue;
464 » » » switch(s->types.compression) { 523 » » }
465 » » » » case 0: { 524
466 » » » » » uintptr ntypes = (s->npages << PageShift ) / size; 525 » » size = s->elemsize;
467 » » » » » uintptr nbytes3 = 8*sizeof(uintptr) + 1* ntypes; 526 » » ofs = ((uintptr)v - (s->start<<PageShift)) / size;
468 » » » » » byte *data3; 527
469 528 » » switch(s->types.compression) {
470 » » » » » if(!sysalloc) { 529 » » case MTypes_Empty:
471 » » » » » » data3 = runtime·mallocgc(nbytes3 , FlagNoPointers, 0, 1); 530 » » » ntypes = (s->npages << PageShift) / size;
472 » » » » » } else { 531 » » » nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
473 » » » » » » data3 = runtime·SysAlloc(nbytes3 ); 532
474 » » » » » » if(0) runtime·printf("settype(0- >3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3); 533 » » » if(!sysalloc) {
475 » » » » » } 534 » » » » data3 = runtime·mallocgc(nbytes3, FlagNoProfilin g|FlagNoPointers, 0, 1);
476 535 » » » } else {
477 » » » » » s->types.compression = 3; 536 » » » » data3 = runtime·SysAlloc(nbytes3);
478 » » » » » s->types.sysalloc = sysalloc; 537 » » » » if(data3 == nil)
479 » » » » » s->types.data = (uintptr)data3; 538 » » » » » runtime·throw("runtime: cannot allocate memory");
480 539 » » » » if(0) runtime·printf("settype(0->3): SysAlloc(%x ) --> %p\n", (uint32)nbytes3, data3);
481 » » » » » ((uintptr*)data3)[1] = typ; 540 » » » }
482 » » » » » data3[8*sizeof(uintptr) + ofs] = 1; 541
542 » » » s->types.compression = MTypes_Bytes;
543 » » » s->types.sysalloc = sysalloc;
544 » » » s->types.data = (uintptr)data3;
545
546 » » » ((uintptr*)data3)[1] = typ;
547 » » » data3[8*sizeof(uintptr) + ofs] = 1;
548 » » » break;
549
550 » » case MTypes_Words:
551 » » » ((uintptr*)s->types.data)[ofs] = typ;
552 » » » break;
553
554 » » case MTypes_Bytes:
555 » » » data3 = (byte*)s->types.data;
556 » » » for(j=1; j<8; j++) {
557 » » » » if(((uintptr*)data3)[j] == typ) {
483 break; 558 break;
484 } 559 }
485 » » » » case 2: { 560 » » » » if(((uintptr*)data3)[j] == 0) {
486 » » » » » ((uintptr*)s->types.data)[ofs] = typ; 561 » » » » » ((uintptr*)data3)[j] = typ;
487 » » » » » break;
488 » » » » }
489 » » » » case 3: {
490 » » » » » byte *data3 = (byte*)s->types.data;
491 » » » » » uintptr j;
492 » » » » » for(j=1; j<8; j++) {
493 » » » » » » if( ((uintptr*)data3)[j] == typ ) {
494 » » » » » » » break;
495 » » » » » » }
496 » » » » » » if( ((uintptr*)data3)[j] == 0 ) {
497 » » » » » » » ((uintptr*)data3)[j] = t yp;
498 » » » » » » » break;
499 » » » » » » }
500 » » » » » }
501 » » » » » if(j < 8) {
502 » » » » » » data3[8*sizeof(uintptr) + ofs] = j;
503 » » » » » } else {
504 » » » » » » uintptr ntypes = (s->npages << P ageShift) / size;
505 » » » » » » uintptr nbytes2 = ntypes * sizeo f(uintptr);
506 » » » » » » uintptr *data2;
507
508 » » » » » » if(!sysalloc) {
509 » » » » » » » data2 = runtime·mallocgc (nbytes2, FlagNoPointers, 0, 1);
510 » » » » » » } else {
511 » » » » » » » data2 = runtime·SysAlloc (nbytes2);
512 » » » » » » » if(0) runtime·printf("se ttype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
513 » » » » » » }
514
515 » » » » » » bool sysalloc3 = s->types.sysall oc;
516
517 » » » » » » s->types.compression = 2;
518 » » » » » » s->types.sysalloc = sysalloc;
519 » » » » » » s->types.data = (uintptr)data2;
520
521 » » » » » » // Move the contents of data3 to data2. Then deallocate data3.
522 » » » » » » for(j=0; j<ntypes; j++) {
523 » » » » » » » uintptr t;
524 » » » » » » » t = data3[8*sizeof(uintp tr) + j];
525 » » » » » » » t = ((uintptr*)data3)[t] ;
526 » » » » » » » data2[j] = t;
527 » » » » » » }
528 » » » » » » if(sysalloc3) {
529 » » » » » » » uintptr nbytes3 = 8*size of(uintptr) + 1*ntypes;
530 » » » » » » » if(0) runtime·printf("se ttype.(3->2): SysFree(%p,%x)\n", data3, (uint32)nbytes3);
531 » » » » » » » runtime·SysFree(data3, n bytes3);
532 » » » » » » }
533
534 » » » » » » data2[ofs] = typ;
535 » » » » » }
536 break; 562 break;
537 } 563 }
538 } 564 }
539 » » } else { 565 » » » if(j < 8) {
540 » » » s->types.compression = 1; 566 » » » » data3[8*sizeof(uintptr) + ofs] = j;
541 » » » s->types.data = typ; 567 » » » } else {
568 » » » » ntypes = (s->npages << PageShift) / size;
569 » » » » nbytes2 = ntypes * sizeof(uintptr);
570
571 » » » » if(!sysalloc) {
572 » » » » » data2 = runtime·mallocgc(nbytes2, FlagNo Profiling|FlagNoPointers, 0, 1);
573 » » » » } else {
574 » » » » » data2 = runtime·SysAlloc(nbytes2);
575 » » » » » if(data2 == nil)
576 » » » » » » runtime·throw("runtime: cannot a llocate memory");
577 » » » » » if(0) runtime·printf("settype.(3->2): Sy sAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
578 » » » » }
579
580 » » » » sysalloc3 = s->types.sysalloc;
581
582 » » » » s->types.compression = MTypes_Words;
583 » » » » s->types.sysalloc = sysalloc;
584 » » » » s->types.data = (uintptr)data2;
585
586 » » » » // Move the contents of data3 to data2. Then dea llocate data3.
587 » » » » for(j=0; j<ntypes; j++) {
588 » » » » » t = data3[8*sizeof(uintptr) + j];
589 » » » » » t = ((uintptr*)data3)[t];
590 » » » » » data2[j] = t;
591 » » » » }
592 » » » » if(sysalloc3) {
593 » » » » » nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
594 » » » » » if(0) runtime·printf("settype.(3->2): Sy sFree(%p,%x)\n", data3, (uint32)nbytes3);
595 » » » » » runtime·SysFree(data3, nbytes3);
596 » » » » }
597
598 » » » » data2[ofs] = typ;
599 » » » }
600 » » » break;
542 } 601 }
543 } 602 }
544 runtime·unlock(&settype_lock); 603 runtime·unlock(&settype_lock);
545 604
546 » m->settype_bufsize = 0; 605 » mp->settype_bufsize = 0;
547 } 606 }
548 607
549 // It is forbidden to use this function if it is possible that 608 // It is forbidden to use this function if it is possible that
550 // explicit deallocation by calling runtime·free(v) may happen. 609 // explicit deallocation via calling runtime·free(v) may happen.
551 void 610 void
552 runtime·settype(void *v, uintptr t) 611 runtime·settype(void *v, uintptr t)
553 { 612 {
554 » M *m1; 613 » M *mp;
555 uintptr *buf; 614 uintptr *buf;
556 uintptr i; 615 uintptr i;
616 MSpan *s;
557 617
558 if(t == 0) 618 if(t == 0)
559 runtime·throw("settype: zero type"); 619 runtime·throw("settype: zero type");
560 620
561 » m1 = m; 621 » mp = m;
562 » buf = m1->settype_buf; 622 » buf = mp->settype_buf;
563 » i = m1->settype_bufsize; 623 » i = mp->settype_bufsize;
564 buf[i+0] = (uintptr)v; 624 buf[i+0] = (uintptr)v;
565 buf[i+1] = t; 625 buf[i+1] = t;
566 i += 2; 626 i += 2;
567 » m1->settype_bufsize = i; 627 » mp->settype_bufsize = i;
568 628
569 » if(i == nelem(m1->settype_buf)) { 629 » if(i == nelem(mp->settype_buf)) {
570 » » runtime·settype_flush(m1, false); 630 » » runtime·settype_flush(mp, false);
571 } 631 }
572 632
573 if(DebugTypeAtBlockEnd) { 633 if(DebugTypeAtBlockEnd) {
574 » » MSpan *s = runtime·MHeap_Lookup(&runtime·mheap, v); 634 » » s = runtime·MHeap_Lookup(runtime·mheap, v);
575
576 *(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t; 635 *(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
577 } 636 }
578 } 637 }
579 638
580 void 639 void
581 runtime·settype_sysfree(MSpan *s) 640 runtime·settype_sysfree(MSpan *s)
582 { 641 {
583 » uintptr nbytes; 642 » uintptr ntypes, nbytes;
584 643
585 if(!s->types.sysalloc) 644 if(!s->types.sysalloc)
586 return; 645 return;
587 646
588 nbytes = (uintptr)-1; 647 nbytes = (uintptr)-1;
589 » if(s->types.compression==2) { 648
590 » » uintptr ntypes = (s->npages << PageShift) / s->elemsize; 649 » switch (s->types.compression) {
650 » case MTypes_Words:
651 » » ntypes = (s->npages << PageShift) / s->elemsize;
591 nbytes = ntypes * sizeof(uintptr); 652 nbytes = ntypes * sizeof(uintptr);
592 » } 653 » » break;
593 » else if(s->types.compression==3) { 654 » case MTypes_Bytes:
594 » » uintptr ntypes = (s->npages << PageShift) / s->elemsize; 655 » » ntypes = (s->npages << PageShift) / s->elemsize;
595 nbytes = 8*sizeof(uintptr) + 1*ntypes; 656 nbytes = 8*sizeof(uintptr) + 1*ntypes;
657 break;
596 } 658 }
597 659
598 if(nbytes != (uintptr)-1) { 660 if(nbytes != (uintptr)-1) {
599 if(0) runtime·printf("settype: SysFree(%p,%x)\n", (void*)s->type s.data, (uint32)nbytes); 661 if(0) runtime·printf("settype: SysFree(%p,%x)\n", (void*)s->type s.data, (uint32)nbytes);
600 runtime·SysFree((void*)s->types.data, nbytes); 662 runtime·SysFree((void*)s->types.data, nbytes);
601 } 663 }
602 } 664 }
603 665
604 uintptr 666 uintptr
605 runtime·gettype(void *v) { 667 runtime·gettype(void *v)
606 » MSpan *s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); 668 {
669 » MSpan *s;
670 » uintptr t, ofs;
671 » byte *data;
672
673 » s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
607 if(s != nil) { 674 if(s != nil) {
608 » » uintptr t = 0; 675 » » t = 0;
609 switch(s->types.compression) { 676 switch(s->types.compression) {
610 » » » case 0: 677 » » case MTypes_Empty:
611 » » » » break; 678 » » » break;
612 » » » case 1: 679 » » case MTypes_Single:
613 » » » » t = s->types.data; 680 » » » t = s->types.data;
614 » » » » break; 681 » » » break;
615 » » » case 2: { 682 » » case MTypes_Words:
616 » » » » uintptr ofs = (uintptr)v - (s->start<<PageShift) ; 683 » » » ofs = (uintptr)v - (s->start<<PageShift);
617 » » » » t = ((uintptr*)s->types.data)[ofs/s->elemsize]; 684 » » » t = ((uintptr*)s->types.data)[ofs/s->elemsize];
618 » » » » break; 685 » » » break;
619 » » » } 686 » » case MTypes_Bytes:
620 » » » case 3: { 687 » » » ofs = (uintptr)v - (s->start<<PageShift);
621 » » » » uintptr ofs = (uintptr)v - (s->start<<PageShift) ; 688 » » » data = (byte*)s->types.data;
622 » » » » byte *data = (byte*)s->types.data; 689 » » » t = data[8*sizeof(uintptr) + ofs/s->elemsize];
623 » » » » t = data[8*sizeof(uintptr) + ofs/s->elemsize]; 690 » » » t = ((uintptr*)data)[t];
624 » » » » t = ((uintptr*)data)[t]; 691 » » » break;
625 » » » » break; 692 » » default:
626 » » » } 693 » » » runtime·throw("runtime·gettype: invalid compression kind ");
627 » » » default:
628 » » » » runtime·throw("runtime·gettype: invalid compress ion kind");
629 } 694 }
630 if(0) { 695 if(0) {
631 runtime·lock(&settype_lock); 696 runtime·lock(&settype_lock);
632 runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compr ession, (int64)t); 697 runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compr ession, (int64)t);
633 runtime·unlock(&settype_lock); 698 runtime·unlock(&settype_lock);
634 } 699 }
635 return t; 700 return t;
636 } 701 }
637 return 0; 702 return 0;
638 } 703 }
639 704
640 // Runtime stubs. 705 // Runtime stubs.
641 706
642 void* 707 void*
643 runtime·mal(uintptr n) 708 runtime·mal(uintptr n)
644 { 709 {
645 return runtime·mallocgc(n, 0, 1, 1); 710 return runtime·mallocgc(n, 0, 1, 1);
646 } 711 }
647 712
713 #pragma textflag 7
714 void
715 runtime·new(Type *typ, uint8 *ret)
716 {
717 uint32 flag;
718
719 if(raceenabled)
720 m->racepc = runtime·getcallerpc(&typ);
721
722 if(typ->size == 0) {
723 // All 0-length allocations use this pointer.
724 // The language does not require the allocations to
725 // have distinct values.
726 ret = (uint8*)&runtime·zerobase;
727 } else {
728 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
729 ret = runtime·mallocgc(typ->size, flag, 1, 1);
730
731 if(UseSpanType && !flag) {
732 if(false) {
733 runtime·printf("new %S: %p\n", *typ->string, ret );
734 }
735 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObjec t);
736 }
737 }
738
739 FLUSH(&ret);
740 }
741
648 // same as runtime·new, but callable from C 742 // same as runtime·new, but callable from C
649 void* 743 void*
650 runtime·cnew(Type *typ) 744 runtime·cnew(Type *typ)
651 { 745 {
652 » uint32 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0; 746 » uint32 flag;
653 » void *ret = runtime·mallocgc(typ->size, flag, 1, 1); 747 » void *ret;
654 748
655 » if(UseSpanType && !flag) { 749 » if(raceenabled)
656 » » //runtime·printf("new %S\n", *typ->string); 750 » » m->racepc = runtime·getcallerpc(&typ);
657 » » runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject); 751
752 » if(typ->size == 0) {
753 » » // All 0-length allocations use this pointer.
754 » » // The language does not require the allocations to
755 » » // have distinct values.
756 » » ret = (uint8*)&runtime·zerobase;
757 » } else {
758 » » flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
759 » » ret = runtime·mallocgc(typ->size, flag, 1, 1);
760
761 » » if(UseSpanType && !flag) {
762 » » » if(false) {
763 » » » » runtime·printf("new %S: %p\n", *typ->string, ret );
764 » » » }
765 » » » runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObjec t);
766 » » }
658 } 767 }
659 768
660 return ret; 769 return ret;
661 }
662
663 func new(typ *Type) (ret *uint8) {
664 uint32 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
665 ret = runtime·mallocgc(typ->size, flag, 1, 1);
666
667 if(UseSpanType && !flag) {
668 //runtime·printf("new %S\n", *typ->string);
669 runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
670 }
671
672 FLUSH(&ret);
673 }
674
675 void*
676 runtime·stackalloc(uint32 n)
677 {
678 // Stackalloc must be called on scheduler stack, so that we
679 // never try to grow the stack during the code that stackalloc runs.
680 // Doing so would cause a deadlock (issue 1547).
681 if(g != m->g0)
682 runtime·throw("stackalloc not on scheduler stack");
683
684 // Stack allocator uses malloc/free most of the time,
685 // but if we're in the middle of malloc and need stack,
686 // we have to do something else to avoid deadlock.
687 // In that case, we fall back on a fixed-size free-list
688 // allocator, assuming that inside malloc all the stack
689 // frames are small, so that all the stack allocations
690 // will be a single size, the minimum (right now, 5k).
691 if(m->mallocing || m->gcing || n == FixedStack) {
692 if(n != FixedStack) {
693 runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
694 runtime·throw("stackalloc");
695 }
696 return runtime·FixAlloc_Alloc(m->stackalloc);
697 }
698 return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
699 }
700
701 void
702 runtime·stackfree(void *v, uintptr n)
703 {
704 if(m->mallocing || m->gcing || n == FixedStack) {
705 runtime·FixAlloc_Free(m->stackalloc, v);
706 return;
707 }
708 runtime·free(v);
709 } 770 }
710 771
711 func GC() { 772 func GC() {
712 runtime·gc(1); 773 runtime·gc(1);
713 } 774 }
714 775
715 func SetFinalizer(obj Eface, finalizer Eface) { 776 func SetFinalizer(obj Eface, finalizer Eface) {
716 byte *base; 777 byte *base;
717 uintptr size; 778 uintptr size;
718 FuncType *ft; 779 FuncType *ft;
719 » int32 i, nret; 780 » int32 i;
781 » uintptr nret;
720 Type *t; 782 Type *t;
721 783
722 if(obj.type == nil) { 784 if(obj.type == nil) {
723 runtime·printf("runtime.SetFinalizer: first argument is nil inte rface\n"); 785 runtime·printf("runtime.SetFinalizer: first argument is nil inte rface\n");
724 goto throw; 786 goto throw;
725 } 787 }
726 if(obj.type->kind != KindPtr) { 788 if(obj.type->kind != KindPtr) {
727 runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string); 789 runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
728 goto throw; 790 goto throw;
729 } 791 }
(...skipping 22 matching lines...) Expand all
752 runtime·printf("runtime.SetFinalizer: finalizer already set\n"); 814 runtime·printf("runtime.SetFinalizer: finalizer already set\n");
753 goto throw; 815 goto throw;
754 } 816 }
755 return; 817 return;
756 818
757 badfunc: 819 badfunc:
758 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S )\n", *finalizer.type->string, *obj.type->string); 820 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S )\n", *finalizer.type->string, *obj.type->string);
759 throw: 821 throw:
760 runtime·throw("runtime.SetFinalizer"); 822 runtime·throw("runtime.SetFinalizer");
761 } 823 }
LEFTRIGHT

Powered by Google App Engine
RSS Feeds Recent Issues | This issue
This is Rietveld f62528b