Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 6114046: code review 6114046: runtime, reflect, ld, gc: garbage collection precision ... (Closed)
Left Patch Set: diff -r cc8a35781b5e https://go.googlecode.com/hg/ Created 11 years, 11 months ago
Right Patch Set: diff -r d6e06d0f3c29 https://code.google.com/p/go/ Created 10 years, 10 months ago
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 // See malloc.h for overview. 5 // See malloc.h for overview.
6 // 6 //
7 // TODO(rsc): double-check stats. 7 // TODO(rsc): double-check stats.
8 8
9 package runtime 9 package runtime
10 #include "runtime.h" 10 #include "runtime.h"
11 #include "arch_GOARCH.h" 11 #include "arch_GOARCH.h"
12 #include "stack.h"
13 #include "malloc.h" 12 #include "malloc.h"
14 #include "defs_GOOS_GOARCH.h"
15 #include "type.h" 13 #include "type.h"
16 14 #include "typekind.h"
17 #pragma dataflag 16 /* mark mheap as 'no pointers', hiding from garbage collector */ 15 #include "race.h"
18 MHeap runtime·mheap; 16
19 17 MHeap *runtime·mheap;
20 extern MStats mstats;» // defined in extern.go 18
21 19 int32» runtime·checking;
22 extern volatile int32 runtime·MemProfileRate; 20
21 extern MStats mstats;» // defined in zruntime_def_$GOOS_$GOARCH.go
22
23 extern volatile intgo runtime·MemProfileRate;
23 24
24 // Allocate an object of at least size bytes. 25 // Allocate an object of at least size bytes.
25 // Small objects are allocated from the per-thread cache's free lists. 26 // Small objects are allocated from the per-thread cache's free lists.
26 // Large objects (> 32 kB) are allocated straight from the heap. 27 // Large objects (> 32 kB) are allocated straight from the heap.
27 void* 28 void*
28 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) 29 runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
29 { 30 {
30 » int32 sizeclass, rate; 31 » int32 sizeclass;
32 » intgo rate;
31 MCache *c; 33 MCache *c;
32 uintptr npages; 34 uintptr npages;
33 MSpan *s; 35 MSpan *s;
34 void *v; 36 void *v;
35 37
36 » if(runtime·gcwaiting && g != m->g0 && m->locks == 0) 38 » if(runtime·gcwaiting && g != m->g0 && m->locks == 0 && dogc)
37 runtime·gosched(); 39 runtime·gosched();
38 if(m->mallocing) 40 if(m->mallocing)
39 runtime·throw("malloc/free - deadlock"); 41 runtime·throw("malloc/free - deadlock");
40 m->mallocing = 1; 42 m->mallocing = 1;
41 if(size == 0) 43 if(size == 0)
42 size = 1; 44 size = 1;
43 45
46 if(DebugTypeAtBlockEnd)
47 size += sizeof(uintptr);
48
44 c = m->mcache; 49 c = m->mcache;
45 c->local_nmalloc++; 50 c->local_nmalloc++;
46 if(size <= MaxSmallSize) { 51 if(size <= MaxSmallSize) {
47 // Allocate from mcache free lists. 52 // Allocate from mcache free lists.
48 » » sizeclass = runtime·SizeToClass(size); 53 » » // Inlined version of SizeToClass().
54 » » if(size <= 1024-8)
55 » » » sizeclass = runtime·size_to_class8[(size+7)>>3];
56 » » else
57 » » » sizeclass = runtime·size_to_class128[(size-1024+127) >> 7];
49 size = runtime·class_to_size[sizeclass]; 58 size = runtime·class_to_size[sizeclass];
50 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed); 59 v = runtime·MCache_Alloc(c, sizeclass, size, zeroed);
51 if(v == nil) 60 if(v == nil)
52 runtime·throw("out of memory"); 61 runtime·throw("out of memory");
53 c->local_alloc += size; 62 c->local_alloc += size;
54 c->local_total_alloc += size; 63 c->local_total_alloc += size;
55 c->local_by_size[sizeclass].nmalloc++; 64 c->local_by_size[sizeclass].nmalloc++;
56 } else { 65 } else {
57 // TODO(rsc): Report tracebacks for very large allocations. 66 // TODO(rsc): Report tracebacks for very large allocations.
58 67
59 // Allocate directly from heap. 68 // Allocate directly from heap.
60 npages = size >> PageShift; 69 npages = size >> PageShift;
61 if((size & PageMask) != 0) 70 if((size & PageMask) != 0)
62 npages++; 71 npages++;
63 » » s = runtime·MHeap_Alloc(&runtime·mheap, npages, 0, 1); 72 » » s = runtime·MHeap_Alloc(runtime·mheap, npages, 0, 1, zeroed);
64 if(s == nil) 73 if(s == nil)
65 runtime·throw("out of memory"); 74 runtime·throw("out of memory");
66 size = npages<<PageShift; 75 size = npages<<PageShift;
67 c->local_alloc += size; 76 c->local_alloc += size;
68 c->local_total_alloc += size; 77 c->local_total_alloc += size;
69 v = (void*)(s->start << PageShift); 78 v = (void*)(s->start << PageShift);
70 79
71 // setup for mark sweep 80 // setup for mark sweep
72 runtime·markspan(v, 0, 0, true); 81 runtime·markspan(v, 0, 0, true);
73 } 82 }
83
84 if (sizeof(void*) == 4 && c->local_total_alloc >= (1<<30)) {
85 // purge cache stats to prevent overflow
86 runtime·lock(runtime·mheap);
87 runtime·purgecachedstats(c);
88 runtime·unlock(runtime·mheap);
89 }
90
74 if(!(flag & FlagNoGC)) 91 if(!(flag & FlagNoGC))
75 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0); 92 runtime·markallocated(v, size, (flag&FlagNoPointers) != 0);
93
94 if(DebugTypeAtBlockEnd)
95 *(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;
76 96
77 m->mallocing = 0; 97 m->mallocing = 0;
78 98
79 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) { 99 if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
80 if(size >= rate) 100 if(size >= rate)
81 goto profile; 101 goto profile;
82 if(m->mcache->next_sample > size) 102 if(m->mcache->next_sample > size)
83 m->mcache->next_sample -= size; 103 m->mcache->next_sample -= size;
84 else { 104 else {
85 // pick next profile time 105 // pick next profile time
86 // If you change this, also change allocmcache. 106 // If you change this, also change allocmcache.
87 if(rate > 0x3fffffff) // make 2*rate not overflow 107 if(rate > 0x3fffffff) // make 2*rate not overflow
88 rate = 0x3fffffff; 108 rate = 0x3fffffff;
89 m->mcache->next_sample = runtime·fastrand1() % (2*rate); 109 m->mcache->next_sample = runtime·fastrand1() % (2*rate);
90 profile: 110 profile:
91 runtime·setblockspecial(v, true); 111 runtime·setblockspecial(v, true);
92 runtime·MProf_Malloc(v, size); 112 runtime·MProf_Malloc(v, size);
93 } 113 }
94 } 114 }
95 115
96 if(dogc && mstats.heap_alloc >= mstats.next_gc) 116 if(dogc && mstats.heap_alloc >= mstats.next_gc)
97 runtime·gc(0); 117 runtime·gc(0);
118
119 if(raceenabled) {
120 runtime·racemalloc(v, size, m->racepc);
121 m->racepc = nil;
122 }
98 return v; 123 return v;
99 } 124 }
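
The inlined lookup above replaces the SizeToClass() call with two tables: one at 8-byte granularity for sizes up to 1024-8, one at 128-byte granularity beyond. A minimal standalone C sketch of the scheme, using made-up class sizes rather than the runtime's generated tables:

#include <stdio.h>

enum { NumClasses = 4 };
/* Hypothetical class sizes; the real tables are generated by the runtime. */
static const int class_to_size[NumClasses] = {8, 16, 1024, 32768};
static char size_to_class8[129];    /* sizes 0..1024, 8-byte steps */
static char size_to_class128[249];  /* sizes 1024..32768, 128-byte steps */

static void inittables(void) {
	int size, c;
	for (size = 0; size <= 1024; size += 8) {
		for (c = 0; class_to_size[c] < size; c++)
			;
		size_to_class8[(size+7)>>3] = (char)c;
	}
	for (size = 1024; size <= 32768; size += 128) {
		for (c = 0; class_to_size[c] < size; c++)
			;
		size_to_class128[(size-1024+127)>>7] = (char)c;
	}
}

int main(void) {
	int size = 600, sizeclass;
	inittables();
	/* mirrors the inlined lookup in mallocgc */
	if (size <= 1024-8)
		sizeclass = size_to_class8[(size+7)>>3];
	else
		sizeclass = size_to_class128[(size-1024+127)>>7];
	printf("size %d -> class %d (%d bytes)\n",
	       size, sizeclass, class_to_size[sizeclass]);
	return 0;
}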
100 125
101 void* 126 void*
102 runtime·malloc(uintptr size) 127 runtime·malloc(uintptr size)
103 { 128 {
104 return runtime·mallocgc(size, 0, 0, 1); 129 return runtime·mallocgc(size, 0, 0, 1);
105 } 130 }
106 131
107 // Free the object whose base pointer is v. 132 // Free the object whose base pointer is v.
(...skipping 14 matching lines...)
122 147
123 if(m->mallocing) 148 if(m->mallocing)
124 runtime·throw("malloc/free - deadlock"); 149 runtime·throw("malloc/free - deadlock");
125 m->mallocing = 1; 150 m->mallocing = 1;
126 151
127 if(!runtime·mlookup(v, nil, nil, &s)) { 152 if(!runtime·mlookup(v, nil, nil, &s)) {
128 runtime·printf("free %p: not an allocated block\n", v); 153 runtime·printf("free %p: not an allocated block\n", v);
129 runtime·throw("free runtime·mlookup"); 154 runtime·throw("free runtime·mlookup");
130 } 155 }
131 prof = runtime·blockspecial(v); 156 prof = runtime·blockspecial(v);
157
158 if(raceenabled)
159 runtime·racefree(v);
132 160
133 // Find size class for v. 161 // Find size class for v.
134 sizeclass = s->sizeclass; 162 sizeclass = s->sizeclass;
135 c = m->mcache; 163 c = m->mcache;
136 if(sizeclass == 0) { 164 if(sizeclass == 0) {
137 // Large object. 165 // Large object.
138 size = s->npages<<PageShift; 166 size = s->npages<<PageShift;
139 » » *(uintptr*)(s->start<<PageShift) = 1;» // mark as "needs to be zeroed" 167 » » *(uintptr*)(s->start<<PageShift) = (uintptr)0xfeedfeedfeedfeedll;» // mark as "needs to be zeroed"
140 // Must mark v freed before calling unmarkspan and MHeap_Free: 168 // Must mark v freed before calling unmarkspan and MHeap_Free:
141 // they might coalesce v into other spans and change the bitmap further. 169 // they might coalesce v into other spans and change the bitmap further.
142 runtime·markfreed(v, size); 170 runtime·markfreed(v, size);
143 runtime·unmarkspan(v, 1<<PageShift); 171 runtime·unmarkspan(v, 1<<PageShift);
144 » » runtime·MHeap_Free(&runtime·mheap, s, 1); 172 » » runtime·MHeap_Free(runtime·mheap, s, 1);
145 } else { 173 } else {
146 // Small object. 174 // Small object.
147 size = runtime·class_to_size[sizeclass]; 175 size = runtime·class_to_size[sizeclass];
148 if(size > sizeof(uintptr)) 176 if(size > sizeof(uintptr))
149 » » » ((uintptr*)v)[1] = 1;» // mark as "needs to be zeroed" 177 » » » ((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedll;» // mark as "needs to be zeroed"
150 // Must mark v freed before calling MCache_Free: 178 // Must mark v freed before calling MCache_Free:
151 // it might coalesce v and other blocks into a bigger span 179 // it might coalesce v and other blocks into a bigger span
152 // and change the bitmap further. 180 // and change the bitmap further.
153 runtime·markfreed(v, size); 181 runtime·markfreed(v, size);
154 c->local_by_size[sizeclass].nfree++; 182 c->local_by_size[sizeclass].nfree++;
155 runtime·MCache_Free(c, v, sizeclass, size); 183 runtime·MCache_Free(c, v, sizeclass, size);
156 if(s->objtype != nil) {
157 uintptr ofs = (uintptr)v - (s->start<<PageShift);
158 ((Type**)s->objtype)[ofs/size] = nil;
159 }
160 } 184 }
161 c->local_nfree++; 185 c->local_nfree++;
162 c->local_alloc -= size; 186 c->local_alloc -= size;
163 if(prof) 187 if(prof)
164 runtime·MProf_Free(v, size); 188 runtime·MProf_Free(v, size);
165 m->mallocing = 0; 189 m->mallocing = 0;
166 } 190 }
167 191
168 int32 192 int32
169 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp) 193 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
170 { 194 {
171 uintptr n, i; 195 uintptr n, i;
172 byte *p; 196 byte *p;
173 MSpan *s; 197 MSpan *s;
174 198
175 m->mcache->local_nlookup++; 199 m->mcache->local_nlookup++;
176 » s = runtime·MHeap_LookupMaybe(&runtime·mheap, v); 200 » if (sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
201 » » // purge cache stats to prevent overflow
202 » » runtime·lock(runtime·mheap);
203 » » runtime·purgecachedstats(m->mcache);
204 » » runtime·unlock(runtime·mheap);
205 » }
206
207 » s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
177 if(sp) 208 if(sp)
178 *sp = s; 209 *sp = s;
179 if(s == nil) { 210 if(s == nil) {
180 runtime·checkfreed(v, 1); 211 runtime·checkfreed(v, 1);
181 if(base) 212 if(base)
182 *base = nil; 213 *base = nil;
183 if(size) 214 if(size)
184 *size = 0; 215 *size = 0;
185 return 0; 216 return 0;
186 } 217 }
187 218
188 p = (byte*)((uintptr)s->start<<PageShift); 219 p = (byte*)((uintptr)s->start<<PageShift);
189 if(s->sizeclass == 0) { 220 if(s->sizeclass == 0) {
190 // Large object. 221 // Large object.
191 if(base) 222 if(base)
192 *base = p; 223 *base = p;
193 if(size) 224 if(size)
194 *size = s->npages<<PageShift; 225 *size = s->npages<<PageShift;
195 return 1; 226 return 1;
196 } 227 }
197 228
198 if((byte*)v >= (byte*)s->limit) { 229 if((byte*)v >= (byte*)s->limit) {
199 // pointers past the last block do not count as pointers. 230 // pointers past the last block do not count as pointers.
200 return 0; 231 return 0;
201 } 232 }
202 233
203 » n = runtime·class_to_size[s->sizeclass]; 234 » n = s->elemsize;
204 if(base) { 235 if(base) {
205 i = ((byte*)v - p)/n; 236 i = ((byte*)v - p)/n;
206 *base = p + i*n; 237 *base = p + i*n;
207 } 238 }
208 if(size) 239 if(size)
209 *size = n; 240 *size = n;
210 241
211 return 1; 242 return 1;
212 } 243 }
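
For small objects, mlookup rounds an interior pointer down to its object's base with integer division by the element size (n = s->elemsize). A minimal sketch of that arithmetic, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void) {
	uintptr_t span_start = 0x100000;  /* s->start << PageShift */
	uintptr_t elemsize   = 48;        /* s->elemsize */
	uintptr_t v          = span_start + 3*elemsize + 17;  /* interior pointer */

	uintptr_t i    = (v - span_start) / elemsize;  /* object index in span */
	uintptr_t base = span_start + i*elemsize;      /* base of containing object */
	printf("v=%#lx base=%#lx index=%lu\n",
	       (unsigned long)v, (unsigned long)base, (unsigned long)i);
	return 0;
}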
213 244
214 MCache* 245 MCache*
215 runtime·allocmcache(void) 246 runtime·allocmcache(void)
216 { 247 {
217 » int32 rate; 248 » intgo rate;
218 MCache *c; 249 MCache *c;
219 250
220 » runtime·lock(&runtime·mheap); 251 » runtime·lock(runtime·mheap);
221 » c = runtime·FixAlloc_Alloc(&runtime·mheap.cachealloc); 252 » c = runtime·FixAlloc_Alloc(&runtime·mheap->cachealloc);
222 » mstats.mcache_inuse = runtime·mheap.cachealloc.inuse; 253 » mstats.mcache_inuse = runtime·mheap->cachealloc.inuse;
223 » mstats.mcache_sys = runtime·mheap.cachealloc.sys; 254 » mstats.mcache_sys = runtime·mheap->cachealloc.sys;
224 » runtime·unlock(&runtime·mheap); 255 » runtime·unlock(runtime·mheap);
256 » runtime·memclr((byte*)c, sizeof(*c));
225 257
226 // Set first allocation sample size. 258 // Set first allocation sample size.
227 rate = runtime·MemProfileRate; 259 rate = runtime·MemProfileRate;
228 if(rate > 0x3fffffff) // make 2*rate not overflow 260 if(rate > 0x3fffffff) // make 2*rate not overflow
229 rate = 0x3fffffff; 261 rate = 0x3fffffff;
230 if(rate != 0) 262 if(rate != 0)
231 c->next_sample = runtime·fastrand1() % (2*rate); 263 c->next_sample = runtime·fastrand1() % (2*rate);
232 264
233 return c; 265 return c;
234 } 266 }
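
Both this function and the profiling branch of mallocgc draw the next sample point uniformly from [0, 2*rate), so the expected gap between samples is exactly rate. A small sketch of that property; rand() stands in for runtime·fastrand1() and the rate is an illustrative value, not the runtime default:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
	long rate = 4096;       /* illustrative sampling rate, in bytes */
	if (rate > 0x3fffffff)  /* make 2*rate not overflow */
		rate = 0x3fffffff;
	long long sum = 0;
	int i, n = 100000;
	for (i = 0; i < n; i++)
		sum += rand() % (2*rate);  /* one sampling period */
	printf("mean sample period: %lld bytes (rate=%ld)\n", sum/n, rate);
	return 0;
}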
235 267
236 void 268 void
237 runtime·purgecachedstats(M* m) 269 runtime·freemcache(MCache *c)
238 { 270 {
239 » MCache *c; 271 » runtime·MCache_ReleaseAll(c);
272 » runtime·lock(runtime·mheap);
273 » runtime·purgecachedstats(c);
274 » runtime·FixAlloc_Free(&runtime·mheap->cachealloc, c);
275 » runtime·unlock(runtime·mheap);
276 }
277
278 void
279 runtime·purgecachedstats(MCache *c)
280 {
281 » int32 i;
240 282
241 // Protected by either heap or GC lock. 283 // Protected by either heap or GC lock.
242 c = m->mcache;
243 mstats.heap_alloc += c->local_cachealloc; 284 mstats.heap_alloc += c->local_cachealloc;
244 c->local_cachealloc = 0; 285 c->local_cachealloc = 0;
245 mstats.heap_objects += c->local_objects; 286 mstats.heap_objects += c->local_objects;
246 c->local_objects = 0; 287 c->local_objects = 0;
247 mstats.nmalloc += c->local_nmalloc; 288 mstats.nmalloc += c->local_nmalloc;
248 c->local_nmalloc = 0; 289 c->local_nmalloc = 0;
249 mstats.nfree += c->local_nfree; 290 mstats.nfree += c->local_nfree;
250 c->local_nfree = 0; 291 c->local_nfree = 0;
251 mstats.nlookup += c->local_nlookup; 292 mstats.nlookup += c->local_nlookup;
252 c->local_nlookup = 0; 293 c->local_nlookup = 0;
253 mstats.alloc += c->local_alloc; 294 mstats.alloc += c->local_alloc;
254 c->local_alloc= 0; 295 c->local_alloc= 0;
255 mstats.total_alloc += c->local_total_alloc; 296 mstats.total_alloc += c->local_total_alloc;
256 c->local_total_alloc= 0; 297 c->local_total_alloc= 0;
298 for(i=0; i<nelem(c->local_by_size); i++) {
299 mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
300 c->local_by_size[i].nmalloc = 0;
301 mstats.by_size[i].nfree += c->local_by_size[i].nfree;
302 c->local_by_size[i].nfree = 0;
303 }
257 } 304 }
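
purgecachedstats folds per-cache counters into the global mstats while holding the heap or GC lock; the new sizeof(void*)==4 checks in mallocgc and mlookup call it early enough that a 32-bit counter cannot overflow. A sketch of the pattern with a single counter:

#include <stdint.h>
#include <stdio.h>

static uint64_t global_nmalloc;            /* mstats-style 64-bit total */

struct cache { uint32_t local_nmalloc; };  /* per-cache 32-bit counter */

static void purge(struct cache *c) {
	/* would run under the heap lock in the runtime */
	global_nmalloc += c->local_nmalloc;
	c->local_nmalloc = 0;
}

int main(void) {
	struct cache c = {0};
	for (int i = 0; i < 3; i++) {
		c.local_nmalloc = 1u<<30;  /* counter nearing overflow */
		purge(&c);                 /* drain into the 64-bit total */
	}
	printf("global: %llu\n", (unsigned long long)global_nmalloc);
	return 0;
}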
258 305
259 uintptr runtime·sizeof_C_MStats = sizeof(MStats); 306 uintptr runtime·sizeof_C_MStats = sizeof(MStats);
260 307
261 #define MaxArena32 (2U<<30) 308 #define MaxArena32 (2U<<30)
262 309
263 void 310 void
264 runtime·mallocinit(void) 311 runtime·mallocinit(void)
265 { 312 {
266 byte *p; 313 byte *p;
267 uintptr arena_size, bitmap_size; 314 uintptr arena_size, bitmap_size;
268 extern byte end[]; 315 extern byte end[];
269 byte *want; 316 byte *want;
270 uintptr limit; 317 uintptr limit;
271 318
272 p = nil; 319 p = nil;
273 arena_size = 0; 320 arena_size = 0;
274 bitmap_size = 0; 321 bitmap_size = 0;
275 ········ 322 ········
276 // for 64-bit build 323 // for 64-bit build
277 USED(p); 324 USED(p);
278 USED(arena_size); 325 USED(arena_size);
279 USED(bitmap_size); 326 USED(bitmap_size);
280 327
328 if((runtime·mheap = runtime·SysAlloc(sizeof(*runtime·mheap))) == nil)
329 runtime·throw("runtime: cannot allocate heap metadata");
330
281 runtime·InitSizes(); 331 runtime·InitSizes();
282 332
283 » limit = runtime·memlimit(); 333 » // limit = runtime·memlimit();
334 » // See https://code.google.com/p/go/issues/detail?id=5049
335 » // TODO(rsc): Fix after 1.1.
336 » limit = 0;
284 337
285 // Set up the allocation arena, a contiguous area of memory where 338 // Set up the allocation arena, a contiguous area of memory where
286 // allocated data will be found. The arena begins with a bitmap large 339 // allocated data will be found. The arena begins with a bitmap large
287 // enough to hold 4 bits per allocated word. 340 // enough to hold 4 bits per allocated word.
288 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) { 341 if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
289 // On a 64-bit machine, allocate from a single contiguous reservation. 342 // On a 64-bit machine, allocate from a single contiguous reservation.
290 » » // 16 GB should be big enough for now. 343 » » // 128 GB (MaxMem) should be big enough for now.
291 // 344 //
292 // The code will work with the reservation at any address, but ask 345 // The code will work with the reservation at any address, but ask
293 » » // SysReserve to use 0x000000f800000000 if possible. 346 » » // SysReserve to use 0x000000c000000000 if possible.
294 » » // Allocating a 16 GB region takes away 36 bits, and the amd64 347 » » // Allocating a 128 GB region takes away 37 bits, and the amd64
295 // doesn't let us choose the top 17 bits, so that leaves the 11 bits 348 // doesn't let us choose the top 17 bits, so that leaves the 11 bits
296 » » // in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means 349 » » // in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
297 » » // that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb. 350 » » // that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
298 » » // None of the bytes f8 f9 fa fb can appear in valid UTF-8, and 351 » » // In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
299 » » // they are otherwise as far from ff (likely a common byte) as possible. 352 » » // UTF-8 sequences, and they are otherwise as far away from·
300 » » // Choosing 0x00 for the leading 6 bits was more arbitrary, but it 353 » » // ff (likely a common byte) as possible. An earlier attempt to use 0x11f8·
301 » » // is not a common ASCII code point either. Using 0x11f8 instead
302 // caused out of memory errors on OS X during thread allocations. 354 // caused out of memory errors on OS X during thread allocations.
303 // These choices are both for debuggability and to reduce the 355 // These choices are both for debuggability and to reduce the
304 // odds of the conservative garbage collector not collecting memory 356 // odds of the conservative garbage collector not collecting memory
305 // because some non-pointer block of memory had a bit pattern 357 // because some non-pointer block of memory had a bit pattern
306 // that matched a memory address. 358 // that matched a memory address.
307 // 359 //
308 » » // Actually we reserve 17 GB (because the bitmap ends up being 1 GB) 360 » » // Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
309 » » // but it hardly matters: fc is not valid UTF-8 either, and we have to 361 » » // but it hardly matters: e0 00 is not valid UTF-8 either.
310 » » // allocate 15 GB before we get that far.
311 // 362 //
312 // If this fails we fall back to the 32 bit memory mechanism 363 // If this fails we fall back to the 32 bit memory mechanism
313 » » arena_size = 16LL<<30; 364 » » arena_size = MaxMem;
314 bitmap_size = arena_size / (sizeof(void*)*8/4); 365 bitmap_size = arena_size / (sizeof(void*)*8/4);
315 » » p = runtime·SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size); 366 » » p = runtime·SysReserve((void*)(0x00c0ULL<<32), bitmap_size + arena_size);
316 } 367 }
317 if (p == nil) { 368 if (p == nil) {
318 // On a 32-bit machine, we can't typically get away 369 // On a 32-bit machine, we can't typically get away
319 // with a giant virtual address space reservation. 370 // with a giant virtual address space reservation.
320 // Instead we map the memory information bitmap 371 // Instead we map the memory information bitmap
321 // immediately after the data segment, large enough 372 // immediately after the data segment, large enough
322 // to handle another 2GB of mappings (256 MB), 373 // to handle another 2GB of mappings (256 MB),
323 // along with a reservation for another 512 MB of memory. 374 // along with a reservation for another 512 MB of memory.
324 // When that gets used up, we'll start asking the kernel 375 // When that gets used up, we'll start asking the kernel
325 // for any memory anywhere and hope it's in the 2GB 376 // for any memory anywhere and hope it's in the 2GB
(...skipping 25 matching lines...)
351 want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1)); 402 want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1));
352 p = runtime·SysReserve(want, bitmap_size + arena_size); 403 p = runtime·SysReserve(want, bitmap_size + arena_size);
353 if(p == nil) 404 if(p == nil)
354 runtime·throw("runtime: cannot reserve arena virtual address space"); 405 runtime·throw("runtime: cannot reserve arena virtual address space");
355 if((uintptr)p & (((uintptr)1<<PageShift)-1)) 406 if((uintptr)p & (((uintptr)1<<PageShift)-1))
356 runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size); 407 runtime·printf("runtime: SysReserve returned unaligned address %p; asked for %p", p, bitmap_size+arena_size);
357 } 408 }
358 if((uintptr)p & (((uintptr)1<<PageShift)-1)) 409 if((uintptr)p & (((uintptr)1<<PageShift)-1))
359 runtime·throw("runtime: SysReserve returned unaligned address"); 410 runtime·throw("runtime: SysReserve returned unaligned address");
360 411
361 » runtime·mheap.bitmap = p; 412 » runtime·mheap->bitmap = p;
362 » runtime·mheap.arena_start = p + bitmap_size; 413 » runtime·mheap->arena_start = p + bitmap_size;
363 » runtime·mheap.arena_used = runtime·mheap.arena_start; 414 » runtime·mheap->arena_used = runtime·mheap->arena_start;
364 » runtime·mheap.arena_end = runtime·mheap.arena_start + arena_size; 415 » runtime·mheap->arena_end = runtime·mheap->arena_start + arena_size;
365 416
366 // Initialize the rest of the allocator.········ 417 // Initialize the rest of the allocator.········
367 » runtime·MHeap_Init(&runtime·mheap, runtime·SysAlloc); 418 » runtime·MHeap_Init(runtime·mheap, runtime·SysAlloc);
368 m->mcache = runtime·allocmcache(); 419 m->mcache = runtime·allocmcache();
369 420
370 // See if it works. 421 // See if it works.
371 runtime·free(runtime·malloc(1)); 422 runtime·free(runtime·malloc(1));
372 } 423 }
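
The bitmap reserved here holds 4 bits per allocated word, so on 64-bit it is arena_size/16: a 128 GB arena needs an 8 GB bitmap, giving the 136 GB reservation mentioned in the comments above. A sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void) {
	uint64_t arena_size  = 128ULL<<30;                        /* MaxMem, per the comment */
	uint64_t bitmap_size = arena_size / (sizeof(void*)*8/4);  /* 4 bits per word */
	printf("arena  = %llu GB\n", (unsigned long long)(arena_size>>30));
	printf("bitmap = %llu GB\n", (unsigned long long)(bitmap_size>>30));
	printf("total reservation = %llu GB at %#llx\n",
	       (unsigned long long)((arena_size+bitmap_size)>>30),
	       0x00c0ULL<<32);
	return 0;
}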
373 424
374 void* 425 void*
375 runtime·MHeap_SysAlloc(MHeap *h, uintptr n) 426 runtime·MHeap_SysAlloc(MHeap *h, uintptr n)
376 { 427 {
377 byte *p; 428 byte *p;
(...skipping 13 matching lines...)
391 if(p == h->arena_end) 442 if(p == h->arena_end)
392 h->arena_end = new_end; 443 h->arena_end = new_end;
393 } 444 }
394 } 445 }
395 if(n <= h->arena_end - h->arena_used) { 446 if(n <= h->arena_end - h->arena_used) {
396 // Keep taking from our reservation. 447 // Keep taking from our reservation.
397 p = h->arena_used; 448 p = h->arena_used;
398 runtime·SysMap(p, n); 449 runtime·SysMap(p, n);
399 h->arena_used += n; 450 h->arena_used += n;
400 runtime·MHeap_MapBits(h); 451 runtime·MHeap_MapBits(h);
452 if(raceenabled)
453 runtime·racemapshadow(p, n);
401 return p; 454 return p;
402 } 455 }
403 ········ 456 ········
404 // If using 64-bit, our reservation is all we have. 457 // If using 64-bit, our reservation is all we have.
405 if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU) 458 if(sizeof(void*) == 8 && (uintptr)h->bitmap >= 0xffffffffU)
406 return nil; 459 return nil;
407 460
408 // On 32-bit, once the reservation is gone we can 461 // On 32-bit, once the reservation is gone we can
409 // try to get memory at a location chosen by the OS 462 // try to get memory at a location chosen by the OS
410 // and hope that it is in the range we allocated bitmap for. 463 // and hope that it is in the range we allocated bitmap for.
411 p = runtime·SysAlloc(n); 464 p = runtime·SysAlloc(n);
412 if(p == nil) 465 if(p == nil)
413 return nil; 466 return nil;
414 467
415 if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) { 468 if(p < h->arena_start || p+n - h->arena_start >= MaxArena32) {
416 runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n", 469 runtime·printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
417 p, h->arena_start, h->arena_start+MaxArena32); 470 p, h->arena_start, h->arena_start+MaxArena32);
418 runtime·SysFree(p, n); 471 runtime·SysFree(p, n);
419 return nil; 472 return nil;
420 } 473 }
421 474
422 if(p+n > h->arena_used) { 475 if(p+n > h->arena_used) {
423 h->arena_used = p+n; 476 h->arena_used = p+n;
424 if(h->arena_used > h->arena_end) 477 if(h->arena_used > h->arena_end)
425 h->arena_end = h->arena_used; 478 h->arena_end = h->arena_used;
426 runtime·MHeap_MapBits(h); 479 runtime·MHeap_MapBits(h);
480 if(raceenabled)
481 runtime·racemapshadow(p, n);
427 } 482 }
428 ········ 483 ········
429 return p; 484 return p;
430 } 485 }
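
On 32-bit, the fallback path accepts OS-chosen memory only when it lies within MaxArena32 (2 GB) of arena_start, the window the bitmap was sized for. A sketch of that range check with made-up addresses:

#include <stdint.h>
#include <stdio.h>

#define MaxArena32 (2U<<30)

static int in_range(uintptr_t arena_start, uintptr_t p, uintptr_t n) {
	/* mirrors the check in MHeap_SysAlloc */
	return !(p < arena_start || p + n - arena_start >= MaxArena32);
}

int main(void) {
	uintptr_t arena_start = 0x10000000;
	printf("%d\n", in_range(arena_start, 0x20000000, 1<<20)); /* 1: inside window */
	printf("%d\n", in_range(arena_start, 0x08000000, 1<<20)); /* 0: below arena_start */
	printf("%d\n", in_range(arena_start, 0x98000000, 1<<20)); /* 0: past the 2 GB window */
	return 0;
}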
431 486
487 static Lock settype_lock;
488
489 void
490 runtime·settype_flush(M *mp, bool sysalloc)
491 {
492 uintptr *buf, *endbuf;
493 uintptr size, ofs, j, t;
494 uintptr ntypes, nbytes2, nbytes3;
495 uintptr *data2;
496 byte *data3;
497 bool sysalloc3;
498 void *v;
499 uintptr typ, p;
500 MSpan *s;
501
502 buf = mp->settype_buf;
503 endbuf = buf + mp->settype_bufsize;
504
505 runtime·lock(&settype_lock);
506 while(buf < endbuf) {
507 v = (void*)*buf;
508 if(false) *buf = 0;
509 buf++;
510 typ = *buf;
511 buf++;
512
513 // (Manually inlined copy of runtime·MHeap_Lookup)
514 p = (uintptr)v>>PageShift;
515 if(sizeof(void*) == 8)
516 p -= (uintptr)runtime·mheap->arena_start >> PageShift;
517 s = runtime·mheap->map[p];
518
519 if(s->sizeclass == 0) {
520 s->types.compression = MTypes_Single;
521 s->types.data = typ;
522 continue;
523 }
524
525 size = s->elemsize;
526 ofs = ((uintptr)v - (s->start<<PageShift)) / size;
527
528 switch(s->types.compression) {
529 case MTypes_Empty:
530 ntypes = (s->npages << PageShift) / size;
531 nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
532
533 if(!sysalloc) {
534 data3 = runtime·mallocgc(nbytes3, FlagNoProfiling|FlagNoPointers, 0, 1);
535 } else {
536 data3 = runtime·SysAlloc(nbytes3);
537 if(data3 == nil)
538 runtime·throw("runtime: cannot allocate memory");
539 if(0) runtime·printf("settype(0->3): SysAlloc(%x) --> %p\n", (uint32)nbytes3, data3);
540 }
541
542 s->types.compression = MTypes_Bytes;
543 s->types.sysalloc = sysalloc;
544 s->types.data = (uintptr)data3;
545
546 ((uintptr*)data3)[1] = typ;
547 data3[8*sizeof(uintptr) + ofs] = 1;
548 break;
549
550 case MTypes_Words:
551 ((uintptr*)s->types.data)[ofs] = typ;
552 break;
553
554 case MTypes_Bytes:
555 data3 = (byte*)s->types.data;
556 for(j=1; j<8; j++) {
557 if(((uintptr*)data3)[j] == typ) {
558 break;
559 }
560 if(((uintptr*)data3)[j] == 0) {
561 ((uintptr*)data3)[j] = typ;
562 break;
563 }
564 }
565 if(j < 8) {
566 data3[8*sizeof(uintptr) + ofs] = j;
567 } else {
568 ntypes = (s->npages << PageShift) / size;
569 nbytes2 = ntypes * sizeof(uintptr);
570
571 if(!sysalloc) {
572 data2 = runtime·mallocgc(nbytes2, FlagNoProfiling|FlagNoPointers, 0, 1);
573 } else {
574 data2 = runtime·SysAlloc(nbytes2);
575 if(data2 == nil)
576 runtime·throw("runtime: cannot allocate memory");
577 if(0) runtime·printf("settype.(3->2): SysAlloc(%x) --> %p\n", (uint32)nbytes2, data2);
578 }
579
580 sysalloc3 = s->types.sysalloc;
581
582 s->types.compression = MTypes_Words;
583 s->types.sysalloc = sysalloc;
584 s->types.data = (uintptr)data2;
585
586 // Move the contents of data3 to data2. Then deallocate data3.
587 for(j=0; j<ntypes; j++) {
588 t = data3[8*sizeof(uintptr) + j];
589 t = ((uintptr*)data3)[t];
590 data2[j] = t;
591 }
592 if(sysalloc3) {
593 nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
594 if(0) runtime·printf("settype.(3->2): SysFree(%p,%x)\n", data3, (uint32)nbytes3);
595 runtime·SysFree(data3, nbytes3);
596 }
597
598 data2[ofs] = typ;
599 }
600 break;
601 }
602 }
603 runtime·unlock(&settype_lock);
604
605 mp->settype_bufsize = 0;
606 }
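
The MTypes_Bytes representation written here keeps up to 8 distinct type words at the front of the buffer, followed by one byte per object indexing into that table (byte 0 meaning no type recorded); when an object would need a ninth distinct type, settype_flush upgrades the span to MTypes_Words. A standalone sketch of the byte-compressed layout and the matching lookup from gettype, with arbitrary values:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
	uintptr_t ntypes = 10;  /* objects in the span */
	size_t nbytes = 8*sizeof(uintptr_t) + ntypes;  /* 8 slots + 1 byte per object */
	unsigned char *data = calloc(1, nbytes);
	if (data == NULL)
		return 1;
	uintptr_t *slots = (uintptr_t*)data;  /* slots[1..7] hold type words; 0 = untyped */

	/* record type 0xdeadbeef for the object at index 4, as settype_flush does */
	uintptr_t typ = 0xdeadbeef, ofs = 4, j;
	for (j = 1; j < 8; j++) {
		if (slots[j] == typ)
			break;
		if (slots[j] == 0) { slots[j] = typ; break; }
	}
	if (j < 8)
		data[8*sizeof(uintptr_t) + ofs] = (unsigned char)j;
	/* else: the runtime would convert the span to MTypes_Words here */

	/* lookup, as in gettype's MTypes_Bytes case */
	uintptr_t t = slots[data[8*sizeof(uintptr_t) + ofs]];
	printf("object %lu has type %#lx\n", (unsigned long)ofs, (unsigned long)t);
	free(data);
	return 0;
}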
607
608 // It is forbidden to use this function if it is possible that
609 // explicit deallocation via calling runtime·free(v) may happen.
432 void 610 void
433 runtime·settype(void *v, uintptr t) 611 runtime·settype(void *v, uintptr t)
434 { 612 {
613 M *mp;
614 uintptr *buf;
615 uintptr i;
435 MSpan *s; 616 MSpan *s;
436 » uintptr p; 617
437 618 » if(t == 0)
438 » // (Manually inlined copy of runtime·MHeap_Lookup) 619 » » runtime·throw("settype: zero type");
439 » p = (uintptr)v; 620
440 » if(sizeof(void*) == 8) 621 » mp = m;
441 » » p -= (uintptr)runtime·mheap.arena_start; 622 » buf = mp->settype_buf;
442 » s = runtime·mheap.map[p >> PageShift]; 623 » i = mp->settype_bufsize;
443 624 » buf[i+0] = (uintptr)v;
444 » if(s->sizeclass != 0) { 625 » buf[i+1] = t;
445 » » uintptr size = runtime·class_to_size[s->sizeclass]; 626 » i += 2;
446 » » if(s->objtype == nil) { 627 » mp->settype_bufsize = i;
447 » » » uintptr ntypes = (s->npages << PageShift) / size; 628
448 » » » s->objtype = runtime·mallocgc(ntypes*sizeof(Type*), Flag NoPointers, 0, 1); 629 » if(i == nelem(mp->settype_buf)) {
449 » » } 630 » » runtime·settype_flush(mp, false);
450 631 » }
451 » » uintptr ofs = (uintptr)v - (s->start<<PageShift); 632
452 » » ((uintptr*)s->objtype)[ofs/size] = t; 633 » if(DebugTypeAtBlockEnd) {
453 » } else { 634 » » s = runtime·MHeap_Lookup(runtime·mheap, v);
454 » » s->objtype = (void*)t; 635 » » *(uintptr*)((uintptr)v+s->elemsize-sizeof(uintptr)) = t;
455 » } 636 » }
637 }
638
639 void
640 runtime·settype_sysfree(MSpan *s)
641 {
642 » uintptr ntypes, nbytes;
643
644 » if(!s->types.sysalloc)
645 » » return;
646
647 » nbytes = (uintptr)-1;
648
649 » switch (s->types.compression) {
650 » case MTypes_Words:
651 » » ntypes = (s->npages << PageShift) / s->elemsize;
652 » » nbytes = ntypes * sizeof(uintptr);
653 » » break;
654 » case MTypes_Bytes:
655 » » ntypes = (s->npages << PageShift) / s->elemsize;
656 » » nbytes = 8*sizeof(uintptr) + 1*ntypes;
657 » » break;
658 » }
659
660 » if(nbytes != (uintptr)-1) {
661 » » if(0) runtime·printf("settype: SysFree(%p,%x)\n", (void*)s->types.data, (uint32)nbytes);
662 » » runtime·SysFree((void*)s->types.data, nbytes);
663 » }
664 }
665
666 uintptr
667 runtime·gettype(void *v)
668 {
669 » MSpan *s;
670 » uintptr t, ofs;
671 » byte *data;
672
673 » s = runtime·MHeap_LookupMaybe(runtime·mheap, v);
674 » if(s != nil) {
675 » » t = 0;
676 » » switch(s->types.compression) {
677 » » case MTypes_Empty:
678 » » » break;
679 » » case MTypes_Single:
680 » » » t = s->types.data;
681 » » » break;
682 » » case MTypes_Words:
683 » » » ofs = (uintptr)v - (s->start<<PageShift);
684 » » » t = ((uintptr*)s->types.data)[ofs/s->elemsize];
685 » » » break;
686 » » case MTypes_Bytes:
687 » » » ofs = (uintptr)v - (s->start<<PageShift);
688 » » » data = (byte*)s->types.data;
689 » » » t = data[8*sizeof(uintptr) + ofs/s->elemsize];
690 » » » t = ((uintptr*)data)[t];
691 » » » break;
692 » » default:
693 » » » runtime·throw("runtime·gettype: invalid compression kind");
694 » » }
695 » » if(0) {
696 » » » runtime·lock(&settype_lock);
697 » » » runtime·printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
698 » » » runtime·unlock(&settype_lock);
699 » » }
700 » » return t;
701 » }
702 » return 0;
456 } 703 }
457 704
458 // Runtime stubs. 705 // Runtime stubs.
459 706
460 void* 707 void*
461 runtime·mal(uintptr n) 708 runtime·mal(uintptr n)
462 { 709 {
463 return runtime·mallocgc(n, 0, 1, 1); 710 return runtime·mallocgc(n, 0, 1, 1);
464 } 711 }
465 712
466 func new(typ *Type) (ret *uint8) { 713 #pragma textflag 7
467 » uint32 flag = typ->kind&KindNoPointers ? FlagNoPointers : 0; 714 void
468 » ret = runtime·mallocgc(typ->size, flag, 1, 1); 715 runtime·new(Type *typ, uint8 *ret)
469 716 {
470 » if(UseSpanType && !flag) { 717 » uint32 flag;
471 » » //runtime·printf("new %S\n", *typ->string); 718
472 » » runtime·settype(ret, (uintptr)typ); 719 » if(raceenabled)
720 » » m->racepc = runtime·getcallerpc(&typ);
721
722 » if(typ->size == 0) {
723 » » // All 0-length allocations use this pointer.
724 » » // The language does not require the allocations to
725 » » // have distinct values.
726 » » ret = (uint8*)&runtime·zerobase;
727 » } else {
728 » » flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
729 » » ret = runtime·mallocgc(typ->size, flag, 1, 1);
730
731 » » if(UseSpanType && !flag) {
732 » » » if(false) {
733 » » » » runtime·printf("new %S: %p\n", *typ->string, ret );
734 » » » }
735 » » » runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObjec t);
736 » » }
473 } 737 }
474 738
475 FLUSH(&ret); 739 FLUSH(&ret);
476 } 740 }
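
For zero-size allocations, runtime·new (and runtime·cnew below) returns the address of the single shared global runtime·zerobase rather than a fresh block, which the language permits because zero-size allocations need not have distinct addresses. A sketch of the trick:

#include <stdio.h>
#include <stdlib.h>

static char zerobase;  /* stand-in for runtime·zerobase */

static void *newobj(size_t size) {
	if (size == 0)
		return &zerobase;  /* all 0-length allocations alias this global */
	return calloc(1, size);
}

int main(void) {
	void *a = newobj(0), *b = newobj(0), *c = newobj(8);
	printf("a==b: %d, a==c: %d\n", a == b, a == c);  /* prints 1, 0 */
	free(c);
	return 0;
}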
477 741
742 // same as runtime·new, but callable from C
478 void* 743 void*
479 runtime·stackalloc(uint32 n) 744 runtime·cnew(Type *typ)
480 { 745 {
481 » // Stackalloc must be called on scheduler stack, so that we 746 » uint32 flag;
482 » // never try to grow the stack during the code that stackalloc runs. 747 » void *ret;
483 » // Doing so would cause a deadlock (issue 1547). 748
484 » if(g != m->g0) 749 » if(raceenabled)
485 » » runtime·throw("stackalloc not on scheduler stack"); 750 » » m->racepc = runtime·getcallerpc(&typ);
486 751
487 » // Stack allocator uses malloc/free most of the time, 752 » if(typ->size == 0) {
488 » // but if we're in the middle of malloc and need stack, 753 » » // All 0-length allocations use this pointer.
489 » // we have to do something else to avoid deadlock. 754 » » // The language does not require the allocations to
490 » // In that case, we fall back on a fixed-size free-list 755 » » // have distinct values.
491 » // allocator, assuming that inside malloc all the stack 756 » » ret = (uint8*)&runtime·zerobase;
492 » // frames are small, so that all the stack allocations 757 » } else {
493 » // will be a single size, the minimum (right now, 5k). 758 » » flag = typ->kind&KindNoPointers ? FlagNoPointers : 0;
494 » if(m->mallocing || m->gcing || n == FixedStack) { 759 » » ret = runtime·mallocgc(typ->size, flag, 1, 1);
495 » » if(n != FixedStack) { 760
496 » » » runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n); 761 » » if(UseSpanType && !flag) {
497 » » » runtime·throw("stackalloc"); 762 » » » if(false) {
498 » » } 763 » » » » runtime·printf("new %S: %p\n", *typ->string, ret);
499 » » return runtime·FixAlloc_Alloc(m->stackalloc); 764 » » » }
500 » } 765 » » » runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
501 » return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0); 766 » » }
502 } 767 » }
503 768
504 void 769 » return ret;
505 runtime·stackfree(void *v, uintptr n)
506 {
507 » if(m->mallocing || m->gcing || n == FixedStack) {
508 » » runtime·FixAlloc_Free(m->stackalloc, v);
509 » » return;
510 » }
511 » runtime·free(v);
512 } 770 }
513 771
514 func GC() { 772 func GC() {
515 runtime·gc(1); 773 runtime·gc(1);
516 } 774 }
517 775
518 func SetFinalizer(obj Eface, finalizer Eface) { 776 func SetFinalizer(obj Eface, finalizer Eface) {
519 byte *base; 777 byte *base;
520 uintptr size; 778 uintptr size;
521 FuncType *ft; 779 FuncType *ft;
522 » int32 i, nret; 780 » int32 i;
781 » uintptr nret;
523 Type *t; 782 Type *t;
524 783
525 if(obj.type == nil) { 784 if(obj.type == nil) {
526 runtime·printf("runtime.SetFinalizer: first argument is nil interface\n"); 785 runtime·printf("runtime.SetFinalizer: first argument is nil interface\n");
527 goto throw; 786 goto throw;
528 } 787 }
529 if(obj.type->kind != KindPtr) { 788 if(obj.type->kind != KindPtr) {
530 runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string); 789 runtime·printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
531 goto throw; 790 goto throw;
532 } 791 }
(...skipping 22 matching lines...)
555 runtime·printf("runtime.SetFinalizer: finalizer already set\n"); 814 runtime·printf("runtime.SetFinalizer: finalizer already set\n");
556 goto throw; 815 goto throw;
557 } 816 }
558 return; 817 return;
559 818
560 badfunc: 819 badfunc:
561 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string); 820 runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
562 throw: 821 throw:
563 runtime·throw("runtime.SetFinalizer"); 822 runtime·throw("runtime.SetFinalizer");
564 } 823 }