LEFT | RIGHT |
(no file at all) | |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 // Memory allocator, based on tcmalloc. | 5 // Memory allocator, based on tcmalloc. |
6 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html | 6 // http://goog-perftools.sourceforge.net/doc/tcmalloc.html |
7 | 7 |
8 // The main allocator works in runs of pages. | 8 // The main allocator works in runs of pages. |
9 // Small allocation sizes (up to and including 32 kB) are | 9 // Small allocation sizes (up to and including 32 kB) are |
10 // rounded to one of about 100 size classes, each of which | 10 // rounded to one of about 100 size classes, each of which |
(...skipping 75 matching lines...) |
86 typedef struct MStats MStats; | 86 typedef struct MStats MStats; |
87 typedef struct MLink MLink; | 87 typedef struct MLink MLink; |
88 typedef struct GCStats GCStats; | 88 typedef struct GCStats GCStats; |
89 | 89 |
90 enum | 90 enum |
91 { | 91 { |
92 PageShift = 13, | 92 PageShift = 13, |
93 PageSize = 1<<PageShift, | 93 PageSize = 1<<PageShift, |
94 PageMask = PageSize - 1, | 94 PageMask = PageSize - 1, |
95 }; | 95 }; |
96 typedef uintptr PageID; // address >> PageShift | 96 typedef uintptr pageID; // address >> PageShift |
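With PageShift = 13 a page is 8 kB, and any address splits into a page number (the PageID/pageID above) and an in-page offset. A standalone sketch of the arithmetic, in plain C rather than runtime C:

    #include <stdint.h>

    enum { PageShift = 13, PageSize = 1 << PageShift, PageMask = PageSize - 1 };

    // Split an address into page number and in-page offset.
    // e.g. 0x12345 -> page 0x9, offset 0x345 (0x9*0x2000 + 0x345 == 0x12345).
    uintptr_t page_of(uintptr_t addr)   { return addr >> PageShift; }
    uintptr_t offset_in(uintptr_t addr) { return addr & PageMask; }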
97 | 97 |
98 enum | 98 enum |
99 { | 99 { |
100 // Computed constant. The definition of MaxSmallSize and the | 100 // Computed constant. The definition of MaxSmallSize and the |
101 // algorithm in msize.c produce some number of different allocation | 101 // algorithm in msize.c produce some number of different allocation |
102 // size classes. NumSizeClasses is that number. It's needed here | 102 // size classes. NumSizeClasses is that number. It's needed here |
103 // because there are static arrays of this length; when msize runs its | 103 // because there are static arrays of this length; when msize runs its |
104 // size choosing algorithm it double-checks that NumSizeClasses agrees. | 104 // size choosing algorithm it double-checks that NumSizeClasses agrees. |
105 NumSizeClasses = 67, | 105 NumSizeClasses = 67, |
106 | 106 |
(...skipping 27 matching lines...) |
134 #else | 134 #else |
135 MHeapMap_Bits = 37 - PageShift, | 135 MHeapMap_Bits = 37 - PageShift, |
136 #endif | 136 #endif |
137 #else | 137 #else |
138 MHeapMap_Bits = 32 - PageShift, | 138 MHeapMap_Bits = 32 - PageShift, |
139 #endif | 139 #endif |
140 | 140 |
141 // Max number of threads to run garbage collection. | 141 // Max number of threads to run garbage collection. |
142 // 2, 3, and 4 are all plausible maximums depending | 142 // 2, 3, and 4 are all plausible maximums depending |
143 // on the hardware details of the machine. The garbage | 143 // on the hardware details of the machine. The garbage |
144 // collector scales well to 8 cpus. | 144 // collector scales well to 32 cpus. |
145 MaxGcproc = 8, | 145 MaxGcproc = 32, |
146 }; | 146 }; |
147 | 147 |
148 // Maximum memory allocation size, a hint for callers. | 148 // Maximum memory allocation size, a hint for callers. |
149 // This must be a #define instead of an enum because it | 149 // This must be a #define instead of an enum because it |
150 // is so large. | 150 // is so large. |
151 #ifdef _64BIT | 151 #ifdef _64BIT |
152 #define MaxMem (1ULL<<(MHeapMap_Bits+PageShift)) /* 128 GB or 32 GB */ | 152 #define MaxMem (1ULL<<(MHeapMap_Bits+PageShift)) /* 128 GB or 32 GB */ |
153 #else | 153 #else |
154 #define MaxMem ((uintptr)-1) | 154 #define MaxMem ((uintptr)-1) |
155 #endif | 155 #endif |
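The "128 GB or 32 GB" note follows directly from the MHeapMap_Bits arithmetic. A worked expansion of the 64-bit branch shown above (the 32 GB figure presumably comes from a 35-bit arena in the skipped lines):

    /* MHeapMap_Bits = 37 - PageShift, so MHeapMap_Bits + PageShift = 37:
         MaxMem = 1ULL << 37 = 137438953472 bytes = 128 GB.
       An assumed 35-bit branch gives 1ULL << 35 = 32 GB. */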
156 | 156 |
157 // A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).) | 157 // A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).) |
158 struct MLink | 158 struct MLink |
159 { | 159 { |
160 MLink *next; | 160 MLink *next; |
161 }; | 161 }; |
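MLink is an intrusive free list: a free block stores the link in its own first word, so list membership costs no extra memory. A minimal standalone sketch (plain C; push and pop are invented names):

    #include <stddef.h>

    typedef struct MLink MLink;
    struct MLink { MLink *next; };

    // Push a free block: its first word becomes the link.
    static void push(MLink **list, void *block) {
            MLink *l = (MLink*)block;
            l->next = *list;
            *list = l;
    }

    // Pop a free block, or NULL when the list is empty.
    static void *pop(MLink **list) {
            MLink *l = *list;
            if(l != NULL)
                    *list = l->next;
            return l;
    }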
162 | 162 |
163 // SysAlloc obtains a large chunk of zeroed memory from the | 163 // sysAlloc obtains a large chunk of zeroed memory from the |
164 // operating system, typically on the order of a hundred kilobytes | 164 // operating system, typically on the order of a hundred kilobytes |
165 // or a megabyte. | 165 // or a megabyte. |
166 // NOTE: SysAlloc returns OS-aligned memory, but the heap allocator | 166 // NOTE: sysAlloc returns OS-aligned memory, but the heap allocator |
167 // may use larger alignment, so the caller must be careful to realign the | 167 // may use larger alignment, so the caller must be careful to realign the |
168 // memory obtained by SysAlloc. | 168 // memory obtained by sysAlloc. |
169 // | 169 // |
170 // SysUnused notifies the operating system that the contents | 170 // SysUnused notifies the operating system that the contents |
171 // of the memory region are no longer needed and can be reused | 171 // of the memory region are no longer needed and can be reused |
172 // for other purposes. | 172 // for other purposes. |
173 // SysUsed notifies the operating system that the contents | 173 // SysUsed notifies the operating system that the contents |
174 // of the memory region are needed again. | 174 // of the memory region are needed again. |
175 // | 175 // |
176 // SysFree returns it unconditionally; this is only used if | 176 // SysFree returns it unconditionally; this is only used if |
177 // an out-of-memory error has been detected midway through | 177 // an out-of-memory error has been detected midway through |
178 // an allocation. It is okay if SysFree is a no-op. | 178 // an allocation. It is okay if SysFree is a no-op. |
179 // | 179 // |
180 // SysReserve reserves address space without allocating memory. | 180 // SysReserve reserves address space without allocating memory. |
181 // If the pointer passed to it is non-nil, the caller wants the | 181 // If the pointer passed to it is non-nil, the caller wants the |
182 // reservation there, but SysReserve can still choose another | 182 // reservation there, but SysReserve can still choose another |
183 // location if that one is unavailable. On some systems and in some | 183 // location if that one is unavailable. On some systems and in some |
184 // cases SysReserve will simply check that the address space is | 184 // cases SysReserve will simply check that the address space is |
185 // available and not actually reserve it. If SysReserve returns | 185 // available and not actually reserve it. If SysReserve returns |
186 // non-nil, it sets *reserved to true if the address space is | 186 // non-nil, it sets *reserved to true if the address space is |
187 // reserved, false if it has merely been checked. | 187 // reserved, false if it has merely been checked. |
188 // NOTE: SysReserve returns OS-aligned memory, but the heap allocator | 188 // NOTE: SysReserve returns OS-aligned memory, but the heap allocator |
189 // may use larger alignment, so the caller must be careful to realign the | 189 // may use larger alignment, so the caller must be careful to realign the |
190 // memory obtained by SysAlloc. | 190 // memory obtained by sysAlloc. |
191 // | 191 // |
192 // SysMap maps previously reserved address space for use. | 192 // SysMap maps previously reserved address space for use. |
193 // The reserved argument is true if the address space was really | 193 // The reserved argument is true if the address space was really |
194 // reserved, not merely checked. | 194 // reserved, not merely checked. |
195 // | 195 // |
196 // SysFault marks a (already SysAlloc'd) region to fault | 196 // SysFault marks a (already sysAlloc'd) region to fault |
197 // if accessed. Used only for debugging the runtime. | 197 // if accessed. Used only for debugging the runtime. |
198 | 198 |
199 void* runtime·SysAlloc(uintptr nbytes, uint64 *stat); | 199 void* runtime·sysAlloc(uintptr nbytes, uint64 *stat); |
200 void runtime·SysFree(void *v, uintptr nbytes, uint64 *stat); | 200 void runtime·SysFree(void *v, uintptr nbytes, uint64 *stat); |
201 void runtime·SysUnused(void *v, uintptr nbytes); | 201 void runtime·SysUnused(void *v, uintptr nbytes); |
202 void runtime·SysUsed(void *v, uintptr nbytes); | 202 void runtime·SysUsed(void *v, uintptr nbytes); |
203 void runtime·SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat); | 203 void runtime·SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat); |
204 void* runtime·SysReserve(void *v, uintptr nbytes, bool *reserved); | 204 void* runtime·SysReserve(void *v, uintptr nbytes, bool *reserved); |
205 void runtime·SysFault(void *v, uintptr nbytes); | 205 void runtime·SysFault(void *v, uintptr nbytes); |
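Taken together these declarations describe a reserve-then-map protocol: grab a large stretch of address space once with SysReserve, then commit pages with SysMap as the heap grows. A hedged sketch of a caller in the same style, mirroring (not quoting) what MHeap_SysAlloc does; the arena_* names and heap_sys stat word are invented:

    static byte *arena_start, *arena_used, *arena_end;
    static bool arena_reserved;
    static uint64 heap_sys;    // stat word threaded through SysMap

    static void
    arena_init(uintptr total)
    {
            arena_start = runtime·SysReserve(nil, total, &arena_reserved);
            arena_used = arena_start;
            arena_end = arena_start + total;
    }

    // Commit the next n bytes (page-aligned) of the reservation.
    static void*
    arena_grow(uintptr n)
    {
            byte *p;

            if(arena_used + n > arena_end)
                    return nil;    // reservation exhausted
            p = arena_used;
            runtime·SysMap(p, n, arena_reserved, &heap_sys);
            arena_used += n;
            return p;
    }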
206 | 206 |
207 // FixAlloc is a simple free-list allocator for fixed size objects. | 207 // FixAlloc is a simple free-list allocator for fixed size objects. |
208 // Malloc uses a FixAlloc wrapped around SysAlloc to manage its | 208 // Malloc uses a FixAlloc wrapped around sysAlloc to manage its |
209 // MCache and MSpan objects. | 209 // MCache and MSpan objects. |
210 // | 210 // |
211 // Memory returned by FixAlloc_Alloc is not zeroed. | 211 // Memory returned by FixAlloc_Alloc is not zeroed. |
212 // The caller is responsible for locking around FixAlloc calls. | 212 // The caller is responsible for locking around FixAlloc calls. |
213 // Callers can keep state in the object but the first word is | 213 // Callers can keep state in the object but the first word is |
214 // smashed by freeing and reallocating. | 214 // smashed by freeing and reallocating. |
215 struct FixAlloc | 215 struct FixAlloc |
216 { | 216 { |
217 uintptr size; | 217 uintptr size; |
218 void (*first)(void *arg, byte *p); // called first time p is returned | 218 void (*first)(void *arg, byte *p); // called first time p is returned |
(...skipping 16 matching lines...) |
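The contract spelled out above (fixed-size objects, memory not zeroed, first word smashed by freeing) is a classic intrusive free-list allocator. A self-contained sketch of the idea in plain C, not the runtime's code; malloc stands in for the sysAlloc-backed chunk refill:

    #include <stdlib.h>

    typedef struct Free Free;
    struct Free { Free *next; };

    typedef struct {
            size_t size;    // fixed object size, >= sizeof(Free)
            Free  *list;    // freed objects, threaded through their first word
    } FixSketch;

    static void *fix_alloc(FixSketch *f) {
            if(f->list) {
                    void *v = f->list;
                    f->list = f->list->next;
                    return v;               // not zeroed, as documented above
            }
            return malloc(f->size);         // stand-in for carving a fresh chunk
    }

    static void fix_free(FixSketch *f, void *v) {
            ((Free*)v)->next = f->list;     // smashes the object's first word
            f->list = v;
    }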
235 { | 235 { |
236 // General statistics. | 236 // General statistics. |
237 uint64 alloc; // bytes allocated and still in use | 237 uint64 alloc; // bytes allocated and still in use |
238 uint64 total_alloc; // bytes allocated (even if freed) | 238 uint64 total_alloc; // bytes allocated (even if freed) |
239 uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate) | 239 uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate) |
240 uint64 nlookup; // number of pointer lookups | 240 uint64 nlookup; // number of pointer lookups |
241 uint64 nmalloc; // number of mallocs | 241 uint64 nmalloc; // number of mallocs |
242 uint64 nfree; // number of frees | 242 uint64 nfree; // number of frees |
243 | 243 |
244 // Statistics about malloc heap. | 244 // Statistics about malloc heap. |
245 // protected by mheap.Lock | 245 // protected by mheap.lock |
246 uint64 heap_alloc; // bytes allocated and still in use | 246 uint64 heap_alloc; // bytes allocated and still in use |
247 uint64 heap_sys; // bytes obtained from system | 247 uint64 heap_sys; // bytes obtained from system |
248 uint64 heap_idle; // bytes in idle spans | 248 uint64 heap_idle; // bytes in idle spans |
249 uint64 heap_inuse; // bytes in non-idle spans | 249 uint64 heap_inuse; // bytes in non-idle spans |
250 uint64 heap_released; // bytes released to the OS | 250 uint64 heap_released; // bytes released to the OS |
251 uint64 heap_objects; // total number of allocated objects | 251 uint64 heap_objects; // total number of allocated objects |
252 | 252 |
253 // Statistics about allocation of low-level fixed-size structures. | 253 // Statistics about allocation of low-level fixed-size structures. |
254 // Protected by FixAlloc locks. | 254 // Protected by FixAlloc locks. |
255 uint64 stacks_inuse; // this number is included in heap_inuse above | 255 uint64 stacks_inuse; // this number is included in heap_inuse above |
(...skipping 20 matching lines...) |
276 struct { | 276 struct { |
277 uint32 size; | 277 uint32 size; |
278 uint64 nmalloc; | 278 uint64 nmalloc; |
279 uint64 nfree; | 279 uint64 nfree; |
280 } by_size[NumSizeClasses]; | 280 } by_size[NumSizeClasses]; |
281 }; | 281 }; |
282 | 282 |
283 #define mstats runtime·memstats | 283 #define mstats runtime·memstats |
284 extern MStats mstats; | 284 extern MStats mstats; |
285 void runtime·updatememstats(GCStats *stats); | 285 void runtime·updatememstats(GCStats *stats); |
| 286 void runtime·ReadMemStats(MStats *stats); |
286 | 287 |
287 // Size classes. Computed and initialized by InitSizes. | 288 // Size classes. Computed and initialized by InitSizes. |
288 // | 289 // |
289 // SizeToClass(0 <= n <= MaxSmallSize) returns the size class, | 290 // SizeToClass(0 <= n <= MaxSmallSize) returns the size class, |
290 // 1 <= sizeclass < NumSizeClasses, for n. | 291 // 1 <= sizeclass < NumSizeClasses, for n. |
291 // Size class 0 is reserved to mean "not small". | 292 // Size class 0 is reserved to mean "not small". |
292 // | 293 // |
293 // class_to_size[i] = largest size in class i | 294 // class_to_size[i] = largest size in class i |
294 // class_to_allocnpages[i] = number of pages to allocate when | 295 // class_to_allocnpages[i] = number of pages to allocate when |
295 // making new objects in class i | 296 // making new objects in class i |
296 | 297 |
297 int32 runtime·SizeToClass(int32); | 298 int32 runtime·SizeToClass(int32); |
298 uintptr runtime·roundupsize(uintptr); | 299 uintptr runtime·roundupsize(uintptr); |
299 extern int32 runtime·class_to_size[NumSizeClasses]; | 300 extern int32 runtime·class_to_size[NumSizeClasses]; |
300 extern int32 runtime·class_to_allocnpages[NumSizeClasses]; | 301 extern int32 runtime·class_to_allocnpages[NumSizeClasses]; |
301 extern int8 runtime·size_to_class8[1024/8 + 1]; | 302 extern int8 runtime·size_to_class8[1024/8 + 1]; |
302 extern int8 runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1]; | 303 extern int8 runtime·size_to_class128[(MaxSmallSize-1024)/128 + 1]; |
303 extern void runtime·InitSizes(void); | 304 extern void runtime·InitSizes(void); |
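The two table shapes above — 8-byte steps up to 1024 bytes, then 128-byte steps — imply a two-level lookup along these lines. A sketch consistent with the declarations, not a quote of msize.c:

    static int32
    size_to_class(int32 size)   // assumes 0 <= size <= MaxSmallSize
    {
            if(size <= 1024)
                    return runtime·size_to_class8[(size+7) >> 3];
            return runtime·size_to_class128[(size-1024+127) >> 7];
    }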
304 | 305 |
305 | |
306 typedef struct MCacheList MCacheList; | 306 typedef struct MCacheList MCacheList; |
307 struct MCacheList | 307 struct MCacheList |
308 { | 308 { |
309 MLink *list; | 309 MLink *list; |
310 uint32 nlist; | 310 uint32 nlist; |
311 }; | 311 }; |
312 | 312 |
313 typedef struct StackFreeList StackFreeList; | 313 typedef struct StackFreeList StackFreeList; |
314 struct StackFreeList | 314 struct StackFreeList |
315 { | 315 { |
316 MLink *list; // linked list of free stacks | 316 MLink *list; // linked list of free stacks |
317 uintptr size; // total size of stacks in list | 317 uintptr size; // total size of stacks in list |
318 }; | 318 }; |
| 319 |
| 320 typedef struct SudoG SudoG; |
319 | 321 |
320 // Per-thread (in Go, per-P) cache for small objects. | 322 // Per-thread (in Go, per-P) cache for small objects. |
321 // No locking needed because it is per-thread (per-P). | 323 // No locking needed because it is per-thread (per-P). |
322 struct MCache | 324 struct MCache |
323 { | 325 { |
324 // The following members are accessed on every malloc, | 326 // The following members are accessed on every malloc, |
325 // so they are grouped here for better caching. | 327 // so they are grouped here for better caching. |
326 int32 next_sample; // trigger heap sample after allocating this many bytes | 328 int32 next_sample; // trigger heap sample after allocating this many bytes |
327 intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap | 329 intptr local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap |
328 // Allocator cache for tiny objects w/o pointers. | 330 // Allocator cache for tiny objects w/o pointers. |
329 // See "Tiny allocator" comment in malloc.goc. | 331 // See "Tiny allocator" comment in malloc.goc. |
330 byte* tiny; | 332 byte* tiny; |
331 uintptr tinysize; | 333 uintptr tinysize; |
332 // The rest is not accessed on every malloc. | 334 // The rest is not accessed on every malloc. |
333 MSpan* alloc[NumSizeClasses]; // spans to allocate from | 335 MSpan* alloc[NumSizeClasses]; // spans to allocate from |
334 | 336 |
335 StackFreeList stackcache[NumStackOrders]; | 337 StackFreeList stackcache[NumStackOrders]; |
336 | 338 |
| 339 SudoG* sudogcache; |
| 340 |
337 void* gcworkbuf; | 341 void* gcworkbuf; |
338 | 342 |
339 // Local allocator stats, flushed during GC. | 343 // Local allocator stats, flushed during GC. |
340 uintptr local_nlookup; // number of pointer lookups | 344 uintptr local_nlookup; // number of pointer lookups |
341 uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize) | 345 uintptr local_largefree; // bytes freed for large objects (>MaxSmallSize) |
342 uintptr local_nlargefree; // number of frees for large objects (>MaxSmallSize) | 346 uintptr local_nlargefree; // number of frees for large objects (>MaxSmallSize) |
343 uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize) | 347 uintptr local_nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize) |
344 }; | 348 }; |
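The tiny/tinysize pair backs the "tiny allocator" the comment points at: pointer-free objects of a few bytes are bump-allocated out of one shared block instead of each taking a full size-class slot. A hedged sketch of the fast path only; alignment and block refill live in malloc.goc and are elided here:

    static void*
    tinyalloc(MCache *c, uintptr size)
    {
            byte *v;

            if(size <= c->tinysize) {    // room left in the cached block?
                    v = c->tiny;
                    c->tiny += size;
                    c->tinysize -= size;
                    return v;
            }
            return nil;    // caller refills or falls back to a size class
    }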
345 | 349 |
346 MSpan* runtime·MCache_Refill(MCache *c, int32 sizeclass); | 350 MSpan* runtime·MCache_Refill(MCache *c, int32 sizeclass); |
(...skipping 16 matching lines...) |
363 { | 367 { |
364 Special* next; // linked list in span | 368 Special* next; // linked list in span |
365 uint16 offset; // span offset of object | 369 uint16 offset; // span offset of object |
366 byte kind; // kind of Special | 370 byte kind; // kind of Special |
367 }; | 371 }; |
368 | 372 |
369 // The described object has a finalizer set for it. | 373 // The described object has a finalizer set for it. |
370 typedef struct SpecialFinalizer SpecialFinalizer; | 374 typedef struct SpecialFinalizer SpecialFinalizer; |
371 struct SpecialFinalizer | 375 struct SpecialFinalizer |
372 { | 376 { |
373 Special; | 377 Special special; |
374 FuncVal* fn; | 378 FuncVal* fn; |
375 uintptr nret; | 379 uintptr nret; |
376 Type* fint; | 380 Type* fint; |
377 PtrType* ot; | 381 PtrType* ot; |
378 }; | 382 }; |
379 | 383 |
380 // The described object is being heap profiled. | 384 // The described object is being heap profiled. |
381 typedef struct Bucket Bucket; // from mprof.h | 385 typedef struct Bucket Bucket; // from mprof.h |
382 typedef struct SpecialProfile SpecialProfile; | 386 typedef struct SpecialProfile SpecialProfile; |
383 struct SpecialProfile | 387 struct SpecialProfile |
384 { | 388 { |
385 Special; | 389 Special special; |
386 Bucket* b; | 390 Bucket* b; |
387 }; | 391 }; |
388 | 392 |
389 // An MSpan is a run of pages. | 393 // An MSpan is a run of pages. |
390 enum | 394 enum |
391 { | 395 { |
392 MSpanInUse = 0, // allocated for garbage collected heap | 396 MSpanInUse = 0, // allocated for garbage collected heap |
393 MSpanStack, // allocated for use by stack allocator | 397 MSpanStack, // allocated for use by stack allocator |
394 MSpanFree, | 398 MSpanFree, |
395 MSpanListHead, | 399 MSpanListHead, |
396 MSpanDead, | 400 MSpanDead, |
397 }; | 401 }; |
398 struct MSpan | 402 struct MSpan |
399 { | 403 { |
400 MSpan *next; // in a span linked list | 404 MSpan *next; // in a span linked list |
401 MSpan *prev; // in a span linked list | 405 MSpan *prev; // in a span linked list |
402 PageID start; // starting page number | 406 pageID start; // starting page number |
403 uintptr npages; // number of pages in span | 407 uintptr npages; // number of pages in span |
404 MLink *freelist; // list of free objects | 408 MLink *freelist; // list of free objects |
405 // sweep generation: | 409 // sweep generation: |
406 // if sweepgen == h->sweepgen - 2, the span needs sweeping | 410 // if sweepgen == h->sweepgen - 2, the span needs sweeping |
407 // if sweepgen == h->sweepgen - 1, the span is currently being swept | 411 // if sweepgen == h->sweepgen - 1, the span is currently being swept |
408 // if sweepgen == h->sweepgen, the span is swept and ready to use | 412 // if sweepgen == h->sweepgen, the span is swept and ready to use |
409 // h->sweepgen is incremented by 2 after every GC | 413 // h->sweepgen is incremented by 2 after every GC |
410 uint32 sweepgen; | 414 uint32 sweepgen; |
411 uint16 ref; // capacity - number of objects in freelist | 415 uint16 ref; // capacity - number of objects in freelist |
412 uint8 sizeclass; // size class | 416 uint8 sizeclass; // size class |
413 bool incache; // being used by an MCache | 417 bool incache; // being used by an MCache |
414 uint8 state; // MSpanInUse etc | 418 uint8 state; // MSpanInUse etc |
415 uint8 needzero; // needs to be zeroed before allocation | 419 uint8 needzero; // needs to be zeroed before allocation |
416 uintptr elemsize; // computed from sizeclass or from npages | 420 uintptr elemsize; // computed from sizeclass or from npages |
417 int64 unusedsince; // First time spotted by GC in MSpanFree state | 421 int64 unusedsince; // First time spotted by GC in MSpanFree state |
418 uintptr npreleased; // number of pages released to the OS | 422 uintptr npreleased; // number of pages released to the OS |
419 byte *limit; // end of data in span | 423 byte *limit; // end of data in span |
420 Lock specialLock; // guards specials list | 424 Mutex specialLock; // guards specials list |
421 Special *specials; // linked list of special records sorted by offset. | 425 Special *specials; // linked list of special records sorted by offset. |
422 }; | 426 }; |
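The sweepgen comment inside MSpan encodes a small state machine; transcribed into code (the helper name is invented):

    // 0: needs sweeping; 1: being swept; 2: swept and ready; -1: inconsistent.
    static int32
    sweepstate(MSpan *s, uint32 sg)    // sg is h->sweepgen
    {
            if(s->sweepgen == sg-2)
                    return 0;
            if(s->sweepgen == sg-1)
                    return 1;
            if(s->sweepgen == sg)
                    return 2;
            return -1;
    }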
423 | 427 |
424 void runtime·MSpan_Init(MSpan *span, PageID start, uintptr npages); | 428 void runtime·MSpan_Init(MSpan *span, pageID start, uintptr npages); |
425 void runtime·MSpan_EnsureSwept(MSpan *span); | 429 void runtime·MSpan_EnsureSwept(MSpan *span); |
426 bool runtime·MSpan_Sweep(MSpan *span); | 430 bool runtime·MSpan_Sweep(MSpan *span, bool preserve); |
427 | 431 |
428 // Every MSpan is in one doubly-linked list, | 432 // Every MSpan is in one doubly-linked list, |
429 // either one of the MHeap's free lists or one of the | 433 // either one of the MHeap's free lists or one of the |
430 // MCentral's span lists. We use empty MSpan structures as list heads. | 434 // MCentral's span lists. We use empty MSpan structures as list heads. |
431 void runtime·MSpanList_Init(MSpan *list); | 435 void runtime·MSpanList_Init(MSpan *list); |
432 bool runtime·MSpanList_IsEmpty(MSpan *list); | 436 bool runtime·MSpanList_IsEmpty(MSpan *list); |
433 void runtime·MSpanList_Insert(MSpan *list, MSpan *span); | 437 void runtime·MSpanList_Insert(MSpan *list, MSpan *span); |
434 void runtime·MSpanList_InsertBack(MSpan *list, MSpan *span); | 438 void runtime·MSpanList_InsertBack(MSpan *list, MSpan *span); |
435 void runtime·MSpanList_Remove(MSpan *span); // from whatever list it is in | 439 void runtime·MSpanList_Remove(MSpan *span); // from whatever list it is in |
436 | 440 |
437 | 441 |
438 // Central list of free objects of a given size. | 442 // Central list of free objects of a given size. |
439 struct MCentral | 443 struct MCentral |
440 { | 444 { |
441 Lock; | 445 Mutex lock; |
442 int32 sizeclass; | 446 int32 sizeclass; |
443 MSpan nonempty; // list of spans with a free object | 447 MSpan nonempty; // list of spans with a free object |
444 MSpan empty; // list of spans with no free objects (or cached in an MCache) | 448 MSpan empty; // list of spans with no free objects (or cached in an MCache) |
445 }; | 449 }; |
446 | 450 |
447 void runtime·MCentral_Init(MCentral *c, int32 sizeclass); | 451 void runtime·MCentral_Init(MCentral *c, int32 sizeclass); |
448 MSpan* runtime·MCentral_CacheSpan(MCentral *c); | 452 MSpan* runtime·MCentral_CacheSpan(MCentral *c); |
449 void runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s); | 453 void runtime·MCentral_UncacheSpan(MCentral *c, MSpan *s); |
450 bool runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end); | 454 bool runtime·MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end, bool preserve); |
451 | 455 |
452 // Main malloc heap. | 456 // Main malloc heap. |
453 // The heap itself is the "free[]" and "large" arrays, | 457 // The heap itself is the "free[]" and "large" arrays, |
454 // but all the other global data is here too. | 458 // but all the other global data is here too. |
455 struct MHeap | 459 struct MHeap |
456 { | 460 { |
457 Lock; | 461 Mutex lock; |
458 MSpan free[MaxMHeapList]; // free lists of given length | 462 MSpan free[MaxMHeapList]; // free lists of given length |
459 MSpan freelarge; // free lists length >= MaxMHeapList | 463 MSpan freelarge; // free lists length >= MaxMHeapList |
460 MSpan busy[MaxMHeapList]; // busy lists of large objects of given length | 464 MSpan busy[MaxMHeapList]; // busy lists of large objects of given length |
461 MSpan busylarge; // busy lists of large objects length >= MaxMHeapList | 465 MSpan busylarge; // busy lists of large objects length >= MaxMHeapList |
462 MSpan **allspans; // all spans out there | 466 MSpan **allspans; // all spans out there |
463 MSpan **sweepspans; // copy of allspans referenced by sweeper | 467 MSpan **gcspans; // copy of allspans referenced by GC marker or sweeper |
464 uint32 nspan; | 468 uint32 nspan; |
465 uint32 nspancap; | 469 uint32 nspancap; |
466 uint32 sweepgen; // sweep generation, see comment in MSpan | 470 uint32 sweepgen; // sweep generation, see comment in MSpan |
467 uint32 sweepdone; // all spans are swept | 471 uint32 sweepdone; // all spans are swept |
468 | 472 |
469 // span lookup | 473 // span lookup |
470 MSpan** spans; | 474 MSpan** spans; |
471 uintptr spans_mapped; | 475 uintptr spans_mapped; |
472 | 476 |
473 // range of addresses we might see in the heap | 477 // range of addresses we might see in the heap |
474 byte *bitmap; | 478 byte *bitmap; |
475 uintptr bitmap_mapped; | 479 uintptr bitmap_mapped; |
476 byte *arena_start; | 480 byte *arena_start; |
477 byte *arena_used; | 481 byte *arena_used; |
478 byte *arena_end; | 482 byte *arena_end; |
479 bool arena_reserved; | 483 bool arena_reserved; |
480 | 484 |
481 // central free lists for small size classes. | 485 // central free lists for small size classes. |
482 // the padding makes sure that the MCentrals are | 486 // the padding makes sure that the MCentrals are |
483 // spaced CacheLineSize bytes apart, so that each MCentral.Lock | 487 // spaced CacheLineSize bytes apart, so that each MCentral.lock |
484 // gets its own cache line. | 488 // gets its own cache line. |
485 struct { | 489 struct { |
486 MCentral; | 490 MCentral mcentral; |
487 byte pad[CacheLineSize]; | 491 byte pad[CacheLineSize]; |
488 } central[NumSizeClasses]; | 492 } central[NumSizeClasses]; |
489 | 493 |
490 FixAlloc spanalloc; // allocator for Span* | 494 FixAlloc spanalloc; // allocator for Span* |
491 FixAlloc cachealloc; // allocator for MCache* | 495 FixAlloc cachealloc; // allocator for MCache* |
492 FixAlloc specialfinalizeralloc; // allocator for SpecialFinalizer* | 496 FixAlloc specialfinalizeralloc; // allocator for SpecialFinalizer* |
493 FixAlloc specialprofilealloc; // allocator for SpecialProfile* | 497 FixAlloc specialprofilealloc; // allocator for SpecialProfile* |
494 Lock speciallock; // lock for special record allocators. | 498 Mutex speciallock; // lock for special record allocators. |
495 | 499 |
496 // Malloc stats. | 500 // Malloc stats. |
497 uint64 largefree; // bytes freed for large objects (>MaxSmallSize) | 501 uint64 largefree; // bytes freed for large objects (>MaxSmallSize) |
498 uint64 nlargefree; // number of frees for large objects (>MaxSmallSize) | 502 uint64 nlargefree; // number of frees for large objects (>MaxSmallSize) |
499 uint64 nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize) | 503 uint64 nsmallfree[NumSizeClasses]; // number of frees for small objects (<=MaxSmallSize) |
500 }; | 504 }; |
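The padded central[] array above (on both sides of this change) is the standard defense against false sharing: each MCentral is followed by a full cache line of padding so that hammering one size class's lock never bounces the line holding a neighbor. The generic idiom, in plain C with an assumed 64-byte line:

    #define CACHE_LINE 64    // assumed; the runtime uses CacheLineSize

    struct PaddedCounter {
            long count;
            char pad[CACHE_LINE - sizeof(long)];    // round each element up to a line
    };

    struct PaddedCounter percpu[32];    // adjacent entries never share a line

(The runtime pads by a full CacheLineSize after each MCentral rather than rounding up, which costs a little space but needs no sizeof arithmetic.)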
501 #define runtime·mheap runtime·mheap_ | 505 #define runtime·mheap runtime·mheap_ |
502 extern MHeap runtime·mheap; | 506 extern MHeap runtime·mheap; |
503 | 507 |
504 void runtime·MHeap_Init(MHeap *h); | 508 void runtime·MHeap_Init(MHeap *h); |
505 MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero); | 509 MSpan* runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero); |
506 MSpan* runtime·MHeap_AllocStack(MHeap *h, uintptr npage); | 510 MSpan* runtime·MHeap_AllocStack(MHeap *h, uintptr npage); |
507 void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct); | 511 void runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct); |
508 void runtime·MHeap_FreeStack(MHeap *h, MSpan *s); | 512 void runtime·MHeap_FreeStack(MHeap *h, MSpan *s); |
509 MSpan* runtime·MHeap_Lookup(MHeap *h, void *v); | 513 MSpan* runtime·MHeap_Lookup(MHeap *h, void *v); |
510 MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v); | 514 MSpan* runtime·MHeap_LookupMaybe(MHeap *h, void *v); |
511 void* runtime·MHeap_SysAlloc(MHeap *h, uintptr n); | 515 void* runtime·MHeap_SysAlloc(MHeap *h, uintptr n); |
512 void runtime·MHeap_MapBits(MHeap *h); | 516 void runtime·MHeap_MapBits(MHeap *h); |
513 void runtime·MHeap_MapSpans(MHeap *h); | 517 void runtime·MHeap_MapSpans(MHeap *h); |
514 void runtime·MHeap_Scavenger(void); | 518 void runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit); |
515 | 519 |
516 void* runtime·mallocgc(uintptr size, Type* typ, uint32 flag); | |
517 void* runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat); | 520 void* runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat); |
518 int32 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s); | 521 int32 runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s); |
519 void runtime·gc(int32 force); | |
520 uintptr runtime·sweepone(void); | 522 uintptr runtime·sweepone(void); |
521 void runtime·markallocated(void *v, uintptr size, uintptr size0, Type* typ, bool scan); | |
522 void runtime·markspan(void *v, uintptr size, uintptr n, bool leftover); | 523 void runtime·markspan(void *v, uintptr size, uintptr n, bool leftover); |
523 void runtime·unmarkspan(void *v, uintptr size); | 524 void runtime·unmarkspan(void *v, uintptr size); |
524 void runtime·purgecachedstats(MCache*); | 525 void runtime·purgecachedstats(MCache*); |
525 void* runtime·cnew(Type*); | 526 void* runtime·cnew(Type*); |
526 void* runtime·cnewarray(Type*, intgo); | 527 void* runtime·cnewarray(Type*, intgo); |
527 void runtime·tracealloc(void*, uintptr, Type*); | 528 void runtime·tracealloc(void*, uintptr, Type*); |
528 void runtime·tracefree(void*, uintptr); | 529 void runtime·tracefree(void*, uintptr); |
529 void runtime·tracegc(void); | 530 void runtime·tracegc(void); |
| 531 extern Type* runtime·conservative; |
530 | 532 |
531 int32 runtime·gcpercent; | 533 int32 runtime·gcpercent; |
532 int32 runtime·readgogc(void); | 534 int32 runtime·readgogc(void); |
533 void runtime·clearpools(void); | 535 void runtime·clearpools(void); |
534 | 536 |
535 enum | 537 enum |
536 { | 538 { |
537 // flags to malloc | 539 // flags to malloc |
538 FlagNoScan = 1<<0, // GC doesn't have to scan object | 540 FlagNoScan = 1<<0, // GC doesn't have to scan object |
539 FlagNoProfiling = 1<<1, // must not profile | 541 FlagNoZero = 1<<1, // don't zero memory |
540 FlagNoGC = 1<<2, // must not free or scan for pointers | 542 }; |
541 FlagNoZero = 1<<3, // don't zero memory | 543 |
542 FlagNoInvokeGC = 1<<4, // don't invoke GC | 544 void runtime·mProf_Malloc(void*, uintptr); |
543 }; | 545 void runtime·mProf_Free(Bucket*, uintptr, bool); |
544 | 546 void runtime·mProf_GC(void); |
545 void runtime·MProf_Malloc(void*, uintptr); | 547 void runtime·iterate_memprof(void (**callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr)); |
546 void runtime·MProf_Free(Bucket*, uintptr, bool); | |
547 void runtime·MProf_GC(void); | |
548 void runtime·iterate_memprof(void (*callback)(Bucket*, uintptr, uintptr*, uintptr, uintptr, uintptr)); | |
549 int32 runtime·gcprocs(void); | 548 int32 runtime·gcprocs(void); |
550 void runtime·helpgc(int32 nproc); | 549 void runtime·helpgc(int32 nproc); |
551 void runtime·gchelper(void); | 550 void runtime·gchelper(void); |
552 void runtime·createfing(void); | 551 void runtime·createfing(void); |
553 G* runtime·wakefing(void); | 552 G* runtime·wakefing(void); |
554 void runtime·getgcmask(byte*, Type*, byte**, uintptr*); | 553 void runtime·getgcmask(byte*, Type*, byte**, uintptr*); |
| 554 |
| 555 typedef struct Finalizer Finalizer; |
| 556 struct Finalizer |
| 557 { |
| 558 FuncVal *fn; // function to call |
| 559 void *arg; // ptr to object |
| 560 uintptr nret; // bytes of return values from fn |
| 561 Type *fint; // type of first argument of fn |
| 562 PtrType *ot; // type of ptr to object |
| 563 }; |
| 564 |
| 565 typedef struct FinBlock FinBlock; |
| 566 struct FinBlock |
| 567 { |
| 568 FinBlock *alllink; |
| 569 FinBlock *next; |
| 570 int32 cnt; |
| 571 int32 cap; |
| 572 Finalizer fin[1]; |
| 573 }; |
| 574 extern Mutex runtime·finlock; // protects the following variables |
555 extern G* runtime·fing; | 575 extern G* runtime·fing; |
556 extern bool runtime·fingwait; | 576 extern bool runtime·fingwait; |
557 extern bool runtime·fingwake; | 577 extern bool runtime·fingwake; |
558 | 578 extern FinBlock»*runtime·finq;» » // list of finalizers that are to be exe
cuted |
559 void» runtime·setprofilebucket(void *p, Bucket *b); | 579 extern FinBlock»*runtime·finc;» » // cache of free blocks |
| 580 |
| 581 void» runtime·setprofilebucket_m(void); |
560 | 582 |
561 bool runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*); | 583 bool runtime·addfinalizer(void*, FuncVal *fn, uintptr, Type*, PtrType*); |
562 void runtime·removefinalizer(void*); | 584 void runtime·removefinalizer(void*); |
563 void runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot); | 585 void runtime·queuefinalizer(byte *p, FuncVal *fn, uintptr nret, Type *fint, PtrType *ot); |
564 bool runtime·freespecial(Special *s, void *p, uintptr size, bool freed); | 586 bool runtime·freespecial(Special *s, void *p, uintptr size, bool freed); |
565 | 587 |
566 // Information from the compiler about the layout of stack frames. | 588 // Information from the compiler about the layout of stack frames. |
567 typedef struct BitVector BitVector; | 589 typedef struct BitVector BitVector; |
568 struct BitVector | 590 struct BitVector |
569 { | 591 { |
570 int32 n; // # of bits | 592 int32 n; // # of bits |
571 uint8 *bytedata; | 593 uint8 *bytedata; |
572 }; | 594 }; |
573 typedef struct StackMap StackMap; | 595 typedef struct StackMap StackMap; |
574 struct StackMap | 596 struct StackMap |
575 { | 597 { |
576 int32 n; // number of bitmaps | 598 int32 n; // number of bitmaps |
577 int32 nbit; // number of bits in each bitmap | 599 int32 nbit; // number of bits in each bitmap |
578 uint8 bytedata[]; | 600 uint8 bytedata[]; |
579 }; | 601 }; |
580 // Returns pointer map data for the given stackmap index | 602 // Returns pointer map data for the given stackmap index |
581 // (the index is encoded in PCDATA_StackMapIndex). | 603 // (the index is encoded in PCDATA_StackMapIndex). |
582 BitVector runtime·stackmapdata(StackMap *stackmap, int32 n); | 604 BitVector runtime·stackmapdata(StackMap *stackmap, int32 n); |
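stackmapdata selects bitmap n out of the variable-length bytedata array. A plausible shape, assuming each bitmap is padded to whole 4-byte words (the real rounding lives in the runtime and may differ):

    BitVector
    stackmapdata_sketch(StackMap *stackmap, int32 n)
    {
            BitVector bv;
            int32 stride;

            stride = ((stackmap->nbit+31)/32)*4;    // assumed bytes per bitmap
            bv.n = stackmap->nbit;
            bv.bytedata = stackmap->bytedata + n*stride;
            return bv;
    }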
583 | 605 |
| 606 extern BitVector runtime·gcdatamask; |
| 607 extern BitVector runtime·gcbssmask; |
| 608 |
584 // defined in mgc0.go | 609 // defined in mgc0.go |
585 void runtime·gc_m_ptr(Eface*); | 610 void runtime·gc_m_ptr(Eface*); |
586 void runtime·gc_g_ptr(Eface*); | 611 void runtime·gc_g_ptr(Eface*); |
587 void runtime·gc_itab_ptr(Eface*); | 612 void runtime·gc_itab_ptr(Eface*); |
588 | 613 |
589 int32 runtime·setgcpercent(int32); | 614 void runtime·setgcpercent_m(void); |
590 | 615 |
591 // Value we use to mark dead pointers when GODEBUG=gcdead=1. | 616 // Value we use to mark dead pointers when GODEBUG=gcdead=1. |
592 #define PoisonGC ((uintptr)0xf969696969696969ULL) | 617 #define PoisonGC ((uintptr)0xf969696969696969ULL) |
593 #define PoisonStack ((uintptr)0x6868686868686868ULL) | 618 #define PoisonStack ((uintptr)0x6868686868686868ULL) |