LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 // Page heap. | 5 // Page heap. |
6 // | 6 // |
7 // See malloc.h for overview. | 7 // See malloc.h for overview. |
8 // | 8 // |
9 // When a MSpan is in the heap free list, state == MSpanFree | 9 // When a MSpan is in the heap free list, state == MSpanFree |
10 // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span. | 10 // and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span. |
(...skipping 155 matching lines...)
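A minimal standalone sketch of the invariant in the header comment above: for a span on the free list, the page-to-span map carries an entry for both the first and the last page of the span, so a neighbor can be found by probing one page past either end of its own range. The types and names below are illustrative, not the runtime's.

#include <stdio.h>
#include <stdint.h>

enum { NPAGES = 64 };

typedef struct Span {
    uintptr_t start;    /* first page number */
    uintptr_t npages;   /* length in pages */
} Span;

static Span *pagemap[NPAGES];   /* page number -> owning span */

/* Record a span that just went onto a free list: only the boundary pages
 * need entries, which is what the comment above promises. */
static void record_free_span(Span *s) {
    pagemap[s->start] = s;
    pagemap[s->start + s->npages - 1] = s;
}

int main(void) {
    Span s = { .start = 8, .npages = 4 };
    record_free_span(&s);
    printf("page 8 -> %p, page 11 -> %p\n",
           (void*)pagemap[8], (void*)pagemap[11]);
    return 0;
}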
166 } | 166 } |
167 | 167 |
168 // Allocate a new span of npage pages from the heap | 168 // Allocate a new span of npage pages from the heap |
169 // and record its size class in the HeapMap and HeapMapCache. | 169 // and record its size class in the HeapMap and HeapMapCache. |
170 MSpan* | 170 MSpan* |
171 runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero) | 171 runtime·MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero) |
172 { | 172 { |
173 MSpan *s; | 173 MSpan *s; |
174 | 174 |
175 runtime·lock(h); | 175 runtime·lock(h); |
176 » mstats.heap_alloc += m->mcache->local_cachealloc; | 176 » mstats.heap_alloc += g->m->mcache->local_cachealloc; |
177 » m->mcache->local_cachealloc = 0; | 177 » g->m->mcache->local_cachealloc = 0; |
178 s = MHeap_AllocLocked(h, npage, sizeclass); | 178 s = MHeap_AllocLocked(h, npage, sizeclass); |
179 if(s != nil) { | 179 if(s != nil) { |
180 mstats.heap_inuse += npage<<PageShift; | 180 mstats.heap_inuse += npage<<PageShift; |
181 if(large) { | 181 if(large) { |
182 mstats.heap_objects++; | 182 mstats.heap_objects++; |
183 mstats.heap_alloc += npage<<PageShift; | 183 mstats.heap_alloc += npage<<PageShift; |
184 // Swept spans are at the end of lists. | 184 // Swept spans are at the end of lists. |
185 if(s->npages < nelem(h->free)) | 185 if(s->npages < nelem(h->free)) |
186 runtime·MSpanList_InsertBack(&h->busy[s->npages], s); | 186 runtime·MSpanList_InsertBack(&h->busy[s->npages], s); |
187 else | 187 else |
(...skipping 189 matching lines...)
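The substance of this CL, here and in the hunks below, is mechanical: every direct use of the thread-local M (m->mcache, m->locks) becomes g->m->..., so the current M is always reached through the current goroutine's g rather than through a separate global. A minimal sketch of that access pattern, with illustrative types that are not the runtime's:

#include <stdio.h>

struct M;
typedef struct G { struct M *m; } G;   /* goroutine: points at its M */
typedef struct M { int locks; } M;     /* per-OS-thread state */

/* Stand-in for the runtime's g register / TLS lookup. */
static G *getg(void) {
    static M m0;
    static G g0 = { &m0 };
    return &g0;
}

static void acquire(void) { getg()->m->locks++; }  /* was: m->locks++    */
static void release(void) { getg()->m->locks--; }  /* now: g->m->locks-- */

int main(void) {
    acquire();
    printf("locks = %d\n", getg()->m->locks);
    release();
    return 0;
}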
377 if(s == nil || p < s->start || v >= s->limit || s->state != MSpanInUse) | 377 if(s == nil || p < s->start || v >= s->limit || s->state != MSpanInUse) |
378 return nil; | 378 return nil; |
379 return s; | 379 return s; |
380 } | 380 } |
381 | 381 |
382 // Free the span back into the heap. | 382 // Free the span back into the heap. |
383 void | 383 void |
384 runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct) | 384 runtime·MHeap_Free(MHeap *h, MSpan *s, int32 acct) |
385 { | 385 { |
386 runtime·lock(h); | 386 runtime·lock(h); |
387 » mstats.heap_alloc += m->mcache->local_cachealloc; | 387 » mstats.heap_alloc += g->m->mcache->local_cachealloc; |
388 » m->mcache->local_cachealloc = 0; | 388 » g->m->mcache->local_cachealloc = 0; |
389 mstats.heap_inuse -= s->npages<<PageShift; | 389 mstats.heap_inuse -= s->npages<<PageShift; |
390 if(acct) { | 390 if(acct) { |
391 mstats.heap_alloc -= s->npages<<PageShift; | 391 mstats.heap_alloc -= s->npages<<PageShift; |
392 mstats.heap_objects--; | 392 mstats.heap_objects--; |
393 } | 393 } |
394 MHeap_FreeLocked(h, s); | 394 MHeap_FreeLocked(h, s); |
395 runtime·unlock(h); | 395 runtime·unlock(h); |
396 } | 396 } |
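MHeap_Alloc and MHeap_Free both start by folding the per-M cached allocation delta into the global heap_alloc stat and resetting it. A standalone sketch of that flush idiom, assuming the caller already holds the heap lock; the names are illustrative:

#include <stdio.h>
#include <stdint.h>

typedef struct { int64_t heap_alloc; } Stats;
typedef struct { int64_t local_cachealloc; } Cache;

static Stats mstats;
static Cache cache;    /* stands in for g->m->mcache */

/* Fold the privately accumulated delta into the global counter.
 * Assumes the heap lock is held, as at the top of MHeap_Alloc/Free. */
static void flush_cache_stats(void) {
    mstats.heap_alloc += cache.local_cachealloc;
    cache.local_cachealloc = 0;
}

int main(void) {
    cache.local_cachealloc = 4096;   /* bytes charged to the local cache */
    flush_cache_stats();
    printf("heap_alloc = %lld\n", (long long)mstats.heap_alloc);
    return 0;
}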
397 | 397 |
398 static void | 398 static void |
(...skipping 250 matching lines...)
649 Special **t, *x; | 649 Special **t, *x; |
650 uintptr offset; | 650 uintptr offset; |
651 byte kind; | 651 byte kind; |
652 | 652 |
653 span = runtime·MHeap_LookupMaybe(&runtime·mheap, p); | 653 span = runtime·MHeap_LookupMaybe(&runtime·mheap, p); |
654 if(span == nil) | 654 if(span == nil) |
655 runtime·throw("addspecial on invalid pointer"); | 655 runtime·throw("addspecial on invalid pointer"); |
656 | 656 |
657 // Ensure that the span is swept. | 657 // Ensure that the span is swept. |
658 // GC accesses specials list w/o locks. And it's just much safer. | 658 // GC accesses specials list w/o locks. And it's just much safer. |
659 » m->locks++; | 659 » g->m->locks++; |
660 runtime·MSpan_EnsureSwept(span); | 660 runtime·MSpan_EnsureSwept(span); |
661 | 661 |
662 offset = (uintptr)p - (span->start << PageShift); | 662 offset = (uintptr)p - (span->start << PageShift); |
663 kind = s->kind; | 663 kind = s->kind; |
664 | 664 |
665 runtime·lock(&span->specialLock); | 665 runtime·lock(&span->specialLock); |
666 | 666 |
667 // Find splice point, check for existing record. | 667 // Find splice point, check for existing record. |
668 t = &span->specials; | 668 t = &span->specials; |
669 while((x = *t) != nil) { | 669 while((x = *t) != nil) { |
670 if(offset == x->offset && kind == x->kind) { | 670 if(offset == x->offset && kind == x->kind) { |
671 runtime·unlock(&span->specialLock); | 671 runtime·unlock(&span->specialLock); |
672 » » » m->locks--; | 672 » » » g->m->locks--; |
673 return false; // already exists | 673 return false; // already exists |
674 } | 674 } |
675 if(offset < x->offset || (offset == x->offset && kind < x->kind)) | 675 if(offset < x->offset || (offset == x->offset && kind < x->kind)) |
676 break; | 676 break; |
677 t = &x->next; | 677 t = &x->next; |
678 } | 678 } |
679 // Splice in record, fill in offset. | 679 // Splice in record, fill in offset. |
680 s->offset = offset; | 680 s->offset = offset; |
681 s->next = x; | 681 s->next = x; |
682 *t = s; | 682 *t = s; |
683 runtime·unlock(&span->specialLock); | 683 runtime·unlock(&span->specialLock); |
684 » m->locks--; | 684 » g->m->locks--; |
685 return true; | 685 return true; |
686 } | 686 } |
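addspecial keeps each span's specials list sorted by (offset, kind) and walks it with a pointer to the next-pointer, so finding an existing record and finding the splice point are the same loop and head insertion needs no special case. A self-contained sketch of that list discipline, with illustrative types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Special {
    struct Special *next;
    uint16_t offset;
    uint8_t  kind;
} Special;

/* Insert s into the list kept sorted by (offset, kind).
 * Returns false if an equal record is already present. */
static bool add_sorted(Special **head, Special *s) {
    Special **t, *x;
    for (t = head; (x = *t) != NULL; t = &x->next) {
        if (x->offset == s->offset && x->kind == s->kind)
            return false;                    /* already exists */
        if (s->offset < x->offset ||
            (s->offset == x->offset && s->kind < x->kind))
            break;                           /* splice point found */
    }
    s->next = x;    /* x == *t here, NULL if we fell off the tail */
    *t = s;
    return true;
}

int main(void) {
    Special a = { .offset = 16, .kind = 1 }, b = { .offset = 8, .kind = 1 };
    Special *head = NULL;
    printf("%d %d %d\n", add_sorted(&head, &a),
           add_sorted(&head, &b), add_sorted(&head, &b));  /* 1 1 0 */
    return 0;
}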
687 | 687 |
688 // Removes the Special record of the given kind for the object p. | 688 // Removes the Special record of the given kind for the object p. |
689 // Returns the record if the record existed, nil otherwise. | 689 // Returns the record if the record existed, nil otherwise. |
690 // The caller must FixAlloc_Free the result. | 690 // The caller must FixAlloc_Free the result. |
691 static Special* | 691 static Special* |
692 removespecial(void *p, byte kind) | 692 removespecial(void *p, byte kind) |
693 { | 693 { |
694 MSpan *span; | 694 MSpan *span; |
695 Special *s, **t; | 695 Special *s, **t; |
696 uintptr offset; | 696 uintptr offset; |
697 | 697 |
698 span = runtime·MHeap_LookupMaybe(&runtime·mheap, p); | 698 span = runtime·MHeap_LookupMaybe(&runtime·mheap, p); |
699 if(span == nil) | 699 if(span == nil) |
700 runtime·throw("removespecial on invalid pointer"); | 700 runtime·throw("removespecial on invalid pointer"); |
701 | 701 |
702 // Ensure that the span is swept. | 702 // Ensure that the span is swept. |
703 // GC accesses specials list w/o locks. And it's just much safer. | 703 // GC accesses specials list w/o locks. And it's just much safer. |
704 » m->locks++; | 704 » g->m->locks++; |
705 runtime·MSpan_EnsureSwept(span); | 705 runtime·MSpan_EnsureSwept(span); |
706 | 706 |
707 offset = (uintptr)p - (span->start << PageShift); | 707 offset = (uintptr)p - (span->start << PageShift); |
708 | 708 |
709 runtime·lock(&span->specialLock); | 709 runtime·lock(&span->specialLock); |
710 t = &span->specials; | 710 t = &span->specials; |
711 while((s = *t) != nil) { | 711 while((s = *t) != nil) { |
712 // This function is used for finalizers only, so we don't check for | 712 // This function is used for finalizers only, so we don't check for |
713 // "interior" specials (p must be exactly equal to s->offset). | 713 // "interior" specials (p must be exactly equal to s->offset). |
714 if(offset == s->offset && kind == s->kind) { | 714 if(offset == s->offset && kind == s->kind) { |
715 *t = s->next; | 715 *t = s->next; |
716 runtime·unlock(&span->specialLock); | 716 runtime·unlock(&span->specialLock); |
717 » » » m->locks--; | 717 » » » g->m->locks--; |
718 return s; | 718 return s; |
719 } | 719 } |
720 t = &s->next; | 720 t = &s->next; |
721 } | 721 } |
722 runtime·unlock(&span->specialLock); | 722 runtime·unlock(&span->specialLock); |
723 » m->locks--; | 723 » g->m->locks--; |
724 return nil; | 724 return nil; |
725 } | 725 } |
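removespecial uses the same pointer-to-pointer walk to unlink a matching record and hand it back to the caller, which owns freeing it (FixAlloc_Free in the real code). A standalone sketch, again with illustrative types:

#include <stdint.h>
#include <stdio.h>

typedef struct Special {
    struct Special *next;
    uint16_t offset;
    uint8_t  kind;
} Special;

/* Unlink and return the record matching (offset, kind), or NULL. */
static Special *remove_sorted(Special **head, uint16_t offset, uint8_t kind) {
    Special **t, *s;
    for (t = head; (s = *t) != NULL; t = &s->next) {
        if (s->offset == offset && s->kind == kind) {
            *t = s->next;   /* unlink without special-casing the head */
            return s;       /* caller frees */
        }
    }
    return NULL;
}

int main(void) {
    Special a = { .offset = 8, .kind = 1 };
    Special *head = &a;
    printf("%s\n", remove_sorted(&head, 8, 1) ? "removed" : "missing");
    return 0;
}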
726 | 726 |
727 // Adds a finalizer to the object p. Returns true if it succeeded. | 727 // Adds a finalizer to the object p. Returns true if it succeeded. |
728 bool | 728 bool |
729 runtime·addfinalizer(void *p, FuncVal *f, uintptr nret, Type *fint, PtrType *ot) | 729 runtime·addfinalizer(void *p, FuncVal *f, uintptr nret, Type *fint, PtrType *ot) |
730 { | 730 { |
731 SpecialFinalizer *s; | 731 SpecialFinalizer *s; |
732 | 732 |
733 runtime·lock(&runtime·mheap.speciallock); | 733 runtime·lock(&runtime·mheap.speciallock); |
(...skipping 189 matching lines...)
923 runtime·unlock(c); | 923 runtime·unlock(c); |
924 } else { | 924 } else { |
925 // Swept spans are at the end of lists. | 925 // Swept spans are at the end of lists. |
926 if(s->npages < nelem(h->free)) | 926 if(s->npages < nelem(h->free)) |
927 runtime·MSpanList_InsertBack(&h->busy[s->npages], s); | 927 runtime·MSpanList_InsertBack(&h->busy[s->npages], s); |
928 else | 928 else |
929 runtime·MSpanList_InsertBack(&h->busylarge, s); | 929 runtime·MSpanList_InsertBack(&h->busylarge, s); |
930 runtime·unlock(h); | 930 runtime·unlock(h); |
931 } | 931 } |
932 } | 932 } |
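This hunk and the MHeap_Alloc hunk above pick the busy list the same way: a span shorter than nelem(h->free) pages goes on the sized busy list for its page count, anything larger goes on busylarge, and InsertBack keeps freshly swept spans at the tail as the comments note. A sketch of that selection, where MaxSmallPages is a stand-in for nelem(h->free):

#include <stddef.h>
#include <stdio.h>

enum { MaxSmallPages = 128 };   /* stand-in for nelem(h->free) */

typedef struct { const char *name; } SpanList;

typedef struct {
    SpanList busy[MaxSmallPages];   /* one list per small page count */
    SpanList busylarge;             /* everything else */
} Heap;

static SpanList *busy_list_for(Heap *h, size_t npages) {
    return npages < MaxSmallPages ? &h->busy[npages] : &h->busylarge;
}

int main(void) {
    Heap h = { .busylarge = { "busylarge" } };
    h.busy[4].name = "busy[4]";
    printf("%s\n", busy_list_for(&h, 4)->name);      /* busy[4]   */
    printf("%s\n", busy_list_for(&h, 1000)->name);   /* busylarge */
    return 0;
}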