LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 // Garbage collector. | 5 // Garbage collector. |
6 | 6 |
7 #include "runtime.h" | 7 #include "runtime.h" |
8 #include "arch_GOARCH.h" | 8 #include "arch_GOARCH.h" |
9 #include "malloc.h" | 9 #include "malloc.h" |
10 #include "stack.h" | 10 #include "stack.h" |
| 11 #include "race.h" |
11 | 12 |
12 enum { | 13 enum { |
13 Debug = 0, | 14 Debug = 0, |
14 PtrSize = sizeof(void*), | |
15 DebugMark = 0, // run second pass to check mark | 15 DebugMark = 0, // run second pass to check mark |
16 DataBlock = 8*1024, | 16 DataBlock = 8*1024, |
17 | 17 |
18 // Four bits per word (see #defines below). | 18 // Four bits per word (see #defines below). |
19 wordsPerBitmapWord = sizeof(void*)*8/4, | 19 wordsPerBitmapWord = sizeof(void*)*8/4, |
20 bitShift = sizeof(void*)*8/4, | 20 bitShift = sizeof(void*)*8/4, |
21 }; | 21 }; |
22 | 22 |
23 // Bits in per-word bitmap. | 23 // Bits in per-word bitmap. |
24 // #defines because enum might not be able to hold the values. | 24 // #defines because enum might not be able to hold the values. |
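A standalone sketch of the per-word bitmap lookup these constants imply, matching the off/bitp/shift computation in sweepspan further down; the arena base address is invented for illustration and standard C types stand in for the runtime's:

    #include <stdint.h>
    #include <stdio.h>

    enum { wordsPerBitmapWord = sizeof(void*)*8/4 };  /* 16 on 64-bit, 8 on 32-bit */

    int main(void) {
        uintptr_t arena_start = 0x10000000;            /* illustrative arena base */
        uintptr_t p = arena_start + 37*sizeof(void*);  /* some heap word */

        /* index of the word within the arena, then its bitmap word and shift;
           the bitmap itself lives just below arena_start and grows down */
        uintptr_t off = (p - arena_start) / sizeof(void*);
        uintptr_t word = off / wordsPerBitmapWord + 1;   /* words below arena_start */
        uintptr_t shift = off % wordsPerBitmapWord;
        printf("heap word %lu -> bitmap word arena_start[-%lu], shift %lu\n",
               (unsigned long)off, (unsigned long)word, (unsigned long)shift);
        return 0;
    }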
(...skipping 51 matching lines...)
76 LFNode node; // must be first | 76 LFNode node; // must be first |
77 uintptr nobj; | 77 uintptr nobj; |
78 byte *obj[512-(sizeof(LFNode)+sizeof(uintptr))/sizeof(byte*)]; | 78 byte *obj[512-(sizeof(LFNode)+sizeof(uintptr))/sizeof(byte*)]; |
79 }; | 79 }; |
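The obj[] bound above pads the buffer to exactly 512 pointer-sized slots. A quick standalone check of that arithmetic, assuming the struct is the runtime's Workbuf and that LFNode is two words (its definition is outside this hunk):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct LFNode { void *next; uintptr_t pushcnt; } LFNode;  /* assumed layout */

    typedef struct Workbuf {
        LFNode node;       /* must be first */
        uintptr_t nobj;
        unsigned char *obj[512 - (sizeof(LFNode)+sizeof(uintptr_t))/sizeof(void*)];
    } Workbuf;

    int main(void) {
        /* prints 512: node (2 words) + nobj (1 word) + 509 obj slots */
        printf("%lu\n", (unsigned long)(sizeof(Workbuf)/sizeof(void*)));
        return 0;
    }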
80 | 80 |
81 typedef struct Finalizer Finalizer; | 81 typedef struct Finalizer Finalizer; |
82 struct Finalizer | 82 struct Finalizer |
83 { | 83 { |
84 void (*fn)(void*); | 84 void (*fn)(void*); |
85 void *arg; | 85 void *arg; |
86 » int32 nret; | 86 » uintptr nret; |
87 }; | 87 }; |
88 | 88 |
89 typedef struct FinBlock FinBlock; | 89 typedef struct FinBlock FinBlock; |
90 struct FinBlock | 90 struct FinBlock |
91 { | 91 { |
92 FinBlock *alllink; | 92 FinBlock *alllink; |
93 FinBlock *next; | 93 FinBlock *next; |
94 int32 cnt; | 94 int32 cnt; |
95 int32 cap; | 95 int32 cap; |
96 Finalizer fin[1]; | 96 Finalizer fin[1]; |
(...skipping 463 matching lines...)
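As a reference for the fin[1] idiom in FinBlock above: the trailing array is over-allocated so a whole batch of Finalizers lives in one block. A minimal sketch of the append pattern handlespecial uses below, with calloc standing in for the runtime allocator and the helper names invented:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct Finalizer { void (*fn)(void*); void *arg; uintptr_t nret; } Finalizer;
    typedef struct FinBlock FinBlock;
    struct FinBlock {
        FinBlock *alllink, *next;
        int32_t cnt, cap;
        Finalizer fin[1];   /* really cap entries, allocated in one shot */
    };

    static FinBlock*
    newfinblock(int32_t cap)
    {
        FinBlock *b = calloc(1, sizeof(FinBlock) + (cap-1)*sizeof(Finalizer));
        b->cap = cap;
        return b;
    }

    static void
    queuefinalizer(FinBlock *b, void (*fn)(void*), void *arg, uintptr_t nret)
    {
        Finalizer *f = &b->fin[b->cnt++];   /* caller ensures cnt < cap */
        f->fn = fn;
        f->arg = arg;
        f->nret = nret;
    }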
560 if(gp->gcstack != (uintptr)nil) { | 560 if(gp->gcstack != (uintptr)nil) { |
561 stk = (Stktop*)gp->gcstack; | 561 stk = (Stktop*)gp->gcstack; |
562 sp = (byte*)gp->gcsp; | 562 sp = (byte*)gp->gcsp; |
563 guard = (byte*)gp->gcguard; | 563 guard = (byte*)gp->gcguard; |
564 } | 564 } |
565 } | 565 } |
566 | 566 |
567 n = 0; | 567 n = 0; |
568 while(stk) { | 568 while(stk) { |
569 if(sp < guard-StackGuard || (byte*)stk < sp) { | 569 if(sp < guard-StackGuard || (byte*)stk < sp) { |
570 » » » runtime·printf("scanstack inconsistent: g%p#%d sp=%p not in [%p,%p]\n", gp, n, sp, guard-StackGuard, stk); | 570 » » » runtime·printf("scanstack inconsistent: g%D#%d sp=%p not in [%p,%p]\n", gp->goid, n, sp, guard-StackGuard, stk); |
571 runtime·throw("scanstack"); | 571 runtime·throw("scanstack"); |
572 } | 572 } |
573 addroot(sp, (byte*)stk - sp); | 573 addroot(sp, (byte*)stk - sp); |
574 sp = (byte*)stk->gobuf.sp; | 574 sp = (byte*)stk->gobuf.sp; |
575 guard = stk->stackguard; | 575 guard = stk->stackguard; |
576 stk = (Stktop*)stk->stackbase; | 576 stk = (Stktop*)stk->stackbase; |
577 n++; | 577 n++; |
578 } | 578 } |
579 } | 579 } |
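The loop above hops from one stack segment to the next through the saved Stktop links. A condensed standalone model of that walk (field names follow the runtime, but the flattened Stktop and the visit callback are illustrative):

    #include <stdint.h>
    #include <stddef.h>

    typedef struct Stktop Stktop;
    struct Stktop {
        uintptr_t stackbase;  /* Stktop of the next (older) segment, 0 at the end */
        uintptr_t gobuf_sp;   /* saved sp within that next segment */
    };

    /* scan [sp, stk) of each segment in turn, newest first */
    static void
    walkstack(Stktop *stk, unsigned char *sp, void (*visit)(void *base, size_t n))
    {
        while(stk) {
            visit(sp, (unsigned char*)stk - sp);
            sp = (unsigned char*)stk->gobuf_sp;
            stk = (Stktop*)stk->stackbase;
        }
    }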
580 | 580 |
581 static void | 581 static void |
582 addfinroots(void *v) | 582 addfinroots(void *v) |
583 { | 583 { |
584 uintptr size; | 584 uintptr size; |
585 | 585 |
586 size = 0; | 586 size = 0; |
587 if(!runtime·mlookup(v, &v, &size, nil) || !runtime·blockspecial(v)) | 587 if(!runtime·mlookup(v, &v, &size, nil) || !runtime·blockspecial(v)) |
588 runtime·throw("mark - finalizer inconsistency"); | 588 runtime·throw("mark - finalizer inconsistency"); |
589 | 589 |
590 // do not mark the finalizer block itself. just mark the things it points at. | 590 // do not mark the finalizer block itself. just mark the things it points at. |
591 addroot(v, size); | 591 addroot(v, size); |
592 } | 592 } |
593 | 593 |
594 static void | 594 static void |
595 addroots(void) | 595 addroots(void) |
596 { | 596 { |
597 G *gp; | 597 G *gp; |
598 FinBlock *fb; | 598 FinBlock *fb; |
599 » byte *d; | 599 » byte *p; |
600 » P *p, **pp; | 600 » MSpan *s, **allspans; |
| 601 » uint32 spanidx; |
601 | 602 |
602 work.nroot = 0; | 603 work.nroot = 0; |
603 | 604 |
604 // mark data+bss. | 605 // mark data+bss. |
605 » for(d=data; d<ebss; d+=DataBlock) | 606 » for(p=data; p<ebss; p+=DataBlock) |
606 » » addroot(d, d+DataBlock < ebss ? DataBlock : ebss-d); | 607 » » addroot(p, p+DataBlock < ebss ? DataBlock : ebss-p); |
607 | 608 |
608 » for(pp=runtime·allp; p=*pp; pp++) { | 609 » // MSpan.types |
609 » » for(gp=p->allg; gp!=nil; gp=gp->alllink) { | 610 » allspans = runtime·mheap.allspans; |
610 » » » switch(gp->status){ | 611 » for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) { |
611 » » » default: | 612 » » s = allspans[spanidx]; |
612 » » » » runtime·printf("unexpected G.status %d\n", gp->status); | 613 » » if(s->state == MSpanInUse) { |
613 » » » » runtime·throw("mark - bad status"); | 614 » » » switch(s->types.compression) { |
614 » » » case Gdead: | 615 » » » case MTypes_Empty: |
| 616 » » » case MTypes_Single: |
615 break; | 617 break; |
616 » » » case Grunning: | 618 » » » case MTypes_Words: |
617 » » » » if(gp != g) | 619 » » » case MTypes_Bytes: |
618 » » » » » runtime·throw("mark - world not stopped"); | 620 » » » » addroot((byte*)&s->types.data, sizeof(void*)); |
619 » » » » addstackroots(gp); | |
620 » » » » break; | |
621 » » » case Grunnable: | |
622 » » » case Gsyscall: | |
623 » » » case Gwaiting: | |
624 » » » » addstackroots(gp); | |
625 break; | 621 break; |
626 } | 622 } |
627 } | 623 } |
628 } | 624 } |
629 | 625 |
| 626 for(gp=runtime·allg; gp!=nil; gp=gp->alllink) { |
| 627 switch(gp->status){ |
| 628 default: |
| 629 runtime·printf("unexpected G.status %d\n", gp->status); |
| 630 runtime·throw("mark - bad status"); |
| 631 case Gdead: |
| 632 break; |
| 633 case Grunning: |
| 634 if(gp != g) |
| 635 runtime·throw("mark - world not stopped"); |
| 636 addstackroots(gp); |
| 637 break; |
| 638 case Grunnable: |
| 639 case Gsyscall: |
| 640 case Gwaiting: |
| 641 addstackroots(gp); |
| 642 break; |
| 643 } |
| 644 } |
| 645 |
630 runtime·walkfintab(addfinroots); | 646 runtime·walkfintab(addfinroots); |
631 | 647 |
632 for(fb=allfin; fb; fb=fb->alllink) | 648 for(fb=allfin; fb; fb=fb->alllink) |
633 addroot((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0])); | 649 addroot((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0])); |
634 } | 650 } |
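Worth noting about the data+bss loop at the top of addroots: the range is cut into DataBlock-sized roots so the parallel mark can spread them across threads instead of scanning one huge root. A standalone sketch of that chunking, with the segment bounds passed in rather than taken from the linker symbols:

    #include <stdint.h>

    enum { DataBlock = 8*1024 };

    /* split [data, ebss) into DataBlock-sized root ranges */
    static void
    adddatablocks(unsigned char *data, unsigned char *ebss,
                  void (*addroot)(unsigned char *p, uintptr_t n))
    {
        unsigned char *p;
        for(p = data; p < ebss; p += DataBlock)
            addroot(p, p+DataBlock < ebss ? (uintptr_t)DataBlock : (uintptr_t)(ebss-p));
    }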
635 | 651 |
636 static bool | 652 static bool |
637 handlespecial(byte *p, uintptr size) | 653 handlespecial(byte *p, uintptr size) |
638 { | 654 { |
639 void (*fn)(void*); | 655 void (*fn)(void*); |
640 » int32 nret; | 656 » uintptr nret; |
641 FinBlock *block; | 657 FinBlock *block; |
642 Finalizer *f; | 658 Finalizer *f; |
643 | 659 |
644 if(!runtime·getfinalizer(p, true, &fn, &nret)) { | 660 if(!runtime·getfinalizer(p, true, &fn, &nret)) { |
645 runtime·setblockspecial(p, false); | 661 runtime·setblockspecial(p, false); |
646 runtime·MProf_Free(p, size); | 662 runtime·MProf_Free(p, size); |
647 return false; | 663 return false; |
648 } | 664 } |
649 | 665 |
650 runtime·lock(&finlock); | 666 runtime·lock(&finlock); |
(...skipping 21 matching lines...)
672 // Sweep frees or collects finalizers for blocks not marked in the mark phase. | 688 // Sweep frees or collects finalizers for blocks not marked in the mark phase. |
673 // It clears the mark bits in preparation for the next GC round. | 689 // It clears the mark bits in preparation for the next GC round. |
674 static void | 690 static void |
675 sweepspan(ParFor *desc, uint32 idx) | 691 sweepspan(ParFor *desc, uint32 idx) |
676 { | 692 { |
677 int32 cl, n, npages; | 693 int32 cl, n, npages; |
678 uintptr size; | 694 uintptr size; |
679 byte *p; | 695 byte *p; |
680 MCache *c; | 696 MCache *c; |
681 byte *arena_start; | 697 byte *arena_start; |
682 » MLink *start, *end; | 698 » MLink head, *end; |
683 int32 nfree; | 699 int32 nfree; |
| 700 byte *type_data; |
| 701 byte compression; |
| 702 uintptr type_data_inc; |
684 MSpan *s; | 703 MSpan *s; |
685 | 704 |
686 USED(&desc); | 705 USED(&desc); |
687 s = runtime·mheap.allspans[idx]; | 706 s = runtime·mheap.allspans[idx]; |
688 // Stamp newly unused spans. The scavenger will use that | 707 // Stamp newly unused spans. The scavenger will use that |
689 // info to potentially give back some pages to the OS. | 708 // info to potentially give back some pages to the OS. |
690 if(s->state == MSpanFree && s->unusedsince == 0) | 709 if(s->state == MSpanFree && s->unusedsince == 0) |
691 s->unusedsince = runtime·nanotime(); | 710 s->unusedsince = runtime·nanotime(); |
692 if(s->state != MSpanInUse) | 711 if(s->state != MSpanInUse) |
693 return; | 712 return; |
694 arena_start = runtime·mheap.arena_start; | 713 arena_start = runtime·mheap.arena_start; |
695 p = (byte*)(s->start << PageShift); | 714 p = (byte*)(s->start << PageShift); |
696 cl = s->sizeclass; | 715 cl = s->sizeclass; |
| 716 size = s->elemsize; |
697 if(cl == 0) { | 717 if(cl == 0) { |
698 size = s->npages<<PageShift; | |
699 n = 1; | 718 n = 1; |
700 } else { | 719 } else { |
701 // Chunk full of small blocks. | 720 // Chunk full of small blocks. |
702 size = runtime·class_to_size[cl]; | |
703 npages = runtime·class_to_allocnpages[cl]; | 721 npages = runtime·class_to_allocnpages[cl]; |
704 n = (npages << PageShift) / size; | 722 n = (npages << PageShift) / size; |
705 } | 723 } |
706 nfree = 0; | 724 nfree = 0; |
707 » start = end = nil; | 725 » end = &head; |
708 c = m->mcache; | 726 c = m->mcache; |
| 727 ········ |
| 728 type_data = (byte*)s->types.data; |
| 729 type_data_inc = sizeof(uintptr); |
| 730 compression = s->types.compression; |
| 731 switch(compression) { |
| 732 case MTypes_Bytes: |
| 733 type_data += 8*sizeof(uintptr); |
| 734 type_data_inc = 1; |
| 735 break; |
| 736 } |
709 | 737 |
710 // Sweep through n objects of given size starting at p. | 738 // Sweep through n objects of given size starting at p. |
711 // This thread owns the span now, so it can manipulate | 739 // This thread owns the span now, so it can manipulate |
712 // the block bitmap without atomic operations. | 740 // the block bitmap without atomic operations. |
713 » for(; n > 0; n--, p += size) { | 741 » for(; n > 0; n--, p += size, type_data+=type_data_inc) { |
714 uintptr off, *bitp, shift, bits; | 742 uintptr off, *bitp, shift, bits; |
715 | 743 |
716 off = (uintptr*)p - (uintptr*)arena_start; | 744 off = (uintptr*)p - (uintptr*)arena_start; |
717 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; | 745 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; |
718 shift = off % wordsPerBitmapWord; | 746 shift = off % wordsPerBitmapWord; |
719 bits = *bitp>>shift; | 747 bits = *bitp>>shift; |
720 | 748 |
721 if((bits & bitAllocated) == 0) | 749 if((bits & bitAllocated) == 0) |
722 continue; | 750 continue; |
723 | 751 |
(...skipping 11 matching lines...)
735 // In DebugMark mode, the bit has been coopted so | 763 // In DebugMark mode, the bit has been coopted so |
736 // we have to assume all blocks are special. | 764 // we have to assume all blocks are special. |
737 if(DebugMark || (bits & bitSpecial) != 0) { | 765 if(DebugMark || (bits & bitSpecial) != 0) { |
738 if(handlespecial(p, size)) | 766 if(handlespecial(p, size)) |
739 continue; | 767 continue; |
740 } | 768 } |
741 | 769 |
742 // Mark freed; restore block boundary bit. | 770 // Mark freed; restore block boundary bit. |
743 *bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift); | 771 *bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift); |
744 | 772 |
745 » » if(s->sizeclass == 0) { | 773 » » if(cl == 0) { |
746 // Free large span. | 774 // Free large span. |
747 runtime·unmarkspan(p, 1<<PageShift); | 775 runtime·unmarkspan(p, 1<<PageShift); |
748 *(uintptr*)p = 1; // needs zeroing | 776 *(uintptr*)p = 1; // needs zeroing |
749 runtime·MHeap_Free(&runtime·mheap, s, 1); | 777 runtime·MHeap_Free(&runtime·mheap, s, 1); |
750 c->local_alloc -= size; | 778 c->local_alloc -= size; |
751 c->local_nfree++; | 779 c->local_nfree++; |
752 } else { | 780 } else { |
753 // Free small object. | 781 // Free small object. |
| 782 switch(compression) { |
| 783 case MTypes_Words: |
| 784 *(uintptr*)type_data = 0; |
| 785 break; |
| 786 case MTypes_Bytes: |
| 787 *(byte*)type_data = 0; |
| 788 break; |
| 789 } |
754 if(size > sizeof(uintptr)) | 790 if(size > sizeof(uintptr)) |
755 ((uintptr*)p)[1] = 1; // mark as "needs to be zeroed" | 791 ((uintptr*)p)[1] = 1; // mark as "needs to be zeroed" |
756 » » » if(nfree) | 792 » » »······· |
757 » » » » end->next = (MLink*)p; | 793 » » » end->next = (MLink*)p; |
758 » » » else | |
759 » » » » start = (MLink*)p; | |
760 end = (MLink*)p; | 794 end = (MLink*)p; |
761 nfree++; | 795 nfree++; |
762 } | 796 } |
763 } | 797 } |
764 | 798 |
765 if(nfree) { | 799 if(nfree) { |
766 » » c->local_by_size[s->sizeclass].nfree += nfree; | 800 » » c->local_by_size[cl].nfree += nfree; |
767 c->local_alloc -= size * nfree; | 801 c->local_alloc -= size * nfree; |
768 c->local_nfree += nfree; | 802 c->local_nfree += nfree; |
769 c->local_cachealloc -= nfree * size; | 803 c->local_cachealloc -= nfree * size; |
770 c->local_objects -= nfree; | 804 c->local_objects -= nfree; |
771 » » runtime·MCentral_FreeSpan(&runtime·mheap.central[cl], s, nfree, start, end); | 805 » » runtime·MCentral_FreeSpan(&runtime·mheap.central[cl], s, nfree, head.next, end); |
772 » } | 806 » } |
773 } | 807 } |
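The start/end to head/end change in sweepspan drops the per-object nfree branch by seeding the free list with a dummy node on the stack. A standalone sketch of that pattern:

    #include <stddef.h>

    typedef struct MLink MLink;
    struct MLink { MLink *next; };

    /* thread n blocks of the given size at p onto a free list;
       the stack dummy head removes the empty-list special case */
    static MLink*
    buildfreelist(unsigned char *p, size_t size, int n)
    {
        MLink head, *end = &head;
        for(; n > 0; n--, p += size) {
            end->next = (MLink*)p;
            end = (MLink*)p;
        }
        end->next = NULL;
        return head.next;   /* NULL when no blocks were linked */
    }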
774 | 808 |
| 809 static void |
| 810 dumpspan(uint32 idx) |
| 811 { |
| 812 » int32 sizeclass, n, npages, i, column; |
| 813 » uintptr size; |
| 814 » byte *p; |
| 815 » byte *arena_start; |
| 816 » MSpan *s; |
| 817 » bool allocated, special; |
| 818 |
| 819 » s = runtime·mheap.allspans[idx]; |
| 820 » if(s->state != MSpanInUse) |
| 821 » » return; |
| 822 » arena_start = runtime·mheap.arena_start; |
| 823 » p = (byte*)(s->start << PageShift); |
| 824 » sizeclass = s->sizeclass; |
| 825 » size = s->elemsize; |
| 826 » if(sizeclass == 0) { |
| 827 » » n = 1; |
| 828 » } else { |
| 829 » » npages = runtime·class_to_allocnpages[sizeclass]; |
| 830 » » n = (npages << PageShift) / size; |
| 831 » } |
| 832 »······· |
| 833 » runtime·printf("%p .. %p:\n", p, p+n*size); |
| 834 » column = 0; |
| 835 » for(; n>0; n--, p+=size) { |
| 836 » » uintptr off, *bitp, shift, bits; |
| 837 |
| 838 » » off = (uintptr*)p - (uintptr*)arena_start; |
| 839 » » bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; |
| 840 » » shift = off % wordsPerBitmapWord; |
| 841 » » bits = *bitp>>shift; |
| 842 |
| 843 » » allocated = ((bits & bitAllocated) != 0); |
| 844 » » special = ((bits & bitSpecial) != 0); |
| 845 |
| 846 » » for(i=0; i<size; i+=sizeof(void*)) { |
| 847 » » » if(column == 0) { |
| 848 » » » » runtime·printf("\t"); |
| 849 » » » } |
| 850 » » » if(i == 0) { |
| 851 » » » » runtime·printf(allocated ? "(" : "["); |
| 852 » » » » runtime·printf(special ? "@" : ""); |
| 853 » » » » runtime·printf("%p: ", p+i); |
| 854 » » » } else { |
| 855 » » » » runtime·printf(" "); |
| 856 » » » } |
| 857 |
| 858 » » » runtime·printf("%p", *(void**)(p+i)); |
| 859 |
| 860 » » » if(i+sizeof(void*) >= size) { |
| 861 » » » » runtime·printf(allocated ? ") " : "] "); |
| 862 » » » } |
| 863 |
| 864 » » » column++; |
| 865 » » » if(column == 8) { |
| 866 » » » » runtime·printf("\n"); |
| 867 » » » » column = 0; |
| 868 » » » } |
| 869 » » } |
| 870 » } |
| 871 » runtime·printf("\n"); |
| 872 } |
| 873 |
| 874 // A debugging function to dump the contents of memory |
| 875 void |
| 876 runtime·memorydump(void) |
| 877 { |
| 878 » uint32 spanidx; |
| 879 |
| 880 » for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) { |
| 881 » » dumpspan(spanidx); |
| 882 » } |
| 883 } |
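To make the new dumpspan output easier to read: each object's words are printed wrapped in "(...)" when allocated and "[...]" when free, with "@" marking special blocks. A condensed standalone model of that formatting for a single object:

    #include <stdio.h>

    /* print one object's words in dumpspan's bracket notation */
    static void
    dumpobj(void **p, int nwords, int allocated, int special)
    {
        int i;
        printf("%s%s%p: ", allocated ? "(" : "[", special ? "@" : "", (void*)p);
        for(i = 0; i < nwords; i++)
            printf("%p ", p[i]);
        printf("%s\n", allocated ? ")" : "]");
    }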
775 void | 884 void |
776 runtime·gchelper(void) | 885 runtime·gchelper(void) |
777 { | 886 { |
778 // parallel mark over the gc roots | 887 // parallel mark over the gc roots |
779 runtime·parfordo(work.markfor); | 888 runtime·parfordo(work.markfor); |
780 // help other threads scan secondary blocks | 889 // help other threads scan secondary blocks |
781 scanblock(nil, 0); | 890 scanblock(nil, 0); |
782 | 891 |
783 if(DebugMark) { | 892 if(DebugMark) { |
784 // wait while the main thread executes mark(debug_scanblock) | 893 // wait while the main thread executes mark(debug_scanblock) |
(...skipping 59 matching lines...)
844 mstats.stacks_sys = stacks_sys; | 953 mstats.stacks_sys = stacks_sys; |
845 } | 954 } |
846 | 955 |
847 void | 956 void |
848 runtime·gc(int32 force) | 957 runtime·gc(int32 force) |
849 { | 958 { |
850 int64 t0, t1, t2, t3; | 959 int64 t0, t1, t2, t3; |
851 uint64 heap0, heap1, obj0, obj1; | 960 uint64 heap0, heap1, obj0, obj1; |
852 byte *p; | 961 byte *p; |
853 GCStats stats; | 962 GCStats stats; |
| 963 M *m1; |
854 uint32 i; | 964 uint32 i; |
| 965 |
| 966 // The atomic operations are not atomic if the uint64s |
| 967 // are not aligned on uint64 boundaries. This has been |
| 968 // a problem in the past. |
| 969 if((((uintptr)&work.empty) & 7) != 0) |
| 970 runtime·throw("runtime: gc work buffer is misaligned"); |
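The new sanity check guards a real failure mode: on 32-bit targets the 64-bit atomics are only atomic when the operand is 8-byte aligned, and a misaligned work buffer has caused trouble before. A minimal standalone version of the same assertion:

    #include <stdint.h>
    #include <assert.h>

    /* 64-bit atomics require 8-byte alignment on some targets
       (notably 386 and ARM); fail loudly rather than corrupt */
    static void
    checkalign8(void *p)
    {
        assert((((uintptr_t)p) & 7) == 0);
    }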
855 | 971 |
856 // The gc is turned off (via enablegc) until | 972 // The gc is turned off (via enablegc) until |
857 // the bootstrap has completed. | 973 // the bootstrap has completed. |
858 // Also, malloc gets called in the guts | 974 // Also, malloc gets called in the guts |
859 // of a number of libraries that might be | 975 // of a number of libraries that might be |
860 // holding locks. To avoid priority inversion | 976 // holding locks. To avoid priority inversion |
861 // problems, don't bother trying to run gc | 977 // problems, don't bother trying to run gc |
862 // while holding a lock. The next mallocgc | 978 // while holding a lock. The next mallocgc |
863 // without a lock will do the gc instead. | 979 // without a lock will do the gc instead. |
864 if(!mstats.enablegc || m->locks > 0 || runtime·panicking) | 980 if(!mstats.enablegc || m->locks > 0 || runtime·panicking) |
(...skipping 18 matching lines...)
883 runtime·semacquire(&runtime·worldsema); | 999 runtime·semacquire(&runtime·worldsema); |
884 if(!force && mstats.heap_alloc < mstats.next_gc) { | 1000 if(!force && mstats.heap_alloc < mstats.next_gc) { |
885 runtime·semrelease(&runtime·worldsema); | 1001 runtime·semrelease(&runtime·worldsema); |
886 return; | 1002 return; |
887 } | 1003 } |
888 | 1004 |
889 t0 = runtime·nanotime(); | 1005 t0 = runtime·nanotime(); |
890 | 1006 |
891 m->gcing = 1; | 1007 m->gcing = 1; |
892 runtime·stoptheworld(); | 1008 runtime·stoptheworld(); |
| 1009 |
| 1010 for(m1=runtime·allm; m1; m1=m1->alllink) |
| 1011 runtime·settype_flush(m1, false); |
893 | 1012 |
894 heap0 = 0; | 1013 heap0 = 0; |
895 obj0 = 0; | 1014 obj0 = 0; |
896 if(gctrace) { | 1015 if(gctrace) { |
897 cachestats(nil); | 1016 cachestats(nil); |
898 heap0 = mstats.heap_alloc; | 1017 heap0 = mstats.heap_alloc; |
899 obj0 = mstats.nmalloc - mstats.nfree; | 1018 obj0 = mstats.nmalloc - mstats.nfree; |
900 } | 1019 } |
901 | 1020 |
902 work.nwait = 0; | 1021 work.nwait = 0; |
(...skipping 119 matching lines...)
1022 // runs when everyone else is stopped, and | 1141 // runs when everyone else is stopped, and |
1023 // runfinq only stops at the gosched() or | 1142 // runfinq only stops at the gosched() or |
1024 // during the calls in the for loop. | 1143 // during the calls in the for loop. |
1025 fb = finq; | 1144 fb = finq; |
1026 finq = nil; | 1145 finq = nil; |
1027 if(fb == nil) { | 1146 if(fb == nil) { |
1028 fingwait = 1; | 1147 fingwait = 1; |
1029 runtime·park(nil, nil, "finalizer wait"); | 1148 runtime·park(nil, nil, "finalizer wait"); |
1030 continue; | 1149 continue; |
1031 } | 1150 } |
| 1151 if(raceenabled) |
| 1152 runtime·racefingo(); |
1032 for(; fb; fb=next) { | 1153 for(; fb; fb=next) { |
1033 next = fb->next; | 1154 next = fb->next; |
1034 for(i=0; i<fb->cnt; i++) { | 1155 for(i=0; i<fb->cnt; i++) { |
1035 f = &fb->fin[i]; | 1156 f = &fb->fin[i]; |
1036 framesz = sizeof(uintptr) + f->nret; | 1157 framesz = sizeof(uintptr) + f->nret; |
1037 if(framecap < framesz) { | 1158 if(framecap < framesz) { |
1038 runtime·free(frame); | 1159 runtime·free(frame); |
1039 frame = runtime·mal(framesz); | 1160 frame = runtime·mal(framesz); |
1040 framecap = framesz; | 1161 framecap = framesz; |
1041 } | 1162 } |
(...skipping 205 matching lines...)
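The finalizer frame in runfinq above is a grow-only scratch buffer: it is reallocated only when a finalizer needs more return space than any seen before. A standalone sketch of the pattern, with realloc plus memset standing in for runtime·free and the zeroing runtime·mal:

    #include <stdlib.h>
    #include <string.h>

    static void *frame;
    static size_t framecap;

    /* return a zeroed buffer of at least framesz bytes, growing lazily */
    static void*
    getframe(size_t framesz)
    {
        if(framecap < framesz) {
            frame = realloc(frame, framesz);
            framecap = framesz;
        }
        memset(frame, 0, framesz);
        return frame;
    }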
1247 uintptr n; | 1368 uintptr n; |
1248 | 1369 |
1249 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; | 1370 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; |
1250 n = (n+bitmapChunk-1) & ~(bitmapChunk-1); | 1371 n = (n+bitmapChunk-1) & ~(bitmapChunk-1); |
1251 if(h->bitmap_mapped >= n) | 1372 if(h->bitmap_mapped >= n) |
1252 return; | 1373 return; |
1253 | 1374 |
1254 runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped); | 1375 runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped); |
1255 h->bitmap_mapped = n; | 1376 h->bitmap_mapped = n; |
1256 } | 1377 } |
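The mapping logic above rounds the bitmap size up to a whole bitmapChunk before comparing with bitmap_mapped, so SysMap is only ever called on chunk-aligned ranges. The rounding idiom in isolation (bitmapChunk must be a power of two; the value here is illustrative):

    #include <stdint.h>

    enum { bitmapChunk = 8192 };    /* illustrative; must be a power of two */

    /* round n up to a multiple of bitmapChunk */
    static uintptr_t
    roundchunk(uintptr_t n)
    {
        return (n + bitmapChunk - 1) & ~(uintptr_t)(bitmapChunk - 1);
    }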