LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 // Garbage collector (GC). | 5 // Garbage collector (GC). |
6 // | 6 // |
7 // GC is: | 7 // GC is: |
8 // - mark&sweep | 8 // - mark&sweep |
9 // - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc) | 9 // - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc) |
10 // - parallel (up to MaxGcproc threads) | 10 // - parallel (up to MaxGcproc threads) |
(...skipping 49 matching lines...)
60 #include "typekind.h" | 60 #include "typekind.h" |
61 #include "funcdata.h" | 61 #include "funcdata.h" |
62 #include "../../cmd/ld/textflag.h" | 62 #include "../../cmd/ld/textflag.h" |
63 | 63 |
64 enum { | 64 enum { |
65 Debug = 0, | 65 Debug = 0, |
66 CollectStats = 0, | 66 CollectStats = 0, |
67 ConcurrentSweep = 1, | 67 ConcurrentSweep = 1, |
68 | 68 |
69 WorkbufSize = 16*1024, | 69 WorkbufSize = 16*1024, |
70 RootBlockSize = 4*1024, | |
71 FinBlockSize = 4*1024, | 70 FinBlockSize = 4*1024, |
72 | 71 |
73 handoffThreshold = 4, | 72 handoffThreshold = 4, |
74 IntermediateBufferCapacity = 64, | 73 IntermediateBufferCapacity = 64, |
75 | 74 |
76 // Bits in type information | 75 // Bits in type information |
77 PRECISE = 1, | 76 PRECISE = 1, |
78 LOOP = 2, | 77 LOOP = 2, |
79 PC_BITS = PRECISE | LOOP, | 78 PC_BITS = PRECISE | LOOP, |
80 | 79 |
(...skipping 123 matching lines...)
204 extern byte gcbss[]; | 203 extern byte gcbss[]; |
205 | 204 |
206 static G *fing; | 205 static G *fing; |
207 static FinBlock *finq; // list of finalizers that are to be executed | 206 static FinBlock *finq; // list of finalizers that are to be executed |
208 static FinBlock *finc; // cache of free blocks | 207 static FinBlock *finc; // cache of free blocks |
209 static FinBlock *allfin; // list of all blocks | 208 static FinBlock *allfin; // list of all blocks |
210 static int32 fingwait; | 209 static int32 fingwait; |
211 static Lock gclock; | 210 static Lock gclock; |
212 | 211 |
213 static void runfinq(void); | 212 static void runfinq(void); |
| 213 static void wakefing(void); |
214 static void bgsweep(void); | 214 static void bgsweep(void); |
215 static Workbuf* getempty(Workbuf*); | 215 static Workbuf* getempty(Workbuf*); |
216 static Workbuf* getfull(Workbuf*); | 216 static Workbuf* getfull(Workbuf*); |
217 static void putempty(Workbuf*); | 217 static void putempty(Workbuf*); |
218 static Workbuf* handoff(Workbuf*); | 218 static Workbuf* handoff(Workbuf*); |
219 static void gchelperstart(void); | 219 static void gchelperstart(void); |
220 static void addfinroots(void *wbufp, void *v); | |
221 static void flushallmcaches(void); | 220 static void flushallmcaches(void); |
222 static bool scanframe(Stkframe *frame, void *wbufp); | 221 static bool scanframe(Stkframe *frame, void *wbufp); |
223 static void addstackroots(G *gp, Workbuf **wbufp); | 222 static void addstackroots(G *gp, Workbuf **wbufp); |
224 | 223 |
225 static FuncVal runfinqv = {runfinq}; | 224 static FuncVal runfinqv = {runfinq}; |
226 static FuncVal bgsweepv = {bgsweep}; | 225 static FuncVal bgsweepv = {bgsweep}; |
227 | 226 |
228 static struct { | 227 static struct { |
229 uint64 full; // lock-free list of full blocks | 228 uint64 full; // lock-free list of full blocks |
230 uint64 empty; // lock-free list of empty blocks | 229 uint64 empty; // lock-free list of empty blocks |
(...skipping 504 matching lines...)
735 static void | 734 static void |
736 scanblock(Workbuf *wbuf, bool keepworking) | 735 scanblock(Workbuf *wbuf, bool keepworking) |
737 { | 736 { |
738 byte *b, *arena_start, *arena_used; | 737 byte *b, *arena_start, *arena_used; |
739 uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj; | 738 uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj; |
740 uintptr *pc, precise_type, nominal_size; | 739 uintptr *pc, precise_type, nominal_size; |
741 uintptr *chan_ret, chancap; | 740 uintptr *chan_ret, chancap; |
742 void *obj; | 741 void *obj; |
743 Type *t; | 742 Type *t; |
744 Slice *sliceptr; | 743 Slice *sliceptr; |
| 744 String *stringptr; |
745 Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4]; | 745 Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4]; |
746 BufferList *scanbuffers; | 746 BufferList *scanbuffers; |
747 Scanbuf sbuf; | 747 Scanbuf sbuf; |
748 Eface *eface; | 748 Eface *eface; |
749 Iface *iface; | 749 Iface *iface; |
750 Hchan *chan; | 750 Hchan *chan; |
751 ChanType *chantype; | 751 ChanType *chantype; |
752 Obj *wp; | 752 Obj *wp; |
753 | 753 |
754 if(sizeof(Workbuf) % WorkbufSize != 0) | 754 if(sizeof(Workbuf) % WorkbufSize != 0) |
(...skipping 150 matching lines...)
905 } | 905 } |
906 pc += 3; | 906 pc += 3; |
907 break; | 907 break; |
908 | 908 |
909 case GC_APTR: | 909 case GC_APTR: |
910 obj = *(void**)(stack_top.b + pc[1]); | 910 obj = *(void**)(stack_top.b + pc[1]); |
911 pc += 2; | 911 pc += 2; |
912 break; | 912 break; |
913 | 913 |
914 case GC_STRING: | 914 case GC_STRING: |
915 » » » obj = *(void**)(stack_top.b + pc[1]); | 915 » » » stringptr = (String*)(stack_top.b + pc[1]); |
916 » » » markonly(obj); | 916 » » » if(stringptr->len != 0) { |
| 917 » » » » obj = stringptr->str; |
| 918 » » » » markonly(obj); |
| 919 » » » } |
917 pc += 2; | 920 pc += 2; |
918 continue; | 921 continue; |
919 | 922 |
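Note: the new GC_STRING case marks the backing array only when the header's len field is non-zero. A minimal sketch of the string header this relies on (as declared in the runtime headers; reproduced here for context only, not part of the patch):

	struct String
	{
		byte*	str;	// backing bytes; nothing reachable to mark when len == 0
		intgo	len;
	};
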
920 case GC_EFACE: | 923 case GC_EFACE: |
921 eface = (Eface*)(stack_top.b + pc[1]); | 924 eface = (Eface*)(stack_top.b + pc[1]); |
922 pc += 2; | 925 pc += 2; |
923 if(eface->type == nil) | 926 if(eface->type == nil) |
924 continue; | 927 continue; |
925 | 928 |
926 // eface->type | 929 // eface->type |
(...skipping 295 matching lines...)
1222 Workbuf *wbuf; | 1225 Workbuf *wbuf; |
1223 FinBlock *fb; | 1226 FinBlock *fb; |
1224 MHeap *h; | 1227 MHeap *h; |
1225 MSpan **allspans, *s; | 1228 MSpan **allspans, *s; |
1226 uint32 spanidx, sg; | 1229 uint32 spanidx, sg; |
1227 G *gp; | 1230 G *gp; |
1228 void *p; | 1231 void *p; |
1229 | 1232 |
1230 USED(&desc); | 1233 USED(&desc); |
1231 wbuf = getempty(nil); | 1234 wbuf = getempty(nil); |
 | 1235 // Note: if you add a case here, please also update heapdump.c:dumproots. |
1232 switch(i) { | 1236 switch(i) { |
1233 case RootData: | 1237 case RootData: |
1234 enqueue1(&wbuf, (Obj){data, edata - data, (uintptr)gcdata}); | 1238 enqueue1(&wbuf, (Obj){data, edata - data, (uintptr)gcdata}); |
1235 break; | 1239 break; |
1236 | 1240 |
1237 case RootBss: | 1241 case RootBss: |
1238 enqueue1(&wbuf, (Obj){bss, ebss - bss, (uintptr)gcbss}); | 1242 enqueue1(&wbuf, (Obj){bss, ebss - bss, (uintptr)gcbss}); |
1239 break; | 1243 break; |
1240 | 1244 |
1241 case RootFinalizers: | 1245 case RootFinalizers: |
(...skipping 199 matching lines...)
1441 enqueue1(wbufp, (Obj){scanp+PtrSize, PtrSize, 0}); | 1445 enqueue1(wbufp, (Obj){scanp+PtrSize, PtrSize, 0}); |
1442 } | 1446 } |
1443 | 1447 |
1444 // Starting from scanp, scans words corresponding to set bits. | 1448 // Starting from scanp, scans words corresponding to set bits. |
1445 static void | 1449 static void |
1446 scanbitvector(byte *scanp, BitVector *bv, bool afterprologue, void *wbufp) | 1450 scanbitvector(byte *scanp, BitVector *bv, bool afterprologue, void *wbufp) |
1447 { | 1451 { |
1448 uintptr word, bits; | 1452 uintptr word, bits; |
1449 uint32 *wordp; | 1453 uint32 *wordp; |
1450 int32 i, remptrs; | 1454 int32 i, remptrs; |
| 1455 byte *p; |
1451 | 1456 |
1452 wordp = bv->data; | 1457 wordp = bv->data; |
1453 for(remptrs = bv->n; remptrs > 0; remptrs -= 32) { | 1458 for(remptrs = bv->n; remptrs > 0; remptrs -= 32) { |
1454 word = *wordp++; | 1459 word = *wordp++; |
1455 if(remptrs < 32) | 1460 if(remptrs < 32) |
1456 i = remptrs; | 1461 i = remptrs; |
1457 else | 1462 else |
1458 i = 32; | 1463 i = 32; |
1459 i /= BitsPerPointer; | 1464 i /= BitsPerPointer; |
1460 for(; i > 0; i--) { | 1465 for(; i > 0; i--) { |
1461 bits = word & 3; | 1466 bits = word & 3; |
1462 » » » if(bits != BitsNoPointer && *(void**)scanp != nil) | 1467 » » » switch(bits) { |
1463 » » » » if(bits == BitsPointer) | 1468 » » » case BitsDead: |
| 1469 » » » » if(runtime·debug.gcdead) |
 | 1470 » » » » » *(uintptr*)scanp = (uintptr)0x6969696969696969LL; |
| 1471 » » » » break; |
| 1472 » » » case BitsScalar: |
| 1473 » » » » break; |
| 1474 » » » case BitsPointer: |
| 1475 » » » » p = *(byte**)scanp; |
| 1476 » » » » if(p != nil) |
1464 enqueue1(wbufp, (Obj){scanp, PtrSize, 0}); | 1477 enqueue1(wbufp, (Obj){scanp, PtrSize, 0}); |
1465 » » » » else | 1478 » » » » break; |
1466 » » » » » scaninterfacedata(bits, scanp, afterprologue, wbufp); | 1479 » » » case BitsMultiWord: |
| 1480 » » » » p = *(byte**)scanp; |
| 1481 » » » » if(p != nil) { |
| 1482 » » » » » word >>= BitsPerPointer; |
| 1483 » » » » » scanp += PtrSize; |
| 1484 » » » » » i--; |
| 1485 » » » » » if(i == 0) { |
| 1486 » » » » » » // Get next chunk of bits |
| 1487 » » » » » » remptrs -= 32; |
| 1488 » » » » » » word = *wordp++; |
| 1489 » » » » » » if(remptrs < 32) |
| 1490 » » » » » » » i = remptrs; |
| 1491 » » » » » » else |
| 1492 » » » » » » » i = 32; |
| 1493 » » » » » » i /= BitsPerPointer; |
| 1494 » » » » » } |
| 1495 » » » » » switch(word & 3) { |
| 1496 » » » » » case BitsString: |
 | 1497 » » » » » » if(((String*)(scanp - PtrSize))->len != 0) |
| 1498 » » » » » » » markonly(p); |
| 1499 » » » » » » break; |
| 1500 » » » » » case BitsSlice: |
 | 1501 » » » » » » if(((Slice*)(scanp - PtrSize))->cap < ((Slice*)(scanp - PtrSize))->len) |
 | 1502 » » » » » » » runtime·throw("slice capacity smaller than length"); |
 | 1503 » » » » » » if(((Slice*)(scanp - PtrSize))->cap != 0) |
 | 1504 » » » » » » » enqueue1(wbufp, (Obj){scanp - PtrSize, PtrSize, 0}); |
| 1505 » » » » » » break; |
| 1506 » » » » » case BitsIface: |
| 1507 » » » » » case BitsEface: |
 | 1508 » » » » » » scaninterfacedata(word & 3, scanp - PtrSize, afterprologue, wbufp); |
| 1509 » » » » » » break; |
| 1510 » » » » » } |
| 1511 » » » » } |
| 1512 » » » } |
1467 word >>= BitsPerPointer; | 1513 word >>= BitsPerPointer; |
1468 scanp += PtrSize; | 1514 scanp += PtrSize; |
1469 } | 1515 } |
1470 } | 1516 } |
1471 } | 1517 } |
1472 | 1518 |
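Note: the rewritten scanbitvector loop dispatches on two-bit type codes per stack word instead of the old pointer/no-pointer test, and BitsMultiWord additionally consumes the following pair (BitsString, BitsSlice, BitsIface, BitsEface) to decide how to scan the two-word slot. A stripped-down, self-contained sketch of the basic decode loop in plain C99 — the numeric constants are assumptions for illustration only, and the MultiWord/second-pair handling and the actual slot dereferences are omitted:

	#include <stdint.h>
	#include <stddef.h>

	/* assumed values for this sketch; the runtime defines its own BitsXXX constants */
	enum { BitsPerPointer = 2, BitsDead = 0, BitsScalar = 1, BitsPointer = 2, BitsMultiWord = 3 };

	/* Count the slots flagged as plain pointers in a 2-bit-per-slot bitmap
	 * packed into uint32 chunks, mirroring the word/remptrs/i bookkeeping above. */
	static size_t
	countptrs(uint32_t *wordp, int32_t nbits)
	{
		uint32_t word;
		int32_t i, remptrs;
		size_t n = 0;

		for(remptrs = nbits; remptrs > 0; remptrs -= 32) {
			word = *wordp++;
			i = (remptrs < 32 ? remptrs : 32) / BitsPerPointer;
			for(; i > 0; i--) {
				if((word & 3) == BitsPointer)
					n++;	/* this frame slot holds a live pointer */
				word >>= BitsPerPointer;
			}
		}
		return n;
	}
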
1473 // Scan a stack frame: local variables and function arguments/results. | 1519 // Scan a stack frame: local variables and function arguments/results. |
1474 static bool | 1520 static bool |
1475 scanframe(Stkframe *frame, void *wbufp) | 1521 scanframe(Stkframe *frame, void *wbufp) |
1476 { | 1522 { |
(...skipping 148 matching lines...)
1625 finq->cnt++; | 1671 finq->cnt++; |
1626 f->fn = fn; | 1672 f->fn = fn; |
1627 f->nret = nret; | 1673 f->nret = nret; |
1628 f->fint = fint; | 1674 f->fint = fint; |
1629 f->ot = ot; | 1675 f->ot = ot; |
1630 f->arg = p; | 1676 f->arg = p; |
1631 runtime·unlock(&gclock); | 1677 runtime·unlock(&gclock); |
1632 } | 1678 } |
1633 | 1679 |
1634 void | 1680 void |
 | 1681 runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*)) |
| 1682 { |
| 1683 FinBlock *fb; |
| 1684 Finalizer *f; |
| 1685 uintptr i; |
| 1686 |
| 1687 for(fb = allfin; fb; fb = fb->alllink) { |
| 1688 for(i = 0; i < fb->cnt; i++) { |
| 1689 f = &fb->fin[i]; |
| 1690 callback(f->fn, f->arg, f->nret, f->fint, f->ot); |
| 1691 } |
| 1692 } |
| 1693 } |
| 1694 |
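Note: runtime·iterate_finq walks every FinBlock on allfin and hands each registered finalizer to the callback; the comment added at line 1235 suggests heapdump.c is the intended consumer. A hypothetical caller, shown only to illustrate the parameter order (dumpfin and the printf text are invented for this sketch):

	static void
	dumpfin(FuncVal *fn, byte *arg, uintptr nret, Type *fint, PtrType *ot)
	{
		USED(fint);
		USED(ot);
		runtime·printf("finalizer %p on %p, nret=%D\n", fn, arg, (int64)nret);
	}

	// ... with the world stopped or gclock held:
	runtime·iterate_finq(dumpfin);
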
| 1695 void |
1635 runtime·MSpan_EnsureSwept(MSpan *s) | 1696 runtime·MSpan_EnsureSwept(MSpan *s) |
1636 { | 1697 { |
1637 uint32 sg; | 1698 uint32 sg; |
1638 | 1699 |
1639 // Caller must disable preemption. | 1700 // Caller must disable preemption. |
1640 // Otherwise when this function returns the span can become unswept again | 1701 // Otherwise when this function returns the span can become unswept again |
1641 // (if GC is triggered on another goroutine). | 1702 // (if GC is triggered on another goroutine). |
1642 » if(m->locks == 0 && m->mallocing == 0) | 1703 » if(m->locks == 0 && m->mallocing == 0 && g != m->g0) |
1643 runtime·throw("MSpan_EnsureSwept: m is not locked"); | 1704 runtime·throw("MSpan_EnsureSwept: m is not locked"); |
1644 | 1705 |
1645 sg = runtime·mheap.sweepgen; | 1706 sg = runtime·mheap.sweepgen; |
1646 if(runtime·atomicload(&s->sweepgen) == sg) | 1707 if(runtime·atomicload(&s->sweepgen) == sg) |
1647 return; | 1708 return; |
1648 if(runtime·cas(&s->sweepgen, sg-2, sg-1)) { | 1709 if(runtime·cas(&s->sweepgen, sg-2, sg-1)) { |
1649 runtime·MSpan_Sweep(s); | 1710 runtime·MSpan_Sweep(s); |
1650 return; | 1711 return; |
1651 } | 1712 } |
1652 // unfortunate condition, and we don't have efficient means to wait | 1713 // unfortunate condition, and we don't have efficient means to wait |
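Note: the CAS on s->sweepgen is what keeps two callers from sweeping the same span; MSpan_EnsureSwept only wraps it. A sketch of the claim it relies on, assuming the usual sweepgen convention (sg-2: needs sweeping, sg-1: being swept, sg: swept); trysweep is an invented name:

	static bool
	trysweep(MSpan *s, uint32 sg)
	{
		if(runtime·atomicload(&s->sweepgen) == sg)
			return true;	// already swept in this cycle
		if(runtime·cas(&s->sweepgen, sg-2, sg-1)) {
			runtime·MSpan_Sweep(s);	// we won the claim; sweeping advances sweepgen to sg
			return true;
		}
		return false;	// someone else holds the claim; caller must wait or retry
	}
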
(...skipping 108 matching lines...)
1761 bits = *bitp>>shift; | 1822 bits = *bitp>>shift; |
1762 | 1823 |
1763 if((bits & bitAllocated) == 0) | 1824 if((bits & bitAllocated) == 0) |
1764 continue; | 1825 continue; |
1765 | 1826 |
1766 if((bits & bitMarked) != 0) { | 1827 if((bits & bitMarked) != 0) { |
1767 *bitp &= ~(bitMarked<<shift); | 1828 *bitp &= ~(bitMarked<<shift); |
1768 continue; | 1829 continue; |
1769 } | 1830 } |
1770 | 1831 |
1771 » » // Clear mark, scan, and special bits. | 1832 » » // Clear mark and scan bits. |
1772 » » *bitp &= ~((bitScan|bitMarked|bitSpecial)<<shift); | 1833 » » *bitp &= ~((bitScan|bitMarked)<<shift); |
1773 | 1834 |
1774 if(cl == 0) { | 1835 if(cl == 0) { |
1775 // Free large span. | 1836 // Free large span. |
1776 runtime·unmarkspan(p, 1<<PageShift); | 1837 runtime·unmarkspan(p, 1<<PageShift); |
1777 s->needzero = 1; | 1838 s->needzero = 1; |
1778 // important to set sweepgen before returning it to heap | 1839 // important to set sweepgen before returning it to heap |
1779 runtime·atomicstore(&s->sweepgen, sweepgen); | 1840 runtime·atomicstore(&s->sweepgen, sweepgen); |
1780 sweepgenset = true; | 1841 sweepgenset = true; |
1781 // See note about SysFault vs SysFree in malloc.goc. | 1842 // See note about SysFault vs SysFree in malloc.goc. |
1782 if(runtime·debug.efence) | 1843 if(runtime·debug.efence) |
(...skipping 50 matching lines...)
1833 } | 1894 } |
1834 return res; | 1895 return res; |
1835 } | 1896 } |
1836 | 1897 |
1837 // State of background sweep. | 1898 // State of background sweep. |
1838 // Protected by gclock. | 1899 // Protected by gclock. |
1839 static struct | 1900 static struct |
1840 { | 1901 { |
1841 G* g; | 1902 G* g; |
1842 bool parked; | 1903 bool parked; |
| 1904 uint32 lastsweepgen; |
1843 | 1905 |
1844 MSpan** spans; | 1906 MSpan** spans; |
1845 uint32 nspan; | 1907 uint32 nspan; |
1846 uint32 spanidx; | 1908 uint32 spanidx; |
1847 } sweep; | 1909 } sweep; |
1848 | 1910 |
1849 // background sweeping goroutine | 1911 // background sweeping goroutine |
1850 static void | 1912 static void |
1851 bgsweep(void) | 1913 bgsweep(void) |
1852 { | 1914 { |
1853 g->issystem = 1; | 1915 g->issystem = 1; |
1854 for(;;) { | 1916 for(;;) { |
1855 while(runtime·sweepone() != -1) { | 1917 while(runtime·sweepone() != -1) { |
1856 gcstats.nbgsweep++; | 1918 gcstats.nbgsweep++; |
| 1919 if(sweep.lastsweepgen != runtime·mheap.sweepgen) { |
| 1920 // If bgsweep does not catch up for any reason |
| 1921 // (does not finish before next GC), |
 | 1922 // we still need to kick off runfinq at least once per GC. |
| 1923 sweep.lastsweepgen = runtime·mheap.sweepgen; |
| 1924 wakefing(); |
| 1925 } |
1857 runtime·gosched(); | 1926 runtime·gosched(); |
1858 } | 1927 } |
| 1928 // kick off goroutine to run queued finalizers |
| 1929 wakefing(); |
1859 runtime·lock(&gclock); | 1930 runtime·lock(&gclock); |
1860 » » if(finq != nil) { | 1931 » » if(!runtime·mheap.sweepdone) { |
1861 » » » // kick off or wake up goroutine to run queued finalizers | 1932 » » » // It's possible if GC has happened between sweepone has |
1862 » » » if(fing == nil) | 1933 » » » // returned -1 and gclock lock. |
1863 » » » » fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc); | 1934 » » » runtime·unlock(&gclock); |
1864 » » » else if(fingwait) { | 1935 » » » continue; |
1865 » » » » fingwait = 0; | |
1866 » » » » runtime·ready(fing); | |
1867 » » » } | |
1868 } | 1936 } |
1869 sweep.parked = true; | 1937 sweep.parked = true; |
1870 runtime·parkunlock(&gclock, "GC sweep wait"); | 1938 runtime·parkunlock(&gclock, "GC sweep wait"); |
1871 } | 1939 } |
1872 } | 1940 } |
1873 | 1941 |
1874 // sweeps one span | 1942 // sweeps one span |
1875 // returns number of pages returned to heap, or -1 if there is nothing to sweep | 1943 // returns number of pages returned to heap, or -1 if there is nothing to sweep |
1876 uintptr | 1944 uintptr |
1877 runtime·sweepone(void) | 1945 runtime·sweepone(void) |
(...skipping 31 matching lines...)
1909 } | 1977 } |
1910 | 1978 |
1911 static void | 1979 static void |
1912 dumpspan(uint32 idx) | 1980 dumpspan(uint32 idx) |
1913 { | 1981 { |
1914 int32 sizeclass, n, npages, i, column; | 1982 int32 sizeclass, n, npages, i, column; |
1915 uintptr size; | 1983 uintptr size; |
1916 byte *p; | 1984 byte *p; |
1917 byte *arena_start; | 1985 byte *arena_start; |
1918 MSpan *s; | 1986 MSpan *s; |
1919 » bool allocated, special; | 1987 » bool allocated; |
1920 | 1988 |
1921 s = runtime·mheap.allspans[idx]; | 1989 s = runtime·mheap.allspans[idx]; |
1922 if(s->state != MSpanInUse) | 1990 if(s->state != MSpanInUse) |
1923 return; | 1991 return; |
1924 arena_start = runtime·mheap.arena_start; | 1992 arena_start = runtime·mheap.arena_start; |
1925 p = (byte*)(s->start << PageShift); | 1993 p = (byte*)(s->start << PageShift); |
1926 sizeclass = s->sizeclass; | 1994 sizeclass = s->sizeclass; |
1927 size = s->elemsize; | 1995 size = s->elemsize; |
1928 if(sizeclass == 0) { | 1996 if(sizeclass == 0) { |
1929 n = 1; | 1997 n = 1; |
1930 } else { | 1998 } else { |
1931 npages = runtime·class_to_allocnpages[sizeclass]; | 1999 npages = runtime·class_to_allocnpages[sizeclass]; |
1932 n = (npages << PageShift) / size; | 2000 n = (npages << PageShift) / size; |
1933 } | 2001 } |
1934 ········ | 2002 ········ |
1935 runtime·printf("%p .. %p:\n", p, p+n*size); | 2003 runtime·printf("%p .. %p:\n", p, p+n*size); |
1936 column = 0; | 2004 column = 0; |
1937 for(; n>0; n--, p+=size) { | 2005 for(; n>0; n--, p+=size) { |
1938 uintptr off, *bitp, shift, bits; | 2006 uintptr off, *bitp, shift, bits; |
1939 | 2007 |
1940 off = (uintptr*)p - (uintptr*)arena_start; | 2008 off = (uintptr*)p - (uintptr*)arena_start; |
1941 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; | 2009 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; |
1942 shift = off % wordsPerBitmapWord; | 2010 shift = off % wordsPerBitmapWord; |
1943 bits = *bitp>>shift; | 2011 bits = *bitp>>shift; |
1944 | 2012 |
1945 allocated = ((bits & bitAllocated) != 0); | 2013 allocated = ((bits & bitAllocated) != 0); |
1946 special = ((bits & bitSpecial) != 0); | |
1947 | 2014 |
1948 for(i=0; i<size; i+=sizeof(void*)) { | 2015 for(i=0; i<size; i+=sizeof(void*)) { |
1949 if(column == 0) { | 2016 if(column == 0) { |
1950 runtime·printf("\t"); | 2017 runtime·printf("\t"); |
1951 } | 2018 } |
1952 if(i == 0) { | 2019 if(i == 0) { |
1953 runtime·printf(allocated ? "(" : "["); | 2020 runtime·printf(allocated ? "(" : "["); |
1954 runtime·printf(special ? "@" : ""); | |
1955 runtime·printf("%p: ", p+i); | 2021 runtime·printf("%p: ", p+i); |
1956 } else { | 2022 } else { |
1957 runtime·printf(" "); | 2023 runtime·printf(" "); |
1958 } | 2024 } |
1959 | 2025 |
1960 runtime·printf("%p", *(void**)(p+i)); | 2026 runtime·printf("%p", *(void**)(p+i)); |
1961 | 2027 |
1962 if(i+sizeof(void*) >= size) { | 2028 if(i+sizeof(void*) >= size) { |
1963 runtime·printf(allocated ? ") " : "] "); | 2029 runtime·printf(allocated ? ") " : "] "); |
1964 } | 2030 } |
(...skipping 15 matching lines...)
1980 uint32 spanidx; | 2046 uint32 spanidx; |
1981 | 2047 |
1982 for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) { | 2048 for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) { |
1983 dumpspan(spanidx); | 2049 dumpspan(spanidx); |
1984 } | 2050 } |
1985 } | 2051 } |
1986 | 2052 |
1987 void | 2053 void |
1988 runtime·gchelper(void) | 2054 runtime·gchelper(void) |
1989 { | 2055 { |
1990 » int32 nproc; | 2056 » uint32 nproc; |
1991 | 2057 |
1992 gchelperstart(); | 2058 gchelperstart(); |
1993 | 2059 |
1994 // parallel mark for over gc roots | 2060 // parallel mark for over gc roots |
1995 runtime·parfordo(work.markfor); | 2061 runtime·parfordo(work.markfor); |
1996 | 2062 |
1997 // help other threads scan secondary blocks | 2063 // help other threads scan secondary blocks |
1998 scanblock(nil, true); | 2064 scanblock(nil, true); |
1999 | 2065 |
2000 bufferList[m->helpgc].busy = 0; | 2066 bufferList[m->helpgc].busy = 0; |
(...skipping 24 matching lines...)
2025 | 2091 |
2026 // Flush MCache's to MCentral. | 2092 // Flush MCache's to MCentral. |
2027 for(pp=runtime·allp; p=*pp; pp++) { | 2093 for(pp=runtime·allp; p=*pp; pp++) { |
2028 c = p->mcache; | 2094 c = p->mcache; |
2029 if(c==nil) | 2095 if(c==nil) |
2030 continue; | 2096 continue; |
2031 runtime·MCache_ReleaseAll(c); | 2097 runtime·MCache_ReleaseAll(c); |
2032 } | 2098 } |
2033 } | 2099 } |
2034 | 2100 |
2035 static void | 2101 void |
2036 updatememstats(GCStats *stats) | 2102 runtime·updatememstats(GCStats *stats) |
2037 { | 2103 { |
2038 M *mp; | 2104 M *mp; |
2039 MSpan *s; | 2105 MSpan *s; |
2040 int32 i; | 2106 int32 i; |
2041 uint64 stacks_inuse, smallfree; | 2107 uint64 stacks_inuse, smallfree; |
2042 uint64 *src, *dst; | 2108 uint64 *src, *dst; |
2043 | 2109 |
2044 if(stats) | 2110 if(stats) |
2045 runtime·memclr((byte*)stats, sizeof(*stats)); | 2111 runtime·memclr((byte*)stats, sizeof(*stats)); |
2046 stacks_inuse = 0; | 2112 stacks_inuse = 0; |
(...skipping 159 matching lines...)
2206 | 2272 |
2207 // all done | 2273 // all done |
2208 m->gcing = 0; | 2274 m->gcing = 0; |
2209 m->locks++; | 2275 m->locks++; |
2210 runtime·semrelease(&runtime·worldsema); | 2276 runtime·semrelease(&runtime·worldsema); |
2211 runtime·starttheworld(); | 2277 runtime·starttheworld(); |
2212 m->locks--; | 2278 m->locks--; |
2213 | 2279 |
2214 // now that gc is done, kick off finalizer thread if needed | 2280 // now that gc is done, kick off finalizer thread if needed |
2215 if(!ConcurrentSweep) { | 2281 if(!ConcurrentSweep) { |
2216 » » if(finq != nil) { | 2282 » » // kick off goroutine to run queued finalizers |
2217 » » » runtime·lock(&gclock); | 2283 » » wakefing(); |
2218 » » » // kick off or wake up goroutine to run queued finalizers | |
2219 » » » if(fing == nil) | |
2220 » » » » fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc); | |
2221 » » » else if(fingwait) { | |
2222 » » » » fingwait = 0; | |
2223 » » » » runtime·ready(fing); | |
2224 » » » } | |
2225 » » » runtime·unlock(&gclock); | |
2226 » » } | |
2227 // give the queued finalizers, if any, a chance to run | 2284 // give the queued finalizers, if any, a chance to run |
2228 runtime·gosched(); | 2285 runtime·gosched(); |
2229 } | 2286 } |
2230 } | 2287 } |
2231 | 2288 |
2232 static void | 2289 static void |
2233 mgc(G *gp) | 2290 mgc(G *gp) |
2234 { | 2291 { |
2235 gc(gp->param); | 2292 gc(gp->param); |
2236 gp->param = nil; | 2293 gp->param = nil; |
(...skipping 65 matching lines...)
2302 t4 = runtime·nanotime(); | 2359 t4 = runtime·nanotime(); |
2303 mstats.last_gc = t4; | 2360 mstats.last_gc = t4; |
2304 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0; | 2361 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0; |
2305 mstats.pause_total_ns += t4 - t0; | 2362 mstats.pause_total_ns += t4 - t0; |
2306 mstats.numgc++; | 2363 mstats.numgc++; |
2307 if(mstats.debuggc) | 2364 if(mstats.debuggc) |
2308 runtime·printf("pause %D\n", t4-t0); | 2365 runtime·printf("pause %D\n", t4-t0); |
2309 | 2366 |
2310 if(runtime·debug.gctrace) { | 2367 if(runtime·debug.gctrace) { |
2311 heap1 = mstats.heap_alloc; | 2368 heap1 = mstats.heap_alloc; |
2312 » » updatememstats(&stats); | 2369 » » runtime·updatememstats(&stats); |
2313 if(heap1 != mstats.heap_alloc) { | 2370 if(heap1 != mstats.heap_alloc) { |
2314 runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc); | 2371 runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc); |
2315 runtime·throw("mstats skew"); | 2372 runtime·throw("mstats skew"); |
2316 } | 2373 } |
2317 obj = mstats.nmalloc - mstats.nfree; | 2374 obj = mstats.nmalloc - mstats.nfree; |
2318 | 2375 |
2319 stats.nprocyield += work.markfor->nprocyield; | 2376 stats.nprocyield += work.markfor->nprocyield; |
2320 stats.nosyield += work.markfor->nosyield; | 2377 stats.nosyield += work.markfor->nosyield; |
2321 stats.nsleep += work.markfor->nsleep; | 2378 stats.nsleep += work.markfor->nsleep; |
2322 | 2379 |
(...skipping 79 matching lines...)
2402 void | 2459 void |
2403 runtime·ReadMemStats(MStats *stats) | 2460 runtime·ReadMemStats(MStats *stats) |
2404 { | 2461 { |
2405 // Have to acquire worldsema to stop the world, | 2462 // Have to acquire worldsema to stop the world, |
2406 // because stoptheworld can only be used by | 2463 // because stoptheworld can only be used by |
2407 // one goroutine at a time, and there might be | 2464 // one goroutine at a time, and there might be |
2408 // a pending garbage collection already calling it. | 2465 // a pending garbage collection already calling it. |
2409 runtime·semacquire(&runtime·worldsema, false); | 2466 runtime·semacquire(&runtime·worldsema, false); |
2410 m->gcing = 1; | 2467 m->gcing = 1; |
2411 runtime·stoptheworld(); | 2468 runtime·stoptheworld(); |
2412 » updatememstats(nil); | 2469 » runtime·updatememstats(nil); |
2413 // Size of the trailing by_size array differs between Go and C, | 2470 // Size of the trailing by_size array differs between Go and C, |
2414 // NumSizeClasses was changed, but we can not change Go struct because of backward compatibility. | 2471 // NumSizeClasses was changed, but we can not change Go struct because of backward compatibility. |
2415 runtime·memcopy(runtime·sizeof_C_MStats, stats, &mstats); | 2472 runtime·memcopy(runtime·sizeof_C_MStats, stats, &mstats); |
2416 m->gcing = 0; | 2473 m->gcing = 0; |
2417 m->locks++; | 2474 m->locks++; |
2418 runtime·semrelease(&runtime·worldsema); | 2475 runtime·semrelease(&runtime·worldsema); |
2419 runtime·starttheworld(); | 2476 runtime·starttheworld(); |
2420 m->locks--; | 2477 m->locks--; |
2421 } | 2478 } |
2422 | 2479 |
(...skipping 147 matching lines...)
2570 fb = nil; | 2627 fb = nil; |
2571 next = nil; | 2628 next = nil; |
2572 i = 0; | 2629 i = 0; |
2573 ef = nil; | 2630 ef = nil; |
2574 ef1.type = nil; | 2631 ef1.type = nil; |
2575 ef1.data = nil; | 2632 ef1.data = nil; |
2576 runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible | 2633 runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible |
2577 } | 2634 } |
2578 } | 2635 } |
2579 | 2636 |
| 2637 static void |
| 2638 wakefing(void) |
| 2639 { |
| 2640 if(finq == nil) |
| 2641 return; |
| 2642 runtime·lock(&gclock); |
| 2643 // kick off or wake up goroutine to run queued finalizers |
| 2644 if(fing == nil) |
| 2645 fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc); |
| 2646 else if(fingwait) { |
| 2647 fingwait = 0; |
| 2648 runtime·ready(fing); |
| 2649 } |
| 2650 runtime·unlock(&gclock); |
| 2651 } |
| 2652 |
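Note: wakefing centralizes the finq/fing/fingwait dance that was previously open-coded in bgsweep (old lines 1860-1868) and in gc (old lines 2216-2226); it takes gclock itself and does nothing while finq is empty, plus bgsweep calls it at least once per sweepgen via sweep.lastsweepgen so runfinq still gets kicked when background sweeping lags a GC cycle. Condensed from the hunks above, the call sites now reduce to:

	// in bgsweep, and in gc when ConcurrentSweep is off:
	wakefing();		// kicks off or readies the finalizer goroutine if finq != nil
	runtime·gosched();	// optionally give the queued finalizers a chance to run
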
2580 void | 2653 void |
2581 runtime·marknogc(void *v) | 2654 runtime·marknogc(void *v) |
2582 { | 2655 { |
2583 » uintptr *b, obits, bits, off, shift; | 2656 » uintptr *b, off, shift; |
2584 | 2657 |
2585 off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset | 2658 off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset |
2586 b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1; | 2659 b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1; |
2587 shift = off % wordsPerBitmapWord; | 2660 shift = off % wordsPerBitmapWord; |
2588 | 2661 » *b = (*b & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift; |
2589 » for(;;) { | |
2590 » » obits = *b; | |
2591 » » if((obits>>shift & bitMask) != bitAllocated) | |
2592 » » » runtime·throw("bad initial state for marknogc"); | |
2593 » » bits = (obits & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift; | |
2594 » » if(runtime·gomaxprocs == 1) { | |
2595 » » » *b = bits; | |
2596 » » » break; | |
2597 » » } else { | |
2598 » » » // more than one goroutine is potentially running: use atomic op | |
2599 » » » if(runtime·casp((void**)b, (void*)obits, (void*)bits)) | |
2600 » » » » break; | |
2601 » » } | |
2602 » } | |
2603 } | 2662 } |
2604 | 2663 |
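Note: marknogc (above) and markscan (below) locate an object's bits the same way; the change above simply replaces the old CAS retry loop with a single read-modify-write of that bitmap word. A sketch of the shared addressing, lifted from the three lines common to both functions (bitmapword is an invented helper name, not runtime code):

	static uintptr*
	bitmapword(void *v, uintptr *shift)
	{
		uintptr off;

		off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start;	// word offset into the arena
		*shift = off % wordsPerBitmapWord;				// bit position within the bitmap word
		// the bitmap sits just below arena_start and grows downward
		return (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1;
	}
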
2605 void | 2664 void |
2606 runtime·markscan(void *v) | 2665 runtime·markscan(void *v) |
2607 { | 2666 { |
2608 uintptr *b, off, shift; | 2667 uintptr *b, off, shift; |
2609 | 2668 |
2610 off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset | 2669 off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset |
2611 b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1; | 2670 b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1; |
2612 shift = off % wordsPerBitmapWord; | 2671 shift = off % wordsPerBitmapWord; |
(...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2727 bitmapChunk = 8192 | 2786 bitmapChunk = 8192 |
2728 }; | 2787 }; |
2729 uintptr n; | 2788 uintptr n; |
2730 | 2789 |
2731 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; | 2790 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; |
2732 n = ROUND(n, bitmapChunk); | 2791 n = ROUND(n, bitmapChunk); |
2733 n = ROUND(n, PhysPageSize); | 2792 n = ROUND(n, PhysPageSize); |
2734 if(h->bitmap_mapped >= n) | 2793 if(h->bitmap_mapped >= n) |
2735 return; | 2794 return; |
2736 | 2795 |
2737 » runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, &mstats.gc_sys); | 2796 » runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys); |
2738 h->bitmap_mapped = n; | 2797 h->bitmap_mapped = n; |
2739 } | 2798 } |
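Note: the extra h->arena_reserved argument threaded into runtime·SysMap means SysMap now needs to know whether the address range was previously reserved. The prototype this call appears to assume (an inference from the call site, not shown in this file) is roughly:

	void	runtime·SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);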