LEFT | RIGHT |
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 // Garbage collector (GC). | 5 // Garbage collector (GC). |
6 // | 6 // |
7 // GC is: | 7 // GC is: |
8 // - mark&sweep | 8 // - mark&sweep |
9 // - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc) | 9 // - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc) |
10 // - parallel (up to MaxGcproc threads) | 10 // - parallel (up to MaxGcproc threads) |
(...skipping 192 matching lines...) |
203 extern byte gcbss[]; | 203 extern byte gcbss[]; |
204 | 204 |
205 static G *fing; | 205 static G *fing; |
206 static FinBlock *finq; // list of finalizers that are to be executed | 206 static FinBlock *finq; // list of finalizers that are to be executed |
207 static FinBlock *finc; // cache of free blocks | 207 static FinBlock *finc; // cache of free blocks |
208 static FinBlock *allfin; // list of all blocks | 208 static FinBlock *allfin; // list of all blocks |
209 static int32 fingwait; | 209 static int32 fingwait; |
210 static Lock gclock; | 210 static Lock gclock; |
211 | 211 |
212 static void runfinq(void); | 212 static void runfinq(void); |
| 213 static void wakefing(void); |
213 static void bgsweep(void); | 214 static void bgsweep(void); |
214 static Workbuf* getempty(Workbuf*); | 215 static Workbuf* getempty(Workbuf*); |
215 static Workbuf* getfull(Workbuf*); | 216 static Workbuf* getfull(Workbuf*); |
216 static void putempty(Workbuf*); | 217 static void putempty(Workbuf*); |
217 static Workbuf* handoff(Workbuf*); | 218 static Workbuf* handoff(Workbuf*); |
218 static void gchelperstart(void); | 219 static void gchelperstart(void); |
219 static void flushallmcaches(void); | 220 static void flushallmcaches(void); |
220 static bool scanframe(Stkframe *frame, void *wbufp); | 221 static bool scanframe(Stkframe *frame, void *wbufp); |
221 static void addstackroots(G *gp, Workbuf **wbufp); | 222 static void addstackroots(G *gp, Workbuf **wbufp); |
222 | 223 |
(...skipping 510 matching lines...) |
733 static void | 734 static void |
734 scanblock(Workbuf *wbuf, bool keepworking) | 735 scanblock(Workbuf *wbuf, bool keepworking) |
735 { | 736 { |
736 byte *b, *arena_start, *arena_used; | 737 byte *b, *arena_start, *arena_used; |
737 uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj; | 738 uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj; |
738 uintptr *pc, precise_type, nominal_size; | 739 uintptr *pc, precise_type, nominal_size; |
739 uintptr *chan_ret, chancap; | 740 uintptr *chan_ret, chancap; |
740 void *obj; | 741 void *obj; |
741 Type *t; | 742 Type *t; |
742 Slice *sliceptr; | 743 Slice *sliceptr; |
| 744 String *stringptr; |
743 Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4]; | 745 Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4]; |
744 BufferList *scanbuffers; | 746 BufferList *scanbuffers; |
745 Scanbuf sbuf; | 747 Scanbuf sbuf; |
746 Eface *eface; | 748 Eface *eface; |
747 Iface *iface; | 749 Iface *iface; |
748 Hchan *chan; | 750 Hchan *chan; |
749 ChanType *chantype; | 751 ChanType *chantype; |
750 Obj *wp; | 752 Obj *wp; |
751 | 753 |
752 if(sizeof(Workbuf) % WorkbufSize != 0) | 754 if(sizeof(Workbuf) % WorkbufSize != 0) |
(...skipping 150 matching lines...) |
903 } | 905 } |
904 pc += 3; | 906 pc += 3; |
905 break; | 907 break; |
906 | 908 |
907 case GC_APTR: | 909 case GC_APTR: |
908 obj = *(void**)(stack_top.b + pc[1]); | 910 obj = *(void**)(stack_top.b + pc[1]); |
909 pc += 2; | 911 pc += 2; |
910 break; | 912 break; |
911 | 913 |
912 case GC_STRING: | 914 case GC_STRING: |
913 » » » obj = *(void**)(stack_top.b + pc[1]); | 915 » » » stringptr = (String*)(stack_top.b + pc[1]); |
914 » » » markonly(obj); | 916 » » » if(stringptr->len != 0) { |
| 917 » » » » obj = stringptr->str; |
| 918 » » » » markonly(obj); |
| 919 » » » } |
915 pc += 2; | 920 pc += 2; |
916 continue; | 921 continue; |
917 | 922 |
918 case GC_EFACE: | 923 case GC_EFACE: |
919 eface = (Eface*)(stack_top.b + pc[1]); | 924 eface = (Eface*)(stack_top.b + pc[1]); |
920 pc += 2; | 925 pc += 2; |
921 if(eface->type == nil) | 926 if(eface->type == nil) |
922 continue; | 927 continue; |
923 | 928 |
924 // eface->type | 929 // eface->type |
(...skipping 295 matching lines...) |
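
The rewritten GC_STRING case above no longer treats the string header as a bare pointer: it loads the (data pointer, length) header and calls markonly only when the length is non-zero, presumably because an empty string's data pointer need not refer to a live object. A minimal standalone sketch of that check; the String layout and the markonly stub here are simplified stand-ins, not the runtime's definitions:

#include <stdint.h>
#include <stdio.h>

// Simplified stand-in for the runtime's String header: data pointer + length.
typedef struct { uint8_t *str; intptr_t len; } String;

// Stand-in for markonly(): in the runtime this marks the object containing obj.
static void markonly(void *obj) { printf("mark %p\n", obj); }

// Mirror of the new GC_STRING case: only non-empty strings get their bytes marked.
static void
mark_string(String *s)
{
	if(s->len != 0)
		markonly(s->str);
}

int
main(void)
{
	static uint8_t buf[] = "abc";
	String nonempty = { buf, 3 };
	String empty = { buf + 3, 0 };   // empty string: its pointer is not marked
	mark_string(&nonempty);
	mark_string(&empty);
	return 0;
}
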
1220 Workbuf *wbuf; | 1225 Workbuf *wbuf; |
1221 FinBlock *fb; | 1226 FinBlock *fb; |
1222 MHeap *h; | 1227 MHeap *h; |
1223 MSpan **allspans, *s; | 1228 MSpan **allspans, *s; |
1224 uint32 spanidx, sg; | 1229 uint32 spanidx, sg; |
1225 G *gp; | 1230 G *gp; |
1226 void *p; | 1231 void *p; |
1227 | 1232 |
1228 USED(&desc); | 1233 USED(&desc); |
1229 wbuf = getempty(nil); | 1234 wbuf = getempty(nil); |
| 1235 // Note: if you add a case here, please also update heapdump.c:dumproots. |
1230 switch(i) { | 1236 switch(i) { |
1231 case RootData: | 1237 case RootData: |
1232 enqueue1(&wbuf, (Obj){data, edata - data, (uintptr)gcdata}); | 1238 enqueue1(&wbuf, (Obj){data, edata - data, (uintptr)gcdata}); |
1233 break; | 1239 break; |
1234 | 1240 |
1235 case RootBss: | 1241 case RootBss: |
1236 enqueue1(&wbuf, (Obj){bss, ebss - bss, (uintptr)gcbss}); | 1242 enqueue1(&wbuf, (Obj){bss, ebss - bss, (uintptr)gcbss}); |
1237 break; | 1243 break; |
1238 | 1244 |
1239 case RootFinalizers: | 1245 case RootFinalizers: |
(...skipping 199 matching lines...) |
1439 enqueue1(wbufp, (Obj){scanp+PtrSize, PtrSize, 0}); | 1445 enqueue1(wbufp, (Obj){scanp+PtrSize, PtrSize, 0}); |
1440 } | 1446 } |
1441 | 1447 |
1442 // Starting from scanp, scans words corresponding to set bits. | 1448 // Starting from scanp, scans words corresponding to set bits. |
1443 static void | 1449 static void |
1444 scanbitvector(byte *scanp, BitVector *bv, bool afterprologue, void *wbufp) | 1450 scanbitvector(byte *scanp, BitVector *bv, bool afterprologue, void *wbufp) |
1445 { | 1451 { |
1446 uintptr word, bits; | 1452 uintptr word, bits; |
1447 uint32 *wordp; | 1453 uint32 *wordp; |
1448 int32 i, remptrs; | 1454 int32 i, remptrs; |
| 1455 byte *p; |
1449 | 1456 |
1450 wordp = bv->data; | 1457 wordp = bv->data; |
1451 for(remptrs = bv->n; remptrs > 0; remptrs -= 32) { | 1458 for(remptrs = bv->n; remptrs > 0; remptrs -= 32) { |
1452 word = *wordp++; | 1459 word = *wordp++; |
1453 if(remptrs < 32) | 1460 if(remptrs < 32) |
1454 i = remptrs; | 1461 i = remptrs; |
1455 else | 1462 else |
1456 i = 32; | 1463 i = 32; |
1457 i /= BitsPerPointer; | 1464 i /= BitsPerPointer; |
1458 for(; i > 0; i--) { | 1465 for(; i > 0; i--) { |
1459 bits = word & 3; | 1466 bits = word & 3; |
1460 » » » if(bits != BitsNoPointer && *(void**)scanp != nil) | 1467 » » » switch(bits) { |
1461 » » » » if(bits == BitsPointer) | 1468 » » » case BitsDead: |
| 1469 » » » » if(runtime·debug.gcdead) |
| 1470 » » » » » *(uintptr*)scanp = (uintptr)0x6969696969696969LL; |
| 1471 » » » » break; |
| 1472 » » » case BitsScalar: |
| 1473 » » » » break; |
| 1474 » » » case BitsPointer: |
| 1475 » » » » p = *(byte**)scanp; |
| 1476 » » » » if(p != nil) |
1462 » » » » » enqueue1(wbufp, (Obj){scanp, PtrSize, 0}); | 1477 » » » » » enqueue1(wbufp, (Obj){scanp, PtrSize, 0}); |
1463 » » » » else | 1478 » » » » break; |
1464 » » » » » scaninterfacedata(bits, scanp, afterprologue, wbufp); | 1479 » » » case BitsMultiWord: |
| 1480 » » » » p = *(byte**)scanp; |
| 1481 » » » » if(p != nil) { |
| 1482 » » » » » word >>= BitsPerPointer; |
| 1483 » » » » » scanp += PtrSize; |
| 1484 » » » » » i--; |
| 1485 » » » » » if(i == 0) { |
| 1486 » » » » » » // Get next chunk of bits |
| 1487 » » » » » » remptrs -= 32; |
| 1488 » » » » » » word = *wordp++; |
| 1489 » » » » » » if(remptrs < 32) |
| 1490 » » » » » » » i = remptrs; |
| 1491 » » » » » » else |
| 1492 » » » » » » » i = 32; |
| 1493 » » » » » » i /= BitsPerPointer; |
| 1494 » » » » » } |
| 1495 » » » » » switch(word & 3) { |
| 1496 » » » » » case BitsString: |
| 1497 » » » » » » if(((String*)(scanp - PtrSize))->len != 0) |
| 1498 » » » » » » » markonly(p); |
| 1499 » » » » » » break; |
| 1500 » » » » » case BitsSlice: |
| 1501 » » » » » » if(((Slice*)(scanp - PtrSize))->cap < ((Slice*)(scanp - PtrSize))->len) |
| 1502 » » » » » » » runtime·throw("slice capacity smaller than length"); |
| 1503 » » » » » » if(((Slice*)(scanp - PtrSize))->cap != 0) |
| 1504 » » » » » » » enqueue1(wbufp, (Obj){scanp - PtrSize, PtrSize, 0}); |
| 1505 » » » » » » break; |
| 1506 » » » » » case BitsIface: |
| 1507 » » » » » case BitsEface: |
| 1508 » » » » » » scaninterfacedata(word & 3, scanp - PtrSize, afterprologue, wbufp); |
| 1509 » » » » » » break; |
| 1510 » » » » » } |
| 1511 » » » » } |
| 1512 » » » } |
1465 word >>= BitsPerPointer; | 1513 word >>= BitsPerPointer; |
1466 scanp += PtrSize; | 1514 scanp += PtrSize; |
1467 } | 1515 } |
1468 } | 1516 } |
1469 } | 1517 } |
1470 | 1518 |
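
The rewritten loop in scanbitvector above decodes the stack bitmap two bits (BitsPerPointer) per pointer-sized word and now distinguishes dead, scalar, pointer, and multi-word slots, instead of the old two-way check (BitsPointer vs. interface data). A standalone sketch of that decode loop follows; the constant values used here (BitsDead = 0, BitsScalar = 1, BitsPointer = 2, BitsMultiWord = 3) are assumptions for illustration, with the real definitions living in the runtime's headers:

#include <stdint.h>
#include <stdio.h>

// Assumed encoding (illustration only): two bits describe each pointer-sized word.
enum {
	BitsPerPointer = 2,
	BitsDead       = 0,
	BitsScalar     = 1,
	BitsPointer    = 2,
	BitsMultiWord  = 3,
};

// Decode a bit vector of nbits bits, 32 descriptor bits at a time,
// mirroring the loop structure of scanbitvector.
static void
classify(uint32_t *words, int nbits)
{
	uint32_t word;
	int i, remptrs;

	for(remptrs = nbits; remptrs > 0; remptrs -= 32) {
		word = *words++;
		i = (remptrs < 32 ? remptrs : 32) / BitsPerPointer;
		for(; i > 0; i--) {
			switch(word & 3) {
			case BitsDead:      printf("dead slot (poisoned under gcdead)\n"); break;
			case BitsScalar:    printf("scalar: skipped\n"); break;
			case BitsPointer:   printf("pointer: enqueued for marking\n"); break;
			case BitsMultiWord: printf("multi-word: string/slice/iface/eface\n"); break;
			}
			word >>= BitsPerPointer;
		}
	}
}

int
main(void)
{
	// 0xE4 = 0b11100100: slot0 dead, slot1 scalar, slot2 pointer, slot3 multi-word.
	uint32_t bv[] = { 0xE4 };
	classify(bv, 8);
	return 0;
}
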
1471 // Scan a stack frame: local variables and function arguments/results. | 1519 // Scan a stack frame: local variables and function arguments/results. |
1472 static bool | 1520 static bool |
1473 scanframe(Stkframe *frame, void *wbufp) | 1521 scanframe(Stkframe *frame, void *wbufp) |
1474 { | 1522 { |
(...skipping 148 matching lines...) |
1623 finq->cnt++; | 1671 finq->cnt++; |
1624 f->fn = fn; | 1672 f->fn = fn; |
1625 f->nret = nret; | 1673 f->nret = nret; |
1626 f->fint = fint; | 1674 f->fint = fint; |
1627 f->ot = ot; | 1675 f->ot = ot; |
1628 f->arg = p; | 1676 f->arg = p; |
1629 runtime·unlock(&gclock); | 1677 runtime·unlock(&gclock); |
1630 } | 1678 } |
1631 | 1679 |
1632 void | 1680 void |
| 1681 runtime·iterate_finq(void (*callback)(FuncVal*, byte*, uintptr, Type*, PtrType*)) |
| 1682 { |
| 1683 FinBlock *fb; |
| 1684 Finalizer *f; |
| 1685 uintptr i; |
| 1686 |
| 1687 for(fb = allfin; fb; fb = fb->alllink) { |
| 1688 for(i = 0; i < fb->cnt; i++) { |
| 1689 f = &fb->fin[i]; |
| 1690 callback(f->fn, f->arg, f->nret, f->fint, f->ot); |
| 1691 } |
| 1692 } |
| 1693 } |
| 1694 |
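
The new runtime·iterate_finq above simply walks every registered finalizer (the allfin list of FinBlocks) and hands each one to a caller-supplied callback, presumably for the heap dumper (cf. the heapdump.c:dumproots note added in markroot). A toy standalone model of the traversal, with a deliberately simplified block layout rather than the runtime's:

#include <stddef.h>
#include <stdio.h>

// Deliberately simplified stand-ins for the runtime's Finalizer/FinBlock.
typedef struct { void (*fn)(void); void *arg; } Finalizer;
typedef struct FinBlock FinBlock;
struct FinBlock {
	FinBlock  *alllink;   // next block on the all-blocks list
	int        cnt;       // number of finalizers used in fin[]
	Finalizer  fin[2];
};

// Walk every block on the allfin list and hand each finalizer to the callback.
static void
iterate_finq(FinBlock *allfin, void (*callback)(Finalizer*))
{
	FinBlock *fb;
	int i;

	for(fb = allfin; fb != NULL; fb = fb->alllink)
		for(i = 0; i < fb->cnt; i++)
			callback(&fb->fin[i]);
}

static void fin_stub(void) {}
static void report(Finalizer *f) { printf("would dump finalizer with arg %p\n", f->arg); }

int
main(void)
{
	int a, b, c;
	FinBlock b2 = { NULL, 1, { { fin_stub, &c } } };
	FinBlock b1 = { &b2, 2, { { fin_stub, &a }, { fin_stub, &b } } };
	iterate_finq(&b1, report);
	return 0;
}
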
| 1695 void |
1633 runtime·MSpan_EnsureSwept(MSpan *s) | 1696 runtime·MSpan_EnsureSwept(MSpan *s) |
1634 { | 1697 { |
1635 uint32 sg; | 1698 uint32 sg; |
1636 | 1699 |
1637 // Caller must disable preemption. | 1700 // Caller must disable preemption. |
1638 // Otherwise when this function returns the span can become unswept again | 1701 // Otherwise when this function returns the span can become unswept again |
1639 // (if GC is triggered on another goroutine). | 1702 // (if GC is triggered on another goroutine). |
1640 » if(m->locks == 0 && m->mallocing == 0) | 1703 » if(m->locks == 0 && m->mallocing == 0 && g != m->g0) |
1641 runtime·throw("MSpan_EnsureSwept: m is not locked"); | 1704 runtime·throw("MSpan_EnsureSwept: m is not locked"); |
1642 | 1705 |
1643 sg = runtime·mheap.sweepgen; | 1706 sg = runtime·mheap.sweepgen; |
1644 if(runtime·atomicload(&s->sweepgen) == sg) | 1707 if(runtime·atomicload(&s->sweepgen) == sg) |
1645 return; | 1708 return; |
1646 if(runtime·cas(&s->sweepgen, sg-2, sg-1)) { | 1709 if(runtime·cas(&s->sweepgen, sg-2, sg-1)) { |
1647 runtime·MSpan_Sweep(s); | 1710 runtime·MSpan_Sweep(s); |
1648 return; | 1711 return; |
1649 } | 1712 } |
1650 // unfortunate condition, and we don't have efficient means to wait | 1713 // unfortunate condition, and we don't have efficient means to wait |
(...skipping 180 matching lines...) |
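
The sweepgen comparison and CAS in MSpan_EnsureSwept above encode a small per-span state machine against the heap-wide sweep generation, which advances by two each GC cycle: sg-2 means the span still needs sweeping, sg-1 means someone is sweeping it, sg means it has been swept. A single-threaded sketch of that handshake with illustrative names; in the runtime the transition is a runtime·cas, so exactly one caller wins the right to sweep:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Toy span with only the field that matters for the handshake.
struct span { uint32_t sweepgen; };

// Model of the transition guarded by runtime·cas above:
// only the caller that wins the sg-2 -> sg-1 transition sweeps the span.
static bool
try_sweep(struct span *s, uint32_t heap_sweepgen)
{
	if(s->sweepgen == heap_sweepgen)      // already swept this cycle
		return false;
	if(s->sweepgen == heap_sweepgen - 2) {
		s->sweepgen = heap_sweepgen - 1;  // "being swept"
		// ... sweep the span's objects here ...
		s->sweepgen = heap_sweepgen;      // "swept"
		return true;
	}
	return false;                         // someone else is sweeping it
}

int
main(void)
{
	uint32_t heap_sweepgen = 4;               // advances by 2 each GC cycle
	struct span s = { heap_sweepgen - 2 };    // unswept since the last GC
	printf("first caller sweeps: %d\n", try_sweep(&s, heap_sweepgen));
	printf("second caller sweeps: %d\n", try_sweep(&s, heap_sweepgen));
	return 0;
}
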
1831 } | 1894 } |
1832 return res; | 1895 return res; |
1833 } | 1896 } |
1834 | 1897 |
1835 // State of background sweep. | 1898 // State of background sweep. |
1836 // Protected by gclock. | 1899 // Protected by gclock. |
1837 static struct | 1900 static struct |
1838 { | 1901 { |
1839 G* g; | 1902 G* g; |
1840 bool parked; | 1903 bool parked; |
| 1904 uint32 lastsweepgen; |
1841 | 1905 |
1842 MSpan** spans; | 1906 MSpan** spans; |
1843 uint32 nspan; | 1907 uint32 nspan; |
1844 uint32 spanidx; | 1908 uint32 spanidx; |
1845 } sweep; | 1909 } sweep; |
1846 | 1910 |
1847 // background sweeping goroutine | 1911 // background sweeping goroutine |
1848 static void | 1912 static void |
1849 bgsweep(void) | 1913 bgsweep(void) |
1850 { | 1914 { |
1851 g->issystem = 1; | 1915 g->issystem = 1; |
1852 for(;;) { | 1916 for(;;) { |
1853 while(runtime·sweepone() != -1) { | 1917 while(runtime·sweepone() != -1) { |
1854 gcstats.nbgsweep++; | 1918 gcstats.nbgsweep++; |
| 1919 if(sweep.lastsweepgen != runtime·mheap.sweepgen) { |
| 1920 // If bgsweep does not catch up for any reason |
| 1921 // (does not finish before next GC), |
| 1922 // we still need to kick off runfinq at least once per GC. |
| 1923 sweep.lastsweepgen = runtime·mheap.sweepgen; |
| 1924 wakefing(); |
| 1925 } |
1855 runtime·gosched(); | 1926 runtime·gosched(); |
1856 } | 1927 } |
| 1928 // kick off goroutine to run queued finalizers |
| 1929 wakefing(); |
1857 runtime·lock(&gclock); | 1930 runtime·lock(&gclock); |
1858 » » if(finq != nil) { | 1931 » » if(!runtime·mheap.sweepdone) { |
1859 » » » // kick off or wake up goroutine to run queued finalizers | 1932 » » » // This can happen if GC occurred between sweepone |
1860 » » » if(fing == nil) | 1933 » » » // returning -1 and the acquisition of gclock. |
1861 » » » » fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc); | 1934 » » » runtime·unlock(&gclock); |
1862 » » » else if(fingwait) { | 1935 » » » continue; |
1863 » » » » fingwait = 0; | |
1864 » » » » runtime·ready(fing); | |
1865 » » » } | |
1866 } | 1936 } |
1867 sweep.parked = true; | 1937 sweep.parked = true; |
1868 runtime·parkunlock(&gclock, "GC sweep wait"); | 1938 runtime·parkunlock(&gclock, "GC sweep wait"); |
1869 } | 1939 } |
1870 } | 1940 } |
1871 | 1941 |
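
The sweep.lastsweepgen field added in bgsweep above guards against the background sweeper falling behind: even if it never drains all spans before the next GC starts, wakefing() is still called at least once per sweep generation so queued finalizers keep running. A minimal once-per-generation model of that guard; the names are illustrative stand-ins, not the runtime's:

#include <stdint.h>
#include <stdio.h>

static uint32_t lastsweepgen;   // stands in for sweep.lastsweepgen

// Wake the finalizer goroutine at most once per sweep generation,
// even if the sweeper itself never finishes before the next GC.
static void
maybe_wakefing(uint32_t heap_sweepgen)
{
	if(lastsweepgen != heap_sweepgen) {
		lastsweepgen = heap_sweepgen;
		printf("wakefing() for sweepgen %u\n", heap_sweepgen);
	}
}

int
main(void)
{
	maybe_wakefing(4);   // first span swept in this generation: wake
	maybe_wakefing(4);   // further spans in the same generation: no-op
	maybe_wakefing(6);   // next GC cycle: wake again
	return 0;
}
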
1872 // sweeps one span | 1942 // sweeps one span |
1873 // returns number of pages returned to heap, or -1 if there is nothing to sweep | 1943 // returns number of pages returned to heap, or -1 if there is nothing to sweep |
1874 uintptr | 1944 uintptr |
1875 runtime·sweepone(void) | 1945 runtime·sweepone(void) |
(...skipping 100 matching lines...) |
1976 uint32 spanidx; | 2046 uint32 spanidx; |
1977 | 2047 |
1978 for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) { | 2048 for(spanidx=0; spanidx<runtime·mheap.nspan; spanidx++) { |
1979 dumpspan(spanidx); | 2049 dumpspan(spanidx); |
1980 } | 2050 } |
1981 } | 2051 } |
1982 | 2052 |
1983 void | 2053 void |
1984 runtime·gchelper(void) | 2054 runtime·gchelper(void) |
1985 { | 2055 { |
1986 » int32 nproc; | 2056 » uint32 nproc; |
1987 | 2057 |
1988 gchelperstart(); | 2058 gchelperstart(); |
1989 | 2059 |
1990 // parallel mark for over gc roots | 2060 // parallel mark for over gc roots |
1991 runtime·parfordo(work.markfor); | 2061 runtime·parfordo(work.markfor); |
1992 | 2062 |
1993 // help other threads scan secondary blocks | 2063 // help other threads scan secondary blocks |
1994 scanblock(nil, true); | 2064 scanblock(nil, true); |
1995 | 2065 |
1996 bufferList[m->helpgc].busy = 0; | 2066 bufferList[m->helpgc].busy = 0; |
(...skipping 24 matching lines...) |
2021 | 2091 |
2022 // Flush MCache's to MCentral. | 2092 // Flush MCache's to MCentral. |
2023 for(pp=runtime·allp; p=*pp; pp++) { | 2093 for(pp=runtime·allp; p=*pp; pp++) { |
2024 c = p->mcache; | 2094 c = p->mcache; |
2025 if(c==nil) | 2095 if(c==nil) |
2026 continue; | 2096 continue; |
2027 runtime·MCache_ReleaseAll(c); | 2097 runtime·MCache_ReleaseAll(c); |
2028 } | 2098 } |
2029 } | 2099 } |
2030 | 2100 |
2031 static void | 2101 void |
2032 updatememstats(GCStats *stats) | 2102 runtime·updatememstats(GCStats *stats) |
2033 { | 2103 { |
2034 M *mp; | 2104 M *mp; |
2035 MSpan *s; | 2105 MSpan *s; |
2036 int32 i; | 2106 int32 i; |
2037 uint64 stacks_inuse, smallfree; | 2107 uint64 stacks_inuse, smallfree; |
2038 uint64 *src, *dst; | 2108 uint64 *src, *dst; |
2039 | 2109 |
2040 if(stats) | 2110 if(stats) |
2041 runtime·memclr((byte*)stats, sizeof(*stats)); | 2111 runtime·memclr((byte*)stats, sizeof(*stats)); |
2042 stacks_inuse = 0; | 2112 stacks_inuse = 0; |
(...skipping 159 matching lines...) |
2202 | 2272 |
2203 // all done | 2273 // all done |
2204 m->gcing = 0; | 2274 m->gcing = 0; |
2205 m->locks++; | 2275 m->locks++; |
2206 runtime·semrelease(&runtime·worldsema); | 2276 runtime·semrelease(&runtime·worldsema); |
2207 runtime·starttheworld(); | 2277 runtime·starttheworld(); |
2208 m->locks--; | 2278 m->locks--; |
2209 | 2279 |
2210 // now that gc is done, kick off finalizer thread if needed | 2280 // now that gc is done, kick off finalizer thread if needed |
2211 if(!ConcurrentSweep) { | 2281 if(!ConcurrentSweep) { |
2212 » » if(finq != nil) { | 2282 » » // kick off goroutine to run queued finalizers |
2213 » » » runtime·lock(&gclock); | 2283 » » wakefing(); |
2214 » » » // kick off or wake up goroutine to run queued finalizers | |
2215 » » » if(fing == nil) | |
2216 » » » » fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc); | |
2217 » » » else if(fingwait) { | |
2218 » » » » fingwait = 0; | |
2219 » » » » runtime·ready(fing); | |
2220 » » » } | |
2221 » » » runtime·unlock(&gclock); | |
2222 » » } | |
2223 // give the queued finalizers, if any, a chance to run | 2284 // give the queued finalizers, if any, a chance to run |
2224 runtime·gosched(); | 2285 runtime·gosched(); |
2225 } | 2286 } |
2226 } | 2287 } |
2227 | 2288 |
2228 static void | 2289 static void |
2229 mgc(G *gp) | 2290 mgc(G *gp) |
2230 { | 2291 { |
2231 gc(gp->param); | 2292 gc(gp->param); |
2232 gp->param = nil; | 2293 gp->param = nil; |
(...skipping 65 matching lines...) |
2298 t4 = runtime·nanotime(); | 2359 t4 = runtime·nanotime(); |
2299 mstats.last_gc = t4; | 2360 mstats.last_gc = t4; |
2300 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0; | 2361 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0; |
2301 mstats.pause_total_ns += t4 - t0; | 2362 mstats.pause_total_ns += t4 - t0; |
2302 mstats.numgc++; | 2363 mstats.numgc++; |
2303 if(mstats.debuggc) | 2364 if(mstats.debuggc) |
2304 runtime·printf("pause %D\n", t4-t0); | 2365 runtime·printf("pause %D\n", t4-t0); |
2305 | 2366 |
2306 if(runtime·debug.gctrace) { | 2367 if(runtime·debug.gctrace) { |
2307 heap1 = mstats.heap_alloc; | 2368 heap1 = mstats.heap_alloc; |
2308 » » updatememstats(&stats); | 2369 » » runtime·updatememstats(&stats); |
2309 if(heap1 != mstats.heap_alloc) { | 2370 if(heap1 != mstats.heap_alloc) { |
2310 runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc); | 2371 runtime·printf("runtime: mstats skew: heap=%D/%D\n", heap1, mstats.heap_alloc); |
2311 runtime·throw("mstats skew"); | 2372 runtime·throw("mstats skew"); |
2312 } | 2373 } |
2313 obj = mstats.nmalloc - mstats.nfree; | 2374 obj = mstats.nmalloc - mstats.nfree; |
2314 | 2375 |
2315 stats.nprocyield += work.markfor->nprocyield; | 2376 stats.nprocyield += work.markfor->nprocyield; |
2316 stats.nosyield += work.markfor->nosyield; | 2377 stats.nosyield += work.markfor->nosyield; |
2317 stats.nsleep += work.markfor->nsleep; | 2378 stats.nsleep += work.markfor->nsleep; |
2318 | 2379 |
(...skipping 79 matching lines...) |
2398 void | 2459 void |
2399 runtime·ReadMemStats(MStats *stats) | 2460 runtime·ReadMemStats(MStats *stats) |
2400 { | 2461 { |
2401 // Have to acquire worldsema to stop the world, | 2462 // Have to acquire worldsema to stop the world, |
2402 // because stoptheworld can only be used by | 2463 // because stoptheworld can only be used by |
2403 // one goroutine at a time, and there might be | 2464 // one goroutine at a time, and there might be |
2404 // a pending garbage collection already calling it. | 2465 // a pending garbage collection already calling it. |
2405 runtime·semacquire(&runtime·worldsema, false); | 2466 runtime·semacquire(&runtime·worldsema, false); |
2406 m->gcing = 1; | 2467 m->gcing = 1; |
2407 runtime·stoptheworld(); | 2468 runtime·stoptheworld(); |
2408 » updatememstats(nil); | 2469 » runtime·updatememstats(nil); |
2409 // Size of the trailing by_size array differs between Go and C, | 2470 // Size of the trailing by_size array differs between Go and C, |
2410 // NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility. | 2471 // NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility. |
2411 runtime·memcopy(runtime·sizeof_C_MStats, stats, &mstats); | 2472 runtime·memcopy(runtime·sizeof_C_MStats, stats, &mstats); |
2412 m->gcing = 0; | 2473 m->gcing = 0; |
2413 m->locks++; | 2474 m->locks++; |
2414 runtime·semrelease(&runtime·worldsema); | 2475 runtime·semrelease(&runtime·worldsema); |
2415 runtime·starttheworld(); | 2476 runtime·starttheworld(); |
2416 m->locks--; | 2477 m->locks--; |
2417 } | 2478 } |
2418 | 2479 |
(...skipping 147 matching lines...) |
2566 fb = nil; | 2627 fb = nil; |
2567 next = nil; | 2628 next = nil; |
2568 i = 0; | 2629 i = 0; |
2569 ef = nil; | 2630 ef = nil; |
2570 ef1.type = nil; | 2631 ef1.type = nil; |
2571 ef1.data = nil; | 2632 ef1.data = nil; |
2572 runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible | 2633 runtime·gc(1); // trigger another gc to clean up the finalized objects, if possible |
2573 } | 2634 } |
2574 } | 2635 } |
2575 | 2636 |
| 2637 static void |
| 2638 wakefing(void) |
| 2639 { |
| 2640 if(finq == nil) |
| 2641 return; |
| 2642 runtime·lock(&gclock); |
| 2643 // kick off or wake up goroutine to run queued finalizers |
| 2644 if(fing == nil) |
| 2645 fing = runtime·newproc1(&runfinqv, nil, 0, 0, runtime·gc); |
| 2646 else if(fingwait) { |
| 2647 fingwait = 0; |
| 2648 runtime·ready(fing); |
| 2649 } |
| 2650 runtime·unlock(&gclock); |
| 2651 } |
| 2652 |
2576 void | 2653 void |
2577 runtime·marknogc(void *v) | 2654 runtime·marknogc(void *v) |
2578 { | 2655 { |
2579 uintptr *b, off, shift; | 2656 uintptr *b, off, shift; |
2580 | 2657 |
2581 off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset | 2658 off = (uintptr*)v - (uintptr*)runtime·mheap.arena_start; // word offset |
2582 b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1; | 2659 b = (uintptr*)runtime·mheap.arena_start - off/wordsPerBitmapWord - 1; |
2583 shift = off % wordsPerBitmapWord; | 2660 shift = off % wordsPerBitmapWord; |
2584 *b = (*b & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift; | 2661 *b = (*b & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift; |
2585 } | 2662 } |
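
runtime·marknogc above uses the usual heap-bitmap addressing: for a heap word at word offset off from arena_start, its descriptor bits live in a bitmap that grows downward from arena_start, with wordsPerBitmapWord heap words described per bitmap word. A small standalone sketch of that address computation; wordsPerBitmapWord = 16 is an assumed 64-bit value (4 bitmap bits per heap word), used only for illustration:

#include <stdint.h>
#include <stdio.h>

enum { wordsPerBitmapWord = 16 };   // assumed 64-bit value: 4 bitmap bits per heap word

int
main(void)
{
	uintptr_t arena_start = 0x10000000UL;                  // example arena base
	uintptr_t v = arena_start + 40 * sizeof(uintptr_t);    // some heap word (example)
	uintptr_t off, shift, b;

	off = (v - arena_start) / sizeof(uintptr_t);           // word offset into the arena
	// Bitmap grows downward from arena_start, one uintptr-sized bitmap word
	// per wordsPerBitmapWord heap words (mirrors the pointer arithmetic above).
	b = arena_start - (off/wordsPerBitmapWord + 1) * sizeof(uintptr_t);
	shift = off % wordsPerBitmapWord;                      // bit position within that word

	printf("bitmap word at %#lx, shift %lu\n", (unsigned long)b, (unsigned long)shift);
	return 0;
}
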
(...skipping 123 matching lines...) |
2709 bitmapChunk = 8192 | 2786 bitmapChunk = 8192 |
2710 }; | 2787 }; |
2711 uintptr n; | 2788 uintptr n; |
2712 | 2789 |
2713 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; | 2790 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; |
2714 n = ROUND(n, bitmapChunk); | 2791 n = ROUND(n, bitmapChunk); |
2715 n = ROUND(n, PhysPageSize); | 2792 n = ROUND(n, PhysPageSize); |
2716 if(h->bitmap_mapped >= n) | 2793 if(h->bitmap_mapped >= n) |
2717 return; | 2794 return; |
2718 | 2795 |
2719 » runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, &mstats.gc_sys); | 2796 » runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats.gc_sys); |
2720 h->bitmap_mapped = n; | 2797 h->bitmap_mapped = n; |
2721 } | 2798 } |
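
The bitmap-mapping code above rounds the needed bitmap size up to bitmapChunk and then to the physical page size before calling runtime·SysMap (which, per the change on the right, now also takes h->arena_reserved). A quick standalone illustration of that rounding; ROUND here assumes a power-of-two alignment, and PhysPageSize = 4096 is an example value, not the runtime's definition:

#include <stdint.h>
#include <stdio.h>

// Round x up to a multiple of n; like the runtime's ROUND, n must be a power of two.
#define ROUND(x, n) (((x) + (n) - 1) & ~((uintptr_t)(n) - 1))

int
main(void)
{
	uintptr_t n = 100000;        // bytes of bitmap needed (example value)
	n = ROUND(n, 8192);          // bitmapChunk
	n = ROUND(n, 4096);          // PhysPageSize (example value)
	printf("mapped bitmap size: %lu bytes\n", (unsigned long)n);
	return 0;
}
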