Left: | ||
Right: |
OLD | NEW |
---|---|
1 // Copyright 2009 The Go Authors. All rights reserved. | 1 // Copyright 2009 The Go Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style | 2 // Use of this source code is governed by a BSD-style |
3 // license that can be found in the LICENSE file. | 3 // license that can be found in the LICENSE file. |
4 | 4 |
5 #include "runtime.h" | 5 #include "runtime.h" |
6 #include "arch_GOARCH.h" | 6 #include "arch_GOARCH.h" |
7 #include "zaexperiment.h" | 7 #include "zaexperiment.h" |
8 #include "malloc.h" | 8 #include "malloc.h" |
9 #include "stack.h" | 9 #include "stack.h" |
10 #include "race.h" | 10 #include "race.h" |
(...skipping 1725 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1736 runtime·printf("invalid m->locked = %d\n", g->m->locked); | 1736 runtime·printf("invalid m->locked = %d\n", g->m->locked); |
1737 runtime·throw("internal lockOSThread error"); | 1737 runtime·throw("internal lockOSThread error"); |
1738 }······· | 1738 }······· |
1739 g->m->locked = 0; | 1739 g->m->locked = 0; |
1740 gfput(g->m->p, gp); | 1740 gfput(g->m->p, gp); |
1741 schedule(); | 1741 schedule(); |
1742 } | 1742 } |
1743 | 1743 |
1744 #pragma textflag NOSPLIT | 1744 #pragma textflag NOSPLIT |
1745 static void | 1745 static void |
1746 save(void *pc, uintptr sp) | 1746 save(void *argp) |
1747 { | 1747 { |
1748 » g->sched.pc = (uintptr)pc; | 1748 » g->sched.pc = (uintptr)runtime·getcallerpc(argp); |
rsc
2014/09/16 17:24:05
This is wrong. getcallerpc and getcallersp are onl
dvyukov
2014/09/16 17:32:15
This was done because of the NOSPLIT sequence over
marcan
2014/09/17 06:22:07
Done; see comment below.
| |
1749 » g->sched.sp = sp; | 1749 » g->sched.sp = runtime·getcallersp(argp); |
1750 g->sched.lr = 0; | 1750 g->sched.lr = 0; |
1751 g->sched.ret = 0; | 1751 g->sched.ret = 0; |
1752 g->sched.ctxt = 0; | 1752 g->sched.ctxt = 0; |
1753 g->sched.g = g; | 1753 g->sched.g = g; |
1754 } | 1754 } |
1755 | 1755 |
1756 static void entersyscall_bad(void); | 1756 static void entersyscall_bad(void); |
1757 static void entersyscall_sysmon(void); | 1757 static void entersyscall_sysmon(void); |
1758 static void entersyscall_gcwait(void); | 1758 static void entersyscall_gcwait(void); |
1759 | 1759 |
1760 // The goroutine g is about to enter a system call. | 1760 // The goroutine g is about to enter a system call. |
1761 // Record that it's not using the cpu anymore. | 1761 // Record that it's not using the cpu anymore. |
1762 // This is called only from the go syscall library and cgocall, | 1762 // This is called only from the go syscall library and cgocall, |
1763 // not from the low-level system calls used by the runtime. | 1763 // not from the low-level system calls used by the runtime. |
1764 // | 1764 // |
1765 // Entersyscall cannot split the stack: the runtime·gosave must | 1765 // Entersyscall cannot split the stack: the runtime·gosave must |
1766 // make g->sched refer to the caller's stack segment, because | 1766 // make g->sched refer to the caller's stack segment, because |
1767 // entersyscall is going to return immediately after. | 1767 // entersyscall is going to return immediately after. |
1768 // | 1768 // |
1769 // Nothing entersyscall calls can split the stack either. | 1769 // Nothing entersyscall calls can split the stack either. |
1770 // We cannot safely move the stack during an active call to syscall, | 1770 // We cannot safely move the stack during an active call to syscall, |
1771 // because we do not know which of the uintptr arguments are | 1771 // because we do not know which of the uintptr arguments are |
1772 // really pointers (back into the stack). | 1772 // really pointers (back into the stack). |
1773 // In practice, this means that we make the fast path run through | 1773 // In practice, this means that we make the fast path run through |
1774 // entersyscall doing no-split things, and the slow path has to use onM | 1774 // entersyscall doing no-split things, and the slow path has to use onM |
1775 // to run bigger things on the m stack. | 1775 // to run bigger things on the m stack. |
1776 #pragma textflag NOSPLIT | 1776 #pragma textflag NOSPLIT |
1777 static void | |
1778 entersyscallcommon(void *argp) | |
1779 { | |
1780 void (*fn)(void); | |
1781 | |
1782 runtime·casgstatus(g, Grunning, Gsyscall); | |
1783 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) { | |
1784 fn = entersyscall_bad; | |
1785 runtime·onM(&fn); | |
1786 } | |
1787 | |
1788 	if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomic | |
1789 fn = entersyscall_sysmon; | |
1790 runtime·onM(&fn); | |
1791 save(argp); | |
1792 } | |
1793 | |
1794 g->m->mcache = nil; | |
1795 g->m->p->m = nil; | |
1796 runtime·atomicstore(&g->m->p->status, Psyscall); | |
1797 if(runtime·sched.gcwaiting) { | |
1798 fn = entersyscall_gcwait; | |
1799 runtime·onM(&fn); | |
1800 save(argp); | |
1801 } | |
1802 | |
1803 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). | |
1804 	// We set stackguard to StackPreempt so that first split stack check calls morestack. | |
1805 // Morestack detects this case and throws. | |
1806 g->stackguard0 = StackPreempt; | |
1807 g->m->locks--; | |
1808 } | |
1809 | |
1810 // Standard syscall entry used by the go syscall library and normal cgo calls. | |
1811 #pragma textflag NOSPLIT | |
1777 void | 1812 void |
1778 runtime·entersyscall(int32 dummy) | 1813 runtime·entersyscall(int32 dummy) |
1779 { | 1814 { |
1780 void (*fn)(void); | |
1781 | |
1782 	// Disable preemption because during this function g is in Gsyscall status, | 1815 	// Disable preemption because during this function g is in Gsyscall status, |
1783 // but can have inconsistent g->sched, do not let GC observe it. | 1816 // but can have inconsistent g->sched, do not let GC observe it. |
1784 g->m->locks++; | 1817 g->m->locks++; |
1785 » | 1818 |
1786 	// Entersyscall must not call any function that might split/grow the stack. | 1819 	// Entersyscall must not call any function that might split/grow the stack. |
1787 // (See details in comment above.) | 1820 // (See details in comment above.) |
1788 	// Catch calls that might, by replacing the stack guard with something that | 1821 	// Catch calls that might, by replacing the stack guard with something that |
1789 // will trip any stack check and leaving a flag to tell newstack to die. | 1822 // will trip any stack check and leaving a flag to tell newstack to die. |
1790 g->stackguard0 = StackPreempt; | 1823 g->stackguard0 = StackPreempt; |
1791 g->throwsplit = 1; | 1824 g->throwsplit = 1; |
1792 | 1825 |
1793 // Leave SP around for GC and traceback. | 1826 // Leave SP around for GC and traceback. |
1794 » save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1827 » save(&dummy); |
1795 g->syscallsp = g->sched.sp; | 1828 g->syscallsp = g->sched.sp; |
1796 g->syscallpc = g->sched.pc; | 1829 g->syscallpc = g->sched.pc; |
1797 runtime·casgstatus(g, Grunning, Gsyscall); | |
1798 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) { | |
1799 fn = entersyscall_bad; | |
1800 runtime·onM(&fn); | |
1801 } | |
1802 | 1830 |
1803 »	if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomic | 1831 »	// entersyscallcommon does g->m->locks--; |
1804 » » fn = entersyscall_sysmon; | 1832 » entersyscallcommon(&dummy); |
1805 » » runtime·onM(&fn); | 1833 } |
1806 » » save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | |
1807 » } | |
1808 | 1834 |
1809 » g->m->mcache = nil; | 1835 // Syscall re-entry used by cgo callbacks. Instead of saving the caller's SP |
1810 » g->m->p->m = nil; | 1836 // and PC, restores explicitly saved values. This is needed when exitsyscall |
1811 » runtime·atomicstore(&g->m->p->status, Psyscall); | 1837 // will be called from a function further up in the call stack than the parent, |
1812 » if(runtime·sched.gcwaiting) { | 1838 // as g->syscallsp must always point to a valid stack frame. |
1813 » » fn = entersyscall_gcwait; | 1839 #pragma textflag NOSPLIT |
1814 » » runtime·onM(&fn); | 1840 void |
1815 » » save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1841 runtime·reentersyscall(uintptr syscallpc, uintptr syscallsp) |
1816 » } | 1842 { |
1843 »	// Disable preemption because during this function g is in Gsyscall status, | |
1844 » // but can have inconsistent g->sched, do not let GC observe it. | |
1845 » g->m->locks++; | |
1817 | 1846 |
1818 »	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). | 1847 »	// Entersyscall must not call any function that might split/grow the stack. |
1819 »	// We set stackguard to StackPreempt so that first split stack check calls morestack. | 1848 »	// (See details in comment above.) |
1820 »	// Morestack detects this case and throws. | 1849 »	// Catch calls that might, by replacing the stack guard with something that |
1850 » // will trip any stack check and leaving a flag to tell newstack to die. | |
1821 g->stackguard0 = StackPreempt; | 1851 g->stackguard0 = StackPreempt; |
1822 » g->m->locks--; | 1852 » g->throwsplit = 1; |
1853 | |
1854 » // Leave SP around for GC and traceback. | |
1855 » // NOTE: save saves bad pc/sp here | |
1856 » // Reconsider whether we need save in entersyscall at all; | |
rsc
2014/09/16 17:24:05
I don't understand this "reconsider" comment. I do
dvyukov
2014/09/16 17:32:15
Why? What is broken with current code?
rsc
2014/09/16 17:46:25
As written save is writing things that are wrong (
marcan
2014/09/17 06:22:07
If we're always saving the same thing to g->syscal
| |
1857 » // if yes, then g->sched.pc/sp need to be set to syscallpc/syscallsp; | |
1858 » // otherwise, remove it. | |
1859 » save(&syscallpc); | |
1860 » g->syscallsp = syscallsp; | |
1861 » g->syscallpc = syscallpc; | |
1862 | |
1863 » // entersyscallcommon does g->m->locks--; | |
1864 » entersyscallcommon(&syscallpc); | |
1823 } | 1865 } |
1824 | 1866 |
1825 static void | 1867 static void |
1826 entersyscall_bad(void) | 1868 entersyscall_bad(void) |
1827 { | 1869 { |
1828 G *gp; | 1870 G *gp; |
1829 ········ | 1871 ········ |
1830 gp = g->m->curg; | 1872 gp = g->m->curg; |
1831 runtime·printf("entersyscall inconsistent %p [%p,%p]\n", | 1873 runtime·printf("entersyscall inconsistent %p [%p,%p]\n", |
1832 gp->syscallsp, gp->stack.lo, gp->stack.hi); | 1874 gp->syscallsp, gp->stack.lo, gp->stack.hi); |
(...skipping 29 matching lines...) Expand all Loading... | |
1862 void | 1904 void |
1863 runtime·entersyscallblock(int32 dummy) | 1905 runtime·entersyscallblock(int32 dummy) |
1864 { | 1906 { |
1865 void (*fn)(void); | 1907 void (*fn)(void); |
1866 | 1908 |
1867 g->m->locks++; // see comment in entersyscall | 1909 g->m->locks++; // see comment in entersyscall |
1868 g->throwsplit = 1; | 1910 g->throwsplit = 1; |
1869 g->stackguard0 = StackPreempt; // see comment in entersyscall | 1911 g->stackguard0 = StackPreempt; // see comment in entersyscall |
1870 | 1912 |
1871 // Leave SP around for GC and traceback. | 1913 // Leave SP around for GC and traceback. |
1872 » save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1914 » save(&dummy); |
1873 g->syscallsp = g->sched.sp; | 1915 g->syscallsp = g->sched.sp; |
1874 g->syscallpc = g->sched.pc; | 1916 g->syscallpc = g->sched.pc; |
1875 runtime·casgstatus(g, Grunning, Gsyscall); | 1917 runtime·casgstatus(g, Grunning, Gsyscall); |
1876 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) { | 1918 if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) { |
1877 fn = entersyscall_bad; | 1919 fn = entersyscall_bad; |
1878 runtime·onM(&fn); | 1920 runtime·onM(&fn); |
1879 } | 1921 } |
1880 ········ | 1922 ········ |
1881 fn = entersyscallblock_handoff; | 1923 fn = entersyscallblock_handoff; |
1882 runtime·onM(&fn); | 1924 runtime·onM(&fn); |
1883 | 1925 |
1884 // Resave for traceback during blocked call. | 1926 // Resave for traceback during blocked call. |
1885 » save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy)); | 1927 » save(&dummy); |
1886 | 1928 |
1887 g->m->locks--; | 1929 g->m->locks--; |
1888 } | 1930 } |
1889 | 1931 |
1890 static void | 1932 static void |
1891 entersyscallblock_handoff(void) | 1933 entersyscallblock_handoff(void) |
1892 { | 1934 { |
1893 handoffp(releasep()); | 1935 handoffp(releasep()); |
1894 } | 1936 } |
1895 | 1937 |
1896 // The goroutine g exited its system call. | 1938 // The goroutine g exited its system call. |
1897 // Arrange for it to run on a cpu again. | 1939 // Arrange for it to run on a cpu again. |
1898 // This is called only from the go syscall library, not | 1940 // This is called only from the go syscall library, not |
1899 // from the low-level system calls used by the runtime. | 1941 // from the low-level system calls used by the runtime. |
1900 #pragma textflag NOSPLIT | 1942 #pragma textflag NOSPLIT |
1901 void | 1943 void |
1902 runtime·exitsyscall(void) | 1944 runtime·exitsyscall(int32 dummy) |
1903 { | 1945 { |
1904 void (*fn)(G*); | 1946 void (*fn)(G*); |
1905 | 1947 |
1906 g->m->locks++; // see comment in entersyscall | 1948 g->m->locks++; // see comment in entersyscall |
1907 | 1949 |
1950 if(runtime·getcallersp(&dummy) > g->syscallsp) | |
1951 runtime·throw("exitsyscall: syscall frame is no longer valid"); | |
1952 | |
1908 g->waitsince = 0; | 1953 g->waitsince = 0; |
1909 if(exitsyscallfast()) { | 1954 if(exitsyscallfast()) { |
1910 // There's a cpu for us, so we can run. | 1955 // There's a cpu for us, so we can run. |
1911 g->m->p->syscalltick++; | 1956 g->m->p->syscalltick++; |
1912 // We need to cas the status and scan before resuming... | 1957 // We need to cas the status and scan before resuming... |
1913 runtime·casgstatus(g, Gsyscall, Grunning); | 1958 runtime·casgstatus(g, Gsyscall, Grunning); |
1914 | 1959 |
1915 // Garbage collector isn't running (since we are), | 1960 // Garbage collector isn't running (since we are), |
1916 // so okay to clear syscallsp. | 1961 // so okay to clear syscallsp. |
1917 g->syscallsp = (uintptr)nil; | 1962 g->syscallsp = (uintptr)nil; |
(...skipping 1613 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3531 p = mp->p->id; | 3576 p = mp->p->id; |
3532 FLUSH(&p); | 3577 FLUSH(&p); |
3533 } | 3578 } |
3534 | 3579 |
3535 #pragma textflag NOSPLIT | 3580 #pragma textflag NOSPLIT |
3536 void | 3581 void |
3537 sync·runtime_procUnpin() | 3582 sync·runtime_procUnpin() |
3538 { | 3583 { |
3539 g->m->locks--; | 3584 g->m->locks--; |
3540 } | 3585 } |
OLD | NEW |