Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/malloc.goc

Issue 6997052: code review 6997052: runtime: less aggressive per-thread stack segment caching (Closed)
Left Patch Set: diff -r f4e5087c1c19 https://dvyukov%40google.com@code.google.com/p/go/ Created 11 years, 3 months ago
Right Patch Set: diff -r 019884311591 https://dvyukov%40google.com@code.google.com/p/go/ Created 11 years, 3 months ago
--- src/pkg/runtime/malloc.goc (left patch set, f4e5087c1c19)
+++ src/pkg/runtime/malloc.goc (right patch set, 019884311591)
 // Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

 // See malloc.h for overview.
 //
 // TODO(rsc): double-check stats.

 package runtime
 #include "runtime.h"
(...skipping 733 matching lines...)
 			runtime·settype(ret, (uintptr)typ | TypeInfo_SingleObject);
 		}
 	}

 	return ret;
 }

 static void* stackcache;
 static Lock stackcachemu;

+// stackrefill/stackrelease implement global cache of stack segments.
+// The cache is required to prevent unlimited growth of per-thread caches.
+static void
+stackrefill(FixAlloc *a)
+{
+	void *v;
+	int32 i;
+
+	runtime·lock(&stackcachemu);
+	// If the global cache is empty, get a new chunk from SysAlloc.
+	if(stackcache == nil) {
+		v = runtime·SysAlloc(FixAllocChunk);
+		if(v == nil)
+			runtime·throw("out of memory (stackrefill)");
+		a->sys += FixAllocChunk;
+		for(i = 0; i < FixAllocChunk / FixedStack; i++, v = (byte*)v + FixedStack) {
+			*(void**)v = stackcache;
+			stackcache = v;
+		}
+	}
+	// Transfer up to LWM segments from the global cache to the local cache.
+	while(stackcache != nil && a->cached < StackPerThreadLWM) {
+		v = stackcache;
+		stackcache = *(void**)v;
+		runtime·FixAlloc_Free(a, v);
+	}
+	runtime·unlock(&stackcachemu);
+}
+
+static void
+stackrelease(FixAlloc *a)
+{
+	void *v;
+
+	runtime·lock(&stackcachemu);
+	// Release HWM-LWM segments from the local cache to the global cache.
+	while(a->cached >= StackPerThreadLWM) {
+		v = runtime·FixAlloc_Alloc(a);
+		*(void**)v = stackcache;
+		stackcache = v;
+	}
+	runtime·unlock(&stackcachemu);
+}
+
 void*
 runtime·stackalloc(uint32 n)
 {
 	// Stackalloc must be called on scheduler stack, so that we
 	// never try to grow the stack during the code that stackalloc runs.
 	// Doing so would cause a deadlock (issue 1547).
 	if(g != m->g0)
 		runtime·throw("stackalloc not on scheduler stack");

 	// Stack allocator uses malloc/free most of the time,
 	// but if we're in the middle of malloc and need stack,
 	// we have to do something else to avoid deadlock.
 	// In that case, we fall back on a fixed-size free-list
 	// allocator, assuming that inside malloc all the stack
 	// frames are small, so that all the stack allocations
 	// will be a single size, the minimum (right now, 5k).
 	if(m->mallocing || m->gcing || n == FixedStack) {
 		if(n != FixedStack) {
 			runtime·printf("stackalloc: in malloc, size=%d want %d", FixedStack, n);
 			runtime·throw("stackalloc");
 		}
-		if(m->stackalloc->cached == 0 && stackcache != nil) {
-			runtime·lock(&stackcachemu);
-			if (stackcache != nil) {
-				void *v = stackcache;
-				stackcache = *(void**)v;
-				runtime·unlock(&stackcachemu);
-				return v;
-			}
-			runtime·unlock(&stackcachemu);
-		}
+		if(m->stackalloc->cached == 0)
+			stackrefill(m->stackalloc);
 		return runtime·FixAlloc_Alloc(m->stackalloc);
 	}
 	return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
 }

 void
 runtime·stackfree(void *v, uintptr n)
 {
 	if(m->mallocing || m->gcing || n == FixedStack) {
-		if(m->stackalloc->cached > 8) {
-			runtime·lock(&stackcachemu);
-			*(void**)v = stackcache;
-			stackcache = v;
-			runtime·unlock(&stackcachemu);
-			return;
-		}
+		if(m->stackalloc->cached >= StackPerThreadHWM)
+			stackrelease(m->stackalloc);
 		runtime·FixAlloc_Free(m->stackalloc, v);
 		return;
 	}
 	runtime·free(v);
 }

 func GC() {
 	runtime·gc(1);
 }

(...skipping 38 matching lines...)
 		runtime·printf("runtime.SetFinalizer: finalizer already set\n");
 		goto throw;
 	}
 	return;

 badfunc:
 	runtime·printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
 throw:
 	runtime·throw("runtime.SetFinalizer");
 }
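For readers unfamiliar with the pattern, the sketch below illustrates the watermark-based two-level free-list cache that stackrefill/stackrelease implement: a per-thread list is refilled from a shared list in a batch up to a low watermark, and drained back once it reaches a high watermark, so no single thread's cache can grow without bound. This is a minimal standalone C sketch, not code from this CL: the names LWM, HWM, Node, refill, release, cache_alloc, and cache_free are invented for illustration, the watermark values are arbitrary, locking of the shared list is omitted, and the chunked SysAlloc refill is simplified to a plain malloc.

	#include <stdio.h>
	#include <stdlib.h>

	enum { LWM = 4, HWM = 8 };              /* illustrative watermarks, not the CL's values */

	typedef struct Node Node;
	struct Node { Node *next; };

	static Node *global_cache;              /* shared free list; a real version locks this */
	static Node *local_cache;               /* per-thread free list */
	static int   local_count;

	/* Refill the local cache from the global cache (or the OS) up to LWM entries. */
	static void
	refill(void)
	{
		Node *n;

		while(local_count < LWM) {
			n = global_cache;
			if(n == NULL) {
				n = malloc(sizeof *n);  /* stand-in for the chunked SysAlloc path */
				if(n == NULL)
					abort();
			} else
				global_cache = n->next;
			n->next = local_cache;
			local_cache = n;
			local_count++;
		}
	}

	/* Drain the local cache back to the global cache down to LWM entries. */
	static void
	release(void)
	{
		Node *n;

		while(local_count > LWM) {
			n = local_cache;
			local_cache = n->next;
			local_count--;
			n->next = global_cache;
			global_cache = n;
		}
	}

	static Node*
	cache_alloc(void)
	{
		Node *n;

		if(local_count == 0)
			refill();               /* one batched trip to the shared list */
		n = local_cache;
		local_cache = n->next;
		local_count--;
		return n;
	}

	static void
	cache_free(Node *n)
	{
		n->next = local_cache;
		local_cache = n;
		if(++local_count >= HWM)
			release();              /* bound per-thread growth */
	}

	int
	main(void)
	{
		Node *a = cache_alloc();
		Node *b = cache_alloc();
		cache_free(a);
		cache_free(b);
		printf("cached locally: %d\n", local_count);
		return 0;
	}

Batching the transfers keeps traffic on the shared list's lock low while still bounding how many segments any one thread can hoard, which is the trade-off the CL title ("less aggressive per-thread stack segment caching") refers to.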