Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/mgc0.c

Issue 5279048: code review 5279048: runtime: faster and more scalable GC (Closed)
Left Patch Set: diff -r f624d7f32d22 https://go.googlecode.com/hg/ Created 13 years, 3 months ago
Right Patch Set: diff -r f44057cc01b2 https://go.googlecode.com/hg/ Created 12 years, 11 months ago
LEFT | RIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 // Garbage collector. 5 // Garbage collector.
6 6
7 #include "runtime.h" 7 #include "runtime.h"
8 #include "arch.h" 8 #include "arch_GOARCH.h"
9 #include "malloc.h" 9 #include "malloc.h"
10 #include "stack.h" 10 #include "stack.h"
11 11
12 enum { 12 enum {
13 Debug = 0, 13 Debug = 0,
14 PtrSize = sizeof(void*), 14 PtrSize = sizeof(void*),
15 DebugMark = 0, // run second pass to check mark 15 DebugMark = 0, // run second pass to check mark
16 DataBlock = 8*1024, 16 DataBlock = 8*1024,
17 17
18 // Four bits per word (see #defines below). 18 // Four bits per word (see #defines below).
(...skipping 27 matching lines...)
46 // /* then test bits & bitAllocated, bits & bitMarked, etc. */ 46 // /* then test bits & bitAllocated, bits & bitMarked, etc. */
47 // 47 //
48 #define bitAllocated ((uintptr)1<<(bitShift*0)) 48 #define bitAllocated ((uintptr)1<<(bitShift*0))
49 #define bitNoPointers ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */ 49 #define bitNoPointers ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
50 #define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */ 50 #define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
51 #define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */ 51 #define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */
52 #define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */ 52 #define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */
53 53
54 #define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial) 54 #define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
55 55
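(A minimal sketch, not part of the change: how the bits for a heap word p are located. The same sequence is open-coded in scanblock below; it assumes wordsPerBitmapWord from malloc.h and the #defines above, and relies on the bitmap growing down from arena_start. The helper name is illustrative.)

	static bool
	hasmark(void *p, byte *arena_start)
	{
		uintptr off, shift, bits, *bitp;

		off = (uintptr*)p - (uintptr*)arena_start;	// word offset into arena
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp >> shift;
		// bitMarked is meaningful only when bitAllocated is set.
		return (bits & bitAllocated) != 0 && (bits & bitMarked) != 0;
	}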
56 // Holding worldsema grants an M the right to try to stop the world.
57 // The procedure is:
58 //
59 // runtime·semacquire(&runtime·worldsema);
60 // m->gcing = 1;
61 // runtime·stoptheworld();
62 //
63 // ... do stuff ...
64 //
65 // m->gcing = 0;
66 // runtime·semrelease(&runtime·worldsema);
67 // runtime·starttheworld();
68 //
69 uint32 runtime·worldsema = 1;
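(The protocol in the comment above, wrapped in a hypothetical helper purely for illustration; real call sites such as runtime·gc and runtime·ReadMemStats below open-code this sequence.)

	static void
	withworldstopped(void (*fn)(void))
	{
		runtime·semacquire(&runtime·worldsema);
		m->gcing = 1;
		runtime·stoptheworld();

		fn();	// ... do stuff ...

		m->gcing = 0;
		runtime·semrelease(&runtime·worldsema);
		runtime·starttheworld();
	}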
56 static int32 gctrace; 70 static int32 gctrace;
57 71
58 typedef struct Workbuf Workbuf; 72 typedef struct Workbuf Workbuf;
59 struct Workbuf 73 struct Workbuf
60 { 74 {
61 Workbuf *next; 75 Workbuf *next;
62 uintptr pushcnt; 76 uintptr pushcnt;
63 uintptr nobj; 77 uintptr nobj;
64 byte *obj[512-3]; 78 byte *obj[512-3];
65 }; 79 };
(...skipping 10 matching lines...)
76 struct FinBlock 90 struct FinBlock
77 { 91 {
78 FinBlock *alllink; 92 FinBlock *alllink;
79 FinBlock *next; 93 FinBlock *next;
80 int32 cnt; 94 int32 cnt;
81 int32 cap; 95 int32 cap;
82 Finalizer fin[1]; 96 Finalizer fin[1];
83 }; 97 };
84 98
85 extern byte data[]; 99 extern byte data[];
86 extern byte end[]; 100 extern byte etext[];
101 extern byte ebss[];
87 102
88 static G *fing; 103 static G *fing;
89 static FinBlock *finq; // list of finalizers that are to be executed 104 static FinBlock *finq; // list of finalizers that are to be executed
90 static FinBlock *finc; // cache of free blocks 105 static FinBlock *finc; // cache of free blocks
91 static FinBlock *allfin; // list of all blocks 106 static FinBlock *allfin; // list of all blocks
92 static Lock finlock; 107 static Lock finlock;
93 static int32 fingwait; 108 static int32 fingwait;
94 109
95 static void runfinq(void); 110 static void runfinq(void);
96 static Workbuf* getempty(Workbuf*); 111 static Workbuf* getempty(Workbuf*);
(...skipping 158 matching lines...)
255 if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) { 270 if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
256 obj = (byte*)obj - (shift-j)*PtrSize; 271 obj = (byte*)obj - (shift-j)*PtrSize;
257 shift = j; 272 shift = j;
258 bits = xbits>>shift; 273 bits = xbits>>shift;
259 goto found; 274 goto found;
260 } 275 }
261 } 276 }
262 277
263 // Otherwise consult span table to find beginning. 278 // Otherwise consult span table to find beginning.
264 // (Manually inlined copy of MHeap_LookupMaybe.) 279 // (Manually inlined copy of MHeap_LookupMaybe.)
265 m->gcstats.naddrlookup++;
266 k = (uintptr)obj>>PageShift; 280 k = (uintptr)obj>>PageShift;
267 x = k; 281 x = k;
268 if(sizeof(void*) == 8) 282 if(sizeof(void*) == 8)
269 x -= (uintptr)arena_start>>PageShift; 283 x -= (uintptr)arena_start>>PageShift;
270 s = runtime·mheap.map[x]; 284 s = runtime·mheap.map[x];
271 if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse) 285 if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse)
272 continue; 286 continue;
273 p = (byte*)((uintptr)s->start<<PageShift); 287 p = (byte*)((uintptr)s->start<<PageShift);
274 if(s->sizeclass == 0) { 288 if(s->sizeclass == 0) {
275 obj = p; 289 obj = p;
(...skipping 70 matching lines...)
346 // Emptied our buffer: refill. 360 // Emptied our buffer: refill.
347 wbuf = getfull(wbuf); 361 wbuf = getfull(wbuf);
348 if(wbuf == nil) 362 if(wbuf == nil)
349 return; 363 return;
350 nobj = wbuf->nobj; 364 nobj = wbuf->nobj;
351 wp = wbuf->obj + wbuf->nobj; 365 wp = wbuf->obj + wbuf->nobj;
352 } 366 }
353 b = *--wp; 367 b = *--wp;
354 nobj--; 368 nobj--;
355 369
356 » » // Figure out n = size of b. Start by loading bits for b. 370 » » // Ask span about size class.
357 » » off = (uintptr*)b - (uintptr*)arena_start;
358 » » bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
359 » » shift = off % wordsPerBitmapWord;
360 » » xbits = *bitp;
361 » » bits = xbits >> shift;
362
363 » » // Might be small; look for nearby block boundary.
364 » » // A block boundary is marked by either bitBlockBoundary
365 » » // or bitAllocated being set (see notes near their definition).
366 » » enum {
367 » » » boundary = bitBlockBoundary|bitAllocated
368 » » };
369 » » // Look for a block boundary both after and before b
370 » » // in the same bitmap word.
371 » » //
372 » » // A block boundary j words after b is indicated by
373 » » //» bits>>j & boundary
374 » » // assuming shift+j < bitShift. (If shift+j >= bitShift then
375 » » // we'll be bleeding other bit types like bitMarked into our test.)
376 » » // Instead of inserting the conditional shift+j < bitShift into the loop,
377 » » // we can let j range from 1 to bitShift as long as we first
378 » » // apply a mask to keep only the bits corresponding
379 » » // to shift+j < bitShift aka j < bitShift-shift.
380 » » bits &= (boundary<<(bitShift-shift)) - boundary;
381
382 » » // A block boundary j words before b is indicated by
383 » » //» xbits>>(shift-j) & boundary
384 » » // (assuming shift >= j). There is no cleverness here
385 » » // to avoid the test, because when j gets too large the shift
386 » » // turns negative, which is undefined in C.
387
388 » » for(j=1; j<bitShift; j++) {
389 » » » if(((bits>>j)&boundary) != 0 || shift>=j && ((xbits>>(shift-j))&boundary) != 0) {
390 » » » » n = j*PtrSize;
391 » » » » goto scan;
392 » » » }
393 » » }
394
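(The masking line above works because boundary carries exactly one bit per plane of the bitmap word, so (boundary<<k) - boundary == ((1<<k)-1)*boundary, keeping precisely the positions j < k in every plane. A self-contained check of that identity, modeling the 64-bit layout where bitShift is 16; standalone C, not runtime code:)

	#include <assert.h>
	#include <stdint.h>

	int main(void) {
		// 64-bit bitmap word: boundary = bitBlockBoundary|bitAllocated = (1<<16)|1.
		uint64_t boundary = ((uint64_t)1<<16) | 1;
		int bitShift = 16, shift, k;

		for(shift = 0; shift <= bitShift; shift++) {
			k = bitShift - shift;
			// The mask keeps bit j and bit 16+j exactly for j < k.
			assert((boundary<<k) - boundary == (((uint64_t)1<<k) - 1)*boundary);
		}
		return 0;
	}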
395 » » // Fall back to asking span about size class.
396 // (Manually inlined copy of MHeap_Lookup.) 371 // (Manually inlined copy of MHeap_Lookup.)
397 m->gcstats.nsizelookup++;
398 x = (uintptr)b>>PageShift; 372 x = (uintptr)b>>PageShift;
399 if(sizeof(void*) == 8) 373 if(sizeof(void*) == 8)
400 x -= (uintptr)arena_start>>PageShift; 374 x -= (uintptr)arena_start>>PageShift;
401 s = runtime·mheap.map[x]; 375 s = runtime·mheap.map[x];
402 if(s->sizeclass == 0) 376 if(s->sizeclass == 0)
403 n = s->npages<<PageShift; 377 n = s->npages<<PageShift;
404 else 378 else
405 n = runtime·class_to_size[s->sizeclass]; 379 n = runtime·class_to_size[s->sizeclass];
406 scan:;
407 } 380 }
408 } 381 }
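(The manually inlined MHeap_Lookup sequence above, written out as a helper for readability; the helper name is illustrative and the body only restates the code in the diff: page index into the span map, then span size for large objects or the size-class table otherwise.)

	static uintptr
	blocksize(byte *b, byte *arena_start)
	{
		uintptr x;
		MSpan *s;

		x = (uintptr)b>>PageShift;	// page index of b
		if(sizeof(void*) == 8)
			x -= (uintptr)arena_start>>PageShift;
		s = runtime·mheap.map[x];	// owning span
		if(s->sizeclass == 0)	// large object: occupies whole pages
			return s->npages<<PageShift;
		return runtime·class_to_size[s->sizeclass];
	}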
409 382
410 // debug_scanblock is the debug copy of scanblock. 383 // debug_scanblock is the debug copy of scanblock.
411 // it is simpler, slower, single-threaded, recursive, 384 // it is simpler, slower, single-threaded, recursive,
412 // and uses bitSpecial as the mark bit. 385 // and uses bitSpecial as the mark bit.
413 static void 386 static void
414 debug_scanblock(byte *b, int64 n) 387 debug_scanblock(byte *b, int64 n)
415 { 388 {
416 byte *obj, *p; 389 byte *obj, *p;
(...skipping 244 matching lines...)
661 634
662 // do not mark the finalizer block itself. just mark the things it points at. 635 // do not mark the finalizer block itself. just mark the things it points at.
663 addroot(v, size); 636 addroot(v, size);
664 } 637 }
665 638
666 static void 639 static void
667 addroots(void) 640 addroots(void)
668 { 641 {
669 G *gp; 642 G *gp;
670 FinBlock *fb; 643 FinBlock *fb;
671 » byte *p, *e; 644 » byte *p;
672 645
673 work.nroot = 0; 646 work.nroot = 0;
674 647
675 » e = (byte*)&runtime·mheap; 648 » // mark data+bss.
676 » for(p=data; p<e; p+=DataBlock) 649 » for(p=data; p<ebss; p+=DataBlock)
677 » » addroot(p, p+DataBlock<e?DataBlock:e-p); 650 » » addroot(p, p+DataBlock<ebss?DataBlock:ebss-p);
678
679 » e = end;
680 » for(p=(byte*)(&runtime·mheap+1); p<e; p+=DataBlock)
681 » » addroot(p, p+DataBlock<e?DataBlock:e-p);
682 651
683 runtime·walkfintab(addfinroots); 652 runtime·walkfintab(addfinroots);
684 653
685 for(fb=allfin; fb; fb=fb->alllink) 654 for(fb=allfin; fb; fb=fb->alllink)
686 addroot((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0])); 655 addroot((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]));
687 656
688 for(gp=runtime·allg; gp!=nil; gp=gp->alllink) { 657 for(gp=runtime·allg; gp!=nil; gp=gp->alllink) {
689 switch(gp->status){ 658 switch(gp->status){
690 default: 659 default:
691 runtime·printf("unexpected G.status %d\n", gp->status); 660 runtime·printf("unexpected G.status %d\n", gp->status);
(...skipping 60 matching lines...)
752 uintptr size; 721 uintptr size;
753 byte *p; 722 byte *p;
754 MCache *c; 723 MCache *c;
755 byte *arena_start; 724 byte *arena_start;
756 MLink *start, *end; 725 MLink *start, *end;
757 int32 nfree; 726 int32 nfree;
758 727
759 USED(desc); 728 USED(desc);
760 729
761 s = runtime·mheap.allspans[spanidx]; 730 s = runtime·mheap.allspans[spanidx];
731 // Stamp newly unused spans. The scavenger will use that
732 // info to potentially give back some pages to the OS.
733 if(s->state == MSpanFree && s->unusedsince == 0)
734 s->unusedsince = runtime·nanotime();
762 if(s->state != MSpanInUse) 735 if(s->state != MSpanInUse)
763 return; 736 return;
764 737
765 arena_start = runtime·mheap.arena_start; 738 arena_start = runtime·mheap.arena_start;
766 739
767 p = (byte*)(s->start << PageShift); 740 p = (byte*)(s->start << PageShift);
768 cl = s->sizeclass; 741 cl = s->sizeclass;
769 if(cl == 0) { 742 if(cl == 0) {
770 size = s->npages<<PageShift; 743 size = s->npages<<PageShift;
771 n = 1; 744 n = 1;
(...skipping 93 matching lines...)
865 runtime·usleep(10); 838 runtime·usleep(10);
866 } 839 }
867 840
868 // parallel sweep over all spans 841 // parallel sweep over all spans
869 parfor(&work.sweepfor); 842 parfor(&work.sweepfor);
870 843
871 if(runtime·xadd(&work.ndone, +1) == work.nproc-1) 844 if(runtime·xadd(&work.ndone, +1) == work.nproc-1)
872 runtime·notewakeup(&work.alldone); 845 runtime·notewakeup(&work.alldone);
873 } 846 }
874 847
875 // Semaphore, not Lock, so that the goroutine
876 // reschedules when there is contention rather
877 // than spinning.
878 static uint32 gcsema = 1;
879
880 // Initialized from $GOGC. GOGC=off means no gc. 848 // Initialized from $GOGC. GOGC=off means no gc.
881 // 849 //
882 // Next gc is after we've allocated an extra amount of 850 // Next gc is after we've allocated an extra amount of
883 // memory proportional to the amount already in use. 851 // memory proportional to the amount already in use.
884 // If gcpercent=100 and we're using 4M, we'll gc again 852 // If gcpercent=100 and we're using 4M, we'll gc again
885 // when we get to 8M. This keeps the gc cost in linear 853 // when we get to 8M. This keeps the gc cost in linear
886 // proportion to the allocation cost. Adjusting gcpercent 854 // proportion to the allocation cost. Adjusting gcpercent
887 // just changes the linear constant (and also the amount of 855 // just changes the linear constant (and also the amount of
888 // extra memory used). 856 // extra memory used).
889 static int32 gcpercent = -2; 857 static int32 gcpercent = -2;
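(A sketch of the pacing arithmetic described in the comment above; the actual next_gc update lives in code elided from this diff, so the helper below is illustrative only.)

	// gcpercent=100 with 4M in use yields a next collection point of 8M.
	static uint64
	next_gc_target(uint64 heap_alloc, int32 gcpercent)
	{
		return heap_alloc + heap_alloc*gcpercent/100;
	}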
(...skipping 73 matching lines...)
963 else 931 else
964 gcpercent = runtime·atoi(p); 932 gcpercent = runtime·atoi(p);
965 933
966 p = runtime·getenv("GOGCTRACE"); 934 p = runtime·getenv("GOGCTRACE");
967 if(p != nil) 935 if(p != nil)
968 gctrace = runtime·atoi(p); 936 gctrace = runtime·atoi(p);
969 } 937 }
970 if(gcpercent < 0) 938 if(gcpercent < 0)
971 return; 939 return;
972 940
973 » runtime·semacquire(&gcsema); 941 » runtime·semacquire(&runtime·worldsema);
974 if(!force && mstats.heap_alloc < mstats.next_gc) { 942 if(!force && mstats.heap_alloc < mstats.next_gc) {
975 » » runtime·semrelease(&gcsema); 943 » » runtime·semrelease(&runtime·worldsema);
976 return; 944 return;
977 } 945 }
978 946
979 t0 = runtime·nanotime(); 947 t0 = runtime·nanotime();
980 948
981 m->gcing = 1; 949 m->gcing = 1;
982 runtime·stoptheworld(); 950 runtime·stoptheworld();
983 951
984 heap0 = 0; 952 heap0 = 0;
985 obj0 = 0; 953 obj0 = 0;
986 if(gctrace) { 954 if(gctrace) {
987 cachestats(0); 955 cachestats(0);
988 heap0 = mstats.heap_alloc; 956 heap0 = mstats.heap_alloc;
989 obj0 = mstats.nmalloc - mstats.nfree; 957 obj0 = mstats.nmalloc - mstats.nfree;
990 } 958 }
991 959
992 work.nproc = runtime·gcprocs(); 960 work.nproc = runtime·gcprocs();
993 work.nwait = 0; 961 work.nwait = 0;
994 work.ndone = 0; 962 work.ndone = 0;
995 work.debugmarkdone = 0; 963 work.debugmarkdone = 0;
996 addroots(); 964 addroots();
997 parforsetup(&work.markfor, markroot, work.nproc, work.nroot, nil, false); 965 parforsetup(&work.markfor, markroot, work.nproc, work.nroot, nil, false);
998 parforsetup(&work.sweepfor, sweepspan, work.nproc, runtime·mheap.nspan, nil, true); 966 parforsetup(&work.sweepfor, sweepspan, work.nproc, runtime·mheap.nspan, nil, true);
999 if(work.nproc > 1) { 967 if(work.nproc > 1) {
1000 runtime·noteclear(&work.alldone); 968 runtime·noteclear(&work.alldone);
1001 » » runtime·helpgc(); 969 » » runtime·helpgc(work.nproc);
1002 } 970 }
1003 971
1004 parfor(&work.markfor); 972 parfor(&work.markfor);
1005 scanblock(nil, 0); 973 scanblock(nil, 0);
1006 974
1007 if(DebugMark) { 975 if(DebugMark) {
1008 for(i=0; i<work.nroot; i++) 976 for(i=0; i<work.nroot; i++)
1009 debug_scanblock(work.roots[i].p, work.roots[i].n); 977 debug_scanblock(work.roots[i].p, work.roots[i].n);
1010 runtime·xchg(&work.debugmarkdone, 1); 978 runtime·xchg(&work.debugmarkdone, 1);
1011 } 979 }
(...skipping 20 matching lines...)
1032 m->locks--; 1000 m->locks--;
1033 } 1001 }
1034 ········ 1002 ········
1035 if(work.nproc > 1) 1003 if(work.nproc > 1)
1036 runtime·notesleep(&work.alldone); 1004 runtime·notesleep(&work.alldone);
1037 1005
1038 heap1 = mstats.heap_alloc; 1006 heap1 = mstats.heap_alloc;
1039 obj1 = mstats.nmalloc - mstats.nfree; 1007 obj1 = mstats.nmalloc - mstats.nfree;
1040 1008
1041 t3 = runtime·nanotime(); 1009 t3 = runtime·nanotime();
1010 mstats.last_gc = t3;
1042 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0; 1011 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
1043 mstats.pause_total_ns += t3 - t0; 1012 mstats.pause_total_ns += t3 - t0;
1044 mstats.numgc++; 1013 mstats.numgc++;
1045 if(mstats.debuggc) 1014 if(mstats.debuggc)
1046 runtime·printf("pause %D\n", t3-t0); 1015 runtime·printf("pause %D\n", t3-t0);
1047 1016
1048 if(gctrace) { 1017 if(gctrace) {
1049 » » runtime·printf("gc%d(%d): %D+%D+%D ms %D -> %D MB %D -> %D (%D-%D) objects %D pointer lookups (%D size, %D addr), %D(%D) steals, %D(%D) handoffs, %D/%D/%D yields\n", 1018 » » runtime·printf("gc%d(%d): %D+%D+%D ms %D -> %D MB %D -> %D (%D-%D) objects, %D(%D) steals, %D(%D) handoffs, %D/%D/%D yields\n",
1050 mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000, 1019 mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
1051 heap0>>20, heap1>>20, obj0, obj1, 1020 heap0>>20, heap1>>20, obj0, obj1,
1052 mstats.nmalloc, mstats.nfree, 1021 mstats.nmalloc, mstats.nfree,
1053 stats.nsizelookup+stats.naddrlookup, stats.nsizelookup, stats.naddrlookup,
1054 stats.nsteal, stats.nstealcnt, 1022 stats.nsteal, stats.nstealcnt,
1055 stats.nhandoff, stats.nhandoffcnt, 1023 stats.nhandoff, stats.nhandoffcnt,
1056 stats.nprocyield, stats.nosyield, stats.nsleep); 1024 stats.nprocyield, stats.nosyield, stats.nsleep);
1057 } 1025 }
1058 1026 »·······
1059 » runtime·semrelease(&gcsema); 1027 » runtime·MProf_GC();
1028 » runtime·semrelease(&runtime·worldsema);
1060 runtime·starttheworld(); 1029 runtime·starttheworld();
1061 1030
1062 // give the queued finalizers, if any, a chance to run 1031 // give the queued finalizers, if any, a chance to run
1063 if(finq != nil) 1032 if(finq != nil)
1064 runtime·gosched(); 1033 runtime·gosched();
1065 1034
1066 if(gctrace > 1 && !force) 1035 if(gctrace > 1 && !force)
1067 runtime·gc(1); 1036 runtime·gc(1);
1068 } 1037 }
1069 1038
1070 void 1039 void
1071 runtime·UpdateMemStats(void) 1040 runtime·ReadMemStats(MStats *stats)
1072 { 1041 {
1073 » // Have to acquire gcsema to stop the world, 1042 » // Have to acquire worldsema to stop the world,
1074 // because stoptheworld can only be used by 1043 // because stoptheworld can only be used by
1075 // one goroutine at a time, and there might be 1044 // one goroutine at a time, and there might be
1076 // a pending garbage collection already calling it. 1045 // a pending garbage collection already calling it.
1077 » runtime·semacquire(&gcsema); 1046 » runtime·semacquire(&runtime·worldsema);
1078 m->gcing = 1; 1047 m->gcing = 1;
1079 runtime·stoptheworld(); 1048 runtime·stoptheworld();
1080 cachestats(0); 1049 cachestats(0);
1050 *stats = mstats;
1081 m->gcing = 0; 1051 m->gcing = 0;
1082 » runtime·semrelease(&gcsema); 1052 » runtime·semrelease(&runtime·worldsema);
1083 runtime·starttheworld(); 1053 runtime·starttheworld();
1084 } 1054 }
1085 1055
1086 static void 1056 static void
1087 runfinq(void) 1057 runfinq(void)
1088 { 1058 {
1089 Finalizer *f; 1059 Finalizer *f;
1090 FinBlock *fb, *next; 1060 FinBlock *fb, *next;
1091 byte *frame; 1061 byte *frame;
1092 uint32 framesz, framecap, i; 1062 uint32 framesz, framecap, i;
(...skipping 682 matching lines...)
1775 runtime·newproc1((byte*)testParforProc, (byte*)&arg, sizeof(arg), 0, runtime·CTestParforParallel); 1745 runtime·newproc1((byte*)testParforProc, (byte*)&arg, sizeof(arg), 0, runtime·CTestParforParallel);
1776 m->locks--; 1746 m->locks--;
1777 parfor(&desc); 1747 parfor(&desc);
1778 for(i=0; i<N; i++) { 1748 for(i=0; i<N; i++) {
1779 if(data[i] != i*i) 1749 if(data[i] != i*i)
1780 runtime·panicstring("incorrect result"); 1750 runtime·panicstring("incorrect result");
1781 } 1751 }
1782 runtime·free(data); 1752 runtime·free(data);
1783 runtime·gomaxprocsfunc(procs); 1753 runtime·gomaxprocsfunc(procs);
1784 } 1754 }
