Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/runtime/mgc0.c

Issue 5279048: code review 5279048: runtime: faster and more scalable GC (Closed)
Left Patch Set: diff -r fd80a4497037 https://go.googlecode.com/hg/ Created 13 years, 3 months ago
Right Patch Set: diff -r f44057cc01b2 https://go.googlecode.com/hg/ Created 12 years, 11 months ago
LEFT | RIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 // Garbage collector. 5 // Garbage collector.
6 6
7 #include "runtime.h" 7 #include "runtime.h"
8 #include "arch.h" 8 #include "arch_GOARCH.h"
9 #include "malloc.h" 9 #include "malloc.h"
10 #include "stack.h" 10 #include "stack.h"
11 11
12 enum { 12 enum {
13 Debug = 0, 13 Debug = 0,
14 PtrSize = sizeof(void*), 14 PtrSize = sizeof(void*),
15 DebugMark = 0, // run second pass to check mark 15 DebugMark = 0, // run second pass to check mark
16 DataBlock = 8*1024, 16 DataBlock = 8*1024,
17 17
18 // Four bits per word (see #defines below). 18 // Four bits per word (see #defines below).
(...skipping 27 matching lines...)
46 // /* then test bits & bitAllocated, bits & bitMarked, etc. */ 46 // /* then test bits & bitAllocated, bits & bitMarked, etc. */
47 // 47 //
48 #define bitAllocated ((uintptr)1<<(bitShift*0)) 48 #define bitAllocated ((uintptr)1<<(bitShift*0))
49 #define bitNoPointers ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */ 49 #define bitNoPointers ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */
50 #define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */ 50 #define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */
51 #define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */ 51 #define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */
52 #define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */ 52 #define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */
53 53
54 #define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial) 54 #define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
55 55
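For reference, the addressing arithmetic these bit tests assume, restated from the scanblock and sweepspan code later in this file (a reading aid, not new CL code):

	// Locate the 4 bitmap bits for the heap word at address p.
	// The bitmap lives just below arena_start and grows downward.
	uintptr off, *bitp, shift, bits;

	off = (uintptr*)p - (uintptr*)arena_start;                 // word index into the arena
	bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; // bitmap word holding p's bits
	shift = off % wordsPerBitmapWord;                          // field position within that word
	bits = *bitp >> shift;
	// then test bits & bitAllocated, bits & bitMarked, etc.
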
56 // Holding worldsema grants an M the right to try to stop the world.
57 // The procedure is:
58 //
59 // runtime·semacquire(&runtime·worldsema);
60 // m->gcing = 1;
61 // runtime·stoptheworld();
62 //
63 // ... do stuff ...
64 //
65 // m->gcing = 0;
66 // runtime·semrelease(&runtime·worldsema);
67 // runtime·starttheworld();
68 //
69 uint32 runtime·worldsema = 1;
56 static int32 gctrace; 70 static int32 gctrace;
57 71
58 typedef struct Workbuf Workbuf; 72 typedef struct Workbuf Workbuf;
59 struct Workbuf 73 struct Workbuf
60 { 74 {
61 Workbuf *next; 75 Workbuf *next;
62 uintptr pushcnt; 76 uintptr pushcnt;
63 uintptr nobj; 77 uintptr nobj;
64 byte *obj[512-3]; 78 byte *obj[512-3];
65 }; 79 };
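(The obj[512-3] sizing makes the three header words — next, pushcnt, nobj — plus the object array come to exactly 512 words, so each Workbuf occupies 4KB on 64-bit and 2KB on 32-bit.)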
(...skipping 10 matching lines...)
76 struct FinBlock 90 struct FinBlock
77 { 91 {
78 FinBlock *alllink; 92 FinBlock *alllink;
79 FinBlock *next; 93 FinBlock *next;
80 int32 cnt; 94 int32 cnt;
81 int32 cap; 95 int32 cap;
82 Finalizer fin[1]; 96 Finalizer fin[1];
83 }; 97 };
84 98
85 extern byte data[]; 99 extern byte data[];
86 extern byte end[]; 100 extern byte etext[];
101 extern byte ebss[];
87 102
88 static G *fing; 103 static G *fing;
89 static FinBlock *finq; // list of finalizers that are to be executed 104 static FinBlock *finq; // list of finalizers that are to be executed
90 static FinBlock *finc; // cache of free blocks 105 static FinBlock *finc; // cache of free blocks
91 static FinBlock *allfin; // list of all blocks 106 static FinBlock *allfin; // list of all blocks
92 static Lock finlock; 107 static Lock finlock;
93 static int32 fingwait; 108 static int32 fingwait;
94 109
95 static void runfinq(void); 110 static void runfinq(void);
96 static Workbuf* getempty(Workbuf*); 111 static Workbuf* getempty(Workbuf*);
(...skipping 158 matching lines...)
255                         if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) { 270                         if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
256 obj = (byte*)obj - (shift-j)*PtrSize; 271 obj = (byte*)obj - (shift-j)*PtrSize;
257 shift = j; 272 shift = j;
258 bits = xbits>>shift; 273 bits = xbits>>shift;
259 goto found; 274 goto found;
260 } 275 }
261 } 276 }
262 277
263 // Otherwise consult span table to find beginning. 278 // Otherwise consult span table to find beginning.
264 // (Manually inlined copy of MHeap_LookupMaybe.) 279 // (Manually inlined copy of MHeap_LookupMaybe.)
265 m->gcstats.naddrlookup++;
266 k = (uintptr)obj>>PageShift; 280 k = (uintptr)obj>>PageShift;
267 x = k; 281 x = k;
268 if(sizeof(void*) == 8) 282 if(sizeof(void*) == 8)
269 x -= (uintptr)arena_start>>PageShift; 283 x -= (uintptr)arena_start>>PageShift;
270 s = runtime·mheap.map[x]; 284 s = runtime·mheap.map[x];
271 if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse) 285 if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse)
272 continue; 286 continue;
273 p = (byte*)((uintptr)s->start<<PageShift); 287 p = (byte*)((uintptr)s->start<<PageShift);
274 if(s->sizeclass == 0) { 288 if(s->sizeclass == 0) {
275 obj = p; 289 obj = p;
(...skipping 70 matching lines...)
346 // Emptied our buffer: refill. 360 // Emptied our buffer: refill.
347 wbuf = getfull(wbuf); 361 wbuf = getfull(wbuf);
348 if(wbuf == nil) 362 if(wbuf == nil)
349 return; 363 return;
350 nobj = wbuf->nobj; 364 nobj = wbuf->nobj;
351 wp = wbuf->obj + wbuf->nobj; 365 wp = wbuf->obj + wbuf->nobj;
352 } 366 }
353 b = *--wp; 367 b = *--wp;
354 nobj--; 368 nobj--;
355 369
356                 // Figure out n = size of b.  Start by loading bits for b. 370                 // Ask span about size class.
357                 off = (uintptr*)b - (uintptr*)arena_start;
358                 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
359                 shift = off % wordsPerBitmapWord;
360                 xbits = *bitp;
361                 bits = xbits >> shift;
362
363                 // Might be small; look for nearby block boundary.
364                 // A block boundary is marked by either bitBlockBoundary
365                 // or bitAllocated being set (see notes near their definition).
366                 enum {
367                         boundary = bitBlockBoundary|bitAllocated
368                 };
369                 // Look for a block boundary both after and before b
370                 // in the same bitmap word.
371                 //
372                 // A block boundary j words after b is indicated by
373                 //      bits>>j & boundary
374                 // assuming shift+j < bitShift.  (If shift+j >= bitShift then
375                 // we'll be bleeding other bit types like bitMarked into our test.)
376                 // Instead of inserting the conditional shift+j < bitShift into the loop,
377                 // we can let j range from 1 to bitShift as long as we first
378                 // apply a mask to keep only the bits corresponding
379                 // to shift+j < bitShift aka j < bitShift-shift.
380                 bits &= (boundary<<(bitShift-shift)) - boundary;
381
382                 // A block boundary j words before b is indicated by
383                 //      xbits>>(shift-j) & boundary
384                 // (assuming shift >= j).  There is no cleverness here
385                 // to avoid the test, because when j gets too large the shift
386                 // turns negative, which is undefined in C.
387
388                 for(j=1; j<bitShift; j++) {
389                         if(((bits>>j)&boundary) != 0 || shift>=j && ((xbits>>(shift-j))&boundary) != 0) {
390                                 n = j*PtrSize;
391                                 goto scan;
392                         }
393                 }
394
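A worked example of the mask trick in the deleted block above (illustrative numbers; on 64-bit, bitShift is 16, so boundary = bitBlockBoundary|bitAllocated = 0x10001):

	shift = 13                      // so bitShift-shift = 3
	mask  = (0x10001<<3) - 0x10001  // = 0x80008 - 0x10001 = 0x70007

The mask keeps exactly bit positions 0..2 and 16..18, which are the positions (bits>>j)&boundary inspects for j < 3. For j >= 3 the test sees only masked-out zeros, so the loop needs no explicit shift+j < bitShift check.
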
395                 // Fall back to asking span about size class.
396 // (Manually inlined copy of MHeap_Lookup.) 371 // (Manually inlined copy of MHeap_Lookup.)
397 m->gcstats.nsizelookup++;
398 x = (uintptr)b>>PageShift; 372 x = (uintptr)b>>PageShift;
399 if(sizeof(void*) == 8) 373 if(sizeof(void*) == 8)
400 x -= (uintptr)arena_start>>PageShift; 374 x -= (uintptr)arena_start>>PageShift;
401 s = runtime·mheap.map[x]; 375 s = runtime·mheap.map[x];
402 if(s->sizeclass == 0) 376 if(s->sizeclass == 0)
403 n = s->npages<<PageShift; 377 n = s->npages<<PageShift;
404 else 378 else
405 n = runtime·class_to_size[s->sizeclass]; 379 n = runtime·class_to_size[s->sizeclass];
406 scan:;
407 } 380 }
408 } 381 }
409 382
410 // debug_scanblock is the debug copy of scanblock. 383 // debug_scanblock is the debug copy of scanblock.
411 // it is simpler, slower, single-threaded, recursive, 384 // it is simpler, slower, single-threaded, recursive,
412 // and uses bitSpecial as the mark bit. 385 // and uses bitSpecial as the mark bit.
413 static void 386 static void
414 debug_scanblock(byte *b, int64 n) 387 debug_scanblock(byte *b, int64 n)
415 { 388 {
416 byte *obj, *p; 389 byte *obj, *p;
(...skipping 179 matching lines...)
596 } 569 }
597 work.roots = new; 570 work.roots = new;
598 work.rootcap = cap; 571 work.rootcap = cap;
599 } 572 }
600 work.roots[work.nroot].p = p; 573 work.roots[work.nroot].p = p;
601 work.roots[work.nroot].n = n; 574 work.roots[work.nroot].n = n;
602 work.nroot++; 575 work.nroot++;
603 } 576 }
604 577
605 static void 578 static void
606 collectstackroots(G *gp) 579 addstackroots(G *gp)
607 { 580 {
608 M *mp; 581 M *mp;
609 int32 n; 582 int32 n;
610 Stktop *stk; 583 Stktop *stk;
611 byte *sp, *guard; 584 byte *sp, *guard;
612 585
613 stk = (Stktop*)gp->stackbase; 586 stk = (Stktop*)gp->stackbase;
614 guard = gp->stackguard; 587 guard = gp->stackguard;
615 588
616 if(gp == g) { 589 if(gp == g) {
617 // Scanning our own stack: start at &gp. 590 // Scanning our own stack: start at &gp.
618 sp = (byte*)&gp; 591 sp = (byte*)&gp;
619 } else if((mp = gp->m) != nil && mp->helpgc) { 592 } else if((mp = gp->m) != nil && mp->helpgc) {
620                 // gchelper's stack is in active use and has no interesting pointers. 593                 // gchelper's stack is in active use and has no interesting pointers.
621 return; 594 return;
622 } else { 595 } else {
623 // Scanning another goroutine's stack. 596 // Scanning another goroutine's stack.
624 // The goroutine is usually asleep (the world is stopped). 597 // The goroutine is usually asleep (the world is stopped).
625 sp = gp->sched.sp; 598 sp = gp->sched.sp;
626 599
627                 // The exception is that if the goroutine is about to enter or might 600                 // The exception is that if the goroutine is about to enter or might
628                 // have just exited a system call, it may be executing code such 601                 // have just exited a system call, it may be executing code such
629                 // as schedlock and may have needed to start a new stack segment. 602                 // as schedlock and may have needed to start a new stack segment.
630 // Use the stack segment and stack pointer at the time of 603 // Use the stack segment and stack pointer at the time of
631 // the system call instead, since that won't change underfoot. 604 // the system call instead, since that won't change underfoot.
632 if(gp->gcstack != nil) { 605 if(gp->gcstack != nil) {
633 stk = (Stktop*)gp->gcstack; 606 stk = (Stktop*)gp->gcstack;
634 sp = gp->gcsp; 607 sp = gp->gcsp;
635 guard = gp->gcguard; 608 guard = gp->gcguard;
636 } 609 }
637 } 610 }
638 611
639 n = 0; 612 n = 0;
640 while(stk) { 613 while(stk) {
641 if(sp < guard-StackGuard || (byte*)stk < sp) { 614 if(sp < guard-StackGuard || (byte*)stk < sp) {
642 runtime·printf("scanstack inconsistent: g%d#%d sp=%p not in [%p,%p]\n", gp->goid, n, sp, guard-StackGuard, stk); 615 runtime·printf("scanstack inconsistent: g%d#%d sp=%p not in [%p,%p]\n", gp->goid, n, sp, guard-StackGuard, stk);
643 runtime·throw("scanstack"); 616 runtime·throw("scanstack");
644 } 617 }
645 addroot(sp, (byte*)stk - sp); 618 addroot(sp, (byte*)stk - sp);
646 sp = stk->gobuf.sp; 619 sp = stk->gobuf.sp;
647 guard = stk->stackguard; 620 guard = stk->stackguard;
648 stk = (Stktop*)stk->stackbase; 621 stk = (Stktop*)stk->stackbase;
649 n++; 622 n++;
650 } 623 }
651 } 624 }
652 625
653 static void 626 static void
654 collectfinroots(void *v) 627 addfinroots(void *v)
655 { 628 {
656 uintptr size; 629 uintptr size;
657 630
658 size = 0; 631 size = 0;
659 if(!runtime·mlookup(v, &v, &size, nil) || !runtime·blockspecial(v)) 632 if(!runtime·mlookup(v, &v, &size, nil) || !runtime·blockspecial(v))
660 runtime·throw("mark - finalizer inconsistency"); 633 runtime·throw("mark - finalizer inconsistency");
661 634
662         // do not mark the finalizer block itself. just mark the things it points at. 635         // do not mark the finalizer block itself. just mark the things it points at.
663 addroot(v, size); 636 addroot(v, size);
664 } 637 }
665 638
666 static void 639 static void
667 collectroots(void) 640 addroots(void)
668 { 641 {
669 G *gp; 642 G *gp;
670 FinBlock *fb; 643 FinBlock *fb;
671         byte *p, *e; 644         byte *p;
672 645
673 work.nroot = 0; 646 work.nroot = 0;
674 647
675         e = (byte*)&runtime·mheap; 648         // mark data+bss.
676         for(p=data; p<e; p+=DataBlock) 649         for(p=data; p<ebss; p+=DataBlock)
677                 addroot(p, p+DataBlock<e?DataBlock:e-p); 650                 addroot(p, p+DataBlock<ebss?DataBlock:ebss-p);
678 651
679         e = end; 652         runtime·walkfintab(addfinroots);
680         for(p=(byte*)(&runtime·mheap+1); p<e; p+=DataBlock)
681                 addroot(p, p+DataBlock<e?DataBlock:e-p);
682
683         runtime·walkfintab(collectfinroots);
684 653
685 for(fb=allfin; fb; fb=fb->alllink) 654 for(fb=allfin; fb; fb=fb->alllink)
686 addroot((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0])); 655 addroot((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]));
687 656
688 for(gp=runtime·allg; gp!=nil; gp=gp->alllink) { 657 for(gp=runtime·allg; gp!=nil; gp=gp->alllink) {
689 switch(gp->status){ 658 switch(gp->status){
690 default: 659 default:
691                         runtime·printf("unexpected G.status %d\n", gp->status); 660                         runtime·printf("unexpected G.status %d\n", gp->status);
692 runtime·throw("mark - bad status"); 661 runtime·throw("mark - bad status");
693 case Gdead: 662 case Gdead:
694 break; 663 break;
695 case Grunning: 664 case Grunning:
696 if(gp != g) 665 if(gp != g)
697                                 runtime·throw("mark - world not stopped"); 666                                 runtime·throw("mark - world not stopped");
698                                 collectstackroots(gp); 667                                 addstackroots(gp);
699 break; 668 break;
700 case Grunnable: 669 case Grunnable:
701 case Gsyscall: 670 case Gsyscall:
702 case Gwaiting: 671 case Gwaiting:
703                                 collectstackroots(gp); 672                                 addstackroots(gp);
704 break; 673 break;
705 } 674 }
706 } 675 }
707 } 676 }
708 677
709 static bool 678 static bool
710 handlespecial(byte *p, uintptr size) 679 handlespecial(byte *p, uintptr size)
711 { 680 {
712 void (*fn)(void*); 681 void (*fn)(void*);
713 int32 nret; 682 int32 nret;
(...skipping 38 matching lines...)
752 uintptr size; 721 uintptr size;
753 byte *p; 722 byte *p;
754 MCache *c; 723 MCache *c;
755 byte *arena_start; 724 byte *arena_start;
756 MLink *start, *end; 725 MLink *start, *end;
757 int32 nfree; 726 int32 nfree;
758 727
759 USED(desc); 728 USED(desc);
760 729
761 s = runtime·mheap.allspans[spanidx]; 730 s = runtime·mheap.allspans[spanidx];
731 // Stamp newly unused spans. The scavenger will use that
732 // info to potentially give back some pages to the OS.
733 if(s->state == MSpanFree && s->unusedsince == 0)
734 s->unusedsince = runtime·nanotime();
762 if(s->state != MSpanInUse) 735 if(s->state != MSpanInUse)
763 return; 736 return;
764 737
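The stamp above is consumed outside this file. A hypothetical sketch of the kind of check a scavenger pass would make (the loop below is an illustration of that idea, not the CL's code; only the span fields and runtime·SysUnused are taken from the runtime):

	// Illustrative only: release pages of spans left unused longer than `limit`.
	static void
	scavenge_sketch(int64 now, int64 limit)
	{
		uint32 i;
		MSpan *s;

		for(i = 0; i < runtime·mheap.nspan; i++) {
			s = runtime·mheap.allspans[i];
			if(s->state == MSpanFree && s->unusedsince != 0
					&& s->unusedsince + limit < now) {
				// give the span's pages back to the OS
				runtime·SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
				s->unusedsince = 0;  // avoid releasing the same span twice (illustrative)
			}
		}
	}
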
765 arena_start = runtime·mheap.arena_start; 738 arena_start = runtime·mheap.arena_start;
766 739
767 p = (byte*)(s->start << PageShift); 740 p = (byte*)(s->start << PageShift);
768 cl = s->sizeclass; 741 cl = s->sizeclass;
769 if(cl == 0) { 742 if(cl == 0) {
770 size = s->npages<<PageShift; 743 size = s->npages<<PageShift;
771 n = 1; 744 n = 1;
772 } else { 745 } else {
773 // Chunk full of small blocks. 746 // Chunk full of small blocks.
774 size = runtime·class_to_size[cl]; 747 size = runtime·class_to_size[cl];
775 npages = runtime·class_to_allocnpages[cl]; 748 npages = runtime·class_to_allocnpages[cl];
776 n = (npages << PageShift) / size; 749 n = (npages << PageShift) / size;
777 } 750 }
778 c = m->mcache; 751 c = m->mcache;
779 nfree = 0; 752 nfree = 0;
780 start = end = nil; 753 start = end = nil;
781 754
782 // Sweep through n objects of given size starting at p. 755 // Sweep through n objects of given size starting at p.
783 // This thread owns the span now, so it can manipulate 756 // This thread owns the span now, so it can manipulate
784 // the block bitmap without atomic operations. 757 // the block bitmap without atomic operations.
785 for(; n > 0; n--, p += size) { 758 for(; n > 0; n--, p += size) {
786 uintptr off, *bitp, shift, bits; 759 uintptr off, *bitp, shift, bits;
787 760
788 off = (uintptr*)p - (uintptr*)arena_start; 761 off = (uintptr*)p - (uintptr*)arena_start;
789 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; 762 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
790                 PREFETCH((byte*)bitp - size); 763                 PREFETCH((byte*)bitp - size);
791 shift = off % wordsPerBitmapWord; 764 shift = off % wordsPerBitmapWord;
792 bits = *bitp>>shift; 765 bits = *bitp>>shift;
793 766
794 if((bits & bitAllocated) == 0) 767 if((bits & bitAllocated) == 0)
795 continue; 768 continue;
796 769
797 if((bits & bitMarked) != 0) { 770 if((bits & bitMarked) != 0) {
798 if(DebugMark) { 771 if(DebugMark) {
799 if(!(bits & bitSpecial)) 772 if(!(bits & bitSpecial))
800                                         runtime·printf("found spurious mark on %p\n", p); 773                                         runtime·printf("found spurious mark on %p\n", p);
(...skipping 16 matching lines...)
817 790
818 if(cl == 0) { 791 if(cl == 0) {
819 // Free large span. 792 // Free large span.
820 runtime·unmarkspan(p, 1<<PageShift); 793 runtime·unmarkspan(p, 1<<PageShift);
821 *(uintptr*)p = 1; // needs zeroing 794 *(uintptr*)p = 1; // needs zeroing
822 runtime·MHeap_Free(&runtime·mheap, s, 1); 795 runtime·MHeap_Free(&runtime·mheap, s, 1);
823 c->local_alloc -= size; 796 c->local_alloc -= size;
824 c->local_nfree++; 797 c->local_nfree++;
825 } else { 798 } else {
826 // Free small object. 799 // Free small object.
827 // if(size > sizeof(uintptr))
828 // ((uintptr*)p)[1] = 1; // mark as "needs to be zeroed"
829 if(nfree) { 800 if(nfree) {
830                                 PREFETCH(p); 801                                 PREFETCH(p);
831 if(size > sizeof(uintptr)) 802 if(size > sizeof(uintptr))
832                                         ((uintptr*)end)[1] = 1; // mark as "needs to be zeroed" 803                                         ((uintptr*)end)[1] = 1; // mark as "needs to be zeroed"
833 end->next = (MLink*)p; 804 end->next = (MLink*)p;
834 end = (MLink*)p; 805 end = (MLink*)p;
835 } else { 806 } else {
836 start = (MLink*)p; 807 start = (MLink*)p;
837 end = (MLink*)p; 808 end = (MLink*)p;
838 } 809 }
839 nfree++; 810 nfree++;
840 } 811 }
(...skipping 25 matching lines...)
866 while(runtime·atomicload(&work.debugmarkdone) == 0) 837 while(runtime·atomicload(&work.debugmarkdone) == 0)
867 runtime·usleep(10); 838 runtime·usleep(10);
868 } 839 }
869 840
870         // parallel sweep over all spans 841         // parallel sweep over all spans
871 parfor(&work.sweepfor); 842 parfor(&work.sweepfor);
872 843
873 if(runtime·xadd(&work.ndone, +1) == work.nproc-1) 844 if(runtime·xadd(&work.ndone, +1) == work.nproc-1)
874 runtime·notewakeup(&work.alldone); 845 runtime·notewakeup(&work.alldone);
875 } 846 }
876
877 // Semaphore, not Lock, so that the goroutine
878 // reschedules when there is contention rather
879 // than spinning.
880 static uint32 gcsema = 1;
881 847
882 // Initialized from $GOGC. GOGC=off means no gc. 848 // Initialized from $GOGC. GOGC=off means no gc.
883 // 849 //
884 // Next gc is after we've allocated an extra amount of 850 // Next gc is after we've allocated an extra amount of
885 // memory proportional to the amount already in use. 851 // memory proportional to the amount already in use.
886 // If gcpercent=100 and we're using 4M, we'll gc again 852 // If gcpercent=100 and we're using 4M, we'll gc again
887 // when we get to 8M. This keeps the gc cost in linear 853 // when we get to 8M. This keeps the gc cost in linear
888 // proportion to the allocation cost. Adjusting gcpercent 854 // proportion to the allocation cost. Adjusting gcpercent
889 // just changes the linear constant (and also the amount of 855 // just changes the linear constant (and also the amount of
890 // extra memory used). 856 // extra memory used).
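In code form the rule above amounts to a one-line update after each collection, sketched here (hedged: the CL's exact statement sits in lines not shown in this excerpt):

	// With gcpercent=100 and 4M live, next_gc becomes 8M, as the comment says.
	mstats.next_gc = mstats.heap_alloc + mstats.heap_alloc*gcpercent/100;
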
(...skipping 74 matching lines...)
965 else 931 else
966 gcpercent = runtime·atoi(p); 932 gcpercent = runtime·atoi(p);
967 933
968 p = runtime·getenv("GOGCTRACE"); 934 p = runtime·getenv("GOGCTRACE");
969 if(p != nil) 935 if(p != nil)
970 gctrace = runtime·atoi(p); 936 gctrace = runtime·atoi(p);
971 } 937 }
972 if(gcpercent < 0) 938 if(gcpercent < 0)
973 return; 939 return;
974 940
975         runtime·semacquire(&gcsema); 941         runtime·semacquire(&runtime·worldsema);
976 if(!force && mstats.heap_alloc < mstats.next_gc) { 942 if(!force && mstats.heap_alloc < mstats.next_gc) {
977                 runtime·semrelease(&gcsema); 943                 runtime·semrelease(&runtime·worldsema);
978 return; 944 return;
979 } 945 }
980 946
981 t0 = runtime·nanotime(); 947 t0 = runtime·nanotime();
982 948
983 m->gcing = 1; 949 m->gcing = 1;
984 runtime·stoptheworld(); 950 runtime·stoptheworld();
985 951
986 heap0 = 0; 952 heap0 = 0;
987 obj0 = 0; 953 obj0 = 0;
988 if(gctrace) { 954 if(gctrace) {
989 cachestats(0); 955 cachestats(0);
990 heap0 = mstats.heap_alloc; 956 heap0 = mstats.heap_alloc;
991 obj0 = mstats.nmalloc - mstats.nfree; 957 obj0 = mstats.nmalloc - mstats.nfree;
992 } 958 }
993 959
994 work.nproc = runtime·gcprocs(); 960 work.nproc = runtime·gcprocs();
995 work.nwait = 0; 961 work.nwait = 0;
996 work.ndone = 0; 962 work.ndone = 0;
997 work.debugmarkdone = 0; 963 work.debugmarkdone = 0;
998         collectroots(); 964         addroots();
999         parforsetup(&work.markfor, markroot, work.nproc, work.nroot, nil, false); 965         parforsetup(&work.markfor, markroot, work.nproc, work.nroot, nil, false);
1000 parforsetup(&work.sweepfor, sweepspan, work.nproc, runtime·mheap.nspan, nil, true); 966 parforsetup(&work.sweepfor, sweepspan, work.nproc, runtime·mheap.nspan, nil, true);
1001 if(work.nproc > 1) { 967 if(work.nproc > 1) {
1002 runtime·noteclear(&work.alldone); 968 runtime·noteclear(&work.alldone);
1003                 runtime·helpgc(); 969                 runtime·helpgc(work.nproc);
1004 } 970 }
1005 971
1006 parfor(&work.markfor); 972 parfor(&work.markfor);
1007 scanblock(nil, 0); 973 scanblock(nil, 0);
1008 974
1009 if(DebugMark) { 975 if(DebugMark) {
1010 for(i=0; i<work.nroot; i++) 976 for(i=0; i<work.nroot; i++)
1011 debug_scanblock(work.roots[i].p, work.roots[i].n); 977 debug_scanblock(work.roots[i].p, work.roots[i].n);
1012 runtime·xchg(&work.debugmarkdone, 1); 978 runtime·xchg(&work.debugmarkdone, 1);
1013 } 979 }
(...skipping 20 matching lines...)
1034 m->locks--; 1000 m->locks--;
1035 } 1001 }
1036 1002
1037 if(work.nproc > 1) 1003 if(work.nproc > 1)
1038 runtime·notesleep(&work.alldone); 1004 runtime·notesleep(&work.alldone);
1039 1005
1040 heap1 = mstats.heap_alloc; 1006 heap1 = mstats.heap_alloc;
1041 obj1 = mstats.nmalloc - mstats.nfree; 1007 obj1 = mstats.nmalloc - mstats.nfree;
1042 1008
1043 t3 = runtime·nanotime(); 1009 t3 = runtime·nanotime();
1010 mstats.last_gc = t3;
1044 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0; 1011 mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
1045 mstats.pause_total_ns += t3 - t0; 1012 mstats.pause_total_ns += t3 - t0;
1046 mstats.numgc++; 1013 mstats.numgc++;
1047 if(mstats.debuggc) 1014 if(mstats.debuggc)
1048 runtime·printf("pause %D\n", t3-t0); 1015 runtime·printf("pause %D\n", t3-t0);
1049 1016
1050 if(gctrace) { 1017 if(gctrace) {
1051                 runtime·printf("gc%d(%d): %D+%D+%D ms %D -> %D MB %D -> %D (%D-%D) objects %D pointer lookups (%D size, %D addr), %D(%D) steals, %D(%D) handoffs, %D/%D/%D yields\n", 1018                 runtime·printf("gc%d(%d): %D+%D+%D ms %D -> %D MB %D -> %D (%D-%D) objects, %D(%D) steals, %D(%D) handoffs, %D/%D/%D yields\n",
1052                         mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000, 1019                         mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
1053 heap0>>20, heap1>>20, obj0, obj1, 1020 heap0>>20, heap1>>20, obj0, obj1,
1054 mstats.nmalloc, mstats.nfree, 1021 mstats.nmalloc, mstats.nfree,
1055 stats.nsizelookup+stats.naddrlookup, stats.nsizelookup, stats.naddrlookup,
1056 stats.nsteal, stats.nstealcnt, 1022 stats.nsteal, stats.nstealcnt,
1057 stats.nhandoff, stats.nhandoffcnt, 1023 stats.nhandoff, stats.nhandoffcnt,
1058 stats.nprocyield, stats.nosyield, stats.nsleep); 1024 stats.nprocyield, stats.nosyield, stats.nsleep);
1059 } 1025 }
1060 1026
1061         runtime·semrelease(&gcsema); 1027         runtime·MProf_GC();
1028         runtime·semrelease(&runtime·worldsema);
1062 runtime·starttheworld(); 1029 runtime·starttheworld();
1063 1030
1064 // give the queued finalizers, if any, a chance to run 1031 // give the queued finalizers, if any, a chance to run
1065 if(finq != nil) 1032 if(finq != nil)
1066 runtime·gosched(); 1033 runtime·gosched();
1067 1034
1068 if(gctrace > 1 && !force) 1035 if(gctrace > 1 && !force)
1069 runtime·gc(1); 1036 runtime·gc(1);
1070 } 1037 }
1071 1038
1072 void 1039 void
1073 runtime·UpdateMemStats(void) 1040 runtime·ReadMemStats(MStats *stats)
1074 { 1041 {
1075         // Have to acquire gcsema to stop the world, 1042         // Have to acquire worldsema to stop the world,
1076 // because stoptheworld can only be used by 1043 // because stoptheworld can only be used by
1077 // one goroutine at a time, and there might be 1044 // one goroutine at a time, and there might be
1078 // a pending garbage collection already calling it. 1045 // a pending garbage collection already calling it.
1079         runtime·semacquire(&gcsema); 1046         runtime·semacquire(&runtime·worldsema);
1080 m->gcing = 1; 1047 m->gcing = 1;
1081 runtime·stoptheworld(); 1048 runtime·stoptheworld();
1082 cachestats(0); 1049 cachestats(0);
1050 *stats = mstats;
1083 m->gcing = 0; 1051 m->gcing = 0;
1084         runtime·semrelease(&gcsema); 1052         runtime·semrelease(&runtime·worldsema);
1085 runtime·starttheworld(); 1053 runtime·starttheworld();
1086 } 1054 }
1087 1055
1088 static void 1056 static void
1089 runfinq(void) 1057 runfinq(void)
1090 { 1058 {
1091 Finalizer *f; 1059 Finalizer *f;
1092 FinBlock *fb, *next; 1060 FinBlock *fb, *next;
1093 byte *frame; 1061 byte *frame;
1094 uint32 framesz, framecap, i; 1062 uint32 framesz, framecap, i;
(...skipping 236 matching lines...)
1331 1299
1332 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; 1300 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
1333 n = (n+bitmapChunk-1) & ~(bitmapChunk-1); 1301 n = (n+bitmapChunk-1) & ~(bitmapChunk-1);
1334 if(h->bitmap_mapped >= n) 1302 if(h->bitmap_mapped >= n)
1335 return; 1303 return;
1336 1304
1337 runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped); 1305 runtime·SysMap(h->arena_start - n, n - h->bitmap_mapped);
1338 h->bitmap_mapped = n; 1306 h->bitmap_mapped = n;
1339 } 1307 }
1340 1308
1309 #ifdef _64BIT
1341 // Amd64 uses 48-bit virtual addresses, and the 47th bit is used as the kernel/user flag. 1310 // Amd64 uses 48-bit virtual addresses, and the 47th bit is used as the kernel/user flag.
1342 // So we use the 17 most significant bits of pointers as an ABA counter. 1311 // So we use the 17 most significant bits of pointers as an ABA counter.
1343 #define PTR_BITS 47 1312 # define PTR_BITS 47
1313 #else
1314 # define PTR_BITS 32
1315 #endif
1344 #define PTR_MASK ((1ull<<PTR_BITS)-1) 1316 #define PTR_MASK ((1ull<<PTR_BITS)-1)
1345 1317
1346 static void 1318 static void
1347 lifopush(uint64 *a, Workbuf *b) 1319 lifopush(uint64 *a, Workbuf *b)
1348 { 1320 {
1349 uint64 old, new; 1321 uint64 old, new;
1350 1322
1351 if((uint64)b != ((uint64)b&PTR_MASK)) { 1323 if((uint64)b != ((uint64)b&PTR_MASK)) {
1352 runtime·printf("p=%p\n", b); 1324 runtime·printf("p=%p\n", b);
1353 runtime·throw("lifopush: invalid pointer"); 1325 runtime·throw("lifopush: invalid pointer");
1354 } 1326 }
1355 1327
1356 if(work.nproc == 1) { 1328 if(work.nproc == 1) {
1357 b->next = (Workbuf*)(*a&PTR_MASK); 1329 b->next = (Workbuf*)(*a&PTR_MASK);
1358 *a = (uint64)b; 1330 *a = (uint64)b;
1359 return; 1331 return;
1360 } 1332 }
1361 1333
1362 b->pushcnt++; 1334 b->pushcnt++;
1363 new = (uint64)b|(((uint64)b->pushcnt)<<PTR_BITS); 1335 new = (uint64)b|(((uint64)b->pushcnt)<<PTR_BITS);
1364 old = runtime·atomicload64(a); 1336 old = runtime·atomicload64(a);
1365 for(;;) { 1337 for(;;) {
1366 b->next = (Workbuf*)(old&PTR_MASK); 1338 b->next = (Workbuf*)(old&PTR_MASK);
1367 if(runtime·cas64(a, &old, new)) 1339 if(runtime·cas64(a, &old, new))
1368 break; 1340 break;
1369 } 1341 }
1370 } 1342 }
1371 1343
(...skipping 19 matching lines...)
1391 b2 = runtime·atomicloadp(&b->next); 1363 b2 = runtime·atomicloadp(&b->next);
1392 new = 0; 1364 new = 0;
1393 if(b2 != nil) 1365 if(b2 != nil)
1394 new = (uint64)b2|(((uint64)b2->pushcnt)<<PTR_BITS); 1366 new = (uint64)b2|(((uint64)b2->pushcnt)<<PTR_BITS);
1395 if(runtime·cas64(a, &old, new)) 1367 if(runtime·cas64(a, &old, new))
1396 return b; 1368 return b;
1397 } 1369 }
1398 } 1370 }
1399 1371
1400 void 1372 void
1401 runtime·CTestLockFreeStack(void) 1373 runtime·CTestLockFreeStack(bool isShort)
1402 { 1374 {
1403 uint64 stack; 1375 uint64 stack;
1404 Workbuf *b; 1376 Workbuf *b;
1377
1378 USED(isShort);
1405 1379
1406 for(work.nproc=1; work.nproc<=2; work.nproc++) { 1380 for(work.nproc=1; work.nproc<=2; work.nproc++) {
1407 // check the stack is initially empty 1381 // check the stack is initially empty
1408 stack = 0; 1382 stack = 0;
1409 if(lifopop(&stack) != nil) 1383 if(lifopop(&stack) != nil)
1410 runtime·panicstring("stack is not empty"); 1384 runtime·panicstring("stack is not empty");
1411 1385
1412 // push one element 1386 // push one element
1413 b = (Workbuf*)runtime·mallocgc(sizeof(Workbuf), FlagNoGC, 0, 0); 1387 b = (Workbuf*)runtime·mallocgc(sizeof(Workbuf), FlagNoGC, 0, 0);
1414 b->nobj = 42; 1388 b->nobj = 42;
1415 lifopush(&stack, b); 1389 lifopush(&stack, b);
1416 1390
1417 // push another 1391 // push another
1418 b = (Workbuf*)runtime·mallocgc(sizeof(Workbuf), FlagNoGC, 0, 0); 1392 b = (Workbuf*)runtime·mallocgc(sizeof(Workbuf), FlagNoGC, 0, 0);
1419 b->nobj = 43; 1393 b->nobj = 43;
1420 lifopush(&stack, b); 1394 lifopush(&stack, b);
1421 1395
1422 // pop one element 1396 // pop one element
1423 b = lifopop(&stack); 1397 b = lifopop(&stack);
1424 if(b == nil) 1398 if(b == nil)
1425 runtime·panicstring("stack is empty"); 1399 runtime·panicstring("stack is empty");
1426 if(b->nobj != 43) 1400 if(b->nobj != 43)
(...skipping 12 matching lines...)
1439 if(stack != 0) 1413 if(stack != 0)
1440 runtime·panicstring("stack is not empty"); 1414 runtime·panicstring("stack is not empty");
1441 if(lifopop(&stack) != nil) 1415 if(lifopop(&stack) != nil)
1442 runtime·panicstring("stack is not empty"); 1416 runtime·panicstring("stack is not empty");
1443 } 1417 }
1444 } 1418 }
1445 1419
1446 typedef struct StackTestCtx StackTestCtx; 1420 typedef struct StackTestCtx StackTestCtx;
1447 struct StackTestCtx 1421 struct StackTestCtx
1448 { 1422 {
1423 uint32 niter;
1449 uint32 waitsema; 1424 uint32 waitsema;
1450 uint64 stack[2]; 1425 uint64 stack[2];
1451 }; 1426 };
1452 1427
1453 static void 1428 static void
1454 stackTestProc(StackTestCtx *ctx) 1429 stackTestProc(StackTestCtx *ctx)
1455 { 1430 {
1456 int32 i, n; 1431 int32 i, n;
1457 Workbuf *b; 1432 Workbuf *b;
1458 1433
1459         for(i=0; i<100000; i++) { 1434         for(i=0; i<ctx->niter; i++) {
1460 n = runtime·fastrand1()%2; 1435 n = runtime·fastrand1()%2;
1461 b = lifopop(&ctx->stack[n]); 1436 b = lifopop(&ctx->stack[n]);
1462 if(b==nil) 1437 if(b==nil)
1463 continue; 1438 continue;
1464 n = runtime·fastrand1()%2; 1439 n = runtime·fastrand1()%2;
1465 lifopush(&ctx->stack[n], b); 1440 lifopush(&ctx->stack[n], b);
1466 } 1441 }
1467 runtime·semrelease(&ctx->waitsema); 1442 runtime·semrelease(&ctx->waitsema);
1468 } 1443 }
1469 1444
1470 void 1445 void
1471 runtime·CTestLockFreeStackStress(void) 1446 runtime·CTestLockFreeStackStress(bool isShort)
1472 { 1447 {
1473 StackTestCtx ctx, *arg; 1448 StackTestCtx ctx, *arg;
1474 Workbuf *b; 1449 Workbuf *b;
1475 int32 i, sum, sum2, cnt, procs; 1450 int32 i, sum, sum2, cnt, procs;
1476 const int32 N = 100; 1451 const int32 N = 100;
1477 const int32 P = 8; 1452 const int32 P = 8;
1478 const int32 G = 16; 1453 const int32 G = 16;
1479 1454
1480 procs = runtime·gomaxprocsfunc(P); 1455 procs = runtime·gomaxprocsfunc(P);
1481 work.nproc = P; 1456 work.nproc = P;
1457 ctx.niter = isShort ? 10000 : 100000;
1482 ctx.waitsema = 0; 1458 ctx.waitsema = 0;
1483 ctx.stack[0] = 0; 1459 ctx.stack[0] = 0;
1484 ctx.stack[1] = 0; 1460 ctx.stack[1] = 0;
1485 arg = &ctx; 1461 arg = &ctx;
1486 sum = 0; 1462 sum = 0;
1487 for(i=0; i<N; i++) { 1463 for(i=0; i<N; i++) {
1488 b = (Workbuf*)runtime·mallocgc(sizeof(Workbuf), FlagNoGC, 0, 0); 1464 b = (Workbuf*)runtime·mallocgc(sizeof(Workbuf), FlagNoGC, 0, 0);
1489 b->nobj = i; 1465 b->nobj = i;
1490 sum += i; 1466 sum += i;
1491 lifopush(&ctx.stack[i%2], b); 1467 lifopush(&ctx.stack[i%2], b);
(...skipping 164 matching lines...)
1656 } 1632 }
1657 1633
1658 static void 1634 static void
1659 testParforProc(Parfor *desc) 1635 testParforProc(Parfor *desc)
1660 { 1636 {
1661 parfor(desc); 1637 parfor(desc);
1662 } 1638 }
1663 1639
1664 // Simple serial sanity test for parallelfor. 1640 // Simple serial sanity test for parallelfor.
1665 void 1641 void
1666 runtime·CTestParfor(void) 1642 runtime·CTestParfor(bool isShort)
1667 { 1643 {
1668 Parfor desc; 1644 Parfor desc;
1669 uint64 *data; 1645 uint64 *data;
1670 uint32 i, N; 1646 uint32 i, N;
1647
1648 USED(isShort);
1671 1649
1672 N = 1000; 1650 N = 1000;
1673 data = (uint64*)runtime·mal(N*sizeof(uint64)); 1651 data = (uint64*)runtime·mal(N*sizeof(uint64));
1674 for(i=0; i<N; i++) 1652 for(i=0; i<N; i++)
1675 data[i] = i; 1653 data[i] = i;
1676 parforsetup(&desc, testParforBody, 1, N, data, true); 1654 parforsetup(&desc, testParforBody, 1, N, data, true);
1677 parfor(&desc); 1655 parfor(&desc);
1678 for(i=0; i<N; i++) { 1656 for(i=0; i<N; i++) {
1679 if(data[i] != i*i) 1657 if(data[i] != i*i)
1680 runtime·panicstring("incorrect result"); 1658 runtime·panicstring("incorrect result");
1681 } 1659 }
1682 runtime·free(data); 1660 runtime·free(data);
1683 } 1661 }
1684 1662
1685 // Test that iterations are properly distributed. 1663 // Test that iterations are properly distributed.
1686 void 1664 void
1687 runtime·CTestParforSetup(void) 1665 runtime·CTestParforSetup(bool isShort)
1688 { 1666 {
1689 Parfor desc; 1667 Parfor desc;
1690 uint32 n, t, i, begin, end, size, end0, size0, sum; 1668 uint32 n, t, i, begin, end, size, end0, size0, sum;
1669
1670 USED(isShort);
1691 1671
1692 for(n=0; n<100; n++) { 1672 for(n=0; n<100; n++) {
1693 for(t=1; t<=MaxGcproc; t++) { 1673 for(t=1; t<=MaxGcproc; t++) {
1694 parforsetup(&desc, testParforBody, t, n, 0, true); 1674 parforsetup(&desc, testParforBody, t, n, 0, true);
1695 sum = 0; 1675 sum = 0;
1696 size0 = 0; 1676 size0 = 0;
1697 end0 = 0; 1677 end0 = 0;
1698 for(i=0; i<t; i++) { 1678 for(i=0; i<t; i++) {
1699 begin = (uint32)desc.thr[i].pos; 1679 begin = (uint32)desc.thr[i].pos;
1700 end = (uint32)(desc.thr[i].pos>>32); 1680 end = (uint32)(desc.thr[i].pos>>32);
(...skipping 12 matching lines...) Expand all
1713 end0 = end; 1693 end0 = end;
1714 } 1694 }
1715 if(sum != n) 1695 if(sum != n)
1716 runtime·panicstring("incorrect sum"); 1696 runtime·panicstring("incorrect sum");
1717 } 1697 }
1718 } 1698 }
1719 } 1699 }
1720 1700
1721 // Test that nonblocking parallelfor does not block. 1701 // Test that nonblocking parallelfor does not block.
1722 void 1702 void
1723 runtime·CTestParforNonblock(void) 1703 runtime·CTestParforNonblock(bool isShort)
1724 { 1704 {
1725 Parfor desc; 1705 Parfor desc;
1726 uint64 *data; 1706 uint64 *data;
1727 uint32 i, N; 1707 uint32 i, N;
1728 1708
1709 » USED(isShort);
1710
1729 N = 1000; 1711 N = 1000;
1730 data = (uint64*)runtime·mal(N*sizeof(uint64)); 1712 data = (uint64*)runtime·mal(N*sizeof(uint64));
1731 for(i=0; i<N; i++) 1713 for(i=0; i<N; i++)
1732 data[i] = i; 1714 data[i] = i;
1733 parforsetup(&desc, testParforBody, MaxGcproc, N, data, false); 1715 parforsetup(&desc, testParforBody, MaxGcproc, N, data, false);
1734 for(i=0; i<MaxGcproc; i++) 1716 for(i=0; i<MaxGcproc; i++)
1735 parfor(&desc); 1717 parfor(&desc);
1736 for(i=0; i<N; i++) { 1718 for(i=0; i<N; i++) {
1737 if(data[i] != i*i) 1719 if(data[i] != i*i)
1738 runtime·panicstring("incorrect result"); 1720 runtime·panicstring("incorrect result");
1739 } 1721 }
1740 runtime·free(data); 1722 runtime·free(data);
1741 } 1723 }
1742 1724
1743 // Test parallel parallelfor. 1725 // Test parallel parallelfor.
1744 void 1726 void
1745 runtime·CTestParforParallel(void) 1727 runtime·CTestParforParallel(bool isShort)
1746 { 1728 {
1747 Parfor desc, *arg; 1729 Parfor desc, *arg;
1748 uint64 *data; 1730 uint64 *data;
1749 uint32 i, N; 1731 uint32 i, N;
1750 int32 procs; 1732 int32 procs;
1751 1733
1752 procs = runtime·gomaxprocsfunc(MaxGcproc+1); 1734 procs = runtime·gomaxprocsfunc(MaxGcproc+1);
1753 N = 10000; 1735 N = 10000;
1736 if(isShort)
1737 N /= 10;
1754 data = (uint64*)runtime·mal(N*sizeof(uint64)); 1738 data = (uint64*)runtime·mal(N*sizeof(uint64));
1755 for(i=0; i<N; i++) 1739 for(i=0; i<N; i++)
1756 data[i] = i; 1740 data[i] = i;
1757 parforsetup(&desc, testParforBody, MaxGcproc, N, data, true); 1741 parforsetup(&desc, testParforBody, MaxGcproc, N, data, true);
1758 arg = &desc; 1742 arg = &desc;
1759 m->locks++; // disable gc during the mallocs in newproc 1743 m->locks++; // disable gc during the mallocs in newproc
1760 for(i=1; i<MaxGcproc; i++) 1744 for(i=1; i<MaxGcproc; i++)
1761                 runtime·newproc1((byte*)testParforProc, (byte*)&arg, sizeof(arg), 0, runtime·CTestParforParallel); 1745                 runtime·newproc1((byte*)testParforProc, (byte*)&arg, sizeof(arg), 0, runtime·CTestParforParallel);
1762 m->locks--; 1746 m->locks--;
1763 parfor(&desc); 1747 parfor(&desc);
1764 for(i=0; i<N; i++) { 1748 for(i=0; i<N; i++) {
1765 if(data[i] != i*i) 1749 if(data[i] != i*i)
1766 runtime·panicstring("incorrect result"); 1750 runtime·panicstring("incorrect result");
1767 } 1751 }
1768 runtime·free(data); 1752 runtime·free(data);
1769 runtime·gomaxprocsfunc(procs); 1753 runtime·gomaxprocsfunc(procs);
1770 } 1754 }
LEFT | RIGHT
