
Unified diff: src/pkg/sync/pool.go

Issue 46010043: code review 46010043: sync: scalable Pool (Closed)
Patch Set: diff -r 50f52d5c2bb7 https://dvyukov%40google.com@code.google.com/p/go/ (created 10 years, 2 months ago)
--- a/src/pkg/sync/pool.go
+++ b/src/pkg/sync/pool.go
@@ -1,75 +1,210 @@
 // Copyright 2013 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
 package sync
 
+import (
+	"runtime"
+	"sync/atomic"
+	"unsafe"
+)
+
+const (
+	cacheLineSize = 128
+	poolLocalSize = 2 * cacheLineSize
+	poolLocalCap  = poolLocalSize/unsafe.Sizeof(*(*interface{})(nil)) - 1
+)
+
 // A Pool is a set of temporary objects that may be individually saved
 // and retrieved.
 //
 // Any item stored in the Pool may be removed automatically by the
 // implementation at any time without notification.
 // If the Pool holds the only reference when this happens, the item
 // might be deallocated.
 //
 // A Pool is safe for use by multiple goroutines simultaneously.
 //
 // Pool's intended use is for free lists maintained in global variables,
 // typically accessed by multiple goroutines simultaneously. Using a
 // Pool instead of a custom free list allows the runtime to reclaim
 // entries from the pool when it makes sense to do so. An
 // appropriate use of sync.Pool is to create a pool of temporary buffers
 // shared between independent clients of a global resource. On the
 // other hand, if a free list is maintained as part of an object used
 // only by a single client and freed when the client completes,
 // implementing that free list as a Pool is not appropriate.
 //
 // This is an experimental type and might not be released.
 type Pool struct {
-	next *Pool         // for use by runtime. must be first.
-	list []interface{} // offset known to runtime
-	mu   Mutex         // guards list
+	// The following fields are known to runtime.
+	next         *Pool      // for use by runtime
+	local        *poolLocal // local fixed-size per-P pool, actually an array
+	localSize    uintptr    // size of the local array
+	globalOffset uintptr    // offset of global
+	// The rest is not known to runtime.
 
 	// New optionally specifies a function to generate
 	// a value when Get would otherwise return nil.
 	// It may not be changed concurrently with calls to Get.
 	New func() interface{}
+
+	pad [cacheLineSize]byte
+	// Read-mostly data above this point, mutable data follows.
+	mu     Mutex
+	global []interface{} // global fallback pool
 }
 
-func runtime_registerPool(*Pool)
+// Local per-P Pool appendix.
+type poolLocal struct {
+	tail   int
+	unused int
+	buf    [poolLocalCap]interface{}
+}
+
+func init() {
+	var v poolLocal
+	if unsafe.Sizeof(v) != poolLocalSize {
+		panic("sync: incorrect pool size")
+	}
+}
 
 // Put adds x to the pool.
 func (p *Pool) Put(x interface{}) {
 	if x == nil {
 		return
 	}
-	p.mu.Lock()
-	if p.list == nil {
-		runtime_registerPool(p)
+	l := p.pin()
+	t := l.tail
+	if t < int(poolLocalCap) {
+		l.buf[t] = x
+		l.tail = t + 1
+		runtime_procUnpin()
+		return
 	}
-	p.list = append(p.list, x)
-	p.mu.Unlock()
+	p.putSlow(l, x)
 }
 
 // Get selects an arbitrary item from the Pool, removes it from the
 // Pool, and returns it to the caller.
 // Get may choose to ignore the pool and treat it as empty.
 // Callers should not assume any relation between values passed to Put and
 // the values returned by Get.
 //
 // If Get would otherwise return nil and p.New is non-nil, Get returns
 // the result of calling p.New.
 func (p *Pool) Get() interface{} {
+	l := p.pin()
+	t := l.tail
+	if t > 0 {
+		t -= 1
+		x := l.buf[t]
+		l.tail = t
+		runtime_procUnpin()
+		return x
+	}
+	return p.getSlow()
+}
+
+func (p *Pool) putSlow(l *poolLocal, x interface{}) {
+	// Grab half of items from local pool and put to global pool.
+	// Can not lock the mutex while pinned.
+	const N = int(poolLocalCap/2 + 1)
+	var buf [N]interface{}
+	buf[0] = x
+	for i := 1; i < N; i++ {
+		l.tail--
+		buf[i] = l.buf[l.tail]
+	}
+	runtime_procUnpin()
+
 	p.mu.Lock()
-	var x interface{}
-	if n := len(p.list); n > 0 {
-		x = p.list[n-1]
-		p.list[n-1] = nil // Just to be safe
-		p.list = p.list[:n-1]
+	p.global = append(p.global, buf[:]...)
+	p.mu.Unlock()
+}
+
+func (p *Pool) getSlow() (x interface{}) {
+	// Grab a batch of items from global pool and put to local pool.
+	// Can not lock the mutex while pinned.
+	runtime_procUnpin()
+	p.mu.Lock()
+	pid := runtime_procPin()
+	s := p.localSize
+	l := p.local
+	if uintptr(pid) < s {
+		l = indexLocal(l, pid)
+		// Get the item to return.
+		last := len(p.global) - 1
+		if last >= 0 {
+			x = p.global[last]
+			p.global = p.global[:last]
+		}
+		// Try to refill local pool, we may have been rescheduled to another P.
+		if last > 0 && l.tail == 0 {
+			n := int(poolLocalCap / 2)
+			gl := len(p.global)
+			if n > gl {
+				n = gl
+			}
+			copy(l.buf[:], p.global[gl-n:])
+			p.global = p.global[:gl-n]
+			l.tail = n
+		}
 	}
+	runtime_procUnpin()
 	p.mu.Unlock()
+
 	if x == nil && p.New != nil {
 		x = p.New()
 	}
-	return x
+	return
 }
+
+// pin pins the current goroutine to P, disables preemption and returns poolLocal pool for the P.
+// Caller must call runtime_procUnpin() when done with the pool.
+func (p *Pool) pin() *poolLocal {
+	pid := runtime_procPin()
+	// In pinSlow we store to localSize and then to local, here we load in opposite order.
+	// Since we've disabled preemption, GC can not happen in between.
+	// Thus here we must observe local at least as large as localSize.
+	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
+	s := atomic.LoadUintptr(&p.localSize) // load-acquire
+	l := p.local                          // load-consume
+	if uintptr(pid) < s {
+		return indexLocal(l, pid)
+	}
+	return p.pinSlow()
+}
+
+func (p *Pool) pinSlow() *poolLocal {
+	// Retry under the mutex.
+	runtime_procUnpin()
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	pid := runtime_procPin()
+	s := p.localSize
+	l := p.local
+	if uintptr(pid) < s {
+		return indexLocal(l, pid)
+	}
+	if p.local == nil {
+		p.globalOffset = unsafe.Offsetof(p.global)
+		runtime_registerPool(p)
+	}
+	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
+	size := runtime.GOMAXPROCS(0)
+	local := make([]poolLocal, size)
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.local)), unsafe.Pointer(&local[0])) // store-release
+	atomic.StoreUintptr(&p.localSize, uintptr(size))                                            // store-release
+	return &local[pid]
+}
+
+func indexLocal(l *poolLocal, i int) *poolLocal {
+	return (*poolLocal)(unsafe.Pointer(uintptr(unsafe.Pointer(l)) + unsafe.Sizeof(*l)*uintptr(i)))
+}
+
+// Implemented in runtime.
+func runtime_registerPool(*Pool)
+func runtime_procPin() int
+func runtime_procUnpin()
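
A note on the new constants: an interface{} value is two words, so on a 64-bit platform poolLocalCap works out to 256/16 - 1 = 15 buffer slots per P, and the two leading int fields of poolLocal occupy the slot given up by the "- 1", which is what the init check verifies. A standalone sketch of that arithmetic (illustrative only, not part of the patch; assumes a 64-bit platform):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	const cacheLineSize = 128
	const poolLocalSize = 2 * cacheLineSize // 256 bytes, two cache lines
	ifaceSize := unsafe.Sizeof(*(*interface{})(nil)) // 16 on 64-bit: type word + data word
	slots := poolLocalSize/ifaceSize - 1             // 15 buffer slots
	fmt.Println(ifaceSize, slots)                    // prints: 16 15
	// tail + unused (two ints, 16 bytes) + 15 slots * 16 bytes = 256 bytes total.
}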
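
The Pool doc comment describes the intended use: a free list of temporary buffers shared between independent clients of a global resource. A minimal caller-side sketch of that pattern (hypothetical example code, not taken from this CL):

package client

import (
	"bytes"
	"sync"
)

// bufPool hands out temporary buffers; the runtime may drop pooled
// buffers at any time, so callers must not rely on reuse.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func handle(data []byte) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	buf.Write(data)
	// ... use buf ...
	bufPool.Put(buf)
}

Callers must treat every Get result as possibly freshly allocated, since the implementation is free to discard pooled items.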
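
pin and pinSlow rely on a publication order: pinSlow stores the local array pointer before localSize (the store-release comments), and pin loads localSize before local (the load-acquire comment), so a reader that observes a given size also observes the array it describes. A simplified sketch of the same pattern in isolation (illustrative only; the real code uses a plain load for p.local, while this sketch uses atomic loads throughout):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

var (
	data unsafe.Pointer // *[]int, the published array
	size uintptr        // number of valid elements in *data
)

// publish stores the slice first and its size second,
// mirroring pinSlow's StorePointer-then-StoreUintptr sequence.
func publish(s []int) {
	atomic.StorePointer(&data, unsafe.Pointer(&s))
	atomic.StoreUintptr(&size, uintptr(len(s)))
}

// read loads the size first and the slice second, mirroring pin:
// if it observes a non-zero size, the matching data is visible too.
func read(i int) (int, bool) {
	if uintptr(i) >= atomic.LoadUintptr(&size) {
		return 0, false
	}
	s := *(*[]int)(atomic.LoadPointer(&data))
	return s[i], true
}

func main() {
	publish([]int{10, 20, 30})
	fmt.Println(read(1)) // 20 true
}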
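
The point of the change is scalability: the Put and Get fast paths touch only the current P's poolLocal and never take p.mu. A benchmark-style sketch of the access pattern the CL optimizes for (hypothetical test code, separate from the pool_test.go included in this CL):

package sync_test

import (
	"sync"
	"testing"
)

// BenchmarkPoolGetPut hammers a single Pool from all Ps at once; with the
// per-P poolLocal fast path, most Put/Get pairs never touch the mutex.
func BenchmarkPoolGetPut(b *testing.B) {
	var p sync.Pool
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			p.Put(1)
			_ = p.Get() // may be nil; Pool makes no guarantee the item survives
		}
	})
}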
