reqproc.go
package usbdlib

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"log"
	"runtime"
	"sync"
)
// RecommendWorkerCount returns an empirically derived heuristic for the
// optimal number of worker goroutines on this machine.
func RecommendWorkerCount() int {
	return runtime.NumCPU() * 6
}
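
// A hypothetical caller might size the pool with it, e.g.:
//
//	err := processRequests(ctx, stream, dev, RecommendWorkerCount())
//
// (ctx, stream, and dev are placeholders for the caller's context, NBD
// command stream, and Device implementation.)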
// reqProcessor reads requests from the NBD command stream (an
// io.ReadWriteCloser, typically a *NbdStream), executes them against the
// provided Device implementation, and then writes responses back to the
// command stream.
type reqProcessor struct {
	blockSize         int64
	cmdStrm           io.ReadWriteCloser
	dev               Device
	reqQueue          chan *request
	respQueue         chan *response
	reqPool, respPool sync.Pool
	ctx               context.Context
	ctxCancel         context.CancelFunc
	workersWg         sync.WaitGroup
}
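
// processRequests wires the pipeline together: readStrmWorker decodes
// requests from cmdStrm into reqQueue, a pool of reqIOWorkers executes them
// against the device and pushes results onto respQueue, and writeStrmWorker
// encodes the responses back onto cmdStrm. It blocks until the context is
// cancelled or the client disconnects.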
func processRequests(ctx context.Context, cmdStrm io.ReadWriteCloser, device Device, workerCount int) error {
	ctx, cancel := context.WithCancel(ctx)
	this := &reqProcessor{
		blockSize: device.BlockSize(),
		cmdStrm:   cmdStrm,
		dev:       device,
		reqQueue:  make(chan *request, 64),
		respQueue: make(chan *response, 32),
		reqPool:   sync.Pool{New: newRequest},
		respPool:  sync.Pool{New: newResponse},
		ctx:       ctx,
		ctxCancel: cancel,
	}
	go this.readStrmWorker()
	go this.writeStrmWorker()
	if workerCount < 1 {
		workerCount = RecommendWorkerCount()
	}
	// Reserve two goroutines for the stream workers started above.
	if workerCount -= 2; workerCount < 1 {
		workerCount = 1
	}
	this.workersWg.Add(workerCount)
	for i := 0; i < workerCount; i++ {
		go this.reqIOWorker()
	}
	// Shutdown sequence.
	<-ctx.Done()          // readStrmWorker watches ctx and closes reqQueue, stopping the IO workers.
	this.workersWg.Wait() // All IO workers have shut down.
	close(this.respQueue) // Stops writeStrmWorker.
	if err := this.dev.Close(); err != nil {
		return fmt.Errorf("could not close userspace device: %w", err)
	}
	return nil
}
func (proc *reqProcessor) readStrmWorker() {
	defer close(proc.reqQueue)
	bufStrm := bufio.NewReaderSize(proc.cmdStrm, 16*1024*1024)
	var req *request
	var err error
	flushMu := new(sync.RWMutex)
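	// flushMu implements a write barrier for NBD flushes: every in-flight
	// write/trim holds a read lock on the current flushMu, while a flush
	// request carries that mutex and takes the write lock in execute(),
	// blocking until all earlier writes have released it. A fresh mutex is
	// then handed to subsequent writes so they are ordered after the flush.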
	for {
		if err = proc.ctx.Err(); err != nil {
			return
		}
		req = proc.reqPool.Get().(*request)
		if err = req.Decode(bufStrm); err != nil {
			if proc.ctx.Err() != nil {
				return
			}
			log.Printf("ReqProcessor::readWorker(): Decode failed; Details: %s", err)
			if err = proc.cmdStrm.Close(); err != nil {
				log.Printf("ReqProcessor::readWorker(): Could not close command stream; Details: %s", err)
			}
			proc.ctxCancel()
			return
		}
		switch req.reqType {
		case nbdWrite, nbdTrim:
			flushMu.RLock()
			req.flushMu = flushMu
		case nbdFlush:
			req.flushMu = flushMu
			flushMu = new(sync.RWMutex)
		case nbdDisconnect:
			proc.ctxCancel()
			return
		}
		proc.reqQueue <- req
	}
}
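
// writeStrmWorker drains respQueue into a buffered writer, flushing the
// buffer only when the queue momentarily runs dry. This batches replies
// under load without delaying them when traffic is light.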
func (proc *reqProcessor) writeStrmWorker() {
	var resp *response
	var open bool
	var err error
	bufStrm := bufio.NewWriterSize(proc.cmdStrm, 16*1024*1024)
	defer bufStrm.Flush()
	for {
		select {
		case resp, open = <-proc.respQueue:
			if !open {
				return
			}
			if err = resp.Write(bufStrm); err != nil {
				log.Printf("ReqProcessor::writeWorker(): Response reply failed; Details: %s", err)
				proc.ctxCancel()
			}
			proc.respPool.Put(resp)
		default:
			// The queue is momentarily empty: push buffered replies out,
			// then block until the next response arrives.
			if err = bufStrm.Flush(); err != nil {
				log.Printf("ReqProcessor::writeWorker(): Flushing buffered replies failed; Details: %s", err)
				proc.ctxCancel()
			}
			select {
			case resp, open = <-proc.respQueue:
				if !open {
					return
				}
				if err = resp.Write(bufStrm); err != nil {
					log.Printf("ReqProcessor::writeWorker(): Response reply failed; Details: %s", err)
					proc.ctxCancel()
				}
				proc.respPool.Put(resp)
			}
		}
	}
}
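
// reqIOWorker consumes decoded requests, executes them against the device,
// and forwards the responses to writeStrmWorker.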
func (proc *reqProcessor) reqIOWorker() {
	defer proc.workersWg.Done()
	var req *request
	var resp *response
	for req = range proc.reqQueue {
		resp = proc.execute(req, proc.respPool.Get().(*response))
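		// Try a non-blocking send first so the request can be recycled into
		// the pool before we potentially block on a full response queue.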
		select {
		case proc.respQueue <- resp:
			proc.reqPool.Put(req)
		default:
			proc.reqPool.Put(req)
			proc.respQueue <- resp
		}
	}
}
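
// execute dispatches a single NBD request against the backing Device and
// fills resp with the outcome; it never returns nil.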
func (proc *reqProcessor) execute(req *request, resp *response) *response {
	if resp == nil {
		resp = new(response)
	}
	var err error
	var errCode nbdErr
	switch req.reqType {
	case nbdRead:
		if req.pos%proc.blockSize != 0 || int64(req.count)%proc.blockSize != 0 {
			err = fmt.Errorf("Assertion failed: Read request was not block aligned (pos=%d,len=%d)", req.pos, req.count)
			errCode = ndbRespErrInvalid
			break
		}
		_, err = proc.dev.ReadAt(resp.GetReadBuffer(req), req.pos)
		if err != nil {
			errCode = ndbRespErrIO
		}
	case nbdWrite:
		// Release the flush barrier even if the request is rejected below.
		defer req.flushMu.RUnlock()
		if req.pos%proc.blockSize != 0 || int64(req.count)%proc.blockSize != 0 {
			err = fmt.Errorf("Assertion failed: Write request was not block aligned (pos=%d,len=%d)", req.pos, req.count)
			errCode = ndbRespErrInvalid
			break
		}
		_, err = proc.dev.WriteAt(req.writeBuffer, req.pos)
		if err != nil {
			errCode = ndbRespErrIO
		}
	case nbdFlush:
		// Wait for every write issued before this flush to finish; we can
		// release right away since we only need the barrier, not the lock.
		req.flushMu.Lock()
		req.flushMu.Unlock()
		err = proc.dev.Flush()
		if err != nil {
			errCode = ndbRespErrIO
		}
	case nbdTrim:
		defer req.flushMu.RUnlock()
		err = proc.dev.Trim(req.pos, req.count)
		if err != nil {
			errCode = ndbRespErrIO
		}
	default:
		errCode = ndbRespErrInvalid
		err = fmt.Errorf("Assertion failed: Unknown NBD request type: %d", req.reqType)
	}
	if err != nil {
		log.Printf("ReqProcessor::execute(): WARNING: Request(type=%d, pos=%d, count=%d) failed and will return %s. Failure was: %s", req.reqType, req.pos, req.count, errCode, err)
	}
	resp.Set(req, errCode)
	return resp
}