import Dispatch
import Foundation

import NIO
import NIOConcurrencyHelpers
import TSCUtility

/// Run the given computations on a given array in batches, exercising
/// a specified amount of parallelism.
///
/// - Discussion:
///     For some blocking operations (such as file system accesses) executing
///     them on the NIO loops is very expensive since it blocks the event
///     processing machinery. Here we use extra threads for such operations.
public struct LLBBatchingFutureOperationQueue {

    /// Threads capable of running futures.
    public let group: LLBFuturesDispatchGroup
@@ -35,28 +33,33 @@ public class LLBBatchingFutureOperationQueue {
35
33
36
34
/// Maximum number of operations executed concurrently.
37
35
public var maxOpCount : Int {
38
- get { lock . withLock { maxOpCount_ } }
39
- set { scheduleMoreTasks { maxOpCount_ = newValue } }
36
+ get { concurrencyLimiter . maximumConcurrency }
37
+ set { concurrencyLimiter . maximumConcurrency = Self . bridged ( maxOpCount : newValue ) }
40
38
}
41
39
42
40
/// Return the number of operations currently queued.
43
- public var opCount : Int { lock. withLock { opCount_ } }
41
+ public var opCount : Int { concurrencyLimiter. sharesInUse }
42
+
43
+ /// Name to be used for dispatch queue
44
+ private let name : String
44
45
45
- /// Queue of outstanding operations
46
- private let dispatchQueue : DispatchQueue
46
+ /// QoS passed to DispatchQueue
47
+ private let qos : DispatchQoS
47
48
48
49
/// Lock protecting state.
49
50
private let lock = NIOConcurrencyHelpers . Lock ( )
50
51
51
- private var maxOpCount_ : Int
52
-
53
- private var opCount_ : Int
52
+ /// Limits number of concurrent operations being executed
53
+ private let concurrencyLimiter : ConcurrencyLimiter
54
54
55
55
/// The queue of operations to run.
56
56
private var workQueue = NIO . CircularBuffer < DispatchWorkItem > ( )
57
57
58
58
@available ( * , deprecated, message: " 'qualityOfService' is deprecated: Use 'dispatchQoS' " )
59
- public convenience init ( name: String , group: LLBFuturesDispatchGroup , maxConcurrentOperationCount maxOpCount: Int , qualityOfService: QualityOfService ) {
59
+ public init (
60
+ name: String , group: LLBFuturesDispatchGroup , maxConcurrentOperationCount maxOpCount: Int ,
61
+ qualityOfService: QualityOfService
62
+ ) {
60
63
let dispatchQoS : DispatchQoS
61
64
62
65
switch qualityOfService {
@@ -81,94 +84,72 @@ public class LLBBatchingFutureOperationQueue {
81
84
/// - group: Threads capable of running futures.
82
85
/// - maxConcurrentOperationCount:
83
86
/// Operations to execute in parallel.
84
- public convenience init ( name: String , group: LLBFuturesDispatchGroup , maxConcurrentOperationCount maxOpCount: Int ) {
87
+ public init ( name: String , group: LLBFuturesDispatchGroup , maxConcurrentOperationCount maxOpCount: Int ) {
85
88
self . init ( name: name, group: group, maxConcurrentOperationCount: maxOpCount, dispatchQoS: . default)
86
89
}
87
90
88
- public init ( name: String , group: LLBFuturesDispatchGroup , maxConcurrentOperationCount maxOpCnt: Int , dispatchQoS: DispatchQoS ) {
91
+ public init (
92
+ name: String , group: LLBFuturesDispatchGroup , maxConcurrentOperationCount maxOpCnt: Int ,
93
+ dispatchQoS: DispatchQoS
94
+ ) {
89
95
self . group = group
90
- self . dispatchQueue = DispatchQueue ( label: name, qos: dispatchQoS, attributes: . concurrent)
91
- self . opCount_ = 0
92
- self . maxOpCount_ = maxOpCnt
96
+ self . name = name
97
+ self . qos = dispatchQoS
98
+
99
+ self . concurrencyLimiter = ConcurrencyLimiter ( maximumConcurrency: Self . bridged ( maxOpCount: maxOpCnt) )
93
100
}
94
101
95
102
public func execute< T> ( _ body: @escaping ( ) throws -> T ) -> LLBFuture < T > {
96
- let promise = group. any ( ) . makePromise ( of: T . self)
103
+ return self . concurrencyLimiter. withReplenishableLimit ( eventLoop: group. any ( ) ) { eventLoop in
104
+ let promise = eventLoop. makePromise ( of: T . self)
97
105
98
- let workItem = DispatchWorkItem {
99
- promise. fulfill ( body)
100
- self . scheduleMoreTasks {
101
- self . opCount_ -= 1
106
+ DispatchQueue ( label: self . name, qos: self . qos) . async {
107
+ promise. fulfill ( body)
102
108
}
103
- }
104
109
105
- self . scheduleMoreTasks {
106
- workQueue. append ( workItem)
110
+ return promise. futureResult
107
111
}
108
-
109
- return promise. futureResult
110
112
}
111
113
112
114
public func execute< T> ( _ body: @escaping ( ) -> LLBFuture < T > ) -> LLBFuture < T > {
113
- let promise = group. any ( ) . makePromise ( of: T . self)
115
+ return self . concurrencyLimiter. withReplenishableLimit ( eventLoop: group. any ( ) ) { eventLoop in
116
+ let promise = eventLoop. makePromise ( of: T . self)
114
117
115
- let workItem = DispatchWorkItem {
116
- let f = body ( )
117
- f. cascade ( to: promise)
118
-
119
- _ = try ? f. wait ( )
120
-
121
- self . scheduleMoreTasks {
122
- self . opCount_ -= 1
118
+ DispatchQueue ( label: self . name, qos: self . qos) . async {
119
+ body ( ) . cascade ( to: promise)
123
120
}
124
- }
125
121
126
- self . scheduleMoreTasks {
127
- workQueue. append ( workItem)
122
+ return promise. futureResult
128
123
}
129
-
130
- return promise. futureResult
131
124
}
132
125
133
126
/// Order-preserving parallel execution. Wait for everything to complete.
134
127
@inlinable
135
- public func execute< A, T> ( _ args: [ A ] , minStride: Int = 1 , _ body: @escaping ( ArraySlice < A > ) throws -> [ T ] ) -> LLBFuture < [ T ] > {
128
+ public func execute< A, T> ( _ args: [ A ] , minStride: Int = 1 , _ body: @escaping ( ArraySlice < A > ) throws -> [ T ] )
129
+ -> LLBFuture < [ T ] >
130
+ {
136
131
let futures : [ LLBFuture < [ T ] > ] = executeNoWait ( args, minStride: minStride, body)
137
132
let loop = futures. first? . eventLoop ?? group. next ( )
138
- return LLBFuture < [ T ] > . whenAllSucceed ( futures, on: loop) . map { $0. flatMap { $0 } }
133
+ return LLBFuture < [ T ] > . whenAllSucceed ( futures, on: loop) . map { $0. flatMap { $0 } }
139
134
}
140
135
141
136
/// Order-preserving parallel execution.
142
137
/// Do not wait for all executions to complete, returning individual futures.
143
138
@inlinable
144
- public func executeNoWait< A, T> ( _ args: [ A ] , minStride: Int = 1 , maxStride: Int = Int . max, _ body: @escaping ( ArraySlice < A > ) throws -> [ T ] ) -> [ LLBFuture < [ T ] > ] {
145
- let batches : [ ArraySlice < A > ] = args. tsc_sliceBy ( maxStride: max ( minStride, min ( maxStride, args. count / maxOpCount) ) )
146
- return batches. map { arg in execute { try body ( arg) } }
139
+ public func executeNoWait< A, T> (
140
+ _ args: [ A ] , minStride: Int = 1 , maxStride: Int = Int . max, _ body: @escaping ( ArraySlice < A > ) throws -> [ T ]
141
+ ) -> [ LLBFuture < [ T ] > ] {
142
+ let batches : [ ArraySlice < A > ] = args. tsc_sliceBy (
143
+ maxStride: max ( minStride, min ( maxStride, args. count / maxOpCount) ) )
144
+ return batches. map { arg in execute { try body ( arg) } }
147
145
}
148
146
149
- private func scheduleMoreTasks( performUnderLock: ( ) -> Void ) {
150
- let toExecute : [ DispatchWorkItem ] = lock. withLock {
151
- performUnderLock ( )
152
-
153
- var scheduleItems : [ DispatchWorkItem ] = [ ]
154
-
155
- while opCount_ < maxOpCount_ {
156
-
157
- // Schedule a new operation, if available.
158
- guard let op = workQueue. popFirst ( ) else {
159
- break
160
- }
161
-
162
- self . opCount_ += 1
163
- scheduleItems. append ( op)
164
- }
165
-
166
- return scheduleItems
167
- }
168
-
169
- for workItem in toExecute {
170
- dispatchQueue. async ( execute: workItem)
147
+ private static func bridged( maxOpCount: Int ) -> Int {
148
+ switch maxOpCount {
149
+ case OperationQueue . defaultMaxConcurrentOperationCount:
150
+ return System . coreCount
151
+ default :
152
+ return maxOpCount
171
153
}
172
154
}
173
-
174
155
}
0 commit comments