
Commit 429e085

Bug 1906078 - Avoid pathological VecDeque behavior on parallel traversal. r=dshin
Using split_off to split the front chunk from the queue seems like it'd be efficient, but it's not: rust-lang/rust#127281

Instead, pass the range of nodes to distribute, and use VecDeque::from_iter in the callee. Then, just truncate() the queue to the remaining local work.

Differential Revision: https://phabricator.services.mozilla.com/D215705
1 parent 24ce94a
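For illustration, here is a minimal, self-contained sketch of the pattern this commit removes. Names are simplified, and process_chunk is a hypothetical stand-in for distribute_one_chunk; the point, per the commit message and rust-lang/rust#127281, is that repeatedly split_off-ing a small front chunk from a VecDeque is far more expensive than it looks:

use std::collections::VecDeque;

// Hypothetical stand-in for distribute_one_chunk: consumes one work unit.
fn process_chunk(chunk: VecDeque<u32>) {
    println!("processing a chunk of {} items", chunk.len());
}

// The old shape of distribute_work: peel each work unit off the front with
// split_off, hand it away, and loop on the remainder. Per the linked issue,
// this hits pathological VecDeque behavior.
fn distribute_via_split_off(mut items: VecDeque<u32>, work_unit_max: usize) {
    while items.len() > work_unit_max {
        let rest = items.split_off(work_unit_max);
        process_chunk(items);
        items = rest;
    }
    // The final (possibly short) chunk needs a separate trailing call.
    process_chunk(items);
}

fn main() {
    distribute_via_split_off((0..10).collect(), 4);
}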

1 file changed (+10 −15)

servo/components/style/parallel.rs (+10 −15)

@@ -95,7 +95,7 @@ fn distribute_one_chunk<'a, 'scope, E, D>(
 
 /// Distributes all items into the thread pool, in `work_unit_max` chunks.
 fn distribute_work<'a, 'scope, E, D>(
-    mut items: VecDeque<SendNode<E::ConcreteNode>>,
+    mut items: impl Iterator<Item = SendNode<E::ConcreteNode>>,
     traversal_root: OpaqueNode,
     work_unit_max: usize,
     traversal_data: PerLevelTraversalData,
@@ -106,28 +106,22 @@ fn distribute_work<'a, 'scope, E, D>(
     E: TElement + 'scope,
     D: DomTraversal<E>,
 {
-    while items.len() > work_unit_max {
-        let rest = items.split_off(work_unit_max);
+    use std::iter::FromIterator;
+    loop {
+        let chunk = VecDeque::from_iter(items.by_ref().take(work_unit_max));
+        if chunk.is_empty() {
+            return;
+        }
         distribute_one_chunk(
-            items,
+            chunk,
             traversal_root,
             work_unit_max,
             traversal_data,
             scope,
             traversal,
             tls,
         );
-        items = rest;
     }
-    distribute_one_chunk(
-        items,
-        traversal_root,
-        work_unit_max,
-        traversal_data,
-        scope,
-        traversal,
-        tls,
-    );
 }
 
 /// Processes `discovered` items, possibly spawning work in other threads as needed.
@@ -175,14 +169,15 @@ pub fn style_trees<'a, 'scope, E, D>(
         let mut traversal_data_copy = traversal_data.clone();
         traversal_data_copy.current_dom_depth += 1;
         distribute_work(
-            discovered.split_off(kept_work),
+            discovered.range(kept_work..).cloned(),
             traversal_root,
             work_unit_max,
             traversal_data_copy,
             scope.unwrap(),
             traversal,
             tls,
         );
+        discovered.truncate(kept_work);
     }
 
     if nodes_remaining_at_current_depth == 0 {
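For comparison, a matching sketch of the new shape (again with simplified names and a hypothetical process_chunk): the caller exposes the tail of its queue as a cloning iterator, the callee collects each work unit into its own right-sized VecDeque, and the caller then truncate()s its queue down to the work it kept:

use std::collections::VecDeque;
use std::iter::FromIterator;

// Hypothetical stand-in for distribute_one_chunk: records chunk sizes.
fn process_chunk(chunk: VecDeque<u32>, sizes: &mut Vec<usize>) {
    sizes.push(chunk.len());
}

// Mirrors the patched distribute_work: drain the iterator one work unit at
// a time, building each chunk with VecDeque::from_iter; stop on an empty one.
fn distribute(
    mut items: impl Iterator<Item = u32>,
    work_unit_max: usize,
    sizes: &mut Vec<usize>,
) {
    loop {
        let chunk = VecDeque::from_iter(items.by_ref().take(work_unit_max));
        if chunk.is_empty() {
            return;
        }
        process_chunk(chunk, sizes);
    }
}

fn main() {
    let mut discovered: VecDeque<u32> = (0..10).collect();
    let kept_work = 3;
    let mut sizes = Vec::new();

    // Mirrors the patched style_trees call site: hand off everything past
    // kept_work as an iterator, then shrink the local queue in place.
    distribute(discovered.range(kept_work..).cloned(), 4, &mut sizes);
    discovered.truncate(kept_work);

    assert_eq!(sizes, vec![4, 3]);
    assert_eq!(discovered.len(), kept_work);
}

Each node is cloned once as it is handed out, and truncate() only drops elements in place, so the retained front of the queue is never copied or reallocated.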
