Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(query): support spill for new agg hashtable #14905

Merged
merged 11 commits into from
Mar 13, 2024
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 23 additions & 3 deletions src/query/expression/src/aggregate/aggregate_hashtable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,9 @@
// A new AggregateHashtable which is inspired by duckdb's https://duckdb.org/2022/03/07/aggregate-hashtable.html

use std::sync::atomic::Ordering;
use std::sync::Arc;

use bumpalo::Bump;
use databend_common_exception::Result;

use super::partitioned_payload::PartitionedPayload;
Expand Down Expand Up @@ -44,7 +46,7 @@ pub struct AggregateHashTable {
pub payload: PartitionedPayload,
// use for append rows directly during deserialize
pub direct_append: bool,
config: HashTableConfig,
pub config: HashTableConfig,
current_radix_bits: u64,
entries: Vec<Entry>,
count: usize,
Expand All @@ -59,23 +61,30 @@ impl AggregateHashTable {
group_types: Vec<DataType>,
aggrs: Vec<AggregateFunctionRef>,
config: HashTableConfig,
arena: Arc<Bump>,
) -> Self {
let capacity = Self::initial_capacity();
Self::new_with_capacity(group_types, aggrs, config, capacity)
Self::new_with_capacity(group_types, aggrs, config, capacity, arena)
}

pub fn new_with_capacity(
group_types: Vec<DataType>,
aggrs: Vec<AggregateFunctionRef>,
config: HashTableConfig,
capacity: usize,
arena: Arc<Bump>,
) -> Self {
Self {
entries: vec![0u64; capacity],
count: 0,
direct_append: false,
current_radix_bits: config.initial_radix_bits,
payload: PartitionedPayload::new(group_types, aggrs, 1 << config.initial_radix_bits),
payload: PartitionedPayload::new(
group_types,
aggrs,
1 << config.initial_radix_bits,
vec![arena],
),
capacity,
config,
}
Expand Down Expand Up @@ -446,6 +455,7 @@ impl AggregateHashTable {
self.payload.group_types.clone(),
self.payload.aggrs.clone(),
1,
vec![Arc::new(Bump::new())],
);
let payload = std::mem::replace(&mut self.payload, temp_payload);
let mut state = PayloadFlushState::default();
Expand Down Expand Up @@ -530,6 +540,16 @@ impl AggregateHashTable {
/// Reset the tracked number of groups to zero without touching the
/// payload or the entries table.
pub fn reset_count(&mut self) {
self.count = 0;
}

/// Total memory attributed to this hashtable: the payload's own pages
/// plus everything the backing bump arenas have allocated.
pub fn allocated_bytes(&self) -> usize {
    let arena_bytes: usize = self
        .payload
        .arenas
        .iter()
        .map(|arena| arena.allocated_bytes())
        .sum();
    self.payload.memory_size() + arena_bytes
}
}

/// Upper 16 bits are salt
Expand Down
21 changes: 15 additions & 6 deletions src/query/expression/src/aggregate/partitioned_payload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,14 +54,13 @@ impl PartitionedPayload {
group_types: Vec<DataType>,
aggrs: Vec<AggregateFunctionRef>,
partition_count: u64,
arenas: Vec<Arc<Bump>>,
) -> Self {
let radix_bits = partition_count.trailing_zeros() as u64;
debug_assert_eq!(1 << radix_bits, partition_count);

let arena = Arc::new(Bump::new());

let payloads = (0..partition_count)
.map(|_| Payload::new(arena.clone(), group_types.clone(), aggrs.clone()))
.map(|_| Payload::new(arenas[0].clone(), group_types.clone(), aggrs.clone()))
.collect_vec();

let group_sizes = payloads[0].group_sizes.clone();
Expand All @@ -85,7 +84,7 @@ impl PartitionedPayload {
state_layout,
partition_count,

arenas: vec![arena],
arenas,
mask_v: mask(radix_bits),
shift_v: shift(radix_bits),
}
Expand Down Expand Up @@ -145,13 +144,14 @@ impl PartitionedPayload {
self.group_types.clone(),
self.aggrs.clone(),
new_partition_count as u64,
self.arenas.clone(),
);

new_partition_payload.combine(self, state);
new_partition_payload
}

pub fn combine(&mut self, mut other: PartitionedPayload, state: &mut PayloadFlushState) {
pub fn combine(&mut self, other: PartitionedPayload, state: &mut PayloadFlushState) {
if other.partition_count == self.partition_count {
for (l, r) in self.payloads.iter_mut().zip(other.payloads.into_iter()) {
l.combine(r);
Expand All @@ -163,7 +163,6 @@ impl PartitionedPayload {
self.combine_single(payload, state)
}
}
self.arenas.append(&mut other.arenas);
}

pub fn combine_single(&mut self, mut other: Payload, state: &mut PayloadFlushState) {
Expand Down Expand Up @@ -250,6 +249,16 @@ impl PartitionedPayload {
pub fn memory_size(&self) -> usize {
self.payloads.iter().map(|x| x.memory_size()).sum()
}

/// Returns `true` if `other` is one of the arenas owned by this payload.
///
/// Comparison is by pointer identity (`Arc::ptr_eq`), not by contents:
/// two distinct `Bump` allocations are never considered the same arena.
pub fn include_arena(&self, other: &Arc<Bump>) -> bool {
    // Idiomatic short-circuiting scan instead of a manual loop.
    self.arenas.iter().any(|arena| Arc::ptr_eq(arena, other))
}
}

#[inline]
Expand Down
21 changes: 18 additions & 3 deletions src/query/expression/src/aggregate/payload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ use std::sync::Arc;

use bumpalo::Bump;
use databend_common_base::runtime::drop_guard;
use itertools::Itertools;
use strength_reduce::StrengthReducedU64;

use super::payload_row::rowformat_size;
Expand All @@ -27,6 +28,8 @@ use crate::store;
use crate::types::DataType;
use crate::AggregateFunctionRef;
use crate::Column;
use crate::ColumnBuilder;
use crate::DataBlock;
use crate::PayloadFlushState;
use crate::SelectVector;
use crate::StateAddr;
Expand All @@ -41,7 +44,6 @@ use crate::MAX_PAGE_SIZE;
// [STATE_ADDRS] is the state_addrs of the aggregate functions, 8 bytes each
pub struct Payload {
pub arena: Arc<Bump>,
pub arenas: Vec<Arc<Bump>>,
// if true, the states are moved out of the payload into other payload, and will not be dropped
pub state_move_out: bool,
pub group_types: Vec<DataType>,
Expand Down Expand Up @@ -124,8 +126,7 @@ impl Payload {
let row_per_page = (u16::MAX as usize).min(MAX_PAGE_SIZE / tuple_size).max(1);

Self {
arena: arena.clone(),
arenas: vec![arena],
arena,
state_move_out: false,
pages: vec![],
current_write_page: 0,
Expand Down Expand Up @@ -376,6 +377,20 @@ impl Payload {
state.flush_page_row = end;
true
}

/// Build a zero-row `DataBlock` with this payload's output schema:
/// one column per aggregate (its return type) followed by one column
/// per group-by key type.
pub fn empty_block(&self) -> DataBlock {
    let mut columns = Vec::with_capacity(self.aggrs.len() + self.group_types.len());
    for agg in self.aggrs.iter() {
        columns.push(ColumnBuilder::with_capacity(&agg.return_type().unwrap(), 0).build());
    }
    for ty in self.group_types.iter() {
        columns.push(ColumnBuilder::with_capacity(ty, 0).build());
    }
    DataBlock::new_from_columns(columns)
}
}

impl Drop for Payload {
Expand Down
68 changes: 68 additions & 0 deletions src/query/expression/src/aggregate/payload_flush.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,10 @@ use crate::types::NumberType;
use crate::types::TimestampType;
use crate::types::ValueType;
use crate::with_number_mapped_type;
use crate::AggregateFunctionRef;
use crate::Column;
use crate::ColumnBuilder;
use crate::DataBlock;
use crate::Scalar;
use crate::StateAddr;
use crate::BATCH_SIZE;
Expand Down Expand Up @@ -108,6 +110,67 @@ impl PartitionedPayload {
}

impl Payload {
/// Flush every page of this payload into a single `DataBlock`.
///
/// Each flushed batch produces one binary (serialized state) column per
/// aggregate function, followed by the group-key columns; the batches are
/// then concatenated. If the payload holds no rows, an empty block with
/// the same schema is returned.
///
/// NOTE(review): the scraped original contained non-code review-UI text
/// inside this function body; it has been removed here.
pub fn aggregate_flush_all(&self) -> DataBlock {
    let mut state = PayloadFlushState::default();
    let mut blocks = vec![];

    while self.flush(&mut state) {
        let row_count = state.row_count;

        // One binary builder per aggregate, pre-sized from the function's
        // estimated serialized size per row.
        let mut state_builders: Vec<BinaryColumnBuilder> = self
            .aggrs
            .iter()
            .map(|agg| state_serializer(agg, row_count))
            .collect();

        // Serialize each row's per-aggregate state into its builder.
        for place in state.state_places.as_slice()[0..row_count].iter() {
            for (idx, (addr_offset, aggr)) in self
                .state_addr_offsets
                .iter()
                .zip(self.aggrs.iter())
                .enumerate()
            {
                let arg_place = place.next(*addr_offset);
                aggr.serialize(arg_place, &mut state_builders[idx].data)
                    .unwrap();
                state_builders[idx].commit_row();
            }
        }

        // Output layout: serialized aggregate states first, then group keys.
        let mut cols = Vec::with_capacity(self.aggrs.len() + self.group_types.len());
        for builder in state_builders.into_iter() {
            cols.push(Column::Binary(builder.build()));
        }
        cols.extend_from_slice(&state.take_group_columns());

        blocks.push(DataBlock::new_from_columns(cols));
    }

    if blocks.is_empty() {
        return self.empty_block();
    }

    DataBlock::concat(&blocks).unwrap()
}

/// Flush every page of this payload into one `DataBlock` containing only
/// the group-key columns (no aggregate state). Returns an empty block with
/// the same schema when the payload holds no rows.
pub fn group_by_flush_all(&self) -> DataBlock {
    let mut state = PayloadFlushState::default();
    let mut blocks = Vec::new();

    while self.flush(&mut state) {
        blocks.push(DataBlock::new_from_columns(state.take_group_columns()));
    }

    if blocks.is_empty() {
        self.empty_block()
    } else {
        DataBlock::concat(&blocks).unwrap()
    }
}

pub fn flush(&self, state: &mut PayloadFlushState) -> bool {
if state.flush_page >= self.pages.len() {
return false;
Expand Down Expand Up @@ -286,3 +349,8 @@ impl Payload {
builder.build()
}
}

/// Pre-size a binary column builder for `row` serialized aggregate states.
/// When the function cannot estimate its per-row size, fall back to 4 bytes.
fn state_serializer(func: &AggregateFunctionRef, row: usize) -> BinaryColumnBuilder {
    let per_row = match func.serialize_size_per_row() {
        Some(size) => size,
        None => 4,
    };
    BinaryColumnBuilder::with_capacity(row, row * per_row)
}
19 changes: 15 additions & 4 deletions src/query/functions/tests/it/aggregates/agg_hashtable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use bumpalo::Bump;
use databend_common_expression::block_debug::assert_block_value_sort_eq;
use databend_common_expression::types::ArgType;
use databend_common_expression::types::BooleanType;
Expand Down Expand Up @@ -87,16 +90,24 @@ fn test_agg_hashtable() {
let params: Vec<Vec<Column>> = aggrs.iter().map(|_| vec![columns[1].clone()]).collect();

let config = HashTableConfig::default();
let mut hashtable =
AggregateHashTable::new(group_types.clone(), aggrs.clone(), config.clone());
let mut hashtable = AggregateHashTable::new(
group_types.clone(),
aggrs.clone(),
config.clone(),
Arc::new(Bump::new()),
);

let mut state = ProbeState::default();
let _ = hashtable
.add_groups(&mut state, &group_columns, &params, &[], n)
.unwrap();

let mut hashtable2 =
AggregateHashTable::new(group_types.clone(), aggrs.clone(), config.clone());
let mut hashtable2 = AggregateHashTable::new(
group_types.clone(),
aggrs.clone(),
config.clone(),
Arc::new(Bump::new()),
);

let mut state2 = ProbeState::default();
let _ = hashtable2
Expand Down
Loading
Loading