
Commit 167fec4

Merge branch 'test_bug2' of https://github.com/JackTan25/databend into test_bug2

JackTan25 committed Jul 9, 2023
2 parents 02a409a + 3eb642e commit 167fec4
Showing 213 changed files with 4,994 additions and 1,963 deletions.
5 changes: 4 additions & 1 deletion .github/actions/artifact_failure/action.yml
@@ -19,7 +19,10 @@ runs:
docker logs "$line" > .databend/docker/"$line".log
done
tar -zcf target/failure-${{ inputs.name }}.tar.gz .databend
tar -zcf target/failure-${{ inputs.name }}.tar.gz \
nohup.out \
.databend/ \
./**/.databend \
- uses: actions/upload-artifact@v3
with:
name: ${{ inputs.name }}
1 change: 1 addition & 0 deletions .github/workflows/reuse.linux.yml
@@ -214,6 +214,7 @@ jobs:
- "ee"
format:
- "parquet"
- "native"
handlers:
- "mysql,http,clickhouse"
steps:
3 changes: 3 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions benchmark/clickbench/benchmark_cloud.sh
@@ -70,6 +70,9 @@ until bendsql --query="SHOW WAREHOUSES LIKE '${CLOUD_WAREHOUSE}'" | grep -q "Run
sleep 10
done

echo "Checking session settings..."
bendsql --query="select * from system.settings where value != default;" -o table

echo "Running queries..."

# analyze table
1 change: 1 addition & 0 deletions docs/doc/14-sql-commands/00-ddl/20-table/20-ddl-drop-table.md
@@ -7,6 +7,7 @@ Deletes the table.
**See also:**
- [CREATE TABLE](./10-ddl-create-table.md)
- [UNDROP TABLE](./21-ddl-undrop-table.md)
- [TRUNCATE TABLE](40-ddl-truncate-table.md)

## Syntax

55 changes: 42 additions & 13 deletions docs/doc/14-sql-commands/00-ddl/20-table/40-ddl-truncate-table.md
@@ -2,29 +2,58 @@
title: TRUNCATE TABLE
---

Empties the table completely.
Removes all data from a table while preserving its schema: every row is deleted, leaving an empty table with the same columns and constraints. Note that TRUNCATE TABLE does not release the disk space allocated to the table; to release it as part of the truncate operation, include the PURGE option.

See also: [DROP TABLE](20-ddl-drop-table.md)

## Syntax

```sql
TRUNCATE TABLE [db.]name
TRUNCATE TABLE [db.]table_name [PURGE]
```

## Examples

```sql
CREATE TABLE test(a BIGINT UNSIGNED, b VARCHAR) Engine = Fuse;
root@localhost> CREATE TABLE test_truncate(a BIGINT UNSIGNED, b VARCHAR);
Processed in (0.027 sec)

INSERT INTO test(a,b) VALUES(888, 'stars');
root@localhost> INSERT INTO test_truncate(a,b) VALUES(1234, 'databend');
1 rows affected in (0.060 sec)

SELECT * FROM test;
+------+---------+
| a | b |
+------+---------+
| 888 | stars |
+------+---------+
root@localhost> SELECT * FROM test_truncate;

TRUNCATE TABLE test;
SELECT
*
FROM
test_truncate

SELECT * FROM test;
```
┌───────────────────┐
│    a   │     b    │
│ UInt64 │  String  │
├────────┼──────────┤
│   1234 │ databend │
└───────────────────┘
1 row in 0.019 sec. Processed 1 rows, 1B (53.26 rows/s, 17.06 KiB/s)

root@localhost> TRUNCATE TABLE test_truncate;

TRUNCATE TABLE test_truncate

0 row in 0.047 sec. Processed 0 rows, 0B (0 rows/s, 0B/s)

root@localhost> SELECT * FROM test_truncate;

SELECT
*
FROM
test_truncate

0 row in 0.017 sec. Processed 0 rows, 0B (0 rows/s, 0B/s)

root@localhost> TRUNCATE TABLE test_truncate PURGE;

TRUNCATE TABLE test_truncate PURGE

0 row in 0.118 sec. Processed 0 rows, 0B (0 rows/s, 0B/s)
```
22 changes: 21 additions & 1 deletion src/common/arrow/src/parquet_write.rs
@@ -16,10 +16,12 @@ use std::io::Write;

use arrow::array::Array;
use arrow::chunk::Chunk;
use arrow::datatypes::DataType;
use arrow::datatypes::Schema;
use arrow::error::Result;
use arrow::io::parquet::write::to_parquet_schema;
use arrow::io::parquet::write::RowGroupIterator;
use parquet2::metadata::KeyValue;
use parquet2::metadata::ThriftFileMetaData;
use parquet2::write::FileWriter;
use parquet2::write::WriteOptions;
@@ -36,6 +38,18 @@
A: AsRef<dyn Array> + 'static + Send + Sync,
I: Iterator<Item = Result<Chunk<A>>>,
{
// add extension data type to parquet meta.
let mut key_values = Vec::new();
for field in &schema.fields {
if let DataType::Extension(ty, _, _) = &field.data_type {
let key_value = KeyValue {
key: field.name.clone(),
value: Some(ty.clone()),
};
key_values.push(key_value);
}
}

let parquet_schema = to_parquet_schema(&schema)?;

// Arrow2 should be honored
@@ -45,7 +59,13 @@
for group in row_groups {
file_writer.write(group?)?;
}
let file_size = file_writer.end(None)?;

let key_value_metadata = if !key_values.is_empty() {
Some(key_values)
} else {
None
};
let file_size = file_writer.end(key_value_metadata)?;
let (_meta_size, thrift_file_meta_data) = file_writer.into_inner_and_metadata();
Ok((file_size, thrift_file_meta_data))
}
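
For context (not part of this commit): the loop above records each extension field's name and extension-type name as a parquet key/value pair, so a reader can restore the logical types later. Below is a minimal sketch of that reverse step, assuming arrow2-style `Field`/`DataType`/`Schema` and parquet2's `KeyValue`; the helper name `restore_extension_types` is illustrative, not an API from this codebase.

```rust
use arrow::datatypes::DataType;
use arrow::datatypes::Field;
use arrow::datatypes::Schema;
use parquet2::metadata::KeyValue;

/// Hypothetical helper (not part of this commit): re-apply extension types
/// that were stored as (field name -> extension type name) key/value pairs.
fn restore_extension_types(schema: Schema, key_values: &[KeyValue]) -> Schema {
    let fields: Vec<Field> = schema
        .fields
        .into_iter()
        .map(|field| {
            // Find the stored extension type name for this field, if any.
            let ext = key_values
                .iter()
                .find(|kv| kv.key == field.name)
                .and_then(|kv| kv.value.clone());
            match ext {
                // Wrap the physical type back into the logical extension type.
                Some(ty) => Field::new(
                    field.name.clone(),
                    DataType::Extension(ty, Box::new(field.data_type.clone()), None),
                    field.is_nullable,
                ),
                None => field,
            }
        })
        .collect();
    Schema::from(fields)
}
```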
1 change: 1 addition & 0 deletions src/common/base/src/lib.rs
@@ -32,5 +32,6 @@ pub mod mem_allocator;
pub mod rangemap;
pub mod runtime;

pub use runtime::dump_backtrace;
pub use runtime::match_join_handle;
pub use runtime::set_alloc_error_hook;
79 changes: 79 additions & 0 deletions src/common/base/src/runtime/backtrace.rs
@@ -0,0 +1,79 @@
// Copyright 2021 Datafuse Labs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Write;

#[derive(Debug)]
struct AsyncTaskItem {
stack_frames: Vec<String>,
}

pub fn dump_backtrace(wait_for_running_tasks: bool) -> String {
let tree = async_backtrace::taskdump_tree(wait_for_running_tasks);

let mut tasks = vec![];
let mut polling_tasks = vec![];
let mut current_stack_frames = vec![];

let mut first = true;
let mut is_polling = false;
for line in tree.lines() {
if line.starts_with(|x: char| !x.is_ascii_whitespace()) {
if !first {
match is_polling {
true => polling_tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
false => tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
};

is_polling = false;
}

first = false;
}

if line.ends_with("[POLLING]") {
is_polling = true;
}

current_stack_frames.push(line.to_string());
}

match is_polling {
true => polling_tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
false => tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
};

let mut output = String::new();
for mut tasks in [tasks, polling_tasks] {
tasks.sort_by(|l, r| Ord::cmp(&l.stack_frames.len(), &r.stack_frames.len()));

for item in tasks.into_iter().rev() {
for frame in item.stack_frames {
writeln!(output, "{}", frame).unwrap();
}

writeln!(output).unwrap();
}
}

output
}
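
A hypothetical usage sketch (not part of this commit) showing how the exported `dump_backtrace` might be exercised; it assumes a tokio runtime and the `async-backtrace` crate's `#[framed]` attribute. The real caller added by this commit is the `debug_dump_stack` handler in `src/common/http`, shown further below.

```rust
use std::time::Duration;

use common_base::dump_backtrace;

// Instrument the future so it shows up in async-backtrace's task dump.
#[async_backtrace::framed]
async fn slow_job() {
    tokio::time::sleep(Duration::from_secs(60)).await;
}

#[tokio::main]
async fn main() {
    // Spawn a task that will still be pending when the dump is taken.
    let handle = tokio::spawn(slow_job());

    // Let the spawned task get polled at least once so its frame is recorded.
    tokio::task::yield_now().await;

    // `false` = don't wait for tasks that are currently being polled.
    println!("{}", dump_backtrace(false));

    handle.abort();
}
```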
2 changes: 2 additions & 0 deletions src/common/base/src/runtime/mod.rs
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod backtrace;
mod catch_unwind;
mod global_runtime;
#[allow(clippy::module_inception)]
@@ -20,6 +21,7 @@ mod runtime_tracker;
mod thread;
mod thread_pool;

pub use backtrace::dump_backtrace;
pub use catch_unwind::catch_unwind;
pub use catch_unwind::CatchUnwindFuture;
pub use global_runtime::GlobalIORuntime;
3 changes: 1 addition & 2 deletions src/common/exception/src/exception_code.rs
@@ -184,8 +184,7 @@

// Index related errors.
UnsupportedIndex(1601),
IndexAlreadyRefreshed(1602),
RefreshIndexError(1603),
RefreshIndexError(1602),
}

// Meta service errors [2001, 3000].
66 changes: 2 additions & 64 deletions src/common/http/src/debug/stack.rs
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::fmt::Write;

use common_base::dump_backtrace;
use poem::web::Query;
use poem::IntoResponse;

@@ -22,68 +21,7 @@ pub struct DumpStackRequest {
wait_for_running_tasks: bool,
}

#[derive(Debug)]
struct AsyncTaskItem {
stack_frames: Vec<String>,
}

#[poem::handler]
pub async fn debug_dump_stack(req: Option<Query<DumpStackRequest>>) -> impl IntoResponse {
let tree =
async_backtrace::taskdump_tree(req.map(|x| x.wait_for_running_tasks).unwrap_or(false));

let mut tasks = vec![];
let mut polling_tasks = vec![];
let mut current_stack_frames = vec![];

let mut first = true;
let mut is_polling = false;
for line in tree.lines() {
if line.starts_with(|x: char| !x.is_ascii_whitespace()) {
if !first {
match is_polling {
true => polling_tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
false => tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
};

is_polling = false;
}

first = false;
}

if line.ends_with("[POLLING]") {
is_polling = true;
}

current_stack_frames.push(line.to_string());
}

match is_polling {
true => polling_tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
false => tasks.push(AsyncTaskItem {
stack_frames: std::mem::take(&mut current_stack_frames),
}),
};

let mut output = String::new();
for mut tasks in [tasks, polling_tasks] {
tasks.sort_by(|l, r| Ord::cmp(&l.stack_frames.len(), &r.stack_frames.len()));

for item in tasks.into_iter().rev() {
for frame in item.stack_frames {
writeln!(output, "{}", frame).unwrap();
}

writeln!(output).unwrap();
}
}

output
dump_backtrace(req.map(|x| x.wait_for_running_tasks).unwrap_or(false))
}
2 changes: 2 additions & 0 deletions src/common/metrics/src/lib.rs
@@ -17,6 +17,7 @@
pub mod counter;
mod dump;
mod recorder;
mod reset;

pub use dump::dump_metric_samples;
pub use dump::HistogramCount;
@@ -39,3 +40,4 @@ pub use recorder::label_increment_gauge_with_val_and_labels;
pub use recorder::try_handle;
pub use recorder::LABEL_KEY_CLUSTER;
pub use recorder::LABEL_KEY_TENANT;
pub use reset::reset_metrics;