diff --git a/be/src/pipeline/exec/schema_scan_operator.cpp b/be/src/pipeline/exec/schema_scan_operator.cpp
index ddc2821cac14a1..8faee3ef581beb 100644
--- a/be/src/pipeline/exec/schema_scan_operator.cpp
+++ b/be/src/pipeline/exec/schema_scan_operator.cpp
@@ -70,7 +70,9 @@ SchemaScanOperatorX::SchemaScanOperatorX(ObjectPool* pool, const TPlanNode& tnod
           _common_scanner_param(new SchemaScannerCommonParam()),
           _tuple_id(tnode.schema_scan_node.tuple_id),
           _tuple_idx(0),
-          _slot_num(0) {}
+          _slot_num(0) {
+    Base::_is_serial_operator = tnode.__isset.is_serial_operator && tnode.is_serial_operator;
+}
 
 Status SchemaScanOperatorX::init(const TPlanNode& tnode, RuntimeState* state) {
     RETURN_IF_ERROR(Base::init(tnode, state));
diff --git a/regression-test/suites/query_p0/system/test_query_sys.groovy b/regression-test/suites/query_p0/system/test_query_sys.groovy
index 7b6ca1027b479c..b192a87ca30b00 100644
--- a/regression-test/suites/query_p0/system/test_query_sys.groovy
+++ b/regression-test/suites/query_p0/system/test_query_sys.groovy
@@ -54,4 +54,12 @@ suite("test_query_sys", "query,p0") {
         sql "select * from http_stream('format'='csv');"
         exception "No Alive backends"
     }
+
+    // `workload_group_resource_usage` is refreshed 30s after BE startup, so sleep 30s to get a stable result
+    sleep(30000)
+    sql """set parallel_pipeline_task_num=8"""
+    def rows1 = sql """ select count(*) from information_schema.workload_group_resource_usage; """
+    sql """set parallel_pipeline_task_num=1"""
+    def rows2 = sql """ select count(*) from information_schema.workload_group_resource_usage; """
+    assertEquals(rows1, rows2)
 }