diff --git a/analytic_engine/src/memtable/key.rs b/analytic_engine/src/memtable/key.rs
index 43f8c00838..b352976359 100644
--- a/analytic_engine/src/memtable/key.rs
+++ b/analytic_engine/src/memtable/key.rs
@@ -121,9 +121,8 @@ impl<'a> Encoder<Row> for ComparableInternalKey<'a> {
     fn encode<B: BufMut>(&self, buf: &mut B, value: &Row) -> Result<()> {
         let encoder = MemComparable;
-        for idx in 0..self.schema.num_key_columns() {
-            // Encode each column in primary key
-            encoder.encode(buf, &value[idx]).context(EncodeKeyDatum)?;
+        for idx in self.schema.primary_key_indexes() {
+            encoder.encode(buf, &value[*idx]).context(EncodeKeyDatum)?;
         }
         SequenceCodec.encode(buf, &self.sequence)?;
@@ -133,11 +132,9 @@ impl<'a> Encoder<Row> for ComparableInternalKey<'a> {
     fn estimate_encoded_size(&self, value: &Row) -> usize {
         let encoder = MemComparable;
         let mut total_len = 0;
-        for idx in 0..self.schema.num_key_columns() {
-            // Size of each column in primary key
-            total_len += encoder.estimate_encoded_size(&value[idx]);
+        for idx in self.schema.primary_key_indexes() {
+            total_len += encoder.estimate_encoded_size(&value[*idx]);
         }
-        // The size of sequence
         total_len += KEY_SEQUENCE_BYTES_LEN;
 
         total_len
diff --git a/analytic_engine/src/meta/meta_update.rs b/analytic_engine/src/meta/meta_update.rs
index 601810e3ea..6ea0749591 100644
--- a/analytic_engine/src/meta/meta_update.rs
+++ b/analytic_engine/src/meta/meta_update.rs
@@ -226,7 +226,6 @@ impl TryFrom<meta_pb::AddTableMeta> for AddTableMeta {
     fn try_from(src: meta_pb::AddTableMeta) -> Result<Self> {
         let table_schema = src.schema.context(EmptyTableSchema)?;
         let opts = src.options.context(EmptyTableOptions)?;
-
         Ok(Self {
             space_id: src.space_id,
             table_id: TableId::from(src.table_id),
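
The memtable hunks above capture the heart of the change: key encoding no longer assumes the primary key occupies the first num_key_columns positions of a row, and instead walks the schema's index list. A minimal sketch of the pattern, using simplified stand-ins (Row as a vector of i64, big-endian bytes instead of MemComparable) rather than the engine's real types:

    struct Row(Vec<i64>);

    struct Schema {
        primary_key_indexes: Vec<usize>,
    }

    impl Schema {
        fn primary_key_indexes(&self) -> &[usize] {
            &self.primary_key_indexes
        }
    }

    /// Encodes only the columns named by the index list, in index order,
    /// regardless of where they sit in the row.
    fn encode_key(schema: &Schema, row: &Row, buf: &mut Vec<u8>) {
        for idx in schema.primary_key_indexes() {
            buf.extend_from_slice(&row.0[*idx].to_be_bytes());
        }
    }

    fn main() {
        // Primary key is (column 1, column 2) even though column 0 is declared first.
        let schema = Schema { primary_key_indexes: vec![1, 2] };
        let row = Row(vec![42, 7, 9]);
        let mut buf = Vec::new();
        encode_key(&schema, &row, &mut buf);
        assert_eq!(buf.len(), 16); // two 8-byte big-endian key parts
    }

The same iteration drives estimate_encoded_size, so the size estimate and the actual encoding stay consistent by construction.
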
diff --git a/analytic_engine/src/row_iter/dedup.rs b/analytic_engine/src/row_iter/dedup.rs
index cd58b0157f..ecc9657326 100644
--- a/analytic_engine/src/row_iter/dedup.rs
+++ b/analytic_engine/src/row_iter/dedup.rs
@@ -84,8 +84,8 @@ impl<I: RecordBatchWithKeyIterator> DedupIterator<I> {
         }
 
         // Dedup batch.
-        for col_idx in 0..self.schema.num_key_columns() {
-            let column = record_batch.column(col_idx);
+        for col_idx in self.schema.primary_key_idx() {
+            let column = record_batch.column(*col_idx);
 
             column.dedup(&mut self.selected_rows);
         }
diff --git a/analytic_engine/src/table/mod.rs b/analytic_engine/src/table/mod.rs
index 99597864ba..57ec8565b2 100644
--- a/analytic_engine/src/table/mod.rs
+++ b/analytic_engine/src/table/mod.rs
@@ -139,7 +139,7 @@ impl Table for TableImpl {
     async fn get(&self, request: GetRequest) -> Result<Option<Row>> {
         let schema = request.projected_schema.to_record_schema_with_key();
-        let primary_key_columns = schema.key_columns();
+        let primary_key_columns = &schema.key_columns()[..];
         ensure!(
             primary_key_columns.len() == request.primary_key.len(),
             GetInvalidPrimaryKey {
@@ -199,7 +199,11 @@ impl Table for TableImpl {
                 result_columns.push(col.datum(row_idx));
             }
 
-            if request.primary_key == result_columns[..schema.num_key_columns()] {
+            let mut result_columns_k = vec![];
+            for col_idx in schema.primary_key_idx() {
+                result_columns_k.push(result_columns[*col_idx].clone());
+            }
+            if request.primary_key == result_columns_k {
                 return Ok(Some(Row::from_datums(result_columns)));
             }
             result_columns.clear();
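
get now gathers the primary key datums of each candidate row by index before comparing them with the requested key. The patch clones every selected datum into result_columns_k; a sketch of the same check done by reference (simplified types, String standing in for Datum), which would avoid the per-row clones:

    fn matches_primary_key(
        primary_key_indexes: &[usize],
        request_key: &[String],
        result_columns: &[String],
    ) -> bool {
        primary_key_indexes.len() == request_key.len()
            && primary_key_indexes
                .iter()
                .zip(request_key)
                .all(|(idx, expected)| &result_columns[*idx] == expected)
    }

    fn main() {
        // Row layout is [field1, key1, key2]; the primary key is (key1, key2).
        let row = vec!["f".to_string(), "k1".to_string(), "k2".to_string()];
        let key = vec!["k1".to_string(), "k2".to_string()];
        assert!(matches_primary_key(&[1, 2], &key, &row));
    }
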
diff --git a/common_types/src/schema.rs b/common_types/src/schema.rs
index e808b476f5..6261f650d0 100644
--- a/common_types/src/schema.rs
+++ b/common_types/src/schema.rs
@@ -457,7 +457,7 @@ impl TryFrom for RecordSchema {
 #[derive(Debug, Clone, PartialEq)]
 pub struct RecordSchemaWithKey {
     record_schema: RecordSchema,
-    num_key_columns: usize,
+    primary_key_indexes: Vec<usize>,
 }
 
 impl RecordSchemaWithKey {
@@ -466,7 +466,11 @@ impl RecordSchemaWithKey {
     }
 
     pub fn compare_row<LR: RowView, RR: RowView>(&self, lhs: &LR, rhs: &RR) -> Ordering {
-        compare_row(self.num_key_columns, lhs, rhs)
+        compare_row(&self.primary_key_indexes, lhs, rhs)
+    }
+
+    pub fn primary_key_idx(&self) -> &[usize] {
+        &self.primary_key_indexes
     }
 
     pub fn index_of(&self, name: &str) -> Option<usize> {
@@ -478,8 +482,18 @@ impl RecordSchemaWithKey {
     }
 
-    /// Returns an immutable reference of the key column vector.
-    pub fn key_columns(&self) -> &[ColumnSchema] {
-        &self.columns()[..self.num_key_columns]
+    /// Returns the key columns, cloned out of the schema.
+    pub fn key_columns(&self) -> Vec<ColumnSchema> {
+        self.columns()
+            .iter()
+            .enumerate()
+            .filter_map(|(idx, col)| {
+                if self.primary_key_indexes.contains(&idx) {
+                    Some(col.clone())
+                } else {
+                    None
+                }
+            })
+            .collect()
     }
 
     pub(crate) fn into_record_schema(self) -> RecordSchema {
@@ -489,11 +503,6 @@ impl RecordSchemaWithKey {
     pub fn to_arrow_schema_ref(&self) -> ArrowSchemaRef {
         self.record_schema.to_arrow_schema_ref()
     }
-
-    #[inline]
-    pub fn num_key_columns(&self) -> usize {
-        self.num_key_columns
-    }
 }
 
 /// Compare the two rows.
 ///
@@ -501,15 +510,15 @@ impl RecordSchemaWithKey {
-/// REQUIRES: the two rows must have the same number of key columns as
-/// `num_key_columns`.
+/// REQUIRES: the two rows must contain all the columns selected by
+/// `primary_key_idx`.
 pub fn compare_row<LR: RowView, RR: RowView>(
-    num_key_columns: usize,
+    primary_key_idx: &[usize],
     lhs: &LR,
     rhs: &RR,
 ) -> Ordering {
-    for column_idx in 0..num_key_columns {
+    for column_idx in primary_key_idx {
         // Caller should ensure the row view is valid.
         // TODO(xikai): unwrap may not be a good way to handle the error.
-        let left_datum = lhs.column_by_idx(column_idx);
-        let right_datum = rhs.column_by_idx(column_idx);
+        let left_datum = lhs.column_by_idx(*column_idx);
+        let right_datum = rhs.column_by_idx(*column_idx);
         // The two datums must be of the same kind.
         match left_datum.partial_cmp(&right_datum).unwrap() {
             Ordering::Equal => continue,
@@ -534,8 +543,8 @@ pub struct Schema {
     /// The underlying arrow schema, data type of fields must be supported by
     /// datum
     arrow_schema: ArrowSchemaRef,
-    /// The number of primary key columns
-    num_key_columns: usize,
+    /// The indexes of the primary key columns
+    primary_key_indexes: Vec<usize>,
     /// Index of timestamp key column
     // TODO(yingwen): Maybe we can remove the restriction that timestamp column must exist in
     // schema (mainly for projected schema)
@@ -556,12 +565,12 @@ impl fmt::Debug for Schema {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Schema")
             // arrow_schema is ignored.
-            .field("num_key_columns", &self.num_key_columns)
             .field("timestamp_index", &self.timestamp_index)
             .field("tsid_index", &self.tsid_index)
             .field("enable_tsid_primary_key", &self.enable_tsid_primary_key)
             .field("column_schemas", &self.column_schemas)
             .field("version", &self.version)
+            .field("primary_key_indexes", &self.primary_key_indexes)
             .finish()
     }
 }
@@ -589,13 +598,36 @@ impl Schema {
     }
 
-    /// Returns an immutable reference of the key column vector.
-    pub fn key_columns(&self) -> &[ColumnSchema] {
-        &self.columns()[..self.num_key_columns]
+    /// Returns the key columns (timestamp key included), cloned out of the
+    /// schema.
+    pub fn key_columns(&self) -> Vec<ColumnSchema> {
+        let columns = self.column_schemas.columns();
+        let mut result = vec![];
+        for (idx, col) in columns.iter().enumerate() {
+            // The timestamp column is always part of the key.
+            if idx == self.timestamp_index {
+                result.push(col.clone());
+                continue;
+            }
+
+            if self.primary_key_indexes.contains(&idx) {
+                result.push(col.clone());
+            }
+        }
+        result
     }
 
-    /// Returns an immutable reference of the normal column vector.
-    pub fn normal_columns(&self) -> &[ColumnSchema] {
-        &self.columns()[self.num_key_columns..]
+    /// Returns the normal (non-key) columns, cloned out of the schema.
+    pub fn normal_columns(&self) -> Vec<ColumnSchema> {
+        let columns = self.column_schemas.columns();
+        let mut result = vec![];
+        for (idx, col) in columns.iter().enumerate() {
+            if idx == self.timestamp_index {
+                continue;
+            }
+
+            if !self.primary_key_indexes.contains(&idx) {
+                result.push(col.clone());
+            }
+        }
+        result
     }
 
     /// Returns index of the tsid column.
@@ -617,6 +649,11 @@ impl Schema {
         self.column_schemas.num_columns()
     }
 
+    /// Returns true if the column at `idx` is part of the primary key.
+    pub fn is_primary_key_index(&self, idx: &usize) -> bool {
+        self.primary_key_indexes.contains(idx)
+    }
+
     /// Returns an immutable reference of a specific [ColumnSchema] selected by
     /// name.
     pub fn column_with_name(&self, name: &str) -> Option<&ColumnSchema> {
@@ -652,10 +689,13 @@ impl Schema {
         self.column_schemas.index_of(name)
     }
 
-    /// Returns the number of columns in primary key
-    #[inline]
-    pub fn num_key_columns(&self) -> usize {
-        self.num_key_columns
+    /// Returns the indexes of the primary key columns.
+    pub fn primary_key_indexes(&self) -> &[usize] {
+        &self.primary_key_indexes
+    }
+
+    /// Returns the number of columns in the primary key.
+    pub fn num_primary_key_columns(&self) -> usize {
+        self.primary_key_indexes.len()
     }
 
     /// Get the name of the timestamp column
@@ -700,7 +740,7 @@ impl Schema {
     ///
     /// REQUIRES: the two rows must have the key columns defined by the schema.
     pub fn compare_row<R: RowView>(&self, lhs: &R, rhs: &R) -> Ordering {
-        compare_row(self.num_key_columns, lhs, rhs)
+        compare_row(&self.primary_key_indexes, lhs, rhs)
     }
 
     /// Returns `Ok` if rows with `writer_schema` can write to table with the
@@ -774,7 +814,7 @@ impl Schema {
     pub fn to_record_schema_with_key(&self) -> RecordSchemaWithKey {
         RecordSchemaWithKey {
             record_schema: self.to_record_schema(),
-            num_key_columns: self.num_key_columns,
+            primary_key_indexes: self.primary_key_indexes.clone(),
         }
     }
@@ -783,18 +823,14 @@ impl Schema {
         &self,
         projection: &[usize],
     ) -> RecordSchemaWithKey {
-        let mut columns = Vec::with_capacity(self.num_key_columns);
-        // Keep all key columns in order.
-        for key_column in self.key_columns() {
-            columns.push(key_column.clone());
-        }
-
-        // Collect normal columns needed by the projection.
-        for p in projection {
-            if *p >= self.num_key_columns {
-                // A normal column
-                let normal_column = &self.columns()[*p];
-                columns.push(normal_column.clone());
+        let mut primary_key_indexes = Vec::with_capacity(self.num_primary_key_columns());
+        let mut columns = Vec::with_capacity(self.num_primary_key_columns());
+        for (idx, col) in self.columns().iter().enumerate() {
+            // Key columns are always kept and their indexes are remapped to
+            // positions in the projected column list; normal columns are kept
+            // only if the projection selects them.
+            if self.is_primary_key_index(&idx) {
+                primary_key_indexes.push(columns.len());
+                columns.push(col.clone());
+            } else if projection.contains(&idx) {
+                columns.push(col.clone());
             }
         }
@@ -803,7 +839,7 @@ impl Schema {
 
         RecordSchemaWithKey {
             record_schema,
-            num_key_columns: self.num_key_columns,
+            primary_key_indexes,
         }
     }
@@ -849,12 +885,12 @@ impl TryFrom<common_pb::TableSchema> for Schema {
         let mut builder = Builder::with_capacity(schema.columns.len())
             .version(schema.version)
             .enable_tsid_primary_key(schema.enable_tsid_primary_key);
+        let primary_key_indexes = schema.primary_key_indexes;
 
         for (i, column_schema_pb) in schema.columns.into_iter().enumerate() {
             let column =
                 ColumnSchema::try_from(column_schema_pb).context(ColumnSchemaDeserializeFailed)?;
-
-            if i < schema.num_key_columns as usize {
+            if primary_key_indexes.contains(&(i as u64)) {
                 builder = builder.add_key_column(column)?;
             } else {
                 builder = builder.add_normal_column(column)?;
@@ -873,13 +909,19 @@ impl From<&Schema> for common_pb::TableSchema {
             .map(|v| common_pb::ColumnSchema::from(v.clone()))
             .collect();
 
         common_pb::TableSchema {
-            num_key_columns: schema.num_key_columns as u32,
             timestamp_index: schema.timestamp_index as u32,
             enable_tsid_primary_key: schema.enable_tsid_primary_key,
             version: schema.version,
             columns,
+            primary_key_indexes: schema
+                .primary_key_indexes
+                .iter()
+                .map(|i| *i as u64)
+                .collect(),
         }
     }
 }
@@ -887,8 +929,8 @@ impl From<&Schema> for common_pb::TableSchema {
 #[must_use]
 pub struct Builder {
     columns: Vec<ColumnSchema>,
-    /// The number of primary key columns
-    num_key_columns: usize,
+    /// The indexes of the primary key columns
+    primary_key_indexes: Vec<usize>,
     /// Timestamp column index
     timestamp_index: Option<usize>,
     column_names: HashSet<String>,
@@ -918,7 +960,7 @@ impl Builder {
     pub fn with_capacity(capacity: usize) -> Self {
         Self {
             columns: Vec::with_capacity(capacity),
-            num_key_columns: 0,
+            primary_key_indexes: Vec::new(),
            timestamp_index: None,
             column_names: HashSet::with_capacity(capacity),
             column_ids: HashSet::with_capacity(capacity),
@@ -947,10 +989,11 @@ impl Builder {
                     given_column: column.name,
                 }
             );
-            self.timestamp_index = Some(self.num_key_columns);
+            self.timestamp_index = Some(self.columns.len());
         }
 
-        self.insert_new_key_column(column);
+        self.primary_key_indexes.push(self.columns.len());
+        self.insert_new_column(column);
 
         Ok(self)
     }
@@ -960,7 +1003,7 @@ impl Builder {
         self.may_alloc_column_id(&mut column);
         self.validate_column(&column, false)?;
 
-        self.insert_new_normal_column(column);
+        self.insert_new_column(column);
 
         Ok(self)
     }
@@ -1024,15 +1067,7 @@ impl Builder {
         Ok(())
     }
 
-    fn insert_new_key_column(&mut self, column: ColumnSchema) {
-        self.column_names.insert(column.name.clone());
-        self.column_ids.insert(column.id);
-
-        self.columns.insert(self.num_key_columns, column);
-        self.num_key_columns += 1;
-    }
-
-    fn insert_new_normal_column(&mut self, column: ColumnSchema) {
+    fn insert_new_column(&mut self, column: ColumnSchema) {
         self.column_names.insert(column.name.clone());
         self.column_ids.insert(column.id);
@@ -1062,9 +1097,13 @@ impl Builder {
 
         let column_schemas = Arc::new(ColumnSchemas::new(columns));
 
+        // The arrow schema metadata only stores the number of key columns;
+        // reconstruct the indexes as the first `num_key_columns` positions.
+        let primary_key_indexes = (0..num_key_columns).collect();
         Ok(Schema {
             arrow_schema,
-            num_key_columns,
+            primary_key_indexes,
             timestamp_index,
             tsid_index,
             enable_tsid_primary_key,
@@ -1091,7 +1130,7 @@ impl Builder {
         [
             (
                 ArrowSchemaMetaKey::NumKeyColumns.to_string(),
-                self.num_key_columns.to_string(),
+                self.primary_key_indexes.len().to_string(),
             ),
             (
                 ArrowSchemaMetaKey::TimestampIndex.to_string(),
@@ -1136,10 +1175,11 @@ impl Builder {
     /// Build the schema
     pub fn build(self) -> Result<Schema> {
         let timestamp_index = self.timestamp_index.context(MissingTimestampKey)?;
+
         // The timestamp key column exists, so the key columns cannot be empty.
-        assert!(self.num_key_columns > 0);
+        assert!(!self.primary_key_indexes.is_empty());
 
         if self.enable_tsid_primary_key {
-            ensure!(self.num_key_columns == 2, InvalidTsidSchema);
+            ensure!(self.primary_key_indexes.len() == 2, InvalidTsidSchema);
         }
 
         let tsid_index = Self::find_tsid_index(self.enable_tsid_primary_key, &self.columns)?;
@@ -1149,7 +1189,7 @@ impl Builder {
 
         Ok(Schema {
             arrow_schema: Arc::new(ArrowSchema::new_with_metadata(fields, meta)),
-            num_key_columns: self.num_key_columns,
+            primary_key_indexes: self.primary_key_indexes,
             timestamp_index,
             tsid_index,
             enable_tsid_primary_key: self.enable_tsid_primary_key,
@@ -1268,7 +1308,7 @@ mod tests {
         // Length related test
         assert_eq!(4, schema.columns().len());
         assert_eq!(4, schema.num_columns());
-        assert_eq!(2, schema.num_key_columns());
+        assert_eq!(2, schema.primary_key_indexes.len());
         assert_eq!(1, schema.timestamp_index());
 
         // Test key columns
@@ -1339,12 +1379,12 @@ mod tests {
             .unwrap();
 
         let columns = schema.columns();
-        assert_eq!(2, columns[0].id);
-        assert_eq!("key1", columns[0].name);
-        assert_eq!(3, columns[1].id);
-        assert_eq!("key2", columns[1].name);
-        assert_eq!(1, columns[2].id);
-        assert_eq!("field1", columns[2].name);
+        assert_eq!(1, columns[0].id);
+        assert_eq!("field1", columns[0].name);
+        assert_eq!(2, columns[1].id);
+        assert_eq!("key1", columns[1].name);
+        assert_eq!(3, columns[2].id);
+        assert_eq!("key2", columns[2].name);
         assert_eq!(4, columns[3].id);
         assert_eq!("field2", columns[3].name);
     }
@@ -1510,12 +1550,12 @@ mod tests {
         // Check key1
         assert_eq!("key1", &columns[0].name);
         assert_eq!(2, columns[0].id);
-        // Check key2
-        assert_eq!("key2", &columns[1].name);
-        assert_eq!(6, columns[1].id);
         // Check field1
-        assert_eq!("field1", &columns[2].name);
-        assert_eq!(5, columns[2].id);
+        assert_eq!("field1", &columns[1].name);
+        assert_eq!(5, columns[1].id);
+        // Check key2
+        assert_eq!("key2", &columns[2].name);
+        assert_eq!(6, columns[2].id);
         // Check field2
         assert_eq!("field2", &columns[3].name);
         assert_eq!(7, columns[3].id);
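
The subtlest part of the schema change is project_record_schema_with_key: the indexes stored in the projected RecordSchemaWithKey are positions in the *projected* column list, not in the original schema, which is why the code pushes columns.len() rather than idx. A self-contained sketch of that remapping (plain strings instead of ColumnSchema, not the real types):

    fn project(
        all_columns: &[&str],
        primary_key_indexes: &[usize],
        projection: &[usize],
    ) -> (Vec<String>, Vec<usize>) {
        let mut projected = Vec::new();
        let mut projected_key_indexes = Vec::new();
        for (idx, col) in all_columns.iter().enumerate() {
            if primary_key_indexes.contains(&idx) {
                // Key columns are always kept; record their new position.
                projected_key_indexes.push(projected.len());
                projected.push(col.to_string());
            } else if projection.contains(&idx) {
                projected.push(col.to_string());
            }
        }
        (projected, projected_key_indexes)
    }

    fn main() {
        // Schema [field1, key1, key2, field2], primary key (key1, key2),
        // projection selects only field2.
        let (cols, key_idx) = project(&["field1", "key1", "key2", "field2"], &[1, 2], &[3]);
        assert_eq!(cols, ["key1", "key2", "field2"]);
        // Indexes 1 and 2 in the schema become 0 and 1 in the projection.
        assert_eq!(key_idx, vec![0, 1]);
    }
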
diff --git a/interpreters/src/alter_table.rs b/interpreters/src/alter_table.rs
index acfce81adc..f3ccace49d 100644
--- a/interpreters/src/alter_table.rs
+++ b/interpreters/src/alter_table.rs
@@ -84,16 +84,16 @@ fn build_new_schema(current_schema: &Schema, column_schemas: Vec<ColumnSchema>)
         schema::Builder::with_capacity(current_schema.num_columns() + column_schemas.len())
             // Increment the schema version.
             .version(current_version + 1);
-    // Add existing columns to builder.
-    for key_column in current_schema.key_columns() {
-        builder = builder
-            .add_key_column(key_column.clone())
-            .context(AddColumnSchema)?;
-    }
-    for normal_column in current_schema.normal_columns() {
-        builder = builder
-            .add_normal_column(normal_column.clone())
-            .context(AddColumnSchema)?;
+    // Add existing columns to the builder, keeping declaration order.
+    for (idx, column) in current_schema.columns().iter().enumerate() {
+        if current_schema.is_primary_key_index(&idx) {
+            builder = builder
+                .add_key_column(column.clone())
+                .context(AddColumnSchema)?;
+        } else {
+            builder = builder
+                .add_normal_column(column.clone())
+                .context(AddColumnSchema)?;
+        }
     }
 
     builder = builder
diff --git a/interpreters/src/describe.rs b/interpreters/src/describe.rs
index 5d0fe768f5..f2638e2e2e 100644
--- a/interpreters/src/describe.rs
+++ b/interpreters/src/describe.rs
@@ -40,7 +40,6 @@ impl DescribeInterpreter {
     fn table_ref_to_record_batch(table_ref: TableRef) -> Result<RecordBatch> {
         let table_schema = table_ref.schema();
         let num_columns = table_schema.num_columns();
-        let num_key_columns = table_schema.num_key_columns();
 
         let mut names = Vec::with_capacity(num_columns);
         let mut types = Vec::with_capacity(num_columns);
@@ -50,7 +49,7 @@ impl DescribeInterpreter {
         for (idx, col) in table_schema.columns().iter().enumerate() {
             names.push(col.name.to_string());
             types.push(col.data_type.to_string());
-            is_primary_keys.push(idx < num_key_columns);
+            is_primary_keys.push(table_schema.is_primary_key_index(&idx));
             is_nullables.push(col.is_nullable);
             is_tags.push(col.is_tag);
         }
diff --git a/proto/protos/common.proto b/proto/protos/common.proto
index 19491632fb..a70d09c143 100644
--- a/proto/protos/common.proto
+++ b/proto/protos/common.proto
@@ -44,16 +44,16 @@ message ColumnSchema {
 
 // Table Schema
 message TableSchema {
     // Schema of each column
     repeated ColumnSchema columns = 1;
     // Version of the schema
     uint32 version = 2;
-    // Key column num
-    uint32 num_key_columns = 3;
     // Timestamp index in columns
-    uint32 timestamp_index = 4;
+    uint32 timestamp_index = 3;
     // Enable auto generated tsid as primary key
-    bool enable_tsid_primary_key = 5;
+    bool enable_tsid_primary_key = 4;
+    // Indexes of the primary key columns
+    repeated uint64 primary_key_indexes = 5;
 }
 
 // Time range of [start, end)
diff --git a/server/src/grpc/storage_service/mod.rs b/server/src/grpc/storage_service/mod.rs
index 57822eba4f..e28945e43c 100644
--- a/server/src/grpc/storage_service/mod.rs
+++ b/server/src/grpc/storage_service/mod.rs
@@ -804,7 +804,7 @@ mod tests {
         let schema = schema.unwrap();
         assert_eq!(8, schema.num_columns());
-        assert_eq!(2, schema.num_key_columns());
+        assert_eq!(2, schema.num_primary_key_columns());
         assert_eq!(TIMESTAMP_COLUMN, schema.timestamp_name());
         let tsid = schema.tsid_column();
         assert!(tsid.is_some());
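
is_primary_key_index is now called once per column in DESCRIBE and in schema rebuilds, and each call is a linear scan of the index vector. Primary keys are short, so this hardly matters; if it ever did, a set lookup is a drop-in alternative. primary_flags below is a hypothetical helper, not part of the patch:

    use std::collections::HashSet;

    fn primary_flags(num_columns: usize, primary_key_indexes: &[usize]) -> Vec<bool> {
        // One set build, then O(1) membership tests per column.
        let set: HashSet<usize> = primary_key_indexes.iter().copied().collect();
        (0..num_columns).map(|idx| set.contains(&idx)).collect()
    }

    fn main() {
        // Schema [field1, key1, key2, field2] with primary key (key1, key2).
        assert_eq!(primary_flags(4, &[1, 2]), vec![false, true, true, false]);
    }
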
diff --git a/sql/src/planner.rs b/sql/src/planner.rs
index 970954ec62..acfed7a66d 100644
--- a/sql/src/planner.rs
+++ b/sql/src/planner.rs
@@ -344,6 +344,13 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> {
             .map(|col| Ok((col.name.value.as_str(), parse_column(col)?)))
             .collect::<Result<BTreeMap<_, _>>>()?;
 
+        let name_column_index_map = stmt
+            .columns
+            .iter()
+            .enumerate()
+            .map(|(idx, col)| (col.name.value.as_str(), idx))
+            .collect::<HashMap<_, _>>();
+
         // Tsid column is a reserved column.
         ensure!(
             !name_column_map.contains_key(TSID_COLUMN),
@@ -352,10 +359,14 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> {
             }
         );
 
-        // Find timestamp key and primary key constraint.
-        let mut primary_key_constraint_idx = None;
+        // Find the timestamp key and the primary key constraint.
+        let mut timestamp_column_idx = None;
         let mut timestamp_name = None;
-        for (idx, constraint) in stmt.constraints.iter().enumerate() {
+
+        let mut primary_key_column_idxs = vec![];
+
+        for constraint in stmt.constraints.iter() {
             if let TableConstraint::Unique {
                 columns,
                 is_primary,
@@ -363,10 +374,15 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> {
             } = constraint
             {
                 if *is_primary {
-                    primary_key_constraint_idx = Some(idx);
+                    // Resolve the primary key columns to declaration indexes;
+                    // the builder will later check that the timestamp column
+                    // is in the primary key.
+                    for column in columns {
+                        if let Some(idx) = name_column_index_map.get(&*column.value) {
+                            primary_key_column_idxs.push(*idx);
+                        }
+                    }
                 } else if parser::is_timestamp_key_constraint(constraint) {
                     // Only one timestamp key constraint
-                    ensure!(timestamp_name.is_none(), InvalidTimestampKey);
+                    ensure!(timestamp_column_idx.is_none(), InvalidTimestampKey);
                     // Only one column in constraint
                     ensure!(columns.len() == 1, InvalidTimestampKey);
@@ -379,7 +395,11 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> {
                         timestamp_column.data_type == DatumKind::Timestamp,
                         InvalidTimestampKey
                     );
+
+                    let column_idx = name_column_index_map
+                        .get(name as &str)
+                        .context(TimestampColumnNotFound { name })?;
 
+                    timestamp_column_idx = Some(*column_idx);
                     timestamp_name = Some(name.clone());
                 }
             }
@@ -397,49 +417,39 @@ impl<'a, P: MetaProvider> PlannerDelegate<'a, P> {
             )
         }
 
-        // Build primary key, the builder will check timestamp column is in primary key.
-        if let Some(idx) = primary_key_constraint_idx {
-            // If primary key is already provided, use that primary key.
-            if let TableConstraint::Unique { columns, .. } = &stmt.constraints[idx] {
-                for col in columns {
-                    let key_column = if TSID_COLUMN == col.value {
-                        schema_builder = schema_builder.enable_tsid_primary_key(true);
-                        Self::tsid_column_schema()?
-                    } else {
-                        name_column_map
-                            .remove(&*col.value)
-                            .with_context(|| PrimaryKeyNotFound {
-                                name: col.value.clone(),
-                            })?
-                    };
-                    // The schema builder will checks there is only one timestamp column in primary
-                    // key.
-                    schema_builder = schema_builder
-                        .add_key_column(key_column)
-                        .context(BuildTableSchema)?;
-                }
-            }
-        } else {
-            // If primary key is not set, Use (timestamp, tsid) as primary key.
-            let timestamp_column = name_column_map.remove(timestamp_name.as_str()).context(
-                TimestampColumnNotFound {
-                    name: &timestamp_name,
-                },
-            )?;
-            let column_schema = Self::tsid_column_schema()?;
-            schema_builder = schema_builder
-                .enable_tsid_primary_key(true)
-                .add_key_column(timestamp_column)
-                .context(BuildTableSchema)?
-                .add_key_column(column_schema)
-                .context(BuildTableSchema)?;
-        }
-
-        // The key columns have been consumed.
-        for col in name_column_map.into_values() {
-            schema_builder = schema_builder
-                .add_normal_column(col)
-                .context(BuildTableSchema)?;
+        let timestamp_col_idx = timestamp_column_idx.context(RequireTimestamp)?;
+        // Walk the columns in declaration order and add each one as either a
+        // key column or a normal column.
+        for (idx, col) in stmt.columns.iter().enumerate() {
+            let col_name = col.name.value.as_str();
+            if let Some(col) = name_column_map.remove(col_name) {
+                if !primary_key_column_idxs.is_empty() {
+                    if primary_key_column_idxs.contains(&idx) {
+                        let key_column = if TSID_COLUMN == col.name {
+                            schema_builder = schema_builder.enable_tsid_primary_key(true);
+                            Self::tsid_column_schema()?
+                        } else {
+                            col
+                        };
+                        schema_builder = schema_builder
+                            .add_key_column(key_column)
+                            .context(BuildTableSchema)?;
+                        continue;
+                    }
+                } else if timestamp_col_idx == idx {
+                    // If no primary key is given, use (timestamp, tsid) as the
+                    // primary key.
+                    schema_builder = schema_builder
+                        .enable_tsid_primary_key(true)
+                        .add_key_column(col)
+                        .context(BuildTableSchema)?
+                        .add_key_column(Self::tsid_column_schema()?)
+                        .context(BuildTableSchema)?;
+                    continue;
+                }
+
+                schema_builder = schema_builder
+                    .add_normal_column(col)
+                    .context(BuildTableSchema)?;
+            }
         }
 
         let table_schema = schema_builder.build().context(BuildTableSchema)?;
@@ -1003,7 +1013,6 @@ mod tests {
             if_not_exists: true,
             table: "t",
             table_schema: Schema {
-                num_key_columns: 2,
                 timestamp_index: 1,
                 tsid_index: None,
                 enable_tsid_primary_key: false,
@@ -1102,6 +1111,10 @@ mod tests {
                     ],
                 },
                 version: 1,
+                primary_key_indexes: [
+                    0,
+                    1,
+                ],
             },
             options: {
                 "arena_block_size": "1KB",
@@ -1168,7 +1181,6 @@ mod tests {
                     100,
                 ),
                 schema: Schema {
-                    num_key_columns: 2,
                     timestamp_index: 1,
                     tsid_index: None,
                     enable_tsid_primary_key: false,
@@ -1217,11 +1229,14 @@ mod tests {
                         ],
                     },
                     version: 1,
+                    primary_key_indexes: [
+                        0,
+                        1,
+                    ],
                 },
             },
             rows: RowGroup {
                 schema: Schema {
-                    num_key_columns: 2,
                     timestamp_index: 1,
                     tsid_index: None,
                     enable_tsid_primary_key: false,
@@ -1270,6 +1285,10 @@ mod tests {
                        ],
                     },
                     version: 1,
+                    primary_key_indexes: [
+                        0,
+                        1,
+                    ],
                 },
                 rows: [
                     Row {
@@ -1355,7 +1374,6 @@ mod tests {
                 100,
             ),
             schema: Schema {
-                num_key_columns: 2,
                 timestamp_index: 1,
                 tsid_index: None,
                 enable_tsid_primary_key: false,
@@ -1404,6 +1422,10 @@ mod tests {
                     ],
                 },
                 version: 1,
+                primary_key_indexes: [
+                    0,
+                    1,
+                ],
             },
         },
     },
@@ -1428,7 +1450,6 @@ mod tests {
                 100,
             ),
             schema: Schema {
-                num_key_columns: 2,
                 timestamp_index: 1,
                 tsid_index: None,
                 enable_tsid_primary_key: false,
@@ -1477,6 +1498,10 @@ mod tests {
                     ],
                 },
                 version: 1,
+                primary_key_indexes: [
+                    0,
+                    1,
+                ],
             },
         },
         operations: AddColumn(
@@ -1515,7 +1540,6 @@ mod tests {
                 100,
             ),
             schema: Schema {
-                num_key_columns: 2,
                 timestamp_index: 1,
                 tsid_index: None,
                 enable_tsid_primary_key: false,
@@ -1564,6 +1588,10 @@ mod tests {
                     ],
                 },
                 version: 1,
+                primary_key_indexes: [
+                    0,
+                    1,
+                ],
             },
         },
         operations: ModifySetting(
@@ -1594,7 +1622,6 @@ mod tests {
                 100,
             ),
             schema: Schema {
-                num_key_columns: 2,
                 timestamp_index: 1,
                 tsid_index: None,
                 enable_tsid_primary_key: false,
@@ -1643,6 +1670,10 @@ mod tests {
                     ],
                 },
                 version: 1,
+                primary_key_indexes: [
+                    0,
+                    1,
+                ],
             },
         },
         obj_type: Table,
diff --git a/sql/src/promql/convert.rs b/sql/src/promql/convert.rs
index 930c248367..383ced4e6f 100644
--- a/sql/src/promql/convert.rs
+++ b/sql/src/promql/convert.rs
@@ -34,6 +34,7 @@ use crate::{
 const INIT_LEVEL: usize = 1;
 const DEFAULT_LOOKBACK: i64 = 300_000;
 
+#[allow(clippy::large_enum_variant)]
 #[derive(Debug, Snafu)]
 pub enum Error {
     #[snafu(display("Invalid expr, expected: {}, actual:{:?}", expected, actual))]
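
The planner change follows the same shape as the engine change: resolve the PRIMARY KEY constraint to declaration positions first, then walk the columns once in declaration order. A minimal sketch of the resolution step (columns and primary_key stand in for the parsed statement):

    use std::collections::HashMap;

    fn primary_key_positions(columns: &[&str], primary_key: &[&str]) -> Vec<usize> {
        let by_name: HashMap<&str, usize> = columns
            .iter()
            .enumerate()
            .map(|(idx, name)| (*name, idx))
            .collect();
        // Names that do not resolve are skipped here, mirroring the patch;
        // a stricter version would report them as errors.
        primary_key
            .iter()
            .filter_map(|name| by_name.get(name).copied())
            .collect()
    }

    fn main() {
        let cols = ["field1", "key1", "key2", "field2"];
        assert_eq!(primary_key_positions(&cols, &["key1", "key2"]), vec![1, 2]);
    }
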
diff --git a/tests/cases/local/05_ddl/alter_table.result b/tests/cases/local/05_ddl/alter_table.result
index 316f0f71f8..cf14f52448 100644
--- a/tests/cases/local/05_ddl/alter_table.result
+++ b/tests/cases/local/05_ddl/alter_table.result
@@ -12,8 +12,8 @@ affected_rows: 1
 SELECT * FROM `05_alter_table_t0`;
 
-t,tsid,a,
-Timestamp(Timestamp(1)),Int64(0),Int32(1),
+a,t,tsid,
+Int32(1),Timestamp(Timestamp(1)),Int64(0),
 
 
 ALTER TABLE `05_alter_table_t0` RENAME TO `t1`;
@@ -27,9 +27,9 @@ affected_rows: 0
 DESCRIBE TABLE `05_alter_table_t0`;
 
 name,type,is_primary,is_nullable,is_tag,
+String(StringBytes(b"a")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 String(StringBytes(b"t")),String(StringBytes(b"timestamp")),Boolean(true),Boolean(false),Boolean(false),
 String(StringBytes(b"tsid")),String(StringBytes(b"uint64")),Boolean(true),Boolean(false),Boolean(false),
-String(StringBytes(b"a")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 String(StringBytes(b"b")),String(StringBytes(b"string")),Boolean(false),Boolean(true),Boolean(false),
 
@@ -39,9 +39,9 @@ affected_rows: 1
 SELECT * FROM `05_alter_table_t0`;
 
-t,tsid,a,b,
-Timestamp(Timestamp(1)),Int64(0),Int32(1),Null,
-Timestamp(Timestamp(2)),Int64(0),Int32(2),String(StringBytes(b"2")),
+a,t,tsid,b,
+Int32(1),Timestamp(Timestamp(1)),Int64(0),Null,
+Int32(2),Timestamp(Timestamp(2)),Int64(0),String(StringBytes(b"2")),
 
 
 ALTER TABLE `05_alter_table_t0` DROP COLUMN b;
@@ -51,17 +51,17 @@ Failed to execute query, err: Server(ServerError { code: 500, msg: "Failed to cr
 DESCRIBE TABLE `05_alter_table_t0`;
 
 name,type,is_primary,is_nullable,is_tag,
+String(StringBytes(b"a")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 String(StringBytes(b"t")),String(StringBytes(b"timestamp")),Boolean(true),Boolean(false),Boolean(false),
 String(StringBytes(b"tsid")),String(StringBytes(b"uint64")),Boolean(true),Boolean(false),Boolean(false),
-String(StringBytes(b"a")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 String(StringBytes(b"b")),String(StringBytes(b"string")),Boolean(false),Boolean(true),Boolean(false),
 
 
 SELECT * FROM `05_alter_table_t0`;
 
-t,tsid,a,b,
-Timestamp(Timestamp(1)),Int64(0),Int32(1),Null,
-Timestamp(Timestamp(2)),Int64(0),Int32(2),String(StringBytes(b"2")),
+a,t,tsid,b,
+Int32(1),Timestamp(Timestamp(1)),Int64(0),Null,
+Int32(2),Timestamp(Timestamp(2)),Int64(0),String(StringBytes(b"2")),
 
 
 DROP TABLE `05_alter_table_t0`;
diff --git a/tests/cases/local/05_ddl/create_tables.result b/tests/cases/local/05_ddl/create_tables.result
index f5ec680e7b..99a2790456 100644
--- a/tests/cases/local/05_ddl/create_tables.result
+++ b/tests/cases/local/05_ddl/create_tables.result
@@ -84,15 +84,15 @@ affected_rows: 0
 describe table `05_create_tables_t4`;
 
 name,type,is_primary,is_nullable,is_tag,
+String(StringBytes(b"a")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 String(StringBytes(b"t")),String(StringBytes(b"timestamp")),Boolean(true),Boolean(false),Boolean(false),
 String(StringBytes(b"tsid")),String(StringBytes(b"uint64")),Boolean(true),Boolean(false),Boolean(false),
-String(StringBytes(b"a")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 
 
 show create table `05_create_tables_t4`;
 
 Table,Create Table,
-String(StringBytes(b"05_create_tables_t4")),String(StringBytes(b"CREATE TABLE `05_create_tables_t4` (`t` timestamp NOT NULL, `tsid` uint64 NOT NULL, `a` int, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"05_create_tables_t4")),String(StringBytes(b"CREATE TABLE `05_create_tables_t4` (`a` int, `t` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 CREATE TABLE `05_create_tables_t5`(c1 int, t timestamp NOT NULL TIMESTAMP KEY) ENGINE = Analytic;
 
@@ -102,15 +102,15 @@ affected_rows: 0
 describe table `05_create_tables_t5`;
 
 name,type,is_primary,is_nullable,is_tag,
+String(StringBytes(b"c1")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 String(StringBytes(b"t")),String(StringBytes(b"timestamp")),Boolean(true),Boolean(false),Boolean(false),
 String(StringBytes(b"tsid")),String(StringBytes(b"uint64")),Boolean(true),Boolean(false),Boolean(false),
-String(StringBytes(b"c1")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 
 
 show create table `05_create_tables_t5`;
 
 Table,Create Table,
-String(StringBytes(b"05_create_tables_t5")),String(StringBytes(b"CREATE TABLE `05_create_tables_t5` (`t` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"05_create_tables_t5")),String(StringBytes(b"CREATE TABLE `05_create_tables_t5` (`c1` int, `t` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 CREATE TABLE `05_create_tables_t6`(c1 int, t1 timestamp NOT NULL TIMESTAMP KEY, t2 timestamp NOT NULL TIMESTAMP KEY) ENGINE = Analytic;
 
@@ -124,15 +124,15 @@ affected_rows: 0
 describe table `05_create_tables_t7`;
 
 name,type,is_primary,is_nullable,is_tag,
+String(StringBytes(b"c1")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 String(StringBytes(b"t")),String(StringBytes(b"timestamp")),Boolean(true),Boolean(false),Boolean(false),
 String(StringBytes(b"tsid")),String(StringBytes(b"uint64")),Boolean(true),Boolean(false),Boolean(false),
-String(StringBytes(b"c1")),String(StringBytes(b"int")),Boolean(false),Boolean(true),Boolean(false),
 
 
 show create table `05_create_tables_t7`;
 
 Table,Create Table,
-String(StringBytes(b"05_create_tables_t7")),String(StringBytes(b"CREATE TABLE `05_create_tables_t7` (`t` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int COMMENT 'id', PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"05_create_tables_t7")),String(StringBytes(b"CREATE TABLE `05_create_tables_t7` (`c1` int COMMENT 'id', `t` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 CREATE TABLE `05_create_tables_t8`(c1 int, t1 timestamp NOT NULL TIMESTAMP KEY) ENGINE = Analytic;
 
@@ -142,7 +142,7 @@ affected_rows: 0
 show create table `05_create_tables_t8`;
 
 Table,Create Table,
-String(StringBytes(b"05_create_tables_t8")),String(StringBytes(b"CREATE TABLE `05_create_tables_t8` (`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"05_create_tables_t8")),String(StringBytes(b"CREATE TABLE `05_create_tables_t8` (`c1` int, `t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 drop table `05_create_tables_t8`;
@@ -156,7 +156,7 @@ affected_rows: 0
 show create table `05_create_tables_t8`;
 
 Table,Create Table,
-String(StringBytes(b"05_create_tables_t8")),String(StringBytes(b"CREATE TABLE `05_create_tables_t8` (`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"05_create_tables_t8")),String(StringBytes(b"CREATE TABLE `05_create_tables_t8` (`c1` int, `t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 drop table `05_create_tables_t8`;
@@ -170,7 +170,7 @@ affected_rows: 0
 show create table `05_create_tables_t8`;
 
 Table,Create Table,
-String(StringBytes(b"05_create_tables_t8")),String(StringBytes(b"CREATE TABLE `05_create_tables_t8` (`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='HYBRID', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"05_create_tables_t8")),String(StringBytes(b"CREATE TABLE `05_create_tables_t8` (`c1` int, `t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='HYBRID', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 drop table `05_create_tables_t8`;
@@ -184,7 +184,7 @@ affected_rows: 0
 show create table `05_create_tables_t9`;
 
 Table,Create Table,
-String(StringBytes(b"05_create_tables_t9")),String(StringBytes(b"CREATE TABLE `05_create_tables_t9` (`t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, `c1` int, `c2` bigint DEFAULT 0, `c3` uint32 DEFAULT 1 + 1, `c4` string DEFAULT 'xxx', `c5` uint32 DEFAULT c3 * 2 + 1, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"05_create_tables_t9")),String(StringBytes(b"CREATE TABLE `05_create_tables_t9` (`c1` int, `c2` bigint DEFAULT 0, `c3` uint32 DEFAULT 1 + 1, `c4` string DEFAULT 'xxx', `c5` uint32 DEFAULT c3 * 2 + 1, `t1` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t1,tsid), TIMESTAMP KEY(t1)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 drop table `05_create_tables_t9`;
diff --git a/tests/cases/local/06_show/show_create_table.result b/tests/cases/local/06_show/show_create_table.result
index 73c4f00bbb..2fe5f481e1 100644
--- a/tests/cases/local/06_show/show_create_table.result
+++ b/tests/cases/local/06_show/show_create_table.result
@@ -17,7 +17,7 @@ affected_rows: 0
 SHOW CREATE TABLE `06_show_a`;
 
 Table,Create Table,
-String(StringBytes(b"06_show_a")),String(StringBytes(b"CREATE TABLE `06_show_a` (`t` timestamp NOT NULL, `tsid` uint64 NOT NULL, `a` bigint, `b` int DEFAULT 3, `c` string DEFAULT 'x', `d` smallint, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"06_show_a")),String(StringBytes(b"CREATE TABLE `06_show_a` (`a` bigint, `b` int DEFAULT 3, `c` string DEFAULT 'x', `d` smallint, `t` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 CREATE TABLE `06_show_b` (a bigint, b int null default null, c string, d smallint null, t timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE = Analytic;
 
@@ -27,7 +27,7 @@ affected_rows: 0
 SHOW CREATE TABLE `06_show_b`;
 
 Table,Create Table,
-String(StringBytes(b"06_show_b")),String(StringBytes(b"CREATE TABLE `06_show_b` (`t` timestamp NOT NULL, `tsid` uint64 NOT NULL, `a` bigint, `b` int DEFAULT NULL, `c` string, `d` smallint, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"06_show_b")),String(StringBytes(b"CREATE TABLE `06_show_b` (`a` bigint, `b` int DEFAULT NULL, `c` string, `d` smallint, `t` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 CREATE TABLE `06_show_c` (a int, t timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE = Analytic;
 
@@ -37,7 +37,7 @@ affected_rows: 0
 SHOW CREATE TABLE `06_show_c`;
 
 Table,Create Table,
-String(StringBytes(b"06_show_c")),String(StringBytes(b"CREATE TABLE `06_show_c` (`t` timestamp NOT NULL, `tsid` uint64 NOT NULL, `a` int, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
+String(StringBytes(b"06_show_c")),String(StringBytes(b"CREATE TABLE `06_show_c` (`a` int, `t` timestamp NOT NULL, `tsid` uint64 NOT NULL, PRIMARY KEY(t,tsid), TIMESTAMP KEY(t)) ENGINE=Analytic WITH(arena_block_size='2097152', compaction_strategy='default', compression='ZSTD', enable_ttl='true', num_rows_per_row_group='8192', segment_duration='', storage_format='COLUMNAR', ttl='7d', update_mode='OVERWRITE', write_buffer_size='33554432')")),
 
 
 DROP TABLE `06_show_a`;
diff --git a/tests/cases/local/basic.result b/tests/cases/local/basic.result
index 1cbbe35c7f..a6c89fecd1 100644
--- a/tests/cases/local/basic.result
+++ b/tests/cases/local/basic.result
@@ -8,8 +8,8 @@ affected_rows: 1
 SELECT * FROM demo;
 
-t,tsid,name,value,
-Timestamp(Timestamp(1651737067000)),Int64(-6317898613073581291),String(StringBytes(b"ceresdb")),Double(100.0),
+name,value,t,tsid,
+String(StringBytes(b"ceresdb")),Double(100.0),Timestamp(Timestamp(1651737067000)),Int64(102432447525557625),
 
 
 INSERT INTO demo(t, name, value) VALUES(1651737067001, "ceresdb", 100);
 
@@ -18,9 +18,9 @@ affected_rows: 1
 SELECT * FROM demo;
 
-t,tsid,name,value,
-Timestamp(Timestamp(1651737067000)),Int64(-6317898613073581291),String(StringBytes(b"ceresdb")),Double(100.0),
-Timestamp(Timestamp(1651737067001)),Int64(-6317898613073581291),String(StringBytes(b"ceresdb")),Double(100.0),
+name,value,t,tsid,
+String(StringBytes(b"ceresdb")),Double(100.0),Timestamp(Timestamp(1651737067000)),Int64(102432447525557625),
+String(StringBytes(b"ceresdb")),Double(100.0),Timestamp(Timestamp(1651737067001)),Int64(102432447525557625),
 
 
 CREATE TABLE "DeMo"("nAmE" string TAG, value double NOT NULL, t timestamp NOT NULL, TIMESTAMP KEY(t)) ENGINE=Analytic with (enable_ttl='false');
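
All of the expectation churn in the .result files traces back to one behavioral change: the schema builder stops moving key columns to the front and records their positions instead, so SELECT * and SHOW CREATE TABLE now reflect declaration order. A condensed sketch of the builder under that rule (simplified types; the real builder also validates timestamps, tsid, and duplicate columns):

    #[derive(Debug)]
    struct Schema {
        columns: Vec<String>,
        primary_key_indexes: Vec<usize>,
    }

    struct Builder {
        columns: Vec<String>,
        primary_key_indexes: Vec<usize>,
    }

    impl Builder {
        fn new() -> Self {
            Self { columns: Vec::new(), primary_key_indexes: Vec::new() }
        }

        fn add_key_column(mut self, name: &str) -> Self {
            // Record the key position, but append in declaration order.
            self.primary_key_indexes.push(self.columns.len());
            self.columns.push(name.to_string());
            self
        }

        fn add_normal_column(mut self, name: &str) -> Self {
            self.columns.push(name.to_string());
            self
        }

        fn build(self) -> Schema {
            Schema {
                columns: self.columns,
                primary_key_indexes: self.primary_key_indexes,
            }
        }
    }

    fn main() {
        // CREATE TABLE t (a int, t timestamp, tsid uint64, PRIMARY KEY(t, tsid), ...)
        let schema = Builder::new()
            .add_normal_column("a")
            .add_key_column("t")
            .add_key_column("tsid")
            .build();
        // SELECT * now yields a, t, tsid: declaration order is preserved.
        assert_eq!(schema.columns, ["a", "t", "tsid"]);
        assert_eq!(schema.primary_key_indexes, vec![1, 2]);
    }
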