diff --git a/docs/generated/sql/bnf/BUILD.bazel b/docs/generated/sql/bnf/BUILD.bazel
index 4b3e3390dfaf..f49cf804096c 100644
--- a/docs/generated/sql/bnf/BUILD.bazel
+++ b/docs/generated/sql/bnf/BUILD.bazel
@@ -77,7 +77,7 @@ FILES = [
"check_table_level",
"close_cursor_stmt",
"col_qualification",
- "column_def",
+ "column_table_def",
"comment",
"commit_transaction",
"copy_from_stmt",
diff --git a/docs/generated/sql/bnf/alter_table_partition_by.bnf b/docs/generated/sql/bnf/alter_table_partition_by.bnf
index b11ee35dc480..a828c0ce3a27 100644
--- a/docs/generated/sql/bnf/alter_table_partition_by.bnf
+++ b/docs/generated/sql/bnf/alter_table_partition_by.bnf
@@ -1,3 +1,3 @@
alter_onetable_stmt ::=
- 'ALTER' 'TABLE' table_name 'PARTITION' 'ALL' 'BY' partition_by_inner ( ( ',' ( 'RENAME' opt_column column_name 'TO' column_name | 'RENAME' 'CONSTRAINT' column_name 'TO' column_name | 'ADD' column_def | 'ADD' 'IF' 'NOT' 'EXISTS' column_def | 'ADD' 'COLUMN' column_def | 'ADD' 'COLUMN' 'IF' 'NOT' 'EXISTS' column_def | 'ALTER' opt_column column_name alter_column_default | 'ALTER' opt_column column_name alter_column_on_update | 'ALTER' opt_column column_name alter_column_visible | 'ALTER' opt_column column_name 'DROP' 'NOT' 'NULL' | 'ALTER' opt_column column_name 'DROP' 'STORED' | 'ALTER' opt_column column_name 'SET' 'NOT' 'NULL' | 'DROP' opt_column 'IF' 'EXISTS' column_name opt_drop_behavior | 'DROP' opt_column column_name opt_drop_behavior | 'ALTER' opt_column column_name opt_set_data 'TYPE' typename opt_collate opt_alter_column_using | 'ADD' table_constraint opt_validate_behavior | 'ADD' 'CONSTRAINT' 'IF' 'NOT' 'EXISTS' constraint_name constraint_elem opt_validate_behavior | 'ALTER' 'PRIMARY' 'KEY' 'USING' 'COLUMNS' '(' index_params ')' opt_hash_sharded opt_with_storage_parameter_list | 'VALIDATE' 'CONSTRAINT' constraint_name | 'DROP' 'CONSTRAINT' 'IF' 'EXISTS' constraint_name opt_drop_behavior | 'DROP' 'CONSTRAINT' constraint_name opt_drop_behavior | 'EXPERIMENTAL_AUDIT' 'SET' audit_mode | ( 'PARTITION' 'BY' partition_by_inner | 'PARTITION' 'ALL' 'BY' partition_by_inner ) | 'SET' '(' storage_parameter_list ')' | 'RESET' '(' storage_parameter_key_list ')' ) ) )*
- | 'ALTER' 'TABLE' 'IF' 'EXISTS' table_name 'PARTITION' 'ALL' 'BY' partition_by_inner ( ( ',' ( 'RENAME' opt_column column_name 'TO' column_name | 'RENAME' 'CONSTRAINT' column_name 'TO' column_name | 'ADD' column_def | 'ADD' 'IF' 'NOT' 'EXISTS' column_def | 'ADD' 'COLUMN' column_def | 'ADD' 'COLUMN' 'IF' 'NOT' 'EXISTS' column_def | 'ALTER' opt_column column_name alter_column_default | 'ALTER' opt_column column_name alter_column_on_update | 'ALTER' opt_column column_name alter_column_visible | 'ALTER' opt_column column_name 'DROP' 'NOT' 'NULL' | 'ALTER' opt_column column_name 'DROP' 'STORED' | 'ALTER' opt_column column_name 'SET' 'NOT' 'NULL' | 'DROP' opt_column 'IF' 'EXISTS' column_name opt_drop_behavior | 'DROP' opt_column column_name opt_drop_behavior | 'ALTER' opt_column column_name opt_set_data 'TYPE' typename opt_collate opt_alter_column_using | 'ADD' table_constraint opt_validate_behavior | 'ADD' 'CONSTRAINT' 'IF' 'NOT' 'EXISTS' constraint_name constraint_elem opt_validate_behavior | 'ALTER' 'PRIMARY' 'KEY' 'USING' 'COLUMNS' '(' index_params ')' opt_hash_sharded opt_with_storage_parameter_list | 'VALIDATE' 'CONSTRAINT' constraint_name | 'DROP' 'CONSTRAINT' 'IF' 'EXISTS' constraint_name opt_drop_behavior | 'DROP' 'CONSTRAINT' constraint_name opt_drop_behavior | 'EXPERIMENTAL_AUDIT' 'SET' audit_mode | ( 'PARTITION' 'BY' partition_by_inner | 'PARTITION' 'ALL' 'BY' partition_by_inner ) | 'SET' '(' storage_parameter_list ')' | 'RESET' '(' storage_parameter_key_list ')' ) ) )*
+ 'ALTER' 'TABLE' table_name 'PARTITION' 'ALL' 'BY' partition_by_inner ( ( ',' ( 'RENAME' opt_column column_name 'TO' column_name | 'RENAME' 'CONSTRAINT' column_name 'TO' column_name | 'ADD' column_table_def | 'ADD' 'IF' 'NOT' 'EXISTS' column_table_def | 'ADD' 'COLUMN' column_table_def | 'ADD' 'COLUMN' 'IF' 'NOT' 'EXISTS' column_table_def | 'ALTER' opt_column column_name alter_column_default | 'ALTER' opt_column column_name alter_column_on_update | 'ALTER' opt_column column_name alter_column_visible | 'ALTER' opt_column column_name 'DROP' 'NOT' 'NULL' | 'ALTER' opt_column column_name 'DROP' 'STORED' | 'ALTER' opt_column column_name 'SET' 'NOT' 'NULL' | 'DROP' opt_column 'IF' 'EXISTS' column_name opt_drop_behavior | 'DROP' opt_column column_name opt_drop_behavior | 'ALTER' opt_column column_name opt_set_data 'TYPE' typename opt_collate opt_alter_column_using | 'ADD' table_constraint opt_validate_behavior | 'ADD' 'CONSTRAINT' 'IF' 'NOT' 'EXISTS' constraint_name constraint_elem opt_validate_behavior | 'ALTER' 'PRIMARY' 'KEY' 'USING' 'COLUMNS' '(' index_params ')' opt_hash_sharded opt_with_storage_parameter_list | 'VALIDATE' 'CONSTRAINT' constraint_name | 'DROP' 'CONSTRAINT' 'IF' 'EXISTS' constraint_name opt_drop_behavior | 'DROP' 'CONSTRAINT' constraint_name opt_drop_behavior | 'EXPERIMENTAL_AUDIT' 'SET' audit_mode | ( 'PARTITION' 'BY' partition_by_inner | 'PARTITION' 'ALL' 'BY' partition_by_inner ) | 'SET' '(' storage_parameter_list ')' | 'RESET' '(' storage_parameter_key_list ')' ) ) )*
+ | 'ALTER' 'TABLE' 'IF' 'EXISTS' table_name 'PARTITION' 'ALL' 'BY' partition_by_inner ( ( ',' ( 'RENAME' opt_column column_name 'TO' column_name | 'RENAME' 'CONSTRAINT' column_name 'TO' column_name | 'ADD' column_table_def | 'ADD' 'IF' 'NOT' 'EXISTS' column_table_def | 'ADD' 'COLUMN' column_table_def | 'ADD' 'COLUMN' 'IF' 'NOT' 'EXISTS' column_table_def | 'ALTER' opt_column column_name alter_column_default | 'ALTER' opt_column column_name alter_column_on_update | 'ALTER' opt_column column_name alter_column_visible | 'ALTER' opt_column column_name 'DROP' 'NOT' 'NULL' | 'ALTER' opt_column column_name 'DROP' 'STORED' | 'ALTER' opt_column column_name 'SET' 'NOT' 'NULL' | 'DROP' opt_column 'IF' 'EXISTS' column_name opt_drop_behavior | 'DROP' opt_column column_name opt_drop_behavior | 'ALTER' opt_column column_name opt_set_data 'TYPE' typename opt_collate opt_alter_column_using | 'ADD' table_constraint opt_validate_behavior | 'ADD' 'CONSTRAINT' 'IF' 'NOT' 'EXISTS' constraint_name constraint_elem opt_validate_behavior | 'ALTER' 'PRIMARY' 'KEY' 'USING' 'COLUMNS' '(' index_params ')' opt_hash_sharded opt_with_storage_parameter_list | 'VALIDATE' 'CONSTRAINT' constraint_name | 'DROP' 'CONSTRAINT' 'IF' 'EXISTS' constraint_name opt_drop_behavior | 'DROP' 'CONSTRAINT' constraint_name opt_drop_behavior | 'EXPERIMENTAL_AUDIT' 'SET' audit_mode | ( 'PARTITION' 'BY' partition_by_inner | 'PARTITION' 'ALL' 'BY' partition_by_inner ) | 'SET' '(' storage_parameter_list ')' | 'RESET' '(' storage_parameter_key_list ')' ) ) )*
diff --git a/docs/generated/sql/bnf/check_column_level.bnf b/docs/generated/sql/bnf/check_column_level.bnf
index 68e1da18ce2f..9cf320eccee9 100644
--- a/docs/generated/sql/bnf/check_column_level.bnf
+++ b/docs/generated/sql/bnf/check_column_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' column_name column_type 'CHECK' '(' check_expr ')' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'
+ 'CREATE' 'TABLE' table_name '(' column_name column_type 'CHECK' '(' check_expr ')' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'
diff --git a/docs/generated/sql/bnf/check_table_level.bnf b/docs/generated/sql/bnf/check_table_level.bnf
index 799741a33b87..4ba8e0c0c48f 100644
--- a/docs/generated/sql/bnf/check_table_level.bnf
+++ b/docs/generated/sql/bnf/check_table_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' constraint_name | ) 'CHECK' '(' check_expr ')' ( table_constraints | ) ')'
+ 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' constraint_name | ) 'CHECK' '(' check_expr ')' ( table_constraints | ) ')'
diff --git a/docs/generated/sql/bnf/column_def.bnf b/docs/generated/sql/bnf/column_table_def.bnf
similarity index 70%
rename from docs/generated/sql/bnf/column_def.bnf
rename to docs/generated/sql/bnf/column_table_def.bnf
index 8da88cb31903..24c5babebeee 100644
--- a/docs/generated/sql/bnf/column_def.bnf
+++ b/docs/generated/sql/bnf/column_table_def.bnf
@@ -1,2 +1,2 @@
-column_def ::=
+column_table_def ::=
column_name typename ( ( col_qualification ) )*
diff --git a/docs/generated/sql/bnf/create_table_stmt.bnf b/docs/generated/sql/bnf/create_table_stmt.bnf
index 46a04cc2cd88..ee40d76b05d7 100644
--- a/docs/generated/sql/bnf/create_table_stmt.bnf
+++ b/docs/generated/sql/bnf/create_table_stmt.bnf
@@ -1,3 +1,3 @@
create_table_stmt ::=
- 'CREATE' opt_persistence_temp_table 'TABLE' table_name '(' ( ( ( ( column_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) ( ( ',' ( column_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) )* ) | ) ')' opt_partition_by_table ( opt_with_storage_parameter_list ) ( 'ON' 'COMMIT' 'PRESERVE' 'ROWS' ) opt_locality
- | 'CREATE' opt_persistence_temp_table 'TABLE' 'IF' 'NOT' 'EXISTS' table_name '(' ( ( ( ( column_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) ( ( ',' ( column_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) )* ) | ) ')' opt_partition_by_table ( opt_with_storage_parameter_list ) ( 'ON' 'COMMIT' 'PRESERVE' 'ROWS' ) opt_locality
+ 'CREATE' opt_persistence_temp_table 'TABLE' table_name '(' ( ( ( ( column_table_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) ( ( ',' ( column_table_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) )* ) | ) ')' opt_partition_by_table ( opt_with_storage_parameter_list ) ( 'ON' 'COMMIT' 'PRESERVE' 'ROWS' ) opt_locality
+ | 'CREATE' opt_persistence_temp_table 'TABLE' 'IF' 'NOT' 'EXISTS' table_name '(' ( ( ( ( column_table_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) ( ( ',' ( column_table_def | index_def | family_def | table_constraint opt_validate_behavior | 'LIKE' table_name like_table_option_list ) ) )* ) | ) ')' opt_partition_by_table ( opt_with_storage_parameter_list ) ( 'ON' 'COMMIT' 'PRESERVE' 'ROWS' ) opt_locality
diff --git a/docs/generated/sql/bnf/default_value_column_level.bnf b/docs/generated/sql/bnf/default_value_column_level.bnf
index 84eee1641203..61b218b3a308 100644
--- a/docs/generated/sql/bnf/default_value_column_level.bnf
+++ b/docs/generated/sql/bnf/default_value_column_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' column_name column_type 'DEFAULT' default_value ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'
+ 'CREATE' 'TABLE' table_name '(' column_name column_type 'DEFAULT' default_value ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'
diff --git a/docs/generated/sql/bnf/foreign_key_column_level.bnf b/docs/generated/sql/bnf/foreign_key_column_level.bnf
index a13da4f43917..d0aa363d3b5d 100644
--- a/docs/generated/sql/bnf/foreign_key_column_level.bnf
+++ b/docs/generated/sql/bnf/foreign_key_column_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' column_name column_type 'REFERENCES' parent_table ( '(' ref_column_name ')' | ) ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'
+ 'CREATE' 'TABLE' table_name '(' column_name column_type 'REFERENCES' parent_table ( '(' ref_column_name ')' | ) ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'
diff --git a/docs/generated/sql/bnf/foreign_key_table_level.bnf b/docs/generated/sql/bnf/foreign_key_table_level.bnf
index 7d11c4e45bb5..fd714bf17eb2 100644
--- a/docs/generated/sql/bnf/foreign_key_table_level.bnf
+++ b/docs/generated/sql/bnf/foreign_key_table_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' constraint_name | ) 'FOREIGN KEY' '(' ( fk_column_name ( ',' fk_column_name )* ) ')' 'REFERENCES' parent_table ( '(' ( ref_column_name ( ',' ref_column_name )* ) ')' | ) ( table_constraints | ) ')'
+ 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' constraint_name | ) 'FOREIGN KEY' '(' ( fk_column_name ( ',' fk_column_name )* ) ')' 'REFERENCES' parent_table ( '(' ( ref_column_name ( ',' ref_column_name )* ) ')' | ) ( table_constraints | ) ')'
diff --git a/docs/generated/sql/bnf/not_null_column_level.bnf b/docs/generated/sql/bnf/not_null_column_level.bnf
index e8e6da436a56..8e32276759c3 100644
--- a/docs/generated/sql/bnf/not_null_column_level.bnf
+++ b/docs/generated/sql/bnf/not_null_column_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' column_name column_type 'NOT NULL' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'
+ 'CREATE' 'TABLE' table_name '(' column_name column_type 'NOT NULL' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'
diff --git a/docs/generated/sql/bnf/primary_key_column_level.bnf b/docs/generated/sql/bnf/primary_key_column_level.bnf
index 1a9d75fb2900..2a59ba4ed18e 100644
--- a/docs/generated/sql/bnf/primary_key_column_level.bnf
+++ b/docs/generated/sql/bnf/primary_key_column_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' column_name column_type 'PRIMARY KEY' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'
+ 'CREATE' 'TABLE' table_name '(' column_name column_type 'PRIMARY KEY' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'
diff --git a/docs/generated/sql/bnf/primary_key_table_level.bnf b/docs/generated/sql/bnf/primary_key_table_level.bnf
index 55c0cd91874f..4b704f8d7bfa 100644
--- a/docs/generated/sql/bnf/primary_key_table_level.bnf
+++ b/docs/generated/sql/bnf/primary_key_table_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' name | ) 'PRIMARY KEY' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'
+ 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' name | ) 'PRIMARY KEY' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'
diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf
index 9ee2f35bf423..f041578916ea 100644
--- a/docs/generated/sql/bnf/stmt_block.bnf
+++ b/docs/generated/sql/bnf/stmt_block.bnf
@@ -2589,7 +2589,7 @@ table_ref ::=
| 'LATERAL' select_with_parens opt_ordinality opt_alias_clause
| joined_table
| '(' joined_table ')' opt_ordinality alias_clause
- | func_table opt_ordinality opt_alias_clause
+ | func_table opt_ordinality opt_func_alias_clause
| 'LATERAL' func_table opt_ordinality opt_alias_clause
| '[' row_source_extension_stmt ']' opt_ordinality opt_alias_clause
@@ -3035,8 +3035,8 @@ bare_col_label ::=
| bare_label_keywords
common_table_expr ::=
- table_alias_name opt_column_list 'AS' '(' preparable_stmt ')'
- | table_alias_name opt_column_list 'AS' materialize_clause '(' preparable_stmt ')'
+ table_alias_name opt_col_def_list_no_types 'AS' '(' preparable_stmt ')'
+ | table_alias_name opt_col_def_list_no_types 'AS' materialize_clause '(' preparable_stmt ')'
index_flags_param_list ::=
( index_flags_param ) ( ( ',' index_flags_param ) )*
@@ -3117,13 +3117,17 @@ joined_table ::=
| table_ref 'NATURAL' 'JOIN' table_ref
alias_clause ::=
- 'AS' table_alias_name opt_column_list
- | table_alias_name opt_column_list
+ 'AS' table_alias_name opt_col_def_list_no_types
+ | table_alias_name opt_col_def_list_no_types
func_table ::=
func_expr_windowless
| 'ROWS' 'FROM' '(' rowsfrom_list ')'
+opt_func_alias_clause ::=
+ func_alias_clause
+ |
+
row_source_extension_stmt ::=
delete_stmt
| explain_stmt
@@ -3183,10 +3187,10 @@ user_priority ::=
alter_table_cmd ::=
'RENAME' opt_column column_name 'TO' column_name
| 'RENAME' 'CONSTRAINT' column_name 'TO' column_name
- | 'ADD' column_def
- | 'ADD' 'IF' 'NOT' 'EXISTS' column_def
- | 'ADD' 'COLUMN' column_def
- | 'ADD' 'COLUMN' 'IF' 'NOT' 'EXISTS' column_def
+ | 'ADD' column_table_def
+ | 'ADD' 'IF' 'NOT' 'EXISTS' column_table_def
+ | 'ADD' 'COLUMN' column_table_def
+ | 'ADD' 'COLUMN' 'IF' 'NOT' 'EXISTS' column_table_def
| 'ALTER' opt_column column_name alter_column_default
| 'ALTER' opt_column column_name alter_column_on_update
| 'ALTER' opt_column column_name alter_column_visible
@@ -3393,7 +3397,7 @@ storage_parameter ::=
storage_parameter_key '=' var_value
table_elem ::=
- column_def
+ column_table_def
| index_def
| family_def
| table_constraint opt_validate_behavior
@@ -3446,6 +3450,10 @@ bare_label_keywords ::=
| 'VOLATILE'
| 'SETOF'
+opt_col_def_list_no_types ::=
+ '(' col_def_list_no_types ')'
+ |
+
materialize_clause ::=
'MATERIALIZED'
| 'NOT' 'MATERIALIZED'
@@ -3510,6 +3518,10 @@ join_qual ::=
rowsfrom_list ::=
( rowsfrom_item ) ( ( ',' rowsfrom_item ) )*
+func_alias_clause ::=
+ 'AS' table_alias_name opt_col_def_list
+ | table_alias_name opt_col_def_list
+
func_arg ::=
func_arg_class param_name func_arg_type
| param_name func_arg_class func_arg_type
@@ -3531,7 +3543,7 @@ opt_column ::=
'COLUMN'
|
-column_def ::=
+column_table_def ::=
column_name typename col_qual_list
alter_column_default ::=
@@ -3720,6 +3732,9 @@ create_as_constraint_elem ::=
func_as ::=
'SCONST'
+col_def_list_no_types ::=
+ ( name ) ( ( ',' name ) )*
+
group_by_item ::=
a_expr
@@ -3731,7 +3746,10 @@ join_outer ::=
|
rowsfrom_item ::=
- func_expr_windowless
+ func_expr_windowless opt_func_alias_clause
+
+opt_col_def_list ::=
+ '(' col_def_list ')'
func_arg_class ::=
'IN'
@@ -3807,6 +3825,9 @@ create_as_col_qualification_elem ::=
create_as_params ::=
( create_as_param ) ( ( ',' create_as_param ) )*
+col_def_list ::=
+ ( col_def ) ( ( ',' col_def ) )*
+
col_qualification ::=
'CONSTRAINT' constraint_name col_qualification_elem
| col_qualification_elem
@@ -3836,6 +3857,10 @@ opt_partition_by ::=
create_as_param ::=
column_name
+col_def ::=
+ name
+ | name typename
+
col_qualification_elem ::=
'NOT' 'NULL'
| 'NULL'
diff --git a/docs/generated/sql/bnf/table_ref.bnf b/docs/generated/sql/bnf/table_ref.bnf
index dd24ece24f8e..711d02ef6ca6 100644
--- a/docs/generated/sql/bnf/table_ref.bnf
+++ b/docs/generated/sql/bnf/table_ref.bnf
@@ -1,9 +1,9 @@
table_ref ::=
- table_name ( '@' index_name | ) ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) ) | )
- | '(' select_stmt ')' ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) ) | )
- | 'LATERAL' '(' select_stmt ')' ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) ) | )
+ table_name ( '@' index_name | ) ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name opt_col_def_list_no_types | table_alias_name opt_col_def_list_no_types ) | )
+ | '(' select_stmt ')' ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name opt_col_def_list_no_types | table_alias_name opt_col_def_list_no_types ) | )
+ | 'LATERAL' '(' select_stmt ')' ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name opt_col_def_list_no_types | table_alias_name opt_col_def_list_no_types ) | )
| joined_table
- | '(' joined_table ')' ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) ) | )
- | func_application ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) ) | )
- | 'LATERAL' func_application ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) ) | )
- | '[' row_source_extension_stmt ']' ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) ) | )
+ | '(' joined_table ')' ( 'WITH' 'ORDINALITY' | ) ( 'AS' table_alias_name opt_col_def_list_no_types | table_alias_name opt_col_def_list_no_types )
+ | func_application ( 'WITH' 'ORDINALITY' | ) opt_func_alias_clause
+ | 'LATERAL' func_application ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name opt_col_def_list_no_types | table_alias_name opt_col_def_list_no_types ) | )
+ | '[' row_source_extension_stmt ']' ( 'WITH' 'ORDINALITY' | ) ( ( 'AS' table_alias_name opt_col_def_list_no_types | table_alias_name opt_col_def_list_no_types ) | )
diff --git a/docs/generated/sql/bnf/unique_column_level.bnf b/docs/generated/sql/bnf/unique_column_level.bnf
index a1621a96ba97..784a79aa2ab8 100644
--- a/docs/generated/sql/bnf/unique_column_level.bnf
+++ b/docs/generated/sql/bnf/unique_column_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' column_name column_type 'UNIQUE' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'
+ 'CREATE' 'TABLE' table_name '(' column_name column_type 'UNIQUE' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'
diff --git a/docs/generated/sql/bnf/unique_table_level.bnf b/docs/generated/sql/bnf/unique_table_level.bnf
index dcd0abc3e5e0..e57b084f555e 100644
--- a/docs/generated/sql/bnf/unique_table_level.bnf
+++ b/docs/generated/sql/bnf/unique_table_level.bnf
@@ -1,2 +1,2 @@
stmt_block ::=
- 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' name | ) 'UNIQUE' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'
+ 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' name | ) 'UNIQUE' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'
diff --git a/docs/generated/sql/bnf/with_clause.bnf b/docs/generated/sql/bnf/with_clause.bnf
index 05b08523f7e4..a512476d702d 100644
--- a/docs/generated/sql/bnf/with_clause.bnf
+++ b/docs/generated/sql/bnf/with_clause.bnf
@@ -1,3 +1,3 @@
with_clause ::=
- 'WITH' ( ( ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) ( ( ',' ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) )* ) ( insert_stmt | update_stmt | delete_stmt | upsert_stmt | select_stmt )
- | 'WITH' 'RECURSIVE' ( ( ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) ( ( ',' ( table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' '(' preparable_stmt ')' | table_alias_name ( '(' ( ( name ) ( ( ',' name ) )* ) ')' | ) 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) )* ) ( insert_stmt | update_stmt | delete_stmt | upsert_stmt | select_stmt )
+ 'WITH' ( ( ( table_alias_name opt_col_def_list_no_types 'AS' '(' preparable_stmt ')' | table_alias_name opt_col_def_list_no_types 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) ( ( ',' ( table_alias_name opt_col_def_list_no_types 'AS' '(' preparable_stmt ')' | table_alias_name opt_col_def_list_no_types 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) )* ) ( insert_stmt | update_stmt | delete_stmt | upsert_stmt | select_stmt )
+ | 'WITH' 'RECURSIVE' ( ( ( table_alias_name opt_col_def_list_no_types 'AS' '(' preparable_stmt ')' | table_alias_name opt_col_def_list_no_types 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) ( ( ',' ( table_alias_name opt_col_def_list_no_types 'AS' '(' preparable_stmt ')' | table_alias_name opt_col_def_list_no_types 'AS' ( 'MATERIALIZED' | 'NOT' 'MATERIALIZED' ) '(' preparable_stmt ')' ) ) )* ) ( insert_stmt | update_stmt | delete_stmt | upsert_stmt | select_stmt )
diff --git a/docs/generated/sql/functions.md b/docs/generated/sql/functions.md
index ef9bb7c02de4..af263a1e8880 100644
--- a/docs/generated/sql/functions.md
+++ b/docs/generated/sql/functions.md
@@ -1270,6 +1270,10 @@ the locality flag on node startup. Returns an error if no region is set.
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value} | Converts the options array format to a table.
diff --git a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go
index 5d209a3cf9ae..eca4f676fe5d 100644
--- a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go
+++ b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go
@@ -643,6 +643,7 @@ func TestClusterRestoreFailCleanup(t *testing.T) {
{"database_role_settings"},
{"external_connections"},
{"locations"},
+ {"privileges"},
{"role_id_seq"},
{"role_members"},
{"role_options"},
@@ -735,6 +736,7 @@ func TestClusterRestoreFailCleanup(t *testing.T) {
{"database_role_settings"},
{"external_connections"},
{"locations"},
+ {"privileges"},
{"role_id_seq"},
{"role_members"},
{"role_options"},
diff --git a/pkg/ccl/backupccl/system_schema.go b/pkg/ccl/backupccl/system_schema.go
index 1e9564199680..7a6100d42443 100644
--- a/pkg/ccl/backupccl/system_schema.go
+++ b/pkg/ccl/backupccl/system_schema.go
@@ -538,7 +538,7 @@ var systemTableBackupConfiguration = map[string]systemBackupConfiguration{
expectMissingInSystemTenant: true,
},
systemschema.SystemPrivilegeTable.GetName(): {
- shouldIncludeInClusterBackup: optOutOfClusterBackup,
+ shouldIncludeInClusterBackup: optInToClusterBackup, // No desc ID columns.
},
systemschema.SystemExternalConnectionsTable.GetName(): {
shouldIncludeInClusterBackup: optInToClusterBackup, // No desc ID columns.
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/external-connections-nodelocal b/pkg/ccl/backupccl/testdata/backup-restore/external-connections-nodelocal
index 46c572330efa..73f1e1c55cde 100644
--- a/pkg/ccl/backupccl/testdata/backup-restore/external-connections-nodelocal
+++ b/pkg/ccl/backupccl/testdata/backup-restore/external-connections-nodelocal
@@ -33,6 +33,7 @@ external_connections table full
foo table full
locations table full
postgres database full
+privileges table full
public schema full
public schema full
public schema full
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/external-connections-userfile b/pkg/ccl/backupccl/testdata/backup-restore/external-connections-userfile
index 838360575e5b..c7fdf073bbd8 100644
--- a/pkg/ccl/backupccl/testdata/backup-restore/external-connections-userfile
+++ b/pkg/ccl/backupccl/testdata/backup-restore/external-connections-userfile
@@ -33,6 +33,7 @@ external_connections table full
foo table full
locations table full
postgres database full
+privileges table full
public schema full
public schema full
public schema full
diff --git a/pkg/ccl/backupccl/testdata/backup-restore/system-privileges-table b/pkg/ccl/backupccl/testdata/backup-restore/system-privileges-table
new file mode 100644
index 000000000000..4589f30f9ebd
--- /dev/null
+++ b/pkg/ccl/backupccl/testdata/backup-restore/system-privileges-table
@@ -0,0 +1,49 @@
+new-server name=s1
+----
+
+exec-sql
+CREATE USER testuser;
+CREATE USER testuser2;
+GRANT SYSTEM MODIFYCLUSTERSETTING, VIEWACTIVITY TO testuser;
+GRANT SELECT ON crdb_internal.tables TO testuser;
+CREATE EXTERNAL CONNECTION foo AS 'nodelocal://0/foo';
+GRANT USAGE ON EXTERNAL CONNECTION foo TO testuser2;
+GRANT SYSTEM VIEWACTIVITYREDACTED TO testuser2;
+GRANT SELECT ON crdb_internal.databases, crdb_internal.tables TO testuser2;
+GRANT ALL ON EXTERNAL CONNECTION foo TO testuser2;
+----
+
+query-sql
+SELECT * FROM system.privileges
+----
+root /externalconn/foo {ALL} {}
+testuser /global/ {MODIFYCLUSTERSETTING,VIEWACTIVITY} {}
+testuser /vtable/crdb_internal/tables {SELECT} {}
+testuser2 /externalconn/foo {ALL} {}
+testuser2 /global/ {VIEWACTIVITYREDACTED} {}
+testuser2 /vtable/crdb_internal/databases {SELECT} {}
+testuser2 /vtable/crdb_internal/tables {SELECT} {}
+
+exec-sql
+BACKUP INTO 'nodelocal://0/test/'
+----
+
+# Start a new cluster with the same IO dir.
+new-server name=s2 share-io-dir=s1
+----
+
+# Restore into the new cluster.
+exec-sql server=s2
+RESTORE FROM LATEST IN 'nodelocal://0/test/'
+----
+
+query-sql server=s2
+SELECT * FROM system.privileges
+----
+root /externalconn/foo {ALL} {}
+testuser /global/ {MODIFYCLUSTERSETTING,VIEWACTIVITY} {}
+testuser /vtable/crdb_internal/tables {SELECT} {}
+testuser2 /externalconn/foo {ALL} {}
+testuser2 /global/ {VIEWACTIVITYREDACTED} {}
+testuser2 /vtable/crdb_internal/databases {SELECT} {}
+testuser2 /vtable/crdb_internal/tables {SELECT} {}
diff --git a/pkg/cmd/docgen/diagrams.go b/pkg/cmd/docgen/diagrams.go
index eec70929c836..82b42685be79 100644
--- a/pkg/cmd/docgen/diagrams.go
+++ b/pkg/cmd/docgen/diagrams.go
@@ -342,7 +342,7 @@ var specs = []stmtSpec{
{
name: "add_column",
stmt: "alter_onetable_stmt",
- inline: []string{"alter_table_cmds", "alter_table_cmd", "column_def", "col_qual_list"},
+ inline: []string{"alter_table_cmds", "alter_table_cmd", "column_table_def", "col_qual_list"},
regreplace: map[string]string{
` \( \( col_qualification \) \)\* .*`: `( ( col_qualification ) )*`,
},
@@ -499,7 +499,7 @@ var specs = []stmtSpec{
{
name: "alter_table",
stmt: "alter_onetable_stmt",
- inline: []string{"alter_table_cmds", "alter_table_cmd", "column_def", "opt_drop_behavior", "alter_column_default", "opt_column", "opt_set_data", "table_constraint", "opt_collate", "opt_alter_column_using"},
+ inline: []string{"alter_table_cmds", "alter_table_cmd", "column_table_def", "opt_drop_behavior", "alter_column_default", "opt_column", "opt_set_data", "table_constraint", "opt_collate", "opt_alter_column_using"},
replace: map[string]string{
"'VALIDATE' 'CONSTRAINT' name": "",
"opt_validate_behavior": "",
@@ -593,18 +593,18 @@ var specs = []stmtSpec{
{
name: "check_column_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'CHECK' '(' check_expr ')' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'CHECK' '(' check_expr ')' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"},
unlink: []string{"table_name", "column_name", "column_type", "check_expr", "column_constraints", "table_constraints"},
},
{
name: "check_table_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' constraint_name | ) 'CHECK' '(' check_expr ')' ( table_constraints | ) ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' constraint_name | ) 'CHECK' '(' check_expr ')' ( table_constraints | ) ')'"},
unlink: []string{"table_name", "check_expr", "table_constraints"},
},
{
- name: "column_def",
- stmt: "column_def",
+ name: "column_table_def",
+ stmt: "column_table_def",
inline: []string{"col_qual_list"},
},
{
@@ -771,7 +771,7 @@ var specs = []stmtSpec{
name: "default_value_column_level",
stmt: "stmt_block",
replace: map[string]string{
- " stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'DEFAULT' default_value ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'",
+ " stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'DEFAULT' default_value ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'",
},
unlink: []string{"table_name", "column_name", "column_type", "default_value", "table_constraints"},
},
@@ -946,13 +946,13 @@ var specs = []stmtSpec{
{
name: "foreign_key_column_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'REFERENCES' parent_table ( '(' ref_column_name ')' | ) ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'REFERENCES' parent_table ( '(' ref_column_name ')' | ) ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"},
unlink: []string{"table_name", "column_name", "column_type", "parent_table", "table_constraints"},
},
{
name: "foreign_key_table_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' constraint_name | ) 'FOREIGN KEY' '(' ( fk_column_name ( ',' fk_column_name )* ) ')' 'REFERENCES' parent_table ( '(' ( ref_column_name ( ',' ref_column_name )* ) ')' | ) ( table_constraints | ) ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' constraint_name | ) 'FOREIGN KEY' '(' ( fk_column_name ( ',' fk_column_name )* ) ')' 'REFERENCES' parent_table ( '(' ( ref_column_name ( ',' ref_column_name )* ) ')' | ) ( table_constraints | ) ')'"},
unlink: []string{"table_name", "column_name", "parent_table", "table_constraints"},
},
{
@@ -1032,7 +1032,7 @@ var specs = []stmtSpec{
{
name: "not_null_column_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'NOT NULL' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'NOT NULL' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"},
unlink: []string{"table_name", "column_name", "column_type", "table_constraints"},
},
{
@@ -1054,13 +1054,13 @@ var specs = []stmtSpec{
{
name: "primary_key_column_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'PRIMARY KEY' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'PRIMARY KEY' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"},
unlink: []string{"table_name", "column_name", "column_type", "table_constraints"},
},
{
name: "primary_key_table_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' name | ) 'PRIMARY KEY' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' name | ) 'PRIMARY KEY' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'"},
unlink: []string{"table_name", "column_name", "table_constraints"},
},
{
@@ -1465,13 +1465,13 @@ var specs = []stmtSpec{
{
name: "unique_column_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'UNIQUE' ( column_constraints | ) ( ',' ( column_def ( ',' column_def )* ) | ) ( table_constraints | ) ')' ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' column_name column_type 'UNIQUE' ( column_constraints | ) ( ',' ( column_table_def ( ',' column_table_def )* ) | ) ( table_constraints | ) ')' ')'"},
unlink: []string{"table_name", "column_name", "column_type", "table_constraints"},
},
{
name: "unique_table_level",
stmt: "stmt_block",
- replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_def ( ',' column_def )* ) ( 'CONSTRAINT' name | ) 'UNIQUE' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'"},
+ replace: map[string]string{" stmt": " 'CREATE' 'TABLE' table_name '(' ( column_table_def ( ',' column_table_def )* ) ( 'CONSTRAINT' name | ) 'UNIQUE' '(' ( column_name ( ',' column_name )* ) ')' ( table_constraints | ) ')'"},
unlink: []string{"table_name", "check_expr", "table_constraints"},
},
{
diff --git a/pkg/gen/bnf.bzl b/pkg/gen/bnf.bzl
index 53ad4f3c63c6..9cc4a3029a9f 100644
--- a/pkg/gen/bnf.bzl
+++ b/pkg/gen/bnf.bzl
@@ -77,7 +77,7 @@ BNF_SRCS = [
"//docs/generated/sql/bnf:check_table_level.bnf",
"//docs/generated/sql/bnf:close_cursor_stmt.bnf",
"//docs/generated/sql/bnf:col_qualification.bnf",
- "//docs/generated/sql/bnf:column_def.bnf",
+ "//docs/generated/sql/bnf:column_table_def.bnf",
"//docs/generated/sql/bnf:comment.bnf",
"//docs/generated/sql/bnf:commit_transaction.bnf",
"//docs/generated/sql/bnf:copy_from_stmt.bnf",
diff --git a/pkg/gen/diagrams.bzl b/pkg/gen/diagrams.bzl
index de0cdeddd2f4..683246ba0b66 100644
--- a/pkg/gen/diagrams.bzl
+++ b/pkg/gen/diagrams.bzl
@@ -76,7 +76,7 @@ DIAGRAMS_SRCS = [
"//docs/generated/sql/bnf:check_table_level.html",
"//docs/generated/sql/bnf:close_cursor.html",
"//docs/generated/sql/bnf:col_qualification.html",
- "//docs/generated/sql/bnf:column_def.html",
+ "//docs/generated/sql/bnf:column_table_def.html",
"//docs/generated/sql/bnf:comment.html",
"//docs/generated/sql/bnf:commit_transaction.html",
"//docs/generated/sql/bnf:copy_from.html",
diff --git a/pkg/gen/docs.bzl b/pkg/gen/docs.bzl
index dcd3cce1ef44..98dace1220e5 100644
--- a/pkg/gen/docs.bzl
+++ b/pkg/gen/docs.bzl
@@ -89,7 +89,7 @@ DOCS_SRCS = [
"//docs/generated/sql/bnf:check_table_level.bnf",
"//docs/generated/sql/bnf:close_cursor_stmt.bnf",
"//docs/generated/sql/bnf:col_qualification.bnf",
- "//docs/generated/sql/bnf:column_def.bnf",
+ "//docs/generated/sql/bnf:column_table_def.bnf",
"//docs/generated/sql/bnf:comment.bnf",
"//docs/generated/sql/bnf:commit_transaction.bnf",
"//docs/generated/sql/bnf:copy_from_stmt.bnf",
diff --git a/pkg/internal/sqlsmith/relational.go b/pkg/internal/sqlsmith/relational.go
index 2089eeb0a98f..509c0a3ca3ee 100644
--- a/pkg/internal/sqlsmith/relational.go
+++ b/pkg/internal/sqlsmith/relational.go
@@ -478,11 +478,11 @@ func (s *Smither) makeWith() (*tree.With, tableRefs) {
}
alias := s.name("with")
tblName := tree.NewUnqualifiedTableName(alias)
- cols := make(tree.NameList, len(stmtRefs))
+ cols := make(tree.ColumnDefList, len(stmtRefs))
defs := make([]*tree.ColumnTableDef, len(stmtRefs))
for i, r := range stmtRefs {
var err error
- cols[i] = r.item.ColumnName
+ cols[i].Name = r.item.ColumnName
defs[i], err = tree.NewColumnTableDef(r.item.ColumnName, r.typ, false /* isSerial */, nil)
if err != nil {
panic(err)
@@ -561,15 +561,15 @@ func makeSelectTable(s *Smither, refs colRefs, forJoin bool) (tree.TableExpr, co
}
table := s.name("tab")
- names := make(tree.NameList, len(stmtRefs))
+ names := make(tree.ColumnDefList, len(stmtRefs))
clauseRefs := make(colRefs, len(stmtRefs))
for i, ref := range stmtRefs {
- names[i] = s.name("col")
+ names[i].Name = s.name("col")
clauseRefs[i] = &colRef{
typ: ref.typ,
item: tree.NewColumnItem(
tree.NewUnqualifiedTableName(table),
- names[i],
+ names[i].Name,
),
}
}
@@ -1163,15 +1163,15 @@ func makeValues(s *Smither, desiredTypes []*types.T, refs colRefs) (tree.TableEx
values.Rows[i] = tuple
}
table := s.name("tab")
- names := make(tree.NameList, len(desiredTypes))
+ names := make(tree.ColumnDefList, len(desiredTypes))
valuesRefs := make(colRefs, len(desiredTypes))
for i, typ := range desiredTypes {
- names[i] = s.name("col")
+ names[i].Name = s.name("col")
valuesRefs[i] = &colRef{
typ: typ,
item: tree.NewColumnItem(
tree.NewUnqualifiedTableName(table),
- names[i],
+ names[i].Name,
),
}
}
diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go
index 28a94c4a3712..4c93debcb33e 100644
--- a/pkg/sql/distsql_physical_planner.go
+++ b/pkg/sql/distsql_physical_planner.go
@@ -3585,9 +3585,10 @@ func createProjectSetSpec(
planCtx *PlanningCtx, n *projectSetPlanningInfo, indexVarMap []int,
) (*execinfrapb.ProjectSetSpec, error) {
spec := execinfrapb.ProjectSetSpec{
- Exprs: make([]execinfrapb.Expression, len(n.exprs)),
- GeneratedColumns: make([]*types.T, len(n.columns)-n.numColsInSource),
- NumColsPerGen: make([]uint32, len(n.exprs)),
+ Exprs: make([]execinfrapb.Expression, len(n.exprs)),
+ GeneratedColumns: make([]*types.T, len(n.columns)-n.numColsInSource),
+ GeneratedColumnLabels: make([]string, len(n.columns)-n.numColsInSource),
+ NumColsPerGen: make([]uint32, len(n.exprs)),
}
for i, expr := range n.exprs {
var err error
@@ -3598,6 +3599,7 @@ func createProjectSetSpec(
}
for i, col := range n.columns[n.numColsInSource:] {
spec.GeneratedColumns[i] = col.Typ
+ spec.GeneratedColumnLabels[i] = col.Name
}
for i, n := range n.numColsPerGen {
spec.NumColsPerGen[i] = uint32(n)
diff --git a/pkg/sql/execinfrapb/processors_sql.proto b/pkg/sql/execinfrapb/processors_sql.proto
index 684ce304fb83..d9fa0913ab70 100644
--- a/pkg/sql/execinfrapb/processors_sql.proto
+++ b/pkg/sql/execinfrapb/processors_sql.proto
@@ -925,6 +925,10 @@ message ProjectSetSpec {
// The number of columns each expression returns. Same length as exprs.
repeated uint32 num_cols_per_gen = 3;
+
+ // Column labels for the generated values. Needed for some builtin functions
+ // (like record_to_json) that require the column labels to do their jobs.
+ repeated string generated_column_labels = 4;
}
// WindowerSpec is the specification of a processor that performs computations
diff --git a/pkg/sql/logictest/logic.go b/pkg/sql/logictest/logic.go
index bd3ebd724d26..e274ac7bfe4e 100644
--- a/pkg/sql/logictest/logic.go
+++ b/pkg/sql/logictest/logic.go
@@ -2635,7 +2635,8 @@ func (t *logicTest) processSubtest(
// Don't error if --rewrite is specified, since the expected
// results are ignored in that case.
if !*rewriteResultsInTestfiles && len(results) != len(query.colTypes) {
- return errors.Errorf("expected results are invalid: unexpected column count")
+ return errors.Errorf("expected results are invalid: unexpected column count %d != %d (%s)",
+ len(results), len(query.colTypes), results)
}
query.expectedResults = append(query.expectedResults, results...)
}
diff --git a/pkg/sql/logictest/testdata/logic_test/json_builtins b/pkg/sql/logictest/testdata/logic_test/json_builtins
index 0b253ee97af3..4fb2be2daa0a 100644
--- a/pkg/sql/logictest/testdata/logic_test/json_builtins
+++ b/pkg/sql/logictest/testdata/logic_test/json_builtins
@@ -1375,3 +1375,113 @@ SELECT * FROM json_populate_recordset(NULL::testtab, '[{"i": 3, "ia": [1,2,3], "
----
3 {1,2,3} foo {a,b} 2017-01-01 00:00:00 +0000 +0000 {"a": "b", "c": 3, "d": [1, false, true, null, {"1": "2"}]}
NULL NULL NULL NULL NULL NULL
+
+query error invalid non-object argument to json_to_record
+SELECT * FROM json_to_record('3') AS t(a INT)
+
+query error invalid non-object argument to json_to_record
+SELECT * FROM json_to_record('"a"') AS t(a TEXT)
+
+query error invalid non-object argument to json_to_record
+SELECT * FROM json_to_record('null') AS t(a INT)
+
+query error invalid non-object argument to json_to_record
+SELECT * FROM json_to_record('true') AS t(a INT)
+
+query error invalid non-object argument to json_to_record
+SELECT * FROM json_to_record('[1,2]') AS t(a INT)
+
+query error column definition list is required for functions returning \"record\"
+SELECT * FROM json_to_record('{"a": "b"}') AS t(a)
+
+query error column definition list is required for functions returning \"record\"
+SELECT * FROM json_to_record('{"a": "b"}')
+
+# Test that non-record generators don't permit col definition lists (with types).
+query error a column definition list is only allowed for functions returning \"record\"
+SELECT * FROM generate_series(1,10) g(g int)
+
+statement ok
+CREATE TABLE j (j) AS SELECT '{
+ "str": "a",
+ "int": 1,
+ "bool": true,
+ "nul": null,
+ "dec": 2.45,
+ "arrint": [1,2],
+ "arrmixed": [1,2,true],
+ "arrstr": ["a", "b"],
+ "arrbool": [true, false],
+ "obj": {"i": 3, "t": "blah", "z": true}
+ }'::JSONB
+
+statement ok
+INSERT INTO j VALUES('{"str": "zzz"}')
+
+query TIBTFTTTTT
+SELECT t.* FROM j, json_to_record(j.j) AS t(
+ str TEXT,
+ int INT,
+ bool BOOL,
+ nul TEXT,
+ dec DECIMAL,
+ arrint INT[],
+ arrmixed TEXT,
+ arrstr TEXT[],
+ arrbool BOOL[],
+ obj TEXT
+) ORDER BY rowid
+----
+a 1 true NULL 2.45 {1,2} [1, 2, true] {a,b} {t,f} {"i": 3, "t": "blah", "z": true}
+zzz NULL NULL NULL NULL NULL NULL NULL NULL NULL
+
+# Test that mismatched types return an error
+query error could not parse \"true\" as type int
+SELECT t.bool FROM j, json_to_record(j.j) AS t(bool INT)
+
+# But types can be coerced.
+query TT rowsort
+SELECT t.* FROM j, json_to_record(j.j) AS t(int TEXT, bool TEXT)
+----
+1 true
+NULL NULL
+
+# Mixed type arrays
+query error could not parse \"2\" as type bool
+SELECT t.arrmixed FROM j, json_to_record(j.j) AS t(arrmixed BOOL[])
+
+# Record with custom type
+query T rowsort
+SELECT t.obj FROM j, json_to_record(j.j) AS t(obj testtab)
+----
+(3,,blah,,,)
+NULL
+
+# Test json_to_recordset
+query TIBTFTTTTT
+SELECT t.* FROM j, json_to_recordset(j.j || '[]' || j.j) AS t(
+ str TEXT,
+ int INT,
+ bool BOOL,
+ nul TEXT,
+ dec DECIMAL,
+ arrint INT[],
+ arrmixed TEXT,
+ arrstr TEXT[],
+ arrbool BOOL[],
+ obj TEXT
+) ORDER BY rowid
+----
+a 1 true NULL 2.45 {1,2} [1, 2, true] {a,b} {t,f} {"i": 3, "t": "blah", "z": true}
+a 1 true NULL 2.45 {1,2} [1, 2, true] {a,b} {t,f} {"i": 3, "t": "blah", "z": true}
+zzz NULL NULL NULL NULL NULL NULL NULL NULL NULL
+zzz NULL NULL NULL NULL NULL NULL NULL NULL NULL
+
+query TT rowsort
+SELECT * FROM jsonb_to_recordset('[{"foo": "bar"}, {"foo": "bar2"}]') AS t(foo TEXT),
+ jsonb_to_recordset('[{"foo": "blah"}, {"foo": "blah2"}]') AS u(foo TEXT)
+----
+bar blah
+bar blah2
+bar2 blah
+bar2 blah2
diff --git a/pkg/sql/logictest/testdata/logic_test/new_schema_changer b/pkg/sql/logictest/testdata/logic_test/new_schema_changer
index 18384d537c96..901df3d8dbee 100644
--- a/pkg/sql/logictest/testdata/logic_test/new_schema_changer
+++ b/pkg/sql/logictest/testdata/logic_test/new_schema_changer
@@ -1414,7 +1414,7 @@ CREATE TABLE public.t (
k INT8 NOT NULL AS (i + 3:::INT8) STORED,
j INT8 NULL DEFAULT 42:::INT8,
CONSTRAINT t_pkey PRIMARY KEY (i ASC),
- UNIQUE INDEX t_expr_key (k ASC)
+ UNIQUE INDEX t_k_key (k ASC)
)
query III rowsort
diff --git a/pkg/sql/logictest/testdata/logic_test/pg_catalog b/pkg/sql/logictest/testdata/logic_test/pg_catalog
index 8897ce56e637..b6d587a2a45f 100644
--- a/pkg/sql/logictest/testdata/logic_test/pg_catalog
+++ b/pkg/sql/logictest/testdata/logic_test/pg_catalog
@@ -4663,7 +4663,7 @@ FROM pg_proc p
JOIN pg_type t ON t.typinput = p.oid
WHERE t.typname = '_int4'
----
-2013 array_in array_in
+2017 array_in array_in
## #16285
## int2vectors should be 0-indexed
@@ -4701,7 +4701,7 @@ SELECT cur_max_builtin_oid FROM [SELECT max(oid) as cur_max_builtin_oid FROM pg_
query TT
SELECT proname, oid FROM pg_catalog.pg_proc WHERE oid = $cur_max_builtin_oid
----
-to_regtype 2033
+to_regtype 2037
## Ensure that unnest works with oid wrapper arrays
diff --git a/pkg/sql/logictest/testdata/logic_test/pgoidtype b/pkg/sql/logictest/testdata/logic_test/pgoidtype
index 5f363e125bbc..aacccd73027f 100644
--- a/pkg/sql/logictest/testdata/logic_test/pgoidtype
+++ b/pkg/sql/logictest/testdata/logic_test/pgoidtype
@@ -83,7 +83,7 @@ WHERE relname = 'pg_constraint'
query OOOO
SELECT 'upper'::REGPROC, 'upper'::REGPROCEDURE, 'pg_catalog.upper'::REGPROCEDURE, 'upper'::REGPROC::OID
----
-upper upper upper 829
+upper upper upper 833
query error invalid function name
SELECT 'invalid.more.pg_catalog.upper'::REGPROCEDURE
@@ -91,7 +91,7 @@ SELECT 'invalid.more.pg_catalog.upper'::REGPROCEDURE
query OOO
SELECT 'upper(int)'::REGPROC, 'upper(int)'::REGPROCEDURE, 'upper(int)'::REGPROC::OID
----
-upper upper 829
+upper upper 833
query error unknown function: blah\(\)
SELECT 'blah(ignored, ignored)'::REGPROC, 'blah(ignored, ignored)'::REGPROCEDURE
diff --git a/pkg/sql/opt/optbuilder/scalar.go b/pkg/sql/opt/optbuilder/scalar.go
index 629cdf23897d..0f7af4e96cbb 100644
--- a/pkg/sql/opt/optbuilder/scalar.go
+++ b/pkg/sql/opt/optbuilder/scalar.go
@@ -535,15 +535,16 @@ func (b *Builder) buildFunction(
panic(err)
}
- if f.ResolvedOverload().Body != "" {
+ overload := f.ResolvedOverload()
+ if overload.Body != "" {
return b.buildUDF(f, def, inScope, outScope, outCol, colRefs)
}
- if f.ResolvedOverload().Class == tree.AggregateClass {
+ if overload.Class == tree.AggregateClass {
panic(errors.AssertionFailedf("aggregate function should have been replaced"))
}
- if f.ResolvedOverload().Class == tree.WindowClass {
+ if overload.Class == tree.WindowClass {
panic(errors.AssertionFailedf("window function should have been replaced"))
}
@@ -556,12 +557,12 @@ func (b *Builder) buildFunction(
out = b.factory.ConstructFunction(args, &memo.FunctionPrivate{
Name: def.Name,
Typ: f.ResolvedType(),
- Properties: &f.ResolvedOverload().FunctionProperties,
- Overload: f.ResolvedOverload(),
+ Properties: &overload.FunctionProperties,
+ Overload: overload,
})
- if f.ResolvedOverload().Class == tree.GeneratorClass {
- return b.finishBuildGeneratorFunction(f, out, inScope, outScope, outCol)
+ if overload.Class == tree.GeneratorClass {
+ return b.finishBuildGeneratorFunction(f, overload, out, inScope, outScope, outCol)
}
// Add a dependency on sequences that are used as a string argument.
diff --git a/pkg/sql/opt/optbuilder/scope.go b/pkg/sql/opt/optbuilder/scope.go
index 558cb6bde44a..3a98d0723af4 100644
--- a/pkg/sql/opt/optbuilder/scope.go
+++ b/pkg/sql/opt/optbuilder/scope.go
@@ -102,6 +102,10 @@ type scope struct {
// scopes.
ctes map[string]*cteSource
+ // alias is set to the last data source alias we've come across, if we
+ // are processing a data source with an alias.
+ alias *tree.AliasClause
+
// context is the current context in the SQL query (e.g., "SELECT" or
// "HAVING"). It is used for error messages and to identify scoping errors
// (e.g., aggregates are not allowed in the FROM clause of their own query
diff --git a/pkg/sql/opt/optbuilder/select.go b/pkg/sql/opt/optbuilder/select.go
index d59ae3b7b976..a4866a6dd958 100644
--- a/pkg/sql/opt/optbuilder/select.go
+++ b/pkg/sql/opt/optbuilder/select.go
@@ -58,6 +58,8 @@ func (b *Builder) buildDataSource(
indexFlags = source.IndexFlags
}
if source.As.Alias != "" {
+ inScope = inScope.push()
+ inScope.alias = &source.As
locking = locking.filter(source.As.Alias)
}
@@ -335,7 +337,7 @@ func (b *Builder) renameSource(as tree.AliasClause, scope *scope) {
// table name.
noColNameSpecified := len(colAlias) == 0
if scope.isAnonymousTable() && noColNameSpecified && scope.singleSRFColumn {
- colAlias = tree.NameList{as.Alias}
+ colAlias = tree.ColumnDefList{tree.ColumnDef{Name: as.Alias}}
}
// If an alias was specified, use that to qualify the column names.
@@ -365,11 +367,11 @@ func (b *Builder) renameSource(as tree.AliasClause, scope *scope) {
if col.visibility != visible {
continue
}
- col.name = scopeColName(colAlias[aliasIdx])
+ col.name = scopeColName(colAlias[aliasIdx].Name)
if isScan {
// Override column metadata alias.
colMeta := b.factory.Metadata().ColumnMeta(col.id)
- colMeta.Alias = string(colAlias[aliasIdx])
+ colMeta.Alias = string(colAlias[aliasIdx].Name)
}
aliasIdx++
}
diff --git a/pkg/sql/opt/optbuilder/srfs.go b/pkg/sql/opt/optbuilder/srfs.go
index 07246e011327..ebdf79d6c509 100644
--- a/pkg/sql/opt/optbuilder/srfs.go
+++ b/pkg/sql/opt/optbuilder/srfs.go
@@ -15,6 +15,8 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
+ "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
+ "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
@@ -143,12 +145,45 @@ func (b *Builder) buildZip(exprs tree.Exprs, inScope *scope) (outScope *scope) {
// (SRF) such as generate_series() or unnest(). It synthesizes new columns in
// outScope for each of the SRF's output columns.
func (b *Builder) finishBuildGeneratorFunction(
- f *tree.FuncExpr, fn opt.ScalarExpr, inScope, outScope *scope, outCol *scopeColumn,
+ f *tree.FuncExpr,
+ def *tree.Overload,
+ fn opt.ScalarExpr,
+ inScope, outScope *scope,
+ outCol *scopeColumn,
) (out opt.ScalarExpr) {
+ lastAlias := inScope.alias
+ if def.ReturnsRecordType {
+ if lastAlias == nil {
+ panic(pgerror.New(pgcode.Syntax, "a column definition list is required for functions returning \"record\""))
+ }
+ } else if lastAlias != nil {
+ // Non-record type return with a table alias that includes types is not
+ // permitted.
+ for _, c := range lastAlias.Cols {
+ if c.Type != nil {
+ panic(pgerror.Newf(pgcode.Syntax, "a column definition list is only allowed for functions returning \"record\""))
+ }
+ }
+ }
// Add scope columns.
if outCol != nil {
// Single-column return type.
b.populateSynthesizedColumn(outCol, fn)
+ } else if def.ReturnsRecordType && lastAlias != nil && len(lastAlias.Cols) > 0 {
+ // If we're building a generator function that returns a record type, like
+ // json_to_record, we need to know the alias that was assigned to the
+ // generator function - without that, we won't know the list of columns
+ // to output.
+ for _, c := range lastAlias.Cols {
+ if c.Type == nil {
+ panic(pgerror.Newf(pgcode.Syntax, "a column definition list is required for functions returning \"record\""))
+ }
+ typ, err := tree.ResolveType(b.ctx, c.Type, b.semaCtx.TypeResolver)
+ if err != nil {
+ panic(err)
+ }
+ b.synthesizeColumn(outScope, scopeColName(c.Name), typ, nil, fn)
+ }
} else {
// Multi-column return type. Use the tuple labels in the SRF's return type
// as column aliases.
diff --git a/pkg/sql/opt/optbuilder/testdata/srfs b/pkg/sql/opt/optbuilder/testdata/srfs
index 612356e2614f..f1c96dfa37f8 100644
--- a/pkg/sql/opt/optbuilder/testdata/srfs
+++ b/pkg/sql/opt/optbuilder/testdata/srfs
@@ -1000,3 +1000,92 @@ project
└── projections
├── (unnest:6).a [as=a:7]
└── (unnest:6).b [as=b:8]
+
+# Test record-returning SRFs, which need to keep track of the most recently set
+# AS clause above them.
+
+exec-ddl
+CREATE TABLE j (j json)
+----
+
+build
+SELECT t.a, t.z FROM j, json_to_record(j.j) AS t(a text, z int)
+----
+project
+ ├── columns: a:5 z:6
+ └── inner-join-apply
+ ├── columns: j:1 rowid:2!null crdb_internal_mvcc_timestamp:3 tableoid:4 a:5 z:6
+ ├── scan j
+ │ └── columns: j:1 rowid:2!null crdb_internal_mvcc_timestamp:3 tableoid:4
+ ├── project-set
+ │ ├── columns: a:5 z:6
+ │ ├── values
+ │ │ └── ()
+ │ └── zip
+ │ └── json_to_record(j:1)
+ └── filters (true)
+
+# Test that outer AS clauses don't get incorrectly propagated down into
+# json_to_record.
+build
+SELECT blah.x || 'foo' FROM (SELECT t.a, t.z FROM j, json_to_record(j.j) AS t(a text, z int)) AS blah(x)
+----
+project
+ ├── columns: "?column?":7
+ ├── project
+ │ ├── columns: a:5 z:6
+ │ └── inner-join-apply
+ │ ├── columns: j:1 rowid:2!null crdb_internal_mvcc_timestamp:3 tableoid:4 a:5 z:6
+ │ ├── scan j
+ │ │ └── columns: j:1 rowid:2!null crdb_internal_mvcc_timestamp:3 tableoid:4
+ │ ├── project-set
+ │ │ ├── columns: a:5 z:6
+ │ │ ├── values
+ │ │ │ └── ()
+ │ │ └── zip
+ │ │ └── json_to_record(j:1)
+ │ └── filters (true)
+ └── projections
+ └── a:5 || 'foo' [as="?column?":7]
+
+# Test that outer AS clauses don't get incorrectly propagated down into
+# json_to_record, even if an inner one is missing.
+build
+SELECT * FROM (SELECT t.a, t.z FROM j, json_to_record(j.j)) AS blah(a)
+----
+error (42601): a column definition list is required for functions returning "record"
+
+# Test that nested AS clauses and json_to_record statements work okay.
+build
+SELECT blah.a || 'foo', * FROM
+(SELECT t.a, t.z, j.x FROM (SELECT * FROM j, json_to_record('{"foo": "bar"}') AS x(foo text)) j(j, x),
+ json_to_record(j.j) AS t(a text, z int)) AS blah(a, z, x)
+----
+project
+ ├── columns: "?column?":8 a:6 z:7 x:5
+ ├── project
+ │ ├── columns: foo:5 a:6 z:7
+ │ └── inner-join-apply
+ │ ├── columns: j:1 foo:5 a:6 z:7
+ │ ├── project
+ │ │ ├── columns: j:1 foo:5
+ │ │ └── inner-join-apply
+ │ │ ├── columns: j:1 rowid:2!null crdb_internal_mvcc_timestamp:3 tableoid:4 foo:5
+ │ │ ├── scan j
+ │ │ │ └── columns: j:1 rowid:2!null crdb_internal_mvcc_timestamp:3 tableoid:4
+ │ │ ├── project-set
+ │ │ │ ├── columns: foo:5
+ │ │ │ ├── values
+ │ │ │ │ └── ()
+ │ │ │ └── zip
+ │ │ │ └── json_to_record('{"foo": "bar"}')
+ │ │ └── filters (true)
+ │ ├── project-set
+ │ │ ├── columns: a:6 z:7
+ │ │ ├── values
+ │ │ │ └── ()
+ │ │ └── zip
+ │ │ └── json_to_record(j:1)
+ │ └── filters (true)
+ └── projections
+ └── a:6 || 'foo' [as="?column?":8]
diff --git a/pkg/sql/opt/optbuilder/with.go b/pkg/sql/opt/optbuilder/with.go
index 2dee1a4bc40c..48f1ef9864bd 100644
--- a/pkg/sql/opt/optbuilder/with.go
+++ b/pkg/sql/opt/optbuilder/with.go
@@ -382,7 +382,7 @@ func (b *Builder) getCTECols(cteScope *scope, name tree.AliasClause) physical.Pr
))
}
for i := range presentation {
- presentation[i].Alias = string(name.Cols[i])
+ presentation[i].Alias = string(name.Cols[i].Name)
}
return presentation
}
diff --git a/pkg/sql/parser/parse_test.go b/pkg/sql/parser/parse_test.go
index 855e4b116623..ea2ca683263d 100644
--- a/pkg/sql/parser/parse_test.go
+++ b/pkg/sql/parser/parse_test.go
@@ -523,8 +523,6 @@ func TestUnimplementedSyntax(t *testing.T) {
{`INSERT INTO foo(a, a.b) VALUES (1,2)`, 27792, ``, ``},
- {`SELECT * FROM ROWS FROM (a(b) AS (d))`, 0, `ROWS FROM with col_def_list`, ``},
-
{`SELECT a(b) 'c'`, 0, `a(...) SCONST`, ``},
{`SELECT UNIQUE (SELECT b)`, 0, `UNIQUE predicate`, ``},
{`SELECT GROUPING (a,b,c)`, 0, `d_expr grouping`, ``},
diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y
index 0ffb76464e29..9a6eecb7a3e3 100644
--- a/pkg/sql/parser/sql.y
+++ b/pkg/sql/parser/sql.y
@@ -359,9 +359,15 @@ func (u *sqlSymUnion) slct() *tree.Select {
func (u *sqlSymUnion) selectStmt() tree.SelectStatement {
return u.val.(tree.SelectStatement)
}
-func (u *sqlSymUnion) colDef() *tree.ColumnTableDef {
+func (u *sqlSymUnion) colTableDef() *tree.ColumnTableDef {
return u.val.(*tree.ColumnTableDef)
}
+func (u *sqlSymUnion) colDef() tree.ColumnDef {
+ return u.val.(tree.ColumnDef)
+}
+func (u *sqlSymUnion) colDefList() tree.ColumnDefList {
+ return u.val.(tree.ColumnDefList)
+}
func (u *sqlSymUnion) constraintDef() tree.ConstraintTableDef {
return u.val.(tree.ConstraintTableDef)
}
@@ -1398,7 +1404,8 @@ func (u *sqlSymUnion) functionObjs() tree.FuncObjs {
%type first_or_next
%type insert_rest
-%type opt_col_def_list
+%type opt_col_def_list col_def_list opt_col_def_list_no_types col_def_list_no_types
+%type col_def
%type <*tree.OnConflict> on_conflict
%type begin_transaction
@@ -1407,7 +1414,7 @@ func (u *sqlSymUnion) functionObjs() tree.FuncObjs {
%type opt_hash_sharded_bucket_count
%type <*tree.ShardedIndexDef> opt_hash_sharded
%type opt_storing
-%type <*tree.ColumnTableDef> column_def
+%type <*tree.ColumnTableDef> column_table_def
%type table_elem
%type where_clause opt_where_clause
%type <*tree.ArraySubscript> array_subscript
@@ -1429,7 +1436,7 @@ func (u *sqlSymUnion) functionObjs() tree.FuncObjs {
%type <[]*tree.When> when_clause_list
%type sub_type
%type numeric_only
-%type alias_clause opt_alias_clause
+%type alias_clause opt_alias_clause func_alias_clause opt_func_alias_clause
%type opt_ordinality opt_compact
%type <*tree.Order> sortby
%type index_elem index_elem_options create_as_param
@@ -2527,24 +2534,24 @@ alter_table_cmd:
$$.val = &tree.AlterTableRenameConstraint{Constraint: tree.Name($3), NewName: tree.Name($5) }
}
// ALTER TABLE ADD
-| ADD column_def
+| ADD column_table_def
{
- $$.val = &tree.AlterTableAddColumn{IfNotExists: false, ColumnDef: $2.colDef()}
+ $$.val = &tree.AlterTableAddColumn{IfNotExists: false, ColumnDef: $2.colTableDef()}
}
// ALTER TABLE ADD IF NOT EXISTS
-| ADD IF NOT EXISTS column_def
+| ADD IF NOT EXISTS column_table_def
{
- $$.val = &tree.AlterTableAddColumn{IfNotExists: true, ColumnDef: $5.colDef()}
+ $$.val = &tree.AlterTableAddColumn{IfNotExists: true, ColumnDef: $5.colTableDef()}
}
// ALTER TABLE ADD COLUMN
-| ADD COLUMN column_def
+| ADD COLUMN column_table_def
{
- $$.val = &tree.AlterTableAddColumn{IfNotExists: false, ColumnDef: $3.colDef()}
+ $$.val = &tree.AlterTableAddColumn{IfNotExists: false, ColumnDef: $3.colTableDef()}
}
// ALTER TABLE ADD COLUMN IF NOT EXISTS
-| ADD COLUMN IF NOT EXISTS column_def
+| ADD COLUMN IF NOT EXISTS column_table_def
{
- $$.val = &tree.AlterTableAddColumn{IfNotExists: true, ColumnDef: $6.colDef()}
+ $$.val = &tree.AlterTableAddColumn{IfNotExists: true, ColumnDef: $6.colTableDef()}
}
// ALTER TABLE ALTER [COLUMN] {SET DEFAULT |DROP DEFAULT}
| ALTER opt_column column_name alter_column_default
@@ -8254,9 +8261,9 @@ table_elem_list:
}
table_elem:
- column_def
+ column_table_def
{
- $$.val = $1.colDef()
+ $$.val = $1.colTableDef()
}
| index_def
| family_def
@@ -8441,7 +8448,7 @@ range_partition:
// Treat SERIAL pseudo-types as separate case so that types.T does not have to
// support them as first-class types (e.g. they should not be supported as CAST
// target types).
-column_def:
+column_table_def:
column_name typename col_qual_list
{
typ := $2.typeReference()
@@ -11234,20 +11241,20 @@ materialize_clause:
}
common_table_expr:
- table_alias_name opt_column_list AS '(' preparable_stmt ')'
+ table_alias_name opt_col_def_list_no_types AS '(' preparable_stmt ')'
{
$$.val = &tree.CTE{
- Name: tree.AliasClause{Alias: tree.Name($1), Cols: $2.nameList() },
+ Name: tree.AliasClause{Alias: tree.Name($1), Cols: $2.colDefList() },
Mtr: tree.MaterializeClause{
Set: false,
},
Stmt: $5.stmt(),
}
}
-| table_alias_name opt_column_list AS materialize_clause '(' preparable_stmt ')'
+| table_alias_name opt_col_def_list_no_types AS materialize_clause '(' preparable_stmt ')'
{
$$.val = &tree.CTE{
- Name: tree.AliasClause{Alias: tree.Name($1), Cols: $2.nameList() },
+ Name: tree.AliasClause{Alias: tree.Name($1), Cols: $2.colDefList() },
Mtr: tree.MaterializeClause{
Materialize: $4.bool(),
Set: true,
@@ -11752,7 +11759,7 @@ table_ref:
{
$$.val = &tree.AliasedTableExpr{Expr: &tree.ParenTableExpr{Expr: $2.tblExpr()}, Ordinality: $4.bool(), As: $5.aliasClause()}
}
-| func_table opt_ordinality opt_alias_clause
+| func_table opt_ordinality opt_func_alias_clause
{
f := $1.tblExpr()
$$.val = &tree.AliasedTableExpr{
@@ -11820,16 +11827,61 @@ rowsfrom_list:
{ $$.val = append($1.exprs(), $3.expr()) }
rowsfrom_item:
- func_expr_windowless opt_col_def_list
+ func_expr_windowless opt_func_alias_clause
{
$$.val = $1.expr()
}
+opt_col_def_list_no_types:
+ '(' col_def_list_no_types ')'
+ {
+ $$.val = $2.colDefList()
+ }
+| /* EMPTY */
+ {
+ $$.val = tree.ColumnDefList(nil)
+ }
+
+col_def_list_no_types:
+ name
+ {
+ $$.val = tree.ColumnDefList{tree.ColumnDef{Name: tree.Name($1)}}
+ }
+| col_def_list_no_types ',' name
+ {
+ $$.val = append($1.colDefList(), tree.ColumnDef{Name: tree.Name($3)})
+ }
+
+
opt_col_def_list:
/* EMPTY */
- { }
-| AS '(' error
- { return unimplemented(sqllex, "ROWS FROM with col_def_list") }
+ {
+ $$.val = tree.ColumnDefList(nil)
+ }
+| '(' col_def_list ')'
+ {
+ $$.val = $2.colDefList()
+ }
+
+col_def_list:
+ col_def
+ {
+ $$.val = tree.ColumnDefList{$1.colDef()}
+ }
+| col_def_list ',' col_def
+ {
+ $$.val = append($1.colDefList(), $3.colDef())
+ }
+
+col_def:
+ name
+ {
+ $$.val = tree.ColumnDef{Name: tree.Name($1)}
+ }
+| name typename
+ {
+ $$.val = tree.ColumnDef{Name: tree.Name($1), Type: $2.typeReference()}
+ }
opt_tableref_col_list:
/* EMPTY */ { $$.val = nil }
@@ -11897,13 +11949,13 @@ joined_table:
}
alias_clause:
- AS table_alias_name opt_column_list
+ AS table_alias_name opt_col_def_list_no_types
{
- $$.val = tree.AliasClause{Alias: tree.Name($2), Cols: $3.nameList()}
+ $$.val = tree.AliasClause{Alias: tree.Name($2), Cols: $3.colDefList()}
}
-| table_alias_name opt_column_list
+| table_alias_name opt_col_def_list_no_types
{
- $$.val = tree.AliasClause{Alias: tree.Name($1), Cols: $2.nameList()}
+ $$.val = tree.AliasClause{Alias: tree.Name($1), Cols: $2.colDefList()}
}
opt_alias_clause:
@@ -11913,6 +11965,23 @@ opt_alias_clause:
$$.val = tree.AliasClause{}
}
+func_alias_clause:
+ AS table_alias_name opt_col_def_list
+ {
+ $$.val = tree.AliasClause{Alias: tree.Name($2), Cols: $3.colDefList()}
+ }
+| table_alias_name opt_col_def_list
+ {
+ $$.val = tree.AliasClause{Alias: tree.Name($1), Cols: $2.colDefList()}
+ }
+
+opt_func_alias_clause:
+ func_alias_clause
+| /* EMPTY */
+ {
+ $$.val = tree.AliasClause{}
+ }
+
as_of_clause:
AS_LA OF SYSTEM TIME a_expr
{
diff --git a/pkg/sql/parser/testdata/select_clauses b/pkg/sql/parser/testdata/select_clauses
index 471144618058..116c288d1265 100644
--- a/pkg/sql/parser/testdata/select_clauses
+++ b/pkg/sql/parser/testdata/select_clauses
@@ -3035,3 +3035,11 @@ SELECT * FROM ((t1 NATURAL JOIN t2 WITH ORDINALITY AS o1)) WITH ORDINALITY AS o2
SELECT (*) FROM ((t1 NATURAL JOIN t2 WITH ORDINALITY AS o1)) WITH ORDINALITY AS o2 -- fully parenthesized
SELECT * FROM ((t1 NATURAL JOIN t2 WITH ORDINALITY AS o1)) WITH ORDINALITY AS o2 -- literals removed
SELECT * FROM ((_ NATURAL JOIN _ WITH ORDINALITY AS _)) WITH ORDINALITY AS _ -- identifiers removed
+
+parse
+SELECT * FROM json_to_record('') AS t(a INT, b TEXT, c foo)
+----
+SELECT * FROM ROWS FROM (json_to_record('')) AS t (a INT8, b STRING, c foo) -- normalized!
+SELECT (*) FROM ROWS FROM ((json_to_record(('')))) AS t (a INT8, b STRING, c foo) -- fully parenthesized
+SELECT * FROM ROWS FROM (json_to_record('_')) AS t (a INT8, b STRING, c foo) -- literals removed
+SELECT * FROM ROWS FROM (json_to_record('')) AS _ (_ INT8, _ STRING, _ foo) -- identifiers removed
diff --git a/pkg/sql/rowexec/project_set.go b/pkg/sql/rowexec/project_set.go
index 9b435d6fdf41..020870c97469 100644
--- a/pkg/sql/rowexec/project_set.go
+++ b/pkg/sql/rowexec/project_set.go
@@ -174,6 +174,11 @@ func (ps *projectSetProcessor) nextInputRow() (
if gen == nil {
gen = builtins.EmptyGenerator()
}
+ if aliasSetter, ok := gen.(eval.AliasAwareValueGenerator); ok {
+ if err := aliasSetter.SetAlias(ps.spec.GeneratedColumns, ps.spec.GeneratedColumnLabels); err != nil {
+ return nil, nil, err
+ }
+ }
if err := gen.Start(ps.Ctx, ps.FlowCtx.Txn); err != nil {
return nil, nil, err
}
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go
index f16de8029fe7..61abef4987b0 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table.go
@@ -168,6 +168,7 @@ func AlterTable(b BuildCtx, n *tree.AlterTable) {
if !ok {
panic(scerrors.NotImplementedError(n))
}
+ b.IncrementSchemaChangeAlterCounter("table", cmd.TelemetryName())
// Invoke the callback function, with the concrete types.
fn := reflect.ValueOf(info.fn)
fn.Call([]reflect.Value{
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go
index ac1ff66f0e6e..01a43c5d905e 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_add_column.go
@@ -40,7 +40,6 @@ import (
func alterTableAddColumn(
b BuildCtx, tn *tree.TableName, tbl *scpb.Table, t *tree.AlterTableAddColumn,
) {
- b.IncrementSchemaChangeAlterCounter("table", "add_column")
d := t.ColumnDef
// We don't support handling zone config related properties for tables, so
// throw an unsupported error.
@@ -350,7 +349,7 @@ func addColumn(b BuildCtx, spec addColumnSpec, n tree.NodeFormatter) (backing *s
// Otherwise, create a new primary index target and swap it with the existing
// primary index.
- out := makePrimaryIndexSpec(b, existing)
+ out := makeIndexSpec(b, existing.TableID, existing.IndexID)
inColumns := make([]indexColumnSpec, len(out.columns)+1)
for i, ic := range out.columns {
inColumns[i] = makeIndexColumnSpec(ic)
@@ -360,10 +359,10 @@ func addColumn(b BuildCtx, spec addColumnSpec, n tree.NodeFormatter) (backing *s
kind: scpb.IndexColumn_STORED,
}
out.apply(b.Drop)
- in, temp := makeSwapPrimaryIndexSpec(b, out, inColumns)
+ in, temp := makeSwapIndexSpec(b, out, out.primary.IndexID, inColumns)
in.apply(b.Add)
temp.apply(b.AddTransient)
- return in.idx
+ return in.primary
}
// handleAddColumnFreshlyAddedPrimaryIndex is used when adding a column to a
@@ -443,7 +442,7 @@ func getNextStoredIndexColumnOrdinal(allTargets ElementResultSet, idx *scpb.Prim
func getImplicitSecondaryIndexName(
b BuildCtx, tbl *scpb.Table, id descpb.IndexID, numImplicitColumns int,
) string {
- elts := b.QueryByID(tbl.TableID)
+ elts := b.QueryByID(tbl.TableID).Filter(notAbsentTargetFilter)
var idx *scpb.Index
scpb.ForEachSecondaryIndex(elts, func(current scpb.Status, target scpb.TargetStatus, e *scpb.SecondaryIndex) {
if e.IndexID == id {
@@ -458,37 +457,24 @@ func getImplicitSecondaryIndexName(
// final word (either "idx" or "key").
segments := make([]string, 0, len(keyColumns)+2)
// Add the table name segment.
- var tblName *scpb.Namespace
- scpb.ForEachNamespace(b, func(current scpb.Status, target scpb.TargetStatus, e *scpb.Namespace) {
- if e.DescriptorID == tbl.TableID {
- tblName = e
- }
- })
+ _, _, tblName := scpb.FindNamespace(elts)
if tblName == nil {
panic(errors.AssertionFailedf("unable to find table name."))
}
segments = append(segments, tblName.Name)
- findColumnNameByID := func(colID descpb.ColumnID) ElementResultSet {
- var columnName *scpb.ColumnName
- scpb.ForEachColumnName(b, func(current scpb.Status, target scpb.TargetStatus, e *scpb.ColumnName) {
- if e.ColumnID == colID {
- columnName = e
- }
- })
- if columnName == nil {
- panic(errors.AssertionFailedf("unable to find column name."))
- }
- return b.ResolveColumn(tbl.TableID, tree.Name(columnName.Name), ResolveParams{})
- }
// Add the key column segments. For inaccessible columns, use "expr" as the
// segment. If there are multiple inaccessible columns, add an incrementing
// integer suffix.
exprCount := 0
for i, n := numImplicitColumns, len(keyColumns); i < n; i++ {
+ colID := keyColumns[i].ColumnID
+ colElts := elts.Filter(hasColumnIDAttrFilter(colID))
+ _, _, col := scpb.FindColumn(colElts)
+ if col == nil {
+ panic(errors.AssertionFailedf("unable to find column %d.", colID))
+ }
var segmentName string
- colElts := findColumnNameByID(keyColumns[i].ColumnID)
- _, _, col := scpb.FindColumnType(colElts)
- if col.ComputeExpr != nil {
+ if col.IsInaccessible {
if exprCount == 0 {
segmentName = "expr"
} else {
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go
index f42528a8a07f..2361a70dde42 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_alter_primary_key.go
@@ -67,10 +67,11 @@ func alterPrimaryKey(b BuildCtx, tn *tree.TableName, tbl *scpb.Table, t alterPri
// TODO (xiang): This section contains all fall-back cases and need to
// be removed to fully support `ALTER PRIMARY KEY`.
- fallBackIfConcurrentSchemaChange(b, tbl.TableID)
+ fallBackIfConcurrentSchemaChange(b, t, tbl.TableID)
fallBackIfRequestToBeSharded(t)
- fallBackIfSecondaryIndexExists(b, tbl.TableID)
- fallBackIfRegionalByRowTable(b, tbl.TableID)
+ fallBackIfSecondaryIndexExists(b, t, tbl.TableID)
+ fallBackIfShardedIndexExists(b, t, tbl.TableID)
+ fallBackIfRegionalByRowTable(b, t, tbl.TableID)
fallBackIfDescColInRowLevelTTLTables(b, tbl.TableID, t)
fallBackIfZoneConfigExists(b, t.n, tbl.TableID)
@@ -90,38 +91,11 @@ func alterPrimaryKey(b BuildCtx, tn *tree.TableName, tbl *scpb.Table, t alterPri
// Handle special case where the old primary key is the hidden rowid column.
// In this case, drop this column if it is not referenced anywhere.
rowidToDrop := getPrimaryIndexDefaultRowIDColumn(b, tbl.TableID, oldPrimaryIndexElem.IndexID)
- if rowidToDrop != nil {
- canBeDropped := true
- walkDropColumnDependencies(b, rowidToDrop, func(e scpb.Element) {
- switch e := e.(type) {
- case *scpb.Column:
- if e.TableID != rowidToDrop.TableID || e.ColumnID != rowidToDrop.ColumnID {
- canBeDropped = false
- }
- case *scpb.ColumnDefaultExpression:
- if e.TableID != rowidToDrop.TableID || e.ColumnID != rowidToDrop.ColumnID {
- canBeDropped = false
- }
- case *scpb.ColumnOnUpdateExpression:
- if e.TableID != rowidToDrop.TableID || e.ColumnID != rowidToDrop.ColumnID {
- canBeDropped = false
- }
- case *scpb.UniqueWithoutIndexConstraint, *scpb.CheckConstraint, *scpb.ForeignKeyConstraint:
- canBeDropped = false
- case *scpb.View, *scpb.Sequence:
- canBeDropped = false
- case *scpb.SecondaryIndex:
- // TODO(postamar): support dropping rowid in the presence of secondary
- // indexes if the column is only present in key suffixes.
- canBeDropped = false
- }
- })
- if !canBeDropped {
- rowidToDrop = nil
- }
+ if !checkIfRowIDColumnCanBeDropped(b, rowidToDrop) {
+ rowidToDrop = nil
}
- out := makePrimaryIndexSpec(b, oldPrimaryIndexElem)
+ out := makeIndexSpec(b, oldPrimaryIndexElem.TableID, oldPrimaryIndexElem.IndexID)
inColumns := make([]indexColumnSpec, 0, len(out.columns))
{
allColumns := getSortedAllColumnIDsInTable(b, tbl.TableID)
@@ -161,16 +135,18 @@ func alterPrimaryKey(b BuildCtx, tn *tree.TableName, tbl *scpb.Table, t alterPri
}
out.apply(b.Drop)
sharding := makeShardedDescriptor(b, t)
+ var sourcePrimaryIndexElem *scpb.PrimaryIndex
if rowidToDrop == nil {
// We're NOT dropping the rowid column => do one primary index swap.
- in, tempIn := makeSwapPrimaryIndexSpec(b, out, inColumns)
- in.idx.Sharding = sharding
+ in, tempIn := makeSwapIndexSpec(b, out, out.primary.IndexID, inColumns)
+ in.primary.Sharding = sharding
if t.Name != "" {
in.name.Name = string(t.Name)
}
in.apply(b.Add)
tempIn.apply(b.AddTransient)
- newPrimaryIndexElem = in.idx
+ newPrimaryIndexElem = in.primary
+ sourcePrimaryIndexElem = in.primary
} else {
// We ARE dropping the rowid column => swap indexes twice and drop column.
unionColumns := append(inColumns[:len(inColumns):len(inColumns)], indexColumnSpec{
@@ -178,20 +154,27 @@ func alterPrimaryKey(b BuildCtx, tn *tree.TableName, tbl *scpb.Table, t alterPri
kind: scpb.IndexColumn_STORED,
})
// Swap once to the new PK but storing rowid.
- union, tempUnion := makeSwapPrimaryIndexSpec(b, out, unionColumns)
- union.idx.Sharding = protoutil.Clone(sharding).(*catpb.ShardedDescriptor)
+ union, tempUnion := makeSwapIndexSpec(b, out, out.primary.IndexID, unionColumns)
+ union.primary.Sharding = protoutil.Clone(sharding).(*catpb.ShardedDescriptor)
union.apply(b.AddTransient)
tempUnion.apply(b.AddTransient)
// Swap again to the final primary index: same PK but NOT storing rowid.
- in, tempIn := makeSwapPrimaryIndexSpec(b, union, inColumns)
+ in, tempIn := makeSwapIndexSpec(b, union, union.primary.IndexID, inColumns)
+ in.primary.Sharding = sharding
if t.Name != "" {
in.name.Name = string(t.Name)
}
- in.idx.Sharding = sharding
in.apply(b.Add)
tempIn.apply(b.AddTransient)
- newPrimaryIndexElem = in.idx
- // Drop the rowid column
+ newPrimaryIndexElem = in.primary
+ sourcePrimaryIndexElem = union.primary
+ }
+
+ // Recreate all secondary indexes.
+ recreateAllSecondaryIndexes(b, tbl, newPrimaryIndexElem, sourcePrimaryIndexElem)
+
+ // Drop the rowid column, if applicable.
+ if rowidToDrop != nil {
elts := b.QueryByID(rowidToDrop.TableID).Filter(hasColumnIDAttrFilter(rowidToDrop.ColumnID))
dropColumn(b, tn, tbl, t.n, rowidToDrop, elts, tree.DropRestrict)
}
@@ -319,7 +302,7 @@ func isNewPrimaryKeySameAsOldPrimaryKey(b BuildCtx, tbl *scpb.Table, t alterPrim
// fallBackIfConcurrentSchemaChange panics with an unimplemented error if
// there are any other concurrent schema change on this table. This is determined
// by searching for any element that is currently not in its terminal status.
-func fallBackIfConcurrentSchemaChange(b BuildCtx, tableID catid.DescID) {
+func fallBackIfConcurrentSchemaChange(b BuildCtx, t alterPrimaryKeySpec, tableID catid.DescID) {
b.QueryByID(tableID).ForEachElementStatus(func(current scpb.Status, target scpb.TargetStatus, e scpb.Element) {
if current != target.Status() {
_, _, ns := scpb.FindNamespace(b.QueryByID(tableID))
@@ -327,8 +310,7 @@ func fallBackIfConcurrentSchemaChange(b BuildCtx, tableID catid.DescID) {
panic(errors.AssertionFailedf("programming error: resolving table %v does not "+
"give a Namespace element", tableID))
}
- panic(scerrors.NotImplementedErrorf(
- nil,
+ panic(scerrors.NotImplementedErrorf(t.n,
"cannot perform a primary key change on %v with other schema changes on %v in the same transaction",
ns.Name, ns.Name))
}
@@ -339,29 +321,55 @@ func fallBackIfConcurrentSchemaChange(b BuildCtx, tableID catid.DescID) {
// if it is requested to be hash-sharded.
func fallBackIfRequestToBeSharded(t alterPrimaryKeySpec) {
if t.Sharded != nil {
- panic(scerrors.NotImplementedErrorf(nil, "ALTER PRIMARY KEY USING HASH is not yet supported."))
+ panic(scerrors.NotImplementedErrorf(t.n, "ALTER PRIMARY KEY USING HASH is not yet supported."))
}
}
// fallBackIfSecondaryIndexExists panics with an unimplemented
// error if there exists secondary indexes on the table, which might
// need to be rewritten.
-func fallBackIfSecondaryIndexExists(b BuildCtx, tableID catid.DescID) {
+func fallBackIfSecondaryIndexExists(b BuildCtx, t alterPrimaryKeySpec, tableID catid.DescID) {
_, _, sie := scpb.FindSecondaryIndex(b.QueryByID(tableID))
if sie != nil {
- panic(scerrors.NotImplementedErrorf(nil, "ALTER PRIMARY KEY on a table with secondary index "+
+ panic(scerrors.NotImplementedErrorf(t.n, "ALTER PRIMARY KEY on a table with secondary index "+
"is not yet supported because they might need to be rewritten."))
}
}
+// fallBackIfShardedIndexExists panics with an unimplemented
+// error if there exist sharded secondary indexes on the table.
+func fallBackIfShardedIndexExists(b BuildCtx, t alterPrimaryKeySpec, tableID catid.DescID) {
+ tableElts := b.QueryByID(tableID).Filter(notAbsentTargetFilter)
+ var hasSecondary bool
+ scpb.ForEachSecondaryIndex(tableElts, func(_ scpb.Status, _ scpb.TargetStatus, idx *scpb.SecondaryIndex) {
+ hasSecondary = true
+ if idx.Sharding != nil {
+ panic(scerrors.NotImplementedErrorf(t.n, "ALTER PRIMARY KEY on a table with sharded secondary "+
+ "indexes is not yet supported."))
+ }
+ })
+ // Primary index sharding only matters if there are secondary indexes: even
+ // if we drop the sharding on the primary, we need to maintain it on the
+ // secondaries if they exist.
+ if !hasSecondary {
+ return
+ }
+ scpb.ForEachPrimaryIndex(tableElts, func(_ scpb.Status, _ scpb.TargetStatus, idx *scpb.PrimaryIndex) {
+ if idx.Sharding != nil {
+ panic(scerrors.NotImplementedErrorf(t.n, "ALTER PRIMARY KEY on a table with sharded primary "+
+ "indexes is not yet supported."))
+ }
+ })
+}
+
// fallBackIfRegionalByRowTable panics with an unimplemented
// error if it's a REGIONAL BY ROW table because we need to
// include the implicit REGION column when constructing the
// new primary key.
-func fallBackIfRegionalByRowTable(b BuildCtx, tableID catid.DescID) {
+func fallBackIfRegionalByRowTable(b BuildCtx, t alterPrimaryKeySpec, tableID catid.DescID) {
_, _, rbrElem := scpb.FindTableLocalityRegionalByRow(b.QueryByID(tableID))
if rbrElem != nil {
- panic(scerrors.NotImplementedErrorf(nil, "ALTER PRIMARY KEY on a REGIONAL BY ROW table "+
+ panic(scerrors.NotImplementedErrorf(t.n, "ALTER PRIMARY KEY on a REGIONAL BY ROW table "+
"is not yet supported."))
}
}
@@ -379,13 +387,13 @@ func fallBackIfDescColInRowLevelTTLTables(b BuildCtx, tableID catid.DescID, t al
// key columns, and there is no inbound/outbound foreign keys.
for _, col := range t.Columns {
if indexColumnDirection(col.Direction) != catpb.IndexColumn_ASC {
- panic(scerrors.NotImplementedErrorf(nil, "non-ascending ordering on PRIMARY KEYs are not supported"))
+ panic(scerrors.NotImplementedErrorf(t.n, "non-ascending ordering on PRIMARY KEYs are not supported"))
}
}
_, _, ns := scpb.FindNamespace(b.QueryByID(tableID))
- hasFKConstraintError := scerrors.NotImplementedErrorf(nil, fmt.Sprintf(`foreign keys to/from
-table with TTL "%s" are not permitted`, ns.Name))
+ hasFKConstraintError := scerrors.NotImplementedErrorf(t.n,
+ `foreign keys to/from table with TTL %q are not permitted`, ns.Name)
// Panic if there is any inbound/outbound FK constraints.
_, _, inboundFKElem := scpb.FindForeignKeyConstraint(b.BackReferences(tableID))
if inboundFKElem != nil {
@@ -528,6 +536,17 @@ func makeShardedDescriptor(b BuildCtx, t alterPrimaryKeySpec) *catpb.ShardedDesc
}
}
+// recreateAllSecondaryIndexes recreates all secondary indexes. While the key
+// columns remain the same in the face of a primary key change, the key suffix
+// columns or the stored columns may not.
+func recreateAllSecondaryIndexes(
+ b BuildCtx, tbl *scpb.Table, newPrimaryIndex, sourcePrimaryIndex *scpb.PrimaryIndex,
+) {
+ // TODO(postamar): implement in 23.1
+ // Nothing needs to be done because fallBackIfSecondaryIndexExists ensures
+ // that there are no secondary indexes by the time this function is called.
+}
+
// maybeAddUniqueIndexForOldPrimaryKey constructs and adds all necessary elements
// for a unique index on the old primary key columns, if certain conditions are
// met (see comments of shouldCreateUniqueIndexOnOldPrimaryKeyColumns for details).
@@ -836,6 +855,50 @@ func getPrimaryIndexDefaultRowIDColumn(
return column
}
+// checkIfRowIDColumnCanBeDropped returns true iff the rowid column is not
+// referenced anywhere, and can therefore be dropped.
+func checkIfRowIDColumnCanBeDropped(b BuildCtx, rowidToDrop *scpb.Column) bool {
+ if rowidToDrop == nil {
+ return false
+ }
+ canBeDropped := true
+ walkDropColumnDependencies(b, rowidToDrop, func(e scpb.Element) {
+ if !canBeDropped {
+ return
+ }
+ switch e := e.(type) {
+ case *scpb.Column:
+ if e.TableID != rowidToDrop.TableID || e.ColumnID != rowidToDrop.ColumnID {
+ canBeDropped = false
+ }
+ case *scpb.ColumnDefaultExpression:
+ if e.TableID != rowidToDrop.TableID || e.ColumnID != rowidToDrop.ColumnID {
+ canBeDropped = false
+ }
+ case *scpb.ColumnOnUpdateExpression:
+ if e.TableID != rowidToDrop.TableID || e.ColumnID != rowidToDrop.ColumnID {
+ canBeDropped = false
+ }
+ case *scpb.UniqueWithoutIndexConstraint, *scpb.CheckConstraint, *scpb.ForeignKeyConstraint:
+ canBeDropped = false
+ case *scpb.View, *scpb.Sequence:
+ canBeDropped = false
+ case *scpb.SecondaryIndex:
+ isOnlyKeySuffixColumn := true
+ indexElts := b.QueryByID(rowidToDrop.TableID).Filter(publicTargetFilter).Filter(hasIndexIDAttrFilter(e.IndexID))
+ scpb.ForEachIndexColumn(indexElts, func(_ scpb.Status, _ scpb.TargetStatus, ic *scpb.IndexColumn) {
+ if rowidToDrop.ColumnID == ic.ColumnID && ic.Kind != scpb.IndexColumn_KEY_SUFFIX {
+ isOnlyKeySuffixColumn = false
+ }
+ })
+ if !isOnlyKeySuffixColumn {
+ canBeDropped = false
+ }
+ }
+ })
+ return canBeDropped
+}
+
// getAllColumnsNameToIDMapping constructs a name to ID mapping
// for all non-system columns.
func getAllColumnsNameToIDMapping(
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go
index de307df8efec..17bc65755cfe 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/alter_table_drop_column.go
@@ -33,13 +33,12 @@ func alterTableDropColumn(
) {
fallBackIfZoneConfigExists(b, n, tbl.TableID)
checkSafeUpdatesForDropColumn(b)
+ checkRegionalByRowColumnConflict(b, tbl, n)
col, elts, done := resolveColumnForDropColumn(b, tn, tbl, n)
if done {
return
}
checkRowLevelTTLColumn(b, tn, tbl, n, col)
- checkRegionalByRowColumnConflict(b, tbl, n)
- b.IncrementSchemaChangeAlterCounter("table", "drop_column")
checkColumnNotInaccessible(col, n)
dropColumn(b, tn, tbl, n, col, elts, n.DropBehavior)
}
@@ -389,7 +388,7 @@ func handleDropColumnPrimaryIndexes(
func handleDropColumnCreateNewPrimaryIndex(
b BuildCtx, existing *scpb.PrimaryIndex, col *scpb.Column,
) *scpb.PrimaryIndex {
- out := makePrimaryIndexSpec(b, existing)
+ out := makeIndexSpec(b, existing.TableID, existing.IndexID)
inColumns := make([]indexColumnSpec, 0, len(out.columns)-1)
var dropped *scpb.IndexColumn
for _, ic := range out.columns {
@@ -406,11 +405,11 @@ func handleDropColumnCreateNewPrimaryIndex(
panic(errors.AssertionFailedf("can only drop columns which are stored in the primary index, this one is %v ",
dropped.Kind))
}
- in, temp := makeSwapPrimaryIndexSpec(b, out, inColumns)
+ in, temp := makeSwapIndexSpec(b, out, out.primary.IndexID, inColumns)
out.apply(b.Drop)
in.apply(b.Add)
temp.apply(b.AddTransient)
- return in.idx
+ return in.primary
}
func handleDropColumnFreshlyAddedPrimaryIndex(
diff --git a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go
index 096f28f5be03..c4e8f7cc9130 100644
--- a/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go
+++ b/pkg/sql/schemachanger/scbuild/internal/scbuildstmt/helpers.go
@@ -408,76 +408,125 @@ func indexColumnDirection(d tree.Direction) catpb.IndexColumn_Direction {
}
}
-// primaryIndexSpec holds a primary index element and its children.
-type primaryIndexSpec struct {
- idx *scpb.PrimaryIndex
- name *scpb.IndexName
- partitioning *scpb.IndexPartitioning
- columns []*scpb.IndexColumn
+// indexSpec holds an index element and its children.
+type indexSpec struct {
+ primary *scpb.PrimaryIndex
+ secondary *scpb.SecondaryIndex
+ temporary *scpb.TemporaryIndex
+
+ name *scpb.IndexName
+ partial *scpb.SecondaryIndexPartial
+ partitioning *scpb.IndexPartitioning
+ columns []*scpb.IndexColumn
+ idxComment *scpb.IndexComment
+ constrComment *scpb.ConstraintComment
}
// apply makes it possible to conveniently define build targets for all
-// the elements in the primaryIndexSpec.
-func (s primaryIndexSpec) apply(fn func(e scpb.Element)) {
- fn(s.idx)
+// the elements in the indexSpec.
+func (s indexSpec) apply(fn func(e scpb.Element)) {
+ if s.primary != nil {
+ fn(s.primary)
+ }
+ if s.secondary != nil {
+ fn(s.secondary)
+ }
+ if s.temporary != nil {
+ fn(s.temporary)
+ }
if s.name != nil {
fn(s.name)
}
+ if s.partial != nil {
+ fn(s.partial)
+ }
if s.partitioning != nil {
fn(s.partitioning)
}
for _, ic := range s.columns {
fn(ic)
}
+ if s.idxComment != nil {
+ fn(s.idxComment)
+ }
+ if s.constrComment != nil {
+ fn(s.constrComment)
+ }
}
-// clone conveniently deep-copies all the elements in the primaryIndexSpec.
-func (s primaryIndexSpec) clone() (c primaryIndexSpec) {
- c.idx = protoutil.Clone(s.idx).(*scpb.PrimaryIndex)
+// clone conveniently deep-copies all the elements in the indexSpec.
+func (s indexSpec) clone() (c indexSpec) {
+ if s.primary != nil {
+ c.primary = protoutil.Clone(s.primary).(*scpb.PrimaryIndex)
+ }
+ if s.secondary != nil {
+ c.secondary = protoutil.Clone(s.secondary).(*scpb.SecondaryIndex)
+ }
+ if s.temporary != nil {
+ c.temporary = protoutil.Clone(s.temporary).(*scpb.TemporaryIndex)
+ }
if s.name != nil {
c.name = protoutil.Clone(s.name).(*scpb.IndexName)
}
+ if s.partial != nil {
+ c.partial = protoutil.Clone(s.partial).(*scpb.SecondaryIndexPartial)
+ }
if s.partitioning != nil {
c.partitioning = protoutil.Clone(s.partitioning).(*scpb.IndexPartitioning)
}
for _, ic := range s.columns {
c.columns = append(c.columns, protoutil.Clone(ic).(*scpb.IndexColumn))
}
+ if s.idxComment != nil {
+ c.idxComment = protoutil.Clone(s.idxComment).(*scpb.IndexComment)
+ }
+ if s.constrComment != nil {
+ c.constrComment = protoutil.Clone(s.constrComment).(*scpb.ConstraintComment)
+ }
return c
}
-// makePrimaryIndexSpec constructs a primaryIndexSpec based on an existing
-// scpb.PrimaryIndex element.
-func makePrimaryIndexSpec(b BuildCtx, idx *scpb.PrimaryIndex) (s primaryIndexSpec) {
- s.idx = idx
- publicIdxTargets := b.QueryByID(idx.TableID).Filter(publicTargetFilter).Filter(hasIndexIDAttrFilter(idx.IndexID))
- _, _, s.name = scpb.FindIndexName(publicIdxTargets)
- _, _, s.partitioning = scpb.FindIndexPartitioning(publicIdxTargets)
- scpb.ForEachIndexColumn(publicIdxTargets, func(_ scpb.Status, _ scpb.TargetStatus, ic *scpb.IndexColumn) {
+// makeIndexSpec constructs an indexSpec based on an existing index element.
+func makeIndexSpec(b BuildCtx, tableID catid.DescID, indexID catid.IndexID) (s indexSpec) {
+ tableElts := b.QueryByID(tableID).Filter(notAbsentTargetFilter)
+ idxElts := tableElts.Filter(hasIndexIDAttrFilter(indexID))
+ var constraintID catid.ConstraintID
+ var n int
+ _, _, s.primary = scpb.FindPrimaryIndex(idxElts)
+ if s.primary != nil {
+ constraintID = s.primary.ConstraintID
+ n++
+ }
+ _, _, s.secondary = scpb.FindSecondaryIndex(idxElts)
+ if s.secondary != nil {
+ constraintID = s.secondary.ConstraintID
+ n++
+ }
+ _, _, s.temporary = scpb.FindTemporaryIndex(idxElts)
+ if s.temporary != nil {
+ constraintID = s.temporary.ConstraintID
+ n++
+ }
+ if n != 1 {
+ panic(errors.AssertionFailedf("invalid index spec for TableID=%d and IndexID=%d: "+
+ "primary=%v, secondary=%v, temporary=%v",
+ tableID, indexID, s.primary != nil, s.secondary != nil, s.temporary != nil))
+ }
+ _, _, s.name = scpb.FindIndexName(idxElts)
+ _, _, s.partial = scpb.FindSecondaryIndexPartial(idxElts)
+ _, _, s.partitioning = scpb.FindIndexPartitioning(idxElts)
+ scpb.ForEachIndexColumn(idxElts, func(_ scpb.Status, _ scpb.TargetStatus, ic *scpb.IndexColumn) {
s.columns = append(s.columns, ic)
})
+ _, _, s.idxComment = scpb.FindIndexComment(idxElts)
+ scpb.ForEachConstraintComment(tableElts, func(_ scpb.Status, _ scpb.TargetStatus, cc *scpb.ConstraintComment) {
+ if cc.ConstraintID == constraintID {
+ s.constrComment = cc
+ }
+ })
return s
}
-// tempIndexSpec holds a temporary index element and its children.
-type tempIndexSpec struct {
- idx *scpb.TemporaryIndex
- partitioning *scpb.IndexPartitioning
- columns []*scpb.IndexColumn
-}
-
-// apply makes it possible to conveniently define build targets for all
-// the elements in the tempIndexSpec.
-func (s tempIndexSpec) apply(fn func(e scpb.Element)) {
- fn(s.idx)
- if s.partitioning != nil {
- fn(s.partitioning)
- }
- for _, ic := range s.columns {
- fn(ic)
- }
-}
-
// indexColumnSpec specifies how to construct a scpb.IndexColumn element.
type indexColumnSpec struct {
columnID catid.ColumnID
@@ -493,29 +542,66 @@ func makeIndexColumnSpec(ic *scpb.IndexColumn) indexColumnSpec {
}
}
-// makeSwapPrimaryIndexSpec constructs a primaryIndexSpec and an accompanying
-// tempIndexSpec to swap out an existing primary index with.
-func makeSwapPrimaryIndexSpec(
- b BuildCtx, out primaryIndexSpec, inColumns []indexColumnSpec,
-) (in primaryIndexSpec, temp tempIndexSpec) {
+// makeSwapIndexSpec constructs a pair of indexSpec for the new index and the
+// accompanying temporary index to swap out an existing index with.
+func makeSwapIndexSpec(
+ b BuildCtx, out indexSpec, sourceIndexID catid.IndexID, inColumns []indexColumnSpec,
+) (in, temp indexSpec) {
+ isSecondary := out.secondary != nil
+ // Determine table ID and validate input.
+ var tableID catid.DescID
+ {
+ var n int
+ var outID catid.IndexID
+ if isSecondary {
+ tableID = out.secondary.TableID
+ outID = out.secondary.IndexID
+ n++
+ }
+ if out.primary != nil {
+ tableID = out.primary.TableID
+ outID = out.primary.IndexID
+ n++
+ }
+ if out.temporary != nil {
+ tableID = out.temporary.TableID
+ outID = out.temporary.IndexID
+ }
+ if n != 1 {
+ panic(errors.AssertionFailedf("invalid swap source index spec for TableID=%d and IndexID=%d: "+
+ "primary=%v, secondary=%v, temporary=%v",
+ tableID, outID, out.primary != nil, isSecondary, out.temporary != nil))
+ }
+ }
+ // Determine old and new IDs.
var inID, tempID catid.IndexID
var inConstraintID, tempConstraintID catid.ConstraintID
{
- _, _, tbl := scpb.FindTable(b.QueryByID(out.idx.TableID).Filter(notAbsentTargetFilter))
+ _, _, tbl := scpb.FindTable(b.QueryByID(tableID).Filter(notAbsentTargetFilter))
inID = b.NextTableIndexID(tbl)
inConstraintID = b.NextTableConstraintID(tbl.TableID)
tempID = inID + 1
tempConstraintID = inConstraintID + 1
}
+ // Setup new primary or secondary index.
{
in = out.clone()
- in.idx.IndexID = inID
- in.idx.SourceIndexID = out.idx.IndexID
- in.idx.TemporaryIndexID = tempID
- in.idx.ConstraintID = inConstraintID
+ var idx *scpb.Index
+ if isSecondary {
+ idx = &in.secondary.Index
+ } else {
+ idx = &in.primary.Index
+ }
+ idx.IndexID = inID
+ idx.SourceIndexID = sourceIndexID
+ idx.TemporaryIndexID = tempID
+ idx.ConstraintID = inConstraintID
if in.name != nil {
in.name.IndexID = inID
}
+ if in.partial != nil {
+ in.partial.IndexID = inID
+ }
if in.partitioning != nil {
in.partitioning.IndexID = inID
}
@@ -525,7 +611,7 @@ func makeSwapPrimaryIndexSpec(
ordinalInKind := m[cs.kind]
m[cs.kind] = ordinalInKind + 1
in.columns = append(in.columns, &scpb.IndexColumn{
- TableID: in.idx.TableID,
+ TableID: idx.TableID,
IndexID: inID,
ColumnID: cs.columnID,
OrdinalInKind: ordinalInKind,
@@ -533,13 +619,29 @@ func makeSwapPrimaryIndexSpec(
Direction: cs.direction,
})
}
+ if in.idxComment != nil {
+ in.idxComment.IndexID = inID
+ }
+ if in.constrComment != nil {
+ in.constrComment.ConstraintID = inConstraintID
+ }
}
+ // Setup temporary index.
{
s := in.clone()
- temp.idx = &scpb.TemporaryIndex{Index: s.idx.Index}
- temp.idx.IndexID = tempID
- temp.idx.TemporaryIndexID = 0
- temp.idx.ConstraintID = tempConstraintID
+ if isSecondary {
+ temp.temporary = &scpb.TemporaryIndex{Index: s.secondary.Index}
+ } else {
+ temp.temporary = &scpb.TemporaryIndex{Index: s.primary.Index}
+ }
+ temp.temporary.IndexID = tempID
+ temp.temporary.TemporaryIndexID = 0
+ temp.temporary.ConstraintID = tempConstraintID
+ temp.temporary.IsUsingSecondaryEncoding = isSecondary
+ if s.partial != nil {
+ temp.partial = s.partial
+ temp.partial.IndexID = tempID
+ }
if s.partitioning != nil {
temp.partitioning = s.partitioning
temp.partitioning.IndexID = tempID
diff --git a/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_primary_key b/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_primary_key
index e037c30ec85a..8a9ecd7b9621 100644
--- a/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_primary_key
+++ b/pkg/sql/schemachanger/scbuild/testdata/alter_table_alter_primary_key
@@ -1,5 +1,7 @@
setup
-CREATE TABLE defaultdb.foo (i INT PRIMARY KEY, j INT NOT NULL);
+CREATE TABLE defaultdb.foo (i INT NOT NULL, j INT NOT NULL, CONSTRAINT pkey PRIMARY KEY (i));
+COMMENT ON INDEX pkey IS 'pkey is an index';
+COMMENT ON CONSTRAINT pkey ON defaultdb.foo IS 'pkey is a constraint';
CREATE TABLE defaultdb.bar (i INT NOT NULL);
----
@@ -12,16 +14,24 @@ ALTER TABLE defaultdb.foo ALTER PRIMARY KEY USING COLUMNS (j)
{columnId: 2, indexId: 1, kind: STORED, tableId: 104}
- [[PrimaryIndex:{DescID: 104, IndexID: 1, ConstraintID: 1}, ABSENT], PUBLIC]
{constraintId: 1, indexId: 1, isUnique: true, tableId: 104}
-- [[IndexName:{DescID: 104, Name: foo_pkey, IndexID: 1}, ABSENT], PUBLIC]
- {indexId: 1, name: foo_pkey, tableId: 104}
+- [[IndexName:{DescID: 104, Name: pkey, IndexID: 1}, ABSENT], PUBLIC]
+ {indexId: 1, name: pkey, tableId: 104}
+- [[IndexComment:{DescID: 104, IndexID: 1, Comment: pkey is an index}, ABSENT], PUBLIC]
+ {comment: pkey is an index, indexId: 1, tableId: 104}
+- [[ConstraintComment:{DescID: 104, ConstraintID: 1, Comment: pkey is a constraint}, ABSENT], PUBLIC]
+ {comment: pkey is a constraint, constraintId: 1, tableId: 104}
- [[PrimaryIndex:{DescID: 104, IndexID: 2, ConstraintID: 2, TemporaryIndexID: 3, SourceIndexID: 1}, PUBLIC], ABSENT]
{constraintId: 2, indexId: 2, isUnique: true, sourceIndexId: 1, tableId: 104, temporaryIndexId: 3}
-- [[IndexName:{DescID: 104, Name: foo_pkey, IndexID: 2}, PUBLIC], ABSENT]
- {indexId: 2, name: foo_pkey, tableId: 104}
+- [[IndexName:{DescID: 104, Name: pkey, IndexID: 2}, PUBLIC], ABSENT]
+ {indexId: 2, name: pkey, tableId: 104}
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 2}, PUBLIC], ABSENT]
{columnId: 2, indexId: 2, tableId: 104}
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 2}, PUBLIC], ABSENT]
{columnId: 1, indexId: 2, kind: STORED, tableId: 104}
+- [[IndexComment:{DescID: 104, IndexID: 2, Comment: pkey is an index}, PUBLIC], ABSENT]
+ {comment: pkey is an index, indexId: 2, tableId: 104}
+- [[ConstraintComment:{DescID: 104, ConstraintID: 2, Comment: pkey is a constraint}, PUBLIC], ABSENT]
+ {comment: pkey is a constraint, constraintId: 2, tableId: 104}
- [[TemporaryIndex:{DescID: 104, IndexID: 3, ConstraintID: 3, SourceIndexID: 1}, TRANSIENT_ABSENT], ABSENT]
{constraintId: 3, indexId: 3, isUnique: true, sourceIndexId: 1, tableId: 104}
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 3}, TRANSIENT_ABSENT], ABSENT]
diff --git a/pkg/sql/schemachanger/scbuild/testdata/alter_table_drop_column b/pkg/sql/schemachanger/scbuild/testdata/alter_table_drop_column
index b1546fbb792c..88ded82691b6 100644
--- a/pkg/sql/schemachanger/scbuild/testdata/alter_table_drop_column
+++ b/pkg/sql/schemachanger/scbuild/testdata/alter_table_drop_column
@@ -8,6 +8,7 @@ CREATE TABLE defaultdb.t (
INDEX (j),
INDEX (j, k)
);
+COMMENT ON COLUMN defaultdb.t.j IS 'column will drop';
----
build
@@ -21,6 +22,8 @@ ALTER TABLE defaultdb.t DROP COLUMN j
{columnId: 2, tableId: 104, type: {family: IntFamily, oid: 20, width: 64}}
- [[ColumnDefaultExpression:{DescID: 104, ColumnID: 2}, ABSENT], PUBLIC]
{columnId: 2, expr: '42:::INT8', tableId: 104}
+- [[ColumnComment:{DescID: 104, ColumnID: 2, Comment: column will drop}, ABSENT], PUBLIC]
+ {columnId: 2, comment: column will drop, pgAttributeNum: 2, tableId: 104}
- [[IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 1}, ABSENT], PUBLIC]
{columnId: 1, indexId: 1, tableId: 104}
- [[IndexColumn:{DescID: 104, ColumnID: 2, IndexID: 1}, ABSENT], PUBLIC]
diff --git a/pkg/sql/schemachanger/scdecomp/decomp.go b/pkg/sql/schemachanger/scdecomp/decomp.go
index 201331281ac9..c15ed4a06ddf 100644
--- a/pkg/sql/schemachanger/scdecomp/decomp.go
+++ b/pkg/sql/schemachanger/scdecomp/decomp.go
@@ -552,6 +552,17 @@ func (w *walkCtx) walkIndex(tbl catalog.TableDescriptor, idx catalog.Index) {
} else if err != nil {
panic(err)
}
+ if constraintID := idx.GetConstraintID(); constraintID != 0 {
+ if comment, ok, err := w.commentCache.GetConstraintComment(w.ctx, tbl.GetID(), constraintID); err == nil && ok {
+ w.ev(scpb.Status_PUBLIC, &scpb.ConstraintComment{
+ TableID: tbl.GetID(),
+ ConstraintID: constraintID,
+ Comment: comment,
+ })
+ } else if err != nil {
+ panic(err)
+ }
+ }
}
func (w *walkCtx) walkUniqueWithoutIndexConstraint(
diff --git a/pkg/sql/schemachanger/scdecomp/dependencies.go b/pkg/sql/schemachanger/scdecomp/dependencies.go
index c04838d78ba0..6d5fe4b12753 100644
--- a/pkg/sql/schemachanger/scdecomp/dependencies.go
+++ b/pkg/sql/schemachanger/scdecomp/dependencies.go
@@ -26,23 +26,23 @@ type CommentGetter interface {
GetDatabaseComment(ctx context.Context, dbID catid.DescID) (comment string, ok bool, err error)
// GetSchemaComment returns comment for a schema. `ok` returned indicates if
- // the // comment actually exists or not.
+ // the comment actually exists or not.
GetSchemaComment(ctx context.Context, schemaID catid.DescID) (comment string, ok bool, err error)
// GetTableComment returns comment for a table. `ok` returned indicates if the
- // // comment actually exists or not.
+ // comment actually exists or not.
GetTableComment(ctx context.Context, tableID catid.DescID) (comment string, ok bool, err error)
// GetColumnComment returns comment for a column. `ok` returned indicates if
- // the // comment actually exists or not.
+ // the comment actually exists or not.
GetColumnComment(ctx context.Context, tableID catid.DescID, pgAttrNum catid.PGAttributeNum) (comment string, ok bool, err error)
// GetIndexComment returns comment for an index. `ok` returned indicates if
- // the // comment actually exists or not.
+ // the comment actually exists or not.
GetIndexComment(ctx context.Context, tableID catid.DescID, indexID catid.IndexID) (comment string, ok bool, err error)
// GetConstraintComment returns comment for a constraint. `ok` returned
- // indicates if the // comment actually exists or not.
+ // indicates if the comment actually exists or not.
GetConstraintComment(ctx context.Context, tableID catid.DescID, constraintID catid.ConstraintID) (comment string, ok bool, err error)
}
diff --git a/pkg/sql/schemachanger/scdecomp/testdata/table b/pkg/sql/schemachanger/scdecomp/testdata/table
index f0ed16a3e557..813a066cd44b 100644
--- a/pkg/sql/schemachanger/scdecomp/testdata/table
+++ b/pkg/sql/schemachanger/scdecomp/testdata/table
@@ -12,6 +12,7 @@ COMMENT ON INDEX tbl@tbl_pkey IS 'tbl_pkey is a primary key';
COMMENT ON COLUMN tbl.id IS 'id is a identifier';
COMMENT ON CONSTRAINT myfk ON tbl IS 'must have a parent';
ALTER TABLE tbl CONFIGURE ZONE USING gc.ttlseconds=10;
+COMMENT ON CONSTRAINT tbl_pkey ON tbl IS 'primary key';
----
decompose
@@ -553,6 +554,11 @@ ElementState:
constraintId: 2
tableId: 105
Status: PUBLIC
+- ConstraintComment:
+ comment: primary key
+ constraintId: 1
+ tableId: 105
+ Status: PUBLIC
- Namespace:
databaseId: 100
descriptorId: 105
diff --git a/pkg/sql/schemachanger/scexec/scmutationexec/eventlog.go b/pkg/sql/schemachanger/scexec/scmutationexec/eventlog.go
index 84db12debdfe..86b3d3fd8a8d 100644
--- a/pkg/sql/schemachanger/scexec/scmutationexec/eventlog.go
+++ b/pkg/sql/schemachanger/scexec/scmutationexec/eventlog.go
@@ -180,13 +180,26 @@ func asCommentEventPayload(
if err != nil {
return nil, err
}
- constraint, err := tbl.FindConstraintWithID(e.ConstraintID)
- if err != nil {
- return nil, err
+ var constraintName string
+ if constraint, err := tbl.FindConstraintWithID(e.ConstraintID); err != nil {
+ // FindConstraintWithID excludes dropping indexes for no good reason.
+ // TODO(postamar): improve catalog.TableDescriptor interface
+ for _, idx := range tbl.AllIndexes() {
+ if idx.Dropped() && idx.GetConstraintID() == e.ConstraintID {
+ constraintName = idx.GetName()
+ err = nil
+ break
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ constraintName = constraint.GetConstraintName()
}
return &eventpb.CommentOnConstraint{
TableName: fullName,
- ConstraintName: constraint.GetConstraintName(),
+ ConstraintName: constraintName,
Comment: e.Comment,
NullComment: isNullComment,
}, nil
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/BUILD.bazel b/pkg/sql/schemachanger/scplan/internal/rules/BUILD.bazel
index 99eb03874764..2fe34b8ee766 100644
--- a/pkg/sql/schemachanger/scplan/internal/rules/BUILD.bazel
+++ b/pkg/sql/schemachanger/scplan/internal/rules/BUILD.bazel
@@ -5,9 +5,11 @@ go_library(
name = "rules",
srcs = [
"dep_add_column.go",
+ "dep_add_constraint.go",
"dep_add_index.go",
"dep_add_index_and_column.go",
"dep_drop_column.go",
+ "dep_drop_constraint.go",
"dep_drop_index.go",
"dep_drop_index_and_column.go",
"dep_drop_object.go",
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go b/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go
index 424db280335b..6a158872ea7b 100644
--- a/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go
+++ b/pkg/sql/schemachanger/scplan/internal/rules/assertions_test.go
@@ -34,6 +34,7 @@ func TestRuleAssertions(t *testing.T) {
checkIsWithExpression,
checkIsColumnDependent,
checkIsIndexDependent,
+ checkIsConstraintDependent,
} {
var fni interface{} = fn
fullName := runtime.FuncForPC(reflect.ValueOf(fni).Pointer()).Name()
@@ -124,8 +125,7 @@ func checkIsWithExpression(e scpb.Element) error {
// element.
func checkIsColumnDependent(e scpb.Element) error {
// Exclude columns themselves.
- switch e.(type) {
- case *scpb.Column:
+ if isColumn(e) {
return nil
}
// A column dependent should have a ColumnID attribute.
@@ -158,3 +158,22 @@ func checkIsIndexDependent(e scpb.Element) error {
}
return nil
}
+
+// Assert that checkIsConstraintDependent covers all elements of a constraint
+// element.
+func checkIsConstraintDependent(e scpb.Element) error {
+ // Exclude constraints themselves.
+ if isConstraint(e) {
+ return nil
+ }
+ // A constraint dependent should have a ConstraintID attribute.
+ _, err := screl.Schema.GetAttribute(screl.ConstraintID, e)
+ if isConstraintDependent(e) {
+ if err != nil {
+ return errors.New("verifies isConstraintDependent but doesn't have ConstraintID attr")
+ }
+ } else if err == nil {
+ return errors.New("has ConstraintID attr but doesn't verify isConstraintDependent")
+ }
+ return nil
+}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/dep_add_constraint.go b/pkg/sql/schemachanger/scplan/internal/rules/dep_add_constraint.go
new file mode 100644
index 000000000000..e7c8a6074d6a
--- /dev/null
+++ b/pkg/sql/schemachanger/scplan/internal/rules/dep_add_constraint.go
@@ -0,0 +1,36 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package rules
+
+import (
+ "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
+ "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
+ "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
+)
+
+// These rules ensure that constraint-dependent elements, like a constraint's
+// name, etc. appear once the constraint reaches a suitable state.
+func init() {
+
+ registerDepRule(
+ "constraint dependent public right before constraint",
+ scgraph.SameStagePrecedence,
+ "constraint", "dependent",
+ func(from, to nodeVars) rel.Clauses {
+ return rel.Clauses{
+ from.typeFilter(isConstraint),
+ to.typeFilter(isConstraintDependent),
+ joinOnConstraintID(from, to, "table-id", "constraint-id"),
+ statusesToPublicOrTransient(from, scpb.Status_PUBLIC, to, scpb.Status_PUBLIC),
+ }
+ },
+ )
+}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/dep_drop_constraint.go b/pkg/sql/schemachanger/scplan/internal/rules/dep_drop_constraint.go
new file mode 100644
index 000000000000..36790a4bd9f4
--- /dev/null
+++ b/pkg/sql/schemachanger/scplan/internal/rules/dep_drop_constraint.go
@@ -0,0 +1,50 @@
+// Copyright 2022 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package rules
+
+import (
+ "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/rel"
+ "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scpb"
+ "github.com/cockroachdb/cockroach/pkg/sql/schemachanger/scplan/internal/scgraph"
+)
+
+// These rules ensure that constraint-dependent elements, like a constraint's
+// name, etc. disappear once the constraint reaches a suitable state.
+func init() {
+
+ registerDepRuleForDrop(
+ "constraint dependent absent right before constraint",
+ scgraph.SameStagePrecedence,
+ "dependent", "constraint",
+ scpb.Status_ABSENT, scpb.Status_ABSENT,
+ func(from, to nodeVars) rel.Clauses {
+ return rel.Clauses{
+ from.typeFilter(isConstraintDependent),
+ to.typeFilter(isConstraint, not(isIndex)),
+ joinOnConstraintID(from, to, "table-id", "constraint-id"),
+ }
+ },
+ )
+
+ registerDepRuleForDrop(
+ "constraint dependent absent right before constraint",
+ scgraph.SameStagePrecedence,
+ "dependent", "constraint",
+ scpb.Status_VALIDATED, scpb.Status_ABSENT,
+ func(from, to nodeVars) rel.Clauses {
+ return rel.Clauses{
+ from.typeFilter(isConstraintDependent),
+ to.typeFilter(isConstraint, isIndex),
+ joinOnConstraintID(from, to, "table-id", "constraint-id"),
+ }
+ },
+ )
+}
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/helpers.go b/pkg/sql/schemachanger/scplan/internal/rules/helpers.go
index 8ca12d7aea63..56c3d914f4cc 100644
--- a/pkg/sql/schemachanger/scplan/internal/rules/helpers.go
+++ b/pkg/sql/schemachanger/scplan/internal/rules/helpers.go
@@ -385,6 +385,32 @@ func isIndexDependent(e scpb.Element) bool {
return false
}
+func isConstraint(e scpb.Element) bool {
+ switch e.(type) {
+ case *scpb.PrimaryIndex, *scpb.SecondaryIndex, *scpb.TemporaryIndex:
+ return true
+ case *scpb.CheckConstraint, *scpb.UniqueWithoutIndexConstraint, *scpb.ForeignKeyConstraint:
+ return true
+ }
+ return false
+}
+
+func isConstraintDependent(e scpb.Element) bool {
+ switch e.(type) {
+ case *scpb.ConstraintName:
+ return true
+ case *scpb.ConstraintComment:
+ return true
+ }
+ return false
+}
+
+func not(predicate func(e scpb.Element) bool) func(e scpb.Element) bool {
+ return func(e scpb.Element) bool {
+ return !predicate(e)
+ }
+}
+
// registerDepRuleForDrop is a convenience function which calls
// registerDepRule with the cross-product of (ToAbsent,Transient)^2 target
// states, which can't easily be composed.
diff --git a/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules b/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules
index d7206b4eaaa2..be551f8434aa 100644
--- a/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules
+++ b/pkg/sql/schemachanger/scplan/internal/rules/testdata/deprules
@@ -151,10 +151,9 @@ deprules
- $column[Type] = '*scpb.Column'
- $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn']
- joinOnColumnID($column, $dependent, $table-id, $col-id)
- - $column-target[TargetStatus] = ABSENT
+ - toAbsent($column-target, $dependent-target)
- $column-node[CurrentStatus] = WRITE_ONLY
- - $dependent-target[TargetStatus] = TRANSIENT_ABSENT
- - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT
+ - $dependent-node[CurrentStatus] = ABSENT
- joinTargetNode($column, $column-target, $column-node)
- joinTargetNode($dependent, $dependent-target, $dependent-node)
- name: column no longer public before dependents
@@ -179,8 +178,9 @@ deprules
- $column[Type] = '*scpb.Column'
- $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn']
- joinOnColumnID($column, $dependent, $table-id, $col-id)
- - transient($column-target, $dependent-target)
- - $column-node[CurrentStatus] = TRANSIENT_WRITE_ONLY
+ - $column-target[TargetStatus] = ABSENT
+ - $column-node[CurrentStatus] = WRITE_ONLY
+ - $dependent-target[TargetStatus] = TRANSIENT_ABSENT
- $dependent-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($column, $column-target, $column-node)
- joinTargetNode($dependent, $dependent-target, $dependent-node)
@@ -192,9 +192,9 @@ deprules
- $column[Type] = '*scpb.Column'
- $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn']
- joinOnColumnID($column, $dependent, $table-id, $col-id)
- - toAbsent($column-target, $dependent-target)
- - $column-node[CurrentStatus] = WRITE_ONLY
- - $dependent-node[CurrentStatus] = ABSENT
+ - transient($column-target, $dependent-target)
+ - $column-node[CurrentStatus] = TRANSIENT_WRITE_ONLY
+ - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($column, $column-target, $column-node)
- joinTargetNode($dependent, $dependent-target, $dependent-node)
- name: column type dependents removed right before column type
@@ -224,6 +224,127 @@ deprules
- relationIsNotBeingDropped(*scpb.ColumnType)($column-type)
- joinTargetNode($column-type, $column-type-target, $column-type-node)
- joinTargetNode($column, $column-target, $column-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - toAbsent($dependent-target, $constraint-target)
+ - $dependent-node[CurrentStatus] = VALIDATED
+ - $constraint-node[CurrentStatus] = ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - toAbsent($dependent-target, $constraint-target)
+ - $dependent-node[CurrentStatus] = ABSENT
+ - $constraint-node[CurrentStatus] = ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - transient($dependent-target, $constraint-target)
+ - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT
+ - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - $dependent-target[TargetStatus] = TRANSIENT_ABSENT
+ - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT
+ - $constraint-target[TargetStatus] = ABSENT
+ - $constraint-node[CurrentStatus] = ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - $dependent-target[TargetStatus] = ABSENT
+ - $dependent-node[CurrentStatus] = ABSENT
+ - $constraint-target[TargetStatus] = TRANSIENT_ABSENT
+ - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - transient($dependent-target, $constraint-target)
+ - $dependent-node[CurrentStatus] = TRANSIENT_VALIDATED
+ - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - $dependent-target[TargetStatus] = TRANSIENT_ABSENT
+ - $dependent-node[CurrentStatus] = TRANSIENT_VALIDATED
+ - $constraint-target[TargetStatus] = ABSENT
+ - $constraint-node[CurrentStatus] = ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent absent right before constraint
+ from: dependent-node
+ kind: SameStagePrecedence
+ to: constraint-node
+ query:
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
+ - joinOnConstraintID($dependent, $constraint, $table-id, $constraint-id)
+ - $dependent-target[TargetStatus] = ABSENT
+ - $dependent-node[CurrentStatus] = VALIDATED
+ - $constraint-target[TargetStatus] = TRANSIENT_ABSENT
+ - $constraint-node[CurrentStatus] = TRANSIENT_ABSENT
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+- name: constraint dependent public right before constraint
+ from: constraint-node
+ kind: SameStagePrecedence
+ to: dependent-node
+ query:
+ - $constraint[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex', '*scpb.UniqueWithoutIndexConstraint', '*scpb.CheckConstraint', '*scpb.ForeignKeyConstraint']
+ - $dependent[Type] IN ['*scpb.ConstraintName', '*scpb.ConstraintComment']
+ - joinOnConstraintID($constraint, $dependent, $table-id, $constraint-id)
+ - toPublicOrTransient($constraint-target, $dependent-target)
+ - $constraint-node[CurrentStatus] = PUBLIC
+ - $dependent-node[CurrentStatus] = PUBLIC
+ - joinTargetNode($constraint, $constraint-target, $constraint-node)
+ - joinTargetNode($dependent, $dependent-target, $dependent-node)
- name: dependents removed before column
from: dependent-node
kind: Precedence
@@ -232,9 +353,10 @@ deprules
- $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn']
- $column[Type] = '*scpb.Column'
- joinOnColumnID($dependent, $column, $table-id, $col-id)
- - toAbsent($dependent-target, $column-target)
+ - $dependent-target[TargetStatus] = ABSENT
- $dependent-node[CurrentStatus] = ABSENT
- - $column-node[CurrentStatus] = ABSENT
+ - $column-target[TargetStatus] = TRANSIENT_ABSENT
+ - $column-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($dependent, $dependent-target, $dependent-node)
- joinTargetNode($column, $column-target, $column-node)
- name: dependents removed before column
@@ -245,9 +367,9 @@ deprules
- $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn']
- $column[Type] = '*scpb.Column'
- joinOnColumnID($dependent, $column, $table-id, $col-id)
- - transient($dependent-target, $column-target)
- - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-node[CurrentStatus] = TRANSIENT_ABSENT
+ - toAbsent($dependent-target, $column-target)
+ - $dependent-node[CurrentStatus] = ABSENT
+ - $column-node[CurrentStatus] = ABSENT
- joinTargetNode($dependent, $dependent-target, $dependent-node)
- joinTargetNode($column, $column-target, $column-node)
- name: dependents removed before column
@@ -258,9 +380,8 @@ deprules
- $dependent[Type] IN ['*scpb.ColumnName', '*scpb.ColumnType', '*scpb.ColumnDefaultExpression', '*scpb.ColumnOnUpdateExpression', '*scpb.SequenceOwner', '*scpb.ColumnComment', '*scpb.IndexColumn']
- $column[Type] = '*scpb.Column'
- joinOnColumnID($dependent, $column, $table-id, $col-id)
- - $dependent-target[TargetStatus] = ABSENT
- - $dependent-node[CurrentStatus] = ABSENT
- - $column-target[TargetStatus] = TRANSIENT_ABSENT
+ - transient($dependent-target, $column-target)
+ - $dependent-node[CurrentStatus] = TRANSIENT_ABSENT
- $column-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($dependent, $dependent-target, $dependent-node)
- joinTargetNode($column, $column-target, $column-node)
@@ -578,10 +699,9 @@ deprules
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- joinOnColumnID($index-column, $column-type, $table-id, $column-id)
- relationIsNotBeingDropped(*scpb.ColumnType)($column-type)
- - $index-target[TargetStatus] = TRANSIENT_ABSENT
+ - transient($index-target, $column-target)
- $index-node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-target[TargetStatus] = ABSENT
- - $column-node[CurrentStatus] = ABSENT
+ - $column-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($index, $index-target, $index-node)
- joinTargetNode($column, $column-target, $column-node)
- name: indexes containing column reach absent before column
@@ -596,9 +716,10 @@ deprules
- joinOnColumnID($index-column, $column, $table-id, $column-id)
- joinOnColumnID($index-column, $column-type, $table-id, $column-id)
- relationIsNotBeingDropped(*scpb.ColumnType)($column-type)
- - transient($index-target, $column-target)
+ - $index-target[TargetStatus] = TRANSIENT_ABSENT
- $index-node[CurrentStatus] = TRANSIENT_ABSENT
- - $column-node[CurrentStatus] = TRANSIENT_ABSENT
+ - $column-target[TargetStatus] = ABSENT
+ - $column-node[CurrentStatus] = ABSENT
- joinTargetNode($index, $index-target, $index-node)
- joinTargetNode($column, $column-target, $column-node)
- name: old index absent before new index public when swapping with transient
@@ -630,9 +751,8 @@ deprules
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- relationIsNotBeingDropped(*scpb.SecondaryIndexPartial)($partial-predicate)
- - $partial-predicate-target[TargetStatus] = TRANSIENT_ABSENT
- - $partial-predicate-node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-target[TargetStatus] = ABSENT
+ - toAbsent($partial-predicate-target, $index-target)
+ - $partial-predicate-node[CurrentStatus] = ABSENT
- $index-node[CurrentStatus] = ABSENT
- joinTargetNode($partial-predicate, $partial-predicate-target, $partial-predicate-node)
- joinTargetNode($index, $index-target, $index-node)
@@ -645,9 +765,8 @@ deprules
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- relationIsNotBeingDropped(*scpb.SecondaryIndexPartial)($partial-predicate)
- - $partial-predicate-target[TargetStatus] = ABSENT
- - $partial-predicate-node[CurrentStatus] = ABSENT
- - $index-target[TargetStatus] = TRANSIENT_ABSENT
+ - transient($partial-predicate-target, $index-target)
+ - $partial-predicate-node[CurrentStatus] = TRANSIENT_ABSENT
- $index-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($partial-predicate, $partial-predicate-target, $partial-predicate-node)
- joinTargetNode($index, $index-target, $index-node)
@@ -660,9 +779,10 @@ deprules
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- relationIsNotBeingDropped(*scpb.SecondaryIndexPartial)($partial-predicate)
- - toAbsent($partial-predicate-target, $index-target)
+ - $partial-predicate-target[TargetStatus] = ABSENT
- $partial-predicate-node[CurrentStatus] = ABSENT
- - $index-node[CurrentStatus] = ABSENT
+ - $index-target[TargetStatus] = TRANSIENT_ABSENT
+ - $index-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($partial-predicate, $partial-predicate-target, $partial-predicate-node)
- joinTargetNode($index, $index-target, $index-node)
- name: partial predicate removed right before secondary index when not dropping relation
@@ -674,9 +794,10 @@ deprules
- $index[Type] = '*scpb.SecondaryIndex'
- joinOnIndexID($partial-predicate, $index, $table-id, $index-id)
- relationIsNotBeingDropped(*scpb.SecondaryIndexPartial)($partial-predicate)
- - transient($partial-predicate-target, $index-target)
+ - $partial-predicate-target[TargetStatus] = TRANSIENT_ABSENT
- $partial-predicate-node[CurrentStatus] = TRANSIENT_ABSENT
- - $index-node[CurrentStatus] = TRANSIENT_ABSENT
+ - $index-target[TargetStatus] = ABSENT
+ - $index-node[CurrentStatus] = ABSENT
- joinTargetNode($partial-predicate, $partial-predicate-target, $partial-predicate-node)
- joinTargetNode($index, $index-target, $index-node)
- name: primary index swap
@@ -689,8 +810,8 @@ deprules
- joinOnDescID($old-index, $new-index, $table-id)
- $old-index[IndexID] = $old-index-id
- $new-index[SourceIndexID] = $old-index-id
- - $old-index-target[TargetStatus] = ABSENT
- - $old-index-node[CurrentStatus] = VALIDATED
+ - $old-index-target[TargetStatus] = TRANSIENT_ABSENT
+ - $old-index-node[CurrentStatus] = TRANSIENT_VALIDATED
- $new-index-target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- $new-index-node[CurrentStatus] = PUBLIC
- joinTargetNode($old-index, $old-index-target, $old-index-node)
@@ -705,8 +826,8 @@ deprules
- joinOnDescID($old-index, $new-index, $table-id)
- $old-index[IndexID] = $old-index-id
- $new-index[SourceIndexID] = $old-index-id
- - $old-index-target[TargetStatus] = TRANSIENT_ABSENT
- - $old-index-node[CurrentStatus] = TRANSIENT_VALIDATED
+ - $old-index-target[TargetStatus] = ABSENT
+ - $old-index-node[CurrentStatus] = VALIDATED
- $new-index-target[TargetStatus] IN [PUBLIC, TRANSIENT_ABSENT]
- $new-index-node[CurrentStatus] = PUBLIC
- joinTargetNode($old-index, $old-index-target, $old-index-node)
@@ -765,9 +886,9 @@ deprules
- $index[Type] = '*scpb.IndexColumn'
- $index-column[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- joinOnIndexID($index, $index-column, $table-id, $index-id)
- - toAbsent($index-target, $index-column-target)
- - $index-node[CurrentStatus] = DELETE_ONLY
- - $index-column-node[CurrentStatus] = ABSENT
+ - transient($index-target, $index-column-target)
+ - $index-node[CurrentStatus] = TRANSIENT_DELETE_ONLY
+ - $index-column-node[CurrentStatus] = TRANSIENT_ABSENT
- joinTargetNode($index, $index-target, $index-node)
- joinTargetNode($index-column, $index-column-target, $index-column-node)
- name: remove columns from index right before removing index
@@ -778,9 +899,9 @@ deprules
- $index[Type] = '*scpb.IndexColumn'
- $index-column[Type] IN ['*scpb.PrimaryIndex', '*scpb.SecondaryIndex', '*scpb.TemporaryIndex']
- joinOnIndexID($index, $index-column, $table-id, $index-id)
- - transient($index-target, $index-column-target)
- - $index-node[CurrentStatus] = TRANSIENT_DELETE_ONLY
- - $index-column-node[CurrentStatus] = TRANSIENT_ABSENT
+ - toAbsent($index-target, $index-column-target)
+ - $index-node[CurrentStatus] = DELETE_ONLY
+ - $index-column-node[CurrentStatus] = ABSENT
- joinTargetNode($index, $index-target, $index-node)
- joinTargetNode($index-column, $index-column-target, $index-column-node)
- name: remove columns from index right before removing index
diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_index b/pkg/sql/schemachanger/scplan/testdata/drop_index
index 76ec4d19e4cc..0770b1536fb5 100644
--- a/pkg/sql/schemachanger/scplan/testdata/drop_index
+++ b/pkg/sql/schemachanger/scplan/testdata/drop_index
@@ -387,14 +387,14 @@ PostCommitNonRevertiblePhase stage 1 of 2 with 6 MutationType ops
*scop.MakeDroppedColumnDeleteOnly
ColumnID: 5
TableID: 104
- *scop.RemoveCheckConstraint
- ConstraintID: 2
- TableID: 104
*scop.NotImplemented
ElementType: scpb.ConstraintName
*scop.MakeDroppedIndexDeleteOnly
IndexID: 6
TableID: 104
+ *scop.RemoveCheckConstraint
+ ConstraintID: 2
+ TableID: 104
*scop.SetJobStateOnDescriptor
DescriptorID: 104
*scop.UpdateSchemaChangerJob
@@ -476,6 +476,10 @@ DROP INDEX idx3 CASCADE
to: [Column:{DescID: 104, ColumnID: 5}, ABSENT]
kind: SameStagePrecedence
rules: [dependents removed before column; column type removed right before column when not dropping relation]
+- from: [ConstraintName:{DescID: 104, Name: check_crdb_internal_i_shard_16, ConstraintID: 2}, ABSENT]
+ to: [CheckConstraint:{DescID: 104, ConstraintID: 2}, ABSENT]
+ kind: SameStagePrecedence
+ rule: constraint dependent absent right before constraint
- from: [IndexColumn:{DescID: 104, ColumnID: 1, IndexID: 6}, ABSENT]
to: [SecondaryIndex:{DescID: 104, IndexID: 6, ConstraintID: 0}, ABSENT]
kind: Precedence
diff --git a/pkg/sql/schemachanger/scplan/testdata/drop_table b/pkg/sql/schemachanger/scplan/testdata/drop_table
index 25fc1d81e5c1..43dc214c77d0 100644
--- a/pkg/sql/schemachanger/scplan/testdata/drop_table
+++ b/pkg/sql/schemachanger/scplan/testdata/drop_table
@@ -214,20 +214,6 @@ PostCommitNonRevertiblePhase stage 1 of 2 with 32 MutationType ops
TypeIDs:
- 107
- 108
- *scop.RemoveForeignKeyBackReference
- OriginConstraintID: 2
- OriginTableID: 109
- ReferencedTableID: 104
- *scop.RemoveForeignKeyConstraint
- ConstraintID: 2
- TableID: 109
- *scop.RemoveForeignKeyBackReference
- OriginConstraintID: 3
- OriginTableID: 109
- ReferencedTableID: 105
- *scop.RemoveForeignKeyConstraint
- ConstraintID: 3
- TableID: 109
*scop.MarkDescriptorAsDropped
DescID: 110
*scop.RemoveAllTableComments
@@ -252,6 +238,20 @@ PostCommitNonRevertiblePhase stage 1 of 2 with 32 MutationType ops
TableID: 109
*scop.RemoveOwnerBackReferenceInSequence
SequenceID: 110
+ *scop.RemoveForeignKeyBackReference
+ OriginConstraintID: 2
+ OriginTableID: 109
+ ReferencedTableID: 104
+ *scop.RemoveForeignKeyConstraint
+ ConstraintID: 2
+ TableID: 109
+ *scop.RemoveForeignKeyBackReference
+ OriginConstraintID: 3
+ OriginTableID: 109
+ ReferencedTableID: 105
+ *scop.RemoveForeignKeyConstraint
+ ConstraintID: 3
+ TableID: 109
*scop.DrainDescriptorName
Namespace:
DatabaseID: 100
@@ -754,6 +754,18 @@ DROP TABLE defaultdb.shipments CASCADE;
to: [Column:{DescID: 111, ColumnID: 4294967295}, ABSENT]
kind: Precedence
rule: dependents removed before column
+- from: [ConstraintComment:{DescID: 109, ConstraintID: 2, Comment: customer is not god}, ABSENT]
+ to: [ForeignKeyConstraint:{DescID: 109, ConstraintID: 2, ReferencedDescID: 104}, ABSENT]
+ kind: SameStagePrecedence
+ rule: constraint dependent absent right before constraint
+- from: [ConstraintName:{DescID: 109, Name: fk_customers, ConstraintID: 2}, ABSENT]
+ to: [ForeignKeyConstraint:{DescID: 109, ConstraintID: 2, ReferencedDescID: 104}, ABSENT]
+ kind: SameStagePrecedence
+ rule: constraint dependent absent right before constraint
+- from: [ConstraintName:{DescID: 109, Name: fk_orders, ConstraintID: 3}, ABSENT]
+ to: [ForeignKeyConstraint:{DescID: 109, ConstraintID: 3, ReferencedDescID: 105}, ABSENT]
+ kind: SameStagePrecedence
+ rule: constraint dependent absent right before constraint
- from: [IndexColumn:{DescID: 109, ColumnID: 1, IndexID: 1}, ABSENT]
to: [Column:{DescID: 109, ColumnID: 1}, ABSENT]
kind: Precedence
@@ -1311,6 +1323,12 @@ PostCommitNonRevertiblePhase stage 1 of 2 with 16 MutationType ops
TypeIDs:
- 112
- 113
+ *scop.DrainDescriptorName
+ Namespace:
+ DatabaseID: 100
+ DescriptorID: 114
+ Name: greeter
+ SchemaID: 101
*scop.RemoveCheckConstraint
ConstraintID: 2
TableID: 114
@@ -1319,12 +1337,6 @@ PostCommitNonRevertiblePhase stage 1 of 2 with 16 MutationType ops
TypeIDs:
- 112
- 113
- *scop.DrainDescriptorName
- Namespace:
- DatabaseID: 100
- DescriptorID: 114
- Name: greeter
- SchemaID: 101
*scop.SetJobStateOnDescriptor
DescriptorID: 112
*scop.SetJobStateOnDescriptor
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid
index a45410091d49..483336cfc9b5 100644
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid
+++ b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_add_primary_key_drop_rowid
@@ -11,6 +11,7 @@ begin transaction #1
# begin StatementPhase
checking for feature: ALTER TABLE
increment telemetry for sql.schema.alter_table
+increment telemetry for sql.schema.alter_table.add_constraint
## StatementPhase stage 1 of 1 with 11 MutationType ops
upsert descriptor #104
...
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid
index e2bd1576a5ef..ef6cdd91e324 100644
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid
+++ b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_drop_rowid
@@ -11,6 +11,7 @@ begin transaction #1
# begin StatementPhase
checking for feature: ALTER TABLE
increment telemetry for sql.schema.alter_table
+increment telemetry for sql.schema.alter_table.alter_primary_key
## StatementPhase stage 1 of 1 with 11 MutationType ops
upsert descriptor #104
...
diff --git a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla
index 0c05f907e3f4..9469001dc0ac 100644
--- a/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla
+++ b/pkg/sql/schemachanger/testdata/end_to_end/alter_table_alter_primary_key_vanilla
@@ -12,6 +12,7 @@ begin transaction #1
# begin StatementPhase
checking for feature: ALTER TABLE
increment telemetry for sql.schema.alter_table
+increment telemetry for sql.schema.alter_table.alter_primary_key
## StatementPhase stage 1 of 1 with 12 MutationType ops
upsert descriptor #104
...
diff --git a/pkg/sql/schemachanger/testdata/explain/drop_index_hash_sharded_index b/pkg/sql/schemachanger/testdata/explain/drop_index_hash_sharded_index
index 55e12c246fd8..b7218dab87b8 100644
--- a/pkg/sql/schemachanger/testdata/explain/drop_index_hash_sharded_index
+++ b/pkg/sql/schemachanger/testdata/explain/drop_index_hash_sharded_index
@@ -36,9 +36,9 @@ Schema change plan for DROP INDEX ‹defaultdb›.‹public›.‹t›@‹idx›
│ │ └── PUBLIC → ABSENT ConstraintName:{DescID: 104, Name: check_crdb_internal_j_shard_16, ConstraintID: 2}
│ └── 6 Mutation operations
│ ├── MakeDroppedColumnDeleteOnly {"ColumnID":3,"TableID":104}
- │ ├── RemoveCheckConstraint {"ConstraintID":2,"TableID":104}
│ ├── NotImplemented {"ElementType":"scpb.ConstraintN..."}
│ ├── MakeDroppedIndexDeleteOnly {"IndexID":2,"TableID":104}
+ │ ├── RemoveCheckConstraint {"ConstraintID":2,"TableID":104}
│ ├── SetJobStateOnDescriptor {"DescriptorID":104}
│ └── UpdateSchemaChangerJob {"IsNonCancelable":true,"RunningStatus":"PostCommitNonRev..."}
└── Stage 2 of 2 in PostCommitNonRevertiblePhase
diff --git a/pkg/sql/schemachanger/testdata/explain_verbose/drop_index_hash_sharded_index b/pkg/sql/schemachanger/testdata/explain_verbose/drop_index_hash_sharded_index
index 41a029039426..cfcb983e035c 100644
--- a/pkg/sql/schemachanger/testdata/explain_verbose/drop_index_hash_sharded_index
+++ b/pkg/sql/schemachanger/testdata/explain_verbose/drop_index_hash_sharded_index
@@ -137,7 +137,10 @@ EXPLAIN (ddl, verbose) DROP INDEX idx CASCADE;
│ │ │ VALIDATED → DELETE_ONLY
│ │ │
│ │ ├── • CheckConstraint:{DescID: 104, ConstraintID: 2}
- │ │ │ PUBLIC → ABSENT
+ │ │ │ │ PUBLIC → ABSENT
+ │ │ │ │
+ │ │ │ └── • SameStagePrecedence dependency from ABSENT ConstraintName:{DescID: 104, Name: check_crdb_internal_j_shard_16, ConstraintID: 2}
+ │ │ │ rule: "constraint dependent absent right before constraint"
│ │ │
│ │ └── • ConstraintName:{DescID: 104, Name: check_crdb_internal_j_shard_16, ConstraintID: 2}
│ │ PUBLIC → ABSENT
@@ -148,10 +151,6 @@ EXPLAIN (ddl, verbose) DROP INDEX idx CASCADE;
│ │ ColumnID: 3
│ │ TableID: 104
│ │
- │ ├── • RemoveCheckConstraint
- │ │ ConstraintID: 2
- │ │ TableID: 104
- │ │
│ ├── • NotImplemented
│ │ ElementType: scpb.ConstraintName
│ │
@@ -159,6 +158,10 @@ EXPLAIN (ddl, verbose) DROP INDEX idx CASCADE;
│ │ IndexID: 2
│ │ TableID: 104
│ │
+ │ ├── • RemoveCheckConstraint
+ │ │ ConstraintID: 2
+ │ │ TableID: 104
+ │ │
│ ├── • SetJobStateOnDescriptor
│ │ DescriptorID: 104
│ │
diff --git a/pkg/sql/sem/builtins/builtins.go b/pkg/sql/sem/builtins/builtins.go
index e77c5eae9794..814a75620f92 100644
--- a/pkg/sql/sem/builtins/builtins.go
+++ b/pkg/sql/sem/builtins/builtins.go
@@ -3832,9 +3832,6 @@ value if you rely on the HLC for accuracy.`,
// The behavior of both the JSON and JSONB data types in CockroachDB is
// similar to the behavior of the JSONB data type in Postgres.
- "json_to_recordset": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 33285, Category: builtinconstants.CategoryJSON}),
- "jsonb_to_recordset": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 33285, Category: builtinconstants.CategoryJSON}),
-
"jsonb_path_exists": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 22513, Category: builtinconstants.CategoryJSON}),
"jsonb_path_exists_opr": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 22513, Category: builtinconstants.CategoryJSON}),
"jsonb_path_match": makeBuiltin(tree.FunctionProperties{UnsupportedWithIssue: 22513, Category: builtinconstants.CategoryJSON}),
diff --git a/pkg/sql/sem/builtins/generator_builtins.go b/pkg/sql/sem/builtins/generator_builtins.go
index ae6c39cb4ea9..16fbc20a4867 100644
--- a/pkg/sql/sem/builtins/generator_builtins.go
+++ b/pkg/sql/sem/builtins/generator_builtins.go
@@ -72,6 +72,14 @@ func genPropsWithLabels(returnLabels []string) tree.FunctionProperties {
}
}
+func recordGenProps() tree.FunctionProperties {
+ return tree.FunctionProperties{
+ Class: tree.GeneratorClass,
+ Category: builtinconstants.CategoryGenerator,
+ ReturnsRecordType: true,
+ }
+}
+
var aclexplodeGeneratorType = types.MakeLabeledTuple(
[]*types.T{types.Oid, types.Oid, types.String, types.Bool},
[]string{"grantor", "grantee", "privilege_type", "is_grantable"},
@@ -378,6 +386,11 @@ var generators = map[string]builtinDefinition{
"jsonb_populate_recordset": makeBuiltin(jsonPopulateProps, makeJSONPopulateImpl(makeJSONPopulateRecordSetGenerator,
"Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base")),
+ "json_to_record": makeBuiltin(recordGenProps(), jsonToRecordImpl),
+ "jsonb_to_record": makeBuiltin(recordGenProps(), jsonToRecordImpl),
+ "json_to_recordset": makeBuiltin(recordGenProps(), jsonToRecordSetImpl),
+ "jsonb_to_recordset": makeBuiltin(recordGenProps(), jsonToRecordSetImpl),
+
"crdb_internal.check_consistency": makeBuiltin(
tree.FunctionProperties{
Class: tree.GeneratorClass,
@@ -1381,6 +1394,26 @@ var jsonEachTextImpl = makeGeneratorOverload(
volatility.Immutable,
)
+var jsonToRecordImpl = makeGeneratorOverload(
+ tree.ArgTypes{{"input", types.Jsonb}},
+ // NOTE: this type will never actually get used. It is replaced in the
+ // optimizer by looking at the most recent AS alias clause.
+ types.EmptyTuple,
+ makeJSONRecordGenerator,
+ "Builds an arbitrary record from a JSON object.",
+ volatility.Stable,
+)
+
+var jsonToRecordSetImpl = makeGeneratorOverload(
+ tree.ArgTypes{{"input", types.Jsonb}},
+ // NOTE: this type will never actually get used. It is replaced in the
+ // optimizer by looking at the most recent AS alias clause.
+ types.EmptyTuple,
+ makeJSONRecordSetGenerator,
+ "Builds an arbitrary set of records from a JSON array of objects.",
+ volatility.Stable,
+)
+
var jsonEachGeneratorLabels = []string{"key", "value"}
var jsonEachGeneratorType = types.MakeLabeledTuple(
@@ -1665,6 +1698,134 @@ func (j *jsonPopulateRecordSetGenerator) Values() (tree.Datums, error) {
return output.D, nil
}
+func makeJSONRecordGenerator(evalCtx *eval.Context, args tree.Datums) (eval.ValueGenerator, error) {
+ target := tree.MustBeDJSON(args[0])
+ return &jsonRecordGenerator{
+ evalCtx: evalCtx,
+ target: target.JSON,
+ }, nil
+}
+
+type jsonRecordGenerator struct {
+ evalCtx *eval.Context
+ target json.JSON
+
+ wasCalled bool
+ values tree.Datums
+ types []*types.T
+ labels []string
+ // labelToRowIndexMap maps the column label to its position within the row.
+ labelToRowIndexMap map[string]int
+}
+
+func (j *jsonRecordGenerator) SetAlias(types []*types.T, labels []string) error {
+ j.types = types
+ j.labels = labels
+ j.labelToRowIndexMap = make(map[string]int)
+ for i := range types {
+ j.labelToRowIndexMap[j.labels[i]] = i
+ }
+ if len(types) != len(labels) {
+ return errors.AssertionFailedf("unexpected mismatched types/labels list in json record generator %v %v", types, labels)
+ }
+ return nil
+}
+
+func (j jsonRecordGenerator) ResolvedType() *types.T {
+ return types.AnyTuple
+}
+
+func (j *jsonRecordGenerator) Start(ctx context.Context, _ *kv.Txn) error {
+ j.values = make(tree.Datums, len(j.types))
+ if j.target.Type() != json.ObjectJSONType {
+ return pgerror.Newf(pgcode.InvalidParameterValue,
+ "invalid non-object argument to json_to_record")
+ }
+ return nil
+}
+
+func (j *jsonRecordGenerator) Next(ctx context.Context) (bool, error) {
+ if j.wasCalled {
+ return false, nil
+ }
+ for i := range j.values {
+ j.values[i] = tree.DNull
+ }
+ iter, err := j.target.ObjectIter()
+ if err != nil {
+ return false, err
+ }
+ for iter.Next() {
+ idx, ok := j.labelToRowIndexMap[iter.Key()]
+ if !ok {
+ continue
+ }
+ v := iter.Value()
+ datum, err := eval.PopulateDatumWithJSON(j.evalCtx, v, j.types[idx])
+ if err != nil {
+ return false, err
+ }
+ j.values[idx] = datum
+ }
+
+ j.wasCalled = true
+ return true, nil
+}
+
+func (j jsonRecordGenerator) Values() (tree.Datums, error) {
+ return j.values, nil
+}
+
+func (j jsonRecordGenerator) Close(ctx context.Context) {}
+
+type jsonRecordSetGenerator struct {
+ jsonRecordGenerator
+
+ arr tree.DJSON
+ nextIndex int
+}
+
+func makeJSONRecordSetGenerator(
+ evalCtx *eval.Context, args tree.Datums,
+) (eval.ValueGenerator, error) {
+ arr := tree.MustBeDJSON(args[0])
+ return &jsonRecordSetGenerator{
+ arr: arr,
+ jsonRecordGenerator: jsonRecordGenerator{
+ evalCtx: evalCtx,
+ },
+ }, nil
+}
+
+func (j *jsonRecordSetGenerator) Start(ctx context.Context, _ *kv.Txn) error {
+ j.values = make(tree.Datums, len(j.types))
+ if j.arr.Type() != json.ArrayJSONType {
+ return pgerror.Newf(pgcode.InvalidParameterValue,
+ "argument to json_to_recordset must be an array of objects")
+ }
+ j.nextIndex = -1
+ return nil
+}
+
+func (j *jsonRecordSetGenerator) Next(ctx context.Context) (bool, error) {
+ j.nextIndex++
+ next, err := j.arr.FetchValIdx(j.nextIndex)
+ if err != nil || next == nil {
+ return false, err
+ }
+ if next.Type() != json.ObjectJSONType {
+ return false, pgerror.Newf(pgcode.InvalidParameterValue,
+ "argument to json_to_recordset must be an array of objects")
+ }
+ j.target = next
+ j.wasCalled = false
+ _, err = j.jsonRecordGenerator.Next(ctx)
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
type checkConsistencyGenerator struct {
consistencyChecker eval.ConsistencyCheckRunner
from, to roachpb.Key
diff --git a/pkg/sql/sem/eval/generators.go b/pkg/sql/sem/eval/generators.go
index a3e9f3ad47bc..111b9845affd 100644
--- a/pkg/sql/sem/eval/generators.go
+++ b/pkg/sql/sem/eval/generators.go
@@ -83,6 +83,12 @@ type ValueGenerator interface {
Close(ctx context.Context)
}
+// AliasAwareValueGenerator is a value generator that can inspect the alias with
+// which it was invoked. SetAlias will always be run before Start.
+type AliasAwareValueGenerator interface {
+ SetAlias(types []*types.T, labels []string) error
+}
+
// CallbackValueGenerator is a ValueGenerator that calls a supplied callback for
// producing the values. To be used with
// eval.TestingKnobs.CallbackGenerators.
diff --git a/pkg/sql/sem/tree/function_definition.go b/pkg/sql/sem/tree/function_definition.go
index 4eb323ded2ff..16767b7bc781 100644
--- a/pkg/sql/sem/tree/function_definition.go
+++ b/pkg/sql/sem/tree/function_definition.go
@@ -131,6 +131,16 @@ type FunctionProperties struct {
// VectorizeStreaming indicates that the function is of "streaming" nature
// from the perspective of the vectorized execution engine.
VectorizeStreaming bool
+
+ // ReturnsRecordType indicates that this function is a record-returning
+ // function, which implies that it's unusable without a corresponding type
+ // alias.
+ //
+ // For example, consider the case of json_to_record('{"a":"b", "c":"d"}').
+ // This function returns an error unless it as an `AS t(a,b,c)` declaration,
+ // since its definition is to pick out the JSON attributes within the input
+ // that match, by name, to the columns in the aliased record type.
+ ReturnsRecordType bool
}
// ShouldDocument returns whether the built-in function should be included in
diff --git a/pkg/sql/sem/tree/select.go b/pkg/sql/sem/tree/select.go
index 2c5da2703a8f..3743b8423ec3 100644
--- a/pkg/sql/sem/tree/select.go
+++ b/pkg/sql/sem/tree/select.go
@@ -188,24 +188,55 @@ func (node *SelectExpr) Format(ctx *FmtCtx) {
}
}
-// AliasClause represents an alias, optionally with a column list:
-// "AS name" or "AS name(col1, col2)".
+// AliasClause represents an alias, optionally with a column def list:
+// "AS name", "AS name(col1, col2)", or "AS name(col1 INT, col2 STRING)".
+// Note that the last form is only valid in the context of record-returning
+// functions, which also require the last form to define their output types.
type AliasClause struct {
Alias Name
- Cols NameList
+ Cols ColumnDefList
}
// Format implements the NodeFormatter interface.
-func (a *AliasClause) Format(ctx *FmtCtx) {
- ctx.FormatNode(&a.Alias)
- if len(a.Cols) != 0 {
+func (f *AliasClause) Format(ctx *FmtCtx) {
+ ctx.FormatNode(&f.Alias)
+ if len(f.Cols) != 0 {
// Format as "alias (col1, col2, ...)".
ctx.WriteString(" (")
- ctx.FormatNode(&a.Cols)
+ ctx.FormatNode(&f.Cols)
ctx.WriteByte(')')
}
}
+// ColumnDef represents a column definition in the context of a record type
+// alias, like in select * from json_to_record(...) AS foo(a INT, b INT).
+type ColumnDef struct {
+ Name Name
+ Type ResolvableTypeReference
+}
+
+// Format implements the NodeFormatter interface.
+func (c *ColumnDef) Format(ctx *FmtCtx) {
+ ctx.FormatNode(&c.Name)
+ if c.Type != nil {
+ ctx.WriteByte(' ')
+ ctx.WriteString(c.Type.SQLString())
+ }
+}
+
+// ColumnDefList represents a list of ColumnDefs.
+type ColumnDefList []ColumnDef
+
+// Format implements the NodeFormatter interface.
+func (c *ColumnDefList) Format(ctx *FmtCtx) {
+ for i := range *c {
+ if i > 0 {
+ ctx.WriteString(", ")
+ }
+ ctx.FormatNode(&(*c)[i])
+ }
+}
+
// AsOfClause represents an as of time.
type AsOfClause struct {
Expr Expr
|